filename | text
---|---|
the-stack_0_13625 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_extract_key_phrases_async.py
DESCRIPTION:
This sample demonstrates how to extract key talking points from a batch of documents.
USAGE:
python sample_extract_key_phrases_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
import asyncio
class ExtractKeyPhrasesSampleAsync(object):
endpoint = os.getenv("AZURE_TEXT_ANALYTICS_ENDPOINT")
key = os.getenv("AZURE_TEXT_ANALYTICS_KEY")
async def extract_key_phrases_async(self):
# [START batch_extract_key_phrases_async]
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import TextAnalyticsApiKeyCredential
text_analytics_client = TextAnalyticsClient(endpoint=self.endpoint, credential=TextAnalyticsApiKeyCredential(self.key))
documents = [
"Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.",
"I need to take my cat to the veterinarian.",
"I will travel to South America in the summer.",
]
async with text_analytics_client:
result = await text_analytics_client.extract_key_phrases(documents)
for doc in result:
if not doc.is_error:
print(doc.key_phrases)
if doc.is_error:
print(doc.id, doc.error)
# [END batch_extract_key_phrases_async]
async def main():
sample = ExtractKeyPhrasesSampleAsync()
await sample.extract_key_phrases_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
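# Note: on Python 3.7+ the event-loop boilerplate above can be replaced with a single call:
#     asyncio.run(main())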
|
the-stack_0_13626 | import os
from indico import IndicoClient, IndicoConfig
# Will connect to https://app.indico.io
client = IndicoClient()
# Environment variables override defaults
os.environ["INDICO_HOST"] = "foo.bar.com"
# Will connect to https://foo.bar.com
client = IndicoClient()
# IndicoConfig will override environment variables and defaults
my_config = IndicoConfig(
host="indico.my-company.com", # Overrides environment variable
api_token_path="../path/to/custom_api_token.txt",
)
# Will connect to https://indico.my-company.com
client = IndicoClient(config=my_config)
|
the-stack_0_13627 | #!/usr/bin/env python3
import common_test_lib
import os
import subprocess
import argparse
#######################################################################################
# edit test parameters into these lists to run different workloads
ibof_root = os.path.dirname(os.path.abspath(__file__)) + "/../../../"
#######################################################################################
stdout_type = subprocess.STDOUT
unittest_path = ["src/bio/ubio_error_test", "src/device/unvme/mdts_detach_test"]
def build_ibofos_library_option():
current_path = os.getcwd()
os.chdir(ibof_root)
subprocess.call(["./configure", \
"--with-library-build"])
ret = subprocess.call(["make", "-j4"])
if(ret != 0):
print("\tBuild Failed !!")
exit(-1)
os.chdir(current_path)
def build_and_test(fabric_ip):
current_path = os.getcwd()
for test_path in unittest_path:
common_test_lib.print_start(test_path);
os.chdir(ibof_root)
os.chdir(ibof_root+test_path)
ret = subprocess.call(["make"])
if(ret != 0):
print("\tMake failed for %s" % (test_path))
exit(-1)
test_name = test_path.split('/')[-1]
common_test_lib.kill_and_wait([test_name, "poseidonos", "fio"])
ret = subprocess.call(["./" + test_name, "-a", fabric_ip])
if (ret != 0 and ret != -9): #Sigkill is correct.
print("\tTest failed for %s, ret : %d" % (test_path, ret))
exit(-1)
os.chdir(current_path)
default_target_ip = "10.100.11.9"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='IO Unit Test')
parser.add_argument('-f', '--fabric_ip', default=default_target_ip,\
help='Set target IP, default: ' + default_target_ip)
args = parser.parse_args()
if(args.fabric_ip != None):
default_target_ip = args.fabric_ip
build_ibofos_library_option()
print (default_target_ip)
build_and_test(default_target_ip)
|
the-stack_0_13628 | import gevent
import io
import logging
import re
from datetime import datetime
from gevent import Greenlet, sleep
from gtts import gTTS
from .blob import blobs
from .hashtag import HashtagModel
from .twitter import get
from .tweet import TweetModel
logger = logging.getLogger('umahuesla')
class Crawler(Greenlet):
def run(self):
while True:
gevent.joinall([gevent.spawn(self.do_it)])
sleep(120)
def do_it(self):
hashtags = HashtagModel.query().filter(
HashtagModel.active == True # noqa
).all()
for hashtag in hashtags:
kwargs = {
'term': f'#{hashtag.tag} -filter:nativeretweets'
}
res = get(**kwargs)
for item in res:
self.store(item, hashtag.tag)
def store(self, item, hashtag):
tweet = TweetModel.get(item.id_str)
if not tweet:
text = re.sub(r'https?:\/\/.*[\r\n]*', '', item.full_text)
tts = gTTS(text.replace('#', '').replace('@', ''), lang='de')
stream = io.BytesIO()
tts.write_to_fp(stream)
res = blobs.create(stream)
alexa_text = text.replace('#', '<phoneme alphabet="ipa" ph="ˈhæʃtæɡ">#</phoneme>')
video_url = None
if item.media:
for i in item.media:
if i.type == 'video' and i.video_info:
bitrate = 0
for var in i.video_info.get('variants', []):
if var.get('bitrate', 0) > bitrate:
bitrate = var['bitrate']
video_url = var.get('url')
try:
logger.info(f"{item.user.name} - {text}")
tweet = TweetModel(
uid=item.id_str,
update_date=datetime.fromtimestamp(item.created_at_in_seconds),
title_text=item.user.name,
main_text=alexa_text,
stream_id=res,
video_url=video_url,
hashtags=[hashtag]
)
tweet.add_to_session()
tweet.session.flush()
except Exception as e:
logger.error(e)  # Exception objects have no .message attribute in Python 3
elif hashtag not in tweet.hashtags:
tweet.hashtags.append(hashtag)
tweet.session.flush()
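# A minimal sketch of starting the crawler greenlet (assumes the surrounding application has already
# configured the database session, the hashtag models and the Twitter credentials):
#     crawler = Crawler()
#     crawler.start()
#     crawler.join()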
|
the-stack_0_13629 | import logging
from ..redislist import RedisDropboxIndexList
from .solrupdater import DropboxSolrUpdater
log = logging.getLogger('dropbox')
class DropboxIndexer:
"""
Read all Dropbox entries stored in Redis for a `bearertoken_id` and send them to Solr.
Parameters:
bearertoken_id -- a `models.BearerToken.id`.
access_token -- a `models.BearerToken.access_token`.
"""
def __init__(self, bearertoken_id, access_token):
self.bearertoken_id = bearertoken_id
self.access_token = access_token
def run(self):
redis = RedisDropboxIndexList(self.bearertoken_id)
solr_updater = DropboxSolrUpdater(self.bearertoken_id)
for redis_entry in redis.iterate():
# `redis_entry` is a `RedisDropboxEntry` instance.
# If:
# - `redis_entry.is_del()`: delete the file from Sorl
# - `redis_entry.is_reset()`: delete the entire index from Solr
# - `redis_entry.is_add()`: add the file to Solr (the file has already
# been downloaded locally)
#
# Bear in mind that:
# - entries with `redis_entry.is_add()` are only files (no dirs cause they have
# already been filtered out)
# - entries with `redis_entry.is_del()`: we don't know if they are files or dir
# but we don't care since during indexing we ask Solr to delete: name and name/*
# And a sanity check is run when creating a `RedisDropboxEntry` instance.
if redis_entry.is_del():
log.debug('Solr DEL: {}'.format(redis_entry.remote_path))
solr_updater.delete(redis_entry)
if redis_entry.is_reset():
log.debug('Solr RESET')
solr_updater.reset()
if redis_entry.is_add():
log.debug('Solr ADD: {}'.format(redis_entry.remote_path))
solr_updater.add(redis_entry)
solr_updater.commit() |
the-stack_0_13631 | from math import sqrt
import pytest
import torch
from torch_geometric.testing import withPackage
from torch_geometric.utils import geodesic_distance
@withPackage('gdist')
@pytest.mark.skip(reason="No way of currently testing this")
def test_geodesic_distance():
pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])
face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t()
out = geodesic_distance(pos, face)
expected = [
[0, 1, 1, sqrt(2)],
[1, 0, sqrt(2), 1],
[1, sqrt(2), 0, 1],
[sqrt(2), 1, 1, 0],
]
assert torch.allclose(out, torch.tensor(expected))
assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1))
out = geodesic_distance(pos, face, norm=False)
expected = [
[0, 2, 2, 2 * sqrt(2)],
[2, 0, 2 * sqrt(2), 2],
[2, 2 * sqrt(2), 0, 2],
[2 * sqrt(2), 2, 2, 0],
]
assert torch.allclose(out, torch.tensor(expected))
src = torch.tensor([0, 0, 0, 0])
dest = torch.tensor([0, 1, 2, 3])
out = geodesic_distance(pos, face, src=src, dest=dest)
expected = [0, 1, 1, sqrt(2)]
assert torch.allclose(out, torch.tensor(expected))
out = geodesic_distance(pos, face, dest=dest)
expected = [0, 0, 0, 0]
assert torch.allclose(out, torch.Tensor(expected))
|
the-stack_0_13635 | #
# voter business logic: put commits here
#
import config
from model import Voter
db = config.db
import voter_dao
import user
def insert_voters_array(votation_id, ar):
"""returns number of inserted rows"""
count = 0
for user_name in ar:
u = user.load_user_by_username(user_name)
if u:
n = db.session.query(Voter).filter(Voter.user_id == u.user_id,Voter.votation_id==votation_id).count()
if n == 0:
o = Voter(votation_id = votation_id, user_id = u.user_id, voted = 0)
if voter_dao.insert_dto(o):
count += 1
if count > 0:
db.session.commit()
return count
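# Example call (hypothetical usernames); the return value is the number of users that were
# newly registered as voters for the given votation:
#     inserted = insert_voters_array(votation_id=7, ar=["alice", "bob"])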
|
the-stack_0_13636 |
import os, pathlib, PIL
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
from ResNet18 import ResNet18
from ResNet18V2 import ResNet18V2
from tensorflow.keras.applications.resnet import ResNet50
from tensorflow.keras.applications.resnet import ResNet101
from tensorflow.keras.applications.resnet import ResNet152
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.keras.applications.resnet_v2 import ResNet101V2
from tensorflow.keras.applications.resnet_v2 import ResNet152V2
class ResNet(Model):
def __init__(self, data_shape=(224, 224, 3), resnet_version=1, resnet_layer_number=50, num_classes=1000):
super(ResNet, self).__init__()
weights = None
if num_classes == 1000 and data_shape == (224, 224, 3):
weights = 'imagenet'
self.resnet_version = resnet_version
self.data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip(
"horizontal",
input_shape=data_shape),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
self.rescaling = layers.experimental.preprocessing.Rescaling(1./255)
def preprocess_input(x, data_format=None):
from tensorflow.keras.applications import imagenet_utils
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode='tf')
#return x
self.preprocess_input = preprocess_input
if resnet_layer_number == 18:
if resnet_version == 1:
self.resnet = ResNet18(category_num=num_classes)
else:
self.resnet = ResNet18V2(category_num=num_classes)
elif resnet_layer_number == 50:
if resnet_version == 1:
self.resnet = ResNet50(weights=weights, input_shape=data_shape, classes=num_classes)
else:
self.resnet = ResNet50V2(weights=weights, input_shape=data_shape, classes=num_classes)
elif resnet_layer_number == 101:
if resnet_version == 1:
self.resnet = ResNet101(weights=weights, input_shape=data_shape, classes=num_classes)
else:
self.resnet = ResNet101V2(weights=weights, input_shape=data_shape, classes=num_classes)
elif resnet_layer_number == 152:
if resnet_version == 1:
self.resnet = ResNet152(weights=weights, input_shape=data_shape, classes=num_classes)
else:
self.resnet = ResNet152V2(weights=weights, input_shape=data_shape, classes=num_classes)
self.build((None,) + data_shape)
def call(self, x):
x = self.data_augmentation(x)
x = self.rescaling(x)
x = self.preprocess_input(x)
x = tf.keras.applications.mobilenet.preprocess_input(x)
x = self.resnet(x)
return x
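# A minimal construction sketch (the shape and class count are illustrative, not required values):
#     model = ResNet(data_shape=(224, 224, 3), resnet_version=2, resnet_layer_number=50, num_classes=10)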
class ResNetWork():
def __init__(self, args):
# dataset
train_data_dir = pathlib.Path(args.train_dataset_path)
test_data_dir = pathlib.Path(args.test_dataset_path)
self.image_height = args.image_height
self.image_width = args.image_width
data_shape = (args.image_height, args.image_width, 3)
batch_size = args.batchsize
pretrain_model_path_or_dir = args.pre_train_model_path_dir
# create model
self.model = ResNet(
data_shape = data_shape,
resnet_version=args.resnet_version,
resnet_layer_number=args.resnet_layer_number,
num_classes=args.classes)
if os.path.exists(pretrain_model_path_or_dir):
if args.use_whole_network_model:
dir = pretrain_model_path_or_dir
self.model = keras.models.load_model(dir)
print("Whole network load from {} dir".format(dir))
else:
path = pretrain_model_path_or_dir
self.model.load_weights(path)
print("Network model load from {}".format(path))
# Optimization
self.learning_rate = args.lr
self.epochs = args.epochs
if args.opt_type == 'Adam':
self.optimizer = tf.keras.optimizers.Adam(
learning_rate=args.lr)
elif args.opt_type == 'SGD':
self.optimizer = tf.keras.optimizers.SGD(
learning_rate=args.lr,
momentum=args.momentum)
elif args.opt_type == 'Adadelta':
self.optimizer = tf.keras.optimizers.Adadelta(
learning_rate=args.lr)
elif args.opt_type == 'Adamax':
self.optimizer = tf.keras.optimizers.Adamax(
learning_rate=args.lr)
elif args.opt_type == 'Ftrl':
self.optimizer = tf.keras.optimizers.Ftrl(
learning_rate=args.lr)
elif args.opt_type == 'Nadam':
self.optimizer = tf.keras.optimizers.Nadam(
learning_rate=args.lr)
else:
self.optimizer = tf.keras.optimizers.RMSprop(
learning_rate=args.lr)
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# get the data set
train_image_count = 0
train_image_count += len(list(train_data_dir.glob('*/*.jpg')))
train_image_count += len(list(train_data_dir.glob('*/*.JPEG')))
print("train image number:", train_image_count)
test_image_count = 0
test_image_count += len(list(test_data_dir.glob('*/*.jpg')))
test_image_count += len(list(test_data_dir.glob('*/*.JPEG')))
print("Test image number:", test_image_count)
# train dataset
self.train_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_data_dir,
#subset="training",
seed=123,
image_size=(args.image_height, args.image_width),
batch_size=batch_size)
self.class_names = self.train_ds.class_names
self.train_ds = self.train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# valid/test dataset
self.test_ds = tf.keras.preprocessing.image_dataset_from_directory(
test_data_dir,
#subset="validation",
seed=123,
image_size=(args.image_height, args.image_width),
batch_size=batch_size)
self.test_ds = self.test_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
self.test_loss = tf.keras.metrics.Mean(name='valid_loss')
self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='vaild_accuracy')
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
predictions = self.model(images)
loss = self.loss_object(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
self.train_loss(loss)
self.train_accuracy(labels, predictions)
# [end train_step]
@tf.function
def test_step(self, images, labels):
predictions = self.model(images)
t_loss = self.loss_object(labels, predictions)
self.test_loss(t_loss)
self.test_accuracy(labels, predictions)
# [end test_step]
def train(self):
# Model summary
self.model.summary()
for epoch in range(self.epochs):
self.train_loss.reset_states()
self.train_accuracy.reset_states()
self.test_loss.reset_states()
self.test_accuracy.reset_states()
try:
with tqdm(self.train_ds, ncols=80) as t:
for images, labels in t:
self.train_step(images, labels)
template = '[Train\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'
template = template.format(epoch+1, self.train_loss.result(), self.train_accuracy.result()*100)
t.set_description(desc=template)
except KeyboardInterrupt:
t.close()
raise
try:
with tqdm(self.test_ds, ncols=80) as t:
for test_images, test_labels in t:
self.test_step(test_images, test_labels)
template = '[Test\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'
template = template.format(epoch+1, self.test_loss.result(), self.test_accuracy.result()*100)
t.set_description(desc=template)
except KeyboardInterrupt:
t.close()
raise
# [end train]
def saveModel(self, path_or_dir, mode='save_weight'):
if mode == 'save_weight':
path = path_or_dir
self.model.save_weights(path)
print("Network model save to {}".format(path))
elif mode == 'whole_network':
dir = path_or_dir
self.model.save(dir)
print("Whole network save to {} dir".format(dir))
# [end saveModel]
def test(self, args):
if not os.path.exists(args.test_image):
return
image_path = args.test_image
img = keras.preprocessing.image.load_img(
image_path, target_size=(
args.image_height,
args.image_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = self.model.predict(img_array)
score = tf.nn.softmax(predictions[0])
import numpy as np
print("{} most likely belongs to {} with a {:.2f} percent confidence.".format(image_path, self.class_names[np.argmax(score)], 100 * np.max(score)))
# [end test]
|
the-stack_0_13637 | ###########
# testing #
###########
from exhibitionist.isubscriber import ISubscriber
from exhibitionist.pubsubdispatch import PubSubDispatch
import unittest
import time
import threading
class IOLoopMock(object):
def add_callback(self, callback):
# import random
# time.sleep(random.random()*0.05)
callback()
pass
def running(self):
return True
class Testpubsubdispatch(unittest.TestCase):
def setUp(self):
self.pubsubdispatch = PubSubDispatch(IOLoopMock())
def tearDown(self):
pass
@staticmethod
def wait_for_predicate(pred, timeout, interval=None):
interval = interval or timeout
waited = 0
while waited <= timeout:
if pred():
return True
time.sleep(interval)
waited += interval
return False
def test_message_rx_tx(self):
l = []
class A(ISubscriber):
def notify(self,channel, payload):
l.append(payload)
self.pubsubdispatch.subscribe(A(), "ch1")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch1", payload="the payload")
self.wait_for_predicate(lambda: len(l), 1, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload")
# and again
self.pubsubdispatch.publish(channel="ch1", payload="the payload2")
self.wait_for_predicate(lambda: len(l), 1, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload2")
# two receivers
self.assertEqual(len(l), 0)
self.pubsubdispatch.subscribe(A(), "ch1")
self.pubsubdispatch.publish(channel="ch1", payload="the payload3")
self.wait_for_predicate(lambda: len(l) >= 2, 1, 0.001)
self.assertEqual(len(l), 2)
self.assertEqual(l.pop(), "the payload3")
self.assertEqual(l.pop(), "the payload3")
# just the registered channels get the messages for a channel
self.assertEqual(len(l), 0)
self.pubsubdispatch.subscribe(A(), "ch2")
self.pubsubdispatch.publish(channel="ch1", payload="the payload4")
self.wait_for_predicate(lambda: len(l) >= 2, 1, 0.001)
self.assertEqual(len(l), 2)
self.assertEqual(l.pop(), "the payload4")
self.assertEqual(l.pop(), "the payload4")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch2", payload="the payload5")
self.wait_for_predicate(lambda: len(l) >= 1, 1, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload5")
def test_make_sure_we_dont_recieve_our_own_message(self):
l = []
class A(ISubscriber):
def notify(self,channel, payload):
l.append(payload)
a=A()
#do
self.pubsubdispatch.subscribe(a, "ch1")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch1", payload="the payload")
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), "the payload")
#don't
self.pubsubdispatch.publish(channel="ch1", payload="the payload",exclude=a)
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 0)
def test_make_sure_we_dont_recieve_our_own_message_multiple_subs(self):
# make sure the other subscriber does get it, no matter the subscribe order
l = []
class A(ISubscriber):
def notify(self,channel, payload):
l.append(self)
a=A()
b=A()
#do
self.pubsubdispatch.subscribe(a, "ch1")
self.pubsubdispatch.subscribe(b, "ch1")
self.assertEqual(len(l), 0)
self.pubsubdispatch.publish(channel="ch1", payload="the payload",exclude=a)
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), b)
self.pubsubdispatch.publish(channel="ch1", payload="the payload",exclude=b)
self.wait_for_predicate(lambda: len(l), 0.2, 0.001)
self.assertEqual(len(l), 1)
self.assertEqual(l.pop(), a) |
the-stack_0_13638 | with open('python.txt') as file_object:
contents = file_object.read()
print(contents.replace('python', 'java')) # use the replace() method to replace any word in a string with a different word.
#print(contents)
#Reading line by line.
filename = 'python.txt'
with open(filename) as object:
for line in object:
print(line.rstrip())
#Making a list of lines from a file
filename = 'python.txt'
with open (filename) as object:
lines = object.readlines()
"""counting total letters"""
string = ''  # start from an empty string so the initial space is not counted as a letter
for line in lines:
string += line.strip()
print(line.rstrip())
print(len(line))
|
the-stack_0_13639 | import cv2
import numpy as np
cap = cv2.VideoCapture('video.mp4')
while(1):
# Take each frame
ret, frame = cap.read()  # cap.read() returns a (success, frame) tuple
if not ret:  # stop once the video ends or a frame cannot be read
break
print(frame)
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
print(hsv)
# define range of blue color in HSV
lower_blue = np.array([110, 50, 50])
upper_blue = np.array([130, 255, 255])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()  # release the capture before closing the display windows
cv2.destroyAllWindows()
|
the-stack_0_13640 | from __future__ import with_statement, absolute_import
import time
from contextlib import closing
import psycopg2
from . import print_row_progress, status_logger
from .postgres_writer import PostgresWriter
class PostgresDbWriter(PostgresWriter):
"""Class used to stream DDL and/or data
from a MySQL server to a PostgreSQL server.
:Parameters:
- `db_options`: :py:obj:`dict` containing connection specific variables
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
class FileObjFaker(object):
"""A file-like class to support streaming
table data directly to :py:meth:`psycopg2.copy_from`.
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `data`:
- `processor`:
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
def __init__(self, table, data, processor, verbose=False):
self.data = iter(data)
self.table = table
self.processor = processor
self.verbose = verbose
if verbose:
self.idx = 1
self.start_time = time.time()
self.prev_val_len = 0
self.prev_idx = 0
def readline(self, *args, **kwargs):
try:
row = list(self.data.next())
except StopIteration:
if self.verbose:
print('')
return ''
else:
self.processor(self.table, row)
try:
return '%s\n' % ('\t'.join(row))
except UnicodeDecodeError:
return '%s\n' % ('\t'.join(r.decode('utf8') for r in row))
finally:
if self.verbose:
if (self.idx % 20000) == 0:
now = time.time()
elapsed = now - self.start_time
val = '%.2f rows/sec [%s] ' % ((self.idx - self.prev_idx) / elapsed, self.idx)
print_row_progress('%s%s' % (("\b" * self.prev_val_len), val)),
self.prev_val_len = len(val) + 3
self.start_time = now
self.prev_idx = self.idx + 0
self.idx += 1
def read(self, *args, **kwargs):
return self.readline(*args, **kwargs)
def __init__(self, db_options, verbose=False, *args, **kwargs):
super(PostgresDbWriter, self).__init__(*args, **kwargs)
self.execute_error_log = ''
self.verbose = verbose
self.db_options = {
'host': str(db_options['hostname']),
'port': db_options.get('port', 5432),
'database': str(db_options['database']),
'password': str(db_options.get('password', None)) or '',
'user': str(db_options['username']),
}
if ':' in str(db_options['database']):
self.db_options['database'], self.schema = self.db_options['database'].split(':')
else:
self.schema = None
self.open()
def open(self):
self.conn = psycopg2.connect(**self.db_options)
with closing(self.conn.cursor()) as cur:
if self.schema:
cur.execute('SET search_path TO %s' % self.schema)
cur.execute('SET client_encoding = \'UTF8\'')
if self.conn.server_version >= 80200:
cur.execute('SET standard_conforming_strings = off')
cur.execute('SET check_function_bodies = false')
cur.execute('SET client_min_messages = warning')
def query(self, sql, args=(), one=False):
with closing(self.conn.cursor()) as cur:
cur.execute(sql, args)
return cur.fetchone() if one else cur
def execute(self, sql, args=(), many=False):
with closing(self.conn.cursor()) as cur:
try:
if many:
cur.executemany(sql, args)
else:
cur.execute(sql, args)
except Exception as e:
self.execute_error_log += '\n######POSTGRES SCRIPTS:######\n '+sql+'\n######ERROR:######\n '+str(e)
print('ERROR: '+str(e))
self.conn.commit()
def copy_from(self, file_obj, table_name, columns):
with closing(self.conn.cursor()) as cur:
cur.copy_from(file_obj,
table=table_name,
columns=columns
)
self.conn.commit()
def close(self):
"""Closes connection to the PostgreSQL server"""
self.conn.close()
def exists(self, relname):
rc = self.query('SELECT COUNT(*) FROM pg_class WHERE relname = %s', (relname, ), one=True)
return rc and int(rc[0]) == 1
@status_logger
def truncate(self, table):
"""Send DDL to truncate the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
truncate_sql, serial_key_sql = super(PostgresDbWriter, self).truncate(table)
self.execute(truncate_sql)
if serial_key_sql:
self.execute(serial_key_sql)
@status_logger
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql, table_comment_sql = super(PostgresDbWriter, self).write_table(table)
for sql in serial_key_sql + table_sql:
self.execute(sql)
"""Execute comment with the error encoding(sometimes):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe7 in position 94: ordinal not in range(128)
"""
for sql in table_comment_sql:
self.execute(sql)
@status_logger
def write_indexes(self, table):
"""Send DDL to create the specified `table` indexes
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_indexes(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_triggers(self, table):
"""Send DDL to create the specified `table` triggers
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_triggers(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_constraints(self, table):
"""Send DDL to create the specified `table` constraints
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
constraint_sql = super(PostgresDbWriter, self).write_constraints(table)
for sql in constraint_sql:
self.execute(sql)
@status_logger
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns])
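# A minimal usage sketch (connection values are placeholders; `table` and `reader` would come from
# the accompanying MySQL reader classes):
#     writer = PostgresDbWriter({'hostname': 'localhost', 'database': 'mydb',
#                                'username': 'me', 'password': 'secret'}, verbose=True)
#     writer.write_table(table)
#     writer.write_contents(table, reader)
#     writer.close()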
|
the-stack_0_13642 | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException, CMakeTarget, TargetOptions, CMakeConfiguration, language_map, check_cmake_args
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel, ReplyCMakeInputs, ReplyCodeModel
from .fileapi import CMakeFileAPI
from .executor import CMakeExecutor
from .toolchain import CMakeToolchain, CMakeExecScope
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog, mesonlib
from ..mesonlib import MachineChoice, OrderedSet, version_compare, path_is_in_root, relative_to_if_possible, OptionKey
from ..mesondata import mesondata
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from enum import Enum
from functools import lru_cache
from pathlib import Path
import typing as T
import re
from os import environ
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if T.TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
from ..environment import Environment
TYPE_mixed = T.Union[str, int, bool, Path, BaseNode]
TYPE_mixed_list = T.Union[TYPE_mixed, T.Sequence[TYPE_mixed]]
TYPE_mixed_kwargs = T.Dict[str, TYPE_mixed_list]
# Disable all warnings automatically enabled with --trace and friends
# See https://cmake.org/cmake/help/latest/variable/CMAKE_POLICY_WARNING_CMPNNNN.html
disable_policy_warnings = [
'CMP0025',
'CMP0047',
'CMP0056',
'CMP0060',
'CMP0065',
'CMP0066',
'CMP0067',
'CMP0082',
'CMP0089',
'CMP0102',
]
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
skip_targets = ['UTILITY']
blacklist_compiler_flags = [
'-Wall', '-Wextra', '-Weverything', '-Werror', '-Wpedantic', '-pedantic', '-w',
'/W1', '/W2', '/W3', '/W4', '/Wall', '/WX', '/w',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
transfer_dependencies_from = ['header_only']
_cmake_name_regex = re.compile(r'[^_a-zA-Z0-9]')
def _sanitize_cmake_name(name: str) -> str:
name = _cmake_name_regex.sub('_', name)
return 'cm_' + name
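# For illustration (the target name is arbitrary): _sanitize_cmake_name('my-lib::target') -> 'cm_my_lib__target'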
class OutputTargetMap:
rm_so_version = re.compile(r'(\.[0-9]+)+$')
def __init__(self, build_dir: Path):
self.tgt_map = {} # type: T.Dict[str, T.Union['ConverterTarget', 'ConverterCustomTarget']]
self.build_dir = build_dir
def add(self, tgt: T.Union['ConverterTarget', 'ConverterCustomTarget']) -> None:
def assign_keys(keys: T.List[str]) -> None:
for i in [x for x in keys if x]:
self.tgt_map[i] = tgt
keys = [self._target_key(tgt.cmake_name)]
if isinstance(tgt, ConverterTarget):
keys += [tgt.full_name]
keys += [self._rel_artifact_key(x) for x in tgt.artifacts]
keys += [self._base_artifact_key(x) for x in tgt.artifacts]
if isinstance(tgt, ConverterCustomTarget):
keys += [self._rel_generated_file_key(x) for x in tgt.original_outputs]
keys += [self._base_generated_file_key(x) for x in tgt.original_outputs]
assign_keys(keys)
def _return_first_valid_key(self, keys: T.List[str]) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
for i in keys:
if i and i in self.tgt_map:
return self.tgt_map[i]
return None
def target(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._target_key(name)])
def executable(self, name: str) -> T.Optional['ConverterTarget']:
tgt = self.target(name)
if tgt is None or not isinstance(tgt, ConverterTarget):
return None
if tgt.meson_func() != 'executable':
return None
return tgt
def artifact(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
keys = []
candidates = [name, OutputTargetMap.rm_so_version.sub('', name)]
for i in lib_suffixes:
if not name.endswith('.' + i):
continue
new_name = name[:-len(i) - 1]
new_name = OutputTargetMap.rm_so_version.sub('', new_name)
candidates += ['{}.{}'.format(new_name, i)]
for i in candidates:
keys += [self._rel_artifact_key(Path(i)), Path(i).name, self._base_artifact_key(Path(i))]
return self._return_first_valid_key(keys)
def generated(self, name: Path) -> T.Optional['ConverterCustomTarget']:
res = self._return_first_valid_key([self._rel_generated_file_key(name), self._base_generated_file_key(name)])
assert res is None or isinstance(res, ConverterCustomTarget)
return res
# Utility functions to generate local keys
def _rel_path(self, fname: Path) -> T.Optional[Path]:
try:
return fname.resolve().relative_to(self.build_dir)
except ValueError:
pass
return None
def _target_key(self, tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _rel_generated_file_key(self, fname: Path) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relgen_{}__'.format(path.as_posix()) if path else None
def _base_generated_file_key(self, fname: Path) -> str:
return '__gen_{}__'.format(fname.name)
def _rel_artifact_key(self, fname: Path) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relart_{}__'.format(path.as_posix()) if path else None
def _base_artifact_key(self, fname: Path) -> str:
return '__art_{}__'.format(fname.name)
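# For reference, the key formats produced above are: '__tgt_<name>__', '__relgen_<rel/path>__',
# '__gen_<filename>__', '__relart_<rel/path>__' and '__art_<filename>__'.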
class ConverterTarget:
def __init__(self, target: CMakeTarget, env: 'Environment', for_machine: MachineChoice) -> None:
self.env = env
self.for_machine = for_machine
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.cmake_name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = None # type: T.Optional[Path]
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
self.depends_raw = [] # type: T.List[str]
self.depends = [] # type: T.List[T.Union[ConverterTarget, ConverterCustomTarget]]
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = set() # type: T.Set[str]
self.sources = [] # type: T.List[Path]
self.generated = [] # type: T.List[Path]
self.generated_ctgt = [] # type: T.List[CustomTargetReference]
self.includes = [] # type: T.List[Path]
self.sys_includes = [] # type: T.List[Path]
self.link_with = [] # type: T.List[T.Union[ConverterTarget, ConverterCustomTarget]]
self.object_libs = [] # type: T.List[ConverterTarget]
self.compile_opts = {} # type: T.Dict[str, T.List[str]]
self.public_compile_opts = [] # type: T.List[str]
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = [] # type: T.List[str]
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
self.generated_raw = [] # type: T.List[Path]
for i in target.files:
languages = set() # type: T.Set[str]
src_suffixes = set() # type: T.Set[str]
# Insert suffixes
for j in i.sources:
if not j.suffix:
continue
src_suffixes.add(j.suffix[1:])
# Determine the meson language(s)
# Extract the default language from the explicit CMake field
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
languages.add(lang_cmake_to_meson.get(i.language.lower(), 'c'))
# Determine missing languages from the source suffixes
for sfx in src_suffixes:
for key, val in lang_suffixes.items():
if sfx in val:
languages.add(key)
break
# Register the new languages and initialize the compile opts array
for lang in languages:
self.languages.add(lang)
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
for lang in languages:
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x.path for x in i.includes if x.path not in self.includes and not x.isSystem]
self.sys_includes += [x.path for x in i.includes if x.path not in self.sys_includes and x.isSystem]
# Add sources to the right array
if i.is_generated:
self.generated_raw += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: Path, subdir: Path, install_prefix: Path, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard and do additional compiler args manipulation
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
ctgt = output_target_map.generated(Path(j))
if m:
std = m.group(2)
supported = self._all_lang_stds(i)
if std not in supported:
mlog.warning(
'Unknown {0}_std "{1}" -> Ignoring. Try setting the project-'
'level {0}_std if build errors occur. Known '
'{0}_stds are: {2}'.format(i, std, ' '.join(supported)),
once=True
)
continue
self.override_options += ['{}_std={}'.format(i, std)]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif isinstance(ctgt, ConverterCustomTarget):
# Sometimes projects pass generated source files as compiler
# flags. Add these as generated sources to ensure that the
# corresponding custom target is run.
self.generated_raw += [Path(j)]
temp += [j]
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
tgt = trace.targets.get(self.cmake_name)
if tgt:
self.depends_raw = trace.targets[self.cmake_name].depends
# TODO refactor this copy paste from CMakeDependency for future releases
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
to_process = [self.cmake_name]
processed = []
while len(to_process) > 0:
curr = to_process.pop(0)
if curr in processed or curr not in trace.targets:
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
otherDeps = []
libraries = []
mlog.debug(str(tgt))
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
self.includes += [Path(x) for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_LINK_OPTIONS' in tgt.properties:
self.link_flags += [x for x in tgt.properties['INTERFACE_LINK_OPTIONS'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
self.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
self.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if 'CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['CONFIGURATIONS'] if x]
cfg = cfgs[0]
is_debug = self.env.coredata.get_option(OptionKey('debug'));
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if 'IMPORTED_IMPLIB_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB_{}'.format(cfg)] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif 'IMPORTED_LOCATION_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION_{}'.format(cfg)] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if 'IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg) in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg)] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
for j in otherDeps:
if j in trace.targets:
to_process += [j]
elif reg_is_lib.match(j) or Path(j).exists():
libraries += [j]
for j in libraries:
if j not in self.link_libraries:
self.link_libraries += [j]
processed += [curr]
elif self.type.upper() not in ['EXECUTABLE', 'OBJECT_LIBRARY']:
mlog.warning('CMake: Target', mlog.bold(self.cmake_name), 'not found in CMake trace. This can lead to build errors')
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not Path(i).is_absolute():
link_with = output_target_map.artifact(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.name.endswith(y) for y in supported])]
self.generated_raw = [x for x in self.generated_raw if any([x.name.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: Path, is_header: bool, is_generated: bool) -> T.Optional[Path]:
if not x.is_absolute():
x = self.src_dir / x
x = x.resolve()
assert x.is_absolute()
if not x.exists() and not any([x.name.endswith(y) for y in obj_suffixes]) and not is_generated:
if path_is_in_root(x, Path(self.env.get_build_dir()), resolve=True):
x.mkdir(parents=True, exist_ok=True)
return x.relative_to(Path(self.env.get_build_dir()) / subdir)
else:
mlog.warning('CMake: path', mlog.bold(x.as_posix()), 'does not exist.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if x in trace.explicit_headers:
return None
if (
path_is_in_root(x, Path(self.env.get_source_dir()))
and not (
path_is_in_root(x, root_src_dir) or
path_is_in_root(x, Path(self.env.get_build_dir()))
)
):
mlog.warning('CMake: path', mlog.bold(x.as_posix()), 'is inside the root project but', mlog.bold('not'), 'inside the subproject.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if path_is_in_root(x, Path(self.env.get_build_dir())) and is_header:
return x.relative_to(Path(self.env.get_build_dir()) / subdir)
if path_is_in_root(x, root_src_dir):
return x.relative_to(root_src_dir)
return x
build_dir_rel = self.build_dir.relative_to(Path(self.env.get_build_dir()) / subdir)
self.generated_raw = [rel_path(x, False, True) for x in self.generated_raw]
self.includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.includes)] + [build_dir_rel]))
self.sys_includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.sys_includes)]))
self.sources = [rel_path(x, False, False) for x in self.sources]
# Resolve custom targets
for gen_file in self.generated_raw:
ctgt = output_target_map.generated(gen_file)
if ctgt:
assert isinstance(ctgt, ConverterCustomTarget)
ref = ctgt.get_ref(gen_file)
assert isinstance(ref, CustomTargetReference) and ref.valid()
self.generated_ctgt += [ref]
elif gen_file is not None:
self.generated += [gen_file]
# Remove delete entries
self.includes = [x for x in self.includes if x is not None]
self.sys_includes = [x for x in self.sys_includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
# Make sure '.' is always in the include directories
if Path('.') not in self.includes:
self.includes += [Path('.')]
# make install dir relative to the install prefix
if self.install_dir and self.install_dir.is_absolute():
if path_is_in_root(self.install_dir, install_prefix):
self.install_dir = self.install_dir.relative_to(install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
# Handle OSX frameworks
def handle_frameworks(flags: T.List[str]) -> T.List[str]:
res: T.List[str] = []
for i in flags:
p = Path(i)
if not p.exists() or not p.name.endswith('.framework'):
res += [i]
continue
res += ['-framework', p.stem]
return res
self.link_libraries = handle_frameworks(self.link_libraries)
self.link_flags = handle_frameworks(self.link_flags)
# Handle explicit CMake add_dependency() calls
for i in self.depends_raw:
dep_tgt = output_target_map.target(i)
if dep_tgt:
self.depends.append(dep_tgt)
def process_object_libs(self, obj_target_list: T.List['ConverterTarget'], linker_workaround: bool) -> None:
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if any([x.name.endswith('.' + y) for y in obj_suffixes])]
stem = [x.stem for x in temp]
exts = self._all_source_suffixes()
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [x.name for x in i.sources + i.generated]
for j in stem:
# On some platforms (specifically looking at you Windows with vs20xy backend) CMake does
# not produce object files with the format `foo.cpp.obj`, instead it skips the language
# suffix and just produces object files like `foo.obj`. Thus we have to do our best to
# undo this step and guess the correct language suffix of the object file. This is done
# by trying all language suffixes meson knows and checking if one of them fits.
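# For illustration: an object file `foo.obj` has the stem `foo`, so the names tried against the
# source list would be `foo`, `foo.c`, `foo.cpp`, ... (one candidate per known source suffix).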
candidates = [j] # type: T.List[str]
if not any([j.endswith('.' + x) for x in exts]):
mlog.warning('Object files do not contain source file extensions, thus falling back to guessing them.', once=True)
candidates += ['{}.{}'.format(j, x) for x in exts]
if any([x in source_files for x in candidates]):
if linker_workaround:
self._append_objlib_sources(i)
else:
self.includes += i.includes
self.includes = list(OrderedSet(self.includes))
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not any([x.name.endswith('.' + y) for y in obj_suffixes])]
def _append_objlib_sources(self, tgt: 'ConverterTarget') -> None:
self.includes += tgt.includes
self.sources += tgt.sources
self.generated += tgt.generated
self.generated_ctgt += tgt.generated_ctgt
self.includes = list(OrderedSet(self.includes))
self.sources = list(OrderedSet(self.sources))
self.generated = list(OrderedSet(self.generated))
self.generated_ctgt = list(OrderedSet(self.generated_ctgt))
# Inherit compiler arguments since they may be required for building
for lang, opts in tgt.compile_opts.items():
if lang not in self.compile_opts:
self.compile_opts[lang] = []
self.compile_opts[lang] += [x for x in opts if x not in self.compile_opts[lang]]
@lru_cache(maxsize=None)
def _all_source_suffixes(self) -> T.List[str]:
suffixes = [] # type: T.List[str]
for exts in lang_suffixes.values():
suffixes += [x for x in exts]
return suffixes
@lru_cache(maxsize=None)
def _all_lang_stds(self, lang: str) -> T.List[str]:
try:
res = self.env.coredata.options[OptionKey('std', machine=MachineChoice.BUILD, lang=lang)].choices
except KeyError:
return []
# TODO: Get rid of this once we have proper typing for options
assert isinstance(res, list)
for i in res:
assert isinstance(i, str)
return res
def process_inter_target_dependencies(self) -> None:
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def cleanup_dependencies(self) -> None:
# Clear the dependencies from targets that where moved from
if self.meson_func() in transfer_dependencies_from:
self.depends = []
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir.as_posix() if self.install_dir else ''))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sys_includes: ', mlog.bold(str(self.sys_includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- generated_ctgt: ', mlog.bold(str(self.generated_ctgt)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int) -> None:
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
out_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget, env: 'Environment', for_machine: MachineChoice) -> None:
assert target.current_bin_dir is not None
assert target.current_src_dir is not None
self.name = target.name
if not self.name:
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
ConverterCustomTarget.tgt_counter += 1
self.cmake_name = str(self.name)
self.original_outputs = list(target.outputs)
self.outputs = [x.name for x in self.original_outputs]
self.conflict_map = {} # type: T.Dict[str, str]
self.command = [] # type: T.List[T.List[T.Union[str, ConverterTarget]]]
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = [] # type: T.List[T.Union[str, CustomTargetReference]]
self.depends = [] # type: T.List[T.Union[ConverterTarget, ConverterCustomTarget]]
self.current_bin_dir = target.current_bin_dir # type: Path
self.current_src_dir = target.current_src_dir # type: Path
self.env = env
self.for_machine = for_machine
self._raw_target = target
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
def __repr__(self) -> str:
return '<{}: {} {}>'.format(self.__class__.__name__, self.name, self.outputs)
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: Path, all_outputs: T.List[str], trace: CMakeTraceParser) -> None:
# Default the working directory to ${CMAKE_CURRENT_BINARY_DIR}
if self.working_dir is None:
self.working_dir = self.current_bin_dir
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR}
if not self.working_dir.is_absolute():
self.working_dir = self.current_bin_dir / self.working_dir
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
def ensure_absolute(x: Path) -> Path:
if x.is_absolute():
return x
else:
return self.current_bin_dir / x
self.original_outputs = [ensure_absolute(x) for x in self.original_outputs]
# Ensure that there is no duplicate output in the project so
# that meson can handle cases where the same filename is
# generated in multiple directories
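# For illustration: if two custom targets both declare `data.h`, the second occurrence is renamed
# to something like `c0_data.h` and the original name is remembered in `conflict_map`.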
temp_outputs = [] # type: T.List[str]
for i in self.outputs:
if i in all_outputs:
old = str(i)
i = 'c{}_{}'.format(ConverterCustomTarget.out_counter, i)
ConverterCustomTarget.out_counter += 1
self.conflict_map[old] = i
all_outputs += [i]
temp_outputs += [i]
self.outputs = temp_outputs
# Check if the command is a build target
commands = [] # type: T.List[T.List[T.Union[str, ConverterTarget]]]
for curr_cmd in self._raw_target.command:
assert(isinstance(curr_cmd, list))
cmd = [] # type: T.List[T.Union[str, ConverterTarget]]
for j in curr_cmd:
if not j:
continue
target = output_target_map.executable(j)
if target:
# When cross compiling, binaries have to be executed with an exe_wrapper (for instance wine for mingw-w64)
if self.env.exe_wrapper is not None and self.env.properties[self.for_machine].get_cmake_use_exe_wrapper():
from ..dependencies import ExternalProgram
assert isinstance(self.env.exe_wrapper, ExternalProgram)
cmd += self.env.exe_wrapper.get_command()
cmd += [target]
continue
elif j in trace.targets:
trace_tgt = trace.targets[j]
if trace_tgt.type == 'EXECUTABLE' and 'IMPORTED_LOCATION' in trace_tgt.properties:
cmd += trace_tgt.properties['IMPORTED_LOCATION']
continue
mlog.debug('CMake: Found invalid CMake target "{}" --> ignoring \n{}'.format(j, trace_tgt))
# Fallthrough on error
cmd += [j]
commands += [cmd]
self.command = commands
# If the custom target does not declare any output, create a dummy
# one that can be used as dependency.
if not self.outputs:
self.outputs = [self.name + '.h']
# Check dependencies and input files
for i in self.depends_raw:
if not i:
continue
raw = Path(i)
art = output_target_map.artifact(i)
tgt = output_target_map.target(i)
gen = output_target_map.generated(raw)
rel_to_root = None
try:
rel_to_root = raw.relative_to(root_src_dir)
except ValueError:
rel_to_root = None
# First check for existing files. Only then check for existing
# targets, etc. This reduces the chance of misdetecting input files
# as outputs from other targets.
# See https://github.com/mesonbuild/meson/issues/6632
if not raw.is_absolute() and (self.current_src_dir / raw).exists():
self.inputs += [(self.current_src_dir / raw).relative_to(root_src_dir).as_posix()]
elif raw.is_absolute() and raw.exists() and rel_to_root is not None:
self.inputs += [rel_to_root.as_posix()]
elif art:
self.depends += [art]
elif tgt:
self.depends += [tgt]
elif gen:
ctgt_ref = gen.get_ref(raw)
assert ctgt_ref is not None
self.inputs += [ctgt_ref]
def process_inter_target_dependencies(self) -> None:
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def get_ref(self, fname: Path) -> T.Optional[CustomTargetReference]:
name = fname.name
try:
if name in self.conflict_map:
name = self.conflict_map[name]
idx = self.outputs.index(name)
return CustomTargetReference(self, idx)
except ValueError:
return None
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- conflict_map: ', mlog.bold(str(self.conflict_map)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeAPI(Enum):
SERVER = 1
FILE = 2
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: Path, src_dir: Path, install_prefix: Path, env: 'Environment', backend: 'Backend'):
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = subdir / '__CMake_build'
self.build_dir = Path(env.get_build_dir()) / self.build_dir_rel
self.install_prefix = install_prefix
self.env = env
self.for_machine = MachineChoice.HOST # TODO make parameter
self.backend_name = backend.name
self.linkers = set() # type: T.Set[str]
self.cmake_api = CMakeAPI.SERVER
self.client = CMakeClient(self.env)
self.fileapi = CMakeFileAPI(self.build_dir)
# Raw CMake results
self.bs_files = [] # type: T.List[Path]
self.codemodel_configs = None # type: T.Optional[T.List[CMakeConfiguration]]
self.raw_trace = None # type: T.Optional[str]
# Analysed data
self.project_name = ''
self.languages = [] # type: T.List[str]
self.targets = [] # type: T.List[ConverterTarget]
self.custom_targets = [] # type: T.List[ConverterCustomTarget]
self.trace = CMakeTraceParser('', Path('.')) # Will be replaced in analyse
self.output_target_map = OutputTargetMap(self.build_dir)
# Generated meson data
self.generated_targets = {} # type: T.Dict[str, T.Dict[str, T.Optional[str]]]
self.internal_name_map = {} # type: T.Dict[str, str]
# Do some special handling for object libraries for certain configurations
self._object_lib_workaround = False
if self.backend_name.startswith('vs'):
for comp in self.env.coredata.compilers[self.for_machine].values():
if comp.get_linker_id() == 'link':
self._object_lib_workaround = True
break
def configure(self, extra_cmake_options: T.List[str]) -> CMakeExecutor:
# Find CMake
# TODO: Using MachineChoice.BUILD should always be correct here, but also evaluate the use of self.for_machine
cmake_exe = CMakeExecutor(self.env, '>=3.7', MachineChoice.BUILD)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
self.trace = CMakeTraceParser(cmake_exe.version(), self.build_dir, permissive=True)
preload_file = mesondata['cmake/data/preload.cmake'].write_to_private(self.env)
toolchain = CMakeToolchain(self.env, self.for_machine, CMakeExecScope.SUBPROJECT, self.build_dir.parent, preload_file)
toolchain_file = toolchain.write()
# TODO: drop this check once the deprecated `cmake_args` kwarg is removed
extra_cmake_options = check_cmake_args(extra_cmake_options)
generator = backend_generator_map[self.backend_name]
cmake_args = []
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += extra_cmake_options
trace_args = self.trace.trace_args()
cmcmp_args = ['-DCMAKE_POLICY_WARNING_{}=OFF'.format(x) for x in disable_policy_warnings]
if version_compare(cmake_exe.version(), '>=3.14'):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running CMake with:'), ' '.join(cmake_args))
mlog.log(mlog.bold(' - build directory: '), self.build_dir.as_posix())
mlog.log(mlog.bold(' - source directory: '), self.src_dir.as_posix())
mlog.log(mlog.bold(' - toolchain file: '), toolchain_file.as_posix())
mlog.log(mlog.bold(' - preload file: '), preload_file.as_posix())
mlog.log(mlog.bold(' - trace args: '), ' '.join(trace_args))
mlog.log(mlog.bold(' - disabled policy warnings:'), '[{}]'.format(', '.join(disable_policy_warnings)))
mlog.log()
self.build_dir.mkdir(parents=True, exist_ok=True)
os_env = environ.copy()
os_env['LC_ALL'] = 'C'
final_args = cmake_args + trace_args + cmcmp_args + toolchain.get_cmake_args() + [self.src_dir.as_posix()]
cmake_exe.set_exec_mode(print_cmout=True, always_capture_stderr=self.trace.requires_stderr())
rc, _, self.raw_trace = cmake_exe.call(final_args, self.build_dir, env=os_env, disable_cache=True)
mlog.log()
h = mlog.green('SUCCEEDED') if rc == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if rc != 0:
raise CMakeException('Failed to configure the CMake subproject')
return cmake_exe
def initialise(self, extra_cmake_options: T.List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
# Additionally, the File API requires a configure anyway
cmake_exe = self.configure(extra_cmake_options)
# Continue with the file API If supported
if self.cmake_api is CMakeAPI.FILE:
# Parse the result
self.fileapi.load_reply()
# Load the buildsystem file list
cmake_files = self.fileapi.get_cmake_sources()
self.bs_files = [x.file for x in cmake_files if not x.is_cmake and not x.is_temp]
self.bs_files = [relative_to_if_possible(x, Path(self.env.get_source_dir())) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
# Load the codemodel configurations
self.codemodel_configs = self.fileapi.get_cmake_configurations()
return
with self.client.connect(cmake_exe):
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
assert isinstance(bs_reply, ReplyCMakeInputs)
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
assert isinstance(cm_reply, ReplyCodeModel)
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [relative_to_if_possible(src_dir / x, Path(self.env.get_source_dir()), resolve=True) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
self.codemodel_configs = cm_reply.configs
def analyse(self) -> None:
if self.codemodel_configs is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
added_target_names = [] # type: T.List[str]
for i_0 in self.codemodel_configs:
for j_0 in i_0.projects:
if not self.project_name:
self.project_name = j_0.name
for k_0 in j_0.targets:
# Avoid duplicate targets from different configurations and known
# dummy CMake internal target types
if k_0.type not in skip_targets and k_0.name not in added_target_names:
added_target_names += [k_0.name]
self.targets += [ConverterTarget(k_0, self.env, self.for_machine)]
# Add interface targets from trace, if not already present.
# This step is required because interface targets were removed from
# the CMake file API output.
api_target_name_list = [x.name for x in self.targets]
for i_1 in self.trace.targets.values():
if i_1.type != 'INTERFACE' or i_1.name in api_target_name_list or i_1.imported:
continue
dummy = CMakeTarget({
'name': i_1.name,
'type': 'INTERFACE_LIBRARY',
'sourceDirectory': self.src_dir,
'buildDirectory': self.build_dir,
})
self.targets += [ConverterTarget(dummy, self.env, self.for_machine)]
for i_2 in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i_2, self.env, self.for_machine)]
# generate the output_target_map
for i_3 in [*self.targets, *self.custom_targets]:
assert isinstance(i_3, (ConverterTarget, ConverterCustomTarget))
self.output_target_map.add(i_3)
# First pass: Basic target cleanup
object_libs = []
custom_target_outputs = [] # type: T.List[str]
for ctgt in self.custom_targets:
ctgt.postprocess(self.output_target_map, self.src_dir, custom_target_outputs, self.trace)
for tgt in self.targets:
tgt.postprocess(self.output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if tgt.type == 'OBJECT_LIBRARY':
object_libs += [tgt]
self.languages += [x for x in tgt.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for tgt in self.targets:
tgt.process_object_libs(object_libs, self._object_lib_workaround)
# Third pass: Reassign dependencies to avoid some loops
for tgt in self.targets:
tgt.process_inter_target_dependencies()
for ctgt in self.custom_targets:
ctgt.process_inter_target_dependencies()
# Fourth pass: Remove reassigned dependencies
for tgt in self.targets:
tgt.cleanup_dependencies()
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self, options: TargetOptions) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val: TYPE_mixed = '') -> Token:
return Token(tid, self.subdir.as_posix(), 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value: TYPE_mixed_list) -> BaseNode:
if isinstance(value, str):
return string(value)
if isinstance(value, Path):
return string(value.as_posix())
elif isinstance(value, bool):
return BooleanNode(token(val=value))
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
elif isinstance(value, BaseNode):
return value
raise RuntimeError('invalid type of value: {} ({})'.format(type(value).__name__, str(value)))
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements: TYPE_mixed_list) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [elements]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args: T.Optional[TYPE_mixed_list] = None, kwargs: T.Optional[TYPE_mixed_kwargs] = None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
assert isinstance(args, (str, int, bool, Path, BaseNode))
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
func_n = FunctionNode(self.subdir.as_posix(), 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args: T.Optional[TYPE_mixed_list] = None, kwargs: T.Optional[TYPE_mixed_kwargs] = None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
assert isinstance(args, (str, int, bool, Path, BaseNode))
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
return MethodNode(self.subdir.as_posix(), 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir.as_posix(), 0, 0, var_name, value)
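# Rough illustration of how these helpers compose (identifier names are
# hypothetical): assign('foo_src', function('files', ['a.c'])) builds the
# AST for the meson statement `foo_src = files('a.c')` in the generated
# subproject.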
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
# Add the targets
processing = [] # type: T.List[str]
processed = {} # type: T.Dict[str, T.Dict[str, T.Optional[str]]]
name_map = {} # type: T.Dict[str, str]
def extract_tgt(tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert(tgt_name is not None and tgt_name in processed)
res_var = processed[tgt_name]['tgt']
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException('Cycle in CMake inputs/dependencies detected')
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> T.Union[IdNode, IndexNode]:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget) -> None:
detect_cycle(tgt)
# First handle inter target dependencies
link_with = [] # type: T.List[IdNode]
objec_libs = [] # type: T.List[IdNode]
sources = [] # type: T.List[Path]
generated = [] # type: T.List[T.Union[IdNode, IndexNode]]
generated_filenames = [] # type: T.List[str]
custom_targets = [] # type: T.List[ConverterCustomTarget]
dependencies = [] # type: T.List[IdNode]
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
objec_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
sources += tgt.sources
sources += tgt.generated
for ctgt_ref in tgt.generated_ctgt:
ctgt = ctgt_ref.ctgt
if ctgt.name not in processed:
process_custom_target(ctgt)
generated += [resolve_ctgt_ref(ctgt_ref)]
generated_filenames += [ctgt_ref.filename()]
if ctgt not in custom_targets:
custom_targets += [ctgt]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for ctgt in custom_targets:
for j in ctgt.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(ctgt.get_ref(Path(j)))]
generated_filenames += [j]
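# Hypothetical example of the header handling above: if a custom target
# produces ['parser.c', 'parser.h'] and only 'parser.c' is consumed as a
# source, 'parser.h' is still added to `generated` so the custom target is
# guaranteed to run before this target's sources are compiled.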
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = '{}_inc'.format(tgt.name)
dir_var = '{}_dir'.format(tgt.name)
sys_var = '{}_sys'.format(tgt.name)
src_var = '{}_src'.format(tgt.name)
dep_var = '{}_dep'.format(tgt.name)
tgt_var = tgt.name
install_tgt = options.get_install(tgt.cmake_name, tgt.install)
# Generate target kwargs
tgt_kwargs = {
'build_by_default': install_tgt,
'link_args': options.get_link_args(tgt.cmake_name, tgt.link_flags + tgt.link_libraries),
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': install_tgt,
'override_options': options.get_override_options(tgt.cmake_name, tgt.override_options),
'objects': [method(x, 'extract_all_objects') for x in objec_libs],
} # type: TYPE_mixed_kwargs
# Only set if installed and only override if it is set
if install_tgt and tgt.install_dir:
tgt_kwargs['install_dir'] = tgt.install_dir
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = options.get_compile_args(tgt.cmake_name, key, val)
# Handle -fPIC, etc.
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
} # type: TYPE_mixed_kwargs
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function('include_directories', tgt.includes))
sys_node = assign(sys_var, function('include_directories', tgt.sys_includes, {'is_system': True}))
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [tgt_var, id_node(src_var), *generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
elif tgt_func in ['shared_module']:
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
# CMake allows to specify multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Union[str, ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> T.Union[str, IdNode, IndexNode]:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = [] # type: T.List[T.Union[str, IdNode, IndexNode]]
command += mesonlib.meson_command
command += ['--internal', 'cmake_run_ctgt']
command += ['-o', '@OUTPUT@']
if tgt.original_outputs:
command += ['-O'] + [x.as_posix() for x in tgt.original_outputs]
command += ['-d', tgt.working_dir.as_posix()]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
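# Roughly, for two CMake commands the resulting list looks like
# [<meson>, '--internal', 'cmake_run_ctgt', '-o', '@OUTPUT@', ...,
#  'cmd1', 'arg1', ';;;', 'cmd2', 'arg2', ';;;'], with the helper script
# expected to split the subcommands again at the ';;;' markers.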
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
} # type: TYPE_mixed_kwargs
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
name_map[tgt.cmake_name] = tgt.name
# Now generate the target function calls
for ctgt in self.custom_targets:
if ctgt.name not in processed:
process_custom_target(ctgt)
for tgt in self.targets:
if tgt.name not in processed:
process_target(tgt)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
def target_info(self, target: str) -> T.Optional[T.Dict[str, str]]:
# Try resolving the target name
# start by checking if there is a 100% match (excluding the name prefix)
prx_tgt = _sanitize_cmake_name(target)
if prx_tgt in self.generated_targets:
return self.generated_targets[prx_tgt]
# check if there exists a name mapping
if target in self.internal_name_map:
target = self.internal_name_map[target]
assert(target in self.generated_targets)
return self.generated_targets[target]
return None
def target_list(self) -> T.List[str]:
return list(self.internal_name_map.keys())
|
the-stack_0_13643 | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import pytest
from tests.utils import assert_equal_dict
from polyaxon.schemas.polyflow.init import InitConfig
@pytest.mark.init_mark
class TestInitConfigs(TestCase):
def test_init_config(self):
config_dict = {}
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add artifact_refs
config_dict["artifacts"] = [
{"name": "data2"},
{"name": "data3", "paths": ["/subpath1", "subpath2"]},
{"name": "artifact2", "paths": ["/subpath1", "subpath2"]},
]
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add repos
config_dict["repos"] = [
{"name": "repo1"},
{"name": "repo1", "commit": "commit-hash"},
{"name": "repo2", "branch": "dev"},
]
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add build context
config_dict = {
"build": {
"image": "tensorflow:1.3.0",
"run": ["pip install tensor2tensor"],
"env": [["LC_ALL", "en_US.UTF-8"]],
"shell": "foo",
"name": "foo.yaml",
"workdir": "/test",
}
}
config = InitConfig.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
|
the-stack_0_13644 | from rqt_gui_py.plugin import Plugin
import python_qt_binding.QtGui as QtGui
from python_qt_binding.QtGui import (QAction, QIcon, QMenu, QWidget,
QPainter, QColor, QFont, QBrush,
QPen, QMessageBox, QSizePolicy,
QImage, QPixmap, qRgb, QComboBox,
QDialog, QPushButton)
from python_qt_binding.QtCore import (Qt, QTimer, qWarning, Slot,
QEvent, QSize, pyqtSignal,
pyqtSlot)
from threading import Lock, Thread
import rospy
import python_qt_binding.QtCore as QtCore
from std_msgs.msg import Bool, Time
import time
import math
from resource_retriever import get_filename
import yaml
import os, sys
import numpy as np
import cv2, cv
from cv_bridge import CvBridge, CvBridgeError
from image_view2.msg import MouseEvent
from sensor_msgs.msg import Image
class ComboBoxDialog(QDialog):
def __init__(self, parent=None):
super(ComboBoxDialog, self).__init__()
self.number = 0
vbox = QtGui.QVBoxLayout(self)
self.combo_box = QComboBox(self)
self.combo_box.activated.connect(self.onActivated)
vbox.addWidget(self.combo_box)
button = QPushButton()
button.setText("Done")
button.clicked.connect(self.buttonCallback)
vbox.addWidget(button)
self.setLayout(vbox)
def buttonCallback(self, event):
self.close()
def onActivated(self, number):
self.number = number
class ImageView2Plugin(Plugin):
"""
rqt wrapper for image_view2
"""
def __init__(self, context):
super(ImageView2Plugin, self).__init__(context)
self.setObjectName("ImageView2Plugin")
self._widget = ImageView2Widget()
context.add_widget(self._widget)
def save_settings(self, plugin_settings, instance_settings):
self._widget.save_settings(plugin_settings, instance_settings)
def restore_settings(self, plugin_settings, instance_settings):
self._widget.restore_settings(plugin_settings, instance_settings)
def trigger_configuration(self):
self._widget.trigger_configuration()
class ScaledLabel(QtGui.QLabel):
def __init__(self, *args, **kwargs):
QtGui.QLabel.__init__(self)
self._pixmap = QtGui.QPixmap(self.pixmap())
def resizeEvent(self, event):
self.setPixmap(self._pixmap.scaled(
self.width(), self.height(),
QtCore.Qt.KeepAspectRatio))
class ImageView2Widget(QWidget):
"""
Qt widget to communicate with image_view2
"""
cv_image = None
pixmap = None
repaint_trigger = pyqtSignal()
def __init__(self):
super(ImageView2Widget, self).__init__()
self.left_button_clicked = False
self.repaint_trigger.connect(self.redraw)
self.lock = Lock()
self.need_to_rewrite = False
self.bridge = CvBridge()
self.image_sub = None
self.event_pub = None
self.label = ScaledLabel()
self.label.setAlignment(Qt.AlignCenter)
self.label.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored))
#self.label.installEventFilter(self)
vbox = QtGui.QVBoxLayout(self)
vbox.addWidget(self.label)
self.setLayout(vbox)
self._image_topics = []
self._update_topic_thread = Thread(target=self.updateTopics)
self._update_topic_thread.start()
self._active_topic = None
self.setMouseTracking(True)
self.label.setMouseTracking(True)
self._dialog = ComboBoxDialog()
self.show()
def trigger_configuration(self):
self._dialog.exec_()
self.setupSubscriber(self._image_topics[self._dialog.number])
def setupSubscriber(self, topic):
if self.image_sub:
self.image_sub.unregister()
rospy.loginfo("Subscribing %s" % (topic + "/marked"))
self.image_sub = rospy.Subscriber(topic + "/marked",
Image,
self.imageCallback)
self.event_pub = rospy.Publisher(topic + "/event", MouseEvent)
self._active_topic = topic
def onActivated(self, number):
self.setupSubscriber(self._image_topics[number])
def imageCallback(self, msg):
with self.lock:
if msg.width == 0 or msg.height == 0:
rospy.logdebug("Looks input images is invalid")
return
cv_image = self.bridge.imgmsg_to_cv2(msg, msg.encoding)
if msg.encoding == "bgr8":
self.cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
elif msg.encoding == "rgb8":
self.cv_image = cv_image
self.numpy_image = np.asarray(self.cv_image)
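# The BGR->RGB conversion above matters because redraw() constructs the
# QImage with QImage.Format_RGB888, which expects RGB byte order.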
self.need_to_rewrite = True
self.repaint_trigger.emit()
def updateTopics(self):
need_to_update = False
for (topic, topic_type) in rospy.get_published_topics():
if topic_type == "sensor_msgs/Image":
with self.lock:
if not topic in self._image_topics:
self._image_topics.append(topic)
need_to_update = True
if need_to_update:
with self.lock:
self._image_topics = sorted(self._image_topics)
self._dialog.combo_box.clear()
for topic in self._image_topics:
self._dialog.combo_box.addItem(topic)
if self._active_topic:
self._dialog.combo_box.setCurrentIndex(self._image_topics.index(self._active_topic))
time.sleep(1)
@pyqtSlot()
def redraw(self):
with self.lock:
if not self.need_to_rewrite:
return
if self.cv_image is not None:
size = self.cv_image.shape
img = QImage(self.cv_image.data,
size[1], size[0], size[2] * size[1],
QImage.Format_RGB888)
# convert to QPixmap
self.pixmap = QPixmap(size[1], size[0])
self.pixmap.convertFromImage(img)
self.label.setPixmap(self.pixmap.scaled(
self.label.width(), self.label.height(),
QtCore.Qt.KeepAspectRatio))
#self.label.setPixmap(self.pixmap)
def mousePosition(self, e):
label_x = self.label.x()
label_y = self.label.y()
label_width = self.label.width()
label_height = self.label.height()
pixmap_width = self.label.pixmap().width()
pixmap_height = self.label.pixmap().height()
x_offset = (label_width - pixmap_width) / 2.0 + label_x
y_offset = (label_height - pixmap_height) / 2.0 + label_y
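# The pixmap is centered inside the label (AlignCenter), so these offsets
# translate widget coordinates into pixmap coordinates before publishing.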
return (e.x() - x_offset, e.y() - y_offset)
def mouseMoveEvent(self, e):
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
msg.type = MouseEvent.MOUSE_MOVE
msg.x, msg.y = self.mousePosition(e)
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
if self.event_pub:
self.event_pub.publish(msg)
def mousePressEvent(self, e):
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
if e.button() == Qt.LeftButton:
msg.type = MouseEvent.MOUSE_LEFT_DOWN
self.left_button_clicked = True
elif e.button() == Qt.RightButton:
msg.type = MouseEvent.MOUSE_RIGHT_DOWN
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
msg.x, msg.y = self.mousePosition(e)
if self.event_pub:
self.event_pub.publish(msg)
def mouseReleaseEvent(self, e):
if e.button() == Qt.LeftButton:
self.left_button_clicked = False
msg = MouseEvent()
msg.header.stamp = rospy.Time.now()
msg.width = self.label.pixmap().width()
msg.height = self.label.pixmap().height()
msg.type = MouseEvent.MOUSE_LEFT_UP
msg.x, msg.y = self.mousePosition(e)
if self.event_pub:
self.event_pub.publish(msg)
def save_settings(self, plugin_settings, instance_settings):
if self._active_topic:
instance_settings.set_value("active_topic", self._active_topic)
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.value("active_topic"):
topic = instance_settings.value("active_topic")
self._dialog.combo_box.addItem(topic)
self.setupSubscriber(topic)
|
the-stack_0_13646 | """
"""
# Created on 2014.10.05
#
# Author: Giovanni Cannata
#
# Copyright 2014 - 2019 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
import re
from ..utils.log import log, log_enabled, NETWORK
try:
from backports.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
class CertificateError(ValueError): # fix for Python 2, code from Python 3.5 standard library
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Backported from Python 3.4.3 standard library
Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
if log_enabled(NETWORK):
log(NETWORK, "matching dn %s with hostname %s", dn, hostname)
pats = []
if not dn:
return False
pieces = dn.split(r'.')
leftmost = pieces[0]
remainder = pieces[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Backported from Python 3.4.3 standard library.
Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
the-stack_0_13647 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_less_equal_normal(test_case, device):
input1 = flow.tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.tensor(
np.random.randn(2, 6, 5, 3), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.le(input1, input2)
np_out = np.less_equal(input1.numpy(), input2.numpy())
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_symbol(test_case, device):
input1 = flow.tensor(
np.array([1, 1, 4]).astype(np.float32),
dtype=flow.float32,
device=flow.device(device),
)
input2 = flow.tensor(
np.array([1, 2, 3]).astype(np.float32),
dtype=flow.float32,
device=flow.device(device),
)
of_out = input1 <= input2
np_out = np.less_equal(input1.numpy(), input2.numpy())
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_int_scalar(test_case, device):
np_arr = np.random.randn(2, 3, 4, 5)
input1 = flow.tensor(np_arr, dtype=flow.float32, device=flow.device(device))
input2 = 1
of_out = input1 <= input2
np_out = np.less_equal(np_arr, input2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_int_tensor_int_scalar(test_case, device):
np_arr = np.random.randint(2, size=(2, 3, 4, 5))
input1 = flow.tensor(np_arr, dtype=flow.int, device=flow.device(device))
input2 = 1
of_out = input1 <= input2
np_out = np.less_equal(np_arr, input2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_less_equal_float_scalar(test_case, device):
np_arr = np.random.randn(3, 2, 5, 7)
input1 = flow.tensor(np_arr, dtype=flow.float32, device=flow.device(device))
input2 = 2.3
of_out = input1 <= input2
np_out = np.less_equal(np_arr, input2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@flow.unittest.skip_unless_1n1d()
class TestLessEqual(flow.unittest.TestCase):
def test_less_equal(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_less_equal_normal,
_test_less_equal_symbol,
_test_less_equal_int_scalar,
_test_less_equal_int_tensor_int_scalar,
_test_less_equal_float_scalar,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_13648 | import tensorflow as tf
class MultiHeadSelfAttLayer(tf.keras.layers.Layer):
def __init__(self, n_heads, input_size, hidd_size, level):
super(MultiHeadSelfAttLayer, self).__init__()
self.hidd_size = hidd_size
self.n_heads = n_heads
self.w_output = tf.get_variable(name='w_output', shape=(hidd_size * n_heads, input_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
# regularizer=tf.keras.regularizers.l2(l2=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=level),
trainable=True)
self.layernorm0 = tf.keras.layers.LayerNormalization(axis=-1)
self.layernorm1 = tf.keras.layers.LayerNormalization(axis=-1)
self.output_layer = Ffnn(hidd_size * n_heads, input_size * 3, input_size, level)
self.layers = []
for n in range(n_heads):
with tf.variable_scope("self_att_layer_%d_%d" % (n, level)):
# Create sublayers for each layer.
self_attention_layer = SelfAttentionLayer(input_size, hidd_size, n)
self.layers.append(self_attention_layer)
def call(self, x, training):
att_heads_results = []
att_weights_results = []
# multi-head attention
for n, self_attention_layer in enumerate(self.layers):
with tf.variable_scope("self_att_layer_%d" % n):
interaction_weights, layer_out = self_attention_layer(x, training)
att_heads_results.append(layer_out)
att_weights_results.append(interaction_weights)
# concat
embedded_output = tf.stack(att_heads_results, axis=-1)
hidd_doc_repr = tf.reshape(embedded_output, (-1, tf.shape(embedded_output)[1], self.hidd_size * self.n_heads))
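# After this reshape hidd_doc_repr has shape
# [batch, seq_len, hidd_size * n_heads]: the per-head outputs are
# concatenated along the feature axis before the residual connection below.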
# add and norm
hidd_doc_repr = self.layernorm0(hidd_doc_repr + x)
hidd_doc_repr = tf.layers.dropout(hidd_doc_repr, rate=0.5, training=training)
# position-ff
output = self.output_layer(hidd_doc_repr, training)
# add and norm
output = self.layernorm1(output + hidd_doc_repr)
output = tf.layers.dropout(output, rate=0.5, training=training)
return tf.stack(att_weights_results, axis=-1), output
class Ffnn(tf.keras.layers.Layer):
def __init__(self, input_size, w1_hidd_size, w2_hidd_size, seed):
super(Ffnn, self).__init__()
# self.bn = tf.keras.layers.BatchNormalization(axis=-1)
self.w1 = tf.get_variable(name='w1', shape=(input_size, w1_hidd_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
self.b1 = tf.get_variable(name='b1', shape=w1_hidd_size,
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
self.w2 = tf.get_variable(name='w2', shape=(w1_hidd_size, w2_hidd_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
self.b2 = tf.get_variable(name='b2', shape=w2_hidd_size,
# regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed),
trainable=True)
def call(self, x, training):
p1 = tf.nn.leaky_relu(tf.einsum('bse, eo->bso', x, self.w1) + self.b1)
# print('add dropout in ffnn in between the layers')
# p1 = tf.layers.dropout(p1, training=training)
# print('replaced l2 norm in ffnn with bn layer')
p1 = tf.nn.l2_normalize(p1, axis=-1)
# p1 = self.bn(p1)
p2 = tf.einsum('bse, eo->bso', p1, self.w2) + self.b2
return p2
class SelfAttentionLayer(tf.keras.layers.Layer):
def __init__(self, input_data_size, proj_space_size, seed):
super(SelfAttentionLayer, self).__init__()
self.proj_space_size = proj_space_size
self.k = tf.get_variable(name='K', shape=(input_data_size, self.proj_space_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.q = tf.get_variable(name='Q', shape=(input_data_size, self.proj_space_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.v = tf.get_variable(name='V', shape=(input_data_size, self.proj_space_size),
regularizer=tf.contrib.layers.l2_regularizer(scale=1.),
dtype=tf.float32, initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
def call(self, embedded_features_vectors, training):
Q = tf.einsum('eo, bse->bso', self.q, embedded_features_vectors)
K = tf.einsum('eo, bse->bso', self.k, embedded_features_vectors)
V = tf.einsum('eo, bse->bso', self.v, embedded_features_vectors)
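# einsum index convention: b=batch, s=sequence position, e=input feature
# size, o=proj_space_size, so Q, K and V all have shape
# [batch, seq, proj_space_size].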
QK = tf.matmul(Q, K, transpose_b=True)
QK = QK / tf.sqrt(tf.cast(self.proj_space_size, tf.float32))
interaction_weights = tf.reduce_sum(QK, axis=-1)
att_w = tf.nn.softmax(interaction_weights, axis=-1)
output = tf.layers.dropout(tf.einsum('bso,bs->bso', V, att_w), rate=0.5, training=training)
output = tf.nn.l2_normalize(output)
return att_w, output
class FFNetCombo(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, seed, rate=0.5):
super(FFNetCombo, self).__init__()
self.proj_matrix = tf.get_variable(name='W_ffncombo', shape=(input_size, output_size), dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.bias = tf.get_variable(name='b_ffncombo', shape=output_size, dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.dropout = tf.keras.layers.Dropout(rate)
self.bn = tf.keras.layers.BatchNormalization(momentum=0.4, axis=-1)
# self.hidd_l = tfp.layers.DenseFlipout(1, activation=tf.nn.leaky_relu)
def call(self, inputs, **kwargs):
norm_inputs = self.bn(inputs)
output = tf.nn.leaky_relu(tf.einsum('bsf, fo->bso', norm_inputs, self.proj_matrix) + self.bias)
# output = self.hidd_l(norm_inputs)
output = self.dropout(output)
return output
class FCReluBN(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, seed, rate=0.5):
super(FCReluBN, self).__init__()
self.proj_matrix = tf.get_variable(name='W_ffncombo', shape=(input_size, output_size), dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.bias = tf.get_variable(name='b_ffncombo', shape=output_size, dtype=tf.float32,
initializer=tf.initializers.glorot_normal(seed=seed), trainable=True)
self.dropout = tf.keras.layers.Dropout(rate)
self.bn = tf.keras.layers.BatchNormalization(momentum=0.4, axis=-1)
# self.hidd_l = tfp.layers.DenseFlipout(1, activation=tf.nn.leaky_relu)
def call(self, inputs, **kwargs):
output = tf.nn.leaky_relu(tf.einsum('bsf, fo->bso', inputs, self.proj_matrix) + self.bias)
output = self.bn(output)
return output
|
the-stack_0_13650 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-usehd=0']] * self.num_nodes)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-5, "changeAddress must be a valid cadex address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 KDX to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 2)
stop_node(self.nodes[2], 3)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-usehd=0']] * self.num_nodes)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
        assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[1].fundrawtransaction, rawTx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 12)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
#############################
# Test address reuse option #
#############################
result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False})
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# frt should not have removed the key from the keypool
assert(changeaddress == nextaddr)
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
# Disable BIP69 sorting of inputs and outputs
self.nodes[3].setbip69enabled(False)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
keys = list(outputs.keys())
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
# Reenable BIP69 sorting of inputs and outputs
self.nodes[3].setbip69enabled(True)
if __name__ == '__main__':
RawTransactionsTest().main()
|
the-stack_0_13651 | # Copyright (c) 2014 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import plugin_utils as pu
from sahara.plugins.cdh.v5 import config_helper as c_helper
from sahara.plugins.cdh.v5 import db_helper
class PluginUtilsV5(pu.AbstractPluginUtils):
def __init__(self):
self.c_helper = c_helper
self.db_helper = db_helper
def configure_spark(self, cluster):
spark = self.get_spark_historyserver(cluster)
with spark.remote() as r:
r.execute_command(
'sudo su - -c "hdfs dfs -mkdir -p '
'/user/spark/applicationHistory" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -mkdir -p '
'/user/spark/share/lib" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -put /usr/lib/spark/assembly/lib/'
'spark-assembly-hadoop* '
'/user/spark/share/lib/spark-assembly.jar" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -chown -R '
'spark:spark /user/spark" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -chmod 0751 /user/spark" hdfs')
r.execute_command(
'sudo su - -c "hdfs dfs -chmod 1777 /user/spark/'
'applicationHistory" hdfs')
def create_hive_hive_directory(self, cluster):
# Hive requires /tmp/hive-hive directory
namenode = self.get_namenode(cluster)
with namenode.remote() as r:
r.execute_command(
'sudo su - -c "hadoop fs -mkdir -p /tmp/hive-hive" hdfs')
r.execute_command(
'sudo su - -c "hadoop fs -chown hive /tmp/hive-hive" hdfs')
def start_cloudera_manager(self, cluster):
self._start_cloudera_manager(
cluster, c_helper.AWAIT_MANAGER_STARTING_TIMEOUT)
def get_config_value(self, service, name, cluster=None):
configs = c_helper.get_plugin_configs()
return self._get_config_value(service, name, configs, cluster)
|
the-stack_0_13653 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import viztracer
import subprocess
import os
import time
import sys
import multiprocessing
from viztracer import VizTracer, ignore_function
from .cmdline_tmpl import CmdlineTmpl
from .base_tmpl import BaseTmpl
class TestIssue1(BaseTmpl):
def test_datetime(self):
tracer = viztracer.VizTracer()
tracer.start()
from datetime import timedelta
timedelta(hours=5)
tracer.stop()
tracer.parse()
tracer.save(output_file="tmp.json")
tracer = viztracer.VizTracer()
tracer.start()
from datetime import timedelta
timedelta(hours=5)
tracer.stop()
tracer.parse()
tracer.save(output_file="tmp.json")
os.remove("tmp.json")
class TestStackOptimization(BaseTmpl):
    # There's an order issue in tracefunc when skipping the FEE log.
    # If the stack is empty (stack_top is NULL) and we entered
    # an ignored function, ignore_stack_depth will increment.
    # However, when its corresponding exit comes, ignore_stack_depth
    # won't be decremented, because the function is skipped when the
    # stack is empty and it's a return function.
def test_instant(self):
def s():
return 0
tracer = VizTracer()
tracer.start()
# This is a library function which will be ignored, but
# this could trick the system into a ignoring status
tracer.add_instant("name", {"a": 1})
s()
s()
s()
tracer.stop()
entries = tracer.parse()
tracer.save()
self.assertEqual(entries, 4)
class TestSegFaultRegression(BaseTmpl):
# Without parsing, cleanup of C function had caused segfault
def test_cleanup(self):
tracer = VizTracer()
tracer.start()
_ = len([1, 2, 3])
_ = sum([2, 3, 4])
try:
raise Exception("lol")
except Exception:
pass
tracer.stop()
tracer.cleanup()
class TestFunctionArg(BaseTmpl):
def test_functionarg(self):
def f(n):
tracer.add_func_args("input", n)
if n < 2:
return 1
return f(n - 1) + f(n - 2)
tracer = VizTracer()
tracer.start()
f(5)
tracer.stop()
tracer.parse()
inputs = set()
for d in tracer.data["traceEvents"]:
if d["ph"] == "X":
inputs.add(d["args"]["input"])
self.assertEqual(inputs, set([0, 1, 2, 3, 4, 5]))
issue21_code = \
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--script_option", action="store_true")
parser.add_argument("-o", action="store_true")
options = parser.parse_args()
print(options)
if not options.script_option:
exit(1)
"""
class TestIssue21(CmdlineTmpl):
# viztracer --run my_script --script_option
# is not parsed correctly because the program gets confused
# about --script_option
def test_issue21(self):
self.template(["viztracer", "--include_files", "/", "--run", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "--include_files", "/", "--", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "cmdline_test.py", "--script_option"], script=issue21_code)
self.template(["viztracer", "--run", "cmdline_test.py", "-o", "--script_option"], script=issue21_code)
self.template(["viztracer", "--", "cmdline_test.py", "-o", "--script_option"], script=issue21_code)
self.template(["viztracer", "--run"], script=issue21_code, success=False, expected_output_file=None)
self.template(["viztracer", "--"], script=issue21_code, success=False, expected_output_file=None)
term_code = \
"""
import time
a = []
a.append(1)
for i in range(10):
time.sleep(1)
"""
class TestTermCaught(CmdlineTmpl):
def test_term(self):
if sys.platform == "win32":
return
self.build_script(term_code)
cmd = ["viztracer", "-o", "term.json", "cmdline_test.py"]
if os.getenv("COVERAGE_RUN"):
cmd = ["coverage", "run", "--parallel-mode", "--pylib", "-m"] + cmd
p = subprocess.Popen(cmd)
time.sleep(0.5)
p.terminate()
p.wait(timeout=10)
self.assertTrue(os.path.exists("term.json"))
self.cleanup(output_file="term.json")
class TestIssue42(BaseTmpl):
def test_issue42(self):
@ignore_function
def f():
lst = []
lst.append(1)
tracer = VizTracer()
tracer.start()
f()
tracer.stop()
tracer.parse()
self.assertEventNumber(tracer.data, 0)
issue47_code = \
"""
import sys
import gc
class C:
def __init__(self):
self.data = bytearray()
def change(self):
b = memoryview(self.data).tobytes()
self.data += b"123123"
del self.data[:1]
c = C()
c.change()
"""
class TestIssue47(CmdlineTmpl):
def test_issue47(self):
self.template(["viztracer", "cmdline_test.py", "-o", "result.json"], script=issue47_code, expected_output_file="result.json", expected_entries=7)
class TestIssue58(CmdlineTmpl):
def test_issue58(self):
if multiprocessing.get_start_method() == "fork":
self.template(["viztracer", "--log_multiprocess", "-m", "tests.modules.issue58"], expected_output_file="result.html")
|
the-stack_0_13654 | import setuptools
VERSION = '0.1'
setuptools.setup(
name='TTWeb',
version=VERSION,
description='Web framework for The Tale',
long_description='Web framework for The Tale',
url='https://github.com/Tiendil/the-tale',
author='Aleksey Yeletsky <Tiendil>',
author_email='[email protected]',
license='BSD',
packages=setuptools.find_packages(),
install_requires=['aiohttp==3.7.4',
'cchardet==2.1.5',
'aiodns==2.0.0',
'aiopg==1.0.0',
'Django==3.0.11',
'yarl==1.4.2',
'protobuf==3.11.1'],
entry_points={'console_scripts': ['tt_service=tt_web.commands.tt_service:main']},
include_package_data=True,
test_suite='tests')
|
the-stack_0_13655 | # Self Driving Car
# Importing the libraries
import numpy as np
from random import random, randint
import matplotlib.pyplot as plt
import time
# Importing the Kivy packages
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.graphics import Color, Ellipse, Line
from kivy.config import Config
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
# Importing the Dqn object from our AI in ai.py
from ai import Dqn
# Adding this line if we don't want the right click to put a red point
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
# Introducing last_x and last_y, used to keep the last point in memory when we draw the sand on the map
# Sand is our punishment
last_x = 0
last_y = 0
n_points = 0 # the total number of points in the last drawing
length = 0 # the length of the last drawing
# Getting our AI which contains NN that represents Q-function
brain = Dqn(5,3,0.9) # 5 sensor states, 3 actions, gamma = 0.9
action2rotation = [0,20,-20]
last_reward = 0
scores = []
# Initializing the map
first_update = True # To initialize the map only once
def init():
global sand # sand is an array that has as many cells as our graphic interface has pixels. Each cell has a one if there is sand, 0 otherwise
global goal_x # x-coordinate of the goal (where the car has to go, that is the airport or the downtown)
global goal_y # y-coordinate of the goal (where the car has to go, that is the airport or the downtown)
global first_update
sand = np.zeros((longueur,largeur)) # initializing the sand array with zeros
goal_x = 20 # the goal is at the upper left of the map (the x-coordinate is 20 and not 0 because the car gets bad reward if it touches the wall)
goal_y = largeur - 20 # the goal is at the upper left of the map (y-coordinate)
first_update = False # initialize the map only once
# Initializing the last distance
last_distance = 0
# Creating the car class
class Car(Widget):
angle = NumericProperty(0) # initializing the angle of the car (angle between the x-axis of the map and the axis of the car)
rotation = NumericProperty(0) # initializing the last rotation of the car (after playing the action, the car does a rotation of 0, 20 or -20 degrees)
velocity_x = NumericProperty(0) # initializing the x-coordinate of the velocity vector
velocity_y = NumericProperty(0) # initializing the y-coordinate of the velocity vector
velocity = ReferenceListProperty(velocity_x, velocity_y) # velocity vector
sensor1_x = NumericProperty(0) # initializing the x-coordinate of the first sensor (the one that looks forward)
sensor1_y = NumericProperty(0) # initializing the y-coordinate of the first sensor (the one that looks forward)
sensor1 = ReferenceListProperty(sensor1_x, sensor1_y) # first sensor vector
sensor2_x = NumericProperty(0)
sensor2_y = NumericProperty(0)
sensor2 = ReferenceListProperty(sensor2_x, sensor2_y)
sensor3_x = NumericProperty(0)
sensor3_y = NumericProperty(0)
sensor3 = ReferenceListProperty(sensor3_x, sensor3_y)
signal1 = NumericProperty(0)
signal2 = NumericProperty(0)
signal3 = NumericProperty(0)
def move(self, rotation):
self.pos = Vector(*self.velocity) + self.pos # updating the position of the car according to its last position and velocity
self.rotation = rotation # getting the rotation of the car
self.angle = self.angle + self.rotation # updating the angle
self.sensor1 = Vector(30, 0).rotate(self.angle) + self.pos # updating the position of sensor 1, 30 is distance between car and sensor
self.sensor2 = Vector(30, 0).rotate((self.angle+30)%360) + self.pos # updating the position of sensor 2
self.sensor3 = Vector(30, 0).rotate((self.angle-30)%360) + self.pos # sensor 3
self.signal1 = int(np.sum(sand[int(self.sensor1_x)-10:int(self.sensor1_x)+10, int(self.sensor1_y)-10:int(self.sensor1_y)+10]))/400. # getting the signal received by sensor 1 (density of sand around sensor 1)
self.signal2 = int(np.sum(sand[int(self.sensor2_x)-10:int(self.sensor2_x)+10, int(self.sensor2_y)-10:int(self.sensor2_y)+10]))/400.
self.signal3 = int(np.sum(sand[int(self.sensor3_x)-10:int(self.sensor3_x)+10, int(self.sensor3_y)-10:int(self.sensor3_y)+10]))/400.
if self.sensor1_x>longueur-10 or self.sensor1_x<10 or self.sensor1_y>largeur-10 or self.sensor1_y<10: # if sensor 1 is out of the map (the car is facing one edge of the map)
self.signal1 = 1. # sensor 1 detects full sand
if self.sensor2_x>longueur-10 or self.sensor2_x<10 or self.sensor2_y>largeur-10 or self.sensor2_y<10:
self.signal2 = 1.
if self.sensor3_x>longueur-10 or self.sensor3_x<10 or self.sensor3_y>largeur-10 or self.sensor3_y<10:
self.signal3 = 1.
class Ball1(Widget): # sensor 1
pass
class Ball2(Widget): # sensor 2
pass
class Ball3(Widget): # sensor 3
pass
# Creating the game class
class Game(Widget):
car = ObjectProperty(None) # getting the car object from our kivy file
ball1 = ObjectProperty(None) # getting the sensor 1 object from our kivy file
ball2 = ObjectProperty(None)
ball3 = ObjectProperty(None)
def serve_car(self): # starting the car when we launch the application
self.car.center = self.center # the car will start at the center of the map
self.car.velocity = Vector(6, 0) # the car will start to go horizontally to the right with a speed of 6
def update(self, dt): # update function that updates everything that needs to be updated at each discrete time t when reaching a new state (getting new signals from the sensors)
global brain # specifying the global variables (the brain of the car, that is our AI)
global last_reward # specifying the global variables (the last reward)
global scores # specifying the global variables (the means of the rewards)
global last_distance # specifying the global variables (the last distance from the car to the goal)
global goal_x # specifying the global variables (x-coordinate of the goal)
global goal_y # specifying the global variables (y-coordinate of the goal)
global longueur # specifying the global variables (width of the map)
global largeur # specifying the global variables (height of the map)
longueur = self.width # width of the map (horizontal edge)
largeur = self.height # height of the map (vertical edge)
if first_update: # initialize the map only once
init()
xx = goal_x - self.car.x # difference of x-coordinates between the goal and the car
yy = goal_y - self.car.y # difference of y-coordinates between the goal and the car
orientation = Vector(*self.car.velocity).angle((xx,yy))/180. # direction of the car with respect to the goal (if the car is heading perfectly towards the goal, then orientation = 0)
last_signal = [self.car.signal1, self.car.signal2, self.car.signal3, orientation, -orientation] # our input state vector, composed of the three signals received by the three sensors, plus the orientation and -orientation
action = brain.update(last_reward, last_signal) # playing the action from our ai (the object brain of the dqn class)
scores.append(brain.score()) # appending the score (mean of the last 100 rewards to the reward window)
rotation = action2rotation[action] # converting the action played (0, 1 or 2) into the rotation angle (0°, 20° or -20°)
self.car.move(rotation) # moving the car according to this last rotation angle
distance = np.sqrt((self.car.x - goal_x)**2 + (self.car.y - goal_y)**2) # getting the new distance between the car and the goal right after the car moved
self.ball1.pos = self.car.sensor1 # updating the position of the first sensor (ball1) right after the car moved
self.ball2.pos = self.car.sensor2
self.ball3.pos = self.car.sensor3
if sand[int(self.car.x),int(self.car.y)] > 0: # if the car is on the sand
self.car.velocity = Vector(1, 0).rotate(self.car.angle) # it is slowed down (speed = 1)
last_reward = -1 # and reward = -1
else: # otherwise
self.car.velocity = Vector(6, 0).rotate(self.car.angle) # it goes to a normal speed (speed = 6)
last_reward = -0.2 # and it gets bad reward (-0.2)
if distance < last_distance: # if it getting close to the goal
last_reward = 0.1 # it still gets slightly positive reward 0.1
if self.car.x < 10: # if the car is in the left edge of the frame
self.car.x = 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if self.car.x > self.width - 10: # if the car is in the right edge of the frame
self.car.x = self.width - 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if self.car.y < 10: # if the car is in the bottom edge of the frame
self.car.y = 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if self.car.y > self.height - 10: # if the car is in the upper edge of the frame
self.car.y = self.height - 10 # it is not slowed down
last_reward = -1 # but it gets bad reward -1
if distance < 100: # when the car reaches its goal
goal_x = self.width-goal_x # the goal becomes the bottom right corner of the map (the downtown), and vice versa (updating of the x-coordinate of the goal)
goal_y = self.height-goal_y # the goal becomes the bottom right corner of the map (the downtown), and vice versa (updating of the y-coordinate of the goal)
last_distance = distance # Updating the last distance from the car to the goal
# Adding the painting tools
class MyPaintWidget(Widget):
def on_touch_down(self, touch): # putting some sand when we do a left click
global length, n_points, last_x, last_y
with self.canvas:
Color(0.8,0.7,0)
d = 10.
touch.ud['line'] = Line(points = (touch.x, touch.y), width = 10)
last_x = int(touch.x)
last_y = int(touch.y)
n_points = 0
length = 0
sand[int(touch.x),int(touch.y)] = 1
def on_touch_move(self, touch): # putting some sand when we move the mouse while pressing left
global length, n_points, last_x, last_y
if touch.button == 'left':
touch.ud['line'].points += [touch.x, touch.y]
x = int(touch.x)
y = int(touch.y)
length += np.sqrt(max((x - last_x)**2 + (y - last_y)**2, 2))
n_points += 1.
density = n_points/(length)
touch.ud['line'].width = int(20 * density + 1)
sand[int(touch.x) - 10 : int(touch.x) + 10, int(touch.y) - 10 : int(touch.y) + 10] = 1
last_x = x
last_y = y
# Adding the API Buttons (clear, save and load)
class CarApp(App):
def build(self):
parent = Game()
parent.serve_car()
Clock.schedule_interval(parent.update, 1.0/60.0)
self.painter = MyPaintWidget()
clearbtn = Button(text = 'clear')
savebtn = Button(text = 'save', pos = (parent.width, 0))
loadbtn = Button(text = 'load', pos = (2 * parent.width, 0))
clearbtn.bind(on_release = self.clear_canvas)
savebtn.bind(on_release = self.save)
loadbtn.bind(on_release = self.load)
parent.add_widget(self.painter)
parent.add_widget(clearbtn)
parent.add_widget(savebtn)
parent.add_widget(loadbtn)
return parent
def clear_canvas(self, obj): # clear button
global sand
self.painter.canvas.clear()
sand = np.zeros((longueur,largeur))
def save(self, obj): # save button
print("saving brain...")
brain.save()
plt.plot(scores)
plt.show()
def load(self, obj): # load button
print("loading last saved brain...")
brain.load()
# Running the whole thing
if __name__ == '__main__':
CarApp().run()
|
the-stack_0_13656 | #!/usr/bin/env python3
import json
import os
from os import path
import sys
import zipfile
import hashlib
import urllib.parse
BLOCKSIZE = 65536
def sha256(file):
hasher = hashlib.sha256()
with open(file, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return(hasher.hexdigest())
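# Reads the file in 64 KiB chunks (BLOCKSIZE) so large mod jars are hashed without
# loading them into memory all at once.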
def guess_mod_name(file_name):
file_name, _ = os.path.splitext(file_name)
parts = []
for p in file_name.split('-'):
if len(p) > 0 and p[0].isdigit():
break
parts.append(p)
return "-".join(parts)
def apply_mod_count(modcount, modid):
if modid in modcount:
count = modcount[modid]
modcount[modid] = count + 1
return "{}-{}".format(modid, count)
else:
modcount[modid] = 1
return modid
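# Illustration: starting from an empty modcount dict, the first call for modid "foo"
# returns "foo", the second returns "foo-1", the third "foo-2", keeping duplicate
# mod ids apart in the output list.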
def generate_mod(mod_file, url_base, flags, writer, modcount):
zip = zipfile.ZipFile(mod_file)
name = None
version = None
if 'mcmod.info' in zip.namelist():
try:
f = zip.open('mcmod.info')
data = json.load(f)
if 'modListVersion' in data and data['modListVersion'] == 2:
data = data['modList']
name = data[0]['modid']
if 'version' in data[0]:
version = data[0]['version']
else:
print("Warning: Mod {} is apparently incapable of specifying a version number in their mcmod.info. Using 'unknown', this may have weird side effects".format(name))
version = 'unknown'
except ValueError as e:
print("Warning: Mod {} does not contain mcmod.info (or it does not follow correct format). Guessing information, this may have weird side effects".format(mod_file))
except json.decoder.JSONDecodeError as e:
print("Warning: Author of mod {} is apparently incapable of writing correctly formatted json. Guessing information, this may have weird side effects ({})".format(mod_file, e))
except Exception as e:
print("Irgendwas kaputt: {}".format(e))
else:
print("Warning: Mod {} does not contain mcmod.info (or it does not follow correct format). Guessing information, this may have weird side effects".format(mod_file))
if name == None:
name = guess_mod_name(path.basename(mod_file))
version = ''
name = apply_mod_count(modcount, name)
our_flags = flags[name] if name in flags else ''
writer.write("{},{},{}/mods/{},mod,{},{}\n".format(name, version, url_base, urllib.parse.quote(path.basename(mod_file)), sha256(mod_file), our_flags))
def make_configs(url_base, writer, exclude):
"""
Creates a configs.zip from the config/ directory.
Can be given a list of filenames to exclude
"""
with zipfile.ZipFile('configs.zip', 'w') as zip:
for (dirname, dirs, files) in os.walk("config"):
if dirname in exclude:
print("Skipping " + dirname + " and all files in it")
continue
for dir in dirs:
filename = path.join(dirname, dir)
arcname = filename[7:]
if filename not in exclude:
zip.write(filename, arcname)
for file in files:
filename = path.join(dirname, file)
if filename in exclude:
print("Skipping " + filename)
continue
arcname = filename[7:]
zip.write(filename, arcname)
writer.write("Configs,{1},{0}/configs.zip,config,{1}\n".format(url_base, sha256('configs.zip')))
def path_to_tree(path):
ret = set([])
total_path = ""
for el in path.split("/"):
total_path += el + "/"
ret.add(total_path)
return ret
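# Illustration: path_to_tree("a/b/c") returns {"a/", "a/b/", "a/b/c/"}, i.e. every
# directory level needed to recreate the nested path inside the zip.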
def make_resources(list, url_base, writer):
dirs = set([])
for p in list:
dirname = path.dirname(p)
if len(dirname) > 0:
dirs = dirs.union(path_to_tree(dirname))
with zipfile.ZipFile('resources.zip', 'w') as zip:
for dir in dirs:
zip.write(dir, dir)
for file in list:
file = file.rstrip()
zip.write(file, file)
writer.write("Resources,{1},{0}/resources.zip,resources,{1}\n".format(url_base, sha256('resources.zip')))
if len(sys.argv) != 3:
print("Usage: {} <url_base> <out_file>".format(sys.argv[0]))
sys.exit(1)
base_url = sys.argv[1]
out_file = sys.argv[2]
exclude = []
if path.isfile('exclude.packupdate'):
with open('exclude.packupdate') as file:
for line in file.readlines():
exclude.append(line.strip())
with open(out_file, 'w') as out_file:
make_configs(base_url, out_file, exclude)
if path.isfile('resources.packupdate'):
with open('resources.packupdate') as file:
make_resources(file.readlines(), base_url, out_file)
if path.isfile('forge.packupdate'):
with open('forge.packupdate') as file:
out_file.write("Minecraft Forge,{},,forge\n".format(file.read().strip()))
flags = {}
if path.isfile('flags.packupdate'):
with open('flags.packupdate') as file:
for line in file.readlines():
key, val = line.split(',')
flags[key] = val.rstrip()
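# Assumed format for flags.packupdate: one "name,flags" line per mod, with exactly
# one comma per line (the two-value unpacking above would fail on extra commas).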
modpath = 'mods/'
modcount = {}
for f in os.listdir(modpath):
mod_file = os.path.join(modpath, f)
if mod_file in exclude:
continue
if os.path.isfile(mod_file):
generate_mod(mod_file, base_url, flags, out_file, modcount)
|
the-stack_0_13657 | import click
import click_completion
from .main import completion
@completion.command()
@click.option(
"-i", "--case-insensitive/--no-case-insensitive", help="Case insensitive completion"
)
@click.argument(
"shell",
required=False,
type=click_completion.DocumentedChoice(click_completion.core.shells),
)
def show(shell, case_insensitive):
"""Show the click-completion-command completion code"""
extra_env = (
{"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE": "ON"}
if case_insensitive
else {}
)
click.echo(click_completion.core.get_code(shell, extra_env=extra_env))
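# Usage sketch (assuming this group is exposed as a "completion" sub-command of the
# installed CLI): "<cli> completion show bash" prints the bash completion code, and
# "--case-insensitive" sets the corresponding environment flag before generating it.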
@completion.command()
@click.option(
"--append/--overwrite", help="Append the completion code to the file", default=None
)
@click.option(
"-i", "--case-insensitive/--no-case-insensitive", help="Case insensitive completion"
)
@click.argument(
"shell",
required=False,
type=click_completion.DocumentedChoice(click_completion.core.shells),
)
@click.argument("path", required=False)
def install(append, case_insensitive, shell, path):
"""Install the click-completion-command completion"""
extra_env = (
{"_CLICK_COMPLETION_COMMAND_CASE_INSENSITIVE_COMPLETE": "ON"}
if case_insensitive
else {}
)
shell, path = click_completion.core.install(
shell=shell, path=path, append=append, extra_env=extra_env
)
click.echo("%s completion installed in %s" % (shell, path))
|
the-stack_0_13658 | from __future__ import print_function
import tempfile
import os
import shutil
from b3get.utils import tmp_location
def test_has_tempdir():
assert tempfile.gettempdir()
def test_create_tempdir():
assert tempfile.gettempdir()
tdir = tempfile.mkdtemp()
assert os.path.exists(tdir)
print("\n", tdir)
shutil.rmtree(tdir)
def test_b3get_tempdir():
tdir = tmp_location()
assert os.path.exists(tdir)
assert os.path.isdir(tdir)
shutil.rmtree(tdir)
def test_b3get_tempdir_reuse():
tmp = tempfile.gettempdir()
exp = os.path.join(tmp, 'random-b3get')
os.makedirs(exp)
tdir = tmp_location()
assert tdir == exp
shutil.rmtree(tdir)
def test_b3get_tempdir_double_call():
exp = tmp_location()
tdir = tmp_location()
assert tdir == exp
shutil.rmtree(tdir)
|
the-stack_0_13659 | import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
os.makedirs("visual", exist_ok=True)
def show_mnist(n=20):
from tensorflow import keras
(x, y), _ = keras.datasets.mnist.load_data()
idx = np.random.randint(0, len(x), n)
x, y = x[idx], y[idx]
n_col = 5
n_row = len(x) // n_col
if x.ndim > 3:
x = np.squeeze(x, axis=-1)
plt.figure(0, (5, n_row))
for c in range(n_col):
for r in range(n_row):
i = r*n_col+c
plt.subplot(n_row, n_col, i+1)
plt.imshow(x[i], cmap="gray_r")
plt.axis("off")
# plt.xlabel(y[i])
plt.tight_layout()
plt.savefig("visual/mnist.png")
# plt.show()
def save_gan(model, ep, **kwargs):
name = model.__class__.__name__.lower()
if name in ["dcgan", "wgan", "wgangp", "lsgan", "wgandiv", "sagan", "pggan"]:
imgs = model.call(100, training=False).numpy()
_save_gan(name, ep, imgs, show_label=False)
elif name == "gan":
data = model.call(5, training=False).numpy()
plt.plot(data.T)
plt.xticks((), ())
dir_ = "visual/{}".format(name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/{}.png".format(ep)
plt.savefig(path)
elif name == "cgan" or name == "acgan":
img_label = np.arange(0, 10).astype(np.int32).repeat(10, axis=0)
imgs = model.predict(img_label)
_save_gan(name, ep, imgs, show_label=True)
elif name in ["infogan"]:
img_label = np.arange(0, model.label_dim).astype(np.int32).repeat(10, axis=0)
img_style = np.concatenate(
[np.linspace(-model.style_scale, model.style_scale, 10)] * 10).reshape((100, 1)).repeat(model.style_dim, axis=1).astype(np.float32)
img_info = img_label, img_style
imgs = model.predict(img_info)
_save_gan(name, ep, imgs, show_label=False)
elif name in ["ccgan", "pix2pix"]:
if "img" not in kwargs:
raise ValueError
input_img = kwargs["img"][:100]
mask_width = np.random.randint(model.mask_range[0], model.mask_range[1], len(input_img))
mask = np.ones(input_img.shape, np.float32)
for i, w in enumerate(mask_width):
mask_xy = np.random.randint(0, model.img_shape[0] - w, 2)
x0, x1 = mask_xy[0], w + mask_xy[0]
y0, y1 = mask_xy[1], w + mask_xy[1]
mask[i, x0:x1, y0:y1] = 0
masked_img = input_img * mask
imgs = model.predict(masked_img)
_save_img2img_gan(name, ep, masked_img, imgs)
elif name == "cyclegan":
if "img6" not in kwargs or "img9" not in kwargs:
raise ValueError
img6, img9 = kwargs["img6"][:50], kwargs["img9"][:50]
img9_, img6_ = model.g12.call(img6, training=False), model.g21.call(img9, training=False)
img = np.concatenate((img6.numpy(), img9.numpy()), axis=0)
imgs = np.concatenate((img9_.numpy(), img6_.numpy()), axis=0)
_save_img2img_gan(name, ep, img, imgs)
elif name in ["srgan"]:
if "img" not in kwargs:
raise ValueError
input_img = kwargs["img"][:100]
imgs = model.predict(input_img)
_save_img2img_gan(name, ep, input_img, imgs)
elif name == "stylegan":
n = 12
global z1, z2 # z1 row, z2 col
if "z1" not in globals():
z1 = np.random.normal(0, 1, size=(n, 1, model.latent_dim))
if "z2" not in globals():
z2 = np.random.normal(0, 1, size=(n, 1, model.latent_dim))
imgs = model.predict([
np.concatenate(
(z1.repeat(n, axis=0).repeat(1, axis=1), np.repeat(np.concatenate([z2 for _ in range(n)], axis=0), 2, axis=1)),
axis=1),
np.zeros([len(z1)*n, model.img_shape[0], model.img_shape[1]], dtype=np.float32)])
z1_imgs = -model.predict([z1.repeat(model.n_style, axis=1), np.zeros([len(z1), model.img_shape[0], model.img_shape[1]], dtype=np.float32)])
z2_imgs = -model.predict([z2.repeat(model.n_style, axis=1), np.zeros([len(z2), model.img_shape[0], model.img_shape[1]], dtype=np.float32)])
imgs = np.concatenate([z2_imgs, imgs], axis=0)
rest_imgs = np.concatenate([np.ones([1, 28, 28, 1], dtype=np.float32), z1_imgs], axis=0)
for i in range(len(rest_imgs)):
imgs = np.concatenate([imgs[:i*(n+1)], rest_imgs[i:i+1], imgs[i*(n+1):]], axis=0)
_save_gan(name, ep, imgs, show_label=False, nc=n+1, nr=n+1)
else:
raise ValueError(name)
plt.clf()
plt.close()
def _img_recenter(img):
return (img + 1) * 255 / 2
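# Maps generator output from [-1, 1] back to pixel range: _img_recenter(-1) == 0,
# _img_recenter(1) == 255.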
def _save_img2img_gan(model_name, ep, img1, img2):
if not isinstance(img1, np.ndarray):
img1 = img1.numpy()
if not isinstance(img2, np.ndarray):
img2 = img2.numpy()
if img1.ndim > 3:
img1 = np.squeeze(img1, axis=-1)
if img2.ndim > 3:
img2 = np.squeeze(img2, axis=-1)
img1, img2 = _img_recenter(img1), _img_recenter(img2)
plt.clf()
nc, nr = 20, 10
plt.figure(0, (nc * 2, nr * 2))
i = 0
for c in range(0, nc, 2):
for r in range(nr):
n = r * nc + c
plt.subplot(nr, nc, n + 1)
plt.imshow(img1[i], cmap="gray")
plt.axis("off")
plt.subplot(nr, nc, n + 2)
plt.imshow(img2[i], cmap="gray_r")
plt.axis("off")
i += 1
plt.tight_layout()
dir_ = "visual/{}".format(model_name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/{}.png".format(ep)
plt.savefig(path)
def _save_gan(model_name, ep, imgs, show_label=False, nc=10, nr=10):
if not isinstance(imgs, np.ndarray):
imgs = imgs.numpy()
if imgs.ndim > 3:
imgs = np.squeeze(imgs, axis=-1)
imgs = _img_recenter(imgs)
plt.clf()
plt.figure(0, (nc * 2, nr * 2))
for c in range(nc):
for r in range(nr):
i = r * nc + c
plt.subplot(nr, nc, i + 1)
plt.imshow(imgs[i], cmap="gray_r")
plt.axis("off")
if show_label:
plt.text(23, 26, int(r), fontsize=23)
plt.tight_layout()
dir_ = "visual/{}".format(model_name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/{}.png".format(ep)
plt.savefig(path)
def infogan_comp():
import tensorflow as tf
from infogan import InfoGAN
STYLE_DIM = 2
LABEL_DIM = 10
RAND_DIM = 88
IMG_SHAPE = (28, 28, 1)
FIX_STD = True
model = InfoGAN(RAND_DIM, STYLE_DIM, LABEL_DIM, IMG_SHAPE, FIX_STD)
model.load_weights("./models/infogan/model.ckpt").expect_partial()
img_label = np.arange(0, 10).astype(np.int32).repeat(10, axis=0)
noise = tf.repeat(tf.random.normal((1, model.rand_dim)), len(img_label), axis=0)
def plot(noise, img_label, img_style, n):
img_label = tf.convert_to_tensor(img_label, dtype=tf.int32)
img_style = tf.convert_to_tensor(img_style, dtype=tf.float32)
imgs = model.g.call([noise, img_label, img_style], training=False).numpy()
if imgs.ndim > 3:
imgs = np.squeeze(imgs, axis=-1)
plt.clf()
nc, nr = 10, 10
plt.figure(0, (nc * 2, nr * 2))
for c in range(nc):
for r in range(nr):
i = r * nc + c
plt.subplot(nc, nr, i + 1)
plt.imshow(imgs[i], cmap="gray_r")
plt.axis("off")
plt.text(23, 26, int(r), fontsize=23)
plt.tight_layout()
model_name = model.__class__.__name__.lower()
dir_ = "visual/{}".format(model_name)
os.makedirs(dir_, exist_ok=True)
path = dir_ + "/style{}.png".format(n)
plt.savefig(path)
img_style = np.concatenate(
[np.linspace(-model.style_scale, model.style_scale, 10)] * 10).reshape((100, 1)).astype(np.float32)
plot(noise, img_label, np.concatenate((img_style, np.zeros_like(img_style)), axis=1), 1)
plot(noise, img_label, np.concatenate((np.zeros_like(img_style), img_style), axis=1), 2)
def cvt_gif(folders_or_gan, shrink=10):
if not isinstance(folders_or_gan, list):
folders_or_gan = [folders_or_gan.__class__.__name__.lower()]
for folder in folders_or_gan:
folder = "visual/"+folder
fs = [folder+"/" + f for f in os.listdir(folder)]
imgs = []
for f in sorted(fs, key=os.path.getmtime):
if not f.endswith(".png"):
continue
try:
int(os.path.basename(f).split(".")[0])
except ValueError:
continue
img = Image.open(f)
img = img.resize((img.width//shrink, img.height//shrink), Image.ANTIALIAS)
imgs.append(img)
path = "{}/generating.gif".format(folder)
if os.path.exists(path):
os.remove(path)
imgs[-1].save(path, append_images=imgs, optimize=False, save_all=True, duration=400, loop=0)
print("saved ", path)
if __name__ == "__main__":
# show_mnist(20)
# cgan_res()
# save_infogan(None, 1)
# infogan_comp()
cvt_gif(["wgangp", "wgandiv", "wgan", "cgan", "acgan", "dcgan", "lsgan", "infogan", "ccgan", "cyclegan", "pix2pix", "stylegan"]) |
the-stack_0_13661 | # Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from sklearn.manifold import TSNE
def plot(data, useful_embedding):
tsne_input = []
labels = []
curr_dim = 0
for list in data:
for word in list:
tsne_input.append(useful_embedding[word])
labels.append(str(curr_dim))
curr_dim += 1
tsne_input = np.array(tsne_input)
model = TSNE(n_components=2, init='pca')
tsne_result = model.fit_transform(tsne_input)
scatter(zip(tsne_result, labels))
def scatter(data):
import matplotlib.pyplot as plt
import matplotlib.cm as cm
dic = {}
for psi_2d, label in data:
if label == 'N/A':
continue
if label not in dic:
dic[label] = []
dic[label].append(psi_2d)
labels = list(dic.keys())
labels.sort(key=lambda l: len(dic[l]), reverse=True)
for label in labels[10:]:
del dic[label]
labels = dic.keys()
colors = cm.rainbow(np.linspace(0, 1, len(dic)))
plotpoints = []
for label, color in zip(labels, colors):
x = list(map(lambda s: s[0], dic[label]))
y = list(map(lambda s: s[1], dic[label]))
plotpoints.append(plt.scatter(x, y, color=color))
plt.show()
# returns a list with one entry per latent dimension; each entry is a list of words
def read_dataset(clargs):
word_lines = []
# skip first 8 lines, information lines
info_lines = 8
file = open(clargs.data_file)
lines = file.readlines()
file.close()
# 6 lines for each dimension
dimensionality = (len(lines) - info_lines) / 6
start_line = info_lines + 1 # 0-based
for i in range(int(dimensionality)):
word_lines.append(lines[start_line + i * 6].strip())
list_of_words = []
import ast
for word_line in word_lines:
list_of_words.append(ast.literal_eval(word_line))
return list_of_words
# returns a dict: token -> embedding (list of floats)
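# The embedding file is assumed to follow the word2vec text layout: one token per
# line followed by its space-separated float components, e.g.
#   token 0.12 -0.53 0.97 ...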
def get_useful_embedding(clargs, tokens):
file = open(clargs.embedding_file)
lines = file.readlines()
file.close()
embedding = {}
for line in lines:
splits = line.split(' ', 1)
embedding[splits[0]] = splits[1]
del lines
useful_embedding = {}
for token in tokens:
useful_embedding[token] = [float(i) for i in embedding[token].split(' ')]
del embedding
return useful_embedding
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('data_file', type=str)
parser.add_argument('embedding_file', type=str)
clargs = parser.parse_args()
data = read_dataset(clargs)
import itertools
tokens = list(itertools.chain.from_iterable(data))
useful_embedding = get_useful_embedding(clargs, tokens)
plot(data, useful_embedding)
|
the-stack_0_13662 | # -*- coding:utf-8 -*-
import os
import warnings
import git
import torch
import torchvision.transforms as transforms
import yolov2
import visdom
from yolov2 import detection_loss_4_yolo
from torchsummary.torchsummary import summary
from utilities.dataloader import detection_collate
from utilities.dataloader import VOC
from utilities.utils import save_checkpoint
from utilities.utils import create_vis_plot
from utilities.utils import update_vis_plot
from utilities.utils import visualize_GT
from utilities.augmentation import Augmenter
from imgaug import augmenters as iaa
warnings.filterwarnings("ignore")
# plt.ion() # interactive mode
# model = torch.nn.DataParallel(net, device_ids=[0]).cuda()
def train(params):
# future work variable
dataset = params["dataset"]
input_height = params["input_height"]
input_width = params["input_width"]
data_path = params["data_path"]
val_data_path = params["val_data_path"]
val_datalist_path = params["val_datalist_path"]
datalist_path = params["datalist_path"]
class_path = params["class_path"]
batch_size = params["batch_size"]
num_epochs = params["num_epochs"]
learning_rate = params["lr"]
checkpoint_path = params["checkpoint_path"]
USE_AUGMENTATION = params["use_augmentation"]
USE_GTCHECKER = params["use_gtcheck"]
USE_VISDOM = params["use_visdom"]
USE_GITHASH = params["use_githash"]
num_class = params["num_class"]
num_gpus = [i for i in range(1)]
with open(class_path) as f:
class_list = f.read().splitlines()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
if (USE_GITHASH):
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=7)
if USE_VISDOM:
viz = visdom.Visdom(use_incoming_socket=False)
vis_title = 'YOLOv2'
vis_legend_Train = ['Train Loss']
vis_legend_Val = ['Val Loss']
iter_plot = create_vis_plot(viz, 'Iteration', 'Total Loss', vis_title, vis_legend_Train)
val_plot = create_vis_plot(viz, 'Iteration', 'Validation Loss', vis_title, vis_legend_Val)
# 2. Data augmentation setting
if (USE_AUGMENTATION):
seq = iaa.SomeOf(2, [
iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect BBs
iaa.Affine(
translate_px={"x": 3, "y": 10},
scale=(0.9, 0.9)
), # translate by 40/60px on x/y axis, and scale to 50-70%, affects BBs
iaa.AdditiveGaussianNoise(scale=0.1 * 255),
iaa.CoarseDropout(0.02, size_percent=0.15, per_channel=0.5),
iaa.Affine(rotate=45),
iaa.Sharpen(alpha=0.5)
])
else:
seq = iaa.Sequential([])
composed = transforms.Compose([Augmenter(seq)])
# 3. Load Dataset
# composed
# transforms.ToTensor
    # TODO: parse VOC when a datalist is provided
# import pdb;pdb.set_trace()
train_dataset = VOC(root=data_path,transform=composed, class_path=class_path, datalist_path=datalist_path)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=detection_collate)
val_dataset = VOC(root=val_data_path,transform=composed, class_path=class_path, datalist_path=val_datalist_path)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=detection_collate)
# 5. Load YOLOv2
net = yolov2.YOLOv2()
model = torch.nn.DataParallel(net, device_ids=num_gpus).cuda()
print("device : ", device)
if device.type == 'cpu':
model = torch.nn.DataParallel(net)
else:
model = torch.nn.DataParallel(net, device_ids=num_gpus).cuda()
# 7.Train the model
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
# Train the model
total_step = len(train_loader)
total_train_step = num_epochs * total_step
# for epoch in range(num_epochs):
for epoch in range(1, num_epochs + 1):
train_loss =0
total_val_loss = 0
train_total_conf_loss = 0
train_total_xy_loss = 0
train_total_wh_loss = 0
train_total_c_loss = 0
val_total_conf_loss = 0
val_total_xy_loss = 0
val_total_wh_loss = 0
val_total_c_loss = 0
if(epoch %500 ==0 and epoch <1000):
learning_rate /= 10
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
if (epoch == 200) or (epoch == 400) or (epoch == 600) or (epoch == 20000) or (epoch == 30000):
scheduler.step()
model.train()
for i, (images, labels, sizes) in enumerate(train_loader):
current_train_step = (epoch) * total_step + (i + 1)
if USE_GTCHECKER:
visualize_GT(images, labels, class_list)
images = images.to(device)
labels = labels.to(device)
dog = labels[0,4,7,:]
human = labels[0,6,6,:]
# Forward pass
outputs = model(images)
# Calc Loss
one_loss,conf_loss,xy_loss,wh_loss,class_loss = detection_loss_4_yolo(outputs, labels, device.type)
# objness1_loss = detection_loss_4_yolo(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
one_loss.backward()
optimizer.step()
train_loss += one_loss.item()
train_total_conf_loss += conf_loss.item()
train_total_xy_loss += xy_loss.item()
train_total_wh_loss += wh_loss.item()
train_total_c_loss += class_loss.item()
train_total_conf_loss = train_total_conf_loss / len(train_loader)
train_total_xy_loss= train_total_xy_loss / len(train_loader)
train_total_wh_loss = train_total_wh_loss /len(train_loader)
train_total_c_loss = train_total_c_loss /len(train_loader)
train_epoch_loss = train_loss / len(train_loader)
        if USE_VISDOM:
            update_vis_plot(viz, epoch + 1, train_epoch_loss, iter_plot, None, 'append')
model.eval()
with torch.no_grad():
for j, (v_images, v_labels, v_sizes) in enumerate(val_loader):
v_images = v_images.to(device)
v_labels = v_labels.to(device)
# Forward pass
v_outputs = model(v_images)
# Calc Loss
val_loss,conf_loss,xy_loss,wh_loss,class_loss = detection_loss_4_yolo(v_outputs, v_labels, device.type)
total_val_loss += val_loss.item()
val_total_conf_loss += conf_loss.item()
val_total_xy_loss += xy_loss.item()
val_total_wh_loss += wh_loss.item()
val_total_c_loss += class_loss.item()
val_epoch_loss = total_val_loss / len(val_loader)
val_total_conf_loss = val_total_conf_loss / len(val_loader)
val_total_xy_loss= val_total_xy_loss / len(val_loader)
val_total_wh_loss = val_total_wh_loss /len(val_loader)
val_total_c_loss = val_total_c_loss /len(val_loader)
            if USE_VISDOM:
                update_vis_plot(viz, epoch + 1, val_epoch_loss, val_plot, None, 'append')
if (((current_train_step) % 100) == 0) or (current_train_step % 1 == 0 and current_train_step < 300):
print(
'epoch: [{}/{}], total step: [{}/{}], batch step [{}/{}], lr: {},one_loss: {:.4f},val_loss: {:.4f}'
.format(epoch + 1, num_epochs, current_train_step, total_train_step, i + 1, total_step,
([param_group['lr'] for param_group in optimizer.param_groups])[0],
one_loss,val_loss ))
print('train loss',train_epoch_loss,'val loss',val_epoch_loss)
print('train conf loss',train_total_conf_loss,'val conf loss',val_total_conf_loss)
print('train xy loss',train_total_xy_loss,'val xy loss',val_total_xy_loss)
print('train wh loss',train_total_wh_loss,'val wh loss',val_total_wh_loss)
print('train class loss',train_total_c_loss,'val class loss',val_total_c_loss)
if not USE_GITHASH:
short_sha = 'noHash'
# if ((epoch % 1000) == 0) and (epoch != 0):
# if ((epoch % 100) == 0) :
if ((epoch % 10) == 0) :
#if (one_loss <= 1) :
save_checkpoint({
'epoch': epoch + 1,
'arch': "YOLOv2",
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, False, filename=os.path.join(checkpoint_path, 'ckpt_{}_ep{:05d}_loss{:.04f}_lr{}.pth.tar'.format(short_sha, epoch, one_loss.item(), ([param_group['lr'] for param_group in optimizer.param_groups])[0])))
# print(dir(model))
filename = os.path.join(checkpoint_path, 'ckpt_{}_ep{:05d}_loss{:.04f}_lr{}model.pth.tar'.format(short_sha, epoch, one_loss.item(), ([param_group['lr'] for param_group in optimizer.param_groups])[0]))
torch.save(model.module.state_dict(),filename)
|
the-stack_0_13665 | from __future__ import unicode_literals
import tablib
from django.test import TestCase
from import_export import instance_loaders
from import_export import resources
from core.models import Book
class CachedInstanceLoaderTest(TestCase):
def setUp(self):
self.resource = resources.modelresource_factory(Book)()
self.dataset = tablib.Dataset(headers=['id', 'name', 'author_email'])
self.book = Book.objects.create(name="Some book")
self.book2 = Book.objects.create(name="Some other book")
row = [str(self.book.pk), 'Some book', '[email protected]']
self.dataset.append(row)
self.instance_loader = instance_loaders.CachedInstanceLoader(
self.resource, self.dataset)
def test_all_instances(self):
self.assertTrue(self.instance_loader.all_instances)
self.assertEqual(len(self.instance_loader.all_instances), 1)
self.assertEqual(list(self.instance_loader.all_instances.keys()),
[self.book.pk])
def test_get_instance(self):
obj = self.instance_loader.get_instance(self.dataset.dict[0])
self.assertEqual(obj, self.book)
|
the-stack_0_13667 | # Copyright 2018 Gregory Szorc <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from .node import (
bin,
hex,
)
from .i18n import _
from .thirdparty import (
attr,
)
from . import (
error,
util,
)
from .utils import (
interfaceutil,
)
# Names of the SSH protocol implementations.
SSHV1 = 'ssh-v1'
# These are advertised over the wire. Increment the counters at the end
# to reflect BC breakages.
SSHV2 = 'exp-ssh-v2-0003'
HTTP_WIREPROTO_V2 = 'exp-http-v2-0003'
NARROWCAP = 'exp-narrow-1'
ELLIPSESCAP = 'exp-ellipses-1'
# All available wire protocol transports.
TRANSPORTS = {
SSHV1: {
'transport': 'ssh',
'version': 1,
},
SSHV2: {
'transport': 'ssh',
# TODO mark as version 2 once all commands are implemented.
'version': 1,
},
'http-v1': {
'transport': 'http',
'version': 1,
},
HTTP_WIREPROTO_V2: {
'transport': 'http',
'version': 2,
}
}
class bytesresponse(object):
"""A wire protocol response consisting of raw bytes."""
def __init__(self, data):
self.data = data
class ooberror(object):
"""wireproto reply: failure of a batch of operation
Something failed during a batch call. The error message is stored in
`self.message`.
"""
def __init__(self, message):
self.message = message
class pushres(object):
"""wireproto reply: success with simple integer return
The call was successful and returned an integer contained in `self.res`.
"""
def __init__(self, res, output):
self.res = res
self.output = output
class pusherr(object):
"""wireproto reply: failure
The call failed. The `self.res` attribute contains the error message.
"""
def __init__(self, res, output):
self.res = res
self.output = output
class streamres(object):
"""wireproto reply: binary stream
The call was successful and the result is a stream.
Accepts a generator containing chunks of data to be sent to the client.
``prefer_uncompressed`` indicates that the data is expected to be
uncompressable and that the stream should therefore use the ``none``
engine.
"""
def __init__(self, gen=None, prefer_uncompressed=False):
self.gen = gen
self.prefer_uncompressed = prefer_uncompressed
class streamreslegacy(object):
"""wireproto reply: uncompressed binary stream
The call was successful and the result is a stream.
Accepts a generator containing chunks of data to be sent to the client.
Like ``streamres``, but sends an uncompressed data for "version 1" clients
using the application/mercurial-0.1 media type.
"""
def __init__(self, gen=None):
self.gen = gen
# list of nodes encoding / decoding
def decodelist(l, sep=' '):
if l:
return [bin(v) for v in l.split(sep)]
return []
def encodelist(l, sep=' '):
try:
return sep.join(map(hex, l))
except TypeError:
raise
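# Illustrative sketch (not part of the original module): ``encodelist`` and
# ``decodelist`` round-trip a list of binary node ids through their
# space-separated hex representation:
#
#   >>> nodes = [b'\x12' * 20, b'\x34' * 20]
#   >>> decodelist(encodelist(nodes)) == nodes
#   True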
# batched call argument encoding
def escapebatcharg(plain):
return (plain
.replace(':', ':c')
.replace(',', ':o')
.replace(';', ':s')
.replace('=', ':e'))
def unescapebatcharg(escaped):
return (escaped
.replace(':e', '=')
.replace(':s', ';')
.replace(':o', ',')
.replace(':c', ':'))
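# Illustrative sketch (not part of the original module): ``:`` is escaped
# first and unescaped last, so values containing any of the reserved
# characters survive a round trip:
#
#   >>> escaped = escapebatcharg('key=value;x,y:z')
#   >>> escaped
#   'key:evalue:sx:oy:cz'
#   >>> unescapebatcharg(escaped)
#   'key=value;x,y:z'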
# mapping of options accepted by getbundle and their types
#
# Meant to be extended by extensions. It is the extension's responsibility to ensure
# such options are properly processed in exchange.getbundle.
#
# supported types are:
#
# :nodes: list of binary nodes
# :csv: list of comma-separated values
# :scsv: list of comma-separated values return as set
# :plain: string with no transformation needed.
GETBUNDLE_ARGUMENTS = {
'heads': 'nodes',
'bookmarks': 'boolean',
'common': 'nodes',
'obsmarkers': 'boolean',
'phases': 'boolean',
'bundlecaps': 'scsv',
'listkeys': 'csv',
'cg': 'boolean',
'cbattempted': 'boolean',
'stream': 'boolean',
}
class baseprotocolhandler(interfaceutil.Interface):
"""Abstract base class for wire protocol handlers.
A wire protocol handler serves as an interface between protocol command
handlers and the wire protocol transport layer. Protocol handlers provide
methods to read command arguments, redirect stdio for the duration of
the request, handle response types, etc.
"""
name = interfaceutil.Attribute(
"""The name of the protocol implementation.
Used for uniquely identifying the transport type.
""")
def getargs(args):
"""return the value for arguments in <args>
For version 1 transports, returns a list of values in the same
order they appear in ``args``. For version 2 transports, returns
a dict mapping argument name to value.
"""
def getprotocaps():
"""Returns the list of protocol-level capabilities of client
Returns a list of capabilities as declared by the client for
the current request (or connection for stateful protocol handlers)."""
def getpayload():
"""Provide a generator for the raw payload.
The caller is responsible for ensuring that the full payload is
processed.
"""
def mayberedirectstdio():
"""Context manager to possibly redirect stdio.
The context manager yields a file-object like object that receives
stdout and stderr output when the context manager is active. Or it
yields ``None`` if no I/O redirection occurs.
The intent of this context manager is to capture stdio output
so it may be sent in the response. Some transports support streaming
stdio to the client in real time. For these transports, stdio output
won't be captured.
"""
def client():
"""Returns a string representation of this client (as bytes)."""
def addcapabilities(repo, caps):
"""Adds advertised capabilities specific to this protocol.
Receives the list of capabilities collected so far.
Returns a list of capabilities. The passed in argument can be returned.
"""
def checkperm(perm):
"""Validate that the client has permissions to perform a request.
The argument is the permission required to proceed. If the client
doesn't have that permission, the exception should raise or abort
in a protocol specific manner.
"""
class commandentry(object):
"""Represents a declared wire protocol command."""
def __init__(self, func, args='', transports=None,
permission='push', cachekeyfn=None, extracapabilitiesfn=None):
self.func = func
self.args = args
self.transports = transports or set()
self.permission = permission
self.cachekeyfn = cachekeyfn
self.extracapabilitiesfn = extracapabilitiesfn
def _merge(self, func, args):
"""Merge this instance with an incoming 2-tuple.
This is called when a caller using the old 2-tuple API attempts
to replace an instance. The incoming values are merged with
data not captured by the 2-tuple and a new instance containing
the union of the two objects is returned.
"""
return commandentry(func, args=args, transports=set(self.transports),
permission=self.permission)
# Old code treats instances as 2-tuples. So expose that interface.
def __iter__(self):
yield self.func
yield self.args
def __getitem__(self, i):
if i == 0:
return self.func
elif i == 1:
return self.args
else:
raise IndexError('can only access elements 0 and 1')
class commanddict(dict):
"""Container for registered wire protocol commands.
It behaves like a dict. But __setitem__ is overwritten to allow silent
coercion of values from 2-tuples for API compatibility.
"""
def __setitem__(self, k, v):
if isinstance(v, commandentry):
pass
# Cast 2-tuples to commandentry instances.
elif isinstance(v, tuple):
if len(v) != 2:
raise ValueError('command tuples must have exactly 2 elements')
# It is common for extensions to wrap wire protocol commands via
# e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
# doing this aren't aware of the new API that uses objects to store
# command entries, we automatically merge old state with new.
if k in self:
v = self[k]._merge(v[0], v[1])
else:
# Use default values from @wireprotocommand.
v = commandentry(v[0], args=v[1],
transports=set(TRANSPORTS),
permission='push')
else:
raise ValueError('command entries must be commandentry instances '
'or 2-tuples')
return super(commanddict, self).__setitem__(k, v)
def commandavailable(self, command, proto):
"""Determine if a command is available for the requested protocol."""
assert proto.name in TRANSPORTS
entry = self.get(command)
if not entry:
return False
if proto.name not in entry.transports:
return False
return True
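# Illustrative sketch (not part of the original module): assigning a legacy
# 2-tuple into a ``commanddict`` is silently coerced into a ``commandentry``
# carrying the default transports and permission:
#
#   >>> cmds = commanddict()
#   >>> cmds['heads'] = (lambda repo, proto: b'', '')
#   >>> isinstance(cmds['heads'], commandentry)
#   True
#   >>> cmds['heads'].transports == set(TRANSPORTS)
#   True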
def supportedcompengines(ui, role):
"""Obtain the list of supported compression engines for a request."""
assert role in (util.CLIENTROLE, util.SERVERROLE)
compengines = util.compengines.supportedwireengines(role)
# Allow config to override default list and ordering.
if role == util.SERVERROLE:
configengines = ui.configlist('server', 'compressionengines')
config = 'server.compressionengines'
else:
# This is currently implemented mainly to facilitate testing. In most
# cases, the server should be in charge of choosing a compression engine
# because a server has the most to lose from a sub-optimal choice. (e.g.
# CPU DoS due to an expensive engine or a network DoS due to poor
# compression ratio).
configengines = ui.configlist('experimental',
'clientcompressionengines')
config = 'experimental.clientcompressionengines'
# No explicit config. Filter out the ones that aren't supposed to be
# advertised and return default ordering.
if not configengines:
attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
return [e for e in compengines
if getattr(e.wireprotosupport(), attr) > 0]
# If compression engines are listed in the config, assume there is a good
# reason for it (like server operators wanting to achieve specific
# performance characteristics). So fail fast if the config references
# unusable compression engines.
validnames = set(e.name() for e in compengines)
invalidnames = set(e for e in configengines if e not in validnames)
if invalidnames:
raise error.Abort(_('invalid compression engine defined in %s: %s') %
(config, ', '.join(sorted(invalidnames))))
compengines = [e for e in compengines if e.name() in configengines]
compengines = sorted(compengines,
key=lambda e: configengines.index(e.name()))
if not compengines:
raise error.Abort(_('%s config option does not specify any known '
'compression engines') % config,
hint=_('usable compression engines: %s') %
                          ', '.join(sorted(validnames)))
return compengines
@attr.s
class encodedresponse(object):
"""Represents response data that is already content encoded.
Wire protocol version 2 only.
Commands typically emit Python objects that are encoded and sent over the
wire. If commands emit an object of this type, the encoding step is bypassed
and the content from this object is used instead.
"""
data = attr.ib()
@attr.s
class alternatelocationresponse(object):
"""Represents a response available at an alternate location.
Instances are sent in place of actual response objects when the server
is sending a "content redirect" response.
Only compatible with wire protocol version 2.
"""
url = attr.ib()
mediatype = attr.ib()
size = attr.ib(default=None)
fullhashes = attr.ib(default=None)
fullhashseed = attr.ib(default=None)
serverdercerts = attr.ib(default=None)
servercadercerts = attr.ib(default=None)
@attr.s
class indefinitebytestringresponse(object):
"""Represents an object to be encoded to an indefinite length bytestring.
Instances are initialized from an iterable of chunks, with each chunk being
a bytes instance.
"""
chunks = attr.ib()
|
the-stack_0_13668 | #
# findPath.py finds the optimal path, p
#
# create by: Samuel King
#
from . import getRandomData, field, timeCosts
class Path ( field.Field ) :
def __init__ ( self ) :
super( Path, self ).__init__()
self.__finalPathString = []
self.__finalPathCoords = []
self.__overallTimeCost = 0.0
self.__zLen = self.getzLen()
self.__timeCosts = timeCosts.TimeCosts()
# find opt path, p, s.t. p.overallTimeCost is minimized
def findOverallPath ( self ) :
fieldFloor = self.getFieldFloor()
itemLocations = self.getItemLocations()
oneItemLocation = itemLocations[0]
start = ( oneItemLocation[0], oneItemLocation[1], oneItemLocation[2] + 2 )
# find opt path to item
pathToItem = self.findPathTrajectoryToItem( fieldFloor, start, False, start, False )
# Recursively find cost by flipping the graph on its head and traversing it from finish to start.
# This will allow us to avoid issues of getting stuck behind a 'mountain' and not taking the optimal path.
def findPathTrajectoryToItem ( self, fieldFloor, startLocation, pickedUp, currCoords, finished ) :
try :
self.__finalPathCoords.append( currCoords )
# return final path
if finished == True :
return self.__finalPathString.insert( 0, "Start!" )
            # first pick up the item
if startLocation == currCoords and pickedUp == False :
self.__overallTimeCost += self.__timeCosts.pickupTimeCost
self.__finalPathString.insert( 0, "Pick up Item from " + str( startLocation ) )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, currCoords, False )
# can move diagonally
if currCoords[0] - 1 >= 0 and \
currCoords[1] - 1 >= 0 and \
fieldFloor[ currCoords[0] - 1 ][ currCoords[1] - 1 ] < currCoords[2] and \
fieldFloor[ currCoords[0] - 1 ][ currCoords[1] ] < currCoords[2] and \
fieldFloor[ currCoords[0] ][ currCoords[1] - 1 ] < currCoords[2] :
self.__overallTimeCost += self.__timeCosts.moveDiagTimeCost
nextCoords = ( currCoords[0] - 1, currCoords[1] - 1, currCoords[2] )
self.__finalPathString.insert( 0, "Move Diagonally from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
# can move forward
if currCoords[0] - 1 >= 0 and fieldFloor[ currCoords[0] - 1 ][ currCoords[1] ] < currCoords[2] :
self.__overallTimeCost += self.__timeCosts.moveForwardTimeCost
nextCoords = ( currCoords[0] - 1, currCoords[1], currCoords[2] )
self.__finalPathString.insert( 0, "Move Forward from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
# can move to the right
if currCoords[1] - 1 >= 0 and fieldFloor[ currCoords[0] ][ currCoords[1] - 1 ] < currCoords[2] :
self.__overallTimeCost += self.__timeCosts.moveSideTimeCost
nextCoords = ( currCoords[0], currCoords[1] - 1, currCoords[2] )
self.__finalPathString.insert( 0, "Move To the Right from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
# can move down
if currCoords[2] + 1 <= self.__zLen :
self.__overallTimeCost += self.__timeCosts.moveDownTimeCost
nextCoords = ( currCoords[0], currCoords[1], currCoords[2] + 1 )
self.__finalPathString.insert( 0, "Move Down from " + str( nextCoords ) )
fin = nextCoords == ( 0, 0, self.__zLen )
return self.findPathTrajectoryToItem( fieldFloor, startLocation, True, nextCoords, fin )
raise ValueError("INVALID PATH!")
except ValueError as e :
print( "There was an invalid path at " + str(startLocation) + str(pickedUp) + str(currCoords) + str(finished))
# setter methods
def changeOverallTimeCost ( self, cost ) :
self.__overallTimeCost += cost
def changeTimeCosts (
self,
moveDiagTimeCost,
moveForwardTimeCost,
moveSideTimeCost,
moveUpTimeCost,
moveDownTimeCost,
pickupTimeCost ) :
self.__timeCosts.setTimeCosts(
moveDiagTimeCost,
moveForwardTimeCost,
moveSideTimeCost,
moveUpTimeCost,
moveDownTimeCost,
pickupTimeCost
)
# getter methods
def getOverallTimeCost ( self ) :
return self.__overallTimeCost
def getFinalPathCoords ( self ) :
return self.__finalPathCoords
def getFinalPathString ( self ) :
return self.__finalPathString
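# Illustrative usage sketch (not part of the original module; assumes the
# ``field.Field`` base class populates the field floor and item locations on
# construction, as the getters used above imply):
#
#   path = Path()
#   path.findOverallPath()
#   print(path.getFinalPathString())   # ordered, human-readable move list
#   print(path.getOverallTimeCost())   # accumulated time cost of the path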
|
the-stack_0_13669 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow import DAG
from airflow.models import TaskInstance
from airflow.contrib.operators.spark_submit_operator import SparkSubmitOperator
from airflow.utils import timezone
from datetime import timedelta
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSparkSubmitOperator(unittest.TestCase):
_config = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'archives': 'sample_archive.zip#SAMPLE',
'driver_class_path': 'parquet.jar',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/[email protected]',
'proxy_user': 'sample_user',
'name': '{{ task_instance.task_id }}',
'num_executors': 10,
'verbose': True,
'application': 'test_application.py',
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
'--start', '{{ macros.ds_add(ds, -1)}}',
'--end', '{{ ds }}',
            '--with-spaces', 'args should keep embedded spaces',
]
}
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
def test_execute(self):
# Given / When
conn_id = 'spark_default'
operator = SparkSubmitOperator(
task_id='spark_submit_job',
spark_binary="sparky",
dag=self.dag,
**self._config
)
# Then expected results
expected_dict = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'archives': 'sample_archive.zip#SAMPLE',
'driver_class_path': 'parquet.jar',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/[email protected]',
'proxy_user': 'sample_user',
'name': '{{ task_instance.task_id }}',
'num_executors': 10,
'verbose': True,
'application': 'test_application.py',
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
'--start', '{{ macros.ds_add(ds, -1)}}',
'--end', '{{ ds }}',
                '--with-spaces', 'args should keep embedded spaces',
],
'spark_binary': 'sparky'
}
self.assertEqual(conn_id, operator._conn_id)
self.assertEqual(expected_dict['application'], operator._application)
self.assertEqual(expected_dict['conf'], operator._conf)
self.assertEqual(expected_dict['files'], operator._files)
self.assertEqual(expected_dict['py_files'], operator._py_files)
self.assertEqual(expected_dict['archives'], operator._archives)
self.assertEqual(expected_dict['driver_class_path'], operator._driver_class_path)
self.assertEqual(expected_dict['jars'], operator._jars)
self.assertEqual(expected_dict['packages'], operator._packages)
self.assertEqual(expected_dict['exclude_packages'], operator._exclude_packages)
self.assertEqual(expected_dict['repositories'], operator._repositories)
self.assertEqual(expected_dict['total_executor_cores'],
operator._total_executor_cores)
self.assertEqual(expected_dict['executor_cores'], operator._executor_cores)
self.assertEqual(expected_dict['executor_memory'], operator._executor_memory)
self.assertEqual(expected_dict['keytab'], operator._keytab)
self.assertEqual(expected_dict['principal'], operator._principal)
self.assertEqual(expected_dict['proxy_user'], operator._proxy_user)
self.assertEqual(expected_dict['name'], operator._name)
self.assertEqual(expected_dict['num_executors'], operator._num_executors)
self.assertEqual(expected_dict['verbose'], operator._verbose)
self.assertEqual(expected_dict['java_class'], operator._java_class)
self.assertEqual(expected_dict['driver_memory'], operator._driver_memory)
self.assertEqual(expected_dict['application_args'], operator._application_args)
self.assertEqual(expected_dict['spark_binary'], operator._spark_binary)
def test_render_template(self):
# Given
operator = SparkSubmitOperator(task_id='spark_submit_job',
dag=self.dag, **self._config)
ti = TaskInstance(operator, DEFAULT_DATE)
# When
ti.render_templates()
# Then
expected_application_args = [u'-f', 'foo',
u'--bar', 'bar',
u'--start', (DEFAULT_DATE - timedelta(days=1))
.strftime("%Y-%m-%d"),
u'--end', DEFAULT_DATE.strftime("%Y-%m-%d"),
u'--with-spaces',
                                     u'args should keep embedded spaces',
]
expected_name = 'spark_submit_job'
self.assertListEqual(expected_application_args,
getattr(operator, '_application_args'))
self.assertEqual(expected_name, getattr(operator, '_name'))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13671 | #!/usr/bin/env python
import unittest
import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath
from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
class TestMAP(VppTestCase):
""" MAP Test Case """
def setUp(self):
super(TestMAP, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
# pg0 is 'inside' IPv4
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
# pg1 is 'outside' IPv6
self.pg1.admin_up()
self.pg1.config_ip6()
self.pg1.generate_remote_hosts(4)
self.pg1.configure_ipv6_neighbors()
def tearDown(self):
super(TestMAP, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
def send_and_assert_encapped(self, tx, ip6_src, ip6_dst, dmac=None):
if not dmac:
dmac = self.pg1.remote_mac
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
rx = rx[0]
self.assertEqual(rx[Ether].dst, dmac)
self.assertEqual(rx[IP].src, tx[IP].src)
self.assertEqual(rx[IPv6].src, ip6_src)
self.assertEqual(rx[IPv6].dst, ip6_dst)
def test_map_e(self):
""" MAP-E """
#
# Add a route to the MAP-BR
#
map_br_pfx = "2001::"
map_br_pfx_len = 64
map_route = VppIpRoute(self,
map_br_pfx,
map_br_pfx_len,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
map_route.add_vpp_config()
#
# Add a domain that maps from pg0 to pg1
#
map_dst = socket.inet_pton(socket.AF_INET6, map_br_pfx)
map_src = "3001::1"
map_src_n = socket.inet_pton(socket.AF_INET6, map_src)
client_pfx = socket.inet_pton(socket.AF_INET, "192.168.0.0")
self.vapi.map_add_domain(map_dst,
map_br_pfx_len,
map_src_n,
128,
client_pfx,
16)
#
# Fire in a v4 packet that will be encapped to the BR
#
v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(src=self.pg0.remote_ip4, dst='192.168.1.1') /
UDP(sport=20000, dport=10000) /
Raw('\xa5' * 100))
self.send_and_assert_encapped(v4, map_src, "2001::c0a8:0:0")
#
# Fire in a V6 encapped packet.
# expect a decapped packet on the inside ip4 link
#
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(dst=map_src, src="2001::1") /
IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
UDP(sport=20000, dport=10000) /
Raw('\xa5' * 100))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx = rx[0]
self.assertFalse(rx.haslayer(IPv6))
self.assertEqual(rx[IP].src, p[IP].src)
self.assertEqual(rx[IP].dst, p[IP].dst)
#
# Pre-resolve. No API for this!!
#
self.vapi.ppcli("map params pre-resolve ip6-nh 4001::1")
self.send_and_assert_no_replies(self.pg0, v4,
"resovled via default route")
#
# Add a route to 4001::1. Expect the encapped traffic to be
# sent via that routes next-hop
#
pre_res_route = VppIpRoute(
self, "4001::1", 128,
[VppRoutePath(self.pg1.remote_hosts[2].ip6,
self.pg1.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
pre_res_route.add_vpp_config()
self.send_and_assert_encapped(v4, map_src,
"2001::c0a8:0:0",
dmac=self.pg1.remote_hosts[2].mac)
#
# change the route to the pre-solved next-hop
#
pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6,
self.pg1.sw_if_index,
proto=DpoProto.DPO_PROTO_IP6)])
pre_res_route.add_vpp_config()
self.send_and_assert_encapped(v4, map_src,
"2001::c0a8:0:0",
dmac=self.pg1.remote_hosts[3].mac)
#
# cleanup. The test infra's object registry will ensure
# the route is really gone and thus that the unresolve worked.
#
pre_res_route.remove_vpp_config()
self.vapi.ppcli("map params pre-resolve del ip6-nh 4001::1")
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
the-stack_0_13672 | #!/usr/bin/env python3
# -*- coding: utf-8 -*
'''
Project name: JD-Script / test_test
Author: Curtin
Function: invite 5 people to earn 60 beans (at most 10 times / 600 beans per day); each invitee who finishes opening the cards earns 30 beans. One-off task. ck1 assists the Author, the other accounts assist ck1.
Date: 2021/11/14 6:21 PM
TG chat: https://t.me/topstyle996
TG channel: https://t.me/TopStyle2021
cron: 30 6,12,15,20 11-17 11 *
new Env('Brand joint card opening 11.11-11.17');
Activity entry: 16:/#A5eHpAAyC12xuX%,☂
'''
import requests
import random
import re
import sys
from time import sleep
import datetime
from urllib.parse import quote
try:
from jd_cookie import getJDCookie
getCk = getJDCookie()
except:
print("请先下载依赖脚本,\n下载链接:https://ghproxy.com/https://raw.githubusercontent.com/kongbg/JD-Script/main/jd_tool_dl.py")
sys.exit(3)
if datetime.datetime.now() > datetime.datetime.strptime('2021-11-18', "%Y-%m-%d"):
print("品牌联合开卡 11.11-11.17---活动结束\n请删掉脚本:jd_kk_test.py")
exit(3)
UserAgent = ''
activityId='96475ceebdf0418ab524c9bc68a789e8'
def userAgent():
"""
随机生成一个UA
:return:
"""
if not UserAgent:
uuid = ''.join(random.sample('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
iosVer = ''.join(random.sample(["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
iPhone = ''.join(random.sample(["8", "9", "10", "11", "12", "13"], 1))
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/8679C062-A41A-4A25-88F1-50A7A3EEF34A;model/iPhone{iPhone},1;addressid/3723896896;appBuild/167707;jdSupportDarkMode/0'
else:
return UserAgent
def isvObfuscator(ck):
headers = {
'J-E-H': '%7B%22ciphertype%22:5,%22cipher%22:%7B%22User-Agent%22:%22IuG0aVLeb25vBzO2Dzq2CyUyCMrfUQrlbwU7TJSmaU9JTJSmCJUkCJivCtLJY2PiZI8zBtKmAG==%22%7D,%22ts%22:1636865800,%22hdid%22:%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw=%22,%22version%22:%221.0.3%22,%22appname%22:%22com.360buy.jdmobile%22,%22ridx%22:-1%7D',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'JD4iPhone/167863%20(iPhone;%20iOS;%20Scale/3.00)',
'Cookie': ck,
'Host': 'api.m.jd.com',
'Referer': '',
'J-E-C': '%7B%22ciphertype%22:5,%22cipher%22:%7B%22pin%22:%22TUU5TJuyTJvQTUU3TUOnTJu1TUU1TUSmTUSnTUU2TJu4TUPQTUU0TUS4TJrOTUU1TUSmTJq2TUU1TUSmTUSn%22%7D,%22ts%22:1636884564,%22hdid%22:%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw=%22,%22version%22:%221.0.3%22,%22appname%22:%22com.360buy.jdmobile%22,%22ridx%22:-1%7D',
'Accept-Language': 'zh-Hans-CN;q=1',
'Accept': '*/*'
}
url = 'https://api.m.jd.com/client.action?functionId=isvObfuscator'
body = 'body={"url":"https:\/\/cjhydz-isv.isvjcloud.com","id":""}&build=167863&client=apple&clientVersion=10.2.2&d_brand=apple&d_model=iPhone14,3&ef=1&eid=&ep={"ciphertype":5,"cipher":{"screen":"CJS4DMeyDzc4","wifiBssid":"","osVersion":"CJUkCG==","area":"","openudid":"DtVwZtvvZJcmZwPtDtc5DJSmCtZvDzLsCzK2DJG2DtU1EWG5Dzc2ZK==","uuid":""},"ts":1636884530,"hdid":"","version":"1.0.3","appname":"com.360buy.jdmobile","ridx":-1}&ext={"prstate":"0"}&isBackground=N&joycious=67&lang=zh_CN&networkType=wifi&networklibtype=JDNetworkBaseAF&partner=apple&rfs=0000&scope=10&sign=0a635010067282017044162e187af9a7&st=1636884564653&sv=112&uemps=0-0'
resp = requests.post(url=url, headers=headers, data=body).json()
if resp['code'] == '0':
return resp['token']
else:
return ''
def buildheaders(ck):
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId=96475ceebdf0418ab524c9bc68a789e8&inviter=kNwcKz+y+wjfE/yhJf7Ph2cLh8yR0FTTtPtNBwC7New+Y72eTaNK0sHryLjn2YvU&inviterImg=http://storage.360buyimg.com/i.imageUpload/31333435303133353830315f7031363134333838323331343238_mid.jpg&inviterNickName=Curtinlv&shareuserid4minipg=kNwcKz%2By%2BwjfE%2FyhJf7Ph2cLh8yR0FTTtPtNBwC7New%2BY72eTaNK0sHryLjn2YvU&shopid=599119&lng=113.367448&lat=23.112787&sid=6ed3dcfe7c0bb6992246a5771fac1aaw&un_area=19_1601_3633_63243'
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Cookie': ck,
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Host': 'cjhydz-isv.isvjcloud.com',
'User-Agent': userAgent(),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9'
}
resp = requests.get(url, headers)
LZ_TOKEN = re.findall(r'(LZ_TOKEN_KEY=.*?;).*?(LZ_TOKEN_VALUE=.*?;)', resp.headers['Set-Cookie'])
return LZ_TOKEN[0][0]+LZ_TOKEN[0][1]
def getMyPing(ck):
sleep(1)
cookie = buildheaders(ck)
token = isvObfuscator(ck)
url = 'https://cjhydz-isv.isvjcloud.com/customer/getMyPing'
headers = {
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://cjhydz-isv.isvjcloud.com',
'User-Agent': userAgent(),
'Cookie': cookie,
'Host': 'cjhydz-isv.isvjcloud.com',
'Referer': 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId=96475ceebdf0418ab524c9bc68a789e8&',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
'Accept': 'application/json'
}
body = f'userId=599119&token={token}&fromType=APP&riskType=1'
resp = requests.post(url=url, headers=headers, data=body)
try:
pin = resp.json()['data']['pin']
secretPin = resp.json()['data']['secretPin']
userid = resp.json()['data']['id']
yunMidImageUrl = resp.json()['data']['yunMidImageUrl']
except Exception as e:
print("建议请稍等再试~", e)
sys.exit(1)
LZ_TOKEN_KEY = re.findall(r'(LZ_TOKEN_KEY=.*?;)', resp.headers['Set-Cookie'])[0]
LZ_TOKEN_VALUE = re.findall(r'(LZ_TOKEN_VALUE=.*?;)', resp.headers['Set-Cookie'])[0]
AUTH_C_USER = re.findall(r'(AUTH_C_USER=.*?;)', resp.headers['Set-Cookie'])[0]
headers = {
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://cjhydz-isv.isvjcloud.com',
'User-Agent': userAgent(),
'Cookie': LZ_TOKEN_KEY+LZ_TOKEN_VALUE+AUTH_C_USER+'APP_ABBR=CJHY;__jd_ref_cls=Mnpm_ComponentApplied;',
'Host': 'cjhydz-isv.isvjcloud.com',
'Referer': 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId=96475ceebdf0418ab524c9bc68a789e8&',
'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
'Accept': 'application/json'
}
return headers, pin, secretPin, userid, yunMidImageUrl
def accessLog(headers, body):
url = 'https://cjhydz-isv.isvjcloud.com/common/accessLog'
resp = requests.post(url=url, headers=headers, data=quote(body))
if resp.status_code == 200:
print('\t└accessLog ---> success')
else:
print('\t└accessLog ---> error')
def getOpenCardAllStatuesNew(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/getOpenCardAllStatuesNew'
body = f'activityId={activityId}&pin={secretPin}&isInvited=1'
resp = requests.post(url=url, headers=headers, data=body).json()
if resp['result']:
shoplist = resp['data']['list']
venderIdList = []
shopIdList = []
channelList = []
shopNameList = []
for i in shoplist:
if not i['statue']:
openCardLink = i['openCardLink']
shopid = re.findall(r'shopId=(\d+)', openCardLink)[0]
venderId = re.findall(r'venderId=(\d+)', openCardLink)[0]
channel = re.findall(r'channel=(\d+)', openCardLink)[0]
shopIdList.append(shopid)
venderIdList.append(venderId)
channelList.append(channel)
shopNameList.append(i['shopName'])
return shopIdList, venderIdList, channelList, shopNameList
def getShopOpenCardInfo(headers, venderId, channe):
url = f'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=getShopOpenCardInfo&body=%7B%22venderId%22%3A%22{venderId}%22%2C%22payUpShop%22%3Atrue%2C%22channel%22%3A{channe}%7D&client=H5&clientVersion=9.2.0&uuid=88888'
resp = requests.get(url=url, headers=headers).json()
if resp['result']['interestsRuleList']:
activityId = resp['result']['interestsRuleList'][0]['interestsInfo']['activityId']
return activityId
else:
return None
def bindWithVender(ck, inviterNickName, inviter):
headers = {
'Cookie': ck,
'Accept': '*/*',
'Connection': 'keep-alive',
'Referer': 'https://shopmember.m.jd.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'api.m.jd.com',
'User-Agent': userAgent(),
'Accept-Language': 'zh-CN,zh-Hans;q=0.9'
}
shopIdList, venderIdList, channelList, shopNameList= getOpenCardAllStatuesNew(ck)
for shopId,venderId,channe,shopName in zip(shopIdList, venderIdList, channelList, shopNameList):
shopcard_url = f'https://shopmember.m.jd.com/shopcard/?venderId={venderId}&shopId={shopId}&channel={channe}&returnUrl=https%3A%2F%2Fcjhydz-isv.isvjcloud.com%2FmicroDz%2Finvite%2Factivity%2Fwx%2Fview%2Findex%2F5986361%3FactivityId%3D{activityId}%26inviter%3D{inviter}%26inviterImg%3D%26inviterNickName%3D{inviterNickName}%26shareuserid4minipg%3D{inviter}%26shopid%3D599119%26lng%3D113.%26lat%3D23.%26sid%3D%26un_area%3D'
requests.get(url=shopcard_url, headers=headers)
sleep(1)
shopactivityId = getShopOpenCardInfo(headers, venderId, channe)
print("shopactivityId:", shopactivityId)
sleep(1)
bindWithVender_url = f'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=bindWithVender&body=%7B%22venderId%22%3A%22{venderId}%22%2C%22shopId%22%3A%22{shopId}%22%2C%22bindByVerifyCodeFlag%22%3A1%2C%22registerExtend%22%3A%7B%7D%2C%22writeChildFlag%22%3A0%2C%22activityId%22%3A{shopactivityId}%2C%22channel%22%3A{channe}%7D&client=H5&clientVersion=9.2.0&uuid=88888&'
resp = requests.get(url=bindWithVender_url, headers=headers).json()
print(f"\t└去开卡【{shopName}】")
if resp['success']:
print(f"\t\t└{resp['message']}")
else:
pass
print(f"\t└完成开卡获得30豆,京东明显查询【微定制-邀请瓜分京豆】。")
def getActivityInfo(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/getActivityInfo'
body = f'activityId={activityId}'
resp = requests.post(url, headers=headers, data=body).json()
# print(resp)
def isInvited(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/isInvited'
body = f'activityId={activityId}&pin={secretPin}'
resp = requests.post(url=url, headers=headers, data=body).json()
print(resp)
# exit(3)
# print(resp)
def inviteRecord(headers, inviter):
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/inviteRecord'
body = f'activityId={activityId}&inviter={inviter}&pageNo=1&pageSize=15&type=0'
resp = requests.post(url=url, headers=headers, data=body).json()
# print(resp)
def acceptInvite(headers, pin, secretPin, inviter, inviterNick, yunMidImageUrl):
inviteRecord(headers, inviter)
body = f'venderId=&code=99&pin={pin}&activityId={activityId}&pageUrl=https%3A%2F%2Fcjhydz-isv.isvjcloud.com%2FmicroDz%2Finvite%2Factivity%2Fwx%2Fview%2Findex%2F5986361%3FactivityId%3D{activityId}%26inviter%3D{inviter}%26inviterImg%3D%26inviterNickName%3D{inviterNick}%26shareuserid4minipg%3D{inviter}%26shopid%3D599119%26lng%3D%26lat%3D%26sid%3D%26un_area%3D&subType='
accessLog(headers, body)
url = 'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/acceptInvite'
body1 = f'activityId={activityId}&inviter={inviter}&inviterImg=&inviterNick={quote(inviterNick)}&invitee={secretPin}&inviteeImg={yunMidImageUrl}&inviteeNick={quote(pin)}'
headers['Referer'] = f'https://cjhydz-isv.isvjcloud.com/microDz/invite/activity/wx/view/index/5986361?activityId={activityId}&inviter={inviter}&inviterImg=&inviterNickName={inviterNick}&shareuserid4minipg={inviter}&shopid=599119&lng=113.&lat=23.&sid=6ed3dcfe7c0bb6992246a5771fac1aaw&un_area=19_1601_3633_63243'
resp = requests.post(url=url, headers=headers, data=body1).json()
print(f"\t└{resp['errorMessage']}")
def miniProgramShareInfo(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/miniProgramShareInfo/getInfo?activityId=96475ceebdf0418ab524c9bc68a789e8'
resp = requests.get(url=url, headers=headers).json()
# print(resp)
def getSimpleActInfoVo(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/customer/getSimpleActInfoVo'
body = f'activityId={activityId}'
resp = requests.post(url=url, headers=headers, data=body).json()
# print(resp)
def getSystemConfig(ck):
headers, pin, secretPin, userid, yunMidImageUrl = getMyPing(ck)
url = 'https://cjhydz-isv.isvjcloud.com/wxCommonInfo/getSystemConfig'
body = f'activityId={activityId}'
resp = requests.post(url=url, headers=headers, data=body).json()
# print(resp)
def start():
global MasterPin, Mastersecret
cookieList, nameList = getCk.iscookie()
a = 1
try:
for ck, user in zip(cookieList, nameList):
headers, pin, secret, userid, yunMidImageUrl = getMyPing(ck)
print(f"## 用户{a}【{user}】")
getSystemConfig(ck)
getSimpleActInfoVo(ck)
getActivityInfo(ck)
isInvited(ck)
if a == 1:
MasterPin = pin
Mastersecret = secret
print(f"用户{a}[{pin}]>>助力>>>[Curtinlv]")
acceptInvite(headers, MasterPin, Mastersecret, '2vlPNpSNPs2zwEu+07zbf8+iQEinB57W5aMO3vKdRy0Jah8sXZOcx4hozgiV81Rt697ulbLIDOIodMQ2RvALQQ==', 'Curtinlv', yunMidImageUrl)
bindWithVender(ck, MasterPin, Mastersecret)
a += 1
sleep(60)
continue
print(f"用户{a}[{pin}]>>助力>>>[{MasterPin}]")
acceptInvite(headers, pin, secret, Mastersecret, MasterPin, yunMidImageUrl)
body = f'venderId=&code=99&pin={secret}%253D%253D&activityId={activityId}&pageUrl=https%3A%2F%2Fcjhydz-isv.isvjcloud.com%2FmicroDz%2Finvite%2Factivity%2Fwx%2Fview%2Findex%2F5986361%3FactivityId%3D{activityId}%26inviter%3D{Mastersecret}%26inviterImg%3Dhttp%3A%2F%2Fstorage.360buyimg.com%2Fi.imageUpload%2F31333435303133353830315f7031363134333838323331343238_mid.jpg%26inviterNickName%3D{MasterPin}%26shareuserid4minipg%3D{Mastersecret}%26shopid%3D599119%26lng%3D113.%26lat%3D23.%26sid%3D%26un_area%3D&subType='
accessLog(headers,body)
bindWithVender(ck, MasterPin, Mastersecret)
sleep(60)
a += 1
except Exception as e:
pass
if __name__ == '__main__':
try:
start()
except:
print("网络异常,请稍等再试~\n") |
the-stack_0_13673 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('course', '0016_flowpagevisitgrade_graded_at_git_commit_sha'),
]
operations = [
migrations.AddField(
model_name='event',
name='all_day',
field=models.BooleanField(default=False, help_text='Only affects the rendering in the class calendar, in that a start time is not shown'),
preserve_default=True,
),
]
|
the-stack_0_13675 | from django.shortcuts import render,HttpResponse
from urllib import parse
from apps.test_case.services.HTTP_test_caseService import HTTP_test_caseService
from apps.test_case.services.HTTP_test_case_stepService import HTTP_test_case_stepService
from apps.interface.services.HTTP_interfaceService import HTTP_interfaceService
from apps.test_case.services.HTTP_test_case_debugService import HTTP_test_case_debugService
from apps.test_case.services.HTTP_test_case_step_debugService import HTTP_test_case_step_debugService
from apps.common.config import commonWebConfig
from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from apps.config.services.businessLineService import BusinessService
from apps.config.services.modulesService import ModulesService
from apps.config.services.uriService import UriService
from apps.config.services.serviceConfService import ServiceConfService
from apps.config.services.http_confService import HttpConfService
from apps.config.views.http_conf import getDebugBtn
from apps.common.func.WebFunc import *
from AutotestWebD.settings import isRelease
import json,time
from apps.version_manage.services.common_service import VersionService
def http_testCaseStepCheck(request):
context = {}
context["testCaseStepCheck"] = "current-page"
context["userName"] = request.session.get("userName")
context["checkBusinessLine"] = dbModelListToListDict(BusinessService.getAllBusinessLine())
context["checkModules"] = dbModelListToListDict(ModulesService.getAllModules())
if not isRelease:
context["env"] = "test"
    # Text labels
text = {}
text["pageTitle"] = "HTTP用例步骤查看"
context["text"] = text
context["page"] = 1
return render(request,"InterfaceTest/HTTPTestCase/HTTP_testCaseStep_check.html",context)
def http_testCaseStepListCheck(request):
page = request.POST.get("page")
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("checkVal")))
orderBy = request.POST.get("orderBy")
if isSqlInjectable(orderBy):
return HttpResponse("<script>alert('查询条件非法');</script>")
    # Decide which table to read data from based on the version (added by Wang Jiliang on 2018-02-24)
if VersionService.isCurrentVersion(request):
tbName = "tb_http_testcase_step"
versionCondition = ""
else:
tbName = "tb_version_http_testcase_step"
versionCondition = "and versionName='%s'" % request.session.get("version")
execSql = "SELECT t.*,u.userName,m.moduleName,b.bussinessLineName,mu.userName modByName,tc.id tid from %s t LEFT JOIN tb_user mu ON t.modBy = mu.loginName LEFT JOIN tb_modules m on t.moduleId = m.id LEFT JOIN tb_business_line b on t.businessLineId = b.id LEFT JOIN tb_user u ON t.addBy = u.loginName LEFT JOIN tb_http_testcase tc ON t.caseId = tc.caseId WHERE 1=1 and t.state=1 %s" % (tbName,versionCondition)
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
elif key == "caseFounder" :
checkList.append("%%%s%%" % checkArr[key])
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and (t.addBy LIKE %s or u.userName LIKE %s) """
continue
elif key == "module":
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and m.moduleName LIKE %s """
continue
elif key == "businessLine":
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and b.bussinessLineName LIKE %s """
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and t.%s """ % key
execSql += """ LIKE %s"""
execSql += """ ORDER BY %s""" % orderBy
context = pagination(sqlStr=execSql,attrList=checkList,page=page,pageNum=commonWebConfig.testCasePageNum)
response = render(request, "InterfaceTest/HTTPTestCase/SubPages/HTTP_testCaseStep_list_check_page.html",context)
return response
|
the-stack_0_13676 | import unittest
from formation import AppBuilder
from formation.tests.support import get_resource
class CanvasTestCase(unittest.TestCase):
builder = None
@classmethod
def setUpClass(cls) -> None:
cls.builder = AppBuilder(path=get_resource("canvas.xml"))
cls.canvas1 = cls.builder.canvas1
cls.canvas2 = cls.builder.canvas2
def test_loading(self):
self.assertEqual(len(self.canvas1.find_all()), 19)
self.assertEqual(len(self.canvas2.find_all()), 6)
def test_line(self):
line = self.builder.cv1_line
coords = self.canvas1.coords(line)
self.assertListEqual(
list(coords),
[25, 33, 292, 33, 382, 128, 542, 128, 542, 226]
)
def test_polygon(self):
poly = self.builder.cv1_polygon
coords = self.canvas1.coords(poly)
self.assertListEqual(
list(coords),
[68, 216, 67, 284, 151, 339, 366, 340, 448, 272, 448, 216]
)
self.assertEqual(self.canvas1.itemcget(poly, "fill"), "#1d731d")
def test_rectangle(self):
rec = self.builder.cv2_rectangle
coords = self.canvas2.coords(rec)
self.assertListEqual(list(coords), [372, 88, 423, 136])
self.assertEqual(self.canvas2.itemcget(rec, "stipple"), "gray12")
self.assertEqual(self.canvas2.itemcget(rec, "fill"), "#1d731d")
def test_oval(self):
circle = self.builder.cv1_circle2
coords = self.canvas1.coords(circle)
self.assertListEqual(list(coords), [177, 59, 288, 169])
self.assertEqual(self.canvas1.itemcget(circle, "stipple"), "gray12")
self.assertEqual(self.canvas1.itemcget(circle, "fill"), "#ff0000")
self.assertEqual(self.canvas1.itemcget(circle, "outline"), "#1d731d")
def test_arc(self):
arc = self.builder.cv2_arc1
coords = self.canvas2.coords(arc)
self.assertListEqual(list(coords), [78, 37, 190, 133])
self.assertEqual(float(self.canvas2.itemcget(arc, "extent")), 90.0)
self.assertEqual(float(self.canvas2.itemcget(arc, "start")), 0.0)
self.assertEqual(self.canvas2.itemcget(arc, "style"), "pieslice")
def test_image(self):
image = self.builder.cv1_image
self.assertListEqual(list(self.canvas1.coords(image)), [472, 67])
self.assertTrue(bool(self.canvas1.itemcget(image, "image")))
def test_bitmap(self):
bit = self.builder.cv1_bitmap
self.assertListEqual(list(self.canvas1.coords(bit)), [84, 115])
self.assertEqual(self.canvas1.itemcget(bit, "bitmap"), "gray12")
self.assertEqual(self.canvas1.itemcget(bit, "anchor"), "nw")
self.assertEqual(self.canvas1.itemcget(bit, "background"), "#1d731d")
def test_text(self):
text = self.builder.cv2_text
self.assertListEqual(list(self.canvas2.coords(text)), [280, 114])
self.assertEqual(self.canvas2.itemcget(text, "text"), "yet another layout")
self.assertEqual(self.canvas2.itemcget(text, "fill"), "#1d731d")
|
the-stack_0_13677 | import numpy as np
from cmath import sqrt
import qutip as qt
from operators import *
tol = 1e-16
def solvePoly(vec):
roots = np.empty(2, dtype=np.complex128)
vec[1]=2*vec[1]
if abs(vec[0]) <= tol:
roots[0] = np.inf
if abs(vec[1]) <= tol:
roots[1] = np.inf
else:
roots[1] = -vec[2]/vec[1]
else:
roots[0] = -0.5*(vec[1]+sqrt(vec[1]**2-4*vec[0]*vec[2]))/vec[0]
roots[1] = -vec[1]/vec[0] - roots[0]
return roots
def root_to_xyz(root):
if root == np.inf:
return [0,0,1]
x = root.real
y = root.imag
den = 1/(1.+(x**2)+(y**2))
    return [2*x*den,2*y*den, (1.-(x**2)-(y**2))*den]
def getStars(vec):
#converts 3-spinor into two stars
roots = np.empty(2, dtype=np.complex128)
stars = [[],[],[]] #stores x, y and z coordinates
vec[1] *= -np.sqrt(2)
if abs(vec[0]) <= tol:
roots[0] = np.inf
if abs(vec[1]) <= tol:
roots[1] = np.inf
else:
roots[1] = -vec[2]/vec[1]
else:
roots[0] = -0.5*(vec[1] + sqrt(vec[1]**2-4*vec[0]*vec[2]))/vec[0]
roots[1] = -vec[1]/vec[0] - roots[0]
for r in roots:
if r == np.inf:
stars[0].append(0)
stars[1].append(0)
stars[2].append(-1)
else:
x = r.real
y = r.imag
den = 1/(1.+(x**2)+(y**2))
stars[0].append(2*x*den)
stars[1].append(2*y*den)
stars[2].append((1.-(x**2)-(y**2))*den)
return stars
print(getStars([1,0,1]))
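# Extra sanity check (illustrative, not in the original script): for the
# spinor (0, 0, 1) both roots are at infinity, so both Majorana stars land
# on the south pole in this convention.
print(getStars([0, 0, 1]))  # expected: [[0, 0], [0, 0], [-1, -1]]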
b = qt.Bloch()
b.point_color = ['b','b','r','r','g','g','#CC6600','#CC6600'] #ensures point and line are same colour
b.add_points(getStars([1,sqrt(2),1]))
#b.add_points(getStars([1/sqrt(2),0,1/sqrt(2)]),meth='l')
b.xlabel = ['$<F_x>$','']
b.ylabel = ['$<F_y>$','']
b.zlabel = ['$<F_z>$','']
#b.add_points([[0,0],[-1,1],[0,0]], meth='l')
#b.add_points([[-1,1],[0,0],[0,0]], meth='l')
#b.add_points([0,0])
#b.add_points([0,0,-1])
b.show()
|
the-stack_0_13680 | # -*- coding: utf-8 -*-
import glob
import os
import codecs
import math
from collections import Counter, defaultdict
from itertools import chain, cycle
import torch
import torchtext.data
from torchtext.data import Field
from torchtext.vocab import Vocab
from onmt.inputters.text_dataset import text_fields, TextMultiField
from onmt.inputters.image_dataset import image_fields
from onmt.inputters.audio_dataset import audio_fields
from onmt.utils.logging import logger
# backwards compatibility
from onmt.inputters.text_dataset import _feature_tokenize # noqa: F401
from onmt.inputters.image_dataset import ( # noqa: F401
batch_img as make_img)
import gc
# monkey-patch to make torchtext Vocab's pickleable
def _getstate(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def _setstate(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
Vocab.__getstate__ = _getstate
Vocab.__setstate__ = _setstate
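# Illustrative sketch (not part of the original module): with the two patches
# above, a torchtext Vocab survives pickling, which is what allows field
# vocabularies to be stored in *.vocab.pt files:
#
#   import pickle
#   v = Vocab(Counter({'hello': 3, 'world': 1}))
#   assert pickle.loads(pickle.dumps(v)).stoi == v.stoi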
def make_src(data, vocab):
#print('in make src', data ,' vocab',vocab)
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
def get_fields(
src_data_type,
n_src_feats,
n_tgt_feats,
pad='<blank>',
bos='<s>',
eos='</s>',
dynamic_dict=False,
src_truncate=None,
tgt_truncate=None
):
"""
Args:
src_data_type: type of the source input. Options are [text|img|audio].
n_src_feats (int): the number of source features (not counting tokens)
to create a :class:`torchtext.data.Field` for. (If
``src_data_type=="text"``, these fields are stored together
as a ``TextMultiField``).
n_tgt_feats (int): See above.
pad (str): Special pad symbol. Used on src and tgt side.
bos (str): Special beginning of sequence symbol. Only relevant
for tgt.
eos (str): Special end of sequence symbol. Only relevant
for tgt.
dynamic_dict (bool): Whether or not to include source map and
alignment fields.
src_truncate: Cut off src sequences beyond this (passed to
``src_data_type``'s data reader - see there for more details).
tgt_truncate: Cut off tgt sequences beyond this (passed to
:class:`TextDataReader` - see there for more details).
Returns:
A dict mapping names to fields. These names need to match
the dataset example attributes.
"""
assert src_data_type in ['text', 'img', 'audio'], \
"Data type not implemented"
assert not dynamic_dict or src_data_type == 'text', \
'it is not possible to use dynamic_dict with non-text input'
fields = {}
fields_getters = {"text": text_fields,
"img": image_fields,
"audio": audio_fields}
src_field_kwargs = {"n_feats": n_src_feats,
"include_lengths": True,
"pad": pad, "bos": None, "eos": None,
"truncate": src_truncate,
"base_name": "src"}
fields["src"] = fields_getters[src_data_type](**src_field_kwargs)
tgt_field_kwargs = {"n_feats": n_tgt_feats,
"include_lengths": False,
"pad": pad, "bos": bos, "eos": eos,
"truncate": tgt_truncate,
"base_name": "tgt"}
fields["tgt"] = fields_getters["text"](**tgt_field_kwargs)
indices = Field(use_vocab=False, dtype=torch.long, sequential=False)
fields["indices"] = indices
if dynamic_dict:
src_map = Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
fields["src_map"] = src_map
align = Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["alignment"] = align
return fields
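# Illustrative sketch (not part of the original module): building the fields
# for a plain text-to-text setup with no extra token-level features; enabling
# dynamic_dict adds the copy-attention bookkeeping fields as well:
#
#   >>> fields = get_fields('text', n_src_feats=0, n_tgt_feats=0,
#   ...                     dynamic_dict=True)
#   >>> sorted(fields.keys())
#   ['alignment', 'indices', 'src', 'src_map', 'tgt']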
def load_old_vocab(vocab, data_type="text", dynamic_dict=False):
"""Update a legacy vocab/field format.
Args:
vocab: a list of (field name, torchtext.vocab.Vocab) pairs. This is the
format formerly saved in *.vocab.pt files. Or, text data
not using a :class:`TextMultiField`.
data_type (str): text, img, or audio
dynamic_dict (bool): Used for copy attention.
Returns:
a dictionary whose keys are the field names and whose values Fields.
"""
if _old_style_vocab(vocab):
# List[Tuple[str, Vocab]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
vocab = dict(vocab)
n_src_features = sum('src_feat_' in k for k in vocab)
n_tgt_features = sum('tgt_feat_' in k for k in vocab)
fields = get_fields(
data_type, n_src_features, n_tgt_features,
dynamic_dict=dynamic_dict)
for n, f in fields.items():
try:
f_iter = iter(f)
except TypeError:
f_iter = [(n, f)]
for sub_n, sub_f in f_iter:
if sub_n in vocab:
sub_f.vocab = vocab[sub_n]
return fields
if _old_style_field_list(vocab): # upgrade to multifield
# Dict[str, List[Tuple[str, Field]]]
# doesn't change structure - don't return early.
fields = vocab
for base_name, vals in fields.items():
if ((base_name == 'src' and data_type == 'text') or
base_name == 'tgt'):
assert not isinstance(vals[0][1], TextMultiField)
fields[base_name] = [(base_name, TextMultiField(
vals[0][0], vals[0][1], vals[1:]))]
if _old_style_nesting(vocab):
# Dict[str, List[Tuple[str, Field]]] -> List[Tuple[str, Field]]
# -> dict[str, Field]
fields = dict(list(chain.from_iterable(vocab.values())))
return fields
def _old_style_vocab(vocab):
"""Detect old-style vocabs (``List[Tuple[str, torchtext.data.Vocab]]``).
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is a list of pairs where the second object
is a :class:`torchtext.vocab.Vocab` object.
This exists because previously only the vocab objects from the fields
were saved directly, not the fields themselves, and the fields needed to
be reconstructed at training and translation time.
"""
return isinstance(vocab, list) and \
any(isinstance(v[1], Vocab) for v in vocab)
def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
any(isinstance(v, list) for v in vocab.values())
def _old_style_field_list(vocab):
"""Detect old-style text fields.
Not old style vocab, old nesting, and text-type fields not using
``TextMultiField``.
Args:
vocab: some object loaded from a *.vocab.pt file
Returns:
Whether ``vocab`` is not an :func:`_old_style_vocab` and not
a :class:`TextMultiField` (using an old-style text representation).
"""
# if tgt isn't using TextMultiField, then no text field is.
return (not _old_style_vocab(vocab)) and _old_style_nesting(vocab) and \
(not isinstance(vocab['tgt'][0][1], TextMultiField))
def old_style_vocab(vocab):
"""The vocab/fields need updated."""
return _old_style_vocab(vocab) or _old_style_field_list(vocab) or \
_old_style_nesting(vocab)
def filter_example(ex, use_src_len=True, use_tgt_len=True,
min_src_len=1, max_src_len=float('inf'),
min_tgt_len=1, max_tgt_len=float('inf')):
"""Return whether an example is an acceptable length.
If used with a dataset as ``filter_pred``, use :func:`partial()`
for all keyword arguments.
Args:
ex (torchtext.data.Example): An object with a ``src`` and ``tgt``
property.
use_src_len (bool): Filter based on the length of ``ex.src``.
use_tgt_len (bool): Similar to above.
min_src_len (int): A non-negative minimally acceptable length
(examples of exactly this length will be included).
min_tgt_len (int): Similar to above.
max_src_len (int or float): A non-negative (possibly infinite)
maximally acceptable length (examples of exactly this length
will be included).
max_tgt_len (int or float): Similar to above.
"""
src_len = len(ex.src[0])
tgt_len = len(ex.tgt[0])
return (not use_src_len or min_src_len <= src_len <= max_src_len) and \
(not use_tgt_len or min_tgt_len <= tgt_len <= max_tgt_len)
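# Illustrative sketch (not part of the original module): as the docstring
# suggests, bind the keyword arguments with functools.partial before passing
# the predicate to a dataset as ``filter_pred``:
#
#   from functools import partial
#   filter_pred = partial(filter_example,
#                         use_src_len=True, max_src_len=50,
#                         use_tgt_len=True, max_tgt_len=50)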
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
return
target_size = int(math.ceil(vocab_size / multiple)) * multiple
padding_tokens = [
"averyunlikelytoken%d" % i for i in range(target_size - vocab_size)]
vocab.extend(Vocab(Counter(), specials=padding_tokens))
return vocab
def _build_field_vocab(field, counter, size_multiple=1, **kwargs):
# this is basically copy-pasted from torchtext.
all_specials = [
field.unk_token, field.pad_token, field.init_token, field.eos_token
]
specials = [tok for tok in all_specials if tok is not None]
field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
if size_multiple > 1:
_pad_vocab_to_multiple(field.vocab, size_multiple)
def _load_vocab(vocab_path, name, counters):
# counters changes in place
vocab = _read_vocab_file(vocab_path, name)
vocab_size = len(vocab)
#print(" vocab size \n", vocab_size)
logger.info('Loaded %s vocab has %d tokens.' % (name, vocab_size))
for i, token in enumerate(vocab):
# keep the order of tokens specified in the vocab file by
# adding them to the counter with decreasing counting values
counters[name][token] = vocab_size - i
return vocab, vocab_size
def _build_fv_from_multifield(multifield, counters, build_fv_args,
size_multiple=1):
for name, field in multifield:
_build_field_vocab(
field,
counters[name],
size_multiple=size_multiple,
**build_fv_args[name])
logger.info(" * %s vocab size: %d." % (name, len(field.vocab)))
def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency,
vocab_size_multiple=1):
"""Build the fields for all data sides.
Args:
train_dataset_files: a list of train dataset pt file.
fields (dict[str, Field]): fields to build vocab for.
data_type (str): A supported data type string.
share_vocab (bool): share source and target vocabulary?
src_vocab_path (str): Path to src vocabulary file.
src_vocab_size (int): size of the source vocabulary.
src_words_min_frequency (int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path (str): Path to tgt vocabulary file.
tgt_vocab_size (int): size of the target vocabulary.
tgt_words_min_frequency (int): the minimum frequency needed to
include a target word in the vocabulary.
vocab_size_multiple (int): ensure that the vocabulary size is a
multiple of this value.
Returns:
Dict of Fields
"""
# print("in build vocab src_vocab_size_*********** \n",src_vocab_size)
counters = defaultdict(Counter)
# print("\n\nin build vocab\n", counters)
if src_vocab_path:
try:
logger.info("Using existing vocabulary...")
vocab = torch.load(src_vocab_path)
# return vocab to dump with standard name
return vocab
except torch.serialization.pickle.UnpicklingError:
logger.info("Building vocab from text file...")
# empty train_dataset_files so that vocab is only loaded from
# given paths in src_vocab_path, tgt_vocab_path
train_dataset_files = []
# print(src_vocab_path)
# assert False
# Load vocabulary
#print('src path', src_vocab_path)
#assert False
if src_vocab_path:
src_vocab, src_vocab_size = _load_vocab(
src_vocab_path, "src", counters)
# print('src vocab', src_vocab, 'src_vocab_size',src_vocab_size)
# assert False
else:
src_vocab = None
if tgt_vocab_path:
tgt_vocab, tgt_vocab_size = _load_vocab(
tgt_vocab_path, "tgt", counters)
else:
tgt_vocab = None
for i, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for name, field in fields.items():
try:
f_iter = iter(field)
except TypeError:
f_iter = [(name, field)]
all_data = [getattr(ex, name, None)]
else:
all_data = getattr(ex, name)
for (sub_n, sub_f), fd in zip(
f_iter, all_data):
has_vocab = (sub_n == 'src' and src_vocab) or \
(sub_n == 'tgt' and tgt_vocab)
if sub_f.sequential and not has_vocab:
val = fd
counters[sub_n].update(val)
        # Free datasets we no longer need from memory, keeping only the last one
if i < len(train_dataset_files) - 1:
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
build_fv_args = defaultdict(dict)
build_fv_args["src"] = dict(
max_size=src_vocab_size, min_freq=src_words_min_frequency)
build_fv_args["tgt"] = dict(
max_size=tgt_vocab_size, min_freq=tgt_words_min_frequency)
tgt_multifield = fields["tgt"]
_build_fv_from_multifield(
tgt_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if data_type == 'text':
src_multifield = fields["src"]
_build_fv_from_multifield(
src_multifield,
counters,
build_fv_args,
size_multiple=vocab_size_multiple if not share_vocab else 1)
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
src_field = src_multifield.base_field
tgt_field = tgt_multifield.base_field
_merge_field_vocabs(
src_field, tgt_field, vocab_size=src_vocab_size,
min_freq=src_words_min_frequency,
vocab_size_multiple=vocab_size_multiple)
logger.info(" * merged vocab size: %d." % len(src_field.vocab))
return fields # is the return necessary?
def _merge_field_vocabs(src_field, tgt_field, vocab_size, min_freq,
vocab_size_multiple):
# in the long run, shouldn't it be possible to do this by calling
# build_vocab with both the src and tgt data?
specials = [tgt_field.unk_token, tgt_field.pad_token,
tgt_field.init_token, tgt_field.eos_token]
merged = sum(
[src_field.vocab.freqs, tgt_field.vocab.freqs], Counter()
)
merged_vocab = Vocab(
merged, specials=specials,
max_size=vocab_size, min_freq=min_freq
)
if vocab_size_multiple > 1:
_pad_vocab_to_multiple(merged_vocab, vocab_size_multiple)
src_field.vocab = merged_vocab
tgt_field.vocab = merged_vocab
assert len(src_field.vocab) == len(tgt_field.vocab)
def _read_vocab_file(vocab_path, tag):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on a line by itself. Tokens must not
contain whitespace (else only before the whitespace
is considered).
tag (str): Used for logging which vocab is being read.
"""
logger.info("Loading {} vocabulary from {}".format(tag, vocab_path))
if not os.path.exists(vocab_path):
raise RuntimeError(
"{} vocabulary not found at {}".format(tag, vocab_path))
else:
with codecs.open(vocab_path, 'r', 'utf-8') as f:
# print('>>>>>>>>>>>>> ', vocab_path, tag)
# assert False
return [line.strip().split()[0] for line in f if line.strip()]
def batch_iter(data, batch_size, batch_size_fn=None, batch_size_multiple=1):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if batch_size_multiple > 1:
overflowed += (
(len(minibatch) - overflowed) % batch_size_multiple)
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1, size_so_far)
if minibatch:
yield minibatch
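# Hedged usage sketch (added for illustration, not part of the original
# module): with the default count-based batch_size_fn, batch_iter chunks a
# sequence so every yielded minibatch (except possibly the last) has a length
# that is a multiple of batch_size_multiple.
def _example_batch_iter():
    data = list(range(10))
    # Yields chunks of 4, 4 and a trailing 2.
    return list(batch_iter(data, batch_size=4, batch_size_multiple=2))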
class OrderedIterator(torchtext.data.Iterator):
def __init__(self,
dataset,
batch_size,
batch_size_multiple=1,
**kwargs):
super(OrderedIterator, self).__init__(dataset, batch_size, **kwargs)
self.batch_size_multiple = batch_size_multiple
def create_batches(self):
if self.train:
def _pool(data, random_shuffler):
for p in torchtext.data.batch(data, self.batch_size * 100):
p_batch = batch_iter(
sorted(p, key=self.sort_key),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = _pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in batch_iter(
self.data(),
self.batch_size,
batch_size_fn=self.batch_size_fn,
batch_size_multiple=self.batch_size_multiple):
self.batches.append(sorted(b, key=self.sort_key))
class DatasetLazyIter(object):
"""Yield data from sharded dataset files.
Args:
dataset_paths: a list containing the locations of dataset files.
fields (dict[str, Field]): fields dict for the
datasets.
batch_size (int): batch size.
batch_size_fn: custom batch process function.
device: See :class:`OrderedIterator` ``device``.
is_train (bool): train or valid?
"""
def __init__(self, dataset_paths, fields, batch_size, batch_size_fn,
batch_size_multiple, device, is_train, repeat=True,
num_batches_multiple=1):
self._paths = dataset_paths
self.fields = fields
self.batch_size = batch_size
self.batch_size_fn = batch_size_fn
self.batch_size_multiple = batch_size_multiple
self.device = device
self.is_train = is_train
self.repeat = repeat
self.num_batches_multiple = num_batches_multiple
def _iter_dataset(self, path):
cur_dataset = torch.load(path)
logger.info('Loading dataset from %s, number of examples: %d' %
(path, len(cur_dataset)))
cur_dataset.fields = self.fields
cur_iter = OrderedIterator(
dataset=cur_dataset,
batch_size=self.batch_size,
batch_size_multiple=self.batch_size_multiple,
batch_size_fn=self.batch_size_fn,
device=self.device,
train=self.is_train,
sort=False,
sort_within_batch=True,
repeat=False
)
for batch in cur_iter:
yield batch
cur_dataset.examples = None
gc.collect()
del cur_dataset
gc.collect()
def __iter__(self):
num_batches = 0
paths = self._paths
if self.is_train and self.repeat:
# Cycle through the shards indefinitely.
paths = cycle(paths)
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if self.is_train and not self.repeat and \
num_batches % self.num_batches_multiple != 0:
# When the dataset is not repeated, we might need to ensure that
# the number of returned batches is the multiple of a given value.
# This is important for multi GPU training to ensure that all
# workers have the same number of batches to process.
for path in paths:
for batch in self._iter_dataset(path):
yield batch
num_batches += 1
if num_batches % self.num_batches_multiple == 0:
return
def max_tok_len(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt[0]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
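# Hedged usage sketch (added for illustration; the toy example class below is
# an assumption, not part of the original module): max_tok_len plugs into
# batch_iter as batch_size_fn so the padded src/tgt token count per batch
# stays under the requested budget.
def _example_token_batching():
    class _ToyExample:
        def __init__(self, n_src, n_tgt):
            self.src = (list(range(n_src)),)
            self.tgt = (list(range(n_tgt)),)
    data = [_ToyExample(5, 7), _ToyExample(9, 4), _ToyExample(3, 3)]
    return list(batch_iter(data, batch_size=32, batch_size_fn=max_tok_len))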
def build_dataset_iter(corpus_type, fields, opt, is_train=True):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement simple ordered iterator strategy here,
but more sophisticated strategy like curriculum learning is ok too.
"""
dataset_paths = list(sorted(
glob.glob(opt.data + '.' + corpus_type + '*.pt')))
if not dataset_paths:
return None
batch_size = opt.batch_size if is_train else opt.valid_batch_size
batch_fn = max_tok_len if is_train and opt.batch_type == "tokens" else None
batch_size_multiple = 8 if opt.model_dtype == "fp16" else 1
device = "cuda" if opt.gpu_ranks else "cpu"
return DatasetLazyIter(
dataset_paths,
fields,
batch_size,
batch_fn,
batch_size_multiple,
device,
is_train,
repeat=not opt.single_pass,
num_batches_multiple=opt.accum_count * opt.world_size)
|
the-stack_0_13681 | # This is the MIT license: http://www.opensource.org/licenses/mit-license.php
#
# Copyright (c) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>.
# SQLAlchemy is a trademark of Michael Bayer.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from sqlalchemy import pool
from sqlalchemy.engine import default
from requests import Session
from .base import SolrDialect, SolrIdentifierPreparer, SolrCompiler
from sqlalchemy_solr.solrdbapi import api_globals
import logging
from .message_formatter import MessageFormatter
try:
from sqlalchemy.sql.compiler import SQLCompiler
except ImportError:
from sqlalchemy.sql.compiler import DefaultCompiler as SQLCompiler
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.ERROR)
try:
from sqlalchemy.types import BigInteger
except ImportError:
from sqlalchemy.databases.mysql import MSBigInteger as BigInteger
class SolrDialect_http(SolrDialect):
mf = MessageFormatter()
def __init__(self, **kw):
default.DefaultDialect.__init__(self, **kw)
self.supported_extensions = []
def create_connect_args(self, url, **kwargs):
url_port = url.port or 8047
qargs = {'host': url.host, 'port': url_port}
try:
db_parts = url.database.split('/')
db = ".".join(db_parts)
self.proto = "http://"
if 'use_ssl' in kwargs:
if kwargs['use_ssl'] in [True, 'True', 'true']:
self.proto = "https://"
# Mapping server path and collection
if db_parts[0]:
server_path = db_parts[0]
else:
raise AttributeError('Missing server path')
if db_parts[1]:
collection = db_parts[1]
else:
raise AttributeError('Missing collection')
# Save this for later use.
self.host = url.host
self.port = url_port
self.username = url.username
self.password = url.password
self.db = db
self.server_path = server_path
self.collection = collection
qargs.update(url.query)
qargs['db'] = db
qargs['server_path'] = server_path
qargs['collection'] = collection
qargs['username'] = url.username
qargs['password'] = url.password
except Exception as ex:
logging.error(self.mf
.format("Error in SolrDialect_http.create_connect_args :: ", str(ex)))
return [], qargs
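    # Hedged illustration (an assumption, not part of the original dialect):
    # the parsing above expects a connection URL roughly of the form
    #     solr://user:password@host:8983/solr/<collection>
    # where url.database ("<server_path>/<collection>") is split into
    # self.server_path and self.collection, and the port defaults to 8047
    # when omitted.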
def get_table_names(self, connection, schema=None, **kw):
session = Session()
local_payload = api_globals._PAYLOAD.copy()
local_payload['action'] = 'LIST'
try:
result = session.get(
self.proto + self.host + ":" + str(self.port) + "/" +
self.server_path + "/admin/collections",
params=local_payload,
headers=api_globals._HEADER,
auth=(self.username, self.password)
)
tables_names = result.json()['collections']
except Exception as ex:
logging.error("Error in SolrDialect_http.get_table_names :: " + str(ex))
return tuple(tables_names)
def get_columns(self, connection, table_name, schema=None, **kw):
columns = []
session = Session()
local_payload = api_globals._PAYLOAD.copy()
local_payload['action'] = 'LIST'
try:
result = session.get(
self.proto + self.host + ":" + str(self.port) + "/" +
self.server_path + "/" + table_name + "/admin/luke",
params=local_payload,
headers=api_globals._HEADER,
auth=(self.username, self.password)
)
fields = result.json()['fields']
for field in fields:
column = {
"name": field,
"type": self.get_data_type(fields[field]['type']),
"longType": self.get_data_type(fields[field]['type'])
}
columns.append(column)
return columns
except Exception as ex:
logging.error("Error in SolrDialect_http.get_table_names :: " + str(ex)) |
the-stack_0_13682 | from NIENV import *
# API METHODS --------------
# self.main_widget
# self.update_shape()
# Ports
# self.input(index)
# self.set_output_val(index, val)
# self.exec_output(index)
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', target='global')
# self.log_message('that\'s not good', target='error')
# --------------------------
class GetAttributes_NodeInstance(NodeInstance):
def __init__(self, params):
super(GetAttributes_NodeInstance, self).__init__(params)
self.special_actions['generate attribute outputs'] = {'method': M(self.init_attribute_ports)}
self.ready = False
# self.special_actions['action name'] = {'method': M(self.action_method)}
# ...
def update_event(self, input_called=-1):
if self.ready:
est = self.input(0)
attributes = [i for i in dir(est) if (i[-1] == "_" and i[0] != "_")]
attri = 0
for attr in attributes:
try:
self.set_output_val(attri, getattr(est, attr))
except:
self.set_output_val(attri, None)
attri += 1
def init_attribute_ports(self):
if self.input(0) == None:
return
est = self.input(0)
for i in range(len(self.outputs)):
self.delete_output(0)
attributes = [i for i in dir(est) if (i[-1] == "_" and i[0] != "_")]
attri = 0
for attr in attributes:
self.create_new_output(type_="data", label=attr, pos=-1)
try:
self.set_output_val(attri, getattr(est, attr))
except:
self.set_output_val(attri, None)
attri += 1
self.ready = True
def get_data(self):
data = {}
return data
def set_data(self, data):
pass
def removing(self):
pass
|
the-stack_0_13683 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class GRUCell(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRUCell, self).__init__()
self.hidden_size = hidden_size
# Layers
self.linear_z = nn.Linear(input_size+hidden_size, hidden_size)
self.linear_r = nn.Linear(input_size+hidden_size, hidden_size)
self.linear = nn.Linear(input_size+hidden_size, hidden_size)
self._initialization()
def _initialization(self):
a = -np.sqrt(1/self.hidden_size)
b = np.sqrt(1/self.hidden_size)
torch.nn.init.uniform_(self.linear_z.weight, a, b)
torch.nn.init.uniform_(self.linear_z.bias, a, b)
torch.nn.init.uniform_(self.linear_r.weight, a, b)
torch.nn.init.uniform_(self.linear_r.bias, a, b)
torch.nn.init.uniform_(self.linear.weight, a, b)
torch.nn.init.uniform_(self.linear.bias, a, b)
def forward(self, input_, hidden_state):
inputs_and_prev_state = torch.cat((input_, hidden_state), -1)
# z = sigma(W_z * a + U_z * h(t-1)) (3)
update_gate = self.linear_z(inputs_and_prev_state).sigmoid()
# r = sigma(W_r * a + U_r * h(t-1)) (4)
reset_gate = self.linear_r(inputs_and_prev_state).sigmoid()
# h_hat(t) = tanh(W * a + U*(r o h(t-1))) (5)
new_hidden_state = self.linear(torch.cat((input_, reset_gate * hidden_state), -1)).tanh()
# h(t) = (1-z) o h(t-1) + z o h_hat(t) (6)
output = (1 - update_gate) * hidden_state + update_gate * new_hidden_state
return output
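# Hedged usage sketch (added for illustration; the sizes are assumptions):
# the custom GRUCell maps an (input, hidden) pair to a new hidden state of
# size hidden_size.
def _example_gru_cell():
    cell = GRUCell(input_size=16, hidden_size=8)
    x = torch.randn(4, 16)   # batch of 4 input vectors
    h = torch.zeros(4, 8)    # initial hidden state
    return cell(x, h)        # tensor of shape (4, 8)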
class GGNNModel(nn.Module):
def __init__(self, attr_size, hidden_size, propag_steps):
super(GGNNModel, self).__init__()
self.attr_size = attr_size
self.hidden_size = hidden_size
self.propag_steps = propag_steps
# Layers
self.linear_i = nn.Linear(attr_size,hidden_size)
self.gru = GRUCell(2*hidden_size, hidden_size)
self.linear_o = nn.Linear(hidden_size, 1)
self._initialization()
def _initialization(self):
torch.nn.init.kaiming_normal_(self.linear_i.weight)
torch.nn.init.constant_(self.linear_i.bias, 0)
torch.nn.init.xavier_normal_(self.linear_o.weight)
torch.nn.init.constant_(self.linear_o.bias, 0)
def forward(self, attr_matrix, adj_matrix):
'''
attr_matrix of shape (batch, graph_size, attributes dimension)
adj_matrix of shape (batch, graph_size, graph_size)
> Only 0 (nonexistent) or 1 (existent) edge types
'''
mask = (attr_matrix[:,:,0] != 0)*1
A_in = adj_matrix.float()
A_out = torch.transpose(A_in,-2,-1)
if len(A_in.shape) < 3:
A_in = torch.unsqueeze(A_in,0)
A_out = torch.unsqueeze(A_out,0)
if len(attr_matrix.shape) < 3:
attr_matrix = torch.unsqueeze(attr_matrix,0)
hidden_state = self.linear_i(attr_matrix.float()).relu()
for step in range(self.propag_steps):
# a_v = A_v[h_1 ... h_|V|]
a_in = torch.bmm(A_in, hidden_state)
a_out = torch.bmm(A_out, hidden_state)
# GRU-like update
hidden_state = self.gru(torch.cat((a_in, a_out), -1), hidden_state)
# Output model
output = self.linear_o(hidden_state).squeeze(-1)
output = output + (mask + 1e-45).log() # Mask output
output = output.log_softmax(1)
return output
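# Hedged usage sketch (added for illustration; the random graph and sizes
# below are assumptions): GGNNModel returns per-node log-probabilities for a
# batch of attributed graphs.
def _example_ggnn():
    model = GGNNModel(attr_size=4, hidden_size=8, propag_steps=3)
    attrs = torch.randn(2, 5, 4)           # (batch, nodes, attribute dim)
    adj = torch.randint(0, 2, (2, 5, 5))   # 0/1 adjacency matrices
    return model(attrs, adj)               # (2, 5) log-softmax scores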
|
the-stack_0_13685 | from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from tqdm.notebook import tqdm
from helpers import count_unique_words, count_unique_ngrams, \
build_unique_ngrams, create_sentence_vectors, create_sentence_vectors_submission
import sys
import tensorflow as tf
from tensorflow import keras
import gensim # Not sure whether it is better to use gensim or tensorflow :/
import logging
from gensim.models.phrases import Phrases, Phraser
import multiprocessing
from gensim.models import Word2Vec
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sys.path.append('../')
import argparse
parser = argparse.ArgumentParser(description='Builds sentence representation using word vectors.')
parser.add_argument('--w2v_model',
required=True,
help='Word2Vec model pretrained')
parser.add_argument('--filter_size',
nargs='+',
required=True,
                    help='a list of sizes for the convolutional filters (usually odd numbers, for example 3 5)')
parser.add_argument('--hidden_layers_size',
nargs='+',
required=True,
help='a list of sizes for the hidden layers (usually 50-100)')
parser.add_argument('--output_model',
required=True,
help='path where the model will be saved')
args = parser.parse_args()
# Up to now everything is hardcoded, may be better to use parameters!
df = pd.read_pickle("dataframes/full_df_cleaned_train_0_8_glove200.pickle")
df_test = pd.read_pickle("dataframes/full_df_cleaned_test_0_2_glove200.pickle")
maxlen = 44 # magic number
def create_embedding_matrix(filepath, word_index, embedding_dim):
vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index
embedding_matrix = np.zeros((vocab_size, embedding_dim))
counter_wrong = 0
with open(filepath) as f:
for line in f:
word, *vector = line.split()
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(vector, dtype=np.float32)[:embedding_dim]
for row in range(embedding_matrix.shape[0]):
if not np.any(embedding_matrix[row,:]):
counter_wrong += 1
embedding_matrix[row,:] = np.random.rand(embedding_dim)
print("The number of times we didn't find a word is {} and should be 0, wtf".format(counter_wrong))
return embedding_matrix
def create_embedding_matrix_w2v(w2v_model, word_index):
vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index
    # Assume a common word such as "love" is present in the vocabulary and use it to read off the embedding dimension
embedding_matrix = np.zeros((vocab_size, w2v_model.wv.word_vec("love").shape[0]))
for word in w2v_model.wv.vocab:
vector = w2v_model.wv.word_vec(word)
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(
vector, dtype=np.float32)
for row in range(embedding_matrix.shape[0]):
if not np.any(embedding_matrix[row,:]):
            # TODO: double-check this random fallback for words missing from the w2v vocabulary
embedding_matrix[row,:] = np.random.rand(w2v_model.wv.vectors.shape[1])
return embedding_matrix
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer()
tokenizer.fit_on_texts(df.sentence)
vocab_size = len(tokenizer.word_index) + 1
X_train = tokenizer.texts_to_sequences(df.sentence)
X_test = tokenizer.texts_to_sequences(df_test.sentence)
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
y_train = np.where(df.label == 1, 1, 0)
y_test = np.where(df_test.label == 1, 1, 0)
if args.w2v_model == 'w2v':
# Use word2vec
w2v_model = gensim.models.KeyedVectors.load_word2vec_format('models/GoogleNews-vectors-negative300.bin', binary=True)
embedding_matrix = create_embedding_matrix_w2v(
w2v_model,
tokenizer.word_index)
## Embedding dimension
embedding_dim = w2v_model.wv.vectors.shape[1]
else:
# Use glove
embedding_dim = 200
embedding_matrix = create_embedding_matrix(
'glove/glove.twitter.27B.200d.txt',
tokenizer.word_index, embedding_dim)
# Compile the model
from tensorflow.keras.layers import GlobalMaxPooling1D, concatenate, Dropout, Dense, Embedding, Input, Conv1D
from tensorflow.keras.models import Model
# Specifying the input shape: the input is a sentence of maxlen words
embedding_layer = Embedding(vocab_size, output_dim=embedding_dim, weights=[embedding_matrix], input_length=maxlen,
trainable=True)
sequence_input = Input(shape=(maxlen,), dtype='int32')
# Creating the embedding using the previously constructed embedding matrix
embedded_sequences = embedding_layer(sequence_input)
convs = []
filter_sizes = [int(el) for el in args.filter_size]
for filter_size in filter_sizes:
# Creating the convolutional layer:
# "filters" represents the number of different windows we want (i.e. how many channels to produce),
# therefore in our case we will end up with 200 different convolutions
conv_layer = Conv1D(filters=256,
kernel_size=filter_size,
activation='relu')(embedded_sequences)
# Creating the global max pooling layer
pool_layer = GlobalMaxPooling1D()(conv_layer)
convs.append(pool_layer)
merged_layers = concatenate(convs, axis=1)
# Create dropout leayer: randomly set a fraction of input units to 0, which helps prevent overfitting
x = Dropout(0.2)(merged_layers)
# Create (regular) densely-connected layer
for el in args.hidden_layers_size:
x = Dense(int(el), activation='relu')(x)
x = Dropout(0.2)(x)
preds = Dense(1, activation='sigmoid')(x)
model_tw = Model(sequence_input, preds)
model_tw.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model_tw.summary()
from tensorflow.keras.callbacks import ModelCheckpoint
filepath="models/cnn_glove_tw"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Finally fit the model
history = model_tw.fit(X_train, y_train, epochs=15, verbose=True, validation_data=(X_test, y_test), callbacks=callbacks_list, batch_size=512)
loss, accuracy = model_tw.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model_tw.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
# NOTE: plot_history is assumed to come from a local plotting helper; it is not defined or imported in this script.
plot_history(history) |
the-stack_0_13687 | from typing import Callable
from urllib.parse import urlunsplit
from .typing import ASGIFramework
from .utils import invoke_asgi
class HTTPToHTTPSRedirectMiddleware:
def __init__(self, app: ASGIFramework, host: str) -> None:
self.app = app
self.host = host
async def __call__(self, scope: dict, receive: Callable, send: Callable) -> None:
if scope["type"] == "http" and scope["scheme"] == "http":
await self._send_http_redirect(scope, send)
elif scope["type"] == "websocket" and scope["scheme"] == "ws":
# If the server supports the WebSocket Denial Response
# extension we can send a redirection response, if not we
# can only deny the WebSocket connection.
if "websocket.http.response" in scope.get("extensions", {}):
await self._send_websocket_redirect(scope, send)
else:
await send({"type": "websocket.close"})
else:
return await invoke_asgi(self.app, scope, receive, send)
async def _send_http_redirect(self, scope: dict, send: Callable) -> None:
new_url = urlunsplit(
("https", self.host, scope["path"], scope["query_string"].decode(), "")
)
await send(
{
"type": "http.response.start",
"status": 307,
"headers": [(b"location", new_url.encode())],
}
)
await send({"type": "http.response.body"})
async def _send_websocket_redirect(self, scope: dict, send: Callable) -> None:
# If the HTTP version is 2 we should redirect with a https
# scheme not wss.
scheme = "wss"
if scope.get("http_version", "1.1") == "2.0":
scheme = "https"
new_url = urlunsplit((scheme, self.host, scope["path"], scope["query_string"].decode(), ""))
await send(
{
"type": "websocket.http.response.start",
"status": 307,
"headers": [(b"location", new_url.encode())],
}
)
await send({"type": "websocket.http.response.body"})
|
the-stack_0_13691 | import sys
import logging
import json
from collections import OrderedDict
from redash import settings
logger = logging.getLogger(__name__)
__all__ = [
'BaseQueryRunner',
'InterruptException',
'BaseSQLQueryRunner',
'TYPE_DATETIME',
'TYPE_BOOLEAN',
'TYPE_INTEGER',
'TYPE_STRING',
'TYPE_DATE',
'TYPE_FLOAT',
'SUPPORTED_COLUMN_TYPES',
'register',
'get_query_runner',
'import_query_runners'
]
# Valid types of columns returned in results:
TYPE_INTEGER = 'integer'
TYPE_FLOAT = 'float'
TYPE_BOOLEAN = 'boolean'
TYPE_STRING = 'string'
TYPE_DATETIME = 'datetime'
TYPE_DATE = 'date'
SUPPORTED_COLUMN_TYPES = set([
TYPE_INTEGER,
TYPE_FLOAT,
TYPE_BOOLEAN,
TYPE_STRING,
TYPE_DATETIME,
TYPE_DATE
])
class InterruptException(Exception):
pass
class NotSupported(Exception):
pass
class BaseQueryRunner(object):
noop_query = None
def __init__(self, configuration):
self.syntax = 'sql'
self.configuration = configuration
@classmethod
def name(cls):
return cls.__name__
@classmethod
def type(cls):
return cls.__name__.lower()
@classmethod
def enabled(cls):
return True
@classmethod
def annotate_query(cls):
return True
@classmethod
def configuration_schema(cls):
return {}
def test_connection(self):
if self.noop_query is None:
raise NotImplementedError()
data, error = self.run_query(self.noop_query, None)
if error is not None:
raise Exception(error)
def run_query(self, query, user):
raise NotImplementedError()
def fetch_columns(self, columns):
column_names = []
duplicates_counter = 1
new_columns = []
for col in columns:
column_name = col[0]
if column_name in column_names:
column_name = "{}{}".format(column_name, duplicates_counter)
duplicates_counter += 1
column_names.append(column_name)
new_columns.append({'name': column_name,
'friendly_name': column_name,
'type': col[1]})
return new_columns
def get_schema(self, get_stats=False):
raise NotSupported()
def _run_query_internal(self, query):
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed running query [%s]." % query)
return json.loads(results)['rows']
@classmethod
def to_dict(cls):
return {
'name': cls.name(),
'type': cls.type(),
'configuration_schema': cls.configuration_schema()
}
class BaseSQLQueryRunner(BaseQueryRunner):
def get_schema(self, get_stats=False):
schema_dict = {}
self._get_tables(schema_dict)
if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:
self._get_tables_stats(schema_dict)
return schema_dict.values()
def _get_tables(self, schema_dict):
return []
def _get_tables_stats(self, tables_dict):
for t in tables_dict.keys():
if type(tables_dict[t]) == dict:
res = self._run_query_internal('select count(*) as cnt from %s' % t)
tables_dict[t]['size'] = res[0]['cnt']
query_runners = {}
def register(query_runner_class):
global query_runners
if query_runner_class.enabled():
logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
query_runners[query_runner_class.type()] = query_runner_class
else:
logger.debug("%s query runner enabled but not supported, not registering. Either disable or install missing "
"dependencies.", query_runner_class.name())
def get_query_runner(query_runner_type, configuration):
query_runner_class = query_runners.get(query_runner_type, None)
if query_runner_class is None:
return None
return query_runner_class(configuration)
def get_configuration_schema_for_query_runner_type(query_runner_type):
query_runner_class = query_runners.get(query_runner_type, None)
if query_runner_class is None:
return None
return query_runner_class.configuration_schema()
def import_query_runners(query_runner_imports):
for runner_import in query_runner_imports:
__import__(runner_import)
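# Hedged usage sketch (added for illustration; the toy runner below is an
# assumption, not a real Redash query runner): subclasses of BaseQueryRunner
# register themselves and are later looked up by their lower-cased class name.
def _example_register_and_lookup(configuration):
    class EchoQueryRunner(BaseQueryRunner):
        noop_query = "SELECT 1"

        def run_query(self, query, user):
            return json.dumps({"columns": [], "rows": []}), None

    register(EchoQueryRunner)
    return get_query_runner("echoqueryrunner", configuration)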
|
the-stack_0_13692 | import summarizer as nlp
import csv
from sklearn.preprocessing import OneHotEncoder
import numpy as np
from collections import defaultdict, Counter
import math
from myfile import *
from googletrans import Translator
# turns .tsv file into list of lists
def tsv2mat(fname) :
with open(fname) as f:
wss = csv.reader(f, delimiter='\t')
return list(wss)
class Data :
'''
builds dataset from dependency edges in .tsv file associating
<from,link,to> edges and sentences in which they occur;
links are of the form POS_deprel_POS with POS and deprel
tags concatenated
'''
def __init__(self,fname='texts/english') :
edge_file="out/"+fname+".tsv"
if not nlp.exists_file(edge_file) :
nlp.process_file(fname=fname)
wss = tsv2mat(edge_file)
self.sents=tsv2mat("out/"+fname+"_sents.tsv")
occs=defaultdict(set)
sids=set()
lens=[]
for f,ff,r,tt,t,id in wss:
id=int(id)
if len(lens)<=id : lens.append(0)
lens[id]+=1
occs[(f,ff,r,tt,t)].add(id)
sids.add(id)
self.occs=occs # dict where edges occur
self.lens=lens # number of edges in each sentence
X,Y=list(zip(*list(occs.items())))
X = np.array(X)
y0 = np.array(sorted(map(lambda x:[x],sids)))
# make OneHot encoders for X and y
enc_X = OneHotEncoder(handle_unknown='ignore')
enc_y = OneHotEncoder(handle_unknown='ignore')
enc_X.fit(X)
enc_y.fit(y0)
hot_X = enc_X.transform(X).toarray()
self.enc_X = enc_X
self.enc_y = enc_y
self.X=X
# encode y as logical_or of sentence encodings it occurs in
ms=[]
for ys in Y :
m = np.array([[0]],dtype=np.float32)
for v in ys :
m0=enc_y.transform(np.array([[v]])).toarray()
m = np.logical_or(m,m0)
m=np.array(np.logical_or(m,m0),dtype=np.float32)
ms.append(m[0])
hot_y=np.array(ms)
self.hot_X=hot_X
self.hot_y =hot_y
        print('\nFINAL DATA SHAPES','X',hot_X.shape,'y',hot_y.shape)
#print('SENTENCE LENGTHS',lens)
class Query(Data) :
'''
builds <from,link,to> dependency links form a given
text query and matches it against data to retrive
sentences in which most of those edges occur
'''
def __init__(self,fname='texts/english'):
super().__init__(fname=fname)
text = file2text(fname + ".txt")
self.data_lang = nlp.detectLang(text)
self.nlp_engine=nlp.NLP()
def ask(self,text=None,interactive=False, tolang='en'):
'''
compute Jaccard similarity between
set of edges in query and each sentence,
then select the most similar ones
'''
if not text: text = input("Query:")
elif not interactive: print("Query:",text)
self.question_lang = nlp.detectLang(text)
print('qLang:', self.question_lang)
print('Data Lang:',self.data_lang)
if self.question_lang != self.data_lang:
translator = Translator()
if self.data_lang == 'zh':
text= translator.translate(text, dest='zh-cn').text
elif self.data_lang == 'jv':
text= translator.translate(text, dest='jw').text
else:
text= translator.translate(text, dest=self.data_lang).text
print('translated question:\n', text)
self.nlp_engine.from_text(text)
sids=[]
for f,ff,r,tt,t,_ in self.nlp_engine.facts() :
sids.extend(self.occs.get((f,ff,r,tt,t),[]))
self.save_answers(sids, tolang)
def save_answers(self, sids, tolang, k=3):
c = Counter(sids)
qlen=len(list(self.nlp_engine.facts()))
for id in c:
shared=c[id]
union_size=self.lens[id]+qlen-shared
#jaccard=shared/union_size
#c[id]=jaccard
c[id]=shared/math.log(union_size)
print('\nHIT WEIGHTS:', c, "\n")
best = c.most_common(k)
print('save_answers, question_lang:', self.question_lang, ', data_lang:\n', self.data_lang)
translator = Translator()
self.answer = defaultdict(set)
for sid, _ in best:
id, sent = self.sents[sid]
print(id, ':', sent)
if self.data_lang == tolang:
self.answer[id] = sent
else:
sent= translator.translate(sent, dest=tolang).text
self.answer[id] = sent
print("")
def show_answers(self):
print("\nSummary:")
for id in self.answer:
print(id, ':', self.answer[id])
print("")
def interact(self):
while True:
text = input("Query: ")
if not text: return
self.ask(text=text,interactive=True)
### TESTS ###
def qtest() :
q=Query()
q.ask(text="What did Penrose show?", tolang="en")
q.show_answers()
q.ask(text="What was in Roger's 1965 paper?", tolang="en")
q.show_answers()
def dtest(fname='texts/english') :
    ''' data loading test for a single document '''
    d=Data(fname=fname)
    print("X",d.hot_X.shape)
    print(d.hot_X)
    print("y",d.hot_y.shape)
    print(d.hot_y)
def dtests():
    ''' data loading tests'''
    dtest('texts/english')
    dtest('texts/spanish')
    dtest('texts/chinese')
    dtest('texts/russian')
def atest() :
''' tests symbolic and neural QA on given document '''
'''
i=Query('texts/english')
print("\n")
print("ALGORITHMICALLY DERIVED ANSWERS:\n")
i.ask("What did Penrose show about black holes?")
i.ask(text="What was in Roger's 1965 paper?")
print("\n")
'''
i=Query('texts/chinese')
print("\n")
print("ALGORITHMICALLY DERIVED ANSWERS:\n")
'''
i.ask("中国藏书有多少年历史?")
i.show_answers()
i.ask(text="设立图书馆情报学本科教育的学校有多少所?")
i.show_answers()
'''
i.ask("How many years is the Chinese collection of books?", tolang="en")
i.show_answers()
i.ask(text="How many schools have established undergraduate education in library and information science?", tolang="en")
i.show_answers()
print("\n")
if __name__=="__main__" :
atest()
|
the-stack_0_13693 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('button09.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.write_comment('A1', 'Foo')
worksheet2.insert_button('C2', {})
worksheet1.set_comments_author('John')
workbook.close()
self.assertExcelEqual()
|
the-stack_0_13694 | from typing import Any, List
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import index as libindex
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
ensure_platform_int,
is_categorical_dtype,
is_interval_dtype,
is_list_like,
is_scalar,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import accessor
from pandas.core.algorithms import take_1d
from pandas.core.arrays.categorical import Categorical, _recode_for_categories, contains
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="CategoricalIndex"))
@inherit_names(
[
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
"is_dtype_equal",
"min",
"max",
],
Categorical,
)
@accessor.delegate_names(
delegate=Categorical,
accessors=[
"rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered",
],
typ="method",
overwrite=True,
)
class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate):
"""
Index based on an underlying :class:`Categorical`.
CategoricalIndex, like Categorical, can only take on a limited,
and usually fixed, number of possible values (`categories`). Also,
like Categorical, it might have an order, but numerical operations
(additions, divisions, ...) are not possible.
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If `categories` are given, values not in
`categories` will be replaced with NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in `dtype`), they
will be inferred from the `data`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
.. versionadded:: 0.21.0
copy : bool, default False
Make a copy of input ndarray.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
Index : The base pandas Index type.
Categorical : A categorical array.
CategoricalDtype : Type for categorical data.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`_
for more.
Examples
--------
>>> pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'])
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
>>> pd.CategoricalIndex(c)
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa
Ordered ``CategoricalIndex`` can have a min and max value.
>>> ci = pd.CategoricalIndex(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') # noqa
>>> ci.min()
'c'
"""
_typ = "categoricalindex"
_raw_inherit = {
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
}
codes: np.ndarray
categories: Index
@property
def _engine_type(self):
# self.codes can have dtype int8, int16, int32 or int64, so we need
# to return the corresponding engine type (libindex.Int8Engine, etc.).
return {
np.int8: libindex.Int8Engine,
np.int16: libindex.Int16Engine,
np.int32: libindex.Int32Engine,
np.int64: libindex.Int64Engine,
}[self.codes.dtype.type]
_attributes = ["name"]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None
):
dtype = CategoricalDtype._from_values_or_dtype(data, categories, ordered, dtype)
name = maybe_extract_name(name, data, cls)
if not is_categorical_dtype(data):
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
raise cls._scalar_data_error(data)
data = []
data = cls._create_categorical(data, dtype=dtype)
data = data.copy() if copy else data
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, dtype=None, name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
dtype: CategoricalDtype, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
if dtype is None:
dtype = self.dtype
if name is None:
name = self.name
cat = Categorical.from_codes(codes, dtype=dtype)
return CategoricalIndex(cat, name=name)
@classmethod
def _create_categorical(cls, data, dtype=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
dtype : CategoricalDtype, defaults to existing
Returns
-------
Categorical
"""
if isinstance(data, (cls, ABCSeries)) and is_categorical_dtype(data):
data = data.values
if not isinstance(data, ABCCategorical):
return Categorical(data, dtype=dtype)
if isinstance(dtype, CategoricalDtype) and dtype != data.dtype:
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
return data
@classmethod
def _simple_new(cls, values, name=None, dtype=None):
result = object.__new__(cls)
values = cls._create_categorical(values, dtype=dtype)
result._data = values
result.name = name
result._reset_identity()
result._no_setting_name = False
return result
# --------------------------------------------------------------------
@Appender(_index_shared_docs["_shallow_copy"])
def _shallow_copy(self, values=None, dtype=None, **kwargs):
if dtype is None:
dtype = self.dtype
return super()._shallow_copy(values=values, dtype=dtype, **kwargs)
def _is_dtype_compat(self, other) -> bool:
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError(
"categories must match existing categories when appending"
)
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(other, dtype=self.dtype))
if not other.isin(values).all():
raise TypeError(
"cannot append a non-category item to a CategoricalIndex"
)
return other
def equals(self, other):
"""
Determine if two CategoricalIndex objects contain the same elements.
Returns
-------
bool
If two CategoricalIndex objects have equal elements True,
otherwise False.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
if isinstance(other, type(self)):
other = other._data
return self._data.equals(other)
except (TypeError, ValueError):
pass
return False
# --------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
attrs = [
(
"categories",
ibase.default_pprint(self.categories, max_seq_items=max_categories),
),
("ordered", self.ordered),
]
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
attrs.append(("dtype", f"'{self.dtype.name}'"))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
return attrs
# --------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "categorical"
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@property
def _has_complex_internals(self):
# used to avoid libreduction code paths, which raise or require conversion
return True
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
# We use _shallow_copy rather than the Index implementation
# (which uses _constructor) in order to preserve dtype.
return self._shallow_copy(result, name=name)
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_scalar(key) and isna(key):
return self.hasnans
hash(key)
return contains(self, key, container=self._engine)
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex(np.array(self))
elif is_categorical_dtype(dtype):
# GH 18630
dtype = self.dtype.update_dtype(dtype)
if dtype == self.dtype:
return self.copy() if copy else self
return Index.astype(self, dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs["fillna"])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves.
# To avoid a reference cycle, bind `codes` to a local variable, so
# `self` is not passed into the lambda.
codes = self.codes
return self._engine_type(lambda: codes, len(self))
@Appender(_index_shared_docs["index_unique"] % _index_doc_kwargs)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self.values.unique()
# CategoricalIndex._shallow_copy keeps original dtype
# if not otherwise specified
return self._shallow_copy(result, dtype=result.dtype)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
codes = self.codes.astype("i8")
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype("object")
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Raises
------
KeyError : if the key is not in the index
Examples
--------
>>> unique_index = pd.CategoricalIndex(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.CategoricalIndex(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.CategoricalIndex(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True], dtype=bool)
"""
code = self.categories.get_loc(key)
code = self.codes.dtype.type(code)
try:
return self._engine.get_loc(code)
except KeyError:
raise KeyError(key)
def get_value(self, series: AnyArrayLike, key: Any):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
Parameters
----------
series : Series, ExtensionArray, Index, or ndarray
1-dimensional array to take values from
key: : scalar
The value of this index at the position of the desired value,
otherwise the positional index of the desired value
Returns
-------
Any
The element of the series at the position indicated by the key
"""
k = key
try:
k = self._convert_scalar_indexer(k, kind="getitem")
indexer = self.get_loc(k)
return series.take([indexer])[0]
except (KeyError, TypeError):
pass
        # we might be a positional indexer
return super().get_value(series, key)
@Appender(_index_shared_docs["where"])
def where(self, cond, other=None):
# TODO: Investigate an alternative implementation with
# 1. copy the underlying Categorical
# 2. setitem with `cond` and `other`
# 3. Rebuild CategoricalIndex.
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
cat = Categorical(values, dtype=self.dtype)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase.ensure_index(target)
missing: List[int]
if self.equals(target):
indexer = None
missing = []
else:
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes) and indexer is not None:
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase.ensure_index(target)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype="intp")
if method == "pad" or method == "backfill":
raise NotImplementedError(
"method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex"
)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet for CategoricalIndex"
)
if isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target):
if self.values.equals(target.values):
# we have the same codes
codes = target.codes
else:
codes = _recode_for_categories(
target.codes, target.categories, self.values.categories
)
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
if isinstance(target, CategoricalIndex):
# Indexing on codes is more efficient if categories are the same:
if target.categories is self.categories:
target = target.codes
indexer, missing = self._engine.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
target = target.values
codes = self.categories.get_indexer(target)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
@Appender(_index_shared_docs["_convert_scalar_indexer"])
def _convert_scalar_indexer(self, key, kind=None):
if kind == "loc":
try:
return self.categories._convert_scalar_indexer(key, kind=kind)
except TypeError:
self._invalid_indexer("label", key)
return super()._convert_scalar_indexer(key, kind=kind)
@Appender(_index_shared_docs["_convert_list_indexer"])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only include values that are in the categories"
)
return self.get_indexer(keyarr)
@Appender(_index_shared_docs["_convert_arr_indexer"])
def _convert_arr_indexer(self, keyarr):
keyarr = com.asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs["_convert_index_indexer"])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
def take_nd(self, *args, **kwargs):
"""Alias for `take`"""
warnings.warn(
"CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(*args, **kwargs)
@Appender(_index_shared_docs["_maybe_cast_slice_bound"])
def _maybe_cast_slice_bound(self, label, side, kind):
if kind == "loc":
return label
return super()._maybe_cast_slice_bound(label, side, kind)
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc: int, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
        TypeError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1) and not (is_scalar(item) and isna(item)):
raise TypeError(
"cannot insert an item into a CategoricalIndex "
"that is not already an existing category"
)
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _concat(self, to_concat, name):
# if calling index is category, don't check dtype of others
return CategoricalIndex._concat_same_dtype(self, to_concat, name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
ValueError if other is not in the categories
"""
codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _delegate_property_get(self, name, *args, **kwargs):
""" method delegation to the ._values """
prop = getattr(self._values, name)
return prop # no wrapping for now
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if "inplace" in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res) or name in self._raw_inherit:
return res
return CategoricalIndex(res, name=self.name)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
|
the-stack_0_13696 | #!/usr/bin/python
""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_vrouterbgp
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to add/remove/modify vrouter-bgp.
description:
- Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a vRouter service that forwards traffic between
networks and implements Layer 4 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add bgp,
'absent' to remove bgp and 'update' to modify bgp.
required: True
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify a name for the vRouter service.
required: True
pn_neighbor:
description:
- Specify a neighbor IP address to use for BGP.
- Required for vrouter-bgp-add.
pn_remote_as:
description:
- Specify the remote Autonomous System(AS) number. This value is between
1 and 4294967295.
- Required for vrouter-bgp-add.
pn_next_hop_self:
description:
- Specify if the next-hop is the same router or not.
pn_password:
description:
- Specify a password, if desired.
pn_ebgp:
description:
- Specify a value for external BGP to accept or attempt BGP connections
to external peers, not directly connected, on the network. This is a
value between 1 and 255.
pn_prefix_listin:
description:
- Specify the prefix list to filter traffic inbound.
pn_prefix_listout:
description:
- Specify the prefix list to filter traffic outbound.
pn_route_reflector:
description:
- Specify if a route reflector client is used.
pn_override_capability:
description:
- Specify if you want to override capability.
pn_soft_reconfig:
description:
- Specify if you want a soft reconfiguration of inbound traffic.
pn_max_prefix:
description:
- Specify the maximum number of prefixes.
pn_max_prefix_warn:
description:
- Specify if you want a warning message when the maximum number of
prefixes is exceeded.
pn_bfd:
description:
- Specify if you want BFD protocol support for fault detection.
pn_multiprotocol:
description:
- Specify a multi-protocol for BGP.
choices: ['ipv4-unicast', 'ipv6-unicast']
pn_weight:
description:
- Specify a default weight value between 0 and 65535 for the neighbor
routes.
pn_default_originate:
description:
- Specify if you want announce default routes to the neighbor or not.
pn_keepalive:
description:
- Specify BGP neighbor keepalive interval in seconds.
pn_holdtime:
description:
- Specify BGP neighbor holdtime in seconds.
pn_route_mapin:
description:
- Specify inbound route map for neighbor.
pn_route_mapout:
description:
- Specify outbound route map for neighbor.
"""
EXAMPLES = """
- name: add vrouter-bgp
pn_vrouterbgp:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_neighbor: 104.104.104.1
pn_remote_as: 1800
- name: remove vrouter-bgp
pn_vrouterbgp:
state: 'absent'
pn_name: 'ansible-vrouter'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
    description: The set of responses from the vrouterbgp command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterbgp command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
VROUTER_EXISTS = None
NEIGHBOR_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-bgp-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If a BGP neighbor with the given ip exists on the given vRouter,
return NEIGHBOR_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
# Global flags
global VROUTER_EXISTS, NEIGHBOR_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for BGP neighbors
show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
show += 'format neighbor no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if neighbor in out:
NEIGHBOR_EXISTS = True
else:
NEIGHBOR_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-bgp-add'
if state == 'absent':
command = 'vrouter-bgp-remove'
if state == 'update':
command = 'vrouter-bgp-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_neighbor=dict(type='str'),
pn_remote_as=dict(type='str'),
pn_next_hop_self=dict(type='bool'),
pn_password=dict(type='str', no_log=True),
pn_ebgp=dict(type='int'),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_route_reflector=dict(type='bool'),
pn_override_capability=dict(type='bool'),
pn_soft_reconfig=dict(type='bool'),
pn_max_prefix=dict(type='int'),
pn_max_prefix_warn=dict(type='bool'),
pn_bfd=dict(type='bool'),
pn_multiprotocol=dict(type='str',
choices=['ipv4-unicast', 'ipv6-unicast']),
pn_weight=dict(type='int'),
pn_default_originate=dict(type='bool'),
pn_keepalive=dict(type='str'),
pn_holdtime=dict(type='str'),
pn_route_mapin=dict(type='str'),
pn_route_mapout=dict(type='str')
),
required_if=(
["state", "present",
["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
["state", "absent",
["pn_vrouter_name", "pn_neighbor"]],
["state", "update",
["pn_vrouter_name", "pn_neighbor"]]
)
)
# Accessing the arguments
    state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
neighbor = module.params['pn_neighbor']
remote_as = module.params['pn_remote_as']
next_hop_self = module.params['pn_next_hop_self']
password = module.params['pn_password']
ebgp = module.params['pn_ebgp']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
route_reflector = module.params['pn_route_reflector']
override_capability = module.params['pn_override_capability']
soft_reconfig = module.params['pn_soft_reconfig']
max_prefix = module.params['pn_max_prefix']
max_prefix_warn = module.params['pn_max_prefix_warn']
bfd = module.params['pn_bfd']
multiprotocol = module.params['pn_multiprotocol']
weight = module.params['pn_weight']
default_originate = module.params['pn_default_originate']
keepalive = module.params['pn_keepalive']
holdtime = module.params['pn_holdtime']
route_mapin = module.params['pn_route_mapin']
route_mapout = module.params['pn_route_mapout']
# Building the CLI command string
cli = pn_cli(module)
command = get_command_from_state(state)
if command == 'vrouter-bgp-remove':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NEIGHBOR_EXISTS is False:
module.exit_json(
skipped=True,
msg=('BGP neighbor with IP %s does not exist on %s'
% (neighbor, vrouter_name))
)
cli += (' %s vrouter-name %s neighbor %s '
% (command, vrouter_name, neighbor))
else:
if command == 'vrouter-bgp-add':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NEIGHBOR_EXISTS is True:
module.exit_json(
skipped=True,
msg=('BGP neighbor with IP %s already exists on %s'
% (neighbor, vrouter_name))
)
cli += (' %s vrouter-name %s neighbor %s '
% (command, vrouter_name, neighbor))
if remote_as:
cli += ' remote-as ' + str(remote_as)
if next_hop_self is True:
cli += ' next-hop-self '
if next_hop_self is False:
cli += ' no-next-hop-self '
if password:
cli += ' password ' + password
if ebgp:
cli += ' ebgp-multihop ' + str(ebgp)
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
if route_reflector is True:
cli += ' route-reflector-client '
if route_reflector is False:
cli += ' no-route-reflector-client '
if override_capability is True:
cli += ' override-capability '
if override_capability is False:
cli += ' no-override-capability '
if soft_reconfig is True:
cli += ' soft-reconfig-inbound '
if soft_reconfig is False:
cli += ' no-soft-reconfig-inbound '
if max_prefix:
cli += ' max-prefix ' + str(max_prefix)
if max_prefix_warn is True:
cli += ' max-prefix-warn-only '
if max_prefix_warn is False:
cli += ' no-max-prefix-warn-only '
if bfd is True:
cli += ' bfd '
if bfd is False:
cli += ' no-bfd '
if multiprotocol:
cli += ' multi-protocol ' + multiprotocol
if weight:
cli += ' weight ' + str(weight)
if default_originate is True:
cli += ' default-originate '
if default_originate is False:
cli += ' no-default-originate '
if keepalive:
cli += ' neighbor-keepalive-interval ' + keepalive
if holdtime:
cli += ' neighbor-holdtime ' + holdtime
if route_mapin:
cli += ' route-map-in ' + route_mapin
if route_mapout:
cli += ' route-map-out ' + route_mapout
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
the-stack_0_13697 |
import tensorflow as tf
from select_threshold_op import SelectThreshold
import numpy as np
import time
# SelectThreshold(x, pl, rowsplits, threshold=0.5)
nvert=10000
nfeat=128
xs = tf.constant(np.random.rand(nvert) ,dtype='float32')
xs = tf.reshape(xs, [-1,1])
rs = tf.constant([0,int(nvert/4),int(nvert/2),nvert],dtype='int32')
pl = tf.constant( np.random.rand(nvert,nfeat) ,dtype='float32')
print(xs, pl, rs)
newfeat, newrs, scatter_idxs = SelectThreshold(xs,pl,rs,threshold=0.5)
bef = time.time()
for _ in range(20):
newfeat, newrs, scatter_idxs = SelectThreshold(xs,pl,rs,threshold=0.5)
totaltime = time.time() - bef
print('output')
print(newfeat, rs, scatter_idxs)
print('scattered back')
print(tf.scatter_nd(scatter_idxs, newfeat ,shape=pl.shape))
print('total time', totaltime) |
the-stack_0_13698 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import BitcoinNickelTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(BitcoinNickelTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Add p2p connection to node0
node = self.nodes[0] # convenience reference to the node
node.add_p2p_connection(P2PDataStore())
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Create a new block with an anyone-can-spend coinbase")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block1], node, success=True)
self.log.info("Mature the block.")
node.generate(100)
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
# Use merkle-root malleability to generate an invalid block with
# same blockheader.
# Manufacture a block with 3 transactions (coinbase, spend of prior
# coinbase, spend of that spend). Duplicate the 3rd transaction to
# leave merkle root and blockheader unchanged but invalidate the block.
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
# b'0x51' is OP_TRUE
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert block2_orig.vtx != block2.vtx
node.p2p.send_blocks_and_test([block2], node, success=False, reject_code=16, reject_reason=b'bad-txns-duplicate')
# Check transactions for duplicate inputs
self.log.info("Test duplicate input block.")
block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0])
block2_orig.vtx[2].rehash()
block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root()
block2_orig.rehash()
block2_orig.solve()
node.p2p.send_blocks_and_test([block2_orig], node, success=False, reject_reason=b'bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
block3 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256 = None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
node.p2p.send_blocks_and_test([block3], node, success=False, reject_code=16, reject_reason=b'bad-cb-amount')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
|
the-stack_0_13701 | from django.conf.urls import url
from . import views
app_name = 'prospecting'
urlpatterns = [
# ex: /prospecting/
url(r'^$', views.IndexView.as_view(), name='index'),
# ex: /prospecting/5/
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
# ex: /prospecting/5/results/
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
    # ex: /prospecting/dashboard/
url(r'^dashboard/$', views.dashboard, name='dashboard'),
# ex: /prospecting/5/prospect_views/
url(r'^(?P<account_id>[0-9]+)/prospect_view/$', views.ProspectViewView, name='prospect_view'),
# page for adding a new account
url(r'^new_account/$', views.new_account, name='new_account'),
# page for adding a new prospect
url(r'^new_prospect/(?P<account_id>\d+)/$', views.new_prospect, name='new_prospect'),
# editing account information view
url(r'^edit_account/(?P<account_id>\d+)/$', views.edit_account, name='edit_account'),
# editing prospect information view
url(r'^edit_prospect/(?P<prospect_id>\d+)/$', views.edit_prospect, name='edit_prospect'),
]
|
the-stack_0_13702 | from NekoGram import Neko, Bot
import pytest
neko = Neko(bot=Bot(token='0:0', validate_token=False), validate_text_names=False)
@pytest.mark.asyncio
async def test_build_response():
raw_json = '{"x": {"text": "hello"} }'
neko.add_texts(texts=raw_json, lang='en')
data = await neko.build_text(text='x', user='en')
assert data.data.text == 'hello'
|
the-stack_0_13705 | import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
import pretrainedmodels
from ghost_net import ghost_net
from torchreid.models import resnet
######################################################################
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal.
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm1d') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
init.normal_(m.weight.data, std=0.001)
init.constant_(m.bias.data, 0.0)
# Defines the new fc layer and classification layer
# |--Linear--|--bn--|--relu--|--Linear--|
class ClassBlock(nn.Module):
def __init__(self, input_dim, class_num, droprate, relu=False, bnorm=True, num_bottleneck=512, linear=True, return_f = False):
super(ClassBlock, self).__init__()
self.return_f = return_f
add_block = []
if linear:
add_block += [nn.Linear(input_dim, num_bottleneck)]
else:
num_bottleneck = input_dim
if bnorm:
add_block += [nn.BatchNorm1d(num_bottleneck)]
if relu:
add_block += [nn.LeakyReLU(0.1)]
if droprate>0:
add_block += [nn.Dropout(p=droprate)]
add_block = nn.Sequential(*add_block)
add_block.apply(weights_init_kaiming)
classifier = []
classifier += [nn.Linear(num_bottleneck, class_num)]
classifier = nn.Sequential(*classifier)
classifier.apply(weights_init_classifier)
self.add_block = add_block
self.classifier = classifier
def forward(self, x):
x = self.add_block(x)
if self.return_f:
f = x
x = self.classifier(x)
return x,f
else:
x = self.classifier(x)
return x
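# Hedged usage sketch (illustrative helper, not called by the training code):
# ClassBlock maps a flattened backbone feature to class logits, e.g. a 2048-dim
# ResNet-50 feature to 751 identity classes (751 matches the debug block at the
# bottom of this file); the batch size of 8 is an arbitrary assumption.
def _classblock_usage_example():
    block = ClassBlock(input_dim=2048, class_num=751, droprate=0.5)
    logits = block(torch.randn(8, 2048))  # -> tensor of shape [8, 751]
    return logits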
# Define the ResNet50-based Model
class ft_net(nn.Module):
def __init__(self, class_num, droprate=0.5, stride=2):
super(ft_net, self).__init__()
model_ft = models.resnet50(pretrained=True)
# avg pooling to global pooling
if stride == 1:
model_ft.layer4[0].downsample[0].stride = (1,1)
model_ft.layer4[0].conv2.stride = (1,1)
model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.model = model_ft
self.classifier = ClassBlock(2048, class_num, droprate)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Define the DenseNet121-based Model
class ft_net_dense(nn.Module):
def __init__(self, class_num, droprate=0.5):
super().__init__()
model_ft = models.densenet121(pretrained=True)
model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1,1))
model_ft.fc = nn.Sequential()
self.model = model_ft
# For DenseNet, the feature dim is 1024
self.classifier = ClassBlock(1024, class_num, droprate)
def forward(self, x):
x = self.model.features(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Define the NAS-based Model
class ft_net_NAS(nn.Module):
def __init__(self, class_num, droprate=0.5):
super().__init__()
model_name = 'nasnetalarge'
# pip install pretrainedmodels
model_ft = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
model_ft.avg_pool = nn.AdaptiveAvgPool2d((1,1))
model_ft.dropout = nn.Sequential()
model_ft.last_linear = nn.Sequential()
self.model = model_ft
        # For NASNet-A-Large, the feature dim is 4032
self.classifier = ClassBlock(4032, class_num, droprate)
def forward(self, x):
x = self.model.features(x)
x = self.model.avg_pool(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Define the ResNet50-based Model (Middle-Concat)
# In the spirit of "The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching." Yu, Qian, et al. arXiv:1711.08106 (2017).
class ft_net_middle(nn.Module):
def __init__(self, class_num, droprate=0.5):
super(ft_net_middle, self).__init__()
model_ft = models.resnet50(pretrained=True)
# avg pooling to global pooling
model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.model = model_ft
self.classifier = ClassBlock(2048+1024, class_num, droprate)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
# x0 n*1024*1*1
x0 = self.model.avgpool(x)
x = self.model.layer4(x)
# x1 n*2048*1*1
x1 = self.model.avgpool(x)
x = torch.cat((x0,x1),1)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
# Part Model proposed in Yifan Sun etal. (2018)
class PCB(nn.Module):
def __init__(self, class_num ):
super(PCB, self).__init__()
self.part = 6 # We cut the pool5 to 6 parts
model_ft = models.resnet50(pretrained=True)
self.model = model_ft
self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
self.dropout = nn.Dropout(p=0.5)
# remove the final downsample
self.model.layer4[0].downsample[0].stride = (1,1)
self.model.layer4[0].conv2.stride = (1,1)
# define 6 classifiers
for i in range(self.part):
name = 'classifier'+str(i)
setattr(self, name, ClassBlock(2048, class_num, droprate=0.5, relu=False, bnorm=True, num_bottleneck=256))
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.avgpool(x)
x = self.dropout(x)
part = {}
predict = {}
# get six part feature batchsize*2048*6
for i in range(self.part):
part[i] = torch.squeeze(x[:,:,i])
name = 'classifier'+str(i)
c = getattr(self,name)
predict[i] = c(part[i])
# sum prediction
#y = predict[0]
#for i in range(self.part-1):
# y += predict[i+1]
y = []
for i in range(self.part):
y.append(predict[i])
return y
class PCB_test(nn.Module):
def __init__(self,model):
super(PCB_test,self).__init__()
self.part = 6
self.model = model.model
self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
# remove the final downsample
self.model.layer4[0].downsample[0].stride = (1,1)
self.model.layer4[0].conv2.stride = (1,1)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.avgpool(x)
y = x.view(x.size(0),x.size(1),x.size(2))
return y
# Define the ghost_net-based Model
class GhostNet(nn.Module):
def __init__(self, class_num, droprate=0.5, stride=2):
super(GhostNet, self).__init__()
model_gh = ghost_net(width_mult=1.0)
model_gh.avgpool = nn.AdaptiveAvgPool2d((1,1))
#model_gh.fc = nn.Sequential()
self.model = model_gh
self.classifier = ClassBlock(960, class_num, droprate)
def forward(self, x):
x = self.model.features(x)
x = self.model.squeeze(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
'''
# debug model structure
# Run this code with:
python model.py
'''
if __name__ == '__main__':
# Here I left a simple forward function.
# Test the model, before you train it.
net = GhostNet(751, stride=1)
net.classifier = nn.Sequential()
print(net)
input = Variable(torch.FloatTensor(8, 3, 256, 128))
output = net(input)
print('net output size:')
print(output.shape)
|
the-stack_0_13706 | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Object client"""
import logging
from openstackclient.common import utils
LOG = logging.getLogger(__name__)
DEFAULT_OBJECT_API_VERSION = '1'
API_VERSION_OPTION = 'os_object_api_version'
API_NAME = 'object'
API_VERSIONS = {
'1': 'openstackclient.object.client.ObjectClientv1',
}
def make_client(instance):
"""Returns an object service client."""
object_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
if instance._url:
endpoint = instance._url
else:
endpoint = instance.get_endpoint_for_service_type(API_NAME)
LOG.debug('instantiating object client')
client = object_client(
endpoint=endpoint,
token=instance._token,
)
return client
def build_option_parser(parser):
"""Hook to add global options"""
parser.add_argument(
'--os-object-api-version',
metavar='<object-api-version>',
default=utils.env(
'OS_OBJECT_API_VERSION',
default=DEFAULT_OBJECT_API_VERSION),
help='Object API version, default=' +
DEFAULT_OBJECT_API_VERSION +
' (Env: OS_OBJECT_API_VERSION)')
return parser
class ObjectClientv1(object):
def __init__(
self,
endpoint_type='publicURL',
endpoint=None,
token=None,
):
self.endpoint_type = endpoint_type
self.endpoint = endpoint
self.token = token
|
the-stack_0_13708 | import re
import sys
RE = re.compile(r'-?\d+')
ingredients = []
for line in open(sys.argv[1]).readlines():
ingredients.append([int(v) for v in RE.findall(line)])
def score(amounts):
negative = False
product = 1
for i in range(len(ingredients[0])-1):
iscore = sum([amounts[j]*ingredients[j][i] for j in range(len(ingredients))])
product *= abs(iscore)
if iscore < 0:
negative = True
return -product if negative else product
maxscore = 0
iter = 0
for a in range(101):
for b in range(101):
for c in range(101):
for d in range(101):
iter += 1
#if iter % 1000000 == 0:
# print(iter, maxscore)
if a+b+c+d != 100:
continue
maxscore = max(score([a,b,c,d]), maxscore)
print(maxscore)
|
the-stack_0_13710 | from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from test.utils import encode_jwt
class TestDecorators(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.private_key = b"""-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw
33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW
+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB
AoGAD+onAtVye4ic7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS
3MCyjjX2eMhu/aF5YhXBwkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5Cp
uGb1esyPbYW+Ty2PC0GSZfIXkXs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE
2+yo382CLJdrlSLVROWKwb4tb2PjhY4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0
GAtFr833AkEA6avx20OHo61Yela/4k5kQDtjEf1N0LfI+BcWZtxsS3jDM3i1Hp0K
Su5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2XIpsitLyPpuiMOvBbzPavd4gY
6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWiz+6zLoX1dbOZwJACmH5
fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT/SySDOxQ4G/523
Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxrE9MNUZ2aP
FaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==
-----END RSA PRIVATE KEY-----"""
self.public_key = b"""-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugd
UWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQs
HUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5D
o2kQ+X5xK9cipRgEKwIDAQAB
-----END PUBLIC KEY-----"""
self.settings = {
'audience': 'audience',
'issuer': 'issuer',
}
from ridi.django_jwt.config import configure
configure(
key=self.public_key,
audience='audience',
issuer='issuer',
algorithm='RS256',
)
def test_jwt_required(self):
from ridi.django_jwt.decorators import jwt_required
@jwt_required()
def view(request):
return HttpResponse('view')
encoded_jwt = encode_jwt({
'aud': self.settings['audience'],
'iss': self.settings['issuer'],
}, self.private_key)
request = self.factory.get('/', HTTP_AUTHORIZATION='Bearer ' + encoded_jwt)
response = view(request)
self.assertEqual(response.status_code, 200)
def test_jwt_required_mixin(self):
from ridi.django_jwt.decorators import JWTRequiredMixin
from django.views.generic import View
class IndexView(JWTRequiredMixin, View):
def get(self, request):
return HttpResponse()
encoded_jwt = encode_jwt({
'aud': self.settings['audience'],
'iss': self.settings['issuer'],
}, self.private_key)
request = self.factory.get('/', HTTP_AUTHORIZATION='Bearer ' + encoded_jwt)
response = IndexView.as_view()(request)
self.assertEqual(response.status_code, 200)
|
the-stack_0_13712 | import pickle
import sys, os.path
parent_dir = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, parent_dir)
from blinker._utilities import symbol
def test_symbols():
foo = symbol('foo')
assert foo.name == 'foo'
assert foo is symbol('foo')
bar = symbol('bar')
assert foo is not bar
assert foo != bar
assert not foo == bar
assert repr(foo) == 'foo'
def test_pickled_symbols():
foo = symbol('foo')
for protocol in 0, 1, 2:
roundtrip = pickle.loads(pickle.dumps(foo))
assert roundtrip is foo
|
the-stack_0_13714 | from collections import defaultdict
from typing import List
import config
def number_game(numbers: str, max_turns: int = 2020) -> int:
    """Simulate the number game.
    Args:
        numbers (str): comma-separated starting numbers for the game
        max_turns (int): number of turns to simulate (2020 by default)
    Returns:
        int: the number spoken on the final turn
    """
last_turns = defaultdict(list)
times_spoken = defaultdict(int)
numbers = numbers.split(',')
starting_turn = len(numbers) + 1
for turn, number in enumerate(numbers, start=1):
last_number = int(number)
last_turns[last_number].append(turn)
times_spoken[last_number] += 1
for turn in range(starting_turn, max_turns + 1):
if times_spoken[last_number] == 1:
last_number = 0
else:
last_number = (
last_turns[last_number][-1] - last_turns[last_number][-2])
last_turns[last_number].append(turn)
times_spoken[last_number] += 1
return last_number
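# Hedged sanity check (illustrative only): '0,3,6' is the well-known sample for
# this puzzle (an assumption about the hidden test fixture in config); its
# 2020th spoken number is 436, matching the test_answer used in main() below.
def _example_number_game() -> None:
    assert number_game('0,3,6', max_turns=10) == 0
    assert number_game('0,3,6') == 436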
def main() -> None:
"""Simulate the number game described in day 15."""
# Part A
test_answer = 436
file = config.TestFile(test_answer)
test = number_game(file.contents[0])
file.test(test)
# Part B
file.answer = 175594
test = number_game(file.contents[0], max_turns=30000000)
file.test(test)
# Part A
file = config.File()
result = number_game(file.contents[0])
config.log_part_info('A', result)
# Part B
result = number_game(file.contents[0], max_turns=30000000)
config.log_part_info('B', result)
if __name__ == '__main__':
main()
|
the-stack_0_13716 | """
Utils function.
"""
import sys
import os
import logging
from glob import glob
def add_pyspark_path_if_needed():
"""Add PySpark to the library path based on the value of SPARK_HOME if
pyspark is not already in our path"""
try:
from pyspark import context
except ImportError:
# We need to add PySpark, try findspark if we can but it has an
# undeclared IPython dep.
try:
import findspark
findspark.init()
except ImportError:
add_pyspark_path()
def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME."""
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
print("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
def quiet_py4j():
logger = logging.getLogger('py4j')
logger.setLevel(logging.INFO)
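def example_pyspark_setup(spark_home='/opt/spark'):
    """Hedged usage sketch; '/opt/spark' is an assumed install path. Point
    SPARK_HOME at a Spark installation, make pyspark importable via the helpers
    above, and quiet the py4j logger before running tests."""
    os.environ.setdefault('SPARK_HOME', spark_home)
    add_pyspark_path_if_needed()
    quiet_py4j()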
|
the-stack_0_13717 | from django.urls import path
from . import views
urlpatterns = [
path('home',views.index,name="index"),
path('',views.landing,name="landing"),
path('register',views.register,name="register"),
path('logout',views.logout,name="logout"),
path('profile/edit', views.profile, name="profile"),
path('profile/<int:id>/', views.myprofile, name="myprofile"),
path('search', views.SearchResultsView.as_view(),name="search")
]
|
the-stack_0_13719 | # coding=utf-8
import datetime
import os
import gym
import numpy
import matplotlib.pyplot as plt
import pandas
from dateutil import relativedelta
from gym import spaces
class FxEnv(gym.Env):
metadata = {'render.modes': ['human', 'ohlc_array']}
def __init__(self):
        # Action constants
        self.STAY = 0
        self.BUY = 1
        self.SELL = 2
        self.CLOSE = 3
        # Maximum value of the target currency pair
        self.MAX_VALUE = 2
        # Initial account balance
        self.initial_balance = 10000
        # List of CSV file paths (at least 4 months' worth, in ascending order)
self.csv_file_paths = []
now = datetime.datetime.now()
for _ in range(4):
now = now - relativedelta.relativedelta(months=1)
filename = 'DAT_MT_EURUSD_M1_{}.csv'.format(now.strftime('%Y%m'))
if not os.path.exists(filename):
                print('The file does not exist. Please download it from the link below.', filename)
print('http://www.histdata.com/download-free-forex-historical-data/?/metatrader/1-minute-bar-quotes/EURUSD/')
else:
self.csv_file_paths.append(filename)
        # Spread
        self.spread = 0.5
        # Point (value of 1 pip)
        self.point = 0.0001
        # Take-profit pips
        self.take_profit_pips = 30
        # Stop-loss pips
        self.stop_loss_pips = 15
        # Lot size
        self.lots = 0.01
        # Lot base (units per lot)
        self.lot_base = 100000
        # Actions 0-3; see the constants above for details
        self.action_space = gym.spaces.Discrete(4)
        # Number of observable bars
        self.visible_bar = 32
        # Build the 1-minute, 5-minute, 30-minute and 4-hour time series, each with the visible number of bars
self._reset()
self.observation_space = spaces.Box(low=0, high=self.MAX_VALUE, shape=numpy.shape(self.make_obs('ohlc_array')))
self.m5 = []
self.m30 = []
self.h4 = []
def _reset(self):
self.info = AccountInformation(self.initial_balance)
        # Read the CSV files
self.data = pandas.DataFrame()
for path in self.csv_file_paths:
csv = pandas.read_csv(path,
names=['date', 'time', 'open', 'high', 'low', 'close', 'v'],
parse_dates={'datetime': ['date', 'time']},
)
csv.index = csv['datetime']
csv = csv.drop('datetime', axis=1)
csv = csv.drop('v', axis=1)
self.data = self.data.append(csv)
        # Use the start index of the most recently read CSV as the starting index
self.read_index = len(self.data) - len(csv)
        # Randomly shift the starting position from there (keep 5 days' worth (7220 minutes))
        # self.read_index += numpy.random.randint(0, (len(csv) - 7220))
        # List of open tickets
self.tickets = []
return self.make_obs('ohlc_array')
def _step(self, action):
current_data = self.data.iloc[self.read_index]
ask = current_data['close'] + self.spread * self.point
bid = current_data['close'] - self.spread * self.point
if action == self.STAY:
for ticket in self.tickets:
if ticket.order_type == self.BUY:
if bid > ticket.take_profit:
                        # Take profit on a buy ticket
profit = (ticket.take_profit - ticket.open_price) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_buy += profit
elif bid < ticket.stop_loss:
                        # Cut losses on a buy ticket
profit = (ticket.stop_loss - ticket.open_price) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_buy += profit
elif ticket.order_type == self.SELL:
if ask < ticket.take_profit:
                        # Take profit on a sell ticket
profit = (ticket.open_price - ticket.take_profit) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_sell += profit
elif bid < ticket.stop_loss:
                        # Cut losses on a sell ticket
profit = (ticket.open_price - ticket.stop_loss) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_sell += profit
elif action == self.BUY:
ticket = Ticket(self.BUY, ask, ask + self.take_profit_pips * self.point,
ask - self.stop_loss_pips * self.point, self.lots)
self.tickets.append(ticket)
pass
elif action == self.SELL:
ticket = Ticket(self.SELL, bid, bid - self.take_profit_pips * self.point,
bid + self.stop_loss_pips * self.point, self.lots)
self.tickets.append(ticket)
pass
elif action == self.CLOSE:
for ticket in self.tickets:
if ticket.order_type == self.BUY:
                    # Close a buy ticket
profit = (bid - ticket.open_price) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_buy += profit
elif ticket.order_type == self.SELL:
                    # Close a sell ticket
profit = (ticket.open_price - ask) * ticket.lots * self.lot_base
self.info.balance += profit
self.info.total_pips_sell += profit
# done = self.info.balance <= 0 or self.read_index >= len(self.data)
# reward = 0.00001 * (self.info.total_pips_buy + self.info.total_pips_sell)
done = self.info.balance > self.info.fixed_balance * 2
reward = self.info.balance
        # Increment the read index
        self.read_index += 1
        # Return obs, reward, done, info
return self.make_obs('ohlc_array'), reward, done, self.info
def _render(self, mode='human', close=False):
return self.make_obs(mode)
def make_obs(self, mode):
"""
        Build the 1-minute, 5-minute, 30-minute and 4-hour time series, 64 bars each
:return:
"""
target = self.data.iloc[self.read_index - 60 * 4 * 70: self.read_index]
if mode == 'human':
m1 = numpy.array(target.iloc[-1 * self.visible_bar:][target.columns])
m5 = numpy.array(target.resample('5min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
m30 = numpy.array(target.resample('30min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
h4 = numpy.array(target.resample('4H').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
return numpy.array([m1, m5, m30, h4])
            # For 'human' mode, perhaps build a chart image with matplotlib?
# fig = plt.figure(figsize=(10, 4))
            # # A candlestick has total width 1; divide by the number of bars shown and use 1/3 of that as the body width
            # width = 1.0 / 64 / 3
            # # 1-minute bars
            # ax = plt.subplot(2, 2, 1)
            # # Disable offset notation on the y axis.
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target.iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
            # # 5-minute bars
# ax = plt.subplot(2, 2, 2)
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target['close'].resample('5min').ohlc().dropna().iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
            # # 30-minute bars
# ax = plt.subplot(2, 2, 3)
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target['close'].resample('30min').ohlc().dropna().iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
            # # 4-hour bars
# ax = plt.subplot(2, 2, 4)
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# data = target['close'].resample('4H').ohlc().dropna().iloc[-1 * self.visible_bar:].values
# mpf.candlestick_ohlc(ax, data, width=width, colorup='g', colordown='r')
# return fig.canvas.buffer_rgba()
elif mode == 'ohlc_array':
m1 = numpy.array(target.iloc[-1 * self.visible_bar:][target.columns])
m5 = numpy.array(target.resample('5min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
m30 = numpy.array(target.resample('30min').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
h4 = numpy.array(target.resample('4H').agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last'}).dropna().iloc[-1 * self.visible_bar:][target.columns])
return numpy.array([m1, m5, m30, h4])
class AccountInformation(object):
"""
    Account information class
"""
def __init__(self, initial_balance):
        # Account equity (including unrealized profit/loss)
        self.balance = initial_balance
        # Realized account balance
        self.fixed_balance = initial_balance
        # Total pips gained (buy)
        self.total_pips_buy = 0
        # Total pips gained (sell)
self.total_pips_sell = 0
def items(self):
'''
        Called from rl\core.py line 172
        :return: account information
'''
return [('balance', self.balance), ('fixed_balance', self.fixed_balance), ('total_pips_buy', self.total_pips_buy), ('total_pips_sell', self.total_pips_sell)]
class Ticket(object):
"""
    Trade ticket
"""
def __init__(self, order_type, open_price, take_profit, stop_loss, lots):
        # Order type
        self.order_type = order_type
        # Fill (execution) price
        self.open_price = open_price
        # Take-profit price
        self.take_profit = take_profit
        # Stop-loss price
        self.stop_loss = stop_loss
        # Lot size
self.lots = lots
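def _example_rollout(num_steps=3):
    """Hedged usage sketch (illustrative only): drive FxEnv through the old
    gym-style _reset/_step API used above. It assumes the DAT_MT_EURUSD_M1_*.csv
    files referenced in FxEnv.__init__ are present in the working directory."""
    env = FxEnv()
    obs = env._reset()
    for _ in range(num_steps):
        obs, reward, done, info = env._step(env.STAY)
        if done:
            break
    return obs, env.info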
|
the-stack_0_13720 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import tushare_easy
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'arrow',
'unipath',
'pandas',
'lxml',
'tushare',
]
setup_requirements = [
'lxml',
'pandas',
]
test_requirements = [
'lxml',
'pandas',
'flake8',
'tox',
]
setup(
name='tushare_easy',
version=tushare_easy.__version__,
description='make tushare easyer',
long_description=readme + '\n\n' + history,
author='yingnn',
author_email='[email protected]',
url='https://github.com/yingnn/tushare_easy',
packages=find_packages(include=['tushare_easy']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='tushare_easy, tushare',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
scripts = ['scripts/tushare_easy'],
)
|
the-stack_0_13722 | # Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes,
person_classes, triage_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall', 'person_classes', 'triage_classes'
]
|
the-stack_0_13723 | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""Factory functions for all ngraph ops."""
import numpy as np
from ngraph.impl import AxisSet, AxisVector, Coordinate, CoordinateDiff, Function, Node, \
Shape, Strides
from ngraph.impl.op import Abs, Acos, Add, And, Asin, ArgMax, ArgMin, Atan, AvgPool, \
BatchNormTraining, BatchNormInference, Broadcast, Ceiling, Concat, Constant, Convert, \
Convolution, ConvolutionBackpropData, Cos, Cosh, Divide, Dot, Equal, Exp, Floor, \
GetOutputElement, Greater, GreaterEq, Less, LessEq, Log, LRN, Max, Maximum, MaxPool, \
Min, Minimum, Multiply, Negative, Not, NotEqual, OneHot, Or, Pad, Parameter, Product, \
Power, Relu, ReplaceSlice, Reshape, Reverse, Select, Sign, Sin, Sinh, Slice, Softmax, \
Sqrt, Subtract, Sum, Tan, Tanh, TopK
from typing import Callable, Iterable, List, Union
from ngraph.utils.broadcasting import get_broadcast_axes
from ngraph.utils.decorators import nameable_op, binary_op, unary_op
from ngraph.utils.input_validation import assert_list_of_ints
from ngraph.utils.reduction import get_reduction_axes
from ngraph.utils.types import NumericType, NumericData, TensorShape, make_constant_node, \
NodeInput, ScalarData
from ngraph.utils.types import get_element_type
@nameable_op
def parameter(shape, dtype=np.float32, name=None):
# type: (TensorShape, NumericType, str) -> Parameter
"""Return an ngraph Parameter object."""
assert_list_of_ints(shape, 'Parameter shape must be a list of integer values.')
element_type = get_element_type(dtype)
return Parameter(element_type, Shape(shape))
@nameable_op
def constant(value, dtype=None, name=None): # type: (NumericData, NumericType, str) -> Constant
"""Create a Constant node from provided value.
:param value: One of: array of values or scalar to initialize node with.
:param dtype: The data type of provided data.
:param name: Optional name for output node.
:return: The Constant node initialized with provided data.
"""
return make_constant_node(value, dtype)
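def _example_parameter_plus_constant():
    """Hedged sketch (illustrative only): combine the factory functions in this
    module into a tiny graph; the 2x2 shape and values are arbitrary assumptions.
    `add` is defined further down in this module and performs element-wise A+B."""
    x = parameter([2, 2], dtype=np.float32, name='x')
    c = constant(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
    return add(x, c)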
# Unary ops
@unary_op
def absolute(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies f(x) = abs(x) to the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with Abs operation applied on it.
"""
return Abs(node)
@unary_op
def acos(node, name=None): # type: (NodeInput, str) -> Node
"""Apply inverse cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with arccos operation applied on it.
"""
return Acos(node)
@unary_op
def asin(node, name=None): # type: (NodeInput, str) -> Node
"""Apply inverse sine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with arcsin operation applied on it.
"""
return Asin(node)
@unary_op
def atan(node, name=None): # type: (NodeInput, str) -> Node
"""Apply inverse tangent function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with arctan operation applied on it.
"""
return Atan(node)
@unary_op
def cos(node, name=None): # type: (NodeInput, str) -> Node
"""Apply cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with cos operation applied on it.
"""
return Cos(node)
@unary_op
def cosh(node, name=None): # type: (NodeInput, str) -> Node
"""Apply hyperbolic cosine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with cosh operation applied on it.
"""
return Cosh(node)
@unary_op
def sqrt(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies square root to the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: The new node with sqrt operation applied element-wise.
"""
return Sqrt(node)
@unary_op
def exp(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies exp to the input node element-wise.
:param node: The node providing data for operation.
:param name: The optional name for new output node.
:return: The new node performing natural exponential operation.
"""
return Exp(node)
@unary_op
def log(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies natural logarithm to the input node element-wise.
:param node: The input node providing data for operation.
:param name: The optional new name for output node.
:return: The new node performing log operation element-wise.
"""
return Log(node)
@unary_op
def negative(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies f(x) = -x to the input node elementwise."""
return Negative(node)
@unary_op
def floor(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies floor to the input node element-wise.
:param node: The input node providing data.
:param name: The optional name for new output node.
:return: The node performing element-wise floor operation.
"""
return Floor(node)
@unary_op
def ceiling(node, name=None): # type: (NodeInput, str) -> Node
"""Return node which applies ceiling to the input node element-wise.
:param node: The node providing data to ceiling operation.
:param name: Optional name for output node.
:return: The node performing element-wise ceiling.
"""
return Ceiling(node)
@unary_op
def reshape(node, output_shape, input_order=None, name=None):
# type: (Node, List[int], List[int], str) -> Node
"""Return reshaped node according to provided parameters.
:param node: The tensor we want to reshape.
:param input_order: The order in which to iterate over input axes of input tensor.
:param output_shape: The new shape for input tensor.
"""
if input_order is None:
input_order = list(range(len(node.shape)))
return Reshape(node, AxisVector(input_order), Shape(output_shape))
@unary_op
def relu(node, name=None): # type: (NodeInput, str) -> Node
"""Perform rectified linear unit operation on input node element-wise.
:param node: One of: input node, array or scalar.
    :param name: The optional output node name.
:return: The new node performing relu operation on its input element-wise.
"""
return Relu(node)
@unary_op
def sign(node, name=None): # type: (NodeInput, str) -> Node
"""Perform element-wise sign operation.
:param node: One of: input node, array or scalar.
    :param name: The optional new name for output node.
:return: The node with mapped elements of the input tensor to -1 (if it is negative),
0 (if it is zero), or 1 (if it is positive).
"""
return Sign(node)
@unary_op
def sin(node, name=None): # type: (NodeInput, str) -> Node
"""Apply sine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with sin operation applied on it.
"""
return Sin(node)
@unary_op
def sinh(node, name=None): # type: (NodeInput, str) -> Node
"""Apply hyperbolic sine function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with sin operation applied on it.
"""
return Sinh(node)
@unary_op
def tan(node, name=None): # type: (NodeInput, str) -> Node
"""Apply tangent function on the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with tan operation applied on it.
"""
return Tan(node)
# Binary ops
@binary_op
def divide(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A/B to the input nodes element-wise.
:param left_node: The node providing dividend data.
:param right_node: The node providing divisor data.
:param name: Optional name for output node.
:return: The node performing element-wise division.
"""
return Divide(left_node, right_node)
@binary_op
def multiply(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A*B to the input nodes elementwise."""
return Multiply(left_node, right_node)
@binary_op
def subtract(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A-B to the input nodes element-wise.
:param left_node: The node providing data for left hand side of operator.
:param right_node: The node providing data for right hand side of operator.
:param name: The optional name for output node.
:return: The new output node performing subtraction operation on both tensors element-wise.
"""
return Subtract(left_node, right_node)
@binary_op
def add(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies f(x) = A+B to the input nodes element-wise."""
return Add(left_node, right_node)
@binary_op
def minimum(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies the minimum operation to input nodes elementwise."""
return Minimum(left_node, right_node)
@binary_op
def maximum(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which applies the maximum operation to input nodes elementwise."""
return Maximum(left_node, right_node)
@binary_op
def power(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which perform element-wise exponentiation operation.
:param left_node: The node providing the base of operation.
:param right_node: The node providing the exponent of operation.
:param name: The optional name for the new output node.
:return: The new node performing element-wise exponentiation operation on input nodes.
"""
return Power(left_node, right_node)
# Logical ops
@binary_op
def equal(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if input nodes are equal element-wise.
:param left_node: The first input node for equal operation.
:param right_node: The second input node for equal operation.
:param name: The optional name for output new node.
:return: The node performing element-wise equality check.
"""
return Equal(left_node, right_node)
@binary_op
def not_equal(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if input nodes are unequal element-wise.
:param left_node: The first input node for not-equal operation.
:param right_node: The second input node for not-equal operation.
:param name: The optional name for output new node.
:return: The node performing element-wise inequality check.
"""
return NotEqual(left_node, right_node)
@binary_op
def greater(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is greater than the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is greater than right_node.
"""
return Greater(left_node, right_node)
@binary_op
def greater_eq(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left node is greater or equal to the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is greater than or equal to
             right_node.
"""
return GreaterEq(left_node, right_node)
@binary_op
def less(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is less than the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing element-wise check whether left_node is less than the right_node.
"""
return Less(left_node, right_node)
@binary_op
def less_eq(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which checks if left input node is less or equal the right node element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
    :return: The node performing element-wise check whether left_node is less than or equal to
             the right_node.
"""
return LessEq(left_node, right_node)
@binary_op
def logical_and(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which perform logical and operation on input nodes element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing logical and operation on input nodes corresponding elements.
"""
return And(left_node, right_node)
@binary_op
def logical_or(left_node, right_node, name=None): # type: (NodeInput, NodeInput, str) -> Node
"""Return node which performs logical or operation on input nodes element-wise.
:param left_node: The first input node providing data.
:param right_node: The second input node providing data.
:param name: The optional new name for output node.
:return: The node performing logical or operation on input nodes corresponding elements.
"""
return Or(left_node, right_node)
@unary_op
def logical_not(node, name=None): # type: (Node, str) -> Node
"""Return node which applies logical negation to the input node elementwise."""
return Not(node)
# Extend Node class to support binary operators
Node.__add__ = add
Node.__sub__ = subtract
Node.__mul__ = multiply
Node.__div__ = divide
Node.__truediv__ = divide
Node.__radd__ = lambda left, right: add(right, left)
Node.__rsub__ = lambda left, right: subtract(right, left)
Node.__rmul__ = lambda left, right: multiply(right, left)
Node.__rdiv__ = lambda left, right: divide(right, left)
Node.__rtruediv__ = lambda left, right: divide(right, left)
Node.__eq__ = equal
Node.__ne__ = not_equal
Node.__lt__ = less
Node.__le__ = less_eq
Node.__gt__ = greater
Node.__ge__ = greater_eq
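# Illustrative sketch (not part of the original module): because the magic
# methods above delegate to the ops defined earlier, graph expressions can be
# written with plain Python operators. The helper below only assumes an existing
# `Node` instance; scalar operands are accepted because the binary ops are
# documented to take nodes, arrays or scalars.
def _operator_overloading_example(node):  # type: (Node) -> Node
    # Each operator builds the corresponding graph op (Add, Multiply, Subtract, Divide).
    return (node + 1.0) * node - node / 2.0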
# Custom ops
@nameable_op
def broadcast(node, new_shape, broadcast_axes, name=None):
# type: (Node, TensorShape, Iterable[int], str) -> Node
"""Create a node which broadcasts the input node's values along specified axes to a desired shape.
:param node: The node with input tensor data.
:param new_shape: The new shape we want to broadcast tensor to.
:param broadcast_axes: The axis positions (0-based) in the result that are being broadcast.
:param name: Optional new name for output node.
:return: New node with broadcast shape.
"""
return Broadcast(node, Shape(new_shape), AxisSet(broadcast_axes))
@nameable_op
def broadcast_to(node, new_shape, axis=None, name=None):
# type: (Node, TensorShape, int, str) -> Node
"""Create a node which broadcasts the input node's values to a desired shape.
`broadcast_to` will attempt to automatically determine which axes need broadcasting.
The optional `axis` parameter specifies the starting axis position (0-based) in the output
shape from which the current shape of the tensor matches the desired new shape.
e.g. current_shape: [4, 5], new_shape: [2, 3, 4, 5, 6], axis: 2
By using the `axis` parameter you can control which output axis to broadcast along.
Example:
>>> input_node = ng.constant([1, 2, 3])
>>> current_shape = [3]
>>> new_shape = [3, 3]
>>> ng.broadcast_to(input_node, new_shape, axis=1)
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> ng.broadcast_to(input_node, new_shape, axis=0)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
If the `axis` parameter is not specified, `broadcast_to` will attempt to match shapes,
assuming the current shape matches the rightmost positions of the desired new shape.
This behaviour is similar to NumPy's broadcasting.
i.e. default `axis = len(new_shape) - len(current_shape)`
:param node: The node with input tensor data.
:param new_shape: The new shape we want to broadcast tensor to.
:param axis: The axis along which we perform broadcasting.
:param name: Optional new name for output node.
:return: New node with broadcast shape.
"""
return Broadcast(node, Shape(new_shape), get_broadcast_axes(new_shape, node.shape, axis))
@nameable_op
def convert(node, new_type, name=None): # type: (Node, NumericType, str) -> Node
"""Return node which casts input node values to specified type."""
new_element_type = get_element_type(new_type)
return Convert(node, new_element_type)
@nameable_op
def select(selection_node, input_node1, input_node2, name=None):
# type: (Node, Node, Node, str) -> Node
"""Perform an element-wise selection operation on input tensors.
:param selection_node: The node providing selection values of `bool` type.
:param input_node1: The node providing data to be selected if respective `selection_node`
item value is `True`.
:param input_node2: The node providing data to be selected if respective `selection_node`
item value is `False`.
:param name: The optional new name for output node.
:return: The new node with values selected according to provided arguments.
"""
return Select(selection_node, input_node1, input_node2)
# Non-linear ops
@unary_op
def tanh(node, name=None): # type: (Node, str) -> Node
"""Return node which applies hyperbolic tangent to the input node element-wise.
:param node: One of: input node, array or scalar.
:param name: Optional new name for output node.
:return: New node with tanh operation applied on it.
"""
return Tanh(node)
# matmul ops
@nameable_op
def dot(left_node, right_node, reduction_axes_count=None, name=None):
# type: (Node, Node, int, str) -> Node
"""Return node which performs generalized dot product of two input nodes.
This operation is capable of performing scalar-tensor, matrix-vector product and matrix
multiplication.
:param left_node: The node providing left hand side data.
:param right_node: The node providing right hand side data.
:param reduction_axes_count: The number of axes to reduce during dot-product.
:param name: The optional name for output node.
:return: The new node performing dot-product on input two nodes.
"""
if reduction_axes_count is None:
return Dot(left_node, right_node)
else:
return Dot(left_node, right_node, reduction_axes_count)
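# Illustrative sketch (an assumption, not original code): with the default of a
# single reduced axis, `dot` behaves like an ordinary matrix-vector product.
def _dot_example(matrix_node, vector_node):  # type: (Node, Node) -> Node
    # matrix_node of shape [m, n] and vector_node of shape [n] yield shape [m].
    return dot(matrix_node, vector_node)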
# convpool ops
@nameable_op
def convolution(data_batch, # type: Node
filter_weights, # type: Node
filter_strides=None, # type: List[int]
filter_dilation_strides=None, # type: List[int]
padding_below=None, # type: List[int]
padding_above=None, # type: List[int]
data_dilation_strides=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return node performing batched convolution operation.
:param data_batch: The node providing data batch tensor.
:param filter_weights: The node providing filters tensor.
:param filter_strides: The kernel window movement strides.
:param filter_dilation_strides: The filters dilation strides.
:param padding_below: The number of zero padding elements to add on each axis below 0
coordinate.
:param padding_above: The number of zero padding elements to add on each axis above max
coordinate.
:param data_dilation_strides: The data batch dilation strides.
:param name: The optional new name for output node.
:return: New node performing batched convolution operation.
"""
spatial_dim_count = len(data_batch.shape) - 2
if filter_strides is None:
filter_strides = [1] * spatial_dim_count
if filter_dilation_strides is None:
filter_dilation_strides = [1] * spatial_dim_count
if padding_above is None:
padding_above = [0] * spatial_dim_count
if padding_below is None:
padding_below = [0] * spatial_dim_count
if data_dilation_strides is None:
data_dilation_strides = [1] * spatial_dim_count
return Convolution(data_batch, filter_weights, Strides(filter_strides),
Strides(filter_dilation_strides), CoordinateDiff(padding_below),
CoordinateDiff(padding_above), Strides(data_dilation_strides))
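# Illustrative sketch (an assumption): for a 4-D data batch laid out as
# [N, C, H, W], spatial_dim_count is 2, so the defaults above expand to unit
# strides/dilations and zero padding on both spatial axes. The explicit call
# below is therefore equivalent to convolution(data_batch, filter_weights).
def _convolution_defaults_example(data_batch, filter_weights):  # type: (Node, Node) -> Node
    return convolution(data_batch, filter_weights,
                       filter_strides=[1, 1],
                       filter_dilation_strides=[1, 1],
                       padding_below=[0, 0],
                       padding_above=[0, 0],
                       data_dilation_strides=[1, 1])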
@nameable_op
def convolution_backprop_data(data_batch_shape, # type: TensorShape
filters, # type: Node
output_delta, # type: Node
window_movement_strides_forward=None, # type: List[int]
window_dilation_strides_forward=None, # type: List[int]
padding_below_forward=None, # type: List[int]
padding_above_forward=None, # type: List[int]
data_dilation_strides_forward=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return node performing a batched-convolution data batch-backprop operation.
:param data_batch_shape: The shape of the data batch from forward-prop.
:param filters: The node producing the filters from forward-prop.
:param output_delta: The node producing output delta.
:param window_movement_strides_forward: The window movement strides from forward-prop.
:param window_dilation_strides_forward: The window dilation strides from forward-prop.
:param padding_below_forward: The padding-below sizes from forward-prop.
:param padding_above_forward: The padding-above sizes from forward-prop.
:param data_dilation_strides_forward: The data dilation strides from forward-prop.
"""
spatial_dim_count = len(data_batch_shape) - 2
if window_movement_strides_forward is None:
window_movement_strides_forward = [1] * spatial_dim_count
if window_dilation_strides_forward is None:
window_dilation_strides_forward = [1] * spatial_dim_count
if padding_below_forward is None:
padding_below_forward = [0] * spatial_dim_count
if padding_above_forward is None:
padding_above_forward = [0] * spatial_dim_count
if data_dilation_strides_forward is None:
data_dilation_strides_forward = [1] * spatial_dim_count
return ConvolutionBackpropData(Shape(data_batch_shape), filters, output_delta,
Strides(window_movement_strides_forward),
Strides(window_dilation_strides_forward),
CoordinateDiff(padding_below_forward),
CoordinateDiff(padding_above_forward),
Strides(data_dilation_strides_forward))
@nameable_op
def avg_pool(data_batch, # type: Node
window_shape, # type: TensorShape
window_strides=None, # type: List[int]
padding_below=None, # type: TensorShape
padding_above=None, # type: TensorShape
include_padding=False, # type: bool
name=None, # type: str
):
# type: (...) -> Node
"""Return average pooling node.
:param data_batch: The input node providing data.
:param window_shape: The pooling window shape.
:param window_strides: The window movement strides.
:param padding_below: The input data optional padding below filled with zeros.
    :param padding_above: The input data optional padding above filled with zeros.
:param include_padding: Whether or not to include zero padding in average computations.
:param name: Optional name for the new output node.
:return: New node with AvgPool operation applied on its data.
"""
spatial_dim_count = len(window_shape)
if window_strides is None:
window_strides = [1] * spatial_dim_count
if padding_above is None:
padding_above = [0] * spatial_dim_count
if padding_below is None:
padding_below = [0] * spatial_dim_count
return AvgPool(data_batch, Shape(window_shape), Strides(window_strides), Shape(padding_below),
Shape(padding_above), include_padding)
@nameable_op
def max_pool(x, # type: Node
window_shape, # type: TensorShape
strides=None, # type: List[int]
padding_above=None, # type: List[int]
padding_below=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return max pooling node."""
if strides is None:
strides = [1] * len(window_shape) # Default to as many 1s as spatial dimensions of input.
if padding_above is None:
padding_above = [0] * len(window_shape)
if padding_below is None:
padding_below = [0] * len(window_shape)
return MaxPool(x, Shape(window_shape), Strides(strides),
Shape(padding_above), Shape(padding_below))
# reduction ops
@nameable_op
def sum(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Perform element-wise sums of the input tensor, eliminating the specified reduction axes.
:param node: The node providing data for operation.
:param reduction_axes: The axes to eliminate through summation.
    :param name: The optional new name for output node.
:return: The new node performing summation along `reduction_axes` element-wise.
"""
return Sum(node, AxisSet(get_reduction_axes(node, reduction_axes)))
@nameable_op
def max(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Max-reduction operation on input tensor, eliminating the specified reduction axes.
:param node: The tensor we want to max-reduce.
:param reduction_axes: The axes to eliminate through max operation.
:param name: Optional name for output node.
"""
return Max(node, AxisSet(get_reduction_axes(node, reduction_axes)))
@nameable_op
def min(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Min-reduction operation on input tensor, eliminating the specified reduction axes.
:param node: The tensor we want to min-reduce.
:param reduction_axes: The axes to eliminate through min operation.
:param name: Optional name for output node.
"""
return Min(node, AxisSet(get_reduction_axes(node, reduction_axes)))
@nameable_op
def prod(node, reduction_axes=None, name=None):
# type: (Node, Iterable[int], str) -> Node
"""Product-reduction operation on input tensor, eliminating the specified reduction axes.
:param node: The tensor we want to product-reduce.
:param reduction_axes: The axes to eliminate through product operation.
:param name: Optional name for output node.
:return: The new node performing product-reduction operation.
"""
return Product(node, AxisSet(get_reduction_axes(node, reduction_axes)))
# reshape ops
@nameable_op
def slice(node, lower_bounds, upper_bounds, strides=None, name=None):
# type: (Node, List[int], List[int], List[int], str) -> Node
"""Take a slice of an input tensor, (sub-tensor) that resides within a bounding box.
Optionally this function may be provided with stride along each axis.
:param node: The tensor we want to slice.
:param lower_bounds: The (inclusive) lower-bound coordinates for the tensor slice.
:param upper_bounds: The (exclusive) upper-bound coordinates for the tensor slice.
:param strides: The strides for the tensor slice.
:param name: Optional name for the output node.
:return: Return node that represents a slice of input nodes data.
"""
if strides is None:
return Slice(node, Coordinate(lower_bounds), Coordinate(upper_bounds))
else:
return Slice(node, Coordinate(lower_bounds), Coordinate(upper_bounds), Strides(strides))
@nameable_op
def concat(nodes, axis, name=None): # type: (List[Node], int, str) -> Node
"""Concatenate input nodes into single new node along specified axis.
    :param nodes: The nodes we want to concatenate into a single new node.
:param axis: The axis along which we want to concatenate input nodes.
:param name: The optional new name for output node.
:return: Return new node that is a concatenation of input nodes.
"""
return Concat(nodes, axis)
@nameable_op
def softmax(node, axes, name=None): # type: (Node, Iterable[int], str) -> Node
"""Apply softmax operation on each element of input tensor.
:param node: The tensor providing input data.
:param axes: The list of axes indices which are used to calculate divider of
the softmax function.
:param name: The optional new name for output node.
:return: The new node with softmax operation applied on each element.
"""
if not isinstance(axes, set):
axes = set(axes)
return Softmax(node, AxisSet(axes))
@nameable_op
def pad(data_batch, # type: Node
value, # type: Node
padding_below=None, # type: TensorShape
padding_above=None, # type: TensorShape
padding_in=None, # type: TensorShape
name=None, # type: str
):
# type: (...) -> Node
"""Return padding node.
:param data_batch: The input node providing data.
:param value: The node producing the scalar value to be inserted for padding.
:param padding_below: The padding-below widths.
:param padding_above: The padding-above widths.
:param padding_in: The interior-padding widths.
:param name: The optional new name for output node.
:return: Return node that represents a padding of input nodes data.
"""
dim_count = len(data_batch.shape)
if padding_above is None:
padding_above = [0] * dim_count
if padding_below is None:
padding_below = [0] * dim_count
if padding_in is None:
padding_in = [0] * dim_count
return Pad(data_batch, value, Shape(padding_below), Shape(padding_above), Shape(padding_in))
@nameable_op
def one_hot(node, shape, one_hot_axis, name=None): # type: (Node, TensorShape, int, str) -> Node
"""Create node performing one-hot encoding on input data.
:param node: The input node providing data for operation.
:param shape: The output node shape including the new one-hot axis.
:param one_hot_axis: The index within the output shape of the new one-hot axis.
:param name: The optional name for new output node.
:return: New node performing one-hot operation.
"""
return OneHot(node, Shape(shape), one_hot_axis)
@nameable_op
def replace_slice(dest_node, # type: Node
src_node, # type: Node
lower_bounds, # type: List[int]
upper_bounds, # type: List[int]
strides=None, # type: List[int]
name=None, # type: str
):
# type: (...) -> Node
"""Return a copy of `dest_node` with the specified slice overwritten by the `src_node` data.
:param dest_node: The node providing data to be overwritten by the specified slice.
:param src_node: The node providing data for overwriting.
:param lower_bounds: The (inclusive) lower-bound coordinates for the replaced slice.
:param upper_bounds: The (exclusive) upper-bound coordinates for the replaced slice.
:param strides: The strides for the replaced slice.
:param name: The optional name for the output new node.
:return: The new node with copy of `dest_node` with the specified slice overwritten
by the `src_node`.
"""
if strides is None:
return ReplaceSlice(dest_node, src_node, Coordinate(lower_bounds), Coordinate(upper_bounds))
else:
return ReplaceSlice(dest_node, src_node, Coordinate(lower_bounds), Coordinate(upper_bounds),
Strides(strides))
@nameable_op
def reverse(node, reversed_axes, name=None): # type: (Node, List[int], str) -> Node
"""Perform axis-reverse operation.
:param node: The input node on which operation will be carried out.
:param reversed_axes: The list of indices of axes to be reversed.
:param name: The optional name of the output node.
:return: The new node with reversed axes.
"""
return Reverse(node, AxisSet(reversed_axes))
@nameable_op
def batch_norm(eps, # type: float
gamma, # type: Node
beta, # type: Node
data, # type: Node
mean=None, # type: Node
variance=None, # type: Node
name=None, # type: str
):
# type: (...) -> Node
"""Return batch normalization node."""
if mean is None and variance is None:
return BatchNormTraining(data, gamma, beta, eps)
else:
return BatchNormInference(data, gamma, beta, mean, variance, eps)
@nameable_op
def lrn(data, # type: Node
alpha=1, # type: float
beta=0.5, # type: float
bias=1, # type: float
size=5, # type: int
name=None, # type: str
):
# type: (...) -> Node
"""Return a node which performs element-wise Local Response Normalization (LRN) operation.
:param data: Input data.
:param alpha: A scale factor (usually positive).
:param beta: An exponent.
:param bias: An offset (usually positive) to avoid dividing by 0.
:param size: Width of the 1-D normalization window.
:param name: An optional name of the output node.
:return: The new node which performs LRN.
"""
return LRN(data, alpha, beta, bias, size)
@nameable_op
def argmax(data, # type: Node
axis=0, # type: int
):
# type: (...) -> Node
"""Return a node which performs ArgMax index reduction operation.
:param data: Input data.
:param axis: Reduction Axis.
:return: The new node which performs ArgMax
"""
return ArgMax(data, axis, get_element_type(np.int32))
@nameable_op
def argmin(data, # type: Node
axis=0, # type: int
):
# type: (...) -> Node
"""Return a node which performs ArgMin index reduction operation.
:param data: Input data.
:param axis: Reduction Axis.
:return: The new node which performs ArgMin
"""
return ArgMin(data, axis, get_element_type(np.int32))
@nameable_op
def topk(data, # type: Node
k, # type: int
kaxis=-1, # type: int
cmax=True, # type: bool
):
# type: (...) -> Node
"""Return a node which performs TopK.
:param data: Input data.
    :param k: The number of top elements to select.
    :param kaxis: TopK axis; -1 selects the last axis.
:param cmax: Compute TopK largest (True) or smallest (False)
:return: The new node which performs TopK (both indices and values)
"""
return TopK(data,
len(data.get_shape()) - 1 if kaxis == -1 else kaxis,
get_element_type(np.int32),
k,
cmax)
@nameable_op
def get_output_element(data, index): # type: (Node, int) -> Node
"""Return the n-th element of the input tuple."""
return GetOutputElement(data, index)
|
the-stack_0_13725 | from core.redis import rds
from core.triage import Triage
from core.parser import ScanParser
class Rule:
def __init__(self):
self.rule = 'CFG_ZEGE'
self.rule_severity = 2
self.rule_description = 'This rule checks for accessible Open API (Swagger) Documentation'
self.rule_confirm = 'Remote Server is exposing Swagger API'
self.rule_details = ''
self.rule_mitigation = '''Swagger API may have been incorrectly configured to allow access to untrusted clients. \
Check whether this can be restricted, as it may lead to attackers identifying your application endpoints.'''
self.rule_match_string = {
'/v2/api-docs':{
'app':'SWAGGER',
'match':['"swagger":"2.0"'],
'title':'REST API Documentation'
},
'/help':{
'app':'ASPNET_WEBAPI_HELP',
'match':['ASP.NET Web API Help Page'],
'title':'ASP.NET API Docs'
},
'/api-docs':{
'app':'SWAGGER',
'match':['"swagger":"2.0"'],
'title':'REST API Documentation'
},
'/swagger/index.html':{
'app':'SWAGGER_ALT1',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/swagger-ui.html':{
'app':'SWAGGER_ALT2',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/api/swagger-ui.html':{
'app':'SWAGGER_ALT3',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/api-docs/swagger.json':{
'app':'SWAGGER_ALT4',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/swagger.json':{
'app':'SWAGGER_ALT5',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
'/swagger/v1/swagger.json':{
'app':'SWAGGER_ALT6',
'match':['Swagger UI', '"swagger"'],
'title':'REST API Documentation'
},
}
self.intensity = 3
def check_rule(self, ip, port, values, conf):
t = Triage()
p = ScanParser(port, values)
domain = p.get_domain()
module = p.get_module()
if 'http' not in module:
return
for uri, values in self.rule_match_string.items():
app_title = values['title']
resp = t.http_request(ip, port, uri=uri)
if resp is not None:
for match in values['match']:
if match in resp.text:
self.rule_details = 'Identified an exposed {} at {}'.format(app_title, resp.url)
rds.store_vuln({
'ip':ip,
'port':port,
'domain':domain,
'rule_id':self.rule,
'rule_sev':self.rule_severity,
'rule_desc':self.rule_description,
'rule_confirm':self.rule_confirm,
'rule_details':self.rule_details,
'rule_mitigation':self.rule_mitigation
})
return
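# Illustrative sketch (an assumption, not part of the original rule): the scanner
# framework is expected to instantiate the rule once and call check_rule for each
# discovered service. The `values` mapping is a placeholder for the parsed scan
# data consumed by ScanParser; its real structure comes from the framework.
def example_run(ip, port, values, conf):
  rule = Rule()
  rule.check_rule(ip, port, values, conf)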
|
the-stack_0_13727 | """
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
from collections import namedtuple
from ..topology_types import GenericTopology
from ..config import ConfigValidator, StringField, PathField, ConfigError
from ..dependency import ClassProvider
from ..utils import format_key, get_parameter_value_from_config
ConverterReturn = namedtuple('ConverterReturn', ['annotations', 'meta', 'content_check_errors'])
class BaseFormatConverter(ClassProvider):
__provider_type__ = 'converter'
topology_types = (GenericTopology, )
@classmethod
def parameters(cls):
return {
'converter': StringField(description="Converter name.")
}
@property
def config_validator(self):
return ConfigValidator(
'{}_converter_config'.format(self.get_name()), fields=self.parameters(),
on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT
)
def __init__(self, config=None):
self.config = config
if config:
self.validate_config()
self.configure()
def get_value_from_config(self, key):
return get_parameter_value_from_config(self.config, self.parameters(), key)
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""
Converts specific annotation format to the ResultRepresentation specific for current dataset/task.
Arguments:
            check_content: bool flag which enables checking that dataset files (e.g. images, gt segmentation masks) exist
progress_callback: callback function for handling conversion progress status
Returns:
instance of ConverterReturn, where:
annotations is list of AnnotationRepresentations for current dataset
meta is dataset specific attributes e. g. label_map (can be None if dataset does not have specific info)
content_check_errors: list of error string messages for content check (can be None if check_content=False)
"""
raise NotImplementedError
@classmethod
def get_name(cls):
return cls.__provider__
def get_argparser(self):
parser = ArgumentParser(add_help=False)
config_validator = self.config_validator
fields = config_validator.fields
for field_name, field in fields.items():
if field_name == 'converter':
# it is base argument. Main argparser already use it to get argparser from specific converter.
# Converter argparser should contain only converter specific arguments.
continue
kwargs = {'required': not field.optional}
data_type = field.type
if data_type is not None:
kwargs['type'] = data_type
parser.add_argument(format_key(field_name), **kwargs)
return parser
def validate_config(self):
self.config_validator.validate(self.config)
def configure(self):
pass
class FileBasedAnnotationConverter(BaseFormatConverter):
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'annotation_file': PathField(description="Path to annotation file.")
})
return parameters
def configure(self):
self.annotation_file = self.get_value_from_config('annotation_file')
def convert(self, check_content=False, **kwargs):
pass
class DirectoryBasedAnnotationConverter(BaseFormatConverter):
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'data_dir': PathField(is_directory=True, description="Path to data directory.")
})
return parameters
def configure(self):
self.data_dir = self.get_value_from_config('data_dir')
def convert(self, check_content=False, **kwargs):
pass
def verify_label_map(label_map):
valid_label_map = {}
for class_id, class_name in label_map.items():
try:
int_class_id = int(class_id)
valid_label_map[int_class_id] = class_name
except ValueError:
raise ConfigError(
'class_id {} is invalid. `label_map` should have integer keys.'.format(class_id)
)
return valid_label_map
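# Illustrative sketch (an assumption, not part of the original file): a minimal
# concrete converter built on FileBasedAnnotationConverter. The provider name is
# a placeholder; a real converter would parse self.annotation_file and append
# dataset-specific representation objects to `annotations`.
class ExampleFileConverter(FileBasedAnnotationConverter):
    __provider__ = 'example_file_converter'
    def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
        annotations = []
        content_errors = [] if check_content else None
        # Parsing of self.annotation_file would populate `annotations` here.
        return ConverterReturn(annotations, None, content_errors)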
|
the-stack_0_13728 | # MIT License
#
# Copyright (c) 2020 Yu Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torch.nn.modules.rnn import apply_permutation
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from hanlp.common.structure import ConfigTracker
from hanlp.layers.dropout import SharedDropout
class VariationalLSTM(nn.Module):
r"""
    LSTM is a variant of the vanilla bidirectional LSTM adopted by Biaffine Parser
with the only difference of the dropout strategy.
It drops nodes in the LSTM layers (input and recurrent connections)
and applies the same dropout mask at every recurrent timesteps.
    APIs are roughly the same as :class:`~torch.nn.LSTM` except that we only allow
:class:`~torch.nn.utils.rnn.PackedSequence` as input.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
input_size (int):
The number of expected features in the input.
hidden_size (int):
The number of features in the hidden state `h`.
num_layers (int):
The number of recurrent layers. Default: 1.
bidirectional (bool):
If ``True``, becomes a bidirectional LSTM. Default: ``False``
dropout (float):
If non-zero, introduces a :class:`SharedDropout` layer on the outputs of each LSTM layer except the last layer.
Default: 0.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, input_size, hidden_size, num_layers=1, bidirectional=False, dropout=0):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = bidirectional
self.dropout = dropout
self.num_directions = 1 + self.bidirectional
self.f_cells = nn.ModuleList()
if bidirectional:
self.b_cells = nn.ModuleList()
for _ in range(self.num_layers):
self.f_cells.append(nn.LSTMCell(input_size=input_size, hidden_size=hidden_size))
if bidirectional:
self.b_cells.append(nn.LSTMCell(input_size=input_size, hidden_size=hidden_size))
input_size = hidden_size * self.num_directions
self.reset_parameters()
def __repr__(self):
s = f"{self.input_size}, {self.hidden_size}"
if self.num_layers > 1:
s += f", num_layers={self.num_layers}"
if self.bidirectional:
s += f", bidirectional={self.bidirectional}"
if self.dropout > 0:
s += f", dropout={self.dropout}"
return f"{self.__class__.__name__}({s})"
def reset_parameters(self):
for param in self.parameters():
# apply orthogonal_ to weight
if len(param.shape) > 1:
nn.init.orthogonal_(param)
# apply zeros_ to bias
else:
nn.init.zeros_(param)
def permute_hidden(self, hx, permutation):
if permutation is None:
return hx
h = apply_permutation(hx[0], permutation)
c = apply_permutation(hx[1], permutation)
return h, c
def layer_forward(self, x, hx, cell, batch_sizes, reverse=False):
hx_0 = hx_i = hx
hx_n, output = [], []
steps = reversed(range(len(x))) if reverse else range(len(x))
if self.training:
hid_mask = SharedDropout.get_mask(hx_0[0], self.dropout)
for t in steps:
last_batch_size, batch_size = len(hx_i[0]), batch_sizes[t]
if last_batch_size < batch_size:
hx_i = [torch.cat((h, ih[last_batch_size:batch_size])) for h, ih in zip(hx_i, hx_0)]
else:
hx_n.append([h[batch_size:] for h in hx_i])
hx_i = [h[:batch_size] for h in hx_i]
hx_i = [h for h in cell(x[t], hx_i)]
output.append(hx_i[0])
if self.training:
hx_i[0] = hx_i[0] * hid_mask[:batch_size]
if reverse:
hx_n = hx_i
output.reverse()
else:
hx_n.append(hx_i)
hx_n = [torch.cat(h) for h in zip(*reversed(hx_n))]
output = torch.cat(output)
return output, hx_n
def forward(self, sequence, hx=None):
r"""
Args:
sequence (~torch.nn.utils.rnn.PackedSequence):
A packed variable length sequence.
hx (~torch.Tensor, ~torch.Tensor):
A tuple composed of two tensors `h` and `c`.
`h` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the initial hidden state
for each element in the batch.
`c` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the initial cell state
for each element in the batch.
If `hx` is not provided, both `h` and `c` default to zero.
Default: ``None``.
Returns:
~torch.nn.utils.rnn.PackedSequence, (~torch.Tensor, ~torch.Tensor):
The first is a packed variable length sequence.
The second is a tuple of tensors `h` and `c`.
`h` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the hidden state for `t=seq_len`.
Like output, the layers can be separated using ``h.view(num_layers, num_directions, batch_size, hidden_size)``
and similarly for c.
`c` of shape ``[num_layers*num_directions, batch_size, hidden_size]`` holds the cell state for `t=seq_len`.
"""
x, batch_sizes = sequence.data, sequence.batch_sizes.tolist()
batch_size = batch_sizes[0]
h_n, c_n = [], []
if hx is None:
ih = x.new_zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size)
h, c = ih, ih
else:
h, c = self.permute_hidden(hx, sequence.sorted_indices)
h = h.view(self.num_layers, self.num_directions, batch_size, self.hidden_size)
c = c.view(self.num_layers, self.num_directions, batch_size, self.hidden_size)
for i in range(self.num_layers):
x = torch.split(x, batch_sizes)
if self.training:
mask = SharedDropout.get_mask(x[0], self.dropout)
x = [i * mask[:len(i)] for i in x]
x_i, (h_i, c_i) = self.layer_forward(x=x,
hx=(h[i, 0], c[i, 0]),
cell=self.f_cells[i],
batch_sizes=batch_sizes)
if self.bidirectional:
x_b, (h_b, c_b) = self.layer_forward(x=x,
hx=(h[i, 1], c[i, 1]),
cell=self.b_cells[i],
batch_sizes=batch_sizes,
reverse=True)
x_i = torch.cat((x_i, x_b), -1)
h_i = torch.stack((h_i, h_b))
c_i = torch.stack((c_i, c_b))
x = x_i
h_n.append(h_i)
            c_n.append(c_i)
x = PackedSequence(x,
sequence.batch_sizes,
sequence.sorted_indices,
sequence.unsorted_indices)
hx = torch.cat(h_n, 0), torch.cat(c_n, 0)
hx = self.permute_hidden(hx, sequence.unsorted_indices)
return x, hx
class VariationalLSTMEncoder(VariationalLSTM, ConfigTracker):
def __init__(self,
input_size,
hidden_size,
num_layers=1,
bidirectional=False,
variational_dropout=0,
word_dropout=0,
):
super().__init__(input_size, hidden_size, num_layers, bidirectional, variational_dropout)
ConfigTracker.__init__(self, locals())
self.lstm_dropout = SharedDropout(p=word_dropout)
# noinspection PyMethodOverriding
def forward(self, embed, mask):
batch_size, seq_len = mask.shape
x = pack_padded_sequence(embed, mask.sum(1), True, False)
x, _ = super().forward(x)
x, _ = pad_packed_sequence(x, True, total_length=seq_len)
x = self.lstm_dropout(x)
return x
def get_output_dim(self):
return self.hidden_size * self.num_directions
|
the-stack_0_13730 | """Builds the Unlimited Hand - sensor values network. (made from MNIST)
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import math
import sys
import tensorflow as tf
import numpy as np
def inference(sensor_values, layer_units_array):
"""Build the Unlimited Hand - sensor values model up to where it may be used for inference.
Args:
sensor_values: sensor values placeholder, from inputs().
layer_units_array: layer units count array like hidden1, hidden2 and output layer units count array
Returns:
softmax_linear: Output tensor with the computed logits.
"""
out_layer_units_count = layer_units_array[len(layer_units_array) - 1]
values = sensor_values
logits = None
for layer_index in xrange(len(layer_units_array) - 1):
name = None
if layer_index != (len(layer_units_array) - 2):
name = 'hidden' + str(layer_index + 1)
else:
name = 'softmax_linear'
with tf.name_scope(name):
# weights = tf.Variable(
# tf.truncated_normal([layer_units_array[layer_index], layer_units_array[layer_index + 1]],
# stddev=1.0 / math.sqrt(float(layer_units_array[layer_index]))),
# name='weights')
weights = tf.Variable(
tf.truncated_normal([layer_units_array[layer_index], layer_units_array[layer_index + 1]],
stddev=np.sqrt(2 / np.prod(values.get_shape().as_list()[1:]))),
name='weights')
biases = tf.Variable(tf.zeros([layer_units_array[layer_index + 1]]), name='biases')
if layer_index != (len(layer_units_array) - 2):
values = tf.nn.relu(tf.matmul(values, weights) + biases)
else:
logits = tf.matmul(values, weights) + biases
return logits
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, class_count].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_int64(labels)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='xentropy')
return tf.reduce_mean(cross_entropy, name='xentropy_mean')
def training(optimizer_full_class, loss, learning_rate):
"""Sets up the training Ops.
Creates a summarizer to track the loss over time in TensorBoard.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train.
Args:
loss: Loss tensor, from loss().
learning_rate: The learning rate to use for gradient descent.
Returns:
train_op: The Op for training.
"""
# Add a scalar summary for the snapshot loss.
tf.summary.scalar('loss', loss)
# Create the optimizer with the given learning rate.
optimizer = eval(optimizer_full_class + '(learning_rate)')
# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)
# Use the optimizer to apply the gradients that minimize the loss
# (and also increment the global step counter) as a single training step.
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def evaluation(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, class_count].
labels: Labels tensor, int32 - [batch_size], with values in the range [0, class_count).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# For a classifier model, we can use the in_top_k Op.
# It returns a bool tensor with shape [batch_size] that is true for
# the examples where the label is in the top k (here k=1)
# of all logits for that example.
correct = tf.nn.in_top_k(logits, labels, 1)
# Return the number of true entries.
return tf.reduce_sum(tf.cast(correct, tf.int32), name="eval_correct")
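# Illustrative wiring sketch (an assumption, not part of the original file): how the
# accompanying "fully_connected_*.py" scripts are expected to combine the functions
# above. The input width (40 sensor values) and class count (5) are placeholders.
def build_graph_example():
  sensor_values = tf.placeholder(tf.float32, shape=(None, 40))
  labels = tf.placeholder(tf.int32, shape=(None,))
  logits = inference(sensor_values, [40, 128, 64, 5])  # two hidden layers, 5 output classes
  loss_op = loss(logits, labels)
  train_op = training('tf.train.GradientDescentOptimizer', loss_op, 0.01)
  eval_correct = evaluation(logits, labels)
  return train_op, eval_correct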
|
the-stack_0_13731 | """
Given a string, calculate its length
"""
# Iterative approach
def iterative_length(string):
length = 0
for i in string:
length += 1
return length
print(iterative_length('hello worLd'))
# Recursive approach
def recursive_length(string):
if string == "":
return 0
return 1 + recursive_length(string[1:])
print(recursive_length('hello Worlds'))
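# A tail-style variant (illustrative addition): carrying an accumulator means the
# answer is ready as soon as the base case is reached.
def recursive_length_acc(string, acc=0):
    if string == "":
        return acc
    return recursive_length_acc(string[1:], acc + 1)
print(recursive_length_acc('hello worlds'))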
|
the-stack_0_13732 | import os
from utils.face_proc import FaceProc
import argparse
import pickle
from forensic_test import exam_img, exam_video
def main(args):
all_paths = os.listdir(args.input_dir)
proba_list = []
# initiate face process class, used to detect face and extract landmarks
face_inst = FaceProc()
# initialize SVM classifier for face forensics
with open(args.classifier_path, 'rb') as f:
model = pickle.load(f)
classifier = model[0]
scaler = model[1]
for f_name in all_paths:
f_path = os.path.join(args.input_dir, f_name)
print('_'*20)
print('Testing: ' + f_name)
suffix = f_path.split('.')[-1]
if suffix.lower() in ['jpg', 'png', 'jpeg', 'bmp']:
proba, optout = exam_img(args, f_path, face_inst, classifier, scaler)
elif suffix.lower() in ['mp4', 'avi', 'mov', 'mts']:
proba, optout = exam_video(args, f_path, face_inst, classifier, scaler)
print('fake_proba: {}, optout: {}'.format(str(proba), optout))
tmp_dict = dict()
tmp_dict['file_name'] = f_name
tmp_dict['probability'] = proba
tmp_dict['optout'] = optout
proba_list.append(tmp_dict)
pickle.dump(proba_list, open(args.save_file, 'wb'))
print(proba_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="headpose forensics")
parser.add_argument('--input_dir', type=str, default='debug_data')
parser.add_argument('--markID_c', type=str, default='18-36,49,55', help='landmark ids to estimate CENTRAL face region')
parser.add_argument('--markID_a', type=str, default='1-36,49,55', help='landmark ids to estimate WHOLE face region')
parser.add_argument('--classifier_path', type=str, default='svm_model.p')
parser.add_argument('--save_file', type=str, default='proba_list.p')
args = parser.parse_args()
main(args) |
the-stack_0_13734 | # Cave factory produces a cave-like structure with no disconnected
# rooms. Caves typically have a smooth, twisty appearance with lots of
# alcoves. This is based largely on the cellular automata examples at:
#
# http://roguebasin.roguelikedevelopment.org
#
# It also borrows code for joining disconnected cells from Dana Larose's
# example:
# http://pixelenvy.ca/wa/ca_cave.html
#
# I've tweaked the CA generations a bit to smooth out the cell joins, and added
# support for building connecting edges. I use this to build connected tiles of
# caves and hallways joining to other parts of the dungeon.
import sys
from random import randrange, random, choice
from disjoint_set import DisjointSet
FLOOR = 1
WALL = 2
TUNNEL = 3
class new:
def __init__(self, length, width, walls=0.40):
self.__length = length
self.__width = width
self.__exits = []
self.__map = []
self.__buf_map = []
self.__gen_initial_map(walls)
self.__ds = DisjointSet()
self.__cpt = (int(self.__length/2), int(self.__width/2))
def resize_map(self, new_length, new_width, center=True):
new_map = [[WALL for i in xrange(new_width)]
for j in xrange(new_length)]
ox = int(new_width/2.0-self.__width/2.0+0.5)
oy = int(new_length/2.0-self.__length/2.0+0.5)
for i in xrange(self.__width):
for j in xrange(self.__length):
x2 = ox + i
y2 = oy + j
if (
x2 >= 0 and
y2 >= 0 and
x2 < new_width and
                    y2 < new_length
                ):
                    # __map is indexed [row][col] (length first, width second)
                    # elsewhere in this class, so copy row-major here as well.
                    new_map[y2][x2] = self.__map[j][i]
self.__map = new_map
self.__length = new_length
self.__width = new_width
self.__exits = []
self.__cpt = (int(self.__length/2), int(self.__width/2))
def print_map(self):
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if self.__map[r][c] == WALL:
sys.stdout.write('#')
elif self.__map[r][c] == TUNNEL:
sys.stdout.write('+')
else:
sys.stdout.write(' ')
print
print
def iterate_walls(self):
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if self.__map[r][c] == WALL:
if (self.__adj_flr_count(r, c) > 0):
yield (c, r)
def iterate_map(self, cell_type):
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if self.__map[r][c] == cell_type:
yield (c, r)
def add_exit(self, pt1, pt2):
while (pt1 != pt2):
if (
pt1[0] < 0 or
pt1[0] >= self.__width or
pt1[1] < 0 or
pt1[1] >= self.__length
):
                sys.exit('WARN: Exit out of range: %s' % str(pt1))
else:
self.__exits.append(pt1)
pt1 = (pt1[0] + cmp(pt2[0], pt1[0]),
pt1[1] + cmp(pt2[1], pt1[1]))
def purge_exits(self):
self.__exits = []
for c in xrange(0, self.__width):
for r in xrange(0, self.__length):
if (
c == 0 or c == self.__width-1 or
r == 0 or r == self.__length-1
):
                    self.__map[r][c] = WALL
def grow_map(self):
self.__generation(1, 2, -1)
def reduce_map(self):
self.__generation(1, 7, -1)
def gen_map(self, mode='default'):
if mode == 'room':
# One large cavern room
self.__generation(4, 5, -1)
self.__join_rooms()
self.__generation(1, 5, -1)
else:
# Windey passages.
            #Repeat 4: W'(p) = R1(p) >= 5 || R2(p) <= 2
            #Repeat 3: W'(p) = R1(p) >= 5
# We do the above, with a cave join pass right before the final
# iteration. This helps smooth out any sharp edges after the join
# pass.
self.__generation(4, 5, 2)
self.__generation(2, 5, -1)
self.__join_rooms()
self.__generation(1, 5, -1)
def __generation(self, count, r1_cutoff, r2_cutoff):
while (count > 0):
self.__buf_map = [[WALL for i in xrange(self.__width)]
for j in xrange(self.__length)]
self.__gen_walls(self.__buf_map)
self.__gen_walls(self.__map)
for r in xrange(1, self.__length-1):
for c in xrange(1, self.__width-1):
adjcount_r1 = self.__adj_wall_count(r, c, 1)
adjcount_r2 = self.__adj_wall_count(r, c, 2)
if(adjcount_r1 >= r1_cutoff or
adjcount_r2 <= r2_cutoff):
self.__buf_map[r][c] = WALL
else:
self.__buf_map[r][c] = FLOOR
self.__map = list(self.__buf_map)
count -= 1
def __gen_initial_map(self, fillprob):
def rwall(fillprob):
if (random() < fillprob):
return WALL
return FLOOR
self.__map = [[rwall(fillprob) for i in xrange(self.__width)]
for j in xrange(self.__length)]
self.__gen_walls(self.__map)
def __gen_walls(self, a_map):
for j in range(0, self.__length):
a_map[j][0] = WALL
a_map[j][self.__width-1] = WALL
for j in range(0, self.__width):
a_map[0][j] = WALL
a_map[self.__length-1][j] = WALL
# Force the exits to be floor. We grow them out from the edge a bit to
# make sure they don't get sealed off.
for pos in self.__exits:
a_map[pos[0]][pos[1]] = FLOOR
for pos2 in ((-1, 0), (1, 0), (0, -1), (0, 1),
(-2, 0), (2, 0), (0, -2), (0, 2)):
p = (pos[0]+pos2[0], pos[1]+pos2[1])
if (p[0] < 1 or p[1] < 1):
continue
if (
p[0] >= self.__width-1 or
p[1] >= self.__length-1
):
continue
a_map[p[0]][p[1]] = FLOOR
def __adj_flr_count(self, sr, sc):
count = 0
for pos in ((-1, 0), (1, 0), (0, -1), (0, 1)):
p = (sr+pos[0], sc+pos[1])
if (p[0] < 0 or p[1] < 0):
continue
if (
                p[0] > self.__length-1 or
                p[1] > self.__width-1
):
continue
if (self.__map[p[0]][p[1]] == FLOOR):
count += 1
return count
def __adj_wall_count(self, sr, sc, rng=1):
count = 0
for r in xrange(-rng, rng+1):
for c in xrange(-rng, rng+1):
#if (r == 0 and c == 0):
# continue
if (abs(r) == 2 and abs(c) == 2):
continue
if (sr + r < 0 or sc + c < 0):
continue
if (sr + r >= self.__length or sc + c >= self.__width):
continue
if self.__map[sr + r][sc + c] == WALL:
count += 1
return count
def __join_rooms(self):
# Divide all cells into joined sets
for r in xrange(0, self.__length):
for c in xrange(0, self.__width):
if self.__map[r][c] != WALL:
self.__union_adj_sqr(r, c)
all_caves = self.__ds.split_sets()
while len(all_caves) > 1:
self.__join_points(all_caves[choice(all_caves.keys())][0])
all_caves = self.__ds.split_sets()
def __union_adj_sqr(self, sr, sc):
loc = (sr, sc)
root1 = self.__ds.find(loc)
# A cell is connected to other cells only in cardinal directions.
# (diagonals don't count for movement).
for pos in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if (sr+pos[0] < 0 or sc+pos[1] < 0):
continue
if (
sr+pos[0] >= self.__length or
sc+pos[1] >= self.__width
):
continue
nloc = (sr+pos[0], sc+pos[1])
if self.__map[nloc[0]][nloc[1]] == FLOOR:
root2 = self.__ds.find(nloc)
if root1 != root2:
self.__ds.union(root1, root2)
def __join_points(self, pt1):
next_pt = pt1
while 1:
dir = self.__get_tunnel_dir(pt1, self.__cpt)
move = randrange(0, 3)
if move == 0:
next_pt = (pt1[0] + dir[0], pt1[1])
elif move == 1:
next_pt = (pt1[0], pt1[1] + dir[1])
else:
next_pt = (pt1[0] + dir[0], pt1[1] + dir[1])
root1 = self.__ds.find(next_pt)
root2 = self.__ds.find(pt1)
if root1 != root2:
self.__ds.union(root1, root2)
for pos in ((0, 0), (-1, 0), (1, 0), (0, -1), (0, 1)):
if (
next_pt[0]+pos[0] < 0 or next_pt[1]+pos[1] < 0 or
next_pt[0]+pos[0] >= self.__length or
next_pt[1]+pos[1] >= self.__width
):
continue
if (self.__map[next_pt[0]+pos[0]][next_pt[1]+pos[1]] == WALL):
self.__map[next_pt[0]+pos[0]][next_pt[1]+pos[1]] = TUNNEL
if self.__stop_drawing(pt1, next_pt, self.__cpt):
return
pt1 = next_pt
def __stop_drawing(self, pt, npt, cpt):
if self.__ds.find(npt) == self.__ds.find(cpt):
return 1
if (
self.__ds.find(pt) != self.__ds.find(npt) and
self.__map[npt[0]][npt[1]] != WALL
):
return 1
return 0
def __get_tunnel_dir(self, pt1, pt2):
if pt1[0] < pt2[0]:
h_dir = +1
elif pt1[0] > pt2[0]:
h_dir = -1
else:
h_dir = 0
if pt1[1] < pt2[1]:
v_dir = +1
elif pt1[1] > pt2[1]:
v_dir = -1
else:
v_dir = 0
return (h_dir, v_dir)
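# Illustrative usage sketch (not part of the original module): generate and print
# a small connected cave; the size and wall probability are arbitrary examples.
if __name__ == '__main__':
    cave = new(40, 40, walls=0.40)
    cave.gen_map()
    cave.print_map()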
|
the-stack_0_13735 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset, load_metric
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from utils_qa import postprocess_qa_predictions
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.6.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: int = field(
default=384,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch (which can "
"be faster on GPU but will be slower on TPU)."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
version_2_with_negative: bool = field(
default=False, metadata={"help": "If true, some of the examples do not have an answer."}
)
null_score_diff_threshold: float = field(
default=0.0,
metadata={
"help": "The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`."
},
)
doc_stride: int = field(
default=128,
metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
)
n_best_size: int = field(
default=20,
metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
)
max_answer_length: int = field(
default=30,
metadata={
"help": "The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation file/test_file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
    # For CSV/JSON files, this script expects question answering columns: 'question', 'context' and 'answers' by
    # default, falling back to positional columns. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
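    # For example (hypothetical invocation; the script name and paths are placeholders, the flags
    # mirror the dataclasses above):
    #   python run_qa.py --model_name_or_path bert-base-uncased --dataset_name squad \
    #       --do_train --do_eval --output_dir /tmp/qa_out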
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
if training_args.do_train:
column_names = datasets["train"].column_names
elif training_args.do_eval:
column_names = datasets["validation"].column_names
else:
column_names = datasets["test"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Training preprocessing
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
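        # (Illustrative: with the defaults above, max_seq_length=384 and doc_stride=128, a long
        # context is split into several overlapping features, with consecutive features sharing
        # doc_stride context tokens.)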
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
            # We will select a sample from the whole data if the argument is specified
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Create train feature from dataset
train_dataset = train_dataset.map(
prepare_train_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_train_samples is not None:
            # The number of samples might increase during feature creation, so we select only the specified max samples
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Validation preprocessing
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
            # Set the offset_mapping entries that are not part of the context to None, so it's easy to determine
            # whether a token position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = datasets["validation"]
if data_args.max_eval_samples is not None:
            # We will select a sample from the whole data
eval_examples = eval_examples.select(range(data_args.max_eval_samples))
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_eval_samples is not None:
            # During feature creation the number of dataset samples might increase, so we select the required samples again
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict:
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
predict_examples = datasets["test"]
if data_args.max_predict_samples is not None:
            # We will select a sample from the whole data
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
# Predict Feature Creation
predict_dataset = predict_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.max_predict_samples is not None:
            # During feature creation the number of dataset samples might increase, so we select the required samples again
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Data collator
# We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data
# collator.
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=data_args.version_2_with_negative,
n_best_size=data_args.n_best_size,
max_answer_length=data_args.max_answer_length,
null_score_diff_threshold=data_args.null_score_diff_threshold,
output_dir=training_args.output_dir,
is_world_process_zero=trainer.is_world_process_zero(),
prefix=stage,
)
# Format the result to the format the metric expects.
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
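    # The dict returned above contains exact-match and F1 style scores; the precise key names
    # depend on whether the squad or squad_v2 metric was loaded.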
# Initialize our Trainer
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
post_process_function=post_processing_function,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
results = trainer.predict(predict_dataset, predict_examples)
metrics = results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tags": "question-answering"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
the-stack_0_13738 | import torch
import torch.nn as nn
class Generator(nn.Module):
def __init__(self, channels_noise, features_g=64):
super(Generator, self).__init__()
self.gen = nn.Sequential(
# Input: N x channels_noise x 1 x 1
self._block(channels_noise, features_g * 16, 4, 1, 0), # img: 4x4
self._block(features_g * 16, features_g * 8, 4, 2, 1), # img: 8x8
self._block(features_g * 8, features_g * 4, 4, 2, 1), # img: 16x16
# self._block(features_g * 4, features_g * 2, 4, 2, 1), # img: 32x32
nn.ConvTranspose2d(features_g * 4, 4, kernel_size=4, stride=2, padding=1),
            # Output: N x 4 x 32 x 32 (with the 32x32 block above commented out)
)
self.activation = nn.Tanh()
def _block(self, in_channels, out_channels, kernel_size, stride, padding):
return nn.Sequential(
nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size,
stride,
padding,
bias=False,
),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.gen(x)
        return self.activation(x) * 2
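# Minimal usage sketch (illustrative only; the batch size and channels_noise=100 are assumed
# values, and the shapes follow the blocks defined above):
if __name__ == "__main__":
    gen = Generator(channels_noise=100)
    noise = torch.randn(8, 100, 1, 1)
    fake = gen(noise)
    print(fake.shape)  # torch.Size([8, 4, 32, 32]); values lie in (-2, 2) because of 2 * tanh
 |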
the-stack_0_13740 | from typing import Any
from eth_utils import (
encode_hex,
is_bytes,
is_integer,
)
from eth_utils.toolz import curry
from eth_keys.constants import (
SECPK1_N,
)
from eth_keys.exceptions import (
ValidationError,
)
def validate_integer(value: Any) -> None:
if not is_integer(value) or isinstance(value, bool):
raise ValidationError("Value must be a an integer. Got: {0}".format(type(value)))
def validate_bytes(value: Any) -> None:
if not is_bytes(value):
raise ValidationError("Value must be a byte string. Got: {0}".format(type(value)))
@curry
def validate_gte(value: Any, minimum: int) -> None:
validate_integer(value)
if value < minimum:
raise ValidationError(
"Value {0} is not greater than or equal to {1}".format(
value, minimum,
)
)
@curry
def validate_lte(value: Any, maximum: int) -> None:
validate_integer(value)
if value > maximum:
raise ValidationError(
"Value {0} is not less than or equal to {1}".format(
value, maximum,
)
)
validate_lt_secpk1n = validate_lte(maximum=SECPK1_N - 1)
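# Because validate_lte is curried, binding only `maximum` above yields a one-argument
# validator; e.g. (illustrative) validate_lt_secpk1n(1) passes silently, while
# validate_lt_secpk1n(SECPK1_N) raises ValidationError.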
def validate_bytes_length(value: bytes, expected_length: int, name: str) -> None:
actual_length = len(value)
if actual_length != expected_length:
raise ValidationError(
"Unexpected {name} length: Expected {expected_length}, but got {actual_length} "
"bytes".format(
name=name,
expected_length=expected_length,
actual_length=actual_length,
)
)
def validate_message_hash(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 32, "message hash")
def validate_uncompressed_public_key_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 64, "uncompressed public key")
def validate_compressed_public_key_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 33, "compressed public key")
first_byte = value[0:1]
if first_byte not in (b"\x02", b"\x03"):
raise ValidationError(
"Unexpected compressed public key format: Must start with 0x02 or 0x03, but starts "
"with {first_byte}".format(
first_byte=encode_hex(first_byte),
)
)
def validate_private_key_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 32, "private key")
def validate_signature_bytes(value: Any) -> None:
validate_bytes(value)
validate_bytes_length(value, 65, "signature")
|
the-stack_0_13741 | """
Plugin to extract tables from an invoice.
"""
import re
import logging
logger = logging.getLogger(__name__)
DEFAULT_OPTIONS = {'field_separator': r'\s+', 'line_separator': r'\n'}
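# Illustrative (hypothetical) entry of the 'tables' list consumed below; 'start', 'end' and
# 'body' are required, while the separators fall back to DEFAULT_OPTIONS:
#   {'start': r'Item\s+Description', 'end': r'Subtotal',
#    'body': r'(?P<qty>\d+)\s+(?P<description>.+?)\s+(?P<amount_line>[\d.,]+)'}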
def extract(self, content, output):
"""Try to extract tables from an invoice"""
for table in self['tables']:
# First apply default options.
plugin_settings = DEFAULT_OPTIONS.copy()
plugin_settings.update(table)
table = plugin_settings
# Validate settings
assert 'start' in table, 'Table start regex missing'
assert 'end' in table, 'Table end regex missing'
assert 'body' in table, 'Table body regex missing'
start = re.search(table['start'], content)
end = re.search(table['end'], content)
if not start or not end:
logger.warning('no table body found - start %s, end %s', start, end)
continue
table_body = content[start.end(): end.start()]
for line in re.split(table['line_separator'], table_body):
            # Skip blank lines
            if not line.strip('\n'):
                continue
match = re.search(table['body'], line)
if match:
for field, value in match.groupdict().items():
# If a field name already exists, do not overwrite it
if field in output:
continue
if field.startswith('date') or field.endswith('date'):
output[field] = self.parse_date(value)
if not output[field]:
logger.error("Date parsing failed on date '%s'", value)
return None
elif field.startswith('amount'):
output[field] = self.parse_number(value)
else:
output[field] = value
            else:
                logger.debug('ignoring *%s* because it doesn\'t match anything', line)
|
the-stack_0_13742 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Mario Motta
# Yang Gao
# Qiming Sun <[email protected]>
# Jason Yu
# Alec White
#
import time
from functools import reduce
import numpy as np
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import scf
from pyscf.cc import uccsd
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.lib.kpts_helper import gamma_point
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM # noqa
from pyscf.pbc.mp.kump2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
from pyscf.pbc.cc import kintermediates_uhf
from pyscf import __config__
einsum = lib.einsum
# --- list2array
def mo_c_list_to_array(mo_coeff):
mo_coeff_tmp=[]
for js in range(2):
tmp_nk = len(mo_coeff[js])
tmp_nb = mo_coeff[js][0].shape[0]
tmp_array = np.zeros((tmp_nk,tmp_nb,tmp_nb),dtype=complex)
for ik in range(tmp_nk):
tmp_array[ik,:,:]=mo_coeff[js][ik][:,:]
mo_coeff_tmp.append(tmp_array)
return mo_coeff_tmp
def convert_mo_coeff(mo_coeff):
if isinstance(mo_coeff[0], list):
mo_coeff=mo_c_list_to_array(mo_coeff)
return mo_coeff
def update_amps(cc, t1, t2, eris):
time0 = time.clock(), time.time()
log = logger.Logger(cc.stdout, cc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
Ht1a = np.zeros_like(t1a)
Ht1b = np.zeros_like(t1b)
Ht2aa = np.zeros_like(t2aa)
Ht2ab = np.zeros_like(t2ab)
Ht2bb = np.zeros_like(t2bb)
nkpts, nocca, nvira = t1a.shape
noccb, nvirb = t1b.shape[1:]
#fvv_ = eris.fock[0][:,nocca:,nocca:]
#fVV_ = eris.fock[1][:,noccb:,noccb:]
#foo_ = eris.fock[0][:,:nocca,:nocca]
#fOO_ = eris.fock[1][:,:noccb,:noccb]
fov_ = eris.fock[0][:,:nocca,nocca:]
fOV_ = eris.fock[1][:,:noccb,noccb:]
# Get location of padded elements in occupied and virtual space
nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(cc, kind="split")
nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
mo_ea_v = [e[nocca:] + cc.level_shift for e in eris.mo_energy[0]]
mo_eb_v = [e[noccb:] + cc.level_shift for e in eris.mo_energy[1]]
Fvv_, FVV_ = kintermediates_uhf.cc_Fvv(cc, t1, t2, eris)
Foo_, FOO_ = kintermediates_uhf.cc_Foo(cc, t1, t2, eris)
Fov_, FOV_ = kintermediates_uhf.cc_Fov(cc, t1, t2, eris)
# Move energy terms to the other side
for k in range(nkpts):
Fvv_[k][np.diag_indices(nvira)] -= mo_ea_v[k]
FVV_[k][np.diag_indices(nvirb)] -= mo_eb_v[k]
Foo_[k][np.diag_indices(nocca)] -= mo_ea_o[k]
FOO_[k][np.diag_indices(noccb)] -= mo_eb_o[k]
# Get the momentum conservation array
kconserv = cc.khelper.kconserv
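    # (kconserv[ki, kj, kk] gives the index kl for which k_i - k_j + k_k - k_l equals a
    # reciprocal lattice vector, i.e. crystal momentum is conserved.)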
# T1 equation
P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
Ht1a += fov_.conj()
Ht1b += fOV_.conj()
Ht1a += einsum('xyximae,yme->xia', t2aa, Fov_)
Ht1a += einsum('xyximae,yme->xia', t2ab, FOV_)
Ht1b += einsum('xyximae,yme->xia', t2bb, FOV_)
Ht1b += einsum('yxymiea,yme->xia', t2ab, Fov_)
Ht1a -= einsum('xyzmnae, xzymine->zia', t2aa, eris.ooov)
Ht1a -= einsum('xyzmNaE, xzymiNE->zia', t2ab, eris.ooOV)
#Ht1a -= einsum('xyzmnae,xzymine,xyzw->zia', t2aa, eris.ooov, P)
#Ht1a -= einsum('xyzmNaE,xzymiNE,xyzw->zia', t2ab, eris.ooOV, P)
Ht1b -= einsum('xyzmnae, xzymine->zia', t2bb, eris.OOOV)
#Ht1b -= einsum('xyzmnae,xzymine,xyzw->zia', t2bb, eris.OOOV, P)
Ht1b -= einsum('yxwnmea,xzymine,xyzw->zia', t2ab, eris.OOov, P)
for ka in range(nkpts):
Ht1a[ka] += einsum('ie,ae->ia', t1a[ka], Fvv_[ka])
Ht1b[ka] += einsum('ie,ae->ia', t1b[ka], FVV_[ka])
Ht1a[ka] -= einsum('ma,mi->ia', t1a[ka], Foo_[ka])
Ht1b[ka] -= einsum('ma,mi->ia', t1b[ka], FOO_[ka])
for km in range(nkpts):
# ka == ki; km == kf == km
# <ma||if> = [mi|af] - [mf|ai]
# => [mi|af] - [fm|ia]
Ht1a[ka] += einsum('mf,aimf->ia', t1a[km], eris.voov[ka, ka, km])
Ht1a[ka] -= einsum('mf,miaf->ia', t1a[km], eris.oovv[km, ka, ka])
Ht1a[ka] += einsum('MF,aiMF->ia', t1b[km], eris.voOV[ka, ka, km])
# miaf - mfai => miaf - fmia
Ht1b[ka] += einsum('MF,AIMF->IA', t1b[km], eris.VOOV[ka, ka, km])
Ht1b[ka] -= einsum('MF,MIAF->IA', t1b[km], eris.OOVV[km, ka, ka])
Ht1b[ka] += einsum('mf,fmIA->IA', t1a[km], eris.voOV[km, km, ka].conj())
for kf in range(nkpts):
ki = ka
ke = kconserv[ki, kf, km]
Ht1a[ka] += einsum('imef,fmea->ia', t2aa[ki,km,ke], eris.vovv[kf,km,ke].conj())
Ht1a[ka] += einsum('iMeF,FMea->ia', t2ab[ki,km,ke], eris.VOvv[kf,km,ke].conj())
Ht1b[ka] += einsum('IMEF,FMEA->IA', t2bb[ki,km,ke], eris.VOVV[kf,km,ke].conj())
Ht1b[ka] += einsum('mIfE,fmEA->IA', t2ab[km,ki,kf], eris.voVV[kf,km,ke].conj())
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
# Fvv equation
Ftmpa_kb = Fvv_[kb] - 0.5 * einsum('mb,me->be', t1a[kb], Fov_[kb])
Ftmpb_kb = FVV_[kb] - 0.5 * einsum('MB,ME->BE', t1b[kb], FOV_[kb])
Ftmpa_ka = Fvv_[ka] - 0.5 * einsum('mb,me->be', t1a[ka], Fov_[ka])
Ftmpb_ka = FVV_[ka] - 0.5 * einsum('MB,ME->BE', t1b[ka], FOV_[ka])
tmp = einsum('ijae,be->ijab', t2aa[ki, kj, ka], Ftmpa_kb)
Ht2aa[ki, kj, ka] += tmp
tmp = einsum('IJAE,BE->IJAB', t2bb[ki, kj, ka], Ftmpb_kb)
Ht2bb[ki, kj, ka] += tmp
tmp = einsum('iJaE,BE->iJaB', t2ab[ki, kj, ka], Ftmpb_kb)
Ht2ab[ki, kj, ka] += tmp
tmp = einsum('iJeB,ae->iJaB', t2ab[ki, kj, ka], Ftmpa_ka)
Ht2ab[ki, kj, ka] += tmp
#P(ab)
tmp = einsum('ijbe,ae->ijab', t2aa[ki, kj, kb], Ftmpa_ka)
Ht2aa[ki, kj, ka] -= tmp
tmp = einsum('IJBE,AE->IJAB', t2bb[ki, kj, kb], Ftmpb_ka)
Ht2bb[ki, kj, ka] -= tmp
# Foo equation
Ftmpa_kj = Foo_[kj] + 0.5 * einsum('je,me->mj', t1a[kj], Fov_[kj])
Ftmpb_kj = FOO_[kj] + 0.5 * einsum('JE,ME->MJ', t1b[kj], FOV_[kj])
Ftmpa_ki = Foo_[ki] + 0.5 * einsum('je,me->mj', t1a[ki], Fov_[ki])
Ftmpb_ki = FOO_[ki] + 0.5 * einsum('JE,ME->MJ', t1b[ki], FOV_[ki])
tmp = einsum('imab,mj->ijab', t2aa[ki, kj, ka], Ftmpa_kj)
Ht2aa[ki, kj, ka] -= tmp
tmp = einsum('IMAB,MJ->IJAB', t2bb[ki, kj, ka], Ftmpb_kj)
Ht2bb[ki, kj, ka] -= tmp
tmp = einsum('iMaB,MJ->iJaB', t2ab[ki, kj, ka], Ftmpb_kj)
Ht2ab[ki, kj, ka] -= tmp
tmp = einsum('mJaB,mi->iJaB', t2ab[ki, kj, ka], Ftmpa_ki)
Ht2ab[ki, kj, ka] -= tmp
#P(ij)
tmp = einsum('jmab,mi->ijab', t2aa[kj, ki, ka], Ftmpa_ki)
Ht2aa[ki, kj, ka] += tmp
tmp = einsum('JMAB,MI->IJAB', t2bb[kj, ki, ka], Ftmpb_ki)
Ht2bb[ki, kj, ka] += tmp
# T2 equation
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
Ht2aa += (eris_ovov.transpose(0,2,1,3,5,4,6) - eris_ovov.transpose(2,0,1,5,3,4,6)).conj()
Ht2bb += (eris_OVOV.transpose(0,2,1,3,5,4,6) - eris_OVOV.transpose(2,0,1,5,3,4,6)).conj()
Ht2ab += eris_ovOV.transpose(0,2,1,3,5,4,6).conj()
tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
Woooo, WooOO, WOOOO = kintermediates_uhf.cc_Woooo(cc, t1, t2, eris)
# Add the contributions from Wvvvv
for km, ki, kn in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km,ki,kn]
Woooo[km,ki,kn] += .5 * einsum('xmenf, xijef->minj', eris_ovov[km,:,kn], tauaa[ki,kj])
WOOOO[km,ki,kn] += .5 * einsum('xMENF, xIJEF->MINJ', eris_OVOV[km,:,kn], taubb[ki,kj])
WooOO[km,ki,kn] += .5 * einsum('xmeNF, xiJeF->miNJ', eris_ovOV[km,:,kn], tauab[ki,kj])
for km, ki, kn in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km,ki,kn]
Ht2aa[ki,kj,:] += einsum('minj,wmnab->wijab', Woooo[km,ki,kn], tauaa[km,kn]) * .5
Ht2bb[ki,kj,:] += einsum('MINJ,wMNAB->wIJAB', WOOOO[km,ki,kn], taubb[km,kn]) * .5
Ht2ab[ki,kj,:] += einsum('miNJ,wmNaB->wiJaB', WooOO[km,ki,kn], tauab[km,kn])
add_vvvv_(cc, (Ht2aa, Ht2ab, Ht2bb), t1, t2, eris)
Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO = \
kintermediates_uhf.cc_Wovvo(cc, t1, t2, eris)
#:Ht2ab += einsum('xwzimae,wvumeBJ,xwzv,wuvy->xyziJaB', t2aa, WovVO, P, P)
#:Ht2ab += einsum('xwziMaE,wvuMEBJ,xwzv,wuvy->xyziJaB', t2ab, WOVVO, P, P)
#:Ht2ab -= einsum('xie,zma,uwzBJme,zuwx,xyzu->xyziJaB', t1a, t1a, eris.VOov, P, P)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw, kv, ku]
Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2aa[kx,kw,kz], WovVO[kw,kv,ku])
Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2ab[kx,kw,kz], WOVVO[kw,kv,ku])
#for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
# kx = kconserv[kz,kw,ku]
# ky = kconserv[kz,kx,ku]
# continue
# Ht2ab[kx, ky, kz] -= lib.einsum('ie, ma, emjb->ijab', t1a[kx], t1a[kz], eris.voOV[kx,kz,kw].conj())
Ht2ab -= einsum('xie, yma, xyzemjb->xzyijab', t1a, t1a, eris.voOV[:].conj())
#:Ht2ab += einsum('wxvmIeA,wvumebj,xwzv,wuvy->yxujIbA', t2ab, Wovvo, P, P)
#:Ht2ab += einsum('wxvMIEA,wvuMEbj,xwzv,wuvy->yxujIbA', t2bb, WOVvo, P, P)
#:Ht2ab -= einsum('xIE,zMA,uwzbjME,zuwx,xyzu->yxujIbA', t1b, t1b, eris.voOV, P, P)
#for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
# kv = kconserv[kx, kz, kw]
# for ku in range(nkpts):
# ky = kconserv[kw, kv, ku]
#Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2ab[kw,kx,kv], Wovvo[kw,kv,ku])
#Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2bb[kw,kx,kv], WOVvo[kw,kv,ku])
for km, ke, kb in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2ab[km,:,ke], Wovvo[km,ke,kb])
Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2bb[km,:,ke], WOVvo[km,ke,kb])
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz, kw, ku]
ky = kconserv[kz, kx, ku]
Ht2ab[ky,kx,ku] -= lib.einsum('ie, ma, bjme->jiba', t1b[kx], t1b[kz], eris.voOV[ku,kw,kz])
#:Ht2ab += einsum('xwviMeA,wvuMebJ,xwzv,wuvy->xyuiJbA', t2ab, WOvvO, P, P)
#:Ht2ab -= einsum('xie,zMA,zwuMJbe,zuwx,xyzu->xyuiJbA', t1a, t1b, eris.OOvv, P, P)
#for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
# kv = kconserv[kx, kz, kw]
# for ku in range(nkpts):
# ky = kconserv[kw, kv, ku]
# Ht2ab[kx,ky,ku] += lib.einsum('imea,mebj->ijba', t2ab[kx,kw,kv],WOvvO[kw,kv,ku])
for km, ke, kb in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
Ht2ab[:,kj,kb] += einsum('ximea, mebj->xijba', t2ab[:,km,ke], WOvvO[km,ke,kb])
for kz,ku,kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz, kw, ku]
ky = kconserv[kz, kx, ku]
Ht2ab[kx,ky,ku] -= lib.einsum('ie, ma, mjbe->ijba', t1a[kx], t1b[kz], eris.OOvv[kz, kw, ku])
#:Ht2ab += einsum('wxzmIaE,wvumEBj,xwzv,wuvy->yxzjIaB', t2ab, WoVVo, P, P)
#:Ht2ab -= einsum('xIE,zma,zwumjBE,zuwx,xyzu->yxzjIaB', t1b, t1a, eris.ooVV, P, P)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw, kv, ku]
Ht2ab[ky, kx, kz] += lib.einsum('miae,mebj->jiab', t2ab[kw,kx,kz], WoVVo[kw,kv,ku])
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz,kw,ku]
ky = kconserv[kz,kx,ku]
Ht2ab[ky,kx,kz] -= lib.einsum('ie, ma, mjbe->jiab', t1b[kx], t1a[kz], eris.ooVV[kz,kw,ku])
#:u2aa = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2aa, Wovvo, P, P)
#:u2aa += einsum('xwziMaE,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WOVvo, P, P)
    # Left this in to keep the proper shape; needs to be replaced later
u2aa = np.zeros_like(t2aa)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw, kv, ku]
u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2aa[kx,kw,kz], Wovvo[kw,kv,ku])
u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2ab[kx,kw,kz], WOVvo[kw,kv,ku])
#:u2aa += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1a, t1a, eris.oovv, P, P)
#:u2aa -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1a, t1a, eris.voov, P, P)
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz,kw,ku]
ky = kconserv[kz,kx,ku]
u2aa[kx,ky,kz] += lib.einsum('ie,ma,mjbe->ijab',t1a[kx],t1a[kz],eris.oovv[kz,kw,ku])
u2aa[kx,ky,kz] -= lib.einsum('ie,ma,bjme->ijab',t1a[kx],t1a[kz],eris.voov[ku,kw,kz])
#:u2aa += np.einsum('xie,uyzbjae,uzyx->xyzijab', t1a, eris.vovv, P)
#:u2aa -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooov.conj())
for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[ky, ku, kx]
u2aa[kx, ky, kz] += lib.einsum('ie, bjae->ijab', t1a[kx], eris.vovv[ku,ky,kz])
u2aa[kx, ky, kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooov[kx,kz,ky].conj())
u2aa = u2aa - u2aa.transpose(1,0,2,4,3,5,6)
u2aa = u2aa - einsum('xyzijab,xyzu->xyuijba', u2aa, P)
Ht2aa += u2aa
#:u2bb = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2bb, WOVVO, P, P)
#:u2bb += einsum('wxvMiEa,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WovVO, P, P)
#:u2bb += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1b, t1b, eris.OOVV, P, P)
#:u2bb -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1b, t1b, eris.VOOV, P, P)
u2bb = np.zeros_like(t2bb)
for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
kv = kconserv[kx, kz, kw]
for ku in range(nkpts):
ky = kconserv[kw,kv, ku]
u2bb[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2bb[kx,kw,kz], WOVVO[kw,kv,ku])
u2bb[kx, ky, kz] += lib.einsum('miea, mebj-> ijab', t2ab[kw,kx,kv],WovVO[kw,kv,ku])
for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
kx = kconserv[kz, kw, ku]
ky = kconserv[kz, kx, ku]
u2bb[kx, ky, kz] += lib.einsum('ie, ma, mjbe->ijab',t1b[kx],t1b[kz],eris.OOVV[kz,kw,ku])
u2bb[kx, ky, kz] -= lib.einsum('ie, ma, bjme->ijab', t1b[kx], t1b[kz],eris.VOOV[ku,kw,kz])
#:u2bb += np.einsum('xie,uzybjae,uzyx->xyzijab', t1b, eris.VOVV, P)
#:u2bb -= np.einsum('zma,xzyimjb->xyzijab', t1b, eris.OOOV.conj())
for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[ky, ku, kx]
u2bb[kx,ky,kz] += lib.einsum('ie,bjae->ijab', t1b[kx], eris.VOVV[ku,ky,kz])
#for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
# u2bb[kx,ky,kz] -= lib.einsum('ma, imjb-> ijab', t1b[kz], eris.OOOV[kx,kz,ky].conj())
u2bb -= einsum('zma, xzyimjb->xyzijab', t1b, eris.OOOV[:].conj())
u2bb = u2bb - u2bb.transpose(1,0,2,4,3,5,6)
u2bb = u2bb - einsum('xyzijab,xyzu->xyuijba', u2bb, P)
Ht2bb += u2bb
#:Ht2ab += np.einsum('xie,uyzBJae,uzyx->xyziJaB', t1a, eris.VOvv, P)
#:Ht2ab += np.einsum('yJE,zxuaiBE,zuxy->xyziJaB', t1b, eris.voVV, P)
#:Ht2ab -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooOV.conj())
#:Ht2ab -= np.einsum('umb,yuxjmia,xyuz->xyzijab', t1b, eris.OOov.conj(), P)
for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[ky,ku,kx]
Ht2ab[kx,ky,kz] += lib.einsum('ie, bjae-> ijab', t1a[kx], eris.VOvv[ku,ky,kz])
Ht2ab[kx,ky,kz] += lib.einsum('je, aibe-> ijab', t1b[ky], eris.voVV[kz,kx,ku])
#for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
# Ht2ab[kx,ky,kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooOV[kx,kz,ky].conj())
Ht2ab -= einsum('zma, xzyimjb->xyzijab', t1a, eris.ooOV[:].conj())
for kx, ky, ku in kpts_helper.loop_kkk(nkpts):
kz = kconserv[kx, ku, ky]
Ht2ab[kx,ky,kz] -= lib.einsum('mb,jmia->ijab',t1b[ku],eris.OOov[ky,ku,kx].conj())
eia = []
eIA = []
for ki in range(nkpts):
tmp_alpha = []
tmp_beta = []
for ka in range(nkpts):
tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])
tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
tmp_alpha.append(tmp_eia)
tmp_beta.append(tmp_eIA)
eia.append(tmp_alpha)
eIA.append(tmp_beta)
for ki in range(nkpts):
ka = ki
# Remove zero/padded elements from denominator
Ht1a[ki] /= eia[ki][ka]
Ht1b[ki] /= eIA[ki][ka]
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
eijab = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
Ht2aa[ki,kj,ka] /= eijab
eijab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
Ht2ab[ki,kj,ka] /= eijab
eijab = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
Ht2bb[ki,kj,ka] /= eijab
time0 = log.timer_debug1('update t1 t2', *time0)
return (Ht1a, Ht1b), (Ht2aa, Ht2ab, Ht2bb)
def get_normt_diff(cc, t1, t2, t1new, t2new):
    '''Calculates the combined norm of (t1 - t1new) and (t2 - t2new).'''
return (np.linalg.norm(t1new[0] - t1[0])**2 +
np.linalg.norm(t1new[1] - t1[1])**2 +
np.linalg.norm(t2new[0] - t2[0])**2 +
np.linalg.norm(t2new[1] - t2[1])**2 +
np.linalg.norm(t2new[2] - t2[2])**2) ** .5
def energy(cc, t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
kka, noa, nva = t1a.shape
kkb, nob, nvb = t1b.shape
assert(kka == kkb)
nkpts = kka
s = 0.0 + 0j
fa, fb = eris.fock
for ki in range(nkpts):
s += einsum('ia,ia', fa[ki, :noa, noa:], t1a[ki, :, :])
s += einsum('ia,ia', fb[ki, :nob, nob:], t1b[ki, :, :])
t1t1aa = np.zeros(shape=t2aa.shape, dtype=t2aa.dtype)
t1t1ab = np.zeros(shape=t2ab.shape, dtype=t2ab.dtype)
t1t1bb = np.zeros(shape=t2bb.shape, dtype=t2bb.dtype)
for ki in range(nkpts):
ka = ki
for kj in range(nkpts):
t1t1aa[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1a[kj, :, :])
t1t1ab[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1b[kj, :, :])
t1t1bb[ki, kj, ka, :, :, :, :] = einsum('ia,jb->ijab', t1b[ki, :, :], t1b[kj, :, :])
tauaa = t2aa + 2*t1t1aa
tauab = t2ab + t1t1ab
taubb = t2bb + 2*t1t1bb
d = 0.0 + 0.j
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,tauaa)
- einsum('yzxjaib,xyzijab->',eris.ovov,tauaa))
d += einsum('xzyiajb,xyzijab->',eris.ovOV,tauab)
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,taubb)
- einsum('yzxjaib,xyzijab->',eris.OVOV,taubb))
e = s + d
e /= nkpts
if abs(e.imag) > 1e-4:
logger.warn(cc, 'Non-zero imaginary part found in KCCSD energy %s', e)
return e.real
#def get_nocc(cc, per_kpoint=False):
# '''See also function get_nocc in pyscf/pbc/mp2/kmp2.py'''
# if cc._nocc is not None:
# return cc._nocc
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nocca = [(np.count_nonzero(cc.mo_occ[0][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
# noccb = [(np.count_nonzero(cc.mo_occ[1][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nocca = np.amax(nocca)
# noccb = np.amax(noccb)
# return nocca, noccb
#
#def get_nmo(cc, per_kpoint=False):
# '''See also function get_nmo in pyscf/pbc/mp2/kmp2.py'''
# if cc._nmo is not None:
# return cc._nmo
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nmoa = [(cc.mo_occ[0][k].size - cc.frozen) for k in range(cc.nkpts)]
# nmob = [(cc.mo_occ[1][k].size - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nmoa = np.amax(nmoa)
# nmob = np.amax(nmob)
# return nmoa, nmob
#
#def get_frozen_mask(cc):
# '''See also get_frozen_mask function in pyscf/pbc/mp2/kmp2.py'''
#
# moidxa = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[0]]
# moidxb = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[1]]
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# for idx in moidxa:
# idx[:cc.frozen] = False
# for idx in moidxb:
# idx[:cc.frozen] = False
# else:
# raise NotImplementedError
#
#     return moidxa, moidxb
def amplitudes_to_vector(t1, t2):
return np.hstack((t1[0].ravel(), t1[1].ravel(),
t2[0].ravel(), t2[1].ravel(), t2[2].ravel()))
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
sizes = (nkpts*nocca*nvira, nkpts*noccb*nvirb,
nkpts**3*nocca**2*nvira**2, nkpts**3*nocca*noccb*nvira*nvirb,
nkpts**3*noccb**2*nvirb**2)
sections = np.cumsum(sizes[:-1])
t1a, t1b, t2aa, t2ab, t2bb = np.split(vec, sections)
t1a = t1a.reshape(nkpts,nocca,nvira)
t1b = t1b.reshape(nkpts,noccb,nvirb)
t2aa = t2aa.reshape(nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)
t2ab = t2ab.reshape(nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)
t2bb = t2bb.reshape(nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)
return (t1a,t1b), (t2aa,t2ab,t2bb)
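# Round-trip sketch (hypothetical sizes): for nkpts=2, nocc=(2, 2) and nmo=(6, 6),
#   vec = amplitudes_to_vector(t1, t2)
#   t1, t2 = vector_to_amplitudes(vec, (6, 6), (2, 2), nkpts=2)
# recovers (t1a, t1b) and (t2aa, t2ab, t2bb) with the shapes unpacked above.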
def add_vvvv_(cc, Ht2, t1, t2, eris):
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nkpts = cc.nkpts
kconserv = cc.khelper.kconserv
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
Ht2aa, Ht2ab, Ht2bb = Ht2
if cc.direct and getattr(eris, 'Lpv', None) is not None:
def get_Wvvvv(ka, kc, kb):
kd = kconserv[ka,kc,kb]
Lpv = eris.Lpv
LPV = eris.LPV
Lbd = (Lpv[kb,kd][:,nocca:] -
lib.einsum('Lkd,kb->Lbd', Lpv[kb,kd][:,:nocca], t1a[kb]))
Wvvvv = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], Lbd)
kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
Lpv[kb,kd][:,nocca:])
Wvvvv -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])
LBD = (LPV[kb,kd][:,noccb:] -
lib.einsum('Lkd,kb->Lbd', LPV[kb,kd][:,:noccb], t1b[kb]))
WvvVV = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], LBD)
kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
LPV[kb,kd][:,noccb:])
WvvVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])
WVVVV = lib.einsum('Lac,Lbd->acbd', LPV[ka,kc][:,noccb:], LBD)
kcbd = lib.einsum('Lkc,Lbd->kcbd', LPV[ka,kc][:,:noccb],
LPV[kb,kd][:,noccb:])
WVVVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1b[ka])
Wvvvv *= (1./nkpts)
WvvVV *= (1./nkpts)
WVVVV *= (1./nkpts)
return Wvvvv, WvvVV, WVVVV
else:
_Wvvvv, _WvvVV, _WVVVV = kintermediates_uhf.cc_Wvvvv_half(cc, t1, t2, eris)
def get_Wvvvv(ka, kc, kb):
return _Wvvvv[ka,kc,kb], _WvvVV[ka,kc,kb], _WVVVV[ka,kc,kb]
#:Ht2aa += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', tauaa, _Wvvvv-_Wvvvv.transpose(2,1,0,5,4,3,6), P, P) * .5
#:Ht2bb += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', taubb, _WVVVV-_WVVVV.transpose(2,1,0,5,4,3,6), P, P) * .5
#:Ht2ab += np.einsum('xyuiJeF,zuwaeBF,xyuv,zwuv->xyziJaB', tauab, _WvvVV, P, P)
for ka, kb, kc in kpts_helper.loop_kkk(nkpts):
kd = kconserv[ka,kc,kb]
Wvvvv, WvvVV, WVVVV = get_Wvvvv(ka, kc, kb)
for ki in range(nkpts):
kj = kconserv[ka,ki,kb]
tauaa = t2aa[ki,kj,kc].copy()
tauab = t2ab[ki,kj,kc].copy()
taubb = t2bb[ki,kj,kc].copy()
if ki == kc and kj == kd:
tauaa += einsum('ic,jd->ijcd', t1a[ki], t1a[kj])
tauab += einsum('ic,jd->ijcd', t1a[ki], t1b[kj])
taubb += einsum('ic,jd->ijcd', t1b[ki], t1b[kj])
if ki == kd and kj == kc:
tauaa -= einsum('id,jc->ijcd', t1a[ki], t1a[kj])
taubb -= einsum('id,jc->ijcd', t1b[ki], t1b[kj])
tmp = lib.einsum('acbd,ijcd->ijab', Wvvvv, tauaa) * .5
Ht2aa[ki,kj,ka] += tmp
Ht2aa[ki,kj,kb] -= tmp.transpose(0,1,3,2)
tmp = lib.einsum('acbd,ijcd->ijab', WVVVV, taubb) * .5
Ht2bb[ki,kj,ka] += tmp
Ht2bb[ki,kj,kb] -= tmp.transpose(0,1,3,2)
Ht2ab[ki,kj,ka] += lib.einsum('acbd,ijcd->ijab', WvvVV, tauab)
Wvvvv = WvvVV = WVVVV = None
_Wvvvv = _WvvVV = _WVVVV = None
    # Contractions below are merged into the Woooo intermediates
# tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
# P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
# minj = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.ovov, tauaa, P, P)
# MINJ = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.OVOV, taubb, P, P)
# miNJ = np.einsum('xwymeNF,uvwiJeF,xywz,uvwz->xuymiNJ', eris.ovOV, tauab, P, P)
# Ht2aa += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', minj, tauaa, P) * .25
# Ht2bb += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', MINJ, taubb, P) * .25
# Ht2ab += np.einsum('xuymiNJ,xywmNaB,xyuv->uvwiJaB', miNJ, tauab, P) * .5
return (Ht2aa, Ht2ab, Ht2bb)
class KUCCSD(uccsd.UCCSD):
max_space = getattr(__config__, 'pbc_cc_kccsd_uhf_KUCCSD_max_space', 20)
def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
assert(isinstance(mf, scf.khf.KSCF))
uccsd.UCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
self.kpts = mf.kpts
self.mo_energy = mf.mo_energy
self.khelper = kpts_helper.KptsHelper(mf.cell, self.kpts)
self.direct = True # If possible, use GDF to compute Wvvvv on-the-fly
keys = set(['kpts', 'mo_energy', 'khelper', 'max_space', 'direct'])
self._keys = self._keys.union(keys)
@property
def nkpts(self):
return len(self.kpts)
get_normt_diff = get_normt_diff
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
update_amps = update_amps
energy = energy
def dump_flags(self, verbose=None):
return uccsd.UCCSD.dump_flags(self, verbose)
def ao2mo(self, mo_coeff=None):
from pyscf.pbc.df.df import GDF
cell = self._scf.cell
nkpts = self.nkpts
nmoa, nmob = self.nmo
mem_incore = nkpts**3 * (nmoa**4 + nmob**4) * 8 / 1e6
mem_now = lib.current_memory()[0]
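        # Illustrative estimate (assumed sizes): nkpts=8 and nmoa = nmob = 26 give
        # 8**3 * 2 * 26**4 * 8 / 1e6 ~ 3.7e3 MB, i.e. roughly 3.7 GB for the incore path.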
if (mem_incore + mem_now < self.max_memory) or self.mol.incore_anyway:
return _make_eris_incore(self, mo_coeff)
elif (self.direct and type(self._scf.with_df) is GDF
and cell.dimension != 2):
# DFKCCSD does not support MDF
return _make_df_eris(self, mo_coeff)
else:
return _make_eris_outcore(self, mo_coeff)
def init_amps(self, eris):
time0 = time.clock(), time.time()
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
nkpts = self.nkpts
t1a = np.zeros((nkpts, nocca, nvira), dtype=np.complex128)
t1b = np.zeros((nkpts, noccb, nvirb), dtype=np.complex128)
t1 = (t1a, t1b)
t2aa = np.zeros((nkpts, nkpts, nkpts, nocca, nocca, nvira, nvira), dtype=np.complex128)
t2ab = np.zeros((nkpts, nkpts, nkpts, nocca, noccb, nvira, nvirb), dtype=np.complex128)
t2bb = np.zeros((nkpts, nkpts, nkpts, noccb, noccb, nvirb, nvirb), dtype=np.complex128)
mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
mo_ea_v = [e[nocca:] for e in eris.mo_energy[0]]
mo_eb_v = [e[noccb:] for e in eris.mo_energy[1]]
# Get location of padded elements in occupied and virtual space
nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(self, kind="split")
nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
eia = []
eIA = []
# Create denominators, ignoring padded elements
for ki in range(nkpts):
tmp_alpha = []
tmp_beta = []
for ka in range(nkpts):
tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])
tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
tmp_alpha.append(tmp_eia)
tmp_beta.append(tmp_eIA)
eia.append(tmp_alpha)
eIA.append(tmp_beta)
kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
kb = kconserv[ki, ka, kj]
Daa = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
Dab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
Dbb = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
t2aa[ki,kj,ka] = eris.ovov[ki,ka,kj].conj().transpose((0,2,1,3)) / Daa
t2aa[ki,kj,ka]-= eris.ovov[kj,ka,ki].conj().transpose((2,0,1,3)) / Daa
t2ab[ki,kj,ka] = eris.ovOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dab
t2bb[ki,kj,ka] = eris.OVOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dbb
t2bb[ki,kj,ka]-= eris.OVOV[kj,ka,ki].conj().transpose((2,0,1,3)) / Dbb
t2 = (t2aa,t2ab,t2bb)
d = 0.0 + 0.j
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,t2aa)
- einsum('yzxjaib,xyzijab->',eris.ovov,t2aa))
d += einsum('xzyiajb,xyzijab->',eris.ovOV,t2ab)
d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,t2bb)
- einsum('yzxjaib,xyzijab->',eris.OVOV,t2bb))
self.emp2 = d/nkpts
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2.real)
logger.timer(self, 'init mp2', *time0)
return self.emp2, t1, t2
def amplitudes_to_vector(self, t1, t2):
return amplitudes_to_vector(t1, t2)
def vector_to_amplitudes(self, vec, nmo=None, nocc=None, nkpts=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
if nkpts is None: nkpts = self.nkpts
return vector_to_amplitudes(vec, nmo, nocc, nkpts)
UCCSD = KUCCSD
#######################################
#
# _ERIS.
#
# Note the two electron integrals are stored in different orders from
# kccsd_rhf._ERIS. Integrals (ab|cd) are stored as [ka,kb,kc,a,b,c,d] here
# while the order is [ka,kc,kb,a,c,b,d] in kccsd_rhf._ERIS
#
# TODO: use the same convention as kccsd_rhf
#
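# For instance, eris.ovov is indexed as [ki, ka, kj, i, a, j, b] with the fourth k-point fixed
# by momentum conservation, so the chemists' integral (i_ki a_ka | j_kj b_kb) is read as
# eris.ovov[ki, ka, kj][i, a, j, b]. (Illustrative reading of the convention noted above.)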
def _make_eris_incore(cc, mo_coeff=None):
eris = uccsd._ChemistsERIs()
if mo_coeff is None:
mo_coeff = cc.mo_coeff
mo_coeff = convert_mo_coeff(mo_coeff) # FIXME: Remove me!
mo_coeff = padded_mo_coeff(cc, mo_coeff)
eris.mo_coeff = mo_coeff
eris.nocc = cc.nocc
nkpts = cc.nkpts
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
if gamma_point(cc.kpts):
dtype = np.double
else:
dtype = np.complex128
dtype = np.result_type(dtype, *mo_coeff[0])
eris.oooo = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype=dtype)
eris.ooov = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype=dtype)
eris.oovv = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype=dtype)
eris.ovov = np.empty((nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype=dtype)
eris.voov = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype=dtype)
eris.vovv = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype=dtype)
eris.OOOO = np.empty((nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype=dtype)
eris.OOOV = np.empty((nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype=dtype)
eris.OOVV = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype=dtype)
eris.OVOV = np.empty((nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype=dtype)
eris.VOOV = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype=dtype)
eris.VOVV = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype=dtype)
eris.ooOO = np.empty((nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype=dtype)
eris.ooOV = np.empty((nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype=dtype)
eris.ooVV = np.empty((nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype=dtype)
eris.ovOV = np.empty((nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype=dtype)
eris.voOV = np.empty((nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype=dtype)
eris.voVV = np.empty((nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype=dtype)
eris.OOoo = None
eris.OOov = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype=dtype)
eris.OOvv = np.empty((nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype=dtype)
eris.OVov = np.empty((nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype=dtype)
eris.VOov = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype=dtype)
eris.VOvv = np.empty((nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype=dtype)
_kuccsd_eris_common_(cc, eris)
thisdf = cc._scf.with_df
orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
eris.vvvv = thisdf.ao2mo_7d(orbva, factor=1./nkpts)
eris.VVVV = thisdf.ao2mo_7d(orbvb, factor=1./nkpts)
eris.vvVV = thisdf.ao2mo_7d([orbva,orbva,orbvb,orbvb], factor=1./nkpts)
return eris
def _kuccsd_eris_common_(cc, eris, buf=None):
from pyscf.pbc import tools
from pyscf.pbc.cc.ccsd import _adjust_occ
#if not (cc.frozen is None or cc.frozen == 0):
# raise NotImplementedError('cc.frozen = %s' % str(cc.frozen))
cput0 = (time.clock(), time.time())
log = logger.new_logger(cc)
cell = cc._scf.cell
thisdf = cc._scf.with_df
kpts = cc.kpts
nkpts = cc.nkpts
mo_coeff = eris.mo_coeff
nocca, noccb = eris.nocc
nmoa, nmob = cc.nmo
mo_a, mo_b = mo_coeff
# Re-make our fock MO matrix elements from density and fock AO
dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
hcore = cc._scf.get_hcore()
with lib.temporary_env(cc._scf, exxdiv=None):
vhf = cc._scf.get_veff(cell, dm)
focka = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[0][k], mo))
for k, mo in enumerate(mo_a)]
fockb = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[1][k], mo))
for k, mo in enumerate(mo_b)]
eris.fock = (np.asarray(focka), np.asarray(fockb))
eris.e_hf = cc._scf.energy_tot(dm=dm, vhf=vhf)
madelung = tools.madelung(cell, kpts)
mo_ea = [focka[k].diagonal().real for k in range(nkpts)]
mo_eb = [fockb[k].diagonal().real for k in range(nkpts)]
mo_ea = [_adjust_occ(e, nocca, -madelung) for e in mo_ea]
mo_eb = [_adjust_occ(e, noccb, -madelung) for e in mo_eb]
eris.mo_energy = (mo_ea, mo_eb)
orboa = np.asarray(mo_coeff[0][:,:,:nocca], order='C')
orbob = np.asarray(mo_coeff[1][:,:,:noccb], order='C')
#orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
#orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
dtype = np.result_type(*focka).char
# The momentum conservation array
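    # kconserv[kp,kq,kr] gives ks such that kp - kq + kr - ks = G, i.e. the
    # fourth k-point allowed by crystal momentum conservation for (pq|rs).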
kconserv = cc.khelper.kconserv
out = None
if isinstance(buf, h5py.Group):
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmoa,nmoa), dtype)
oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[0],mo_coeff[0]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
eris.oooo[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,:nocca]
eris.ooov[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,nocca:]
eris.oovv[kp,kq,kr] = tmp[:nocca,:nocca,nocca:,nocca:]
eris.ovov[kp,kq,kr] = tmp[:nocca,nocca:,:nocca,nocca:]
eris.voov[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,:nocca].conj().transpose(1,0,3,2)
eris.vovv[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,nocca:].conj().transpose(1,0,3,2)
oppp = None
if isinstance(buf, h5py.Group):
del(buf['tmp'])
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmob,nmob), dtype)
oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[1],mo_coeff[1]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
eris.OOOO[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,:noccb]
eris.OOOV[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,noccb:]
eris.OOVV[kp,kq,kr] = tmp[:noccb,:noccb,noccb:,noccb:]
eris.OVOV[kp,kq,kr] = tmp[:noccb,noccb:,:noccb,noccb:]
eris.VOOV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,:noccb].conj().transpose(1,0,3,2)
eris.VOVV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,noccb:].conj().transpose(1,0,3,2)
oppp = None
if isinstance(buf, h5py.Group):
del(buf['tmp'])
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmob,nmob), dtype)
oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[1],mo_coeff[1]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
eris.ooOO[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,:noccb]
eris.ooOV[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,noccb:]
eris.ooVV[kp,kq,kr] = tmp[:nocca,:nocca,noccb:,noccb:]
eris.ovOV[kp,kq,kr] = tmp[:nocca,nocca:,:noccb,noccb:]
eris.voOV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,:noccb].conj().transpose(1,0,3,2)
eris.voVV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,noccb:].conj().transpose(1,0,3,2)
oppp = None
if isinstance(buf, h5py.Group):
del(buf['tmp'])
out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmoa,nmoa), dtype)
oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[0],mo_coeff[0]], kpts,
factor=1./nkpts, out=out)
for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
ks = kconserv[kp,kq,kr]
tmp = np.asarray(oppp[kp,kq,kr])
#eris.OOoo[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,:nocca]
eris.OOov[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,nocca:]
eris.OOvv[kp,kq,kr] = tmp[:noccb,:noccb,nocca:,nocca:]
eris.OVov[kp,kq,kr] = tmp[:noccb,noccb:,:nocca,nocca:]
eris.VOov[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,:nocca].conj().transpose(1,0,3,2)
eris.VOvv[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,nocca:].conj().transpose(1,0,3,2)
oppp = None
log.timer('CCSD integral transformation', *cput0)
return eris
def _make_eris_outcore(cc, mo_coeff=None):
eris = uccsd._ChemistsERIs()
if mo_coeff is None:
mo_coeff = cc.mo_coeff
mo_coeff = convert_mo_coeff(mo_coeff) # FIXME: Remove me!
mo_coeff = padded_mo_coeff(cc, mo_coeff)
eris.mo_coeff = mo_coeff
eris.nocc = cc.nocc
nkpts = cc.nkpts
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
if gamma_point(cc.kpts):
dtype = np.double
else:
dtype = np.complex128
dtype = np.result_type(dtype, *mo_coeff[0]).char
eris.feri = feri = lib.H5TmpFile()
eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
eris.vvvv = feri.create_dataset('vvvv', (nkpts,nkpts,nkpts,nvira,nvira,nvira,nvira), dtype)
eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
eris.VVVV = feri.create_dataset('VVVV', (nkpts,nkpts,nkpts,nvirb,nvirb,nvirb,nvirb), dtype)
eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
eris.vvVV = feri.create_dataset('vvVV', (nkpts,nkpts,nkpts,nvira,nvira,nvirb,nvirb), dtype)
eris.OOoo = None
eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
eris.VVvv = None
fswap = lib.H5TmpFile()
_kuccsd_eris_common_(cc, eris, fswap)
fswap = None
thisdf = cc._scf.with_df
orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')
thisdf.ao2mo_7d(orbva, cc.kpts, factor=1./nkpts, out=eris.vvvv)
thisdf.ao2mo_7d(orbvb, cc.kpts, factor=1./nkpts, out=eris.VVVV)
thisdf.ao2mo_7d([orbva,orbva,orbvb,orbvb], cc.kpts, factor=1./nkpts, out=eris.vvVV)
return eris
def _make_df_eris(cc, mo_coeff=None):
from pyscf.pbc.df import df
from pyscf.ao2mo import _ao2mo
cell = cc._scf.cell
if cell.dimension == 2:
raise NotImplementedError
eris = uccsd._ChemistsERIs()
if mo_coeff is None:
mo_coeff = cc.mo_coeff
mo_coeff = padded_mo_coeff(cc, mo_coeff)
eris.mo_coeff = mo_coeff
eris.nocc = cc.nocc
thisdf = cc._scf.with_df
kpts = cc.kpts
nkpts = cc.nkpts
nocca, noccb = cc.nocc
nmoa, nmob = cc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
#if getattr(thisdf, 'auxcell', None):
# naux = thisdf.auxcell.nao_nr()
#else:
# naux = thisdf.get_naoaux()
nao = cell.nao_nr()
mo_kpts_a, mo_kpts_b = eris.mo_coeff
if gamma_point(kpts):
dtype = np.double
else:
dtype = np.complex128
dtype = np.result_type(dtype, *mo_kpts_a)
eris.feri = feri = lib.H5TmpFile()
eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
eris.vvvv = None
eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
eris.VVVV = None
eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
eris.vvVV = None
eris.OOoo = None
eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
eris.VVvv = None
fswap = lib.H5TmpFile()
_kuccsd_eris_common_(cc, eris, fswap)
fswap = None
eris.Lpv = Lpv = np.empty((nkpts,nkpts), dtype=object)
eris.LPV = LPV = np.empty((nkpts,nkpts), dtype=object)
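    # Lpv[ki,kj] / LPV[ki,kj] will hold the three-center DF integrals with one
    # full MO index and one virtual index, shaped (naux, nmo, nvir) per k-point
    # pair; they are filled from the stored j3c integrals below.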
with h5py.File(thisdf._cderi, 'r') as f:
kptij_lst = f['j3c-kptij'].value
tao = []
ao_loc = None
for ki, kpti in enumerate(kpts):
for kj, kptj in enumerate(kpts):
kpti_kptj = np.array((kpti,kptj))
Lpq = np.asarray(df._getitem(f, 'j3c', kpti_kptj, kptij_lst))
mo_a = np.hstack((mo_kpts_a[ki], mo_kpts_a[kj][:,nocca:]))
mo_b = np.hstack((mo_kpts_b[ki], mo_kpts_b[kj][:,noccb:]))
mo_a = np.asarray(mo_a, dtype=dtype, order='F')
mo_b = np.asarray(mo_b, dtype=dtype, order='F')
if dtype == np.double:
outa = _ao2mo.nr_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), aosym='s2')
outb = _ao2mo.nr_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), aosym='s2')
else:
#Note: Lpq.shape[0] != naux if linear dependency is found in auxbasis
if Lpq[0].size != nao**2: # aosym = 's2'
Lpq = lib.unpack_tril(Lpq).astype(np.complex128)
outa = _ao2mo.r_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), tao, ao_loc)
outb = _ao2mo.r_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), tao, ao_loc)
Lpv[ki,kj] = outa.reshape(-1,nmoa,nvira)
LPV[ki,kj] = outb.reshape(-1,nmob,nvirb)
return eris
scf.kuhf.KUHF.CCSD = lib.class_as_method(KUCCSD)
if __name__ == '__main__':
from pyscf.pbc import gto, cc
from pyscf import lo
cell = gto.Cell()
cell.atom='''
He 0.000000000000 0.000000000000 0.000000000000
He 1.685068664391 1.685068664391 1.685068664391
'''
#cell.basis = [[0, (1., 1.)], [1, (.5, 1.)]]
cell.basis = [[0, (1., 1.)], [0, (.5, 1.)]]
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.mesh = [13]*3
cell.build()
np.random.seed(2)
    # Running HF and CCSD with a 1x1x3 Monkhorst-Pack k-point mesh
kmf = scf.KUHF(cell, kpts=cell.make_kpts([1,1,3]), exxdiv=None)
nmo = cell.nao_nr()
kmf.mo_occ = np.zeros((2,3,nmo))
kmf.mo_occ[0,:,:3] = 1
kmf.mo_occ[1,:,:1] = 1
kmf.mo_energy = np.arange(nmo) + np.random.random((2,3,nmo)) * .3
kmf.mo_energy[kmf.mo_occ == 0] += 2
mo = (np.random.random((2,3,nmo,nmo)) +
np.random.random((2,3,nmo,nmo))*1j - .5-.5j)
s = kmf.get_ovlp()
kmf.mo_coeff = np.empty_like(mo)
nkpts = len(kmf.kpts)
for k in range(nkpts):
kmf.mo_coeff[0,k] = lo.orth.vec_lowdin(mo[0,k], s[k])
kmf.mo_coeff[1,k] = lo.orth.vec_lowdin(mo[1,k], s[k])
def rand_t1_t2(mycc):
nkpts = mycc.nkpts
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
np.random.seed(1)
t1a = (np.random.random((nkpts,nocca,nvira)) +
np.random.random((nkpts,nocca,nvira))*1j - .5-.5j)
t1b = (np.random.random((nkpts,noccb,nvirb)) +
np.random.random((nkpts,noccb,nvirb))*1j - .5-.5j)
t2aa = (np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)) +
np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira))*1j - .5-.5j)
kconserv = kpts_helper.get_kconserv(kmf.cell, kmf.kpts)
t2aa = t2aa - t2aa.transpose(1,0,2,4,3,5,6)
tmp = t2aa.copy()
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kk, kj]
t2aa[ki,kj,kk] = t2aa[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
t2ab = (np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)) +
np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb))*1j - .5-.5j)
t2bb = (np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)) +
np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb))*1j - .5-.5j)
t2bb = t2bb - t2bb.transpose(1,0,2,4,3,5,6)
tmp = t2bb.copy()
for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
kl = kconserv[ki, kk, kj]
t2bb[ki,kj,kk] = t2bb[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
t1 = (t1a, t1b)
t2 = (t2aa, t2ab, t2bb)
return t1, t2
mycc = KUCCSD(kmf)
eris = mycc.ao2mo()
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (2.2677885702176339-2.5150764056992041j))
print(lib.finger(Ht1[1]) - (-51.643438947846086+526.58026126100458j))
print(lib.finger(Ht2[0]) - (-29.490813482748258-8.7509143690136018j))
print(lib.finger(Ht2[1]) - (2256.0440056839416-193.16480896707569j))
print(lib.finger(Ht2[2]) - (-250.59447681063182-397.57189085666982j))
kmf.mo_occ[:] = 0
kmf.mo_occ[:,:,:2] = 1
mycc = KUCCSD(kmf)
eris = mycc.ao2mo()
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (5.4622516572705662+1.990046725028729j))
print(lib.finger(Ht1[1]) - (4.8801120611799043-5.9940463787453488j))
print(lib.finger(Ht2[0]) - (-192.38864512375193+305.14191018543983j))
print(lib.finger(Ht2[1]) - (23085.044505825954-11527.802302550244j))
print(lib.finger(Ht2[2]) - (115.57932548288559-40.888597453928604j))
from pyscf.pbc.cc import kccsd
kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
r1 = kgcc.spatial2spin(t1)
r2 = kgcc.spatial2spin(t2)
ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
ue = energy(mycc, t1, t2, eris)
print(abs(ge - ue))
print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
print(abs(r2 - kgcc.spatial2spin(Ht2)).max())
kmf = kmf.density_fit(auxbasis=[[0, (1., 1.)]])
mycc = KUCCSD(kmf)
eris = _make_df_eris(mycc, mycc.mo_coeff)
t1, t2 = rand_t1_t2(mycc)
Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
print(lib.finger(Ht1[0]) - (6.9341372555790013+0.87313546297025901j))
print(lib.finger(Ht1[1]) - (6.7538005829391992-0.95702422534126796j))
print(lib.finger(Ht2[0]) - (-509.24544842179876+448.00925776269855j))
print(lib.finger(Ht2[1]) - (107.5960392010511+40.869216223808067j) )
print(lib.finger(Ht2[2]) - (-196.75910296082139+218.53005038057515j))
kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
r1 = kgcc.spatial2spin(t1)
r2 = kgcc.spatial2spin(t2)
ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
print(abs(r2 - kgcc.spatial2spin(Ht2)).max())
print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
#abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j) )<1e-8,
abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j) )<1e-8,
abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j) )<1e-8,
abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j) )<1e-8,
abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j) )<1e-8,
abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j) )<1e-8,
abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
#abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j) )<1e-8,
#abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j) )<1e-8,
abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j) )<1e-8,
abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j) )<1e-8,
abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j) )<1e-8]))
eris = _make_eris_outcore(mycc, mycc.mo_coeff)
print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
#abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j) )<1e-8,
abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j) )<1e-8,
abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j) )<1e-8,
abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j) )<1e-8,
abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j) )<1e-8,
abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j) )<1e-8,
abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
#abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j) )<1e-8,
#abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j) )<1e-8,
abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j) )<1e-8,
abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j) )<1e-8,
abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j) )<1e-8,
abs(lib.finger(eris.vvvv) - (-0.080512851258903173-0.2868384266725581j) )<1e-8,
abs(lib.finger(eris.vvVV) - (-0.5137063762484736+1.1036785801263898j) )<1e-8,
#abs(lib.finger(eris.VVvv) - (0.16468487082491939+0.25730725586992997j) )<1e-8,
abs(lib.finger(eris.VVVV) - (-0.56714875196802295+0.058636785679170501j) )<1e-8]))
|
the-stack_0_13746 | # -*- coding: utf-8 -*-
from couchbase_helper.documentgenerator import doc_generator
from failover.AutoFailoverBaseTest import AutoFailoverBaseTest
from custom_exceptions.exception import RebalanceFailedException, \
ServerUnavailableException
from membase.api.rest_client import RestConnection
class MultiNodeAutoFailoverTests(AutoFailoverBaseTest):
def setUp(self):
super(MultiNodeAutoFailoverTests, self).setUp()
self.data_load_spec = self.input.param("data_load_spec",
"volume_test_load")
self.master = self.servers[0]
def tearDown(self):
super(MultiNodeAutoFailoverTests, self).tearDown()
def _is_failover_expected(self, failure_node_number):
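        # Roughly: failover is not expected when max_count allows fewer
        # auto-failovers than the number of failed nodes (with the pause
        # between failure actions shorter than the timeout), or when there
        # are not enough replicas to cover the failed nodes.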
failover_not_expected = (
self.max_count == 1 and failure_node_number > 1 and
self.pause_between_failover_action <
self.timeout or self.num_replicas < 1)
failover_not_expected = failover_not_expected or (
1 < self.max_count < failure_node_number and
self.pause_between_failover_action < self.timeout or
self.num_replicas < failure_node_number)
return not failover_not_expected
def _multi_node_failover(self):
servers_to_fail = self.server_to_fail
for i in range(self.max_count):
self.server_to_fail = [servers_to_fail[i]]
self.failover_expected = self._is_failover_expected(i + 1)
self.failover_actions[self.failover_action](self)
self.sleep(self.timeout)
def test_autofailover(self):
"""
Test the basic autofailover for different failure scenarios.
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required.
3. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def _get_server_group_nodes(self, server_group):
servers_in_group = self.zones[server_group]
server_group_nodes = []
for server in self.servers:
if server.ip in servers_in_group:
server_group_nodes.append(server)
return server_group_nodes
def test_autofailover_for_server_group(self):
self.enable_autofailover_and_validate()
self.shuffle_nodes_between_zones_and_rebalance()
self.sleep(30,"waiting")
self.server_to_fail = self._get_server_group_nodes("Group 2")
self.failover_expected = True
tasks = self.subsequent_load_gen()
try:
self.failover_actions[self.failover_action](self)
except:
            result, _ = self._check_for_autofailover_initiation_for_server_group_failover(self.server_to_fail)
self.assertTrue(result,
"Server group failover msg was not seen in logs")
finally:
self.sleep(300)
self.start_couchbase_server()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_during_rebalance(self):
"""
        Test autofailover for different failure scenarios while a rebalance
        of nodes is in progress.
1. Enable autofailover and validate
2. Start rebalance of nodes by either adding or removing nodes.
3. Fail a node and validate if node is failed over if required.
4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
rebalance_task = self.task.async_rebalance(self.servers,
self.servers_to_add,
self.servers_to_remove)
self.sleep(2)
self._multi_node_failover()
tasks = self.subsequent_load_gen()
try:
rebalance_task.result()
except RebalanceFailedException:
pass
except ServerUnavailableException:
pass
except Exception:
pass
else:
self.fail("Rebalance should fail since a node went down")
finally:
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def test_autofailover_after_rebalance(self):
"""
        Test autofailover for different failure scenarios after a rebalance
        of nodes has completed.
        1. Enable autofailover and validate.
        2. Start a rebalance by either adding or removing nodes and wait for
        the rebalance to complete.
        3. Fail a node and validate that it is failed over if required.
        4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
rebalance_success = self.task.rebalance(self.servers,
self.servers_to_add,
self.servers_to_remove)
if not rebalance_success:
self.disable_firewall()
self.fail("Rebalance failed. Check logs")
tasks = self.subsequent_load_gen()
self._multi_node_failover()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def test_rebalance_after_autofailover(self):
"""
        Test autofailover for different failure scenarios and then rebalance
        the nodes.
        1. Enable autofailover and validate.
        2. Fail a node and validate that it is failed over if required.
        3. Rebalance the cluster by adding and removing nodes and wait for
        the rebalance to complete.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
for node in self.servers_to_add:
self.rest.add_node(user=self.orchestrator.rest_username,
password=self.orchestrator.rest_password,
remoteIp=node.ip)
nodes = self.rest.node_statuses()
nodes_to_remove = [node.id for node in nodes if
node.ip in [t.ip for t in
self.servers_to_remove]]
nodes = [node.id for node in nodes]
started = self.rest.rebalance(nodes, nodes_to_remove)
rebalance_success = False
if started:
rebalance_success = self.rest.monitorRebalance()
if (not rebalance_success or not started) and not \
self.failover_expected:
self.fail("Rebalance failed. Check logs")
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_and_addback_of_node(self):
"""
Test autofailover of nodes and then addback of the node after
failover
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required
3. Addback node and validate that the addback was successful.
:return: Nothing
"""
if not self.failover_expected:
self.log.info("Since no failover is expected in the test, "
"skipping the test")
return
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
self.server_to_fail = self._servers_to_fail()
self.bring_back_failed_nodes_up()
self.sleep(30)
self.nodes = self.rest.node_statuses()
for node in self.server_to_fail:
self.rest.add_back_node("ns_1@{}".format(node.ip))
self.rest.set_recovery_type("ns_1@{}".format(node.ip),
self.recovery_strategy)
self.rest.rebalance(otpNodes=[node.id for node in self.nodes])
msg = "rebalance failed while recovering failover nodes {0}" \
.format(self.server_to_fail[0])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg)
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_and_remove_failover_node(self):
"""
Test autofailover of nodes and remove the node via rebalance
after
the failover.
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required
3. Rebalance of node if failover was successful and validate.
:return:
"""
if not self.failover_expected:
self.log.info("Since no failover is expected in the test, "
"skipping the test")
return
tasks = self.subsequent_load_gen()
self.enable_autofailover_and_validate()
self.sleep(5)
self._multi_node_failover()
self.nodes = self.rest.node_statuses()
self.remove_after_failover = True
self.rest.rebalance(otpNodes=[node.id for node in self.nodes])
msg = "rebalance failed while removing failover nodes {0}" \
.format(self.server_to_fail[0])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True),
msg)
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def _check_for_autofailover_initiation_for_server_group_failover(
self, failed_over_nodes):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
expected_log = "Starting failing over ['ns_1@{}','ns_1@{}']".format(
failed_over_nodes[0].ip, failed_over_nodes[1].ip)
self.log.info("ui_logs_text: {0}".format(ui_logs_text))
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def subsequent_load_gen(self, async_load=True):
if self.spec_name is None:
subsequent_load_gen = doc_generator(self.key,
self.num_items,
self.num_items*2,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type)
tasks = self.async_load_all_buckets(
subsequent_load_gen, "create", 0)
return tasks
else:
doc_loading_spec = self.bucket_util.get_crud_template_from_package(
self.data_load_spec)
tasks = self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.bucket_util.buckets,
doc_loading_spec,
mutation_num=0,
async_load=async_load)
return tasks
def wait_for_async_data_load_to_complete(self, task):
self.task.jython_task_manager.get_task_result(task)
|
the-stack_0_13748 | #!/usr/bin/env python
from setuptools import (
setup,
find_packages,
)
extras_require = {
'test': [
'cryptography',
'pytest-cov',
'pytest-django',
'pytest-xdist',
'pytest',
'tox',
],
'lint': [
'flake8',
'pep8',
'isort',
],
'doc': [
'Sphinx>=1.6.5,<2',
'sphinx_rtd_theme>=0.1.9',
],
'dev': [
'bumpversion>=0.5.3,<1',
'pytest-watch',
'wheel',
'twine',
'ipython',
],
'python-jose': [
'python-jose==3.0.0',
],
}
extras_require['dev'] = (
extras_require['dev'] + # noqa: W504
extras_require['test'] + # noqa: W504
extras_require['lint'] + # noqa: W504
extras_require['doc'] + # noqa: W504
extras_require['python-jose']
)
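# With the extras above, e.g. `pip install -e .[dev]` installs the combined
# development, test, lint, doc and python-jose dependencies in one step.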
setup(
name='djangorestframework_simplejwt',
version='4.4.0',
url='https://github.com/davesque/django-rest-framework-simplejwt',
license='MIT',
description='A minimal JSON Web Token authentication plugin for Django REST Framework',
long_description=open('README.rst', 'r', encoding='utf-8').read(),
author='David Sanders',
author_email='[email protected]',
install_requires=[
'django',
'djangorestframework',
'pyjwt',
],
python_requires='>=3.6,<3.9',
extras_require=extras_require,
packages=find_packages(exclude=['tests', 'tests.*', 'licenses', 'requirements']),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP',
]
)
|
the-stack_0_13750 | import unittest
import provider.s3lib as s3lib
import tests.settings_mock as settings_mock
from mock import mock, patch, MagicMock
from ddt import ddt, data, unpack
from boto.s3.key import Key
from boto.s3.prefix import Prefix
class FakeKey(Key):
def __init__(self, name):
self.name = name
class FakePrefix(Prefix):
def __init__(self, name):
self.name = name
class FakeBucket(object):
items = []
def list(self, prefix=None, delimiter=None, headers=None):
return self.items
@ddt
class TestProviderS3Lib(unittest.TestCase):
def setUp(self):
self.fake_s3_keys = [
FakeKey('one.xml'),
FakeKey('one.tif'),
FakeKey('one.pdf')
]
self.fake_s3_prefixes = [
FakePrefix('two/')
]
def test_get_s3_key_names_from_bucket(self):
"simple tests for coverage"
fake_bucket = FakeBucket()
fake_bucket.items += self.fake_s3_keys
fake_bucket.items += self.fake_s3_prefixes
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(fake_bucket)), 3)
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(
fake_bucket, file_extensions=['.xml'])), 1)
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(
fake_bucket, file_extensions=['.xml', '.pdf'])), 2)
self.assertEqual(len(s3lib.get_s3_key_names_from_bucket(
fake_bucket, key_type='prefix')), 1)
@data(
(99999, ['pmc/zip/elife-05-19405.zip'], None),
(19405, ['pmc/zip/elife-05-19405.zip'], 'pmc/zip/elife-05-19405.zip'),
(24052, [
            'pmc/zip/elife-06-24052.zip',
'pmc/zip/elife-06-24052.r1.zip',
'pmc/zip/elife-06-24052.r2.zip',
], 'pmc/zip/elife-06-24052.r2.zip'),
# strange example below would not normally exist but is for code coverage
(24052, [
'pmc/zip/elife-04-24052.zip',
'pmc/zip/elife-05-24052.zip',
'pmc/zip/elife-05-24052.r1.zip'
], 'pmc/zip/elife-05-24052.r1.zip'),
)
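    # Each @data tuple is (doi_id, s3_key_names, expected_s3_key_name);
    # @unpack expands it into the test method's positional arguments.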
@unpack
def test_latest_pmc_zip_revision(self, doi_id, s3_key_names, expected_s3_key_name):
self.assertEqual(s3lib.latest_pmc_zip_revision(doi_id, s3_key_names), expected_s3_key_name)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13752 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common client library functions and classes used by all products."""
import abc
import base64
import binascii
from functools import wraps
import inspect
from itertools import izip
import locale
import logging
import logging.config
import os
import ssl
import sys
import threading
import urllib2
import warnings
import lxml.builder
import lxml.etree
import requests.exceptions
import suds
import suds.cache
import suds.client
import suds.mx.literal
import suds.plugin
import suds.transport.http
import suds.xsd.doctor
import yaml
import zeep
import zeep.cache
import zeep.exceptions
import zeep.helpers
import zeep.transports
import zeep.xsd
import googleads.errors
import googleads.oauth2
import googleads.util
try:
import urllib2.HTTPSHandler
except ImportError:
# Python versions below 2.7.9 / 3.4 won't have this. In order to offer legacy
# support (for now) we will work around this gracefully, but users will
# not have certificate validation performed until they update.
pass
logging.getLogger('suds.client').addFilter(googleads.util.GetSudsClientFilter())
logging.getLogger('suds.mx.core').addFilter(
googleads.util.GetSudsMXCoreFilter())
logging.getLogger('suds.mx.literal').addFilter(
googleads.util.GetSudsMXLiteralFilter())
logging.getLogger('suds.transport.http').addFilter(
googleads.util.GetSudsTransportFilter())
_logger = logging.getLogger(__name__)
_PY_VERSION_MAJOR = sys.version_info.major
_PY_VERSION_MINOR = sys.version_info.minor
_PY_VERSION_MICRO = sys.version_info.micro
_DEPRECATED_VERSION_TEMPLATE = (
'This library is being run by an unsupported Python version (%s.%s.%s). In '
'order to benefit from important security improvements and ensure '
'compatibility with this library, upgrade to Python 2.7.9 or higher.')
VERSION = '17.0.0'
_COMMON_LIB_SIG = 'googleads/%s' % VERSION
_LOGGING_KEY = 'logging'
_HTTP_PROXY_YAML_KEY = 'http'
_HTTPS_PROXY_YAML_KEY = 'https'
_PROXY_CONFIG_KEY = 'proxy_config'
_PYTHON_VERSION = 'Python/%d.%d.%d' % (
_PY_VERSION_MAJOR, _PY_VERSION_MINOR, _PY_VERSION_MICRO)
# The required keys in the authentication dictionary that are used to construct
# installed application OAuth2 credentials.
_OAUTH2_INSTALLED_APP_KEYS = ('client_id', 'client_secret', 'refresh_token')
# The keys in the authentication dictionary that are used to construct service
# account OAuth2 credentials.
_OAUTH2_SERVICE_ACCT_KEYS = ('path_to_private_key_file',)
_OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL = ('delegated_account',)
# A key used to configure the client to accept and automatically decompress
# gzip encoded SOAP responses.
ENABLE_COMPRESSION_KEY = 'enable_compression'
# A key used to configure the client to send arbitrary headers in SOAP requests.
CUSTOM_HEADERS_KEY = 'custom_http_headers'
# A key used to specify the SOAP implementation to use.
SOAP_IMPLEMENTATION_KEY = 'soap_impl'
# Global variables used to enable and store utility usage stats.
_utility_registry = googleads.util.UtilityRegistry()
_UTILITY_REGISTER_YAML_KEY = 'include_utilities_in_user_agent'
_UTILITY_LOCK = threading.Lock()
# Apply any necessary patches to dependency libraries.
googleads.util.PatchHelper().Apply()
def GenerateLibSig(short_name):
"""Generates a library signature suitable for a user agent field.
Args:
short_name: The short, product-specific string name for the library.
Returns:
A library signature string to append to user-supplied user-agent value.
"""
with _UTILITY_LOCK:
utilities_used = ', '.join([utility for utility
in sorted(_utility_registry)])
_utility_registry.Clear()
if utilities_used:
return ' (%s, %s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION,
utilities_used)
else:
return ' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)
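# Illustrative output (a sketch; 'AwApi-Python' and the Python version are just
# examples):
#   GenerateLibSig('AwApi-Python')
#   -> ' (AwApi-Python, googleads/17.0.0, Python/2.7.16)'
# Utility names are appended inside the parentheses when the registry is
# enabled and non-empty.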
class CommonClient(object):
"""Contains shared startup code between Ad Manager and AdWords clients."""
def __init__(self):
# Warn users on deprecated Python versions on initialization.
if _PY_VERSION_MAJOR == 2:
if _PY_VERSION_MINOR == 7 and _PY_VERSION_MICRO < 9:
_logger.warning(_DEPRECATED_VERSION_TEMPLATE, _PY_VERSION_MAJOR,
_PY_VERSION_MINOR, _PY_VERSION_MICRO)
elif _PY_VERSION_MINOR < 7:
_logger.warning(_DEPRECATED_VERSION_TEMPLATE, _PY_VERSION_MAJOR,
_PY_VERSION_MINOR, _PY_VERSION_MICRO)
# Warn users about using non-utf8 encoding
_, encoding = locale.getdefaultlocale()
if encoding is None or encoding.lower() != 'utf-8':
_logger.warn('Your default encoding, %s, is not UTF-8. Please run this'
' script with UTF-8 encoding to avoid errors.', encoding)
def LoadFromString(yaml_doc, product_yaml_key, required_client_values,
optional_product_values):
"""Loads the data necessary for instantiating a client from file storage.
In addition to the required_client_values argument, the yaml file must supply
the keys used to create OAuth2 credentials. It may also optionally set proxy
configurations.
Args:
yaml_doc: the yaml document whose keys should be used.
product_yaml_key: The key to read in the yaml as a string.
required_client_values: A tuple of strings representing values which must
be in the yaml file for a supported API. If one of these keys is not in
the yaml file, an error will be raised.
optional_product_values: A tuple of strings representing optional values
which may be in the yaml file.
Returns:
A dictionary map of the keys in the yaml file to their values. This will not
contain the keys used for OAuth2 client creation and instead will have a
GoogleOAuth2Client object stored in the 'oauth2_client' field.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required_client_values key was missing or an OAuth2 key was missing.
"""
data = yaml.safe_load(yaml_doc) or {}
if 'dfp' in data:
raise googleads.errors.GoogleAdsValueError(
'Please replace the "dfp" key in the configuration YAML string with'
'"ad_manager" to fix this issue.')
logging_config = data.get(_LOGGING_KEY)
if logging_config:
logging.config.dictConfig(logging_config)
try:
product_data = data[product_yaml_key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'The "%s" configuration is missing'
% (product_yaml_key,))
if not isinstance(product_data, dict):
raise googleads.errors.GoogleAdsValueError(
'The "%s" configuration is empty or invalid'
% (product_yaml_key,))
IncludeUtilitiesInUserAgent(data.get(_UTILITY_REGISTER_YAML_KEY, True))
original_keys = list(product_data.keys())
client_kwargs = {}
try:
for key in required_client_values:
client_kwargs[key] = product_data[key]
del product_data[key]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Some of the required values are missing. Required '
'values are: %s, actual values are %s'
% (required_client_values, original_keys))
proxy_config_data = data.get(_PROXY_CONFIG_KEY, {})
proxy_config = _ExtractProxyConfig(product_yaml_key, proxy_config_data)
client_kwargs['proxy_config'] = proxy_config
client_kwargs['oauth2_client'] = _ExtractOAuth2Client(
product_yaml_key, product_data, proxy_config)
client_kwargs[ENABLE_COMPRESSION_KEY] = data.get(
ENABLE_COMPRESSION_KEY, False)
client_kwargs[CUSTOM_HEADERS_KEY] = data.get(CUSTOM_HEADERS_KEY, None)
if SOAP_IMPLEMENTATION_KEY in data:
client_kwargs[SOAP_IMPLEMENTATION_KEY] = data[SOAP_IMPLEMENTATION_KEY]
for value in optional_product_values:
if value in product_data:
client_kwargs[value] = product_data[value]
del product_data[value]
if product_data:
warnings.warn('Could not recognize the following keys: %s. '
'They were ignored.' % (product_data,), stacklevel=3)
return client_kwargs
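# A minimal yaml_doc sketch for the installed-application OAuth2 flow
# ('ad_manager' is only an example product_yaml_key; the non-OAuth2 keys under
# it depend on the calling client):
#
#   ad_manager:
#     application_name: INSERT_APPLICATION_NAME_HERE
#     network_code: INSERT_NETWORK_CODE_HERE
#     client_id: INSERT_CLIENT_ID_HERE
#     client_secret: INSERT_CLIENT_SECRET_HERE
#     refresh_token: INSERT_REFRESH_TOKEN_HERE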
def LoadFromStorage(path, product_yaml_key, required_client_values,
optional_product_values):
"""Loads the data necessary for instantiating a client from file storage.
In addition to the required_client_values argument, the yaml file must supply
the keys used to create OAuth2 credentials. It may also optionally set proxy
configurations.
Args:
path: A path string to the yaml document whose keys should be used.
product_yaml_key: The key to read in the yaml as a string.
required_client_values: A tuple of strings representing values which must
be in the yaml file for a supported API. If one of these keys is not in
the yaml file, an error will be raised.
optional_product_values: A tuple of strings representing optional values
which may be in the yaml file.
Returns:
A dictionary map of the keys in the yaml file to their values. This will not
contain the keys used for OAuth2 client creation and instead will have a
GoogleOAuth2Client object stored in the 'oauth2_client' field.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required_client_values key was missing or an OAuth2 key was missing.
"""
if not os.path.isabs(path):
path = os.path.expanduser(path)
try:
with open(path, 'rb') as handle:
yaml_doc = handle.read()
except IOError:
raise googleads.errors.GoogleAdsValueError(
'Given yaml file, %s, could not be opened.' % path)
try:
client_kwargs = LoadFromString(yaml_doc, product_yaml_key,
required_client_values,
optional_product_values)
except googleads.errors.GoogleAdsValueError as e:
raise googleads.errors.GoogleAdsValueError(
'Given yaml file, %s, could not find some keys. %s' % (path, e))
return client_kwargs
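# Illustrative call (a sketch; the required/optional tuples shown here are
# hypothetical -- each product client supplies its own):
#
#   client_kwargs = LoadFromStorage('~/googleads.yaml', 'ad_manager',
#                                   ('application_name',), ('network_code',))
#   # client_kwargs now holds 'oauth2_client', 'proxy_config' and the
#   # requested product values.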
def _ExtractOAuth2Client(product_yaml_key, product_data, proxy_config):
"""Generates an GoogleOAuth2Client subclass using the given product_data.
Args:
product_yaml_key: a string key identifying the product being configured.
product_data: a dict containing the configurations for a given product.
proxy_config: a ProxyConfig instance.
Returns:
An instantiated GoogleOAuth2Client subclass.
Raises:
A GoogleAdsValueError if the OAuth2 configuration for the given product is
misconfigured.
"""
oauth2_kwargs = {
'proxy_config': proxy_config
}
if all(config in product_data for config in _OAUTH2_INSTALLED_APP_KEYS):
oauth2_args = [
product_data['client_id'], product_data['client_secret'],
product_data['refresh_token']
]
oauth2_client = googleads.oauth2.GoogleRefreshTokenClient
for key in _OAUTH2_INSTALLED_APP_KEYS:
del product_data[key]
elif all(config in product_data for config in _OAUTH2_SERVICE_ACCT_KEYS):
oauth2_args = [
product_data['path_to_private_key_file'],
googleads.oauth2.GetAPIScope(product_yaml_key),
]
oauth2_kwargs.update({
'sub': product_data.get('delegated_account')
})
oauth2_client = googleads.oauth2.GoogleServiceAccountClient
for key in _OAUTH2_SERVICE_ACCT_KEYS:
del product_data[key]
for optional_key in _OAUTH2_SERVICE_ACCT_KEYS_OPTIONAL:
if optional_key in product_data:
del product_data[optional_key]
else:
raise googleads.errors.GoogleAdsValueError(
'Your yaml file is incorrectly configured for OAuth2. You need to '
'specify credentials for either the installed application flow (%s) '
'or service account flow (%s).' %
(_OAUTH2_INSTALLED_APP_KEYS, _OAUTH2_SERVICE_ACCT_KEYS))
return oauth2_client(*oauth2_args, **oauth2_kwargs)
def _ExtractProxyConfig(product_yaml_key, proxy_config_data):
"""Returns an initialized ProxyConfig using the given proxy_config_data.
Args:
product_yaml_key: a string indicating the client being loaded.
proxy_config_data: a dict containing the contents of proxy_config from the
YAML file.
Returns:
If there is a proxy to configure in proxy_config, this will return a
ProxyConfig instance with those settings. Otherwise, it will return None.
Raises:
A GoogleAdsValueError if one of the required keys specified by _PROXY_KEYS
is missing.
"""
cafile = proxy_config_data.get('cafile', None)
disable_certificate_validation = proxy_config_data.get(
'disable_certificate_validation', False)
http_proxy = proxy_config_data.get(_HTTP_PROXY_YAML_KEY)
https_proxy = proxy_config_data.get(_HTTPS_PROXY_YAML_KEY)
proxy_config = ProxyConfig(
http_proxy=http_proxy,
https_proxy=https_proxy,
cafile=cafile,
disable_certificate_validation=disable_certificate_validation)
return proxy_config
def _PackForSuds(obj, factory, packer=None, version=None):
"""Packs SOAP input into the format we want for suds.
The main goal here is to pack dictionaries with an 'xsi_type' key into
objects. This allows dictionary syntax to be used even with complex types
extending other complex types. The contents of dictionaries and lists/tuples
are recursively packed. Mutable types are copied - we don't mutate the input.
Args:
obj: A parameter for a SOAP request which will be packed. If this is
a dictionary or list, the contents will recursively be packed. If this
is not a dictionary or list, the contents will be recursively searched
for instances of unpacked dictionaries or lists.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
packer: An optional subclass of googleads.common.SoapPacker that provides
customized packing logic.
    version: the version of the current API, e.g. 'v201811'
Returns:
If the given obj was a dictionary that contained the 'xsi_type' key, this
will be an instance of a class generated from the WSDL. Otherwise, this will
be the same data type as the input obj was.
"""
if packer:
obj = packer.Pack(obj, version)
if obj in ({}, None):
# Force suds to serialize empty objects. There are legitimate use cases for
# this, for example passing in an empty SearchCriteria object to a DFA
# search method in order to select everything.
return suds.null()
elif isinstance(obj, dict):
if 'xsi_type' in obj:
try:
new_obj = factory.create(obj['xsi_type'])
except suds.TypeNotFound:
new_obj = factory.create(':'.join(['ns0', obj['xsi_type']]))
# Suds sends an empty XML element for enum types which are not set. None
# of Google's Ads APIs will accept this. Initializing all of the fields in
# a suds object to None will ensure that they don't get serialized at all
# unless the user sets a value. User values explicitly set to None will be
# packed into a suds.null() object.
for param, _ in new_obj:
# Another problem is that the suds.mx.appender.ObjectAppender won't
# serialize object types with no fields set, but both AdWords and Ad
# Manager rely on sending objects with just the xsi:type set. The
# below "if" statement is an ugly hack that gets this to work in all(?)
# situations by taking advantage of the fact that these classes
# generally all have a type field. The only other option is to monkey
# patch ObjectAppender.
if param.endswith('.Type'):
setattr(new_obj, param, obj['xsi_type'])
else:
setattr(new_obj, param, None)
for key in obj:
if key == 'xsi_type': continue
setattr(new_obj, key, _PackForSuds(obj[key], factory,
packer=packer))
else:
new_obj = {}
for key in obj:
new_obj[key] = _PackForSuds(obj[key], factory,
packer=packer)
return new_obj
elif isinstance(obj, (list, tuple)):
return [_PackForSuds(item, factory,
packer=packer) for item in obj]
else:
_RecurseOverObject(obj, factory)
return obj
def _RecurseOverObject(obj, factory, parent=None):
"""Recurses over a nested structure to look for changes in Suds objects.
Args:
obj: A parameter for a SOAP request field which is to be inspected and
will be packed for Suds if an xsi_type is specified, otherwise will be
left unaltered.
factory: The suds.client.Factory object which can create instances of the
classes generated from the WSDL.
parent: The parent object that contains the obj parameter to be inspected.
"""
if _IsSudsIterable(obj):
# Since in-place modification of the Suds object is taking place, the
# iterator should be done over a frozen copy of the unpacked fields.
copy_of_obj = tuple(obj)
for item in copy_of_obj:
if _IsSudsIterable(item):
if 'xsi_type' in item:
if isinstance(obj, tuple):
parent[obj[0]] = _PackForSuds(obj[1], factory)
else:
obj.remove(item)
obj.append(_PackForSuds(item, factory))
_RecurseOverObject(item, factory, obj)
def _IsSudsIterable(obj):
"""A short helper method to determine if a field is iterable for Suds."""
return obj and not isinstance(obj, basestring) and hasattr(obj, '__iter__')
def IncludeUtilitiesInUserAgent(value):
"""Configures the logging of utilities in the User-Agent.
Args:
value: a bool indicating that you want to include utility names in the
User-Agent if set True, otherwise, these will not be added.
"""
with _UTILITY_LOCK:
_utility_registry.SetEnabled(value)
def AddToUtilityRegistry(utility_name):
"""Directly add a utility to the registry, not a decorator.
Args:
utility_name: The name of the utility to add.
"""
with _UTILITY_LOCK:
_utility_registry.Add(utility_name)
def RegisterUtility(utility_name, version_mapping=None):
"""Decorator that registers a class with the given utility name.
This will only register the utilities being used if the UtilityRegistry is
enabled. Note that only the utility class's public methods will cause the
utility name to be added to the registry.
Args:
utility_name: A str specifying the utility name associated with the class.
version_mapping: A dict containing optional version strings to append to the
utility string for individual methods; where the key is the method name and
the value is the text to be appended as the version.
Returns:
The decorated class.
"""
def IsFunctionOrMethod(member):
"""Determines if given member is a function or method.
These two are used in combination to ensure that inspect finds all of a
given utility class's methods in both Python 2 and 3.
Args:
member: object that is a member of a class, to be determined whether it is
a function or method.
Returns:
A boolean that is True if the provided member is a function or method, or
False if it isn't.
"""
return inspect.isfunction(member) or inspect.ismethod(member)
def MethodDecorator(utility_method, version):
"""Decorates a method in the utility class."""
registry_name = ('%s/%s' % (utility_name, version) if version
else utility_name)
@wraps(utility_method)
def Wrapper(*args, **kwargs):
AddToUtilityRegistry(registry_name)
return utility_method(*args, **kwargs)
return Wrapper
def ClassDecorator(cls):
"""Decorates a utility class."""
for name, method in inspect.getmembers(cls, predicate=IsFunctionOrMethod):
# Public methods of the class will have the decorator applied.
if not name.startswith('_'):
# The decorator will only be applied to unbound methods; this prevents
# it from clobbering class methods. If the attribute doesn't exist, set
# None for PY3 compatibility.
if not getattr(method, '__self__', None):
setattr(cls, name, MethodDecorator(
method, version_mapping.get(name) if version_mapping else None))
return cls
return ClassDecorator
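# Illustrative use of RegisterUtility (a sketch with a hypothetical utility):
#
#   @RegisterUtility('ReportDownloader')
#   class ReportDownloader(object):
#     def DownloadReport(self, *args, **kwargs):
#       ...  # each public call adds 'ReportDownloader' to the registry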
class ProxyConfig(object):
"""A utility for configuring the usage of a proxy."""
def __init__(self, http_proxy=None, https_proxy=None, cafile=None,
disable_certificate_validation=False):
self._http_proxy = http_proxy
self._https_proxy = https_proxy
self.proxies = {}
if self._https_proxy:
self.proxies['https'] = str(self._https_proxy)
if self._http_proxy:
self.proxies['http'] = str(self._http_proxy)
self.disable_certificate_validation = disable_certificate_validation
self.cafile = None if disable_certificate_validation else cafile
# Initialize the context used to generate the urllib2.HTTPSHandler (in
# Python 2.7.9+ and 3.4+) used by suds and urllib2.
self.ssl_context = self._InitSSLContext(
self.cafile, self.disable_certificate_validation)
def _InitSSLContext(self, cafile=None,
disable_ssl_certificate_validation=False):
"""Creates a ssl.SSLContext with the given settings.
Args:
cafile: A str identifying the resolved path to the cafile. If not set,
this will use the system default cafile.
disable_ssl_certificate_validation: A boolean indicating whether
certificate verification is disabled. For security purposes, it is
highly recommended that certificate verification remain enabled.
Returns:
An ssl.SSLContext instance, or None if the version of Python being used
doesn't support it.
"""
# Attempt to create a context; this should succeed in Python 2 versions
# 2.7.9+ and Python 3 versions 3.4+.
try:
if disable_ssl_certificate_validation:
ssl._create_default_https_context = ssl._create_unverified_context
ssl_context = ssl.create_default_context()
else:
ssl_context = ssl.create_default_context(cafile=cafile)
except AttributeError:
# Earlier versions lack ssl.create_default_context()
# Rather than raising the exception, no context will be provided for
# legacy support. Of course, this means no certificate validation is
# taking place!
return None
return ssl_context
def BuildOpener(self):
"""Builds an OpenerDirector instance using the ProxyConfig settings.
In Python 2, this will return a urllib2.OpenerDirector instance. In Python
3, this will return a urllib.request.OpenerDirector instance.
Returns:
An OpenerDirector instance instantiated with settings defined in the
ProxyConfig instance.
"""
return urllib2.build_opener(*self.GetHandlers())
def GetHandlers(self):
"""Retrieve the appropriate urllib2 handlers for the given configuration.
Returns:
A list of urllib2.BaseHandler subclasses to be used when making calls
with proxy.
"""
handlers = []
if self.ssl_context:
handlers.append(urllib2.HTTPSHandler(context=self.ssl_context))
if self.proxies:
handlers.append(urllib2.ProxyHandler(self.proxies))
return handlers
def GetSudsProxyTransport(self):
"""Retrieve a suds.transport.http.HttpTransport to be used with suds.
This will apply all handlers relevant to the usage of the proxy
configuration automatically.
Returns:
A _SudsProxyTransport instance used to make requests with suds using the
configured proxy.
"""
return self._SudsProxyTransport(self.GetHandlers())
class _ZeepProxyTransport(zeep.transports.Transport):
"""A Zeep transport which configures caching, proxy support, and timeouts."""
def __init__(self, timeout, proxy_config, cache):
"""Initializes _ZeepProxyTransport.
Args:
timeout: An integer timeout in MS for connections.
proxy_config: A ProxyConfig instance representing proxy settings.
cache: A zeep.cache.Base instance representing a cache strategy to employ.
"""
if not cache:
cache = zeep.cache.SqliteCache()
elif cache == ZeepServiceProxy.NO_CACHE:
cache = None
super(_ZeepProxyTransport, self).__init__(
timeout=timeout, operation_timeout=timeout, cache=cache)
self.session.proxies = proxy_config.proxies
class _SudsProxyTransport(suds.transport.http.HttpTransport):
"""A transport that applies the given handlers for usage with a proxy."""
def __init__(self, timeout, proxy_config):
"""Initializes SudsHTTPSTransport.
Args:
timeout: An integer for the connection timeout time.
proxy_config: A ProxyConfig instance representing proxy settings.
"""
suds.transport.http.HttpTransport.__init__(self, timeout=timeout)
self.handlers = proxy_config.GetHandlers()
def u2handlers(self):
"""Get a collection of urllib2 handlers to be installed in the opener.
Returns:
A list of handlers to be installed to the OpenerDirector used by suds.
"""
# Start with the default set of handlers.
return_handlers = suds.transport.http.HttpTransport.u2handlers(self)
return_handlers.extend(self.handlers)
return return_handlers
class SoapPacker(object):
"""A utility class to be passed to argument packing functions.
A subclass should be used in cases where custom logic is needed to pack a
given object in argument packing functions.
"""
@classmethod
  def Pack(cls, obj, version):
raise NotImplementedError('You must subclass SoapPacker.')
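# Illustrative sketch only (not used anywhere in this module): a SoapPacker
# subclass that converts datetime.date values into the 'YYYYMMDD' string form
# some date fields expect. The extra *args absorbs the optional version
# argument that some call sites pass along with the object being packed.
class _ExampleDatePacker(SoapPacker):
  """Example packer converting datetime.date objects to 'YYYYMMDD' strings."""
  @classmethod
  def Pack(cls, obj, *args):
    import datetime  # Local import keeps this illustrative sketch self-contained.
    if isinstance(obj, datetime.date):
      return obj.strftime('%Y%m%d')
    return obj  # Anything else passes through unchanged.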
def GetSchemaHelperForLibrary(lib_name):
if lib_name == 'suds':
return SudsSchemaHelper
elif lib_name == 'zeep':
return ZeepSchemaHelper
class GoogleSchemaHelper(object):
"""Base class for type to xml conversion.
Only used for AdWords reporting specialness. A subclass should be created
for each underlying SOAP implementation.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetSoapXMLForComplexType(self, type_name, value):
"""Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
"""
return
class SudsSchemaHelper(GoogleSchemaHelper):
"""Suds schema helper implementation."""
def __init__(self, endpoint, timeout,
proxy_config, namespace_override, cache):
"""Initializes a SudsSchemaHelper.
Args:
endpoint: A string representing the URL to connect to.
timeout: An integer timeout in MS used to determine connection timeouts.
proxy_config: A googleads.common.ProxyConfig instance which represents
the proxy settings needed.
namespace_override: A string to doctor the WSDL namespace with.
cache: An instance of suds.cache.Cache to use for caching.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
if cache and not isinstance(cache, suds.cache.Cache):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper suds cache with suds.')
transport = _SudsProxyTransport(timeout, proxy_config)
try:
doctor = suds.xsd.doctor.ImportDoctor(
suds.xsd.doctor.Import(
namespace_override, endpoint))
self.suds_client = suds.client.Client(
endpoint,
transport=transport,
plugins=[LoggingMessagePlugin()],
cache=cache,
doctor=doctor)
self._namespace_override = namespace_override
except suds.transport.TransportError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
def GetSoapXMLForComplexType(self, type_name, value):
"""Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
"""
schema = self.suds_client.wsdl.schema
definition_type = schema.elements[(type_name, self._namespace_override)]
marshaller = suds.mx.literal.Literal(schema)
content = suds.mx.Content(
tag=type_name, value=value,
name=type_name, type=definition_type)
data = marshaller.process(content)
return data
class ZeepSchemaHelper(GoogleSchemaHelper):
"""Zeep schema helper implementation."""
def __init__(self, endpoint, timeout,
proxy_config, namespace_override, cache):
"""Initializes a ZeepSchemaHelper.
Args:
endpoint: A string representing the URL to connect to.
timeout: An integer timeout in MS used to determine connection timeouts.
proxy_config: A googleads.common.ProxyConfig instance which represents
the proxy settings needed.
namespace_override: A string to doctor the WSDL namespace with.
cache: An instance of zeep.cache.Base to use for caching.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
if cache and not (isinstance(cache, zeep.cache.Base) or
cache == ZeepServiceProxy.NO_CACHE):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper zeep cache with zeep.')
transport = _ZeepProxyTransport(timeout, proxy_config, cache)
try:
data = transport.load(endpoint)
except requests.exceptions.HTTPError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
self.schema = zeep.xsd.Schema(lxml.etree.fromstring(data))
self._namespace_override = namespace_override
self._element_maker = lxml.builder.ElementMaker(
namespace=namespace_override, nsmap={'tns': namespace_override})
def GetSoapXMLForComplexType(self, type_name, value):
"""Return an XML string representing a SOAP complex type.
Args:
type_name: The name of the type with namespace prefix if necessary.
value: A python dictionary to hydrate the type instance with.
Returns:
A string containing the SOAP XML for the type.
"""
element = self.schema.get_element(
'{%s}%s' % (self._namespace_override, type_name))
result_element = self._element_maker(element.qname.localname)
element_value = element(**value)
element.type.render(result_element, element_value)
data = lxml.etree.tostring(result_element).strip()
return data
def GetServiceClassForLibrary(lib_name):
if lib_name == 'suds':
return SudsServiceProxy
elif lib_name == 'zeep':
return ZeepServiceProxy
class GoogleSoapService(object):
"""Base class for a SOAP service representation.
A subclass should be created for each underlying SOAP implementation.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, header_handler, packer, version):
"""Initializes a SOAP service.
Args:
header_handler: A googleads.common.HeaderHandler instance used to set
SOAP and HTTP headers.
packer: A googleads.common.SoapPacker instance used to transform
entities.
version: the version of the current API, e.g. 'v201811'
"""
self._header_handler = header_handler
self._packer = packer
self._version = version
self._method_proxies = {}
@abc.abstractmethod
def CreateSoapElementForType(self, type_name):
"""Create an instance of a SOAP type.
Args:
type_name: The name of the type.
Returns:
An instance of type type_name.
"""
@abc.abstractmethod
def GetRequestXML(self, method, *args):
"""Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
"""
@abc.abstractmethod
def _WsdlHasMethod(self, method_name):
"""Determine if the wsdl contains a method.
Args:
method_name: The name of the method to search.
Returns:
True if the method is in the WSDL, otherwise False.
"""
@abc.abstractmethod
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
def __getattr__(self, attr):
"""Support service.method() syntax."""
if self._WsdlHasMethod(attr):
if attr not in self._method_proxies:
self._method_proxies[attr] = self._CreateMethod(attr)
return self._method_proxies[attr]
else:
raise googleads.errors.GoogleAdsValueError('Service %s not found' % attr)
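# Note on usage (illustrative; the operation name below is hypothetical and
# comes from a service's WSDL, not from this module):
#   campaigns = campaign_service.get(selector)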
class SudsServiceProxy(GoogleSoapService):
"""Wraps a suds service object, allowing custom logic to be injected.
This class is responsible for refreshing the HTTP and SOAP headers, so changes
to the client object will be reflected in future SOAP calls, and for
transforming SOAP call input parameters, allowing dictionary syntax to be used
with all SOAP complex types.
Attributes:
suds_client: The suds.client.Client this service belongs to. If you are
familiar with suds and want to use autogenerated classes, you can access
the client and its factory,
"""
def __init__(self, endpoint, header_handler, packer, proxy_config,
timeout, version, cache=None):
"""Initializes a suds service proxy.
Args:
endpoint: A URL for the service.
header_handler: A HeaderHandler responsible for setting the SOAP and HTTP
headers on the service client.
packer: An optional subclass of googleads.common.SoapPacker that provides
customized packing logic.
proxy_config: A ProxyConfig that represents proxy settings.
timeout: An integer to set the connection timeout.
version: the current version of the library, e.g. 'v201811'
cache: A suds.cache.Cache instance to pass to the underlying SOAP
library for caching.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
super(SudsServiceProxy, self).__init__(header_handler, packer, version)
if cache and not isinstance(cache, suds.cache.Cache):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper suds cache with suds.')
transport = _SudsProxyTransport(timeout, proxy_config)
self._method_proxies = {}
try:
self.suds_client = suds.client.Client(
endpoint,
timeout=timeout,
cache=cache,
transport=transport,
plugins=[LoggingMessagePlugin()])
except suds.transport.TransportError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
def GetRequestXML(self, method, *args):
"""Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
"""
self.suds_client.set_options(nosend=True)
service_request = (getattr(self, method))(*args).envelope
self.suds_client.set_options(nosend=False)
return lxml.etree.fromstring(service_request)
def CreateSoapElementForType(self, type_name):
"""Create an instance of a SOAP type.
Args:
type_name: The name of the type.
Returns:
An instance of type type_name.
"""
return self.suds_client.factory.create(type_name)
def SetHeaders(self, soap_headers, http_headers):
"""Set the headers for the underlying client.
Args:
soap_headers: A SOAP element for the SOAP headers.
http_headers: A dictionary for the http headers.
"""
self.suds_client.set_options(soapheaders=soap_headers, headers=http_headers)
def _WsdlHasMethod(self, method_name):
"""Determine if the wsdl contains a method.
Args:
method_name: The name of the method to search.
Returns:
True if the method is in the WSDL, otherwise False.
"""
return method_name in self.suds_client.wsdl.services[0].ports[0].methods
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
soap_service_method = getattr(self.suds_client.service, method_name)
def MakeSoapRequest(*args):
"""Perform a SOAP call."""
AddToUtilityRegistry('suds')
self.SetHeaders(
self._header_handler.GetSOAPHeaders(self.CreateSoapElementForType),
self._header_handler.GetHTTPHeaders())
try:
return soap_service_method(
*[_PackForSuds(arg, self.suds_client.factory,
self._packer) for arg in args])
except suds.WebFault as e:
if _logger.isEnabledFor(logging.WARNING):
_logger.warning('Response summary - %s',
_ExtractResponseSummaryFields(e.document))
_logger.debug('SOAP response:\n%s', e.document.str())
if not hasattr(e.fault, 'detail'):
exc = (googleads.errors.
GoogleAdsServerFault(e.document, message=e.fault.faultstring))
raise exc # Done this way for 2to3
# Before re-throwing the WebFault exception, an error object needs to be
# wrapped in a list for safe iteration.
fault = e.fault.detail.ApiExceptionFault
if not hasattr(fault, 'errors') or fault.errors is None:
exc = (googleads.errors.
GoogleAdsServerFault(e.document, message=e.fault.faultstring))
raise exc # Done this way for 2to3
obj = fault.errors
if not isinstance(obj, list):
fault.errors = [obj]
exc = googleads.errors.GoogleAdsServerFault(e.document, fault.errors,
message=e.fault.faultstring)
raise exc # Done this way for 2to3
return MakeSoapRequest
class _ZeepAuthHeaderPlugin(zeep.Plugin):
"""A zeep plugin responsible for setting our custom HTTP headers."""
def __init__(self, header_handler):
"""Instantiate a new _ZeepAuthHeaderPlugin.
Args:
header_handler: A googleads.common.HeaderHandler instance.
"""
self._header_handler = header_handler
def egress(self, envelope, http_headers, operation, binding_options):
"""Overriding the egress function to set our headers.
Args:
envelope: An Element with the SOAP request data.
http_headers: A dict of the current http headers.
operation: The SoapOperation instance.
binding_options: An options dict for the SOAP binding.
Returns:
A tuple of the envelope and headers.
"""
custom_headers = self._header_handler.GetHTTPHeaders()
http_headers.update(custom_headers)
return envelope, http_headers
class ZeepServiceProxy(GoogleSoapService):
"""Wraps a zeep service object, allowing custom logic to be injected.
This class is responsible for refreshing the HTTP and SOAP headers, so changes
to the client object will be reflected in future SOAP calls, and for
transforming SOAP call input parameters, allowing dictionary syntax to be used
with all SOAP complex types.
Attributes:
zeep_client: The zeep.Client this service belongs to. If you are
familiar with zeep, you can utilize this directly.
"""
NO_CACHE = 'zeep_no_cache'
def __init__(self, endpoint, header_handler, packer,
proxy_config, timeout, version, cache=None):
"""Initializes a zeep service proxy.
Args:
endpoint: A URL for the service.
header_handler: A HeaderHandler responsible for setting the SOAP and HTTP
headers on the service client.
packer: An optional subclass of googleads.common.SoapPacker that provides
customized packing logic.
proxy_config: A ProxyConfig that represents proxy settings.
timeout: An integer to set the connection timeout.
version: the version of the current API, e.g. 'v201811'
cache: An instance of zeep.cache.Base to pass to the underlying SOAP
library for caching. A file cache by default. To disable, pass
googleads.common.ZeepServiceProxy.NO_CACHE.
Raises:
GoogleAdsValueError: The wrong type was given for caching.
"""
super(ZeepServiceProxy, self).__init__(header_handler, packer, version)
if cache and not (isinstance(cache, zeep.cache.Base) or
cache == self.NO_CACHE):
raise googleads.errors.GoogleAdsValueError(
'Must use a proper zeep cache with zeep.')
transport = _ZeepProxyTransport(timeout, proxy_config, cache)
plugins = [_ZeepAuthHeaderPlugin(header_handler),
googleads.util.ZeepLogger()]
try:
self.zeep_client = zeep.Client(
endpoint, transport=transport, plugins=plugins)
except requests.exceptions.HTTPError as e:
raise googleads.errors.GoogleAdsSoapTransportError(str(e))
first_service = list(self.zeep_client.wsdl.services.itervalues())[0]
first_port = list(first_service.ports.itervalues())[0]
self._method_bindings = first_port.binding
def CreateSoapElementForType(self, type_name):
"""Create an instance of a SOAP type.
Args:
type_name: The name of the type.
Returns:
An instance of type type_name.
"""
return self.zeep_client.get_type(type_name)()
def GetRequestXML(self, method, *args):
"""Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
"""
packed_args = self._PackArguments(method, args, set_type_attrs=True)
headers = self._GetZeepFormattedSOAPHeaders()
return self.zeep_client.create_message(
self.zeep_client.service, method, *packed_args, _soapheaders=headers)
def _WsdlHasMethod(self, method_name):
"""Determine if a method is in the wsdl.
Args:
method_name: The name of the method.
Returns:
True if the method is in the wsdl, otherwise False.
"""
try:
self._method_bindings.get(method_name)
return True
except ValueError:
return False
def _GetBindingNamespace(self):
"""Return a string with the namespace of the service binding in the WSDL."""
return (list(self.zeep_client.wsdl.bindings.itervalues())[0]
.port_name.namespace)
def _PackArguments(self, method_name, args, set_type_attrs=False):
"""Properly pack input dictionaries for zeep.
Pack a list of python dictionaries into XML objects. Dictionaries which
contain an 'xsi_type' entry are converted into that type instead of the
argument default. This allows creation of complex objects which include
inherited types.
Args:
method_name: The name of the method that will be called.
args: A list of dictionaries containing arguments to the method.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
A list of XML objects that can be passed to zeep.
"""
# Get the params for the method to find the initial types to instantiate.
op_params = self.zeep_client.get_element(
'{%s}%s' % (self._GetBindingNamespace(), method_name)).type.elements
result = [self._PackArgumentsHelper(param, param_data, set_type_attrs)
for ((_, param), param_data) in izip(op_params, args)]
return result
@classmethod
def _IsBase64(cls, s):
"""An imperfect but decent method for determining if a string is base64.
Args:
s: A string with the data to test.
Returns:
True if s is base64, else False.
"""
try:
if base64.b64encode(base64.b64decode(s)).decode('utf-8') == s:
return True
except (TypeError, binascii.Error):
pass
return False
def _PackArgumentsHelper(self, elem, data, set_type_attrs):
"""Recursive helper for PackArguments.
Args:
elem: The element type we are creating.
data: The data to instantiate it with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
An instance of type 'elem'.
"""
if self._packer:
data = self._packer.Pack(data, self._version)
if isinstance(data, dict): # Instantiate from simple Python dict
# See if there is a manually specified derived type.
type_override = data.get('xsi_type')
if type_override:
elem_type = self._DiscoverElementTypeFromLocalname(type_override)
else:
elem_type = elem.type
data_formatted = data.iteritems()
packed_result = self._CreateComplexTypeFromData(
elem_type, type_override is not None, data_formatted, set_type_attrs)
elif isinstance(data, zeep.xsd.CompoundValue):
# Here the data is already a SOAP element but we still need to look
# through it in case it has been edited with Python dicts.
elem_type = data._xsd_type
data_formatted = zip(dir(data), [data[k] for k in dir(data)])
packed_result = self._CreateComplexTypeFromData(
elem_type, False, data_formatted, set_type_attrs)
elif isinstance(data, (list, tuple)):
packed_result = [self._PackArgumentsHelper(elem, item, set_type_attrs)
for item in data]
else:
if elem.type.name == 'base64Binary' and self._IsBase64(data):
_logger.warn('Passing data to base64 field %s that may '
'already be encoded. Do not pre-encode base64 '
'fields with zeep.', elem.name)
packed_result = data
return packed_result
def _DiscoverElementTypeFromLocalname(self, type_localname):
"""Searches all namespaces for a type by name.
Args:
type_localname: The name of the type.
Returns:
A fully qualified SOAP type with the specified name.
Raises:
A zeep.exceptions.LookupError if the type cannot be found in any
namespace.
"""
elem_type = None
last_exception = None
for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values():
try:
elem_type = self.zeep_client.get_type(
'{%s}%s' % (ns_prefix, type_localname))
except zeep.exceptions.LookupError as e:
last_exception = e
continue
break
if not elem_type:
raise last_exception
return elem_type
def _CreateComplexTypeFromData(
self, elem_type, type_is_override, data, set_type_attrs):
"""Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
      A fully initialized SOAP element.
"""
elem_arguments = dict(elem_type.elements)
# A post order traversal of the original data, need to instantiate from
# the bottom up.
instantiated_arguments = {
k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)
for k, v in data if k != 'xsi_type'}
if set_type_attrs:
found_type_attr = next((e_name for e_name, _ in elem_type.elements
if e_name.endswith('.Type')), None)
if found_type_attr and type_is_override:
instantiated_arguments[found_type_attr] = elem_type.qname.localname
# Now go back through the tree instantiating SOAP types as we go.
return elem_type(**instantiated_arguments)
def _GetZeepFormattedSOAPHeaders(self):
"""Returns a dict with SOAP headers in the right format for zeep."""
headers = self._header_handler.GetSOAPHeaders(self.CreateSoapElementForType)
soap_headers = {'RequestHeader': headers}
return soap_headers
def _CreateMethod(self, method_name):
"""Create a method wrapping an invocation to the SOAP service.
Args:
method_name: A string identifying the name of the SOAP method to call.
Returns:
A callable that can be used to make the desired SOAP request.
"""
soap_service_method = self.zeep_client.service[method_name]
def MakeSoapRequest(*args):
AddToUtilityRegistry('zeep')
soap_headers = self._GetZeepFormattedSOAPHeaders()
packed_args = self._PackArguments(method_name, args)
try:
return soap_service_method(
*packed_args, _soapheaders=soap_headers)['body']['rval']
except zeep.exceptions.Fault as e:
error_list = ()
if e.detail is not None:
underlying_exception = e.detail.find(
'{%s}ApiExceptionFault' % self._GetBindingNamespace())
fault_type = self.zeep_client.get_element(
'{%s}ApiExceptionFault' % self._GetBindingNamespace())
fault = fault_type.parse(
underlying_exception, self.zeep_client.wsdl.types)
error_list = fault.errors or error_list
raise googleads.errors.GoogleAdsServerFault(
e.detail, errors=error_list, message=e.message)
return MakeSoapRequest
class HeaderHandler(object):
"""A generic header handler interface that must be subclassed by each API."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetSOAPHeaders(self, create_method):
"""Returns the required SOAP Headers."""
@abc.abstractmethod
def GetHTTPHeaders(self):
"""Returns the required HTTP headers."""
class LoggingMessagePlugin(suds.plugin.MessagePlugin):
"""A MessagePlugin used to log request summaries."""
def marshalled(self, context):
if _logger.isEnabledFor(logging.INFO):
_logger.info('Request summary - %s',
_ExtractRequestSummaryFields(context.envelope))
def parsed(self, context):
if _logger.isEnabledFor(logging.INFO):
_logger.info('Response summary - %s',
_ExtractResponseSummaryFields(context.reply))
def _ExtractRequestSummaryFields(document):
"""Extract logging fields from the request's suds.sax.element.Element.
Args:
document: A suds.sax.element.Element instance containing the API request.
Returns:
A dict mapping logging field names to their corresponding value.
"""
headers = document.childAtPath('Header/RequestHeader')
body = document.childAtPath('Body')
summary_fields = {
'methodName': body.getChildren()[0].name
}
# Extract AdWords-specific fields if they exist.
  # Note: compare against None explicitly; the element itself can evaluate to False.
client_customer_id = headers.getChild('clientCustomerId')
if client_customer_id is not None:
summary_fields['clientCustomerId'] = client_customer_id.text
# Extract Ad Manager-specific fields if they exist.
  # Note: compare against None explicitly; the element itself can evaluate to False.
network_code = headers.getChild('networkCode')
if network_code is not None:
summary_fields['networkCode'] = network_code.text
return summary_fields
def _ExtractResponseSummaryFields(document):
"""Extract logging fields from the response's suds.sax.document.Document.
Args:
document: A suds.sax.document.Document instance containing the parsed
API response for a given API request.
Returns:
A dict mapping logging field names to their corresponding value.
"""
headers = document.childAtPath('Envelope/Header/ResponseHeader')
body = document.childAtPath('Envelope/Body')
summary_fields = {}
if headers is not None:
summary_fields['requestId'] = headers.getChild('requestId').text
summary_fields['responseTime'] = headers.getChild('responseTime').text
# Extract AdWords-specific summary fields if they are present.
    # Note: compare against None explicitly; the element itself can evaluate to False.
service_name = headers.getChild('serviceName')
if service_name is not None:
summary_fields['serviceName'] = service_name.text
method_name = headers.getChild('methodName')
if method_name is not None:
summary_fields['methodName'] = method_name.text
operations = headers.getChild('operations')
if operations is not None:
summary_fields['operations'] = operations.text
if body is not None:
# Extract fault if it exists.
fault = body.getChild('Fault')
if fault is not None:
summary_fields['isFault'] = True
# Cap length of faultstring to 16k characters for summary.
summary_fields['faultMessage'] = fault.getChild(
'faultstring').text[:16000]
else:
summary_fields['isFault'] = False
return summary_fields
|
the-stack_0_13754 | import math
import numpy as np
def vec3(x, y, z):
return np.array([x, y, z], dtype=np.float32)
def radians(v):
return np.radians(v)
def identity():
return np.identity(4, dtype=np.float32)
def empty():
return np.zeros([4, 4], dtype=np.float32)
def magnitude(v):
return np.linalg.norm(v)
def normalize(v):
m = magnitude(v)
return v if m == 0 else v / m
def dot(u, v):
return np.sum(u * v)
def cross(u, v):
res = vec3(0, 0, 0)
res[0] = u[1] * v[2] - u[2] * v[1]
res[1] = u[2] * v[0] - u[0] * v[2]
res[2] = u[0] * v[1] - u[1] * v[0]
return res
# The functions below could be optimized (vectorized) further.
def translate(m, v):
res = np.copy(m)
res[:,3] = m[:,0] * v[0] + m[:,1] * v[1] + m[:,2] * v[2] + m[:,3]
return res
def rotate(m, angle, v):
a = angle
c = np.cos(a)
s = np.sin(a)
axis = normalize(v)
temp = (1 - c) * axis
rot = empty()
rot[0][0] = c + temp[0] * axis[0]
rot[0][1] = temp[0] * axis[1] + s * axis[2]
rot[0][2] = temp[0] * axis[2] - s * axis[1]
rot[1][0] = temp[1] * axis[0] - s * axis[2]
rot[1][1] = c + temp[1] * axis[1]
rot[1][2] = temp[1] * axis[2] + s * axis[0]
rot[2][0] = temp[2] * axis[0] + s * axis[1]
rot[2][1] = temp[2] * axis[1] - s * axis[0]
rot[2][2] = c + temp[2] * axis[2]
res = empty()
res[:,0] = m[:,0] * rot[0][0] + m[:,1] * rot[0][1] + m[:,2] * rot[0][2]
res[:,1] = m[:,0] * rot[1][0] + m[:,1] * rot[1][1] + m[:,2] * rot[1][2]
res[:,2] = m[:,0] * rot[2][0] + m[:,1] * rot[2][1] + m[:,2] * rot[2][2]
res[:,3] = m[:,3]
return res
def perspective(fovy, aspect, zNear, zFar):
tanHalfFovy = np.tan(fovy / 2)
res = empty()
res[0][0] = 1 / (aspect * tanHalfFovy)
res[1][1] = 1 / (tanHalfFovy)
res[2][3] = -1
res[2][2] = - (zFar + zNear) / (zFar - zNear)
res[3][2] = -(2 * zFar * zNear) / (zFar - zNear)
return res.T
def ortho(left, right, bottom, top, zNear, zFar):
#res = np.ones([4, 4], dtype=np.float32)
res = identity()
res[0][0] = 2 / (right - left)
res[1][1] = 2 / (top - bottom)
res[2][2] = - 2 / (zFar - zNear)
res[3][0] = - (right + left) / (right - left)
res[3][1] = - (top + bottom) / (top - bottom)
res[3][2] = - (zFar + zNear) / (zFar - zNear)
return res.T
def lookat(eye, center, up):
f = normalize(center - eye)
s = normalize(cross(f, up))
u = cross(s, f)
res = identity()
res[0][0] = s[0]
res[1][0] = s[1]
res[2][0] = s[2]
res[0][1] = u[0]
res[1][1] = u[1]
res[2][1] = u[2]
res[0][2] = -f[0]
res[1][2] = -f[1]
res[2][2] = -f[2]
res[3][0] = -dot(s, eye)
res[3][1] = -dot(u, eye)
res[3][2] = -dot(f, eye)
return res.T
def transform(d, m):
return np.dot(m, d.T).T
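# Minimal usage sketch (the camera position and angle values below are
# arbitrary): compose a model-view-projection matrix the way a GL-style
# renderer would.
if __name__ == "__main__":
    model = rotate(identity(), radians(45.0), vec3(0.0, 1.0, 0.0))
    view = lookat(vec3(0.0, 0.0, 3.0), vec3(0.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0))
    proj = perspective(radians(60.0), 16.0 / 9.0, 0.1, 100.0)
    print(np.dot(proj, np.dot(view, model)))  # 4x4 model-view-projection matrix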
|
the-stack_0_13755 | import re
import subprocess
import threading
import time
# from time import time
from config import *
from utils import *
def pat(test_data_in, class_path, jar, prt=False):
inputfile = open(test_data_in).readlines()
# print("@@@", test_data_in)
basetime, maxtime = datacheck(test_data_in)
# input = parseInput(inputfile)
# print("@@@", input)
start = time.time()
outputfile = callProgram(r"java -Xmx128m -cp {} {}".format(jar, class_path), inputfile)
end = time.time()
passed_time = end - start
# output = parseOutput(outputfile)
if prt:
for line in outputfile:
print(line)
# A, B, C = parseOutputABC(outputfile)
# print("Elevator A:")
# for line in A:
# print("\033[1;34m{}\033[0m".format(line))
# print("Elevator B:")
# for line in B:
# print("\033[1;35m{}\033[0m".format(line))
# print("Elevator C:")
# for line in C:
# print("\033[1;36m{}\033[0m".format(line))
# print(outputfile)
ac = checkAll(inputfile, outputfile)
t_ac = passed_time < maxtime
if ac is True and t_ac is True:
if passed_time > basetime + 20:
print("\033[1;33mWarning: {}, time:{}, base_time: {}\033[0m"
.format(test_data_in, passed_time, basetime, maxtime))
return True, passed_time
print("\033[1;32mPassed: {}, time:{}, base_time: {}\033[0m".format(test_data_in, passed_time, basetime))
return True, passed_time
if ac is not True:
print("\033[1;31mFailed: {}\n\tWA: {}\033[0m".format(test_data_in, ac))
return False, passed_time
if t_ac is not True:
print("\033[1;31mWarning: {}\n\tTLE: {}, max_time: {}\033[0m".format(test_data_in, passed_time, maxtime))
return True, passed_time
def parseInput(inputfile):
personRequests = []
for line in inputfile:
result = re.search(r'\[(.*)\](-?\d+)-FROM-(-?\d+)-TO-(-?\d+)', line.strip(), re.M)
personRequests.append(result.groups())
return personRequests
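# For reference, an input line of the form "[1.0]5-FROM-3-TO-10" parses to the
# tuple ('1.0', '5', '3', '10'); the id and floor values here are made up.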
def run(p, output):
while True:
line = p.stdout.readline()
if not line:
break
# print(line)
output.append(line.decode().strip())
def callProgram(cmd, inputFile):
# print(cmd)
# os.chdir("temp")
# print(inputFile)
output = []
if cfg.CLOSE_STDERR:
p = subprocess.Popen(cmd,
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
p = subprocess.Popen(cmd,
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
w = threading.Thread(target=run, args=(p, output,))
last_time = 0
for line in inputFile:
result = re.search(r'\[(.*)\](.*)', line.strip(), re.M)
sleeptime = result.group(1)
inputLine = result.group(2)
# print(sleeptime)
time.sleep(float(sleeptime) - last_time)
last_time = float(sleeptime)
write_str = inputLine + '\r\n'
# print(write_str)
p.stdin.write(write_str.encode("UTF-8"))
p.stdin.flush()
time.sleep(0.01)
w.start()
p.stdin.close()
try:
if p.wait(cfg.TIME_LIMIT) != 0:
return output
except subprocess.TimeoutExpired:
p.kill()
p.terminate()
print("\033[1;31mError: TimeoutExpired: May in the endless loop/wait. Check your 'synchronized'.")
return output
# print(p.returncode)
if p.returncode != 0:
print("\033[1;31mError: return code {} is not 0\033[0m".format(p.returncode))
return output
# os.chdir("..")
# print(output)
return output
def parseOutputABC(inputfile):
sequenceA = []
sequenceB = []
sequenceC = []
for line in inputfile:
result = re.search(r'-A', line.strip(), re.M)
if result is not None:
sequenceA.append(line)
continue
result = re.search(r'-B', line.strip(), re.M)
if result is not None:
sequenceB.append(line)
continue
result = re.search(r'-C', line.strip(), re.M)
if result is not None:
sequenceC.append(line)
continue
return sequenceA, sequenceB, sequenceC
def parseOutput(inputfile):
sequence = []
# IN = []
# OUT = []
# OPEN = []
# CLOSE = []
for line in inputfile:
result = re.search(r'\[(.*)\]IN-(-?\d+)-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["IN", result.groups()])
continue
result = re.search(r'\[(.*)\]OUT-(-?\d+)-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["OUT", result.groups()])
continue
result = re.search(r'\[(.*)\]OPEN-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["OPEN", result.groups()])
continue
result = re.search(r'\[(.*)\]CLOSE-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["CLOSE", result.groups()])
continue
result = re.search(r'\[(.*)\]ARRIVE-(-?\d+)', line.strip(), re.M)
if result is not None:
sequence.append(["ARRIVE", result.groups()])
continue
return sequence
def check_1_1(input, output, eId):
sequence = output
time = []
level = []
for mesType, mes in sequence:
time.append(float(mes[0]))
if mesType == "IN" or mesType == "OUT":
level.append(int(mes[2]))
else:
level.append(int(mes[1]))
assert len(time) == len(level)
for i in range(len(time) - 1):
estimate_time = abs(level[i + 1] - level[i]) * cfg.LEVEL_TIME[eId]
if level[i] * level[i + 1] < 0:
estimate_time -= cfg.LEVEL_TIME[eId]
if not (time[i + 1] - time[i] >= estimate_time - cfg.EPS):
return "The elevator has no enough time to move such far distance at {}: {}. {}, {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]], time[i + 1] - time[i], estimate_time - cfg.EPS)
return True
def check_1_2(input, output, eId):
sequence = output
length = len(sequence)
for i, (mesType, mes) in enumerate(sequence):
if mesType == "OPEN" and i != 0:
index = i + 1
while index < len(sequence) and sequence[index][0] != "CLOSE":
index += 1
diff = cfg.DOOR_TIME
if index == len(sequence):
return "No Close with {}".format(sequence[i])
if sequence[index][0] == "CLOSE":
diff = cfg.DOOR_TIME * 2
            if not (float(sequence[index][1][0]) - float(sequence[i][1][0]) >= diff - cfg.EPS):
# print(sequence[i + 1], sequence[i])
return "The elevator has no enough time to open/close at {}: {}".format(i, [sequence[index], sequence[i], sequence[i+1]])
# if mesType == "CLOSE" and i != length - 1:
# index = i - 1
# while index > 0 and sequence[index][0] != "OPEN":
# index -= 1
# diff = 0.25
# if sequence[index][0] == "OPEN":
# diff = 0.5
# if not (float(sequence[i][1][0]) - float(sequence[index][1][0]) > diff - 0.001):
# # print(sequence[i], sequence[i - 1])
# return "The elevator has no enough time to close at {}".format(i)
return True
def getLevel(sequence):
mesType, mes = sequence
if mesType in ["OPEN", "CLOSE", "ARRIVE"]:
return int(mes[1])
else:
return int(mes[2])
def getTime(sequence):
return float(sequence[1][0])
def getId(sequence):
mesType, mes = sequence
assert mesType == "IN" or mesType == "OUT"
return int(mes[1])
def check_1_3(input, output, eId):
sequence = output
isClosed = True
for i, (mesType, mes) in enumerate(sequence):
if i != 1 and not isClosed and (getLevel(sequence[i - 1]) != getLevel(sequence[i])):
# print(sequence[i - 1], sequence[i])
return "The elevator is open at {} while you want it move: {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]])
if mesType == "OPEN":
isClosed = False
if mesType == "CLOSE":
isClosed = True
return True
def check_1_4(input, output, eId):
sequence = output
isOpen = False
for i, (mesType, mes) in enumerate(sequence):
if not isOpen and (mesType == "IN" or mesType == "OUT"):
return "The elevator is closed at {} while you want someone in/out: {}".format(i, [sequence[i-1], sequence[i], sequence[i+1]])
if mesType == "OPEN":
if isOpen is True:
return "The elevator is open at {} while you want it open again: {}".format(i, [sequence[i - 1],
sequence[i],
sequence[i + 1]])
isOpen = True
if mesType == "CLOSE":
if isOpen is False:
return "The elevator is closed at {} while you want it close again: {}".format(i, [sequence[i - 1],
sequence[i],
sequence[i + 1]])
isOpen = False
if isOpen == True:
return "Elevator is not closed at the end."
return True
def check_3(input, output, eId):
sequence = output
levelNow = 1
arrivalTime = 0
for i, (mesType, mes) in enumerate(sequence):
if mesType == "ARRIVE":
level = getLevel(sequence[i])
if level in [0]:
return "Bad arrive 0 at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
time = getTime(sequence[i])
if levelNow in [-1, 1]:
if not 0 < abs(levelNow - level) <= 2:
return "Bad arrive 0 at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
else:
if not 0 < abs(levelNow - level) <= 1:
#print(levelNow, level)
return "Bad arrive at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
if not abs(arrivalTime - time) >= 0.4 - cfg.EPS:
return "Bad arrive at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i+1]])
arrivalTime = time
levelNow = level
return True
def check_4(input, output, eId):
sequence = output
inside = set()
for i, (mesType, mes) in enumerate(sequence):
if mesType == "IN":
inside.add(getId(sequence[i]))
maxN = 0
if eId == "A":
maxN = 6
if eId == "B":
maxN = 8
if eId == "C":
maxN = 7
if len(inside) > maxN:
return "Elevator is full at {}: {}".format(i, [sequence[-1], sequence[i], sequence[i + 1]])
if mesType == "OUT":
if getId(sequence[i]) not in inside:
return "{} not in elevator at {}: {}".format(getId(sequence[i]), i, [sequence[-1], sequence[i], sequence[i + 1]])
inside.remove(getId(sequence[i]))
return True
def check_2(input, output):
id_now = {}
id_to = {}
id_set = []
ele = set()
for time, id_, from_, to in input:
id_now[int(id_)] = int(from_)
id_to[int(id_)] = int(to)
id_set.append(int(id_))
# print(id_now)
sequence = output
for i, (mesType, mes) in enumerate(sequence):
# print(id_now)
# print(sequence[i])
if mesType == "IN":
thisID = getId(sequence[i])
level = getLevel(sequence[i])
if (thisID not in id_now) or (level != id_now[thisID]):
return "{} is not at floor {} while you want the guy in.".format(thisID, level)
del id_now[thisID]
if thisID in ele:
return "{} has been in the elevator at {} while you want the guy in again.".format(thisID, i)
ele.add(thisID)
if mesType == "OUT":
thisID = getId(sequence[i])
if thisID not in ele:
return "{} is not in the elevator at {} while you want the guy out.".format(thisID, i)
ele.remove(thisID)
id_now[thisID] = getLevel(sequence[i])
if len(ele) > 0:
return "{} still in the elevator.".format(ele)
for id_ in id_set:
if id_now[int(id_)] != id_to[int(id_)]:
return "{} in the wrong floor at the end.".format(id_)
return True
def checkAllSequence(input, output, eId):
r_1_1 = check_1_1(input, output, eId)
r_1_2 = check_1_2(input, output, eId)
r_1_3 = check_1_3(input, output, eId)
r_1_4 = check_1_4(input, output, eId)
r_4 = check_4(input, output, eId)
# r_2 = check_2(input, output)
r_3 = check_3(input, output, eId)
if r_1_1 is not True:
return "check_1_1: \n\t" + str(r_1_1) + "\n\t" + str(output)
if r_1_2 is not True:
return "check_1_2: \n\t" + str(r_1_2) + "\n\t" + str(output)
if r_1_3 is not True:
return "check_1_3: \n\t" + str(r_1_3) + "\n\t" + str(output)
if r_1_4 is not True:
return "check_1_4: \n\t" + str(r_1_4) + "\n\t" + str(output)
if r_4 is not True:
return "check_4: \n\t" + str(r_4) + "\n\t" + str(output)
# if r_2 is not True:
# return "check_2: \n\t" + str(r_2) + "\n\t" + str(output)
if r_3 is not True:
return "check_3: \n\t" + str(r_3) + "\n\t" + str(output)
return True
def checkAll(inputfile, outputfile):
input = parseInput(inputfile)
sequenceAll = parseOutput(outputfile)
sequenceA, sequenceB, sequenceC = parseOutputABC(outputfile)
outputSequenceA = parseOutput(sequenceA)
outputSequenceB = parseOutput(sequenceB)
outputSequenceC = parseOutput(sequenceC)
r_A = checkAllSequence(input, outputSequenceA, "A")
r_B = checkAllSequence(input, outputSequenceB, "B")
r_C = checkAllSequence(input, outputSequenceC, "C")
r_All = check_2(input, sequenceAll)
if r_A is not True:
return "Error Elevator A: " + str(r_A) + "\n\t" + str(outputfile)
if r_B is not True:
return "Error Elevator B: " + str(r_B) + "\n\t" + str(outputfile)
if r_C is not True:
return "Error Elevator C: " + str(r_C) + "\n\t" + str(outputfile)
if r_All is not True:
return r_All + "\n\t" + str(outputfile)
return True
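# Minimal usage sketch (the test file, main class and jar names below are
# hypothetical): run one elevator test case against a compiled jar and
# validate its output with the checks above.
if __name__ == "__main__":
    passed, elapsed = pat("testcase/sample.in", "MainClass", "elevator.jar", prt=True)
    print("passed: {}, time: {:.2f}s".format(passed, elapsed))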
|
the-stack_0_13756 |
import numpy as np
import Augmentor
from PIL import Image
class DatasetAugmentor():
def __init__(self,
dataset_config=None,
additional_augmentor_obj=None
):
self.p = Augmentor.Pipeline()
if dataset_config is not None and 'pipeline' in dataset_config:
for pipeline in dataset_config['pipeline']:
method_to_call = getattr(self.p, pipeline[0])
parameters = pipeline[1]
method_to_call(**parameters)
if additional_augmentor_obj is not None:
for pipeline in additional_augmentor_obj:
method_to_call = getattr(self.p, pipeline[0])
parameters = pipeline[1]
method_to_call(**parameters)
self.transform = self.p.torch_transform()
if dataset_config is not None and 'scaling' in dataset_config:
self.scaling = dataset_config['scaling']
else:
self.scaling = 'tanh'
def _scaling_tanh(self, img):
img = img / 127.5 - 1
return img
def _scaling_sigmoid(self, img):
img = img / 255.0
return img
def augment(self, image, isArray=False):
if isArray: # if the input is a numpy array, convert back to PIL
image = Image.fromarray(image)
image = self.transform(image)
image = np.asarray(image).astype('f')
w, h = image.shape[0], image.shape[1]
if np.ndim(image) == 2:
ch = 1
else:
ch = np.shape(image)[2]
image = image.reshape(w, h, ch)
image = image.transpose((2, 0, 1))
if self.scaling == 'none':
return image
elif self.scaling == 'sigmoid':
return self._scaling_sigmoid(image)
elif self.scaling == 'tanh':
return self._scaling_tanh(image)
else:
raise NotImplementedError
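# Minimal usage sketch: the pipeline entries below are standard Augmentor
# operations, and the zero-filled image stands in for a real dataset sample.
if __name__ == "__main__":
    example_config = {
        "pipeline": [
            ["rotate", {"probability": 0.5, "max_left_rotation": 10, "max_right_rotation": 10}],
            ["flip_left_right", {"probability": 0.5}],
        ],
        "scaling": "tanh",
    }
    augmentor = DatasetAugmentor(dataset_config=example_config)
    dummy = np.zeros((64, 64, 3), dtype=np.uint8)
    augmented = augmentor.augment(dummy, isArray=True)
    print(augmented.shape)  # channels-first (3, 64, 64), values scaled to [-1, 1]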
|
the-stack_0_13757 | from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
from src.modules.distributions import dmol_loss, sample_from_dmol, log_normal_diag
# ----- NN Model Seleciton -----
from .image_networks.densenet16x32 import q_u, p_z, q_z, p_y, p_x
from ...utils.utils import get_shape
# ----- Two Staged VAE -----
class srVAE(nn.Module):
"""
Super-Resolution Variational Auto-Encoder (srVAE).
A Two Staged Visual Processing Variational AutoEncoder.
Author:
Ioannis Gatopoulos.
"""
def __init__(self, x_shape, args, y_shape=(3, 16, 16)):
super().__init__()
self.device = args.device
self.x_shape = x_shape
self.y_shape = (x_shape[0], y_shape[1], y_shape[2])
u_dim = args.u_dim
z_dim = args.z_dim
prior = args.prior
self.u_shape = get_shape(u_dim)
self.z_shape = get_shape(z_dim)
# q(y|x): deterministic "compressed" transformation
self.compressed_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((self.y_shape[1], self.y_shape[2])),
transforms.ToTensor()
])
# p(u)
self.p_u = globals()[prior](self.u_shape)
# q(u | y)
self.q_u = q_u(self.u_shape, self.y_shape)
# p(z | y)
self.p_z = p_z(self.z_shape, (self.y_shape, self.u_shape))
# q(z | x)
self.q_z = q_z(self.z_shape, self.x_shape)
# p(y | u)
self.p_y = p_y(self.y_shape, self.u_shape, args)
# p(x | y, z)
self.p_x = p_x(self.x_shape, (self.y_shape, self.z_shape), args)
# likelihood distribution
self.recon_loss = partial(dmol_loss)
self.sample_distribution = partial(sample_from_dmol)
    def compressed_transformation(self, input):
y = []
for x in input:
y.append(self.compressed_transform(x.cpu()))
return torch.stack(y).to(self.device)
def initialize(self, dataloader):
""" Data dependent init for weight normalization
(Automatically done during the first forward pass).
"""
with torch.no_grad():
x, _ = next(iter(dataloader))
x = x.to(self.device)
output = self.forward(x)
self.calculate_elbo(x, output)
return
@staticmethod
def reparameterize(z_mean, z_log_var):
""" z ~ N(z| z_mu, z_logvar) """
epsilon = torch.randn_like(z_mean)
return z_mean + torch.exp(0.5 * z_log_var) * epsilon
@torch.no_grad()
def generate(self, n_samples=20):
# u ~ p(u)
u = self.p_u.sample(self.u_shape, n_samples=n_samples, device=self.device).to(self.device)
# p(y|u)
y_logits = self.p_y(u)
y_hat = self.sample_distribution(y_logits, nc=self.y_shape[0])
# z ~ p(z|y, u)
z_p_mean, z_p_logvar = self.p_z((y_hat, u))
z_p = self.reparameterize(z_p_mean, z_p_logvar)
# x ~ p(x|y,z)
x_logits = self.p_x((y_hat, z_p))
x_hat = self.sample_distribution(x_logits, nc=self.x_shape[0])
return x_hat, y_hat
@torch.no_grad()
def reconstruct(self, x, **kwargs):
outputs = self.forward(x)
y_hat = self.sample_distribution(outputs.get('y_logits'), nc=self.y_shape[0])
x_hat = self.sample_distribution(outputs.get('x_logits'), nc=self.x_shape[0])
return outputs.get('y'), y_hat, x_hat
@torch.no_grad()
def super_resolution(self, y):
# u ~ q(u| y)
u_q_mean, u_q_logvar = self.q_u(y)
u_q = self.reparameterize(u_q_mean, u_q_logvar)
        # z ~ p(z|y, u)
z_p_mean, z_p_logvar = self.p_z((y, u_q))
z_p = self.reparameterize(z_p_mean, z_p_logvar)
# x ~ p(x|y,z)
x_logits = self.p_x((y, z_p))
x_hat = self.sample_distribution(x_logits)
return x_hat
def calculate_elbo(self, x, outputs, **kwargs):
# unpack variables
y, x_logits, y_logits = outputs.get('y'), outputs.get('x_logits'), outputs.get('y_logits')
u_q, u_q_mean, u_q_logvar = outputs.get('u_q'), outputs.get('u_q_mean'), outputs.get('u_q_logvar')
z_q, z_q_mean, z_q_logvar = outputs.get('z_q'), outputs.get('z_q_mean'), outputs.get('z_q_logvar')
z_p_mean, z_p_logvar = outputs.get('z_p_mean'), outputs.get('z_p_logvar')
# Reconstraction loss
RE_x = self.recon_loss(x, x_logits, nc=self.x_shape[0])
RE_y = self.recon_loss(y, y_logits, nc=self.y_shape[0])
# Regularization loss
log_p_u = self.p_u.log_p(u_q, dim=1)
log_q_u = log_normal_diag(u_q, u_q_mean, u_q_logvar)
KL_u = log_q_u - log_p_u
log_p_z = log_normal_diag(z_q, z_p_mean, z_p_logvar)
log_q_z = log_normal_diag(z_q, z_q_mean, z_q_logvar)
KL_z = log_q_z - log_p_z
# Total lower bound loss
nelbo = - (RE_x + RE_y - KL_u - KL_z).mean()
diagnostics = {
"bpd": (nelbo.item()) / (np.prod(x.shape[1:]) * np.log(2.)),
"nelbo": nelbo.item(),
"RE": - (RE_x + RE_y).mean().item(),
"RE_x": - RE_x.mean().item(),
"RE_y": - RE_y.mean().item(),
"KL": (KL_z + KL_u).mean().item(),
"KL_u": KL_u.mean().item(),
"KL_z": KL_z.mean().item(),
}
return nelbo, diagnostics
def forward(self, x, **kwargs):
""" Forward pass through the inference and the generative model. """
# y ~ f(x) (determinist)
        y = self.compressed_transformation(x)
# u ~ q(u| y)
u_q_mean, u_q_logvar = self.q_u(y)
u_q = self.reparameterize(u_q_mean, u_q_logvar)
        # z ~ q(z| x)
z_q_mean, z_q_logvar = self.q_z(x)
z_q = self.reparameterize(z_q_mean, z_q_logvar)
# x ~ p(x| y, z)
x_logits = self.p_x((y, z_q))
# y ~ p(y| u)
y_logits = self.p_y(u_q)
        # z ~ p(z| y, u)
z_p_mean, z_p_logvar = self.p_z((y, u_q))
return {
'u_q_mean': u_q_mean,
'u_q_logvar': u_q_logvar,
'u_q': u_q,
'z_q_mean': z_q_mean,
'z_q_logvar': z_q_logvar,
'z_q': z_q,
'z_p_mean': z_p_mean,
'z_p_logvar': z_p_logvar,
'y': y,
'y_logits': y_logits,
'x_logits': x_logits
}
|
the-stack_0_13759 | from mollie.api.objects.refund import Refund
from .utils import assert_list_object
PROFILE_ID = "pfl_v9hTwCvYqw"
def test_get_profile_refunds_by_profile_id(client, response):
"""Get refunds relevant to profile by profile id."""
response.get(f"https://api.mollie.com/v2/refunds?profileId={PROFILE_ID}", "refunds_list")
refunds = client.profile_refunds.with_parent_id(PROFILE_ID).list()
assert_list_object(refunds, Refund)
|
the-stack_0_13760 | import sys
import time
import subprocess
def check_for_libportaudio2():
if sys.platform == 'linux':
try:
output = subprocess.run(['apt', 'list', '--installed',
'libportaudio2'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
output = output.stdout.decode('utf-8')
if 'libportaudio2' not in output:
print('\nLibrary "libportaudio2" is missing,\nInstalling...\n')
time.sleep(2)
subprocess.run(['sudo', 'apt', 'install', 'libportaudio2'])
except OSError:
print('Could not install libportaudio2.')
except KeyboardInterrupt:
sys.exit()
check_for_libportaudio2()
|
the-stack_0_13762 | class RadialDistortion():
"""
Mix-in for sensors that use a radial distortion model.
"""
@property
def usgscsm_distortion_model(self):
"""
Expects odtk to be defined. This should be a list containing
the radial distortion coefficients
Returns
-------
: dict
Dictionary containing the usgscsm distortion model
"""
return {
"radial": {
"coefficients" : self.odtk
}
}
class NoDistortion():
"""
Mix-in for sensors and data sets that do not have a distortion model.
"""
@property
def usgscsm_distortion_model(self):
"""
Returns the specification for no distortion in usgscsm.
Returns
-------
: dict
Dictionary containing the usgscsm specification for no distortion.
"""
return {"radial": {"coefficients": [0.0, 0.0, 0.0]}}
|
the-stack_0_13765 | import os
import struct
import numpy as np
"""
Loosely inspired by http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py
which is GPL licensed.
"""
def read(dataset = "training", path = "."):
"""
Python function for importing the MNIST data set. It returns an iterator
of 2-tuples with the first element being the label and the second element
being a numpy.uint8 2D array of pixel data for the given image.
"""
if dataset is "training":
fname_img = os.path.join(path, 'train-images-idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
elif dataset is "testing":
fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
else:
raise(ValueError, "dataset must be 'testing' or 'training'")
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
lbl = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)
get_img = lambda idx: (lbl[idx], img[idx])
# Create an iterator which returns each image in turn
for i in range(len(lbl)): # xrange in python 2.7, range in python 3.6
yield get_img(i)
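# Minimal usage sketch: assumes the four unpacked MNIST idx files live in
# ./data (that path is an assumption, not something this module provides).
if __name__ == "__main__":
    label, pixels = next(read(dataset="training", path="./data"))
    print(label, pixels.shape)  # e.g. 5 (28, 28)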
|
the-stack_0_13767 | #!/usr/bin/env python3
#
# Copyright (c) 2013-2022, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# // Author: Filippov Ilia
from collections import OrderedDict
from enum import Enum, auto
import re
import traceback
class SelfbuildType(Enum):
# not a selfbuild
SINGLE = auto()
# complete selfbuild
SELF = auto()
# first phase of selfbuild only
SELF_PHASE1 = auto()
# second phase of selfbuild only
SELF_PHASE2 = auto()
def alloy_error(line, error_type = 1):
global return_status
if error_type == 1:
return_status = 1
common.error(line, error_type)
def tail_and_save(file_in, file_out, tail = 100):
with open(file_in, 'r') as f_in:
lines = f_in.readlines()[-tail:]
with open(file_out, 'w') as f_out:
f_out.writelines(lines)
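# Illustrative usage (file names are hypothetical): keep only the last 100
# lines of a long build log, e.g. tail_and_save("alloy_build.log", "alloy_build.tail", 100).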
def setting_paths(llvm, ispc, sde):
if llvm != "":
os.environ["LLVM_HOME"]=llvm
if ispc != "":
os.environ["ISPC_HOME"]=ispc
if sde != "":
os.environ["SDE_HOME"]=sde
def get_sde():
sde_exe = ""
PATH_dir = os.environ["PATH"].split(os.pathsep)
if current_OS == "Windows":
sde_n = "sde.exe"
else:
sde_n = "sde"
for counter in PATH_dir:
if os.path.exists(counter + os.sep + sde_n) and sde_exe == "":
sde_exe = counter + os.sep + sde_n
if os.environ.get("SDE_HOME") != None:
if os.path.exists(os.environ.get("SDE_HOME") + os.sep + sde_n):
sde_exe = os.environ.get("SDE_HOME") + os.sep + sde_n
return sde_exe
def check_LLVM(which_LLVM):
answer = []
if which_LLVM[0] == " ":
return answer
p = os.environ["LLVM_HOME"]
for i in range(0,len(which_LLVM)):
if not os.path.exists(p + os.sep + "bin-" + which_LLVM[i] + os.sep + "bin"):
answer.append(which_LLVM[i])
return answer
def try_do_LLVM(text, command, from_validation, verbose=False):
print_debug("Command line: "+command+"\n", True, alloy_build)
if from_validation == True:
text = text + "\n"
print_debug("Trying to " + text, from_validation, alloy_build)
with subprocess.Popen(command, shell=True,universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
for line in proc.stdout:
print_debug(line, not verbose, alloy_build)
proc.wait()
exit_status = proc.returncode
if exit_status != 0:
print_debug("ERROR.\n", from_validation, alloy_build)
alloy_error("can't " + text, 1)
print_debug("DONE.\n", from_validation, alloy_build)
def checkout_LLVM(component, version_LLVM, target_dir, from_validation, verbose):
# Identify the component
GIT_REPO_BASE="https://github.com/llvm/llvm-project.git"
# Identify the version
# An example of using branch (instead of final tag) is the following (for 9.0):
# git: "origin/release/9.x"
if version_LLVM == "trunk":
GIT_TAG="main"
elif version_LLVM == "14_0":
GIT_TAG="origin/release/14.x"
elif version_LLVM == "13_0":
GIT_TAG="llvmorg-13.0.1"
elif version_LLVM == "12_0":
GIT_TAG="llvmorg-12.0.1"
elif version_LLVM == "11_1":
GIT_TAG="llvmorg-11.1.0"
elif version_LLVM == "11_0":
GIT_TAG="llvmorg-11.0.1"
elif version_LLVM == "10_0":
GIT_TAG="llvmorg-10.0.1"
elif version_LLVM == "9_0":
GIT_TAG="llvmorg-9.0.1"
elif version_LLVM == "8_0":
GIT_TAG="llvmorg-8.0.1"
elif version_LLVM == "7_1":
GIT_TAG="llvmorg-7.1.0"
elif version_LLVM == "7_0":
GIT_TAG="llvmorg-7.0.1"
elif version_LLVM == "6_0":
GIT_TAG="llvmorg-6.0.1"
else:
alloy_error("Unsupported llvm version: " + version_LLVM, 1)
try_do_LLVM("clone "+component+" from "+GIT_REPO_BASE+" to "+target_dir+" ",
"git clone "+GIT_REPO_BASE+" "+target_dir,
from_validation, verbose)
if GIT_TAG != "main":
os.chdir(target_dir)
try_do_LLVM("switch to "+GIT_TAG+" tag ",
"git checkout -b "+GIT_TAG+" "+GIT_TAG, from_validation, verbose)
os.chdir("..")
# ISPC uses LLVM dumps for debug output, so build correctly it requires these functions to be
# present in LLVM libraries. In LLVM 5.0 they are not there by default and require explicit enabling.
# In later version this functionality is triggered by enabling assertions.
def get_llvm_enable_dump_switch(version_LLVM):
return " -DLLVM_ENABLE_DUMP=ON "
def get_llvm_disable_assertions_switch(llvm_disable_assertions):
if llvm_disable_assertions == True:
return " -DLLVM_ENABLE_ASSERTIONS=OFF"
else:
return " -DLLVM_ENABLE_ASSERTIONS=ON"
def build_LLVM(version_LLVM, folder, debug, selfbuild, extra, from_validation, force, make, gcc_toolchain_path, llvm_disable_assertions, verbose):
print_debug("Building LLVM. Version: " + version_LLVM + ".\n", from_validation, alloy_build)
# Here we understand what and where do we want to build
current_path = os.getcwd()
llvm_home = os.environ["LLVM_HOME"]
make_sure_dir_exists(llvm_home)
FOLDER_NAME=version_LLVM
version_LLVM = re.sub('\.', '_', version_LLVM)
os.chdir(llvm_home)
if folder == "":
folder = FOLDER_NAME
if debug == True:
folder = folder + "dbg"
LLVM_SRC="llvm-" + folder
LLVM_BUILD="build-" + folder
LLVM_BIN="bin-" + folder
if os.path.exists(LLVM_BIN + os.sep + "bin") and not force:
alloy_error("you have folder " + LLVM_BIN + ".\nIf you want to rebuild use --force", 1)
LLVM_BUILD_selfbuild = LLVM_BUILD + "_temp"
LLVM_BIN_selfbuild = LLVM_BIN + "_temp"
# Selfbuild phase2 assumes that directories are already create, for all other cases, create them.
if selfbuild is SelfbuildType.SINGLE or selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
common.remove_if_exists(LLVM_SRC)
common.remove_if_exists(LLVM_BUILD)
common.remove_if_exists(LLVM_BIN)
if selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
common.remove_if_exists(LLVM_BUILD_selfbuild)
common.remove_if_exists(LLVM_BIN_selfbuild)
print_debug("Using folders: " + LLVM_SRC + " " + LLVM_BUILD + " " + LLVM_BIN + " in " +
llvm_home + "\n", from_validation, alloy_build)
# Starting from MacOS 10.9 Maverics, C and C++ library headers are part of the SDK, not the OS itself.
# System root must be specified during the compiler build, so the compiler knows the default location to search for headers.
# C headers are located at system root location, while C++ headers are part of the toolchain.
# I.e. specifying system root solved C header problem. For C++ headers we enable libc++ build as part of clang build (our own toolchain).
# Note that on Sierra there's an issue with using C headers from High Sierra SDK, which instantiates as compile error:
# error: 'utimensat' is only available on macOS 10.13 or newer
# This is due to using SDK targeting OS, which is newer than current one.
mac_system_root = ""
if current_OS == "MacOS" \
and int(current_OS_version.split(".")[0]) >= 13:
search_path = os.environ["PATH"].split(os.pathsep)
found_xcrun = False
for path in search_path:
if os.path.exists(os.path.join(path, "xcrun")):
found_xcrun = True
if found_xcrun:
mac_system_root = "`xcrun --show-sdk-path`"
else:
alloy_error("Can't find XCode (xcrun tool) - it's required on MacOS 10.9 and newer", 1)
# prepare configuration parameters
llvm_enable_projects = " -DLLVM_ENABLE_PROJECTS=\"clang"
if current_OS == "MacOS" and int(current_OS_version.split(".")[0]) >= 13:
        # Starting with MacOS 10.9 Mavericks, the system doesn't contain headers for the standard C++ library and
        # the default library is libc++, not libstdc++. The headers are part of XCode now. But we are checking out
# headers as part of LLVM source tree, so they will be installed in clang location and clang will be able
# to find them. Though they may not match to the library installed in the system, but seems that this should
# not happen.
# Note, that we can also build a libc++ library, but it must be on system default location or should be passed
# to the linker explicitly (either through command line or environment variables). So we are not doing it
# currently to make the build process easier.
# We either need to explicitly opt-out from using libcxxabi from this repo, or build and use it,
# otherwise a build error will occure (attempt to use just built libcxxabi, which was not built).
# An option to build seems to be a better one.
llvm_enable_projects +=";libcxx;libcxxabi"
if current_OS == "Linux":
# OpenMP is needed for Xe enabled builds.
# Starting from Ubuntu 20.04 libomp-dev package doesn't install omp.h to default location.
llvm_enable_projects +=";openmp"
if extra == True:
llvm_enable_projects +=";compiler-rt;clang-tools-extra"
llvm_enable_projects += "\""
if selfbuild is SelfbuildType.SINGLE or selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
# clone llvm repo
checkout_LLVM("llvm", version_LLVM, LLVM_SRC, from_validation, verbose)
# patch llvm
os.chdir(LLVM_SRC)
patches = glob.glob(os.environ["ISPC_HOME"] + os.sep + "llvm_patches" + os.sep + "*.*")
for patch in patches:
if version_LLVM in os.path.basename(patch):
try_do_LLVM("patch LLVM with patch " + patch + " ", "git apply " + patch, from_validation, verbose)
os.chdir("../")
# configuring llvm and build for first phase of selfbuild
cmakelists_path = LLVM_SRC + "/llvm"
if selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE1:
print_debug("Making selfbuild and use folders " + LLVM_BUILD_selfbuild + " and " +
LLVM_BIN_selfbuild + "\n", from_validation, alloy_build)
os.makedirs(LLVM_BUILD_selfbuild)
os.makedirs(LLVM_BIN_selfbuild)
os.chdir(LLVM_BUILD_selfbuild)
try_do_LLVM("configure release version for selfbuild ",
"cmake -G " + "\"" + generator + "\"" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN_selfbuild +
" -DCMAKE_BUILD_TYPE=Release" +
llvm_enable_projects +
get_llvm_enable_dump_switch(version_LLVM) +
get_llvm_disable_assertions_switch(llvm_disable_assertions) +
" -DLLVM_INSTALL_UTILS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" else "") +
((" -DDEFAULT_SYSROOT=" + mac_system_root) if mac_system_root != "" else "") +
" -DLLVM_TARGETS_TO_BUILD=AArch64\;ARM\;X86" +
" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly" +
" ../" + cmakelists_path,
from_validation, verbose)
try_do_LLVM("build release version for selfbuild ", make, from_validation, verbose)
try_do_LLVM("install release version for selfbuild ", "make install", from_validation, verbose)
os.chdir("../")
# set compiler to use if this is selfbuild
selfbuild_compiler = ""
if selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE2:
selfbuild_compiler = (" -DCMAKE_C_COMPILER=" +llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang " +
" -DCMAKE_CXX_COMPILER="+llvm_home+ "/" + LLVM_BIN_selfbuild + "/bin/clang++ ")
print_debug("Use compiler for selfbuild: " + selfbuild_compiler + "\n", from_validation, alloy_build)
# configure and build for regular build or second phase of selfbuild
if selfbuild is SelfbuildType.SINGLE or selfbuild is SelfbuildType.SELF or selfbuild is SelfbuildType.SELF_PHASE2:
os.makedirs(LLVM_BUILD)
os.makedirs(LLVM_BIN)
os.chdir(LLVM_BUILD)
build_type = "Release" if debug == False else "Debug"
if current_OS != "Windows":
try_do_LLVM("configure " + build_type + " version ",
"cmake -G " + "\"" + generator + "\"" + " -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" +
selfbuild_compiler +
" -DCMAKE_INSTALL_PREFIX=" + llvm_home + "/" + LLVM_BIN +
" -DCMAKE_BUILD_TYPE=" + build_type +
llvm_enable_projects +
get_llvm_enable_dump_switch(version_LLVM) +
get_llvm_disable_assertions_switch(llvm_disable_assertions) +
" -DLLVM_INSTALL_UTILS=ON" +
((" -DGCC_INSTALL_PREFIX=" + gcc_toolchain_path) if gcc_toolchain_path != "" else "") +
((" -DCMAKE_C_COMPILER=" + gcc_toolchain_path+"/bin/gcc") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
((" -DCMAKE_CXX_COMPILER=" + gcc_toolchain_path+"/bin/g++") if gcc_toolchain_path != "" and selfbuild_compiler == "" else "") +
((" -DDEFAULT_SYSROOT=" + mac_system_root) if mac_system_root != "" else "") +
" -DLLVM_TARGETS_TO_BUILD=AArch64\;ARM\;X86" +
" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly" +
" ../" + cmakelists_path,
from_validation, verbose)
else:
try_do_LLVM("configure " + build_type + " version ",
'cmake -Thost=x64 -G ' + '\"' + generator + '\"' + ' -DCMAKE_INSTALL_PREFIX="..\\'+ LLVM_BIN + '" ' +
' -DCMAKE_BUILD_TYPE=' + build_type +
llvm_enable_projects +
get_llvm_enable_dump_switch(version_LLVM) +
get_llvm_disable_assertions_switch(llvm_disable_assertions) +
' -DLLVM_INSTALL_UTILS=ON' +
' -DLLVM_TARGETS_TO_BUILD=AArch64\;ARM\;X86' +
' -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly' +
' -DLLVM_LIT_TOOLS_DIR="C:\\gnuwin32\\bin" ..\\' + cmakelists_path,
from_validation, verbose)
# building llvm
if current_OS != "Windows":
try_do_LLVM("build LLVM ", make, from_validation, verbose)
try_do_LLVM("install LLVM ", "make install", from_validation, verbose)
else:
try_do_LLVM("build LLVM and then install LLVM ", "msbuild INSTALL.vcxproj /V:m /p:Platform=x64 /p:Configuration=" + build_type + " /t:rebuild", from_validation, verbose)
os.chdir(current_path)
def unsupported_llvm_targets(LLVM_VERSION):
prohibited_list = {"6.0":["avx512skx-x8", "avx512skx-x4", "avx512skx-x64", "avx512skx-x32"],
"7.0":["avx512skx-x8", "avx512skx-x4", "avx512skx-x64", "avx512skx-x32"],
"8.0":["avx512skx-x64", "avx512skx-x32"],
"9.0":["avx512skx-x64", "avx512skx-x32"]
}
if LLVM_VERSION in prohibited_list:
return prohibited_list[LLVM_VERSION]
return []
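# Illustrative examples, derived directly from the prohibited_list table above:
#   unsupported_llvm_targets("8.0")  -> ["avx512skx-x64", "avx512skx-x32"]
#   unsupported_llvm_targets("10.0") -> []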
# Split targets into categories: native, sde.
# native - native targets run natively on current hardware.
# sde - native target, which need to be emulated on current hardware.
def check_targets():
result = []
result_sde = []
# check what native targets do we have
if current_OS != "Windows":
if options.ispc_build_compiler == "clang":
cisa_compiler = "clang"
elif options.ispc_build_compiler == "gcc":
cisa_compiler = "g++"
try_do_LLVM("build check_ISA", cisa_compiler + " check_isa.cpp -o check_isa.exe", True)
else:
try_do_LLVM("build check_ISA", "cl check_isa.cpp", True)
# Dictionary mapping hardware architecture to its targets.
# The value in the dictionary is:
# [
# list of targets corresponding to this architecture,
    #   list of other architectures executable on this hardware,
    #   flag for sde to emulate this platform,
    #   flag indicating whether this architecture is supported on the current platform
# ]
target_dict = OrderedDict([
("SSE2", [["sse2-i32x4", "sse2-i32x8"],
["SSE2"], "-p4", False]),
("SSE4", [["sse4-i32x4", "sse4-i32x8", "sse4-i16x8", "sse4-i8x16"],
["SSE2", "SSE4"], "-wsm", False]),
("AVX", [["avx1-i32x4", "avx1-i32x8", "avx1-i32x16", "avx1-i64x4"],
["SSE2", "SSE4", "AVX"], "-snb", False]),
("AVX2", [["avx2-i32x4", "avx2-i32x8", "avx2-i32x16", "avx2-i64x4", "avx2-i8x32", "avx2-i16x16"],
["SSE2", "SSE4", "AVX", "AVX2"], "-hsw", False]),
("KNL", [["avx512knl-x16"],
["SSE2", "SSE4", "AVX", "AVX2", "KNL"], "-knl", False]),
("SKX", [["avx512skx-x16", "avx512skx-x8", "avx512skx-x4", "avx512skx-x64", "avx512skx-x32"],
["SSE2", "SSE4", "AVX", "AVX2", "SKX"], "-skx", False])
])
hw_arch = take_lines("check_isa.exe", "first").split()[1]
if not (hw_arch in target_dict):
alloy_error("Architecture " + hw_arch + " was not recognized", 1)
    # Mark all compatible architectures in the dictionary.
for compatible_arch in target_dict[hw_arch][1]:
target_dict[compatible_arch][3] = True
# Now initialize result and result_sde.
for key in target_dict:
item = target_dict[key]
targets = item[0]
if item[3]:
# Supported natively
result = result + targets
else:
# Supported through SDE
for target in targets:
result_sde = result_sde + [[item[2], target]]
# now check what targets we have with the help of SDE
sde_exists = get_sde()
if sde_exists == "":
        alloy_error("SDE was not found in SDE_HOME or in your PATH.\n" +
                    "To test all platforms please set SDE_HOME to the path containing SDE.\n" +
"Please refer to http://www.intel.com/software/sde for SDE download information.", 2)
return [result, result_sde]
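# Illustrative shape of check_targets()'s return value (actual contents depend on the host CPU):
#   result     -> ["sse2-i32x4", "sse2-i32x8", "sse4-i32x4", ...]                 # natively runnable targets
#   result_sde -> [["-knl", "avx512knl-x16"], ["-skx", "avx512skx-x16"], ...]     # [sde flag, target] pairs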
def build_ispc(version_LLVM, make):
current_path = os.getcwd()
ispc_home = os.environ["ISPC_HOME"]
os.chdir(ispc_home)
make_ispc = "make " + options.ispc_build_compiler + " -j" + options.speed
ISPC_BUILD="build-" + version_LLVM
ISPC_BIN="bin-" + version_LLVM
if not os.path.exists(ISPC_BUILD):
os.makedirs(ISPC_BUILD)
    if not os.path.exists(ISPC_BIN):
os.makedirs(ISPC_BIN)
os.chdir(ISPC_BUILD)
if current_OS != "Windows":
p_temp = os.getenv("PATH")
os.environ["PATH"] = os.environ["LLVM_HOME"] + "/bin-" + version_LLVM + "/bin:" + os.environ["PATH"]
folder = os.environ["LLVM_HOME"] + os.sep + "llvm-"
if options.folder == "":
folder += version_LLVM
if options.debug == True:
folder += "dbg"
        try_do_LLVM("configure ispc build", 'cmake -DCMAKE_INSTALL_PREFIX="../' + ISPC_BIN + '" ' +
                    ' -DCMAKE_BUILD_TYPE=Release ' +
                    ispc_home, True)
try_do_LLVM("build ISPC with LLVM version " + version_LLVM + " ", make_ispc, True)
try_do_LLVM("install ISPC ", "make install", True)
        copyfile(os.path.join(ispc_home, ISPC_BIN, "bin", "ispc"), os.path.join(ispc_home, "ispc"))
os.environ["PATH"] = p_temp
else:
try_do_LLVM("configure ispc build", 'cmake -Thost=x64 -G ' + '\"' + generator + '\"' + ' -DCMAKE_INSTALL_PREFIX="..\\'+ ISPC_BIN + '" ' +
' -DCMAKE_BUILD_TYPE=Release ' +
ispc_home, True)
try_do_LLVM("clean ISPC for building", "msbuild ispc.vcxproj /t:clean", True)
try_do_LLVM("build ISPC with LLVM version " + version_LLVM + " ", "msbuild ispc.vcxproj /V:m /p:Platform=x64 /p:Configuration=Release /t:rebuild", True)
try_do_LLVM("install ISPC ", "msbuild INSTALL.vcxproj /p:Platform=x64 /p:Configuration=Release", True)
        copyfile(os.path.join(ispc_home, ISPC_BIN, "bin", "ispc.exe"), os.path.join(ispc_home, "ispc.exe"))
os.chdir(current_path)
def execute_stability(stability, R, print_version):
global return_status
try:
stability1 = copy.deepcopy(stability)
b_temp = run_tests.run_tests(stability1, [], print_version)
temp = b_temp[0]
time = b_temp[1]
for j in range(0,4):
R[j][0] = R[j][0] + temp[j] # new_runfails, new_compfails, new_passes_runfails, new_passes_compfails
for i in range(0,len(temp[j])):
R[j][1].append(temp[4])
number_of_fails = temp[5]
number_of_new_fails = len(temp[0]) + len(temp[1])
number_of_passes = len(temp[2]) + len(temp[3])
if number_of_fails == 0:
str_fails = ". No fails"
else:
str_fails = ". Fails: " + str(number_of_fails)
if number_of_new_fails == 0:
str_new_fails = ", No new fails"
else:
str_new_fails = ", New fails: " + str(number_of_new_fails)
if number_of_passes == 0:
str_new_passes = "."
else:
str_new_passes = ", " + str(number_of_passes) + " new passes."
if stability.time:
str_time = " " + time + "\n"
else:
str_time = "\n"
print_debug(temp[4][1:-3] + stability1.ispc_flags + str_fails + str_new_fails + str_new_passes + str_time, False, stability_log)
except Exception as e:
print_debug("Exception: " + str(e), False, stability_log)
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback, file=sys.stderr)
print_debug("ERROR: Exception in execute_stability: %s\n" % (sys.exc_info()[1]), False, stability_log)
return_status = 1
'''
R = [[new_runfails,         [new_line, new_line...]],
     [new_compfails,        [new_line, new_line...]],
     [new_passes_runfails,  [new_line, new_line...]],
     [new_passes_compfails, [new_line, new_line...]]]
'''
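# A hypothetical instance of the structure above (test and option names are made up for illustration):
# R = [[["tests/runfail_a.ispc"], [" avx2-i32x8 x86-64 -O2\n"]],
#      [[], []],
#      [[], []],
#      [[], []]]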
def output_test_results(R):
ttt = ["NEW RUNFAILS: ", "NEW COMPFAILS: ", "NEW PASSES RUNFAILS: ", "NEW PASSES COMPFAILS: "]
for j in range(0, 4):
if len(R[j][0]) == 0:
print_debug("NO " + ttt[j][:-2] + "\n", False, stability_log)
else:
print_debug(ttt[j] + str(len(R[j][0])) + "\n", False, stability_log)
to_print = {}
for (fail_name, opt_str) in zip(R[j][0], R[j][1]):
if fail_name not in to_print:
to_print[fail_name] = []
to_print[fail_name].append(opt_str)
# sort
for key in to_print.keys():
to_print[key] = sorted(to_print[key])
# print out
for fail_name in sorted(to_print.keys()):
print_debug("\t" + fail_name + "\n", True, stability_log)
for opt_str in to_print[fail_name]:
print_debug("\t\t\t" + opt_str, True, stability_log)
def concatenate_test_results(R1, R2):
R = [[[],[]],[[],[]],[[],[]],[[],[]]]
for j in range(0, 4):
R[j][0] = R1[j][0] + R2[j][0]
R[j][1] = R1[j][1] + R2[j][1]
return R
def validation_run(only, only_targets, reference_branch, number, update, speed_number, make, perf_llvm, time):
os.chdir(os.environ["ISPC_HOME"])
if current_OS != "Windows":
os.environ["PATH"] = os.environ["ISPC_HOME"] + ":" + os.environ["PATH"]
print_debug("Command: " + ' '.join(sys.argv) + "\n", False, "")
print_debug("Folder: " + os.environ["ISPC_HOME"] + "\n", False, "")
date = datetime.datetime.now()
print_debug("Date: " + date.strftime('%H:%M %d/%m/%Y') + "\n", False, "")
newest_LLVM="13.0"
# *** *** ***
# Stability validation run
# *** *** ***
if ((("stability" in only) == True) or ("performance" in only) == False):
print_debug("\n\nStability validation run\n\n", False, "")
stability = common.EmptyClass()
# stability constant options
stability.save_bin = False
stability.random = False
stability.ispc_flags = options.ispc_flags
stability.compiler_exe = options.compiler_exe
stability.num_jobs = speed_number
stability.verbose = False
stability.time = time
# 1200 is more than default value in run_tests.py (600).
# There's a single test, which requires longer time on AVX2 capable server (Github Action):
# tests/idiv.ispc running for avx512-i8x64 for x86 under SDE.
# For any other tests it should be more than enough.
stability.test_time = 1200
stability.csv = ""
stability.non_interactive = True
stability.update = update
stability.include_file = None
stability.silent = True
stability.in_file = "." + os.sep + f_date + os.sep + "run_tests_log.log"
stability.verify = False
stability.fail_db = "fail_db.txt"
stability.device = None
stability.ispc_output = None
stability.debug_check = False
# stability varying options
stability.target = ""
stability.arch = ""
stability.opt = ""
stability.wrapexe = ""
# prepare parameters of run
[targets_t, sde_targets_t] = check_targets()
rebuild = True
opts = []
archs = []
LLVM = []
targets = []
sde_targets = []
dbg_begin = 0
dbg_total = 1
# parsing option only, update parameters of run
if "-O2" in only:
opts.append("O2")
if "-O1" in only:
opts.append("O1")
if "-O0" in only:
opts.append("O0")
if "debug" in only:
if not ("nodebug" in only):
dbg_begin = 1
dbg_total = 2
if "x86" in only and not ("x86-64" in only):
archs.append("x86")
if "x86-64" in only:
archs.append("x86-64")
if "native" in only:
sde_targets_t = []
for i in ["6.0", "7.0", "8.0", "9.0", "10.0", "11.0", "12.0", "13.0", "14.0", "trunk"]:
if i in only:
LLVM.append(i)
if "current" in only:
LLVM = [" "]
rebuild = False
else:
common.check_tools(1)
if only_targets != "":
only_targets += " "
only_targets_t = only_targets.split(" ")
for i in only_targets_t:
if i == "":
continue
err = True
for j in range(0,len(targets_t)):
if i in targets_t[j]:
targets.append(targets_t[j])
err = False
for j in range(0,len(sde_targets_t)):
if i in sde_targets_t[j][1]:
sde_targets.append(sde_targets_t[j])
err = False
if err == True:
                    alloy_error("You don't have SDE for target " + i, 1)
else:
targets = targets_t
sde_targets = sde_targets_t
if "build" in only:
targets = []
sde_targets = []
only = only + " stability "
# finish parameters of run, prepare LLVM
if len(opts) == 0:
opts = ["O2"]
if len(archs) == 0:
archs = ["x86", "x86-64"]
if len(LLVM) == 0:
LLVM = [newest_LLVM, "trunk"]
need_LLVM = check_LLVM(LLVM)
for i in range(0,len(need_LLVM)):
            build_LLVM(need_LLVM[i], "", False, SelfbuildType.SINGLE, False, True, True, make, options.gcc_toolchain_path, False, False)
        # begin validation run for stability
common.remove_if_exists(stability.in_file)
R = [[[],[]],[[],[]],[[],[]],[[],[]]]
print_debug("\n" + common.get_host_name() + "\n", False, stability_log)
print_debug("\n_________________________STABILITY REPORT_________________________\n", False, stability_log)
ispc_flags_tmp = stability.ispc_flags
for i in range(0,len(LLVM)):
R_tmp = [[[],[]],[[],[]],[[],[]],[[],[]]]
print_version = 2
if rebuild:
build_ispc(LLVM[i], make)
for j in range(0,len(targets)):
stability.target = targets[j]
                # the target might not be supported by the chosen llvm version
if (stability.target in unsupported_llvm_targets(LLVM[i])):
print_debug("Warning: target " + stability.target + " is not supported in LLVM " + LLVM[i] + "\n", False, stability_log)
continue
# now set archs for targets
arch = archs
for i1 in range(0,len(arch)):
for i2 in range(0,len(opts)):
for i3 in range(dbg_begin,dbg_total):
stability.arch = arch[i1]
stability.opt = opts[i2]
stability.ispc_flags = ispc_flags_tmp
if (i3 != 0):
stability.ispc_flags += " -g"
execute_stability(stability, R_tmp, print_version)
print_version = 0
for j in range(0,len(sde_targets)):
stability.target = sde_targets[j][1]
                # the target might not be supported by the chosen llvm version
if (stability.target in unsupported_llvm_targets(LLVM[i])):
print_debug("Warning: target " + stability.target + " is not supported in LLVM " + LLVM[i] + "\n", False, stability_log)
continue
stability.wrapexe = get_sde() + " " + sde_targets[j][0] + " -- "
arch = archs
for i1 in range(0,len(arch)):
for i2 in range(0,len(opts)):
for i3 in range(dbg_begin,dbg_total):
stability.arch = arch[i1]
stability.opt = opts[i2]
stability.ispc_flags = ispc_flags_tmp
if (i3 != 0):
stability.ispc_flags += " -g"
execute_stability(stability, R_tmp, print_version)
print_version = 0
            # Output testing results separately for each tested LLVM version
R = concatenate_test_results(R, R_tmp)
output_test_results(R_tmp)
print_debug("\n", False, stability_log)
print_debug("\n----------------------------------------\nTOTAL:\n", False, stability_log)
output_test_results(R)
print_debug("__________________Watch stability.log for details_________________\n", False, stability_log)
# *** *** ***
# Performance validation run
# *** *** ***
if ((("performance" in only) == True) or ("stability" in only) == False):
print_debug("\n\nPerformance validation run\n\n", False, "")
common.check_tools(1)
performance = common.EmptyClass()
# performance constant options
performance.number = number
performance.config = "." + os.sep + "perf.ini"
performance.path = "." + os.sep
performance.silent = True
performance.output = ""
performance.compiler = ""
performance.ref = "ispc_ref"
if current_OS == "Windows":
performance.ref = "ispc_ref.exe"
performance.perf_target = ""
performance.in_file = "." + os.sep + f_date + os.sep + "performance.log"
# prepare newest LLVM
need_LLVM = check_LLVM([newest_LLVM])
if len(need_LLVM) != 0:
            build_LLVM(need_LLVM[0], "", False, SelfbuildType.SINGLE, False, True, True, make, options.gcc_toolchain_path, True, False)
if perf_llvm == False:
# prepare reference point. build both test and reference compilers
try_do_LLVM("apply git", "git branch", True)
temp4 = take_lines("git branch", "all")
for line in temp4:
if "*" in line:
current_branch = line[2:-1]
stashing = True
            sys.stdout.write("Please don't interrupt the script here! Otherwise you may end up with an out-of-sync git status!\n")
if "No local changes" in take_lines("git stash", "first"):
stashing = False
#try_do_LLVM("stash current branch ", "git stash", True)
try_do_LLVM("checkout reference branch " + reference_branch + " ", "git checkout " + reference_branch, True)
sys.stdout.write(".\n")
build_ispc(newest_LLVM, make)
sys.stdout.write(".\n")
if current_OS != "Windows":
os.rename("ispc", "ispc_ref")
else:
common.remove_if_exists("ispc_ref.exe")
os.rename("ispc.exe", "ispc_ref.exe")
try_do_LLVM("checkout test branch " + current_branch + " ", "git checkout " + current_branch, True)
if stashing:
try_do_LLVM("return current branch ", "git stash pop", True)
sys.stdout.write("You can interrupt script now.\n")
build_ispc(newest_LLVM, make)
else:
# build compiler with two different LLVM versions
if len(check_LLVM([reference_branch])) != 0:
                alloy_error("you don't have an LLVM build called " + reference_branch, 1)
build_ispc(newest_LLVM, make)
os.rename("ispc", "ispc_ref")
build_ispc(reference_branch, make)
# begin validation run for performance. output is inserted into perf()
perf.perf(performance, [])
# dumping gathered info to the file
common.ex_state.dump(alloy_folder + "test_table.dump", common.ex_state.tt)
def Main():
global current_OS
global current_OS_version
global return_status
current_OS_version = platform.release()
if (platform.system() == 'Windows' or 'CYGWIN_NT' in platform.system()) == True:
current_OS = "Windows"
else:
if (platform.system() == 'Darwin'):
current_OS = "MacOS"
else:
current_OS = "Linux"
if (options.build_llvm == False and options.validation_run == False):
parser.print_help()
exit(1)
# set appropriate makefile target
# gcc and g++ options are equal and added for ease of use
if options.ispc_build_compiler != "clang" and \
options.ispc_build_compiler != "gcc":
        alloy_error("unknown option for --ispc-build-compiler: " + options.ispc_build_compiler, 1)
parser.print_help()
exit(1)
# check and normalize selfbuild switches
selfbuild = SelfbuildType.SINGLE
if (options.selfbuild and (options.selfbuild_phase1 or options.selfbuild_phase2)) or (options.selfbuild_phase1 and options.selfbuild_phase2):
alloy_error("Only one of --selfbuild* switches can be used at the same time", 1)
if options.selfbuild:
selfbuild = SelfbuildType.SELF
if options.selfbuild_phase1:
selfbuild = SelfbuildType.SELF_PHASE1
if options.selfbuild_phase2:
selfbuild = SelfbuildType.SELF_PHASE2
setting_paths(options.llvm_home, options.ispc_home, options.sde_home)
if os.environ.get("LLVM_HOME") == None:
alloy_error("you have no LLVM_HOME", 1)
if os.environ.get("ISPC_HOME") == None:
alloy_error("you have no ISPC_HOME", 1)
if options.only != "":
test_only_r = " 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 14.0 trunk current build stability performance x86 x86-64 x86_64 -O0 -O1 -O2 native debug nodebug "
test_only = options.only.split(" ")
for iterator in test_only:
if not (" " + iterator + " " in test_only_r):
alloy_error("unknown option for only: " + iterator, 1)
if current_OS == "Windows" and selfbuild is not SelfbuildType.SINGLE:
alloy_error("Selfbuild is not supported on Windows", 1)
global f_date
f_date = "logs"
common.remove_if_exists(f_date)
os.makedirs(f_date)
global alloy_folder
alloy_folder = os.getcwd() + os.sep + f_date + os.sep
global alloy_build
alloy_build = alloy_folder + "alloy_build.log"
global stability_log
stability_log = alloy_folder + "stability.log"
current_path = os.getcwd()
make = "make -j" + options.speed
if os.environ["ISPC_HOME"] != os.getcwd():
alloy_error("your ISPC_HOME and your current path are different! (" + os.environ["ISPC_HOME"] + " is not equal to " + os.getcwd() +
")\n", 2)
if options.perf_llvm == True:
if options.branch == "main":
options.branch = "trunk"
global generator
if options.generator:
generator = options.generator
else:
if current_OS == "Windows":
generator = "Visual Studio 17 2022"
else:
generator = "Unix Makefiles"
try:
start_time = time.time()
if options.build_llvm:
build_LLVM(options.version, options.folder,
options.debug, selfbuild, options.extra, False, options.force, make, options.gcc_toolchain_path, options.llvm_disable_assertions, options.verbose)
if options.validation_run:
validation_run(options.only, options.only_targets, options.branch,
options.number_for_performance, options.update, int(options.speed),
make, options.perf_llvm, options.time)
elapsed_time = time.time() - start_time
if options.time:
print_debug("Elapsed time: " + time.strftime('%Hh%Mm%Ssec.', time.gmtime(elapsed_time)) + "\n", False, "")
except Exception as e:
print_debug("Exception: " + str(e) + "\n", False, stability_log)
return_status = 1
# Finish execution: time reporting and copy log
try:
os.chdir(current_path)
date_name = "alloy_results_" + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
if os.path.exists(date_name):
            alloy_error("It's forbidden to run alloy twice within the same second; logs are in ./logs", 1)
os.rename(f_date, date_name)
print_debug("Logs are in " + date_name + "\n", False, "")
except Exception as e:
# Do not return non-zero exit code here, as it's not a critical error and testing might be considered successful.
print_debug("Exception: " + str(e), False, stability_log)
if current_OS == "Windows":
# Windows hangs from time to time on exit, so returning without cleanup.
sys.stdout.flush()
os._exit(return_status)
exit(return_status)
###Main###
from optparse import OptionParser
from optparse import OptionGroup
import sys
import os
import errno
import operator
import time
import glob
import platform
import smtplib
import datetime
import copy
import multiprocessing
import subprocess
import re
from shutil import copyfile
# our drivers
import run_tests
import perf
import common
take_lines = common.take_lines
print_debug = common.print_debug
make_sure_dir_exists = common.make_sure_dir_exists
return_status = 0
if __name__ == '__main__':
# parsing options
class MyParser(OptionParser):
def format_epilog(self, formatter):
return self.epilog
examples = ("Examples:\n" +
"Download and build LLVM trunk\n\talloy.py -b\n" +
"Download and build LLVM 13.0. Rewrite LLVM folders\n\talloy.py -b --version=13.0 --force\n" +
"Validation run with LLVM trunk; x86, x86-64; -O2;\nall supported targets; performance\n\talloy.py -r\n" +
"Validation run with all avx targets and sse4-i8x16 without performance\n\talloy.py -r --only=stability --only-targets='avx sse4-i8x16'\n" +
"Validation run with avx2-i32x8, all sse4 and sse2 targets\nand all targets with i32x16\n\talloy.py -r --only-targets='avx2-i32x8 sse4 i32x16 sse2'\n" +
"Stability validation run with LLVM 7.0, 8.0; -O0; x86,\nupdate fail_db.txt with passes and fails\n\talloy.py -r --only='7.0 -O0 stability 8.0 x86' --update-errors=FP\n" +
"Try to build compiler with all LLVM\n\talloy.py -r --only=build\n" +
"Performance validation run with 10 runs of each test and comparing to branch 'old'\n\talloy.py -r --only=performance --compare-with=old --number=10\n" +
"Validation run. Update fail_db.txt with new fails\n\talloy.py -r --update-errors=F\n" +
"Test KNL target (requires sde)\n\talloy.py -r --only='stability' --only-targets='avx512knl-x16'\n")
num_threads="%s" % multiprocessing.cpu_count()
parser = MyParser(usage="Usage: alloy.py -r/-b [options]", epilog=examples)
parser.add_option('-b', '--build-llvm', dest='build_llvm',
help='ask to build LLVM', default=False, action="store_true")
parser.add_option('-r', '--run', dest='validation_run',
help='ask for validation run', default=False, action="store_true")
parser.add_option('-j', dest='speed',
help='set -j for make', default=num_threads)
parser.add_option('--ispc-build-compiler', dest='ispc_build_compiler',
help='set compiler to build ispc binary (clang/gcc)', default="clang")
# options for activity "build LLVM"
llvm_group = OptionGroup(parser, "Options for building LLVM",
"These options must be used with -b option.")
llvm_group.add_option('--version', dest='version',
help='version of llvm to build: 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 trunk. Default: trunk', default="trunk")
llvm_group.add_option('--with-gcc-toolchain', dest='gcc_toolchain_path',
                      help='GCC install dir to use when building clang. It is important to set this when ' +
                           'you have an alternative gcc installation. Note that otherwise gcc from the standard ' +
                           'location will be used, not the one from your PATH', default="")
llvm_group.add_option('--debug', dest='debug',
help='debug build of LLVM', default=False, action="store_true")
llvm_group.add_option('--folder', dest='folder',
help='folder to build LLVM in', default="")
llvm_group.add_option('--selfbuild', dest='selfbuild',
help='make selfbuild of LLVM and clang', default=False, action="store_true")
llvm_group.add_option('--selfbuild-phase1', dest='selfbuild_phase1',
help='make selfbuild of LLVM and clang, first phase only', default=False, action="store_true")
llvm_group.add_option('--selfbuild-phase2', dest='selfbuild_phase2',
help='make selfbuild of LLVM and clang, second phase only', default=False, action="store_true")
llvm_group.add_option('--llvm-disable-assertions', dest='llvm_disable_assertions',
help='build LLVM with assertions disabled', default=False, action="store_true")
llvm_group.add_option('--force', dest='force',
help='rebuild LLVM', default=False, action='store_true')
llvm_group.add_option('--extra', dest='extra',
help='load extra clang tools', default=False, action='store_true')
llvm_group.add_option('--verbose', dest='verbose',
help='verbose output during the build', default=False, action='store_true')
parser.add_option_group(llvm_group)
# options for activity "validation run"
run_group = OptionGroup(parser, "Options for validation run",
"These options must be used with -r option.")
run_group.add_option('--compare-with', dest='branch',
help='set performance reference point. Default: main', default="main")
run_group.add_option('--compiler', dest='compiler_exe',
help='C/C++ compiler binary to use to run tests.', default=None)
run_group.add_option('--ispc-flags', dest='ispc_flags',
help='extra ispc flags.', default="")
run_group.add_option('--number', dest='number_for_performance',
help='number of performance runs for each test. Default: 5', default=5)
run_group.add_option('--update-errors', dest='update',
help='rewrite fail_db.txt file according to received results (F or FP)', default="")
run_group.add_option('--only-targets', dest='only_targets',
help='set list of targets to test. Possible values - all subnames of targets', default="")
run_group.add_option('--time', dest='time',
help='display time of testing', default=False, action='store_true')
run_group.add_option('--only', dest='only',
help='set types of tests. Possible values:\n' +
'-O0, -O1, -O2, x86, x86-64, stability (test only stability), performance (test only performance),\n' +
'build (only build with different LLVM), 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, trunk, native (do not use SDE),\n' +
'current (do not rebuild ISPC), debug (only with debug info), nodebug (only without debug info, default).',
default="")
run_group.add_option('--perf_LLVM', dest='perf_llvm',
help='compare LLVM 8.0 with "--compare-with", default trunk', default=False, action='store_true')
run_group.add_option('--generator', dest='generator',
help='specify cmake generator', default="")
parser.add_option_group(run_group)
# options for activity "setup PATHS"
    setup_group = OptionGroup(parser, "Options for setup",
                        "These options must be used with -r or -b to set up environment variables")
setup_group.add_option('--llvm_home', dest='llvm_home',help='path to LLVM',default="")
setup_group.add_option('--ispc_home', dest='ispc_home',help='path to ISPC',default="")
setup_group.add_option('--sde_home', dest='sde_home',help='path to SDE',default="")
parser.add_option_group(setup_group)
(options, args) = parser.parse_args()
Main()
|
the-stack_0_13771 | #!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
# Copyright (c) 2008-2014, Even Rouault <even dot rouault at mines-paris . org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""Test OGR handling of ESRI Shapefiles.
This is a rewrite of:
http://trac.osgeo.org/gdal/browser/trunk/autotest/ogr/ogr_shape.py.
"""
import os
from osgeo import ogr
import unittest
from autotest2.gcore import gcore_util
from autotest2.ogr import ogr_util
DRIVER = ogr_util.SHAPEFILE_DRIVER
EXT = '.shp'
def setUpModule():
ogr_util.SetupTestEnv()
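# Note: GDAL's Union() returns None when the library was built without GEOS support,
# so HaveGeos() below detects GEOS by checking that a simple point union succeeds.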
def HaveGeos():
point1 = ogr.CreateGeometryFromWkt('POINT(10 20)')
point2 = ogr.CreateGeometryFromWkt('POINT(30 20)')
return point1.Union(point2) is not None
@ogr_util.SkipIfDriverMissing(DRIVER)
class OgrShapefileTest(ogr_util.DriverTestCase):
def setUp(self):
super(OgrShapefileTest, self).setUp(DRIVER, EXT)
def testReadPoint(self):
filepath = ogr_util.GetTestFilePath('shape/point/point.shp')
self.CheckOpen(filepath)
self.assertEqual(self.src.GetLayerCount(), 1)
layer = self.src.GetLayer()
self.assertEqual(layer.GetName(), 'point')
self.assertEqual(layer.GetFeatureCount(), 1)
self.assertEqual(layer.GetExtent(), (1.0, 1.0, 2.0, 2.0))
layer_defn = layer.GetLayerDefn()
self.assertEqual(layer_defn.GetFieldCount(), 1)
self.assertEqual(layer_defn.GetGeomType(), ogr.wkbPoint)
field_defn = layer_defn.GetFieldDefn(0)
self.assertEqual(field_defn.GetName(), 'FID')
self.assertEqual(field_defn.GetTypeName(), 'Integer64')
feature = layer.GetNextFeature()
self.assertEqual(feature.GetField(0), 0.0)
self.assertEqual(feature.GetFieldCount(), 1)
self.assertEqual(feature.GetGeomFieldCount(), 1)
self.assertEqual(feature.GetGeometryRef().ExportToWkt(), 'POINT (1 2)')
geometry_ref = feature.GetGeometryRef()
self.assertEqual(geometry_ref.GetPoint(), (1.0, 2.0, 0.0))
self.assertEqual(geometry_ref.GetPoint_2D(), (1.0, 2.0))
self.assertIsNone(geometry_ref.GetSpatialReference())
# These initial tests are overly complicated.
# TODO(schwehr): Test 01.
# TODO(schwehr): Test 02.
# TODO(schwehr): Test 03.
# TODO(schwehr): Test 04.
# TODO(schwehr): Test 05.
# TODO(schwehr): Test 06.
# TODO(schwehr): Test 07.
# TODO(schwehr): Test 08.
def test09SearchInsidePolyReturnNone(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetSpatialFilterRect(-10, -130, 10, -110)
if HaveGeos():
self.assertEqual(layer.GetFeatureCount(), 0)
else:
self.assertEqual(layer.GetFeatureCount(), 1)
def test10SelectSomePolygonsByRegion(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetSpatialFilterRect(-400, 22, -120, 400)
index = layer.GetLayerDefn().GetFieldIndex('FID')
fids = [feature.GetField(index) for feature in ogr_util.Features(layer)]
self.assertEqual(fids, [0, 4, 8])
def test11SelectAreaAndFidReturnNone(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetAttributeFilter('FID = 5')
layer.SetSpatialFilterRect(-400, 22, -120, 400)
index = layer.GetLayerDefn().GetFieldIndex('FID')
fids = [feature.GetField(index) for feature in ogr_util.Features(layer)]
self.assertFalse(fids)
def test11SelectAreaAndFidReturnsOne(self):
filepath = ogr_util.GetTestFilePath('shape/simplepoly/poly.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
layer.SetAttributeFilter('FID = 4')
layer.SetSpatialFilterRect(-400, 22, -120, 400)
index = layer.GetLayerDefn().GetFieldIndex('FID')
fids = [feature.GetField(index) for feature in ogr_util.Features(layer)]
self.assertEqual(fids, [4])
def test12Multipolygon(self):
filepath = ogr_util.GetTestFilePath('shape/multipolygon/american-samoa.shp')
self.CheckOpen(filepath)
layer = self.src.GetLayer()
feature = layer.GetNextFeature()
geometry = feature.GetGeometryRef()
self.assertEqual(geometry.GetCoordinateDimension(), 2)
self.assertEqual(geometry.GetGeometryName(), 'MULTIPOLYGON')
self.assertEqual(geometry.GetGeometryCount(), 5)
point_counts = [15, 11, 17, 20, 9]
for geom_index in range(5):
poly = geometry.GetGeometryRef(geom_index)
self.assertEqual(poly.GetGeometryName(), 'POLYGON')
self.assertEqual(poly.GetGeometryCount(), 1)
self.assertEqual(poly.GetGeometryRef(0).GetPointCount(),
point_counts[geom_index])
def test13SetFeature(self):
with gcore_util.TestTemporaryDirectory(prefix='shape_setfeature') as tmpdir:
field_settings = (
('real_field', ogr.OFTReal, '1.23', '7.8', 7.8),
('int_field', ogr.OFTInteger, '2', '3', 3),
('str_field', ogr.OFTString, 'original', 'new', 'new')
)
for field_name, field_type, original, new, result in field_settings:
filepath = os.path.join(tmpdir, field_name + 'tmp.shp')
dst = self.driver.CreateDataSource(filepath)
layer = dst.CreateLayer('test_layer')
layer.CreateField(ogr.FieldDefn(field_name, field_type))
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetField(field_name, original)
feature.SetGeometry(ogr.CreateGeometryFromWkt('POINT(4 5)'))
layer.CreateFeature(feature)
dst = None
dst = ogr.Open(filepath, update=True)
layer = dst.GetLayer()
feature = layer.GetFeature(0)
feature.SetField(field_name, new)
new_geom_str = 'POINT (9 0)'
feature.SetGeometry(ogr.CreateGeometryFromWkt(new_geom_str))
self.assertEqual(layer.SetFeature(feature), 0)
dst = None
self.CheckOpen(filepath)
layer = self.src.GetLayer()
feature = layer.GetFeature(0)
self.assertEqual(feature.GetField(0), result)
self.assertEqual(feature.GetGeometryRef().ExportToWkt(), new_geom_str)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13775 | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt") as f:
requires = f.read().splitlines()
setuptools.setup(
name="ondewo-client-utils",
version="0.1.0",
    author="Ondewo GmbH",
author_email="[email protected]",
description="This library contains utilities and base classes for gRPC clients.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ondewo/ondewo-client-utils-python",
packages=[
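        # Keep only the "ondewo" namespace packages (e.g. ondewo.utils); skip anything else find_packages() discovers.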
np for np in filter(lambda n: n.startswith("ondewo.") or n == "ondewo", setuptools.find_packages())
],
package_data={"ondewo.utils": ["py.typed"]},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Topic :: Software Development :: Libraries",
],
python_requires=">=2.6,!=3.0.*",
install_requires=requires,
)
|
the-stack_0_13776 | """
TODO:
Implement a test that proves file configs override rather than overwrite
the defaults. Unfortunately this functionality will have to be implemented
first.
"""
import os
from unittest import mock
import pytest
import requests
from pianodb.pianodb import (number_of_workers, gen_dummy_cmd, get_config,
get_track_features)
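# MockPage below stands in for the response object returned by requests.get: these tests only
# rely on its status_code and content (raw HTML) attributes.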
class MockPage:
def __init__(self, status_code=200, content=''):
self.status_code = status_code
self.content = content if content else """
<!-- https://www.pandora.com/great-jazz-trio/s-wonderful/take-5 -->
<div class="artist_name" title="The Great Jazz Trio">
<span>by</span>
<span itemprop="byArtist">
<a href="/great-jazz-trio" class="artist_link hash">The Great Jazz Trio</a>
</span>
</div>
<div class="album_title" title="'S Wonderful">
<span>on</span>
<a href="/great-jazz-trio/s-wonderful" itemprop="inAlbum" class="album_link hash">'S Wonderful</a>
</div>
<div class="song_features clearfix">
<h2>Features of This Track</h2>
a piano solo<br>
an acoustic bass solo<br>
a groove oriented approach<br>
vamping harmony<br>
<div style="display: none;">
unusual rhythms<br>
</div>
<p>These are just a few of the hundreds of attributes cataloged for this track by the Music Genome Project.</p>
<a href="#" class="show_more">show more</a>
</div>
"""
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_number_of_workers_is_double_cpu_count_plus_one(mp):
"""
Test that ``pianodb`` determines the number of workers to be double the CPU
count plus one.
Note:
This test patches the multiprocessing.cpu_count function to return a
constant that does not depend on the actual CPU count.
"""
mp.cpu_count.return_value = 6
assert number_of_workers() == 13
def test_pianodb_can_generate_dummy_click_commands():
"""
Test that ``pianodb`` can generate dummy instances of ``Click.Command`` that
have the correct ``name``, ``help``, and ``short_help``.
"""
cmd = gen_dummy_cmd('dummy')
assert cmd.name == 'dummy'
assert cmd.help == ("This is an unimplimented pianobar eventcmd handler. "
"Calling this subcommand will do absolutely nothing.")
assert cmd.short_help == 'unimplimented pianobar eventcmd'
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
@mock.patch('builtins.open', create=True)
@mock.patch('tempfile.gettempdir')
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_has_config_defaults(mp, tmpdir, mock_open):
"""
Test that ``pianodb`` has config defaults that are used when getting its
configuration. In the absence of an option defined in a config file the
``pianodb`` config should contain these defaults.
"""
database = '/home/cleesej/.config/pianobar/piano.db'
server_database = '/faketmp/piano.db'
# Pretend we have a CPU count of 4.
mp.cpu_count.return_value = 4
# Pretend we have a fake temp dir.
tmpdir.return_value = '/faketmp'
# Pretend open will read a file with nothing in it.
mock_open.side_effect = [
mock.mock_open(read_data="").return_value,
]
# This is probably a good rationale for having a global default config dict.
expected_config = {
'client': {
'remote': None,
'threshold': 10,
'token': None,
'database': database,
},
'server': {
'interface': 'localhost',
'port': 8000,
'workers': 9,
'database': server_database,
}
}
# overrides: os.environ, os.path, open, multiprocessing.cpu_count
config = get_config()
assert config == expected_config
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
@mock.patch('builtins.open', create=True)
@mock.patch('tempfile.gettempdir')
@mock.patch('pianodb.pianodb.multiprocessing')
def test_pianodb_can_load_configs_from_optional_path(mp, tmpdir, mock_open):
"""
Test that ``pianodb`` can load a config file from a path other than
its own internal default by using the optional ``path`` argument.
"""
# Pretend we have a CPU count of 8.
mp.cpu_count.return_value = 8
# Pretend we have a fake temp dir.
    tmpdir.return_value = '/faketmp'
# Pretend open will read a file with nothing in it.
mock_open.side_effect = [
mock.mock_open(read_data="").return_value,
]
config = get_config(path='/spam/and/eggs')
mock_open.assert_called_once_with('/spam/and/eggs', 'r')
@mock.patch.dict(os.environ, {'HOME': '/home/cleesej'})
def test_pianodb_exits_fatally_without_a_config_file():
"""
Test that ``pianodb`` raises a ``SystemExit`` error with the appropriate
error message when attempting to load a nonexistent config.
"""
with pytest.raises(SystemExit) as err:
config = get_config(path='nonexistent')
assert str(err.value) == 'could not load config'
def test_pianodb_can_get_track_features(monkeypatch):
"""
Test that ``pianodb`` can extract track features from a specially formatted
web page.
"""
def _mock_page(url):
return MockPage()
monkeypatch.setattr(requests, 'get', _mock_page)
expected = [
'a piano solo',
'an acoustic bass solo',
'a groove oriented approach',
'vamping harmony',
'unusual rhythms',
]
assert get_track_features('https://fake-url.tld') == expected
def test_pianodb_track_features_empty_if_status_code_is_not_200(monkeypatch):
"""
Test that ``pianodb`` track features are empty when ``requests`` returns
a ``status_code`` that is not ``200``.
"""
def _mock_page(url):
return MockPage(status_code=418, content='teapot')
monkeypatch.setattr(requests, 'get', _mock_page)
assert get_track_features('https://fake-url.tld') == []
def test_pianodb_track_features_empty_if_requests_connection_error(monkeypatch):
"""
Test that ``pianodb`` track features are empty when ``requests`` raises a
``ConnectionError``.
"""
def _raise_connection_error(url):
raise requests.ConnectionError()
monkeypatch.setattr(requests, 'get', _raise_connection_error)
assert get_track_features('https://fake-url.tld') == []
|
the-stack_0_13777 | from cms.api import create_page
from djangocms_helper.base_test import BaseTestCase
from djangocms_reversion2.models import PageVersion
from djangocms_reversion2.utils import revert_page
from . import testutils
class PageRevisionCreateTestCase(BaseTestCase):
def test_a_revise_page(self):
language = 'en'
page = create_page(title='test_a_revise_page', template='page.html', language=language)
testutils.add_text(page, language, content=u"initial")
page_version = PageVersion.create_version(page.get_draft_object(), language,
version_parent=None, comment='', title='')
self.assertIsNotNone(page_version, msg='PageVersion creation failed')
def test_b_revise_page(self):
language = 'en'
draft = create_page(title='next', template='page.html', language=language).get_draft_object()
# create initial version
pv = PageVersion.create_version(draft, language, version_parent=None, comment='next', title='')
# we have a revised page containing the text 'initial' and add the text 'next'
testutils.add_text(draft, language, content=u"next")
html = testutils.get_html(request=self.get_page_request(draft, self.user))
self.assertIn('next', html, msg='could not add content')
# we check that the the revision does not contain 'next'
draft.refresh_from_db()
html = testutils.get_html(self.get_page_request(draft.page_versions.last().hidden_page, self.user))
self.assertNotIn('next', html, msg='content should not be added to an old revision')
try:
# now we create a new version
pv = PageVersion.create_version(draft, language, version_parent=None, comment='next', title='')
except AssertionError:
self.fail('Expected the page to be dirty, but it\'s clean')
# this version should contain the new text
draft.refresh_from_db()
html = testutils.get_html(request=self.get_page_request(pv.hidden_page, self.user))
self.assertIn('next', html, msg='new content is not in the latest version')
# now we revert to the old date
revert_page(draft.page_versions.first(), language)
html = testutils.get_html(request=self.get_page_request(draft, self.user))
self.assertNotIn('next', html, msg='new content is still in the page')
# def test_b_revise_page_fields(self):
# LANGUAGE = 'en'
# pr = PageRevision.objects.get(page_id=self.page.id, language=LANGUAGE)
# self.assertEqual(pr.revision.comment, self.COMMENT)
# self.assertEqual(pr.revision.user, self.user)
# self.assertEqual(pr.language, self.LANGUAGE)
#
# def test_c_revise_page_page_is_revised(self):
# self.assertTrue(is_revised(self.page, self.LANGUAGE))
# self.assertTrue(PageMarker.objects.filter(language=self.LANGUAGE, page=self.page).exists())
# self.assertEqual(PageMarker.objects.get(language=self.LANGUAGE, page=self.page).page_revision, self.page_revision)
#
# def test_d_revise_page_revise_again_unsuccessful(self):
# new_revision = revise_page(self.page, language=self.LANGUAGE)
# self.assertEqual(new_revision, None)
# self.assertEqual(1, self.page.pagerevision_set.count())
#
#
# class PageRevisionUnmarkPageTestCase(DR2BaseTestCase, TestCase):
# def setUp(self):
# super(PageRevisionUnmarkPageTestCase, self).setUp()
# self.added_plugins = self.add_text(self.page, n=1)
#
# def test_a_page_unmarked(self):
# self.assertFalse(is_revised(self.page, self.LANGUAGE))
# self.assertFalse(PageMarker.objects.filter(language=self.LANGUAGE, page=self.page).exists())
#
#
# class PageRevisionRevertTestCase(DR2BaseTestCase, TestCase):
# def setUp(self):
# super(PageRevisionRevertTestCase, self).setUp()
# self.added_plugins = self.add_text(self.page, n=1)
# self.page_marker = revert_page(self.page_revision, self.request)
# self.initial_html = {
# ph.slot: self.get_current_html(ph) for ph in self.page.placeholders.all()
# }
#
# def test_a_revert_deletion(self):
# print Text.objects.all()
# for pl in self.added_plugins:
# try:
# pl.refresh_from_db()
# self.fail()
# except ObjectDoesNotExist:
# pass
#
# def test_b_revert_auto_revision(self):
# self.assertEqual(2, self.page.pagerevision_set.count())
# auto_revision = self.page.pagerevision_set.latest('pk')
# self.assertEqual(auto_revision.revision.comment, AUTO_REVISION_COMMENT)
#
# def test_c_revert_correct_html(self):
# for placeholder in self.page.placeholders.all():
# slot = placeholder.slot
# html = self.get_current_html(placeholder)
# self.assertEqual(self.initial_html[slot], html, slot)
#
|
the-stack_0_13779 | import os
import numpy as onp
from numpy.testing import assert_allclose
import pytest
from jax import jit, pmap, random, vmap
from jax.lib import xla_bridge
import jax.numpy as np
from jax.scipy.special import logit
import numpyro
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.infer import HMC, MCMC, NUTS
from numpyro.infer.mcmc import hmc
from numpyro.infer.util import initialize_model
from numpyro.util import fori_collect
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('dense_mass', [False, True])
def test_unnormalized_normal_x64(kernel_cls, dense_mass):
true_mean, true_std = 1., 0.5
warmup_steps, num_samples = 1000, 8000
def potential_fn(z):
return 0.5 * np.sum(((z - true_mean) / true_std) ** 2)
init_params = np.array(0.)
kernel = kernel_cls(potential_fn=potential_fn, trajectory_length=8, dense_mass=dense_mass)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(0), init_params=init_params)
hmc_states = mcmc.get_samples()
assert_allclose(np.mean(hmc_states), true_mean, rtol=0.05)
assert_allclose(np.std(hmc_states), true_std, rtol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert hmc_states.dtype == np.float64
def test_correlated_mvn():
# This requires dense mass matrix estimation.
D = 5
warmup_steps, num_samples = 5000, 8000
true_mean = 0.
a = np.tril(0.5 * np.fliplr(np.eye(D)) + 0.1 * np.exp(random.normal(random.PRNGKey(0), shape=(D, D))))
true_cov = np.dot(a, a.T)
true_prec = np.linalg.inv(true_cov)
def potential_fn(z):
return 0.5 * np.dot(z.T, np.dot(true_prec, z))
init_params = np.zeros(D)
kernel = NUTS(potential_fn=potential_fn, dense_mass=True)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(0), init_params=init_params)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples), true_mean, atol=0.02)
assert onp.sum(onp.abs(onp.cov(samples.T) - true_cov)) / D**2 < 0.02
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_logistic_regression_x64(kernel_cls):
N, dim = 3000, 3
warmup_steps, num_samples = 1000, 8000
data = random.normal(random.PRNGKey(0), (N, dim))
true_coefs = np.arange(1., dim + 1.)
logits = np.sum(true_coefs * data, axis=-1)
labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))
def model(labels):
coefs = numpyro.sample('coefs', dist.Normal(np.zeros(dim), np.ones(dim)))
logits = np.sum(coefs * data, axis=-1)
return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)
kernel = kernel_cls(model=model, trajectory_length=8)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(2), labels)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['coefs'], 0), true_coefs, atol=0.22)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['coefs'].dtype == np.float64
def test_uniform_normal():
true_coef = 0.9
num_warmup, num_samples = 1000, 1000
def model(data):
alpha = numpyro.sample('alpha', dist.Uniform(0, 1))
loc = numpyro.sample('loc', dist.Uniform(0, alpha))
numpyro.sample('obs', dist.Normal(loc, 0.1), obs=data)
data = true_coef + random.normal(random.PRNGKey(0), (1000,))
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.run(random.PRNGKey(2), data, collect_warmup=True)
samples = mcmc.get_samples()
assert len(samples['loc']) == num_warmup + num_samples
assert_allclose(np.mean(samples['loc'], 0), true_coef, atol=0.05)
def test_improper_normal():
true_coef = 0.9
def model(data):
alpha = numpyro.sample('alpha', dist.Uniform(0, 1))
loc = numpyro.param('loc', 0., constraint=constraints.interval(0., alpha))
numpyro.sample('obs', dist.Normal(loc, 0.1), obs=data)
data = true_coef + random.normal(random.PRNGKey(0), (1000,))
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup=1000, num_samples=1000)
mcmc.run(random.PRNGKey(0), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['loc'], 0), true_coef, atol=0.05)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_beta_bernoulli_x64(kernel_cls):
warmup_steps, num_samples = 500, 20000
def model(data):
alpha = np.array([1.1, 1.1])
beta = np.array([1.1, 1.1])
p_latent = numpyro.sample('p_latent', dist.Beta(alpha, beta))
numpyro.sample('obs', dist.Bernoulli(p_latent), obs=data)
return p_latent
true_probs = np.array([0.9, 0.1])
data = dist.Bernoulli(true_probs).sample(random.PRNGKey(1), (1000, 2))
kernel = kernel_cls(model=model, trajectory_length=1.)
mcmc = MCMC(kernel, num_warmup=warmup_steps, num_samples=num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p_latent'].dtype == np.float64
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('dense_mass', [False, True])
def test_dirichlet_categorical_x64(kernel_cls, dense_mass):
warmup_steps, num_samples = 100, 20000
def model(data):
concentration = np.array([1.0, 1.0, 1.0])
p_latent = numpyro.sample('p_latent', dist.Dirichlet(concentration))
numpyro.sample('obs', dist.Categorical(p_latent), obs=data)
return p_latent
true_probs = np.array([0.1, 0.6, 0.3])
data = dist.Categorical(true_probs).sample(random.PRNGKey(1), (2000,))
kernel = kernel_cls(model, trajectory_length=1., dense_mass=dense_mass)
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.02)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p_latent'].dtype == np.float64
def test_change_point_x64():
# Ref: https://forum.pyro.ai/t/i-dont-understand-why-nuts-code-is-not-working-bayesian-hackers-mail/696
warmup_steps, num_samples = 500, 3000
def model(data):
alpha = 1 / np.mean(data)
lambda1 = numpyro.sample('lambda1', dist.Exponential(alpha))
lambda2 = numpyro.sample('lambda2', dist.Exponential(alpha))
tau = numpyro.sample('tau', dist.Uniform(0, 1))
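        # Observations before index tau * len(data) get rate lambda1; the rest get lambda2.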
lambda12 = np.where(np.arange(len(data)) < tau * len(data), lambda1, lambda2)
numpyro.sample('obs', dist.Poisson(lambda12), obs=data)
count_data = np.array([
13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57,
11, 19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13,
19, 23, 27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2,
15, 15, 19, 70, 49, 7, 53, 22, 21, 31, 19, 11, 18, 20,
12, 35, 17, 23, 17, 4, 2, 31, 30, 13, 27, 0, 39, 37,
5, 14, 13, 22,
])
kernel = NUTS(model=model)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(4), count_data)
samples = mcmc.get_samples()
tau_posterior = (samples['tau'] * len(count_data)).astype(np.int32)
tau_values, counts = onp.unique(tau_posterior, return_counts=True)
mode_ind = np.argmax(counts)
mode = tau_values[mode_ind]
assert mode == 44
if 'JAX_ENABLE_x64' in os.environ:
assert samples['lambda1'].dtype == np.float64
assert samples['lambda2'].dtype == np.float64
assert samples['tau'].dtype == np.float64
@pytest.mark.parametrize('with_logits', ['True', 'False'])
def test_binomial_stable_x64(with_logits):
# Ref: https://github.com/pyro-ppl/pyro/issues/1706
warmup_steps, num_samples = 200, 200
def model(data):
p = numpyro.sample('p', dist.Beta(1., 1.))
if with_logits:
logits = logit(p)
numpyro.sample('obs', dist.Binomial(data['n'], logits=logits), obs=data['x'])
else:
numpyro.sample('obs', dist.Binomial(data['n'], probs=p), obs=data['x'])
data = {'n': 5000000, 'x': 3849}
kernel = NUTS(model=model)
mcmc = MCMC(kernel, warmup_steps, num_samples)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['p'], 0), data['x'] / data['n'], rtol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p'].dtype == np.float64
def test_improper_prior():
true_mean, true_std = 1., 2.
num_warmup, num_samples = 1000, 8000
def model(data):
mean = numpyro.param('mean', 0.)
std = numpyro.param('std', 1., constraint=constraints.positive)
return numpyro.sample('obs', dist.Normal(mean, std), obs=data)
data = dist.Normal(true_mean, true_std).sample(random.PRNGKey(1), (2000,))
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup, num_samples)
mcmc.run(random.PRNGKey(2), data)
samples = mcmc.get_samples()
assert_allclose(np.mean(samples['mean']), true_mean, rtol=0.05)
assert_allclose(np.mean(samples['std']), true_std, rtol=0.05)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('adapt_step_size', [True, False])
def test_diverging(kernel_cls, adapt_step_size):
data = random.normal(random.PRNGKey(0), (1000,))
def model(data):
loc = numpyro.sample('loc', dist.Normal(0., 1.))
numpyro.sample('obs', dist.Normal(loc, 1), obs=data)
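# step_size=10 is deliberately too large: without step-size adaptation every transition should diverge; with adaptation, divergences should only occur during warmup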
kernel = kernel_cls(model, step_size=10., adapt_step_size=adapt_step_size, adapt_mass_matrix=False)
num_warmup = num_samples = 1000
mcmc = MCMC(kernel, num_warmup, num_samples)
mcmc.run(random.PRNGKey(1), data, extra_fields=['diverging'], collect_warmup=True)
num_divergences = mcmc.get_extra_fields()['diverging'].sum()
if adapt_step_size:
assert num_divergences <= num_warmup
else:
assert_allclose(num_divergences, num_warmup + num_samples)
def test_prior_with_sample_shape():
data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
def schools_model():
mu = numpyro.sample('mu', dist.Normal(0, 5))
tau = numpyro.sample('tau', dist.HalfCauchy(5))
theta = numpyro.sample('theta', dist.Normal(mu, tau), sample_shape=(data['J'],))
numpyro.sample('obs', dist.Normal(theta, data['sigma']), obs=data['y'])
num_samples = 500
mcmc = MCMC(NUTS(schools_model), num_warmup=500, num_samples=num_samples)
mcmc.run(random.PRNGKey(0))
assert mcmc.get_samples()['theta'].shape == (num_samples, data['J'])
@pytest.mark.parametrize('num_chains', [1, 2])
@pytest.mark.parametrize('chain_method', ['parallel', 'sequential', 'vectorized'])
@pytest.mark.parametrize('progress_bar', [True, False])
@pytest.mark.filterwarnings("ignore:There are not enough devices:UserWarning")
def test_empty_model(num_chains, chain_method, progress_bar):
def model():
pass
mcmc = MCMC(NUTS(model), num_warmup=10, num_samples=10, num_chains=num_chains,
chain_method=chain_method, progress_bar=progress_bar)
mcmc.run(random.PRNGKey(0))
assert mcmc.get_samples() == {}
@pytest.mark.parametrize('use_init_params', [False, True])
@pytest.mark.parametrize('chain_method', ['parallel', 'sequential', 'vectorized'])
@pytest.mark.skipif('XLA_FLAGS' not in os.environ, reason='without this mark, we have duplicated tests in Travis')
def test_chain(use_init_params, chain_method):
N, dim = 3000, 3
num_chains = 2
num_warmup, num_samples = 5000, 5000
data = random.normal(random.PRNGKey(0), (N, dim))
true_coefs = np.arange(1., dim + 1.)
logits = np.sum(true_coefs * data, axis=-1)
labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))
def model(labels):
coefs = numpyro.sample('coefs', dist.Normal(np.zeros(dim), np.ones(dim)))
logits = np.sum(coefs * data, axis=-1)
return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)
kernel = NUTS(model=model)
mcmc = MCMC(kernel, num_warmup, num_samples, num_chains=num_chains)
mcmc.chain_method = chain_method
init_params = None if not use_init_params else \
{'coefs': np.tile(np.ones(dim), num_chains).reshape(num_chains, dim)}
mcmc.run(random.PRNGKey(2), labels, init_params=init_params)
samples_flat = mcmc.get_samples()
assert samples_flat['coefs'].shape[0] == num_chains * num_samples
samples = mcmc.get_samples(group_by_chain=True)
assert samples['coefs'].shape[:2] == (num_chains, num_samples)
assert_allclose(np.mean(samples_flat['coefs'], 0), true_coefs, atol=0.21)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('chain_method', [
pytest.param('parallel', marks=pytest.mark.xfail(
reason='jit+pmap does not work in CPU yet')),
'sequential',
'vectorized',
])
@pytest.mark.skipif('CI' in os.environ, reason="Compiling time the whole sampling process is slow.")
def test_chain_inside_jit(kernel_cls, chain_method):
# NB: this feature is useful for consensus MC.
# Caution: compiling time will be slow (~ 90s)
if chain_method == 'parallel' and xla_bridge.device_count() == 1:
pytest.skip('parallel method requires device_count greater than 1.')
warmup_steps, num_samples = 100, 2000
# Here are the settings which are currently supported.
rng_key = random.PRNGKey(2)
step_size = 1.
target_accept_prob = 0.8
trajectory_length = 1.
# Not supported yet:
# + adapt_step_size
# + adapt_mass_matrix
# + max_tree_depth
# + num_warmup
# + num_samples
def model(data):
concentration = np.array([1.0, 1.0, 1.0])
p_latent = numpyro.sample('p_latent', dist.Dirichlet(concentration))
numpyro.sample('obs', dist.Categorical(p_latent), obs=data)
return p_latent
@jit
def get_samples(rng_key, data, step_size, trajectory_length, target_accept_prob):
kernel = kernel_cls(model, step_size=step_size, trajectory_length=trajectory_length,
target_accept_prob=target_accept_prob)
mcmc = MCMC(kernel, warmup_steps, num_samples, num_chains=2, chain_method=chain_method,
progress_bar=False)
mcmc.run(rng_key, data)
return mcmc.get_samples()
true_probs = np.array([0.1, 0.6, 0.3])
data = dist.Categorical(true_probs).sample(random.PRNGKey(1), (2000,))
samples = get_samples(rng_key, data, step_size, trajectory_length, target_accept_prob)
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.02)
def test_extra_fields():
def model():
numpyro.sample('x', dist.Normal(0, 1), sample_shape=(5,))
mcmc = MCMC(NUTS(model), 1000, 1000)
mcmc.run(random.PRNGKey(0), extra_fields=('num_steps', 'adapt_state.step_size'))
samples = mcmc.get_samples(group_by_chain=True)
assert samples['x'].shape == (1, 1000, 5)
stats = mcmc.get_extra_fields(group_by_chain=True)
assert 'num_steps' in stats
assert stats['num_steps'].shape == (1, 1000)
assert 'adapt_state.step_size' in stats
assert stats['adapt_state.step_size'].shape == (1, 1000)
@pytest.mark.parametrize('algo', ['HMC', 'NUTS'])
def test_functional_beta_bernoulli_x64(algo):
warmup_steps, num_samples = 500, 20000
def model(data):
alpha = np.array([1.1, 1.1])
beta = np.array([1.1, 1.1])
p_latent = numpyro.sample('p_latent', dist.Beta(alpha, beta))
numpyro.sample('obs', dist.Bernoulli(p_latent), obs=data)
return p_latent
true_probs = np.array([0.9, 0.1])
data = dist.Bernoulli(true_probs).sample(random.PRNGKey(1), (1000, 2))
init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(2), model, data)
init_kernel, sample_kernel = hmc(potential_fn, algo=algo)
hmc_state = init_kernel(init_params,
trajectory_length=1.,
num_warmup=warmup_steps)
samples = fori_collect(0, num_samples, sample_kernel, hmc_state,
transform=lambda x: constrain_fn(x.z))
assert_allclose(np.mean(samples['p_latent'], 0), true_probs, atol=0.05)
if 'JAX_ENABLE_x64' in os.environ:
assert samples['p_latent'].dtype == np.float64
@pytest.mark.parametrize('algo', ['HMC', 'NUTS'])
@pytest.mark.parametrize('map_fn', [vmap, pmap])
@pytest.mark.skipif('XLA_FLAGS' not in os.environ, reason='without this mark, we have duplicated tests in Travis')
def test_functional_map(algo, map_fn):
if map_fn is pmap and xla_bridge.device_count() == 1:
pytest.skip('pmap test requires device_count greater than 1.')
true_mean, true_std = 1., 2.
warmup_steps, num_samples = 1000, 8000
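# isotropic Gaussian potential (negative log density up to a constant) with the given mean and std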
def potential_fn(z):
return 0.5 * np.sum(((z - true_mean) / true_std) ** 2)
init_kernel, sample_kernel = hmc(potential_fn, algo=algo)
init_params = np.array([0., -1.])
rng_keys = random.split(random.PRNGKey(0), 2)
init_kernel_map = map_fn(lambda init_param, rng_key: init_kernel(
init_param, trajectory_length=9, num_warmup=warmup_steps, rng_key=rng_key))
init_states = init_kernel_map(init_params, rng_keys)
fori_collect_map = map_fn(lambda hmc_state: fori_collect(0, num_samples, sample_kernel, hmc_state,
transform=lambda x: x.z, progbar=False))
chain_samples = fori_collect_map(init_states)
assert_allclose(np.mean(chain_samples, axis=1), np.repeat(true_mean, 2), rtol=0.05)
assert_allclose(np.std(chain_samples, axis=1), np.repeat(true_std, 2), rtol=0.05)
def test_reuse_mcmc_run():
y1 = onp.random.normal(3, 0.1, (100,))
y2 = onp.random.normal(-3, 0.1, (100,))
def model(y_obs):
mu = numpyro.sample('mu', dist.Normal(0., 1.))
sigma = numpyro.sample("sigma", dist.HalfCauchy(3.))
numpyro.sample("y", dist.Normal(mu, sigma), obs=y_obs)
# Run MCMC on the first dataset.
kernel = NUTS(model)
mcmc = MCMC(kernel, 200, 200)
mcmc.run(random.PRNGKey(32), y1)
# Run on data, re-using `mcmc`.
mcmc.run(random.PRNGKey(32), y2)
assert_allclose(mcmc.get_samples()['mu'].mean(), -3., atol=0.1)
def test_reuse_mcmc_pe_gen():
y1 = onp.random.normal(3, 0.1, (100,))
y2 = onp.random.normal(-3, 0.1, (100,))
def model(y_obs):
mu = numpyro.sample('mu', dist.Normal(0., 1.))
sigma = numpyro.sample("sigma", dist.HalfCauchy(3.))
numpyro.sample("y", dist.Normal(mu, sigma), obs=y_obs)
init_params, potential_fn, constrain_fn = initialize_model(random.PRNGKey(0), model,
y1, dynamic_args=True)
init_kernel, sample_kernel = hmc(potential_fn_gen=potential_fn)
init_state = init_kernel(init_params, num_warmup=300, model_args=(y1,))
@jit
def _sample(state_and_args):
hmc_state, model_args = state_and_args
return sample_kernel(hmc_state, (model_args,)), model_args
samples = fori_collect(0, 500, _sample, (init_state, y1),
transform=lambda state: constrain_fn(y1)(state[0].z))
assert_allclose(samples['mu'].mean(), 3., atol=0.1)
# Run on the second dataset, re-using the compiled `_sample` kernel - this should be much faster.
init_state = init_kernel(init_params, num_warmup=300, model_args=(y2,))
samples = fori_collect(0, 500, _sample, (init_state, y2),
transform=lambda state: constrain_fn(y2)(state[0].z))
assert_allclose(samples['mu'].mean(), -3., atol=0.1)
|
the-stack_0_13780 | from torch.nn import LSTM, Linear, BatchNorm1d, Parameter
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NoOp(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
class STFT(nn.Module):
def __init__(
self,
n_fft=4096,
n_hop=1024,
center=False
):
super(STFT, self).__init__()
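# fixed (non-trainable) Hann analysis window, registered as a parameter so it moves with the module across devices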
self.window = nn.Parameter(
torch.hann_window(n_fft),
requires_grad=False
)
self.n_fft = n_fft
self.n_hop = n_hop
self.center = center
def forward(self, x):
"""
Input: (nb_samples, nb_channels, nb_timesteps)
Output:(nb_samples, nb_channels, nb_bins, nb_frames, 2)
"""
nb_samples, nb_channels, nb_timesteps = np.shape(x)
# merge nb_samples and nb_channels for multichannel stft
x = x.reshape(nb_samples*nb_channels, -1)
# compute stft with parameters as close as possible scipy settings
stft_f = torch.stft(
x,
n_fft=self.n_fft, hop_length=self.n_hop,
window=self.window, center=self.center,
normalized=False, onesided=True,
pad_mode='reflect'
)
# reshape back to channel dimension
stft_f = stft_f.contiguous().view(
nb_samples, nb_channels, self.n_fft // 2 + 1, -1, 2
)
return stft_f
class Spectrogram(nn.Module):
def __init__(
self,
power=1,
mono=True
):
super(Spectrogram, self).__init__()
self.power = power
self.mono = mono
def forward(self, stft_f):
"""
Input: complex STFT
(nb_samples, nb_bins, nb_frames, 2)
Output: Power/Mag Spectrogram
(nb_frames, nb_samples, nb_channels, nb_bins)
"""
stft_f = stft_f.transpose(2, 3)
# take the magnitude
stft_f = stft_f.pow(2).sum(-1).pow(self.power / 2.0)
# downmix in the mag domain
if self.mono:
stft_f = torch.mean(stft_f, 1, keepdim=True)
# permute output for LSTM convenience
return stft_f.permute(2, 0, 1, 3)
class OpenUnmix(nn.Module):
def __init__(
self,
n_fft=4096,
n_hop=1024,
input_is_spectrogram=False,
hidden_size=512,
nb_channels=2,
sample_rate=44100,
nb_layers=3,
input_mean=None,
input_scale=None,
max_bin=None,
unidirectional=False,
power=1,
):
"""
Input: (nb_samples, nb_channels, nb_timesteps)
or (nb_frames, nb_samples, nb_channels, nb_bins)
Output: Power/Mag Spectrogram
(nb_frames, nb_samples, nb_channels, nb_bins)
"""
super(OpenUnmix, self).__init__()
self.nb_output_bins = n_fft // 2 + 1
if max_bin:
self.nb_bins = max_bin
else:
self.nb_bins = self.nb_output_bins
self.hidden_size = hidden_size
self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
self.register_buffer('sample_rate', torch.tensor(sample_rate))
if input_is_spectrogram:
self.transform = NoOp()
else:
self.transform = nn.Sequential(self.stft, self.spec)
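# encoder: compress each frame's (channels x cropped frequency bins) spectrogram values down to hidden_size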
self.fc1 = Linear(
self.nb_bins*nb_channels, hidden_size,
bias=False
)
self.bn1 = BatchNorm1d(hidden_size)
if unidirectional:
lstm_hidden_size = hidden_size
else:
lstm_hidden_size = hidden_size // 2
self.lstm = LSTM(
input_size=hidden_size,
hidden_size=lstm_hidden_size,
num_layers=nb_layers,
bidirectional=not unidirectional,
batch_first=False,
dropout=0.4,
)
self.fc2 = Linear(
in_features=hidden_size*2,
out_features=hidden_size,
bias=False
)
self.bn2 = BatchNorm1d(hidden_size)
self.fc3 = Linear(
in_features=hidden_size,
out_features=self.nb_output_bins*nb_channels,
bias=False
)
self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
if input_mean is not None:
input_mean = torch.from_numpy(
-input_mean[:self.nb_bins]
).float()
else:
input_mean = torch.zeros(self.nb_bins)
if input_scale is not None:
input_scale = torch.from_numpy(
1.0/input_scale[:self.nb_bins]
).float()
else:
input_scale = torch.ones(self.nb_bins)
self.input_mean = Parameter(input_mean)
self.input_scale = Parameter(input_scale)
self.output_scale = Parameter(
torch.ones(self.nb_output_bins).float()
)
self.output_mean = Parameter(
torch.ones(self.nb_output_bins).float()
)
def forward(self, x):
# check for waveform or spectrogram
# transform to spectrogram if (nb_samples, nb_channels, nb_timesteps)
# and reduce feature dimensions, therefore we reshape
x = self.transform(x)
nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
mix = x.detach().clone()
# crop
x = x[..., :self.nb_bins]
# shift and scale input to mean=0 std=1 (across all bins)
x += self.input_mean
x *= self.input_scale
# to (nb_frames*nb_samples, nb_channels*nb_bins)
# and encode to (nb_frames*nb_samples, hidden_size)
x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
# normalize every instance in a batch
x = self.bn1(x)
x = x.reshape(nb_frames, nb_samples, self.hidden_size)
# squash range to [-1, 1]
x = torch.tanh(x)
# apply 3-layers of stacked LSTM
lstm_out = self.lstm(x)
# lstm skip connection
x = torch.cat([x, lstm_out[0]], -1)
# first dense stage + batch norm
x = self.fc2(x.reshape(-1, x.shape[-1]))
x = self.bn2(x)
x = F.relu(x)
# second dense stage + batch norm
x = self.fc3(x)
x = self.bn3(x)
# reshape back to original dim
x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
# apply output scaling
x *= self.output_scale
x += self.output_mean
# the output is non-negative, so apply ReLU and use it to gate (mask) the input mixture spectrogram
x = F.relu(x) * mix
return x
|
the-stack_0_13781 | EMBED_SIZE = 200
NUM_LAYERS = 2
LR = 0.0001
MAX_GRAD_NORM = 5.0
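# special vocabulary token ids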
PAD_ID = 0
UNK_ID = 1
START_ID = 2
EOS_ID = 3
CONV_SIZE = 3
# sanity
# BUCKETS = [(55, 50)]
# BATCH_SIZE = 10
# NUM_EPOCHS = 50
# NUM_SAMPLES = 498
# HIDDEN_SIZE = 400
# test
BUCKETS = [(30, 30), (55, 50)]
BATCH_SIZE = 20
NUM_EPOCHS = 3
NUM_SAMPLES = 498
HIDDEN_SIZE = 400
# experiment 1
# BUCKETS = [(16, 28), (31, 28), (51, 28)]
# BATCH_SIZE = 400
# NUM_EPOCHS = 5
# NUM_SAMPLES = 40960
# HIDDEN_SIZE = 400
# experiment 2
# BUCKETS = [(102, 28)]
# BATCH_SIZE = 300
# NUM_EPOCHS = 5
# NUM_SAMPLES = 40960
# HIDDEN_SIZE = 250
|
the-stack_0_13783 | # module
from __future__ import print_function
import argparse
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
import time
import torch.nn as nn
from SSGE import Attack,resnet18
import torchvision
from attack import uap_sgd
import random
import matplotlib.pyplot as plt
import numpy as np
from torchsummary import summary
from resnet import resnet18
from resnet2 import RESNET18
from model import vgg11_bn
from test import clipping_info,loss_cal
import math
class VGG1(nn.Module):
'''
VGG model
'''
def __init__(self, features):
super(VGG1, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 10),
)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3,stride=1, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
def vgg11_bn1(num_classes=10):
"""VGG 11-layer model (configuration "A") with batch normalization"""
return VGG1(make_layers(cfg['A'], batch_norm=True))
parser = argparse.ArgumentParser(description='Deep-Leak')
parser.add_argument('--epsilon', type=float, default=0.3, metavar='EPS', help='L-infinity perturbation limit for PGD attack')
parser.add_argument('--batch-size', '-b', type=int, default=256, metavar='N', help='input batch size for training (default: 256)')
parser.add_argument('--epochs', type=int, default=125, metavar='N', help='number of epochs to train (default: 125)')
parser.add_argument('--no_train', type=int, default=0, metavar='N', help='no training algorithm')
parser.add_argument('--learning-rate', type=float, default=0.02, help='learning rate')
parser.add_argument('--error', type=float, default=0.01, help='error rate')
parser.add_argument('--momentum', type=float, default=0.9, help='learning momentum')
parser.add_argument('--percentage', type=float, default=1, help="fraction of each leaked layer's weights whose indices are sampled for leakage")
parser.add_argument('--lambdas', type=float, default=0.0001, help='weight of the sign-consistency (lasso-style) penalty term')
parser.add_argument('--adv_model', default='./results/baseline_MNIST_classifier.pt', metavar='FILE', help='location of trained classifier')
parser.add_argument('--layer', type=int, default=6, metavar='N', help='only conv/linear layers with index below this value are treated as leaked')
parser.add_argument('--evaluate', type=int, default=1, help='set to 1 to evaluate our trained adversary model in adv_model2/set to 0 to train a model with our method +PGD/else trains with our adversary only')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
print(args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
## normalize layer
class Normalize_layer(nn.Module):
def __init__(self, mean, std):
super(Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1), requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1), requires_grad=False)
def forward(self, input):
return input.sub(self.mean).div(self.std)
mean = [x / 255 for x in [129.3, 124.1, 112.4]]
std = [x / 255 for x in [68.2, 65.4, 70.4]]
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
#transforms.AutoAugment(transforms.AutoAugmentPolicy.CIFAR10),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
attacker = Attack(dataloader=None,
attack_method='pgd', epsilon=args.epsilon)
def lasso_var(var,var1):
"We will use this function for positive and negative half of the distribution"
return (var1.mean() -var).abs().sum()
# Train baseline classifier on clean data
def train_baseline(classifier, adv_classifier, recordf, record,record7,record6,record5,record4,record3,record2,class_opt, device, epoch,lambdas):
classifier.train()
for batch_idx, (data, target) in enumerate(train_loader):
if batch_idx == 16:
break
data, target = data.to(device), target.to(device)
'''output = adv_classifier (data)
pred = output.argmax(dim=1, keepdim=True)
target = pred.view(-1)'''
class_opt.zero_grad() # Update the classifier
loss = F.cross_entropy(classifier(data), target)
loss_term = 0
cc = 0
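# sign-consistency penalty on the leaked indices: pull each selected weight toward the mean of its layer's same-sign weights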
for name, param in classifier.named_modules():
if isinstance(param, nn.Linear) or isinstance(param, nn.Conv2d) :
cc += 1
if cc < args.layer:
loss_term += lambdas * (lasso_var(param.weight.view(-1)[record[cc]][param.weight.view(-1)[record[cc]]>=0],param.weight[param.weight >=0]) + lasso_var(param.weight.view(-1)[record[cc]][param.weight.view(-1)[record[cc]]<0],param.weight[param.weight < 0]))
done = 1
#print(loss_term)
loss += loss_term
loss.backward()
class_opt.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
return loss
# Tests classifier on clean data or attacker output
def test(classifier, attacker1, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = classifier(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct
def functional(classifier, model, attacker1, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = classifier(data)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
output1 = model(data)
pred1 = output1.argmax(dim=1, keepdim=True) # get the index of the max log-probability
pred1= pred1.view(target.size())
test_loss += F.cross_entropy(output, pred1, reduction='sum').item() # sum up batch loss
correct += pred.eq(pred1.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Functional Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return correct
## attacking the classifier with black-box adversary generated from model.
def adv_test(classifier, model,attacker, device, epoch):
classifier.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = data.to(device), target.to(device)
data = attacker.attack_method(
model, data, target)
output = classifier(data)
test_loss += F.cross_entropy(output, target.cuda(), reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
net_f = vgg11_bn(10)
net_f.load_state_dict(torch.load(args.adv_model))
net1 = torch.nn.Sequential(
Normalize_layer(mean,std),
net_f
)
adv_classifier = net1.to(device)
print("hi")
print("Test accuracy of the model" )
corr = test(adv_classifier, attacker, device, epoch=0)
import copy
net_f = vgg11_bn1(10)
classifier2 = torch.nn.Sequential(
Normalize_layer(mean,std),
net_f
)
classifier2 = classifier2.cuda()
class_adv = torch.optim.Adam(classifier2.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(class_adv, milestones=[30,60,90], gamma=0.1)
summary(classifier2, (3, 32, 32))
cc= 0
corr = functional(classifier2, adv_classifier,attacker, device, epoch=0)
count =0
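# re-initialize the substitute model: small random positive batch-norm scales, zero biases, and small uniform conv/linear weights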
for name, module in classifier2.named_modules():
if isinstance(module, nn.BatchNorm2d):
count+=1
module.weight.data.uniform_(0.01, 0.5)
module.bias.data[:] = 0
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
count+=1
module.weight.data.uniform_(-0.05, 0.05)
print(cc,count)
cc+=1
if args.no_train ==1:
for name, module in classifier2.named_modules():
if isinstance(module, nn.BatchNorm2d):
count+=1
module.weight.data[:] = 0
module.bias.data[:] = 0
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
count+=1
module.weight.data[:] = 0
print(cc,count)
cc+=1
recordr = {} ## all bits
recordf = {} ## MSB + 7
record = {} ## only MSB
recordm = {} ## MSB + any number
record7 = {} ## MSB + 6
record6 = {} ## MSB + 5
record5 = {} ## MSB + 4
record4 = {} ## MSB + 3
record3 = {} ## MSB + 2
record2 = {} ## MSB + 1
recorde = {} ## ERROR MSB
# oldperc = torch.tensor([0.5,0.055,0.056,0.055,0.067,0.077,0.078]) # layer-wise percentage
# 80 perc = torch.tensor([0.25,0.05,0.05,0.05,0.1,0.15,0.15])
'''
new:
90: torch.tensor([0.58,0.033,0.056,0.044,0.056,0.067,0.078])
80: torch.tensor([0.3125,0.0625,0.0625,0.0625,0.0875,0.1,0.125])
60: torch.tensor([0.133,0.033,0.033,0.05,0.067,0.12,0.2])
'''
perc = torch.tensor([0.58,0.033,0.056,0.044,0.056,0.067,0.078])
cc = 0
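# for every leaked layer of the victim model, randomly sample a fraction of weight indices and split them into groups by how many bits (beyond the sign/MSB) are assumed recovered, plus an 'error' subset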
for name, module in adv_classifier.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc+=1
if cc < args.layer:
tot = module.weight.data.view(-1).size()[0]
p_tot = int(args.percentage*tot)
step_f= int(p_tot*perc[0])
step_7= int(p_tot*perc[1]) + step_f
step_6 = int(p_tot*perc[2]) + step_7
step_5 = int(p_tot*perc[3]) + step_6
step_4 = int(p_tot*perc[4]) + step_5
step_3 = int(p_tot*perc[5]) + step_4
step_2 = int(p_tot*perc[6]) + step_3
recordr[cc] = torch.Tensor(random.sample(range(0,tot), p_tot)).long()
recordm[cc] = recordr[cc]
recorde[cc] = recordr[cc][0:int(p_tot* args.error)]
print("hi")
print(cc)
print(recordm[cc].size()[0])
recordf[cc] = recordr[cc][0:step_f]
record7[cc] = recordr[cc][step_f:step_7]
record6[cc] = recordr[cc][step_7:step_6]
record5[cc] = recordr[cc][step_6:step_5]
record4[cc] = recordr[cc][step_5:step_4]
record3[cc] = recordr[cc][step_4:step_3]
record2[cc] = recordr[cc][step_3:step_2]
record[cc] = recordr[cc][step_2:]
print(recordf[cc].size()[0]/tot,recordf[cc].size()[0]/tot,record7[cc].size()[0]/tot,record6[cc].size()[0]/tot,record5[cc].size()[0]/tot,
record4[cc].size()[0]/tot,record3[cc].size()[0]/tot,record2[cc].size()[0]/tot,record[cc].size()[0]/tot)
cc= 0
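# copy the victim model's weight signs into the substitute model at the leaked indices, then flip an 'error' fraction to simulate imperfect sign leakage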
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
print(cc)
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
if cc < args.layer:
module.weight.data.view(-1)[recordm[cc]] = torch.empty(recordm[cc].size(0), device=module.weight.device).uniform_(0.001, 0.1)  # indexed in-place uniform_ would act on a copy, so assign explicitly
module.weight.data.view(-1)[recordm[cc]] = module.weight.data.view(-1)[recordm[cc]] * module.weight.data.view(-1)[recordm[cc]].sign() * module1.weight.data.view(-1)[recordm[cc]].clone().sign()
module.weight.data.view(-1)[recorde[cc]] = module.weight.data.view(-1)[recorde[cc]] * (-1)
#module.weight.data.view(-1)[recordm[cc][0:int(recordm[cc].size()[0]*args.error)]] *= -1
total = 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Conv2d):
ss = module.weight.data.size()
total += ss[0]*ss[1]*ss[2]*ss[3]
print(total)
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear):
ss = module.weight.data.size()
total += ss[0]*ss[1]
print(ss[0]*ss[1])
print(total)
corrr = test(classifier2, None, device, epoch=0)
best_acc = 0
t0 = time.time()
print("Attacking the Classifier with white-box PGD" )
adv_test(adv_classifier,adv_classifier,attacker, device, 0)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
best_acc = 0
t0 = time.time()
print("Attacking the Classifier with hammer leak" )
adv_test(adv_classifier,classifier2,attacker, device, 0)
count =0
losses = np.zeros([args.epochs])
if args.evaluate==0:
print('Training the substitute classifier')
# Classification model setup
scheduler.step()
for epoch in range(1, args.epochs + 1):
losses[epoch-1] = train_baseline(classifier2, adv_classifier,recordf,recordm,record7,record6,record5,record4,record3,record2,class_adv, device, epoch,args.lambdas)
classifier2.eval()
if epoch == 109:
args.lambdas = 0
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
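# every few epochs, re-project the leaked weights back onto the victim's signs (keeping the learned magnitudes) and re-apply the simulated bit-error flips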
if (epoch+1)%5 == 0 and epoch < 111:
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
if cc<args.layer:
print(cc)
module.weight.data.view(-1)[recordm[cc]] = module.weight.data.view(-1)[recordm[cc]].abs() * module1.weight.data.view(-1)[recordm[cc]].sign()
module.weight.data.view(-1)[recorde[cc]] = module.weight.data.view(-1)[recorde[cc]] * (-1)
#module.weight.data.view(-1)[recordm[cc][0:int(recordm[cc].size()[0]*args.error)]] *= -1
#module.weight.data.view(-1)[recordf[cc]] = module1.weight.data.view(-1)[recordf[cc]]
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
accs = test(classifier2, None, device, epoch)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
if epoch == 111:
classifier2 = torch.load('nm2.pt')
if best_acc < accs:
best_acc = accs
torch.save(classifier2, 'nm2.pt')
classifier2 = torch.load('nm2.pt')
plt.plot(losses)
plt.xlabel("Iterations")
plt.ylabel("Loss term")
plt.savefig("figure.png")
accs = test(classifier2, None, device, epoch)
_ = functional(classifier2, adv_classifier,attacker, device, epoch=0)
cc= 0
for name, module in classifier2.named_modules():
if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
cc +=1
print(cc)
m=0
for name1, module1 in adv_classifier.named_modules():
if isinstance(module1, nn.Linear) or isinstance(module1, nn.Conv2d):
m+=1
if cc==m:
print((module.weight.data.view(-1).sign() - module1.weight.data.view(-1).sign()).abs().sum())
t0 = time.time()
print("Attacking PGD trained Classifier with Black-box PGD" )
adv_test(adv_classifier,classifier2,attacker, device, 0)
torch.cuda.current_stream().synchronize()
t1= time.time()
print(" Black-PGD Attack Time:",'{} seconds'.format(t1 - t0)) |
the-stack_0_13787 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
from telemetry import decorators
from telemetry import page as page_module
from telemetry import story
from telemetry.page import cache_temperature
from telemetry.testing import browser_test_case
from telemetry.timeline import tracing_config
from tracing.trace_data import trace_data
class CacheTempeartureTests(browser_test_case.BrowserTestCase):
def __init__(self, *args, **kwargs):
super(CacheTempeartureTests, self).__init__(*args, **kwargs)
self._full_trace = None
@contextlib.contextmanager
def captureTrace(self):
tracing_controller = self._browser.platform.tracing_controller
options = tracing_config.TracingConfig()
options.enable_chrome_trace = True
tracing_controller.StartTracing(options)
try:
yield
finally:
self._full_trace = tracing_controller.StopTracing()
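# cache-temperature markers are emitted as blink.console trace events; collect their names for the assertions below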
def traceMarkers(self):
if not self._full_trace:
return set()
chrome_trace = self._full_trace.GetTraceFor(trace_data.CHROME_TRACE_PART)
return set(
event['name']
for event in chrome_trace['traceEvents']
if event['cat'] == 'blink.console')
@decorators.Enabled('has tabs')
def testEnsureAny(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.ANY, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
markers = self.traceMarkers()
self.assertNotIn('telemetry.internal.ensure_diskcache.start', markers)
self.assertNotIn('telemetry.internal.warmCache.start', markers)
@decorators.Enabled('has tabs')
@decorators.Disabled('chromeos')
def testEnsurePCv1Cold(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_COLD, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
markers = self.traceMarkers()
self.assertIn('telemetry.internal.ensure_diskcache.start', markers)
self.assertIn('telemetry.internal.ensure_diskcache.end', markers)
@decorators.Enabled('has tabs')
def testEnsurePCv1WarmAfterPCv1ColdRun(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_COLD, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
previous_page = page
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_WARM, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(
page, self._browser, previous_page)
markers = self.traceMarkers()
self.assertNotIn('telemetry.internal.warmCache.start', markers)
@decorators.Enabled('has tabs')
@decorators.Disabled('chromeos')
def testEnsurePCv1WarmFromScratch(self):
with self.captureTrace():
story_set = story.StorySet()
page = page_module.Page(
'http://google.com', page_set=story_set,
cache_temperature=cache_temperature.PCV1_WARM, name='http://google.com')
cache_temperature.EnsurePageCacheTemperature(page, self._browser)
markers = self.traceMarkers()
self.assertIn('telemetry.internal.warmCache.start', markers)
self.assertIn('telemetry.internal.warmCache.end', markers)
|