filename | text
---|---
the-stack_0_14492 | import numpy as np
import tensorflow as tf
def _to_int32(a):
return np.int32(np.ceil(a))
def extract_patches(detector: tf.keras.models.Model,
img: tf.Tensor,
min_score: float = 0.4,
max_boxes: int = 10):
shape = tf.shape(img)
im_height, im_width = shape[0].numpy(), shape[1].numpy()
result = detector(img[tf.newaxis, ...])
result = {key: value.numpy() for key, value in result.items()}
boxes = result["detection_boxes"][0]
# entities = result["detection_class_entities"]
scores = result["detection_scores"][0]
examples = []
for i in range(min(len(boxes), max_boxes)):
if scores[i] >= min_score:
example = {}
ymin, xmin, ymax, xmax = tuple(boxes[i])
# class_name = entities[i].decode("ascii")
xmin, xmax, ymin, ymax = _to_int32(xmin * im_width), _to_int32(xmax * im_width), _to_int32(
ymin * im_height), _to_int32(ymax * im_height)
tmp = tf.image.crop_to_bounding_box(img, ymin, xmin, ymax - ymin, xmax - xmin)
# example["class_name"] = class_name
example["arr"] = tmp.numpy()
example["score"] = scores[i]
example["bounding_box"] = (xmin, xmax, ymin, ymax)
examples.append(example)
return {
"results": examples,
"height": im_height,
"width": im_width
}
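# Hypothetical usage sketch (not part of the original file): assumes a TF Hub
# object-detection model that returns "detection_boxes" and "detection_scores",
# and an image already decoded to a float tensor. The URL and filename below are illustrative.
#
# import tensorflow_hub as hub
# detector = hub.load("https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1").signatures["default"]
# img = tf.image.convert_image_dtype(
#     tf.image.decode_jpeg(tf.io.read_file("photo.jpg"), channels=3), tf.float32)
# patches = extract_patches(detector, img, min_score=0.5, max_boxes=5)
# print(len(patches["results"]), patches["height"], patches["width"])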
|
the-stack_0_14493 | """
## This script is for running training and testing on the NYU dataset
"""
# %matplotlib inline
""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import argparse
import os
from lib.solver import train_epoch, val_epoch, test_epoch
from lib.sampler import ChunkSampler
from src.v2v_model import V2VModel
from src.v2v_util import V2VVoxelization
from datasets.nyu_hand import NYUDataset_train, NYUDataset
#######################################################################################
# # Some helpers
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch Hand Keypoints Estimation Training')
#parser.add_argument('--resume', 'r', action='store_true', help='resume from checkpoint')
parser.add_argument('--resume', '-r', default=-1, type=int, help='resume after epoch')
args = parser.parse_args()
return args
#######################################################################################
# # Configurations
print('Warning: disable cudnn for batchnorm first, or just use only cuda instead!')
# When we need to resume training, enable randomness to avoid seeing the deterministic
# (augmented) samples many times.
# np.random.seed(1)
# torch.manual_seed(1)
# torch.cuda.manual_seed(1)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
dtype = torch.float
#
args = parse_args()
resume_train = args.resume >= 0
resume_after_epoch = args.resume
save_checkpoint = True
checkpoint_per_epochs = 1
checkpoint_dir = r'./checkpoint_nyu'
start_epoch = 0
epochs_num = 15
batch_size = 6
###############################################################################
# ls '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/joint_data.mat'
###############################################################################
# ls '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/nyu_center/center_train_refined.txt'
#######################################################################################
# # Data, transform, dataset and loader
# Data
print('==> Preparing data ..')
data_dir = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/'
center_dir = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/nyu_center/'
keypoints_num = 21
test_subject_id = 3
cubic_size = 250
# Transform
voxelization_train = V2VVoxelization(cubic_size=250, augmentation=True)
voxelization_val = V2VVoxelization(cubic_size=250, augmentation=False)
# transform_test below needs a voxelizer; mirror the commented test section and reuse the training voxelizer.
voxelize_input = voxelization_train.voxelize
def transform_test(sample):
points, refpoint = sample['points'], sample['refpoint']
input = voxelize_input(points, refpoint)
return torch.from_numpy(input), torch.from_numpy(refpoint.reshape((1, -1)))
def transform_train(sample):
points, keypoints, refpoint = sample['points'], sample['joints'], sample['refpoint']
assert(keypoints.shape[0] == keypoints_num)
input, heatmap = voxelization_train({'points': points, 'keypoints': keypoints, 'refpoint': refpoint})
return (torch.from_numpy(input), torch.from_numpy(heatmap))
def transform_val(sample):
points, keypoints, refpoint = sample['points'], sample['joints'], sample['refpoint']
assert(keypoints.shape[0] == keypoints_num)
input, heatmap = voxelization_val({'points': points, 'keypoints': keypoints, 'refpoint': refpoint})
return (torch.from_numpy(input), torch.from_numpy(heatmap))
# Dataset and loader
train_set = NYUDataset_train(data_dir, center_dir ,transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=3)
#train_num = 1
#train_loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=False, num_workers=6,sampler=ChunkSampler(train_num, 0))
# No separate validation dataset, just use test dataset instead
root = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/test_bin'
val_set = NYUDataset(root, center_dir, transform_test)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=6)
#######################################################################################
# # Model, criterion and optimizer
print('==> Constructing model ..')
net = V2VModel(input_channels=1, output_channels=keypoints_num)
net = net.to(device, dtype)
if device == torch.device('cuda'):
torch.backends.cudnn.enabled = True
cudnn.benchmark = True
print('cudnn.enabled: ', torch.backends.cudnn.enabled)
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters())
#optimizer = optim.RMSprop(net.parameters(), lr=2.5e-4)
#######################################################################################
# # Resume
if resume_train:
# Load checkpoint
epoch = resume_after_epoch
checkpoint_file = os.path.join(checkpoint_dir, 'epoch'+str(epoch)+'.pth')
print('==> Resuming from checkpoint after epoch {} ..'.format(epoch))
assert os.path.isdir(checkpoint_dir), 'Error: no checkpoint directory found!'
assert os.path.isfile(checkpoint_file), 'Error: no checkpoint file of epoch {}'.format(epoch)
checkpoint = torch.load(os.path.join(checkpoint_dir, 'epoch'+str(epoch)+'.pth'))
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1
#######################################################################################
# # Train and Validate
print('==> Training ..')
for epoch in range(start_epoch, start_epoch + epochs_num):
print('Epoch: {}'.format(epoch))
train_epoch(net, criterion, optimizer, train_loader, device=device, dtype=dtype)
val_epoch(net, criterion, val_loader, device=device, dtype=dtype)
if save_checkpoint and epoch % checkpoint_per_epochs == 0:
if not os.path.exists(checkpoint_dir): os.mkdir(checkpoint_dir)
checkpoint_file = os.path.join(checkpoint_dir, 'epoch'+str(epoch)+'.pth')
checkpoint = {
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch
}
torch.save(checkpoint, checkpoint_file)
""
# # Test
# print('==> Testing ..')
# voxelize_input = voxelization_train.voxelize
# evaluate_keypoints = voxelization_train.evaluate
""
# def transform_test(sample):
# points, refpoint = sample['points'], sample['refpoint']
# input = voxelize_input(points, refpoint)
# return torch.from_numpy(input), torch.from_numpy(refpoint.reshape((1, -1)))
# def transform_output(heatmaps, refpoints):
# keypoints = evaluate_keypoints(heatmaps, refpoints)
# return keypoints
# class BatchResultCollector():
# def __init__(self, samples_num, transform_output):
# self.samples_num = samples_num
# self.transform_output = transform_output
# self.keypoints = None
# self.idx = 0
# def __call__(self, data_batch):
# inputs_batch, outputs_batch, extra_batch = data_batch
# outputs_batch = outputs_batch.cpu().numpy()
# refpoints_batch = extra_batch.cpu().numpy()
# keypoints_batch = self.transform_output(outputs_batch, refpoints_batch)
# if self.keypoints is None:
# # Initialize keypoints now that the dimensions are available
# self.keypoints = np.zeros((self.samples_num, *keypoints_batch.shape[1:]))
# batch_size = keypoints_batch.shape[0]
# self.keypoints[self.idx:self.idx+batch_size] = keypoints_batch
# self.idx += batch_size
# def get_result(self):
# return self.keypoints
# print('Test on test dataset ..')
# def save_keypoints(filename, keypoints):
# # Reshape one sample keypoints into one line
# keypoints = keypoints.reshape(keypoints.shape[0], -1)
# np.savetxt(filename, keypoints, fmt='%0.4f')
# test_set = MARAHandDataset(data_dir, center_dir, 'test', test_subject_id, transform_test)
# test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=6)
# test_res_collector = BatchResultCollector(len(test_set), transform_output)
# test_epoch(net, test_loader, test_res_collector, device, dtype)
# keypoints_test = test_res_collector.get_result()
# save_keypoints('./test_res.txt', keypoints_test)
# print('Fit on train dataset ..')
# fit_set = MARAHandDataset(data_dir, center_dir, 'train', test_subject_id, transform_test)
# fit_loader = torch.utils.data.DataLoader(fit_set, batch_size=batch_size, shuffle=False, num_workers=6)
# fit_res_collector = BatchResultCollector(len(fit_set), transform_output)
# test_epoch(net, fit_loader, fit_res_collector, device, dtype)
# keypoints_fit = fit_res_collector.get_result()
# save_keypoints('./fit_res.txt', keypoints_fit)
# print('All done ..')
###############################################################################
# import os
# import numpy as np
# import sys
# import struct
# from torch.utils.data import Dataset
# import scipy.io as scio
###############################################################################
# num_samples = 72757
# world_dim = 3
# joint_num = 21
###############################################################################
# joints_world = np.zeros((num_samples, joint_num, world_dim))
###############################################################################
# keypoint_file = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/joint_data.mat'
###############################################################################
# root = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/'
###############################################################################
# keypointsXYZ_test = scio.loadmat(keypoint_file)["joint_xyz"].astype(np.float32)[0]
###############################################################################
# EVAL_JOINTS = np.array([
# 0, 6, 12, 18, 24,
# 1, 7, 13, 19, 25,
# 4, 10, 15, 21, 26,
# 5, 11, 17, 23, 28,
# 29 ])
###############################################################################
# keypointsXYZ_test = keypointsXYZ_test[::][:,EVAL_JOINTS,:]
###############################################################################
# np.shape(keypointsXYZ_test)
###############################################################################
# np.shape(joints_world)
###############################################################################
# for fileID in range(0, 72757):
# joints_world[fileID] = keypointsXYZ_test[fileID]
# print(os.path.join(root, 'depth_1_{:0>7d}.bin'.format(fileID+1)))
###############################################################################
# ls /V2V-PoseNet/V2V-PoseNet-pytorch/datasets/train_bin/depth_1_0000001.bin
""
|
the-stack_0_14494 | import os
import signal
import time
from dataclasses import dataclass, field
from typing import Any, List, Optional
import gevent
import gevent.util
import structlog
from gevent._tracer import GreenletTracer
from gevent.hub import Hub
from raiden.exceptions import RaidenUnrecoverableError
LIBEV_LOW_PRIORITY = -2
LIBEV_HIGH_PRIORITY = 2
log = structlog.get_logger(__name__)
def enable_gevent_monitoring_signal() -> None:
"""Install a signal handler for SIGUSR1 that executes gevent.util.print_run_info().
This can help evaluating the gevent greenlet tree.
See http://www.gevent.org/monitoring.html for more information.
Usage:
pytest [...]
# while test is running (or stopped in a pdb session):
kill -SIGUSR1 $(pidof -x pytest)
"""
def on_signal(signalnum: Any, stack_frame: Any) -> None: # pylint: disable=unused-argument
gevent.util.print_run_info()
if os.name == "nt":
# SIGUSR1 not supported on Windows
return
signal.signal(signal.SIGUSR1, on_signal)
def limit_thread_cpu_usage_by_time() -> None:
"""This will enable Gevent's monitoring thread, and if a Greenlet uses the
CPU for longer than `max_blocking_time` it will be killed.
This will result in the whole process being killed, since exceptions are
propagated to the top level. The goal here is to detect slow functions that
have to be optimized.
"""
gevent.config.monitor_thread = True
gevent.config.max_blocking_time = 10.0
# The monitoring thread will use the trace api just like the TraceSampler
# and the SwitchMonitoring. Sadly there is no API to uninstall the thread,
# but this should not be a problem.
monitor_thread = gevent.get_hub().start_periodic_monitoring_thread()
# This code must not use the tracer from the monitor_thread because calls
# to `did_block_hub` will reset its internal state. If two threads use the
# same underlying tracer false positives will happen, because the switch
# counter will be artificially reset.
greenlet_tracer = GreenletTracer()
def kill_offender(hub: Hub) -> None:
if greenlet_tracer.did_block_hub(hub):
active_greenlet = greenlet_tracer.active_greenlet
msg = ""
if monitor_thread._tracer.active_greenlet != active_greenlet:
msg = (
f"Mismatch values for the active_greenlet among the "
f"monitor_thread and deubgging tracer, this either means "
f"there is a bug in the trace chain (the wrong values are "
f"forwarded), or that one of the trace functions was wrongly "
f"uninstalled. Active greenlets "
f"monitor_thread={monitor_thread._tracer.active_greenlet} "
f"debug_tracer={active_greenlet}."
)
hub.loop.run_callback(
lambda: active_greenlet.throw(
RaidenUnrecoverableError(
f"A greenlet used the CPU for longer than "
f"{gevent.config.max_blocking_time} seconds, killing it.{msg}"
)
)
)
monitor_thread.add_monitoring_function(kill_offender, gevent.config.max_blocking_time)
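# Hypothetical usage sketch (assumption, not from the original module): both helpers
# are intended to be called once during process startup, before application greenlets spawn.
#
# enable_gevent_monitoring_signal()
# limit_thread_cpu_usage_by_time()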
@dataclass
class IdleMeasurement:
before_poll: float
after_poll: float
@dataclass
class Idle:
""" Measures how much time the thread waited on the libev backend. """
measurement_interval: float
before_poll: Optional[float] = None
last_print: float = field(init=False, default_factory=time.time)
measurements: List[IdleMeasurement] = field(init=False, default_factory=list)
def prepare_handler(self) -> None:
"""The prepare handler executed before the call to the polling backend
(e.g. select/epoll).
Note:
- Gevent uses a prepare handler to execute deferred callbacks. This
means there will be some work done with this type of handler that
must not be added to the idle time. To avoid counting the time spent on
the deferred callbacks the prepare_handler must be installed with a
low priority, so that it executes after the gevent's callbacks.
"""
self.before_poll = time.time()
def check_handler(self) -> None:
"""Check handler executed after the poll backend returns.
Note:
- For each of the watchers in the ready state there will be a callback,
which will do work related to the watcher (e.g. read from a socket).
This time must not be accounted for in the Idle timeout, therefore
this handler must have a high priority.
"""
curr_time = time.time()
# It is possible for the check_handler to be executed before the
# prepare_handler, this happens when the watchers are installed by a
# greenlet that was switched onto because of IO (IOW, Idle.enable is
# called while the event loop is executing watchers, after the `poll`)
if self.before_poll is not None:
self.measurements.append( # pylint: disable=no-member
IdleMeasurement(self.before_poll, curr_time)
)
# keep at least one measurement, this will tell if the code is
# blocking for an extended period of time.
while len(self.measurements) > 1 and self.running_interval > self.measurement_interval:
self.measurements.pop() # pylint: disable=no-member
if curr_time - self.last_print >= self.measurement_interval:
self.log()
self.last_print = curr_time
def enable(self) -> None:
loop = gevent.get_hub().loop
loop.prepare(priority=LIBEV_LOW_PRIORITY).start(self.prepare_handler)
loop.check(priority=LIBEV_HIGH_PRIORITY).start(self.check_handler)
@property
def measurements_start(self) -> float:
return self.measurements[0].before_poll
@property
def measurements_end(self) -> float:
return self.measurements[-1].after_poll
@property
def running_interval(self) -> float:
"""The number of seconds idled by this thread.
This will take into account the measurements frequency. Ideally the
measurements would happen exactly every `measurement_interval` seconds,
however that depends on the existing load for the given thread; if
the event loop doesn't run often enough the running_interval will be
larger than the target `measurement_interval`.
"""
return self.measurements_end - self.measurements_start
@property
def idled(self) -> float:
""" The amount of seconds the thread idled. """
return sum(interval.after_poll - interval.before_poll for interval in self.measurements)
@property
def idled_pct(self) -> float:
""" The percentage of time the thread idled, waiting on the event loop. """
return self.idled / self.running_interval
@property
def context_switches(self) -> int:
""" The number of context switches done for the past `measurement_interval`. """
return len(self.measurements)
def log(self) -> None:
if not self.measurements:
log.debug("No idle data", context_switches=self.context_switches)
return
is_blocking = (
len(self.measurements) == 1 and self.running_interval > self.measurement_interval
)
if is_blocking:
msg = "Blocking function, there is not a lot of idle time"
else:
msg = "Idle"
log.debug(
msg,
start=self.measurements_start,
context_switches=self.context_switches,
idled=self.idled,
interval=self.running_interval,
idle_pct=self.idled_pct,
)
def __bool__(self) -> bool:
return bool(self.measurements)
def __str__(self) -> str:
if not self.measurements:
return ""
return (
f"The thread had {self.context_switches} context_switches, and "
f"idled {self.idled_pct}% of the time."
)
IDLE = Idle(10)
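# Hypothetical usage sketch (assumption, not from the original module): install the
# watchers on the current hub and the module-level IDLE instance will start logging
# idle statistics roughly every 10 seconds.
#
# IDLE.enable()
# ...
# if IDLE:
#     log.debug("idle summary", summary=str(IDLE))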
|
the-stack_0_14499 | import battlecode as bc
import behaviour_tree as bt
import random
import units
class Knight(units.Unit):
"""The container for the knight unit."""
def __init__(self, unit, gc):
super().__init__(unit, gc)
self._targeted_enemy = None
def generate_tree(self):
"""Generates the tree for the knight."""
tree = bt.FallBack()
# Attack or chase enemies
enemy_handling = bt.Sequence()
enemy_visible = self.EnemyVisible(self)
enemy_fallback = bt.FallBack()
enemy_attack = bt.Sequence()
enemy_adjacent = self.EnemyAdjacent(self)
attack = self.Attack(self)
enemy_attack.add_child(enemy_adjacent)
enemy_attack.add_child(attack)
enemy_javelin = bt.Sequence()
can_javelin = self.CanJavelin(self)
javelin = self.Javelin(self)
move_towards_enemy = self.MoveTowardsEnemy(self)
enemy_javelin.add_child(can_javelin)
enemy_javelin.add_child(javelin)
enemy_javelin.add_child(move_towards_enemy)
enemy_chase = bt.Sequence()
enemy_chase.add_child(move_towards_enemy)
enemy_chase.add_child(enemy_attack)
enemy_fallback.add_child(enemy_attack)
enemy_fallback.add_child(enemy_javelin)
enemy_fallback.add_child(enemy_chase)
enemy_handling.add_child(enemy_visible)
enemy_handling.add_child(enemy_fallback)
tree.add_child(enemy_handling)
# Random movement
move_randomly = self.MoveRandomly(self)
tree.add_child(move_randomly)
return tree
##################
# ENEMY HANDLING #
##################
class EnemyVisible(bt.Condition):
"""Check if there is an enemy close to the knight."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def condition(self):
knight = self.__outer.unit()
range = knight.vision_range
location = knight.location.map_location()
team = knight.team
enemy_team = bc.Team.Red if team == bc.Team.Blue else bc.Team.Blue
nearby_units = self.__outer._gc.sense_nearby_units_by_team(location, range, enemy_team)
# No enemy visible
if not nearby_units:
return False
# Look for the enemy closest to the knight with lowest health
best_enemy = nearby_units[0]
best_enemy_distance = location.distance_squared_to(best_enemy.location.map_location())
for unit in nearby_units:
enemy_distance = location.distance_squared_to(unit.location.map_location())
if enemy_distance < best_enemy_distance:
best_enemy = unit
best_enemy_distance = enemy_distance
elif enemy_distance == best_enemy_distance:
if unit.health < best_enemy.health:
best_enemy = unit
best_enemy_distance = enemy_distance
self.__outer._targeted_enemy = best_enemy.id
return True
class EnemyAdjacent(bt.Condition):
"""Check if there is an enemy adjacent to the knight."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def condition(self):
location = self.__outer.unit().location
enemy_location = self.__outer.get_enemy_unit(self.__outer._targeted_enemy).location
return location.is_adjacent_to(enemy_location)
class Attack(bt.Action):
"""Attacks the adjacent enemy."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
knight = self.__outer.unit()
if not enemy:
self._status = bt.Status.FAIL
else:
if self.__outer._gc.is_attack_ready(knight.id) and self.__outer._gc.can_attack(knight.id, enemy.id):
self.__outer._gc.attack(knight.id, enemy.id)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.RUNNING
class CanJavelin(bt.Condition):
"""Check if the knight can perform a javelin attack."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def condition(self):
knight = self.__outer.unit()
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
if knight.research_level < 3:
return False
if not enemy:
return False
distance = knight.location.map_location().distance_squared_to(enemy.location.map_location())
return distance <= knight.ability_range()
class Javelin(bt.Action):
"""Perform the javelin attack."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
knight = self.__outer.unit()
if not enemy:
self._status = bt.Status.FAIL
else:
if self.__outer._gc.is_javelin_ready(knight.id) and self.__outer._gc.can_javelin(knight.id, enemy.id):
self.__outer._gc.javelin(knight.id, enemy.id)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.RUNNING
class MoveTowardsEnemy(bt.Action):
"""Moves in the direction of the visible enemy."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
enemy = self.__outer.get_enemy_unit(self.__outer._targeted_enemy)
knight = self.__outer.unit()
if not enemy:
self._status = bt.Status.FAIL
else:
enemy_direction = knight.location.map_location().direction_to(enemy.location.map_location())
if self.__outer._gc.is_move_ready(knight.id) and self.__outer._gc.can_move(knight.id, enemy_direction):
self.__outer._gc.move_robot(knight.id, enemy_direction)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.FAIL
#################
# MOVE RANDOMLY #
#################
class MoveRandomly(bt.Action):
"""Move in some random direction."""
def __init__(self, outer):
super().__init__()
self.__outer = outer
def action(self):
random_dir = random.choice(list(bc.Direction))
knight = self.__outer.unit()
if self.__outer._gc.is_move_ready(knight.id) and self.__outer._gc.can_move(knight.id, random_dir):
self.__outer._gc.move_robot(knight.id, random_dir)
self._status = bt.Status.SUCCESS
else:
self._status = bt.Status.FAIL
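# Hypothetical usage sketch (assumptions: `unit` and `gc` come from the battlecode
# game loop, and the behaviour_tree nodes expose a run() entry point -- the exact
# bt API is not shown in this file):
#
# knight = Knight(unit, gc)
# tree = knight.generate_tree()
# tree.run()  # tick the tree once per turn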
|
the-stack_0_14503 | import ast
import os
import shutil
from distutils.dir_util import copy_tree
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import load_model
import tensorflow_hub as hub
from tqdm import tqdm
class PseudoLabelGenerator:
"""
Class to generate pseudo labels for unlabeled images using a trained model.
Arguments:
model_path (str): location of the h5 tensorflow model to use
train_data_path (str): folder which holds training data
unlabeled_path (str): folder which holds unlabeled data
pseudo_data_path (str): folder to store training data and pseudo data combined
output_folder (str): folder to store outputs
csv_filename (str): name of csv file
"""
def __init__(self, model_path="model.h5", train_data_path="data/image_dataset/train",
unlabeled_path="data/unlabeled", pseudo_data_path="data/train_ssl",
output_folder="outputs", csv_filename="data.csv"):
self.train_data_path = train_data_path
self.unlabeled_path = unlabeled_path
self.pseudo_data_path = pseudo_data_path
self.output_folder = output_folder
self.csv_path = os.path.join(self.output_folder, csv_filename)
# Load model
self.model = load_model(model_path, compile=False, custom_objects={"KerasLayer": hub.KerasLayer})
print("Loaded model.")
# Make new output folder
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
# Make dictionary for classes and their index
self.class_names = sorted(os.listdir(self.train_data_path))
self.class_dict = {cat: i for (i, cat) in enumerate(self.class_names)}
def _load_img(self, path, target_size=(299, 299)):
"""
Load an image from a given path and normalize it
Arguments:
path (list): Input image path
target_size (tuple): Size of image
Returns:
np.array: Numpy array of the data
"""
# Read image
bits = tf.io.read_file(path)
image = tf.image.decode_jpeg(bits, channels=3)
# Resize
image = tf.image.resize(image, size=[*target_size])
image = tf.reshape(image, [*target_size, 3])
image = tf.cast(image, tf.uint8)
# Normalize [0, 1]
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image.numpy()
return image
def _plot_data(self, predictions, name, output_path):
"""
Plots a bar plot and saves it to a file.
Arguments:
predictions (list): List of predictions
name (str): Title of the plot
output_path (str): Save file using this name
"""
predictions = sorted(predictions)
samples = list(range(len(predictions)))
plt.bar(samples, predictions, color='g')
plt.axhline(y=0.5, color='r', linestyle='--')
plt.title(name, size=16)
plt.xlabel("Number of unlabelled images", size=16)
plt.ylim([0.0, 1.0])
plt.ylabel("Probability", size=16)
plt.tick_params(labelright=True)
plt.savefig(output_path, dpi=100)
plt.clf() # clear buffer, otherwise plot overlap!
def plot_confidence_scores(self, per_class=True, overall=True):
"""
Generate bar plots for highest confidence predictions per class and overall and save them.
Arguments:
per_class (bool): make bar plots per class or not
overall (bool): make overall bar plot or not
"""
dt = pd.read_csv(self.csv_path)
dt['All Class Predictions List'] = dt['All Class Predictions'].apply(
lambda x: ast.literal_eval(x))
raw_predictions_ = dt[["Highest Confidence"]].values
raw_predictions = [pred[0] for pred in raw_predictions_]
raw_predictions_all_ = dt[['All Class Predictions List']].values
raw_predictions_all = [pred[0] for pred in raw_predictions_all_]
# Plot graph for highest confidence pseudo labels for each class
if per_class:
for idx, cat in enumerate(self.class_names):
predictions = [pred[idx] for pred in raw_predictions_all]
title = "Confidences for the class: {}".format(cat)
path = "{}/{}_confidences.png".format(
self.output_folder, cat)
self._plot_data(predictions, title, path)
# Plot graph for highest confidence pseudo labels for all unlabeled images
if overall:
self._plot_data(raw_predictions,
name="Highest confidence pseudo labels",
output_path="{}/highest_confidence_predictions.png".format(
self.output_folder))
def _make_dataset(self, filenames, batch_size):
def parse_image(filename):
image = tf.io.read_file(filename)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [299, 299])
image = tf.cast(image, tf.uint8)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def configure_for_performance(ds):
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return ds
filenames_ds = tf.data.Dataset.from_tensor_slices(filenames)
images_ds = filenames_ds.map(parse_image,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = configure_for_performance(images_ds)
return ds
def move_unlabeled_images(self, threshold=None):
"""
Split unlabeled images into folders of the training data based on pseudo labels. A new training dataset is
created with the labeled and pseudo labeled data.
Arguments:
threshold (float): Discard images with prediction below this confidence, default is None.
"""
# Copy the training/labeled data to the destination folder where
# we will also store the pseudo labels.
copy_tree(self.train_data_path, self.pseudo_data_path)
dt = pd.read_csv(self.csv_path)
filepaths = dt[["Filepaths"]].values
predicted_class = dt[["Predicted Class"]].values
raw_predictions = dt[["Highest Confidence"]].values
for proba, y, path in zip(raw_predictions, predicted_class, filepaths):
# The results variable should be the same as the class category
for class_name, index in self.class_dict.items():
if threshold:
# For thresholding predictions
if index == y and proba >= threshold:
shutil.copy(os.path.join(self.unlabeled_path, str(path[0])),
os.path.join(self.pseudo_data_path, class_name))
else:
# For hard predictions
if index == y:
shutil.copy(os.path.join(self.unlabeled_path, str(path[0])),
os.path.join(self.pseudo_data_path, class_name))
print("Moved unlabeled images to their pseudo label categories.")
def generate_pseudolabel_data(self, plot_confidences=False, threshold=None, move_images=False, batch_size=32):
""" Use trained model to make pseudo labels and save them into a csv file. Also possible to plot the results
and move the unlabeled images directly to the category corresponding to their pseudo label.
Arguments:
plot_confidences (boolean): Whether to plot confidence graphs for raw confidences and per class confidences.
threshold (float): Discard images with prediction below this confidence, default is None. Only used
if move_images is True.
move_images (bool): Move images into categories or not
batch_size (int): Batch size while making predictions
Side effects:
Writes a CSV of pseudo labels to the output folder; if move_images is True,
pseudo_data_path will contain both the labeled and the pseudo labeled images.
"""
print("Generating pseudo labels...")
# Generate pseudo labels
unlabeled_image_paths = os.listdir(self.unlabeled_path)
print("There are {} unlabeled images.".format(
len(unlabeled_image_paths)))
raw_predictions = [] # single confidence value of predicted class
predicted_class = [] # predicted class index
raw_predictions_all = [] # confidences for all classes
unlabeled_filenames = [os.path.join(self.unlabeled_path,
path) for path in unlabeled_image_paths]
ds = self._make_dataset(unlabeled_filenames, batch_size)
y_preds = []
for batch in tqdm(ds):
y_preds_ = self.model.predict(batch)
y_preds.extend(list(y_preds_))
for y_pred in y_preds:
y = np.argmax(y_pred)
# Get probability score
proba = y_pred[y]
predicted_class.append(y)
raw_predictions.append(proba)
raw_predictions_all.append(list(y_pred))
raw_predictions_paths = [path for path in unlabeled_image_paths]
# 'Pseudo Class Names': pseudo_class_names,
print("Saving CSV with pseudo predictions.")
data = {'Filepaths': raw_predictions_paths,
'Predicted Class': predicted_class,
'Highest Confidence': raw_predictions,
'All Class Predictions': raw_predictions_all}
df = pd.DataFrame(data)
df.to_csv(self.csv_path, index=False)
# move pseudo labeled images
if move_images:
self.move_unlabeled_images(threshold=threshold)
if plot_confidences:
print("Plotting data.")
self.plot_confidence_scores()
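# Hypothetical usage sketch (paths and filenames are illustrative; they follow the
# constructor defaults documented above):
#
# generator = PseudoLabelGenerator(model_path="model.h5",
#                                  train_data_path="data/image_dataset/train",
#                                  unlabeled_path="data/unlabeled",
#                                  pseudo_data_path="data/train_ssl")
# generator.generate_pseudolabel_data(plot_confidences=True, threshold=0.7,
#                                     move_images=True, batch_size=32)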
|
the-stack_0_14504 | import subprocess
import tempfile
import inspect
import random
import shutil
import time
import json
import sys
import os
KWARGS = "k"
RESULT = "r"
VALUE = "v"
ARGS = "a"
# !!! In the decorator, you could actually delete a call's files once the function result has been read,
# or, if enough time has passed, delete the file from the loop instead.
"""
File layout:
+/tmp
|---> -/dirio
|---> +/12345
|---> -/67891
|-----> __main__.py -> Only exists during the first run.
|-----> +/func1
|-----> -/func2
| |-------> 11
| |-------> 12
| |-------> 13
| |-> {"a": ["func args"], "k": {"func kwargs"}, "r": "return value"}
|-----> variable1
|-----> variable2
|-----> variable3
|-------> "value stored in the variable"
"""
# import fcntl
#
#
# # ################################ File LOCK - not used yet
# # https://stackoverflow.com/questions/4843359/python-lock-a-file
# def lock_to_file(filename):
# """ acquire exclusive lock file access """
# locked_file_descriptor = open(filename, 'w+')
# fcntl.lockf(locked_file_descriptor, fcntl.LOCK_EX)
# return locked_file_descriptor
#
#
# def lock_to_release(locked_file_descriptor):
# """ release exclusive lock file access """
# locked_file_descriptor.close()
#
#
# # ##############################################################
def new_dir(tempdir, module, class_name, args, kwargs):
"""/Tempdir/353464325"""
# Create the directory
dir_path = os.path.join(tempdir or tempfile.gettempdir(), "dirio")
if not os.path.exists(dir_path):
os.mkdir(dir_path)
no = 111
while str(no) in os.listdir(dir_path):
no = random.randint(111, 9999999999)
new_path = os.path.join(dir_path, str(no))
os.mkdir(new_path)
# #######################
# #######################
# Create the script
# This file itself is read first.
with open(__file__) as f:
script_body = f.read()
# Find the path of the module to import.
module_name = os.path.basename(module).split(".")[0]
module_path = os.path.dirname(module)
if module_name in ("__init__", "__main__"):
module_name = os.path.basename(module_path)
module_path = os.path.dirname(module_path)
# Build the pieces of the script
# script_head = f"import sys\nsys.path.append('{module_path}')\nfrom {module_name} import {class_name}"
script_head = f"""
import sys
sys.path.append('{module_path}')
from {module_name} import {class_name}
"""
# script_footer = f"new = Dirio(target={class_name}, args={args}, kwargs={kwargs}, worker=True)\nnew._dr_loop()"
script_footer = f"""
try:
new = Dirio(target={class_name}, args={args}, kwargs={kwargs}, worker=True)
new._dr_loop()
except:
pass
# Delete the files on exit
dirname = os.path.dirname(__file__)
if os.path.exists(dirname):
shutil.rmtree(dirname)
sys.exit()
"""
script = "\n".join([script_head, script_body, script_footer])
# Write the script to disk
with open(os.path.join(new_path, "__main__.py"), "w") as f:
f.write(script)
# It might be better to start this as a Process here as well
subprocess.Popen([
sys.executable,
new_path
],
# close_fds=True
)
print("Dirio -> New Path ->", new_path)
return new_path
def check_type(value):
tip = type(value)
check = True
if tip in (dict, list, tuple, int, str, float, bool, type(None)):
if tip is dict:
for k, v in value.items():
if not (check_type(k) and check_type(v)):
return False
elif tip in (list, tuple):
for i in value:
if not check_type(i):
return False
else:
return False
return check
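# Illustrative behaviour of check_type (derived from the implementation above):
# only JSON-friendly values pass, so they can be written to the exchange files.
#
# check_type({"a": [1, 2.5, "x", None]})  -> True
# check_type({"a": object()})             -> False (unsupported value type)
# check_type({object(): 1})               -> False (unsupported key type)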
def set_decorator(self):
# Iterate over all attributes of the class
for attr in self.__dict__:
# Skip these names; do not add a decorator.
# if attr in ("__getattribute__", "__setattr__", "__new__"):
if (attr.startswith("__") and attr.endswith("__")) or attr in ("dr_terminate", "dr_code",
"dr_bind", "dr_binds_check", "dr_isactive"):
continue
# If the attribute is callable, wrap it with the decorator
attribute = getattr(self, attr)
if callable(attribute):
setattr(self, attr, get_decorator(self, attribute))
def get_result(path_code, dr_wait):
start_time = time.time()
wait_time = 1
# Wait while reading the answer
while wait_time:
if os.path.exists(path_code):
try:
with open(path_code) as f:
data = json.load(f)
if RESULT in data:
return data.get(RESULT)
except:
pass
# -1 means wait until the answer arrives
# 0 means check only once
# a value like 5 means wait for up to 5 seconds
if dr_wait >= 0:
wait_time = time.time() - start_time < dr_wait
return None
def get_decorator(self, func):
def wrapper(*args, **kwargs):
# Inside kwargs:
# dr_code=True -> returns the read code.
# dr_code=2345 (an int):
# on the client, returns the result for that code if available, otherwise None;
# on the server, it means "write the RESULT value into the file with that code".
# If neither is given, the most recent answer (if any) is returned.
dr_code = kwargs.pop("dr_code", False)
dr_wait = kwargs.pop("dr_wait", 0)
# The function's folder
path = os.path.join(self._dr_dir, func.__name__)
# Create it if it does not exist
if not os.path.exists(path):
os.mkdir(path)
# Core dunder methods would be executed immediately. !!! If you want to use this, also adjust the set_decorator section
# if func.__name__.startswith("__") and func.__name__.endswith("__"):
# return func(*args, **kwargs)
# If this is the client, or it is not a bound method (i.e. @classmethod or @staticmethod), drop the leading self parameter
if not self._dr_active or "self" not in inspect.getfullargspec(func).args:
args = args[1:]
# ################################
# If this is the client and the parameters are serializable, write them to a file.
if not self._dr_active and check_type(args) and check_type(kwargs):
# dr_code -> int -> return the data stored under this code, waiting up to the given time for the answer
if type(dr_code) is int and dr_code > 1:
return get_result(os.path.join(path, str(dr_code)), dr_wait)
# Takes the file names in the function's directory as integers and
# returns the largest number in the ["1", "2", ...] string list, or 10 if there is none
son_code = self._dr_last_code
new_code = son_code + 1
full_path = os.path.join(path, str(new_code))
while os.path.exists(full_path):
new_code += 1
full_path = os.path.join(path, str(new_code))
# Write the data to the file
with open(full_path, 'w') as f:
json.dump({ARGS: args, KWARGS: kwargs}, f)
self._dr_last_code = new_code
# Wait up to this long for the answer, then return
if dr_wait:
return get_result(full_path, dr_wait)
# dr_code -> True -> return the code
if dr_code is True:
return new_code
# dr_code -> False -> default: return the answer from the last file if there is one
if son_code != 10:
try:
with open(os.path.join(path, str(son_code))) as f:
return json.load(f).get(RESULT)
except:
pass
# If none of the above apply, return None
return None
# ################################
# If a code is given, write the data to the file for that code (only if the types are serializable).
if type(dr_code) is str:
file = os.path.join(path, dr_code)
try:
with open(file) as f:
data = json.load(f)
except:
return
# Done this way because a class-level function vs. a self method can otherwise raise an extra-parameter error
if "self" not in inspect.getfullargspec(func).args:
result = func(*data.get(ARGS, ()), **data.get(KWARGS, {}))
else:
result = func(args[0], *data.get(ARGS, ()), **data.get(KWARGS, {}))
data[RESULT] = result if check_type(result) else None
with open(file, "w") as f:
json.dump(data, f)
# If the function's name is not in the time tracker, create it
if func.__name__ not in self._dr_last_times:
self._dr_last_times[func.__name__] = {}
# Since we modified the function's file, record its modification time so it is not later mistaken for a change made by someone else
self._dr_last_times[func.__name__][dr_code] = os.stat(file).st_mtime
else:
# If this is the server, run the function directly
result = func(*args, **kwargs)
return result
return wrapper
class Dirio:
_dr_inwork = False
_dr_binds = {}
def __init__(self, target=None, args=(), kwargs={}, tempdir="", keeperiod=10, looperiod=.05, worker=False):
"""
:param target: class: the target class
:param args: tuple: positional arguments for the class
:param kwargs: dict: keyword arguments for the class
:param tempdir: str: temporary folder; if not given, the system's standard temp folder is used
:param keeperiod: int: how long to keep history; default keeps it for 10 seconds
:param looperiod: int: sleep time of the server loop; smaller costs CPU, larger costs responsiveness
:param worker: bool: read only, do not change; used internally by the class itself
"""
self._dr_bind = {}
self._dr_active = worker
self._dr_last_code = 10
self._dr_last_times = {}
self._dr_keep_period = keeperiod
self._dr_loop_period = looperiod
# Copy the class first, so that our modifications to it are not permanent
target = type(target.__name__, target.__bases__, dict(target.__dict__))
set_decorator(target)
if worker:
# Server side. This branch only runs when started from the temp folder
self._dr_dir = os.path.dirname(__file__)
else:
# Client side. The server is created and started
self._dr_dir = new_dir(tempdir, inspect.getfile(target), target.__name__, args, kwargs)
# target = type(f'gecis.{target.__name__}', tuple(target.__bases__), dict(target.__dict__))
# Add the Dirio attributes to the other class
for attr in self.__dict__:
if attr.startswith("_dr_") or attr.startswith("dr_"): # or attr in ("__getattribute__", "__setattr__"):
setattr(target, attr, self.__getattribute__(attr))
# Turn ourselves into a copy of the class
self.__class__ = type(f'dirio.{target.__name__}', tuple([Dirio, target]), dict(self.__dict__))
self._dr_inwork = True
super().__init__(*args, **kwargs)
def __getattribute__(self, name):
# If it starts with _dr_ or is a dunder name like __xxx__, return it immediately
if name.startswith("_dr_") or (name.startswith("__") and name.endswith("__")):
return super().__getattribute__(name)
in_class = name in dir(self)
# print("__getattribute__\t<--\t\t\t", name)
# If it is a function, it is returned directly.
###############
if in_class:
value = super().__getattribute__(name)
if callable(value):
return value
# If it is a variable;
###############
# If the value exists in the file, read it from there
if name in os.listdir(self._dr_dir):
with open(os.path.join(self._dr_dir, name)) as f:
value = json.load(f).get(VALUE)
return value
if in_class:
value = super().__getattribute__(name)
# We got this far, so it is not in the file; save it to the file as well.
self.__setattr__(name, value)
return value
return lambda *args, **kwargs: None
def __setattr__(self, key, value):
# print("__setattribute__\t\t\t-->\t", key, value)
# If the value is callable, give it our properties.
# If the value is serializable, write it to the file named after the key
# This applies to both the client and the server
if self._dr_inwork:
file = os.path.join(self._dr_dir, key)
if check_type(value):
with open(file, "w") as f:
json.dump({VALUE: value}, f)
else:
# If the type cannot be saved, also delete the existing file so reading it later does not cause problems
if os.path.exists(file):
os.remove(file)
# !!! This step is not actually needed for variables; doing it only for functions would be enough
# If this is the server, update the file's last-modified time so it is not mistaken for a change.
# if self._dr_active:
# self._dr_last_times[key] = os.stat(file).st_mtime
super().__setattr__(key, value)
def _dr_loop(self):
# Delete the script file
if os.path.exists(__file__):
os.remove(__file__)
# If there are saved values, read them first
# Then save all values to file
for i in dir(self):
if not (i.startswith("__") and i.endswith("__")):
getattr(self, i)
# Done this way so that every access does not go through class.getattr again
_dr_dir = self._dr_dir
_dr_last_times = self._dr_last_times
while os.path.exists(_dr_dir):
# Get the names of the function folders in the directory.
func_dirs = [i for i in os.listdir(_dr_dir) if os.path.isdir(os.path.join(_dr_dir, i))]
# Walk through all function directories
for func_dir in func_dirs:
func_full_path = os.path.join(_dr_dir, func_dir)
# If the function is not in last_times, add it
if func_dir not in _dr_last_times:
_dr_last_times[func_dir] = {}
lasts = _dr_last_times[func_dir]
for func_code in os.listdir(func_full_path):
if not func_code.isdigit():
continue
func_code_full_path = os.path.join(func_full_path, func_code)
st = os.stat(func_code_full_path).st_mtime
# If we have already run it and the last modification time is the same, skip it
if func_code in lasts and st == lasts.get(func_code):
# If the keep period has passed, move it to the history folder and remove it from last_times
# if time.time() - st > self._dr_keep_period:
# pass
# # remove from lasts
continue
# If it is being called for the first time, or the last modification time differs, process it
# The function is executed and its return value is saved, along with the time..
# print("Calling the function", func_dir, func_code)
getattr(self, func_dir)(dr_code=func_code)
# self.deger += 5
# print(self.deger, self)
# print("Bu da dönüyor", getattr(self, "deger"))
time.sleep(self._dr_loop_period)
def dr_terminate(self):
"""İşlemi bitirir"""
if os.path.exists(self._dr_dir):
shutil.rmtree(self._dr_dir)
def dr_code(self, code, wait=0):
"""Dönüşü koddan direkt olarak okumayı sağlar."""
if type(code) is int:
code = str(code)
# Walks through all function folders; if one contains a file with the given code, read it
for func_name in [j for j in os.listdir(self._dr_dir) if os.path.isdir(os.path.join(self._dr_dir, j))]:
func_path = os.path.join(self._dr_dir, func_name)
if code in os.listdir(func_path):
return get_result(os.path.join(func_path, code), wait)
return None
def dr_bind(self, code, func, args=(), kwargs={}):
"""Girilen kod ile sonuç alındığında, 'func'u çağırır. Parametrelerini de girer.
Sonuçları arayabilmesi için, arada 'dr_binds_check'in çalıştırılması gerekir.
Fonksiyonun alacağı ilk parametre, code'un dönüş değeri olmalı"""
self._dr_binds[code] = [func, args, kwargs]
def dr_binds_check(self):
"""Sonuçları kontrol eder. Sonuçlar geldiyse, Bind'leri çalıştırır"""
event = False
for code, vals in self._dr_binds.copy().items():
result = self.dr_code(code)
if result is not None:
func = vals[0]
args = vals[1]
kwargs = vals[2]
func(*args, **kwargs, result=result)
self._dr_binds.pop(code)
event = True
return event
def dr_isactive(self):
return self._dr_inwork and os.path.exists(self._dr_dir)
|
the-stack_0_14505 | import urllib.request
import urllib.error
import urllib.parse
import json
from arbitrage.public_markets.market import Market
class GDAX(Market):
def __init__(self, currency, code):
super().__init__(currency)
self.code = code
self.update_rate = 30
def update_depth(self):
url = 'https://api.gdax.com/products/%s/book?level=2' % self.code
req = urllib.request.Request(url, headers={
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"User-Agent": "curl/7.24.0 (x86_64-apple-darwin12.0)"})
res = urllib.request.urlopen(req)
depth = json.loads(res.read().decode('utf8'))
self.depth = self.format_depth(depth)
def sort_and_format(self, l, reverse=False):
l.sort(key=lambda x: float(x[0]), reverse=reverse)
r = []
for i in l:
r.append({'price': float(i[0]), 'amount': float(i[1])})
return r
def format_depth(self, depth):
bids = self.sort_and_format(depth['bids'], True)
asks = self.sort_and_format(depth['asks'], False)
return {'asks': asks, 'bids': bids}
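# Hypothetical usage sketch (the currency and product code arguments are illustrative):
#
# market = GDAX("USD", "BTC-USD")
# market.update_depth()
# print(market.depth["asks"][:3], market.depth["bids"][:3])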
|
the-stack_0_14507 | """Tensorflow trainer class."""
import logging
import math
import os
from typing import Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import TFPreTrainedModel, shape_list
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput
from .training_args_tf import TFTrainingArguments
logger = logging.getLogger(__name__)
class TFTrainer:
model: TFPreTrainedModel
args: TFTrainingArguments
# something similar to a PT Dataset.
# This is just temporary until we have
# a framework-agnostic approach for datasets.
train_dataset: Optional[tf.data.Dataset]
eval_dataset: Optional[tf.data.Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.gradient_accumulator = GradientAccumulator()
self._setup_training()
def _setup_training(self) -> None:
"""
Setup the different steps to train a model:
- check if all the data are given
- create the proper strategy
- create the features
- prepare the model settings
"""
self._prepare_dataset()
with self.args.strategy.scope():
self._create_optimizer()
_ = self.optimizer.iterations
self._set_loss_and_metric()
self._create_checkpoint_manager()
self._create_summary_writer()
def _set_loss_and_metric(self) -> None:
"""
Create the training loss and metric with their name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
try:
self.loss = tf.keras.losses.get(
{
"class_name": self.args.loss_name,
"config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE},
}
)
except TypeError:
self.loss = tf.keras.losses.get(
{"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}}
)
def _create_summary_writer(self) -> None:
"""
Create a summary writer to be able to read the logs in Tensorboard.
"""
self.writer = tf.summary.create_file_writer(self.args.logging_dir)
def _prepare_dataset(self) -> None:
"""
Prepare the training, validation and test data.
"""
if self.train_dataset is not None:
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
if self.args.max_steps > 0:
self.train_steps = self.args.max_steps
else:
self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)
self.train_dataset = (
self.train_dataset.cache()
.shuffle(self.num_train_examples)
.batch(self.args.train_batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
if self.args.max_steps > 0:
self.train_dataset = self.train_dataset.repeat(-1)
self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)
else:
self.train_steps = 0
if self.eval_dataset is not None:
self.eval_dataset = (
self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)
)
self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)
def _create_optimizer(self) -> None:
"""
Create the training optimizer with its name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
if self.args.optimizer_name == "adamw":
self.optimizer = create_optimizer(
self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr
)
else:
try:
self.optimizer = tf.keras.optimizers.get(
{
"class_name": self.args.optimizer_name,
"config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon},
}
)
except TypeError:
# This is for the case where the optimizer is not Adam-like such as SGD
self.optimizer = tf.keras.optimizers.get(
{"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}}
)
logger.info("Created an/a {} optimizer".format(self.optimizer))
def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:
"""
Create a checkpoint manager in order to be able to make the training
fault-tolerant.
Args:
max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.
load_model: if we want to start the training from the latest checkpoint.
"""
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)
if load_model:
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
@tf.function
def _evaluate_steps(self, per_replica_features, per_replica_labels):
"""
One step evaluation across replica.
Args:
per_replica_features: the batched features.
per_replica_labels: the batched labels.
Returns:
The loss corresponding to the given batch.
"""
per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
self._run_model, args=(per_replica_features, per_replica_labels, False)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss, per_replica_logits
def _prediction_loop(
self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
logger.info("***** Running %s *****", description)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
step: int = 1
for features, labels in dataset:
step = tf.convert_to_tensor(step, dtype=tf.int64)
loss, logits = self._evaluate_steps(features, labels)
loss = tf.reduce_mean(loss)
if not prediction_loss_only:
if self.args.n_gpu > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
step += 1
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["loss"] = loss.numpy()
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def evaluate(
self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
"""
if eval_dataset is None:
eval_dataset = self.eval_dataset
output = self._prediction_loop(eval_dataset, description="Evaluation")
return output.metrics
def train(self) -> None:
"""
Train method to train the model.
"""
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
iterations = self.optimizer.iterations
if iterations.numpy() > 0:
logger.info("Start the training from the last checkpoint")
start_epoch = (iterations.numpy() // self.train_steps) + 1
else:
start_epoch = 1
tf.summary.experimental.set_step(iterations)
epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
logger.info(" Num Epochs = %d", epochs)
logger.info(" Total optimization steps = %d", self.train_steps)
for epoch in range(start_epoch, int(epochs + 1)):
for training_loss in self._training_steps():
step = iterations.numpy()
if self.args.debug:
with self.writer.as_default():
tf.summary.scalar("loss", training_loss, step=step)
if step == 1 and self.args.debug:
with self.writer.as_default():
tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir)
if self.args.evaluate_during_training and step % self.args.eval_steps == 0:
logs = {}
results = self.evaluate()
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
if callable(self.optimizer.learning_rate):
logs["learning_rate"] = self.optimizer.learning_rate(step).numpy()
else:
logs["learning_rate"] = self.optimizer.learning_rate.numpy()
logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs))
with self.writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=step)
if step % self.args.logging_steps == 0:
logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy()))
if step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path))
if step % self.train_steps == 0:
break
def _training_steps(self):
"""
Returns a generator over training steps (i.e. parameters update).
"""
for i, loss in enumerate(self._accumulate_next_gradients()):
if i % self.args.gradient_accumulation_steps == 0:
self._apply_gradients()
yield loss
@tf.function
def _apply_gradients(self):
"""Applies the gradients (cross-replica)."""
self.args.strategy.experimental_run_v2(self._step)
def _step(self):
"""Applies gradients and resets accumulation."""
gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync
gradients = [
gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients
]
gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]
vars = self.model.trainable_variables
if self.args.mode == "token-classification":
vars = [var for var in self.model.trainable_variables if "pooler" not in var.name]
self.optimizer.apply_gradients(list(zip(gradients, vars)))
self.gradient_accumulator.reset()
def _accumulate_next_gradients(self):
"""Accumulates the gradients from the next element in dataset."""
iterator = iter(self.train_dataset)
@tf.function
def _accumulate_next():
per_replica_features, per_replica_labels = next(iterator)
return self._accumulate_gradients(per_replica_features, per_replica_labels)
while True:
try:
yield _accumulate_next()
except tf.errors.OutOfRangeError:
break
def _accumulate_gradients(self, per_replica_features, per_replica_labels):
"""Accumulates the gradients across all the replica."""
per_replica_loss = self.args.strategy.experimental_run_v2(
self._forward, args=(per_replica_features, per_replica_labels)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss
def _forward(self, features, labels):
"""Forwards a training example and accumulates the gradients."""
per_example_loss, _ = self._run_model(features, labels, True)
vars = self.model.trainable_variables
if self.args.mode == "token-classification":
vars = [var for var in self.model.trainable_variables if "pooler" not in var.name]
gradients = self.optimizer.get_gradients(per_example_loss, vars)
self.gradient_accumulator(gradients)
return per_example_loss
def _run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Args:
features: the batched features.
labels: the batched labels.
training: run the model in training mode or not
"""
if self.args.mode == "sequence-classification" or self.args.mode == "token-classification":
logits = self.model(features, training=training)[0]
else:
logits = self.model(features, training=training)
if self.args.mode == "token-classification":
active_loss = tf.reshape(labels, (-1,)) != -1
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
loss = self.loss(labels, reduced_logits)
else:
loss = self.loss(labels, logits)
loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
Args:
test_dataset: something similar to a PT Dataset. This is just
temporary before to have a framework-agnostic approach for datasets.
"""
test_dataset = test_dataset.batch(self.args.eval_batch_size)
test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)
return self._prediction_loop(test_dataset, description="Prediction")
def save_model(self) -> None:
"""
Save the pretrained model and create a Tensorflow saved model.
"""
logger.info("Saving model in {}".format(self.args.output_dir))
path = os.path.join(self.args.output_dir, "saved_model")
logger.info("Saving model in {}".format(path))
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(self.args.output_dir)
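
# Illustrative sketch, not part of the original trainer: the accumulate/apply
# pair above spreads one optimizer step over several micro-batches. The same
# idea in plain TF2, assuming a generic Keras `model`, `optimizer` and
# `loss_fn` (all hypothetical names here), looks like this:
def _gradient_accumulation_sketch(model, optimizer, loss_fn, micro_batches):
    # One zero-initialized accumulator per trainable variable.
    accumulators = [tf.zeros_like(v) for v in model.trainable_variables]
    for features, labels in micro_batches:
        with tf.GradientTape() as tape:
            loss = loss_fn(labels, model(features, training=True))
        grads = tape.gradient(loss, model.trainable_variables)
        accumulators = [a + g for a, g in zip(accumulators, grads)]
    # Average over the accumulated micro-batches, then apply once.
    scaled = [a / len(micro_batches) for a in accumulators]
    optimizer.apply_gradients(zip(scaled, model.trainable_variables))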
|
the-stack_0_14508 | class TableFormat:
"""This class handles all related things to the visual presentation of a table."""
def __init__(self):
self._widths = []
self._columns = []
self._rows = []
def set(self, columns):
self._columns = columns
self._widths = [len(column) + 2 for column in columns]
def add_row(self, rows):
rows = [str(row) for row in rows]
self._rows.append(rows)
for index, row in enumerate(rows):
width = len(row) + 2
if width > self._widths[index]:
self._widths[index] = width
def add(self, rows):
for row in rows:
self.add_row(row)
def render(self):
"""Renders a table in rST format for graphical presentation in Discord chat."""
table = '+' + ('+'.join('-' * width for width in self._widths)) + '+'
to_draw = [table]
def get(results):
element = '|'.join(f'{result:^{self._widths[index]}}' for index, result in enumerate(results))
return f'|{element}|'
to_draw.append(get(self._columns))
to_draw.append(table)
for row in self._rows:
to_draw.append(get(row))
to_draw.append(table)
return '\n'.join(to_draw)
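
# Usage sketch (illustrative addition; the values are made up): build a small
# table and print the rST-style grid that render() produces.
if __name__ == '__main__':
    table = TableFormat()
    table.set(['Name', 'Score'])
    table.add([['alice', 10], ['bob', 7]])
    print(table.render())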
|
the-stack_0_14516 | import logging
import datetime
from ipyc import AsyncIPyCHost, AsyncIPyCLink
host = AsyncIPyCHost()
# logging.basicConfig(level=logging.DEBUG)
@host.on_connect
async def on_connection(connection: AsyncIPyCLink):
connection_idx = len(host.connections)
print(f'We got a new connection! ({connection_idx})')
while connection.is_active():
message = await connection.receive()
if message:
print(f"[{datetime.datetime.now()}] - Connection {connection_idx} says: {message}")
print(f"[{datetime.datetime.now()}] - Connection {connection_idx} was closed!")
print('Starting to wait for connections!')
host.run()
|
the-stack_0_14517 | import pathlib
import os
import shutil
from flask import Flask
import logging
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
import numpy as np
import plotly.graph_objs as go
import roslibpy
import time
from dash.dependencies import State, Input, Output
import pointcloud_msg
server = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = dash.Dash(
__name__, external_stylesheets=[dbc.themes.SLATE],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
title="Open AgroLiDAR Control",
server=server
)
# This is for gunicorn
#server = app.server
# Mapbox
MAPBOX_ACCESS_TOKEN = "pk.eyJ1IjoiamFqYmVybmkiLCJhIjoiY2oyMXFsZjdsMDAxNTJybzd0bDNxczZyeCJ9.6EKxvkWLdnzNI0RJLAsimA"
MAPBOX_STYLE = "mapbox://styles/jajberni/ckk35n9qg3uvw17qg3du25oyb"
GPS_FIX_COLORS = {6: "#67d03b", 5: "#f9f025", 4: "#f97654", 3: "#f97654", 2: "#f43c4e", 1: "#f43c4e", 0: "#f43c4e",
-1: "#f43c4e"}
DEFAULT_FOLDER = "/data"
class StatusManager:
"""Class to store information useful to callbacks"""
def __init__(self):
self.is_connected = False
self.scan_count = 0
self.lat = 0.0
self.lon = 0.0
self.h_accuracy = 0.0
self.v_accuracy = 0.0
self.gps_status = -1
self.lat_path = []
self.lon_path = []
self.fix_path = []
self.speed_kph = 0.0
self.last_cloud = None
self.last_gps = None
self.last_pose = None
self.last_sat_count = 0
self.rtk_listener = None
self.cloud_listener = None
self.pose_listener = None
self.project_talker = None
self.project_service = None
self.project_list = []
ros_master = '127.0.0.1'
if "ROS_MASTER_HOSTNAME" in os.environ:
ros_master = os.environ["ROS_MASTER_HOSTNAME"]
self.client = roslibpy.Ros(host=ros_master, port=9090)
self.connect()
def connect(self):
try:
self.client.run(timeout=50)
self.is_connected = True
self.create_listeners()
except Exception as ex:
self.is_connected = False
print("Error connecting to ROS")
def rtk_callback(self, msg):
self.last_gps = msg
self.lat = msg['lat'] /1e7
self.lon = msg['lon'] / 1e7
self.h_accuracy = msg['h_acc']/1e3
self.v_accuracy = msg['v_acc']/1e3
self.gps_status = msg['fix_type']
self.lat_path.append(self.lat)
self.lon_path.append(self.lon)
self.fix_path.append(self.gps_status)
self.speed_kph = msg['vel']*0.036
self.last_sat_count = msg['satellites_visible']
#print(msg['cog']/100, msg['fix_type'], msg['dgps_age'])
def cloud_callback(self, msg):
self.scan_count += 1
self.last_cloud = pointcloud_msg.msg_to_cloud(msg)
def pose_callback(self, msg):
self.last_pose = msg
def start_recording(self, project_name):
if self.is_connected:
self.project_talker.publish(roslibpy.Message({'data': project_name}))
def stop_recording(self):
if self.is_connected:
self.project_talker.publish(roslibpy.Message({'data': '.'}))
def create_listeners(self):
if self.is_connected:
self.project_talker = roslibpy.Topic(self.client, '/project/name', 'std_msgs/String')
self.rtk_listener = roslibpy.Topic(self.client, '/mavros/gpsstatus/gps1/raw', 'mavros_msgs/GPSRAW')
self.rtk_listener.subscribe(self.rtk_callback)
self.cloud_listener = roslibpy.Topic(self.client, '/laserMapping/laser_points', 'sensor_msgs/PointCloud2')
self.cloud_listener.subscribe(self.cloud_callback)
self.pose_listener = roslibpy.Topic(self.client, '/mavros/global_position/local', 'nav_msgs/Odometry')
self.pose_listener.subscribe(self.pose_callback)
self.project_service = roslibpy.Service(self.client, '/project_service', 'agrolaser_node/ProjectService')
project_request = roslibpy.ServiceRequest({'request_string': 'list', 'project': ''})
resp = self.project_service.call(project_request)
if len(resp['list_strings']) == 0:
self.project_list = [{'label': 'Test', 'value': 'Test'}]
else:
self.project_list = [{'label': project_name, 'value': project_name} for project_name in sorted(resp['list_strings'])]
local_vars = StatusManager()
# Point Cloud graph components
# Helix equation for demo
t = np.linspace(0, 10, 50)
x, y, z = np.cos(t), np.sin(t), t
default_fig = go.Figure(data=[go.Scatter3d(x=x, y=y, z=z,
mode='markers',
marker=dict(
size=1,
opacity=0.8
))])
default_fig.update_layout(hovermode=False)
default_fig.update_layout(margin=dict(l=0, r=0, b=0, t=0))
default_fig.update_scenes(aspectmode='manual', aspectratio_z=0.1)
cloud_graph_card = dbc.Card(
dcc.Graph(id='point-cloud-graph', figure=default_fig, config={
'displayModeBar': False,
}), body=True
)
update_button = dbc.Button(
"Clear Point Cloud", id="update", n_clicks=0, color="primary", outline=True
)
update_button_2 = dbc.Button(
"Force Update", id="update-2", n_clicks=0, color="primary", outline=True
)
setup_button = html.Div(
[
dbc.Button("Options", id="open-setup"),
dbc.Modal(
[
dbc.ModalHeader("Options"),
dbc.ModalBody("This is the content of the modal"),
dbc.ModalFooter(
dbc.Button("Close", id="close-setup", className="ml-auto")
),
],
id="setup-modal",
),
]
)
new_project_button = html.Div(
[
dbc.Button("New Project", id="new-project-button"),
dbc.Modal(
[dbc.Row(
dbc.Col(dbc.FormGroup(
[
dbc.Label("Project Name", className="mr-2"),
dbc.Input(id="input-new-project-name", type="text", placeholder="Enter project"),
],
className="mr-3",
)), form=True),
dbc.Row(
dbc.Col(dbc.FormGroup(
[
dbc.Label("Description", className="mr-2"),
dbc.Input(type="text", placeholder="Enter description"),
],
className="mr-3",
)), form=True),
dbc.Row([
dbc.Col(dbc.Button("Accept", color="primary", id="accept-project-button")),
dbc.Col(dbc.Button("Cancel", color="primary", id="cancel-project-button")),
],
)
],
id="new-project-modal",
),
]
)
# Dash_DAQ elements
utc = html.Div(
id="control-panel-utc",
children=[
daq.LEDDisplay(
id="control-panel-utc-component",
value="16:23",
label="Time",
size=40,
color="#fec036",
backgroundColor="#2b2b2b",
)
],
n_clicks=0,
)
speed = daq.Gauge(
id="control-panel-speed-component",
label="Speed",
min=0,
max=10,
showCurrentValue=True,
value=4.0,
size=175,
units="km/h",
color="#fec036",
)
scan_count = daq.LEDDisplay(
id="control-panel-scans-component",
value="0000000",
label="Scans",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
storage_indicator = html.Div(
id="control-panel-disk",
children=[
daq.GraduatedBar(
id="control-panel-disk-component",
label="Disk Capacity",
min=0,
max=100,
value=76,
step=1,
showCurrentValue=True,
color="#fec036",
)
],
n_clicks=0,
)
battery_indicator = html.Div(
id="control-panel-battery",
children=[
daq.GraduatedBar(
id="control-panel-battery-component",
label="Battery-Level",
min=0,
max=100,
value=85,
step=1,
showCurrentValue=True,
color="#fec036",
)
],
n_clicks=0,
)
longitude = daq.LEDDisplay(
id="control-panel-longitude-component",
value="0000.0000",
label="Longitude",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
latitude = daq.LEDDisplay(
id="control-panel-latitude-component",
value="0050.9789",
label="Latitude",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
h_accuracy = daq.LEDDisplay(
id="control-panel-h-accuracy-component",
value="0.0000",
label="H Accuracy (m)",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
v_accuracy = daq.LEDDisplay(
id="control-panel-v-accuracy-component",
value="0.0000",
label="V Accuracy (m)",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
satellites = html.Div([
dbc.Row([
dbc.Col(
daq.LEDDisplay(
id="satellite-count",
value="00",
label="Satellites",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
),
dbc.Col(
daq.Indicator(
id="rtk-indicator",
label="RTK Status",
labelPosition="bottom",
value=True,
color="#15e82e",
style={"color": "#black"},
)
),
], no_gutters=True, align="center")
])
gps_card = dbc.Card([
satellites,
dbc.Row([
dbc.Col([
latitude,
longitude]),
dbc.Col([
h_accuracy,
v_accuracy]),
])
])
map_toggle = daq.ToggleSwitch(
id="control-panel-toggle-map",
value=True,
label=["Hide path", "Show path"],
color="#ffe102",
style={"color": "#black"},
)
# Side panel
project_dropdown_text = html.P(
id="project-dropdown-text", children=["Control"]
)
"""project_dropdown = dbc.FormGroup(
[
dbc.Label("Camera Position"),
dbc.Select(
id="project",
options=[
{"label": "New Project...", "value": "new_project"},
{"label": "Project 1", "value": "project_1"},
{"label": "Project 2", "value": "project_2"},
],
value="project_1",
),
]
)
"""
project_select = dbc.InputGroup(
[
dbc.InputGroupAddon("Select Project", addon_type="prepend"),
dbc.Select(
id="project-dropdown-component",
options=local_vars.project_list,
value=local_vars.project_list[0]['value']
),
dbc.InputGroupAddon(
new_project_button,
addon_type="append",
),
]
)
project_title = html.H1(id="project-name", children="")
recording_button = daq.PowerButton(
id='recording-button',
on=False,
color='#FF5E5E',
size=80,
label='Record',
labelPosition='top'
)
project_body = html.P(
className="project-description", id="project-description", children=[""]
)
side_panel_layout = html.Div(
id="panel-side",
children=[
dbc.Card([
dbc.Row([
dbc.Col(project_select),
]),
dbc.Row([
dbc.Col(recording_button),
]),
dbc.Row([
dbc.Col(update_button),
dbc.Col(update_button_2),
dbc.Col(setup_button),
])
]),
],
)
# project location tracker
# Helper to straighten lines on the map
def flatten_path(xy1, xy2):
diff_rate = (xy2 - xy1) / 100
res_list = []
for i in range(100):
res_list.append(xy1 + i * diff_rate)
return res_list
map_data = [
{
"type": "scattermapbox",
"lat": [0],
"lon": [0],
"hoverinfo": "text+lon+lat",
"text": "LiDAR Path",
"mode": "lines",
"line": {"width": 3, "color": "#126de3"},
},
{
"type": "scattermapbox",
"lat": [0],
"lon": [0],
"hoverinfo": "text+lon+lat",
"text": "Current Position",
"mode": "markers",
"marker": {"size": 10, "color": "#fec036"},
},
]
map_layout = {
"mapbox": {
"accesstoken": MAPBOX_ACCESS_TOKEN,
"style": MAPBOX_STYLE,
"center": {"lat": 37.8, "lon": -4.8}, "zoom": 18,
},
"showlegend": False,
"autosize": True,
"paper_bgcolor": "#1e1e1e",
"plot_bgcolor": "#1e1e1e",
"margin": {"t": 0, "r": 0, "b": 0, "l": 0},
}
map_graph = dbc.Card(
id="world-map-wrapper",
children=[
map_toggle,
dcc.Graph(
id="world-map",
figure={"data": map_data, "layout": map_layout},
config={"displayModeBar": False, "scrollZoom": True},
)
],
body=True
)
main_panel_card = html.Div([
dcc.Interval(id="interval", interval=1 * 2000, n_intervals=0),
dcc.Interval(id="interval-fast", interval=500, n_intervals=0),
dbc.Card([
dbc.Row([
dbc.Col(speed, width=3),
dbc.Col([dbc.Row(utc), dbc.Row(scan_count), dbc.Row(storage_indicator)], width=3),
dbc.Col(gps_card, width=5)
]
),
]),
dbc.Card(dbc.Row([dbc.Col(cloud_graph_card, width=6), dbc.Col(map_graph, width=6)]))
])
# Data generation
# Pandas
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
# Root
root_layout = dbc.Container(
[
dcc.Store(id="store-placeholder"),
dcc.Store(
id="store-data"),
html.H1("Open PhenoLiDAR Control"),
html.Hr(),
dbc.Row([
dbc.Col(main_panel_card, md=8),
dbc.Col(side_panel_layout, md=4),
], align="start")
], fluid=True,
)
app.layout = root_layout
# Callback free space
@app.callback(
Output("control-panel-disk-component", "value"), [Input("interval", "n_intervals")]
)
def update_free_disk(interval):
    total, used, free = shutil.disk_usage("/")
    used_pc = 100 * used / total  # percentage of disk space already in use
    return used_pc
# Callbacks Data
# Callbacks Components
@app.callback(
Output("control-panel-utc-component", "value"), [Input("interval", "n_intervals")]
)
def update_time(interval):
hour = time.localtime(time.time())[3]
hour = str(hour).zfill(2)
minute = time.localtime(time.time())[4]
minute = str(minute).zfill(2)
return hour + ":" + minute
@app.callback(
[
Output("control-panel-latitude-component", "value"),
Output("control-panel-longitude-component", "value"),
Output("control-panel-h-accuracy-component", "value"),
Output("control-panel-v-accuracy-component", "value"),
Output("control-panel-scans-component", "value"),
Output("satellite-count", "value"),
Output("rtk-indicator", "value"),
Output("rtk-indicator", "color"),
],
[Input("interval", "n_intervals")],
)
def update_gps_component(clicks):
rtk_status = False
rtk_color = "#fec036"
if local_vars.gps_status > 5:
rtk_status = True
if local_vars.gps_status < 3:
rtk_color = "#dc1330"
elif local_vars.gps_status == 5:
rtk_color = "#f9f025"
elif local_vars.gps_status == 6:
rtk_color = "#6bd71f"
else:
rtk_color = "#dc1330"
return "{:.4f}".format(local_vars.lat), "{:.4f}".format(
local_vars.lon), "{:.3f}".format(local_vars.h_accuracy), "{:.3f}".format(
local_vars.v_accuracy), "{:08d}".format(local_vars.scan_count), local_vars.last_sat_count, rtk_status, rtk_color
@app.callback(Output("control-panel-speed-component", "value"),
[Input("interval-fast", "n_intervals")],
)
def update_speed_component(clicks):
return local_vars.speed_kph
@app.callback(
Output("point-cloud-graph", "figure"),
[Input("update", "n_clicks"), ],
[State("point-cloud-graph", "figure")]
)
def create_cloud_graph(clicks, graph_data):
if local_vars.last_cloud is not None:
# print(graph_data)
# print(local_vars.last_cloud.points.head())
df = local_vars.last_cloud
graph_data['data'] = [
go.Scatter3d(
x=df['x'],
y=df['y'],
z=df['z'],
mode='markers',
marker=dict(
size=1,
color=df['intensity'],
opacity=0.8
)
)
]
else:
print("No data")
return graph_data
@app.callback(
Output("point-cloud-graph", "extendData"),
[Input("interval", "n_intervals"), Input("update-2", "n_clicks"), ],
[State("point-cloud-graph", "figure")]
)
def update_cloud_graph(interval, clicks, graph_data):
# print(graph_data['data'])
if local_vars.last_cloud is not None:
df = local_vars.last_cloud
data = [go.Scatter3d(
x=df['x'],
y=df['y'],
z=df['z'],
mode='markers',
marker=dict(
size=1,
color=df['intensity'],
opacity=0.8
)
)]
# print(data[0]['marker'])
if graph_data is None:
return
if len(graph_data['data']) > 0:
# return data[0], [0]
return dict(x=[data[0]['x']], y=[data[0]['y']], z=[data[0]['z']]), [
0] # , marker=dict(color=[data[0]['marker']['color']])), [0]
# return data
@app.callback(
Output("world-map", "figure"),
[
Input("interval", "n_intervals"),
Input("control-panel-toggle-map", "value"),
],
[
State("world-map", "figure"),
State("store-data", "data"),
],
)
def update_word_map(clicks, toggle, old_figure, data):
figure = old_figure
figure["data"][1]["lat"] = [local_vars.lat]
figure["data"][1]["lon"] = [local_vars.lon]
figure["data"][1]["marker"]["color"] = GPS_FIX_COLORS[local_vars.gps_status]
figure["layout"]["mapbox"]["center"] = {"lat": local_vars.lat, "lon": local_vars.lon}
if not toggle:
figure["data"][0]["lat"] = []
figure["data"][0]["lon"] = []
else:
figure["data"][0]["lat"] = local_vars.lat_path
figure["data"][0]["lon"] = local_vars.lon_path
return figure
@app.callback(
[Output("project-dropdown-component", "disabled"), Output("new-project-button", "disabled")],
[
Input("recording-button", "on"),
Input("project-dropdown-component", "value"),
],
)
def recording_control(on, project):
if project != 'new_project':
if on:
print("Start record: " + project)
local_vars.start_recording(project)
else:
local_vars.stop_recording()
return on, on
@app.callback(
Output("setup-modal", "is_open"),
[Input("open-setup", "n_clicks"), Input("close-setup", "n_clicks")],
[State("setup-modal", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
[Output("new-project-modal", "is_open"), Output("project-dropdown-component", "options")],
[Input("new-project-button", "n_clicks"), Input("accept-project-button", "n_clicks"),
Input("cancel-project-button", "n_clicks"), Input("input-new-project-name", "value")],
[State("new-project-modal", "is_open"), State("accept-project-button", "n_clicks"), State("cancel-project-button", "n_clicks")],
)
def toggle_modal(n1, n2, n3, new_project_name, is_open, n2_s, n3_s):
if n1 is None:
return is_open, local_vars.project_list
if n2 == n1:
print("Create new project: " + new_project_name)
resp = local_vars.project_service.call(roslibpy.ServiceRequest({'request_string': 'create', 'project': new_project_name}))
project_request = roslibpy.ServiceRequest({'request_string': 'list', 'project': ''})
local_vars.project_list = [{'label': project_name, 'value': project_name} for project_name in
sorted(local_vars.project_service.call(project_request)['list_strings'])]
return False, local_vars.project_list
if n3 == n1:
return False, local_vars.project_list
if n1:
return True, local_vars.project_list
return is_open, local_vars.project_list
if __name__ == "__main__":
debug = True
port = 8051
if "DASH_DEBUG_MODE" in os.environ:
debug = False if os.environ["DASH_DEBUG_MODE"] == "False" else True
if "DASH_PORT" in os.environ:
port = os.environ["DASH_PORT"]
app.run_server(host="0.0.0.0", port=port, debug=debug)
|
the-stack_0_14518 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectToSourceMySqlTaskInput(Model):
"""Input for the task that validates MySQL database connection.
All required parameters must be populated in order to send to Azure.
:param source_connection_info: Required. Information for connecting to
MySQL source
:type source_connection_info:
~azure.mgmt.datamigration.models.MySqlConnectionInfo
:param target_platform: Target Platform for the migration. Possible values
include: 'AzureDbForMySQL'
:type target_platform: str or
~azure.mgmt.datamigration.models.MySqlTargetPlatformType
:param check_permissions_group: Permission group for validations. Possible
values include: 'Default', 'MigrationFromSqlServerToAzureDB',
'MigrationFromSqlServerToAzureMI', 'MigrationFromMySQLToAzureDBForMySQL'
:type check_permissions_group: str or
~azure.mgmt.datamigration.models.ServerLevelPermissionsGroup
"""
_validation = {
'source_connection_info': {'required': True},
}
_attribute_map = {
'source_connection_info': {'key': 'sourceConnectionInfo', 'type': 'MySqlConnectionInfo'},
'target_platform': {'key': 'targetPlatform', 'type': 'str'},
'check_permissions_group': {'key': 'checkPermissionsGroup', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ConnectToSourceMySqlTaskInput, self).__init__(**kwargs)
self.source_connection_info = kwargs.get('source_connection_info', None)
self.target_platform = kwargs.get('target_platform', None)
self.check_permissions_group = kwargs.get('check_permissions_group', None)
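
# Hedged usage sketch (not from the generated SDK; `my_sql_connection_info` is
# assumed to be a MySqlConnectionInfo instance built elsewhere):
#
#   task_input = ConnectToSourceMySqlTaskInput(
#       source_connection_info=my_sql_connection_info,
#       target_platform="AzureDbForMySQL",
#       check_permissions_group="MigrationFromMySQLToAzureDBForMySQL",
#   )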
|
the-stack_0_14519 | from mavetools.validators import dataset_validators
def validate_all(countfile=None, scorefile=None, scorejson=None):
"""
By calling other helper functions, this function runs all of the validation code
"""
validate_dataset(countfile, scorefile, scorejson)
def validate_dataset(countfile=None, scorefile=None, scorejson=None):
"""
This function calls all of the validation functions within
mavetools/mavetools/validators/dataset_validation.py
Returns
-------
"""
# how to incorporate word limit validator?
if scorefile is not None:
# open scorefile
open(scorefile)
# this one returns header
scoreheader = dataset_validators.read_header_from_io(file=scorefile)
# if the header was returned, do these ones
dataset_validators.validate_has_hgvs_in_header(header=scoreheader)
dataset_validators.validate_at_least_one_additional_column(header=scoreheader)
dataset_validators.validate_header_contains_no_null_columns(header=scoreheader)
dataset_validators.validate_scoreset_score_data_input(file=scorefile)
if scorejson is not None:
# open scorejson
open(scorejson)
dataset_validators.validate_scoreset_json(dict_=scorejson)
if countfile is not None:
# open countfile
open(countfile)
countheader = dataset_validators.read_header_from_io(file=countfile)
# if the header was returned, do these ones
dataset_validators.validate_has_hgvs_in_header(header=countheader)
dataset_validators.validate_at_least_one_additional_column(header=countheader)
dataset_validators.validate_header_contains_no_null_columns(header=countheader)
dataset_validators.validate_scoreset_count_data_input(file=countfile)
if scorefile is not None and countfile is not None:
dataset_validators.validate_datasets_define_same_variants(
scores=scorefile, counts=countfile
)
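
# Example invocation (file names are hypothetical): validate a score file and
# its matching count file for the same score set.
#
#   validate_all(countfile="counts.csv", scorefile="scores.csv", scorejson=None)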
|
the-stack_0_14521 | import shutil
import pytest
import yaml
from click.testing import CliRunner
from kedro.extras.datasets.pandas import CSVDataSet
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline, node
@pytest.fixture
def fake_load_context(mocker):
context = mocker.MagicMock()
return mocker.patch(
"kedro.framework.session.KedroSession.load_context", return_value=context
)
PIPELINE_NAME = "pipeline"
@pytest.fixture
def mock_pipelines(mocker):
dummy_pipelines = {PIPELINE_NAME: Pipeline([]), "second": Pipeline([])}
return mocker.patch("kedro.framework.cli.catalog.pipelines", dummy_pipelines)
@pytest.mark.usefixtures(
"chdir_to_dummy_project", "fake_load_context", "mock_pipelines"
)
class TestCatalogListCommand:
def test_list_all_pipelines(self, fake_project_cli, fake_metadata, mocker):
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
result = CliRunner().invoke(
fake_project_cli, ["catalog", "list"], obj=fake_metadata
)
assert not result.exit_code
expected_dict = {
"DataSets in 'pipeline' pipeline": {},
"DataSets in 'second' pipeline": {},
}
yaml_dump_mock.assert_called_once_with(expected_dict)
def test_list_specific_pipelines(self, fake_project_cli, fake_metadata, mocker):
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list", "--pipeline", PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
expected_dict = {f"DataSets in '{PIPELINE_NAME}' pipeline": {}}
yaml_dump_mock.assert_called_once_with(expected_dict)
def test_not_found_pipeline(self, fake_project_cli, fake_metadata):
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list", "--pipeline", "fake"],
obj=fake_metadata,
)
assert result.exit_code
expected_output = (
"Error: `fake` pipeline not found! Existing pipelines: pipeline, second"
)
assert expected_output in result.output
def test_no_param_datasets_in_respose(
self, fake_project_cli, fake_metadata, fake_load_context, mocker, mock_pipelines
):
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
mocked_context = fake_load_context.return_value
catalog_data_sets = {
"iris_data": CSVDataSet("test.csv"),
"intermediate": MemoryDataSet(),
"parameters": MemoryDataSet(),
"params:data_ratio": MemoryDataSet(),
"not_used": CSVDataSet("test2.csv"),
}
mocked_context.catalog = DataCatalog(data_sets=catalog_data_sets)
mocker.patch.object(
mock_pipelines[PIPELINE_NAME],
"data_sets",
return_value=catalog_data_sets.keys() - {"not_used"},
)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list"],
obj=fake_metadata,
)
assert not result.exit_code
# 'parameters' and 'params:data_ratio' should not appear in the response
expected_dict = {
f"DataSets in '{PIPELINE_NAME}' pipeline": {
"Datasets mentioned in pipeline": {
"CSVDataSet": ["iris_data"],
"MemoryDataSet": ["intermediate"],
},
"Datasets not mentioned in pipeline": {"CSVDataSet": ["not_used"]},
}
}
key = f"DataSets in '{PIPELINE_NAME}' pipeline"
assert yaml_dump_mock.call_count == 1
assert yaml_dump_mock.call_args[0][0][key] == expected_dict[key]
def test_default_dataset(
self, fake_project_cli, fake_metadata, fake_load_context, mocker, mock_pipelines
):
"""Test that datasets that are found in `Pipeline.data_sets()`,
but not in the catalog, are outputted under the key "DefaultDataset".
"""
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
mocked_context = fake_load_context.return_value
catalog_data_sets = {"some_dataset": CSVDataSet("test.csv")}
mocked_context.catalog = DataCatalog(data_sets=catalog_data_sets)
mocker.patch.object(
mock_pipelines[PIPELINE_NAME],
"data_sets",
return_value=catalog_data_sets.keys() | {"intermediate"},
)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list"],
obj=fake_metadata,
)
assert not result.exit_code
expected_dict = {
f"DataSets in '{PIPELINE_NAME}' pipeline": {
"Datasets mentioned in pipeline": {
"CSVDataSet": ["some_dataset"],
"DefaultDataSet": ["intermediate"],
}
}
}
key = f"DataSets in '{PIPELINE_NAME}' pipeline"
assert yaml_dump_mock.call_count == 1
assert yaml_dump_mock.call_args[0][0][key] == expected_dict[key]
def identity(data):
return data # pragma: no cover
@pytest.mark.usefixtures("chdir_to_dummy_project")
class TestCatalogCreateCommand:
PIPELINE_NAME = "de"
@staticmethod
@pytest.fixture(params=["base"])
def catalog_path(request, fake_repo_path):
catalog_path = fake_repo_path / "conf" / request.param / "catalog"
yield catalog_path
shutil.rmtree(catalog_path, ignore_errors=True)
def test_pipeline_argument_is_required(self, fake_project_cli):
result = CliRunner().invoke(fake_project_cli, ["catalog", "create"])
assert result.exit_code
expected_output = "Error: Missing option '--pipeline' / '-p'."
assert expected_output in result.output
@pytest.mark.usefixtures("fake_load_context")
def test_not_found_pipeline(self, fake_project_cli, fake_metadata, mock_pipelines):
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", "fake"],
obj=fake_metadata,
)
assert result.exit_code
existing_pipelines = ", ".join(sorted(mock_pipelines.keys()))
expected_output = (
f"Error: `fake` pipeline not found! Existing "
f"pipelines: {existing_pipelines}\n"
)
assert expected_output in result.output
def test_catalog_is_created_in_base_by_default(
self, fake_project_cli, fake_metadata, fake_repo_path, catalog_path
):
main_catalog_path = fake_repo_path / "conf" / "base" / "catalog.yml"
main_catalog_config = yaml.safe_load(main_catalog_path.read_text())
assert "example_iris_data" in main_catalog_config
data_catalog_file = catalog_path / f"{self.PIPELINE_NAME}.yml"
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
assert data_catalog_file.is_file()
expected_catalog_config = {
"example_test_x": {"type": "MemoryDataSet"},
"example_test_y": {"type": "MemoryDataSet"},
"example_train_x": {"type": "MemoryDataSet"},
"example_train_y": {"type": "MemoryDataSet"},
}
catalog_config = yaml.safe_load(data_catalog_file.read_text())
assert catalog_config == expected_catalog_config
@pytest.mark.parametrize("catalog_path", ["local"], indirect=True)
def test_catalog_is_created_in_correct_env(
self, fake_project_cli, fake_metadata, catalog_path
):
data_catalog_file = catalog_path / f"{self.PIPELINE_NAME}.yml"
env = catalog_path.parent.name
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME, "--env", env],
obj=fake_metadata,
)
assert not result.exit_code
assert data_catalog_file.is_file()
def test_no_missing_datasets(
self,
fake_project_cli,
fake_metadata,
fake_load_context,
fake_repo_path,
mock_pipelines,
):
mocked_context = fake_load_context.return_value
catalog_data_sets = {
"input_data": CSVDataSet("test.csv"),
"output_data": CSVDataSet("test2.csv"),
}
mocked_context.catalog = DataCatalog(data_sets=catalog_data_sets)
mocked_context.project_path = fake_repo_path
mock_pipelines[self.PIPELINE_NAME] = Pipeline(
[node(identity, "input_data", "output_data")]
)
data_catalog_file = (
fake_repo_path / "conf" / "base" / "catalog" / f"{self.PIPELINE_NAME}.yml"
)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
assert not data_catalog_file.exists()
@pytest.mark.usefixtures("fake_repo_path")
def test_missing_datasets_appended(
self, fake_project_cli, fake_metadata, catalog_path
):
data_catalog_file = catalog_path / f"{self.PIPELINE_NAME}.yml"
assert not catalog_path.exists()
catalog_path.mkdir()
catalog_config = {
"example_test_x": {"type": "pandas.CSVDataSet", "filepath": "test.csv"}
}
with data_catalog_file.open(mode="w") as catalog_file:
yaml.safe_dump(catalog_config, catalog_file, default_flow_style=False)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
expected_catalog_config = {
"example_test_x": catalog_config["example_test_x"],
"example_test_y": {"type": "MemoryDataSet"},
"example_train_x": {"type": "MemoryDataSet"},
"example_train_y": {"type": "MemoryDataSet"},
}
catalog_config = yaml.safe_load(data_catalog_file.read_text())
assert catalog_config == expected_catalog_config
def test_bad_env(self, fake_project_cli, fake_metadata):
"""Test error when provided conf environment does not exist"""
env = "no_such_env"
cmd = ["catalog", "list", "-e", env, "--pipeline", PIPELINE_NAME]
result = CliRunner().invoke(fake_project_cli, cmd, obj=fake_metadata)
assert result.exit_code
assert "Unable to instantiate Kedro session" in result.output
|
the-stack_0_14522 | """Dyson test configuration."""
from unittest.mock import patch
import pytest
from . import CREDENTIAL, HOST, SERIAL
from .mocked_mqtt import MockedMQTT
@pytest.fixture()
def mqtt_client(request: pytest.FixtureRequest) -> MockedMQTT:
"""Return mocked mqtt client."""
device_type = request.module.DEVICE_TYPE
status = request.module.STATUS
environmental_data = request.module.ENVIRONMENTAL_DATA
mocked_mqtt = MockedMQTT(
HOST,
SERIAL,
CREDENTIAL,
f"{device_type}/{SERIAL}/command",
f"{device_type}/{SERIAL}/status/current",
status,
environmental_data,
)
with patch("libdyson.dyson_device.mqtt.Client", mocked_mqtt.refersh), patch(
"libdyson.dyson_device.TIMEOUT", 0
):
yield mocked_mqtt
|
the-stack_0_14523 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Yields a list chunk of size n from list
def group(list, n):
for i in range(0, len(list), n):
yield list[i:i+n]
# Takes requested data and breaks it up into a number of pages,
# each containing n items
def pagebreak(request, data, n):
    paginator = Paginator(data, n)
page = request.GET.get('page')
items = paginator.get_page(page)
# Generate page numbers to provide a link to from current page
page_span = 2
last = paginator.num_pages
index = items.number
prev = (index - page_span) if index > page_span else 1
next = (index + page_span) if index < (last - page_span) else last
page_range = paginator.page_range[prev:next]
return items, page_range
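
# Usage sketch inside a Django view (model and template names are
# hypothetical): ten items per page, plus nearby page numbers for nav links.
#
#   def article_list(request):
#       items, page_range = pagebreak(request, Article.objects.all(), 10)
#       return render(request, 'articles.html',
#                     {'items': items, 'page_range': page_range})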
|
the-stack_0_14524 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from collections import OrderedDict
class LinearFeatureBaseline(nn.Module):
"""Linear baseline based on handcrafted features, as described in [1]
(Supplementary Material 2).
[1] Yan Duan, Xi Chen, Rein Houthooft, John Schulman, Pieter Abbeel,
"Benchmarking Deep Reinforcement Learning for Continuous Control", 2016
(https://arxiv.org/abs/1604.06778)
"""
def __init__(self, input_size, reg_coeff=1e-5):
super(LinearFeatureBaseline, self).__init__()
self.input_size = input_size
self._reg_coeff = reg_coeff
self.weight = nn.Parameter(torch.Tensor(self.feature_size,),
requires_grad=False)
self.weight.data.zero_()
self._eye = torch.eye(self.feature_size,
dtype=torch.float32,
device=self.weight.device)
@property
def feature_size(self):
return 2 * self.input_size + 4
def _feature(self, episodes):
ones = episodes.mask.unsqueeze(2)
observations = episodes.observations
time_step = torch.arange(len(episodes)).view(-1, 1, 1) * ones / 100.0
return torch.cat([
observations,
observations ** 2,
time_step,
time_step ** 2,
time_step ** 3,
ones
], dim=2)
def fit(self, episodes):
# sequence_length * batch_size x feature_size
featmat = self._feature(episodes).view(-1, self.feature_size)
# sequence_length * batch_size x 1
returns = episodes.returns.view(-1, 1)
# Remove blank (all-zero) episodes that only exist because episode lengths vary
flat_mask = episodes.mask.flatten()
flat_mask_nnz = torch.nonzero(flat_mask, as_tuple=False)
featmat = featmat[flat_mask_nnz].view(-1, self.feature_size)
returns = returns[flat_mask_nnz].view(-1, 1)
reg_coeff = self._reg_coeff
XT_y = torch.matmul(featmat.t(), returns)
XT_X = torch.matmul(featmat.t(), featmat)
for _ in range(5):
try:
coeffs, _ = torch.lstsq(XT_y, XT_X + reg_coeff * self._eye)
# An extra round of increasing regularization eliminated
# inf or nan in the least-squares solution most of the time
if torch.isnan(coeffs).any() or torch.isinf(coeffs).any():
raise RuntimeError
break
except RuntimeError:
reg_coeff *= 10
else:
raise RuntimeError('Unable to solve the normal equations in '
'`LinearFeatureBaseline`. The matrix X^T*X (with X the design '
'matrix) is not full-rank, regardless of the regularization '
'(maximum regularization: {0}).'.format(reg_coeff))
self.weight.copy_(coeffs.flatten())
def forward(self, episodes):
features = self._feature(episodes)
values = torch.mv(features.view(-1, self.feature_size), self.weight)
return values.view(features.shape[:2])
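
# Minimal smoke test (illustrative addition): the fake episode batch below is
# an assumption that only mimics the attributes the baseline reads
# (observations, mask, returns and len()); it also needs a torch version that
# still provides torch.lstsq.
if __name__ == '__main__':
    class _FakeEpisodes:
        def __init__(self, horizon, batch_size, obs_dim):
            self.observations = torch.randn(horizon, batch_size, obs_dim)
            self.mask = torch.ones(horizon, batch_size)
            self.returns = torch.randn(horizon, batch_size)

        def __len__(self):
            return self.observations.shape[0]

    episodes = _FakeEpisodes(horizon=20, batch_size=8, obs_dim=4)
    baseline = LinearFeatureBaseline(input_size=4)
    baseline.fit(episodes)
    print(baseline(episodes).shape)  # expected: torch.Size([20, 8])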
|
the-stack_0_14525 | import random
from collections.abc import Iterable  # `collections.Iterable` was removed in Python 3.10
from dynaconf import settings
def lang_raw(lang_code, *path):
package = settings.LANG[lang_code]
for p in path:
package = package[p]
return package
def lang(lang_code, *path):
package = settings.LANG[lang_code]
for p in path:
package = package[p]
if isinstance(package, Iterable) and not isinstance(package, str):
return random.choice(list(package))
else:
return package
def limitation(name):
return settings.LIMITATIONS[name]
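
# Usage sketch (the LANG/LIMITATIONS keys below are assumptions about the
# project's settings layout):
#
#   greeting = lang('en', 'greetings')          # random pick if the value is a list
#   max_len = limitation('max_message_length')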
|
the-stack_0_14527 | """ Code for `daugman_visual_explanation.ipynb`
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import itertools
import random
from daugman import daugman
from daugman import find_iris
from typing import List, Tuple, Iterable
class DaugmanVisualExplanation:
def __init__(self, img_path: str, start_r=10, end_r=30, circle_step=2, points_step=3):
self.img = self._get_new_image(img_path)
self.start_r = start_r
self.end_r = end_r
self.circle_step = circle_step
self.points_step = points_step
self.all_points = self._get_all_potential_iris_centers(self.img)
self.colors = self._get_unique_color_for_each_point(self.all_points)
def _get_new_image(self, img_path, gray=False) -> np.ndarray:
""" Get properly cropped BGR image, which looks like grayscale
"""
img = cv2.imread(img_path)
img = img[20:130, 20:130]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if not gray:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def _get_all_potential_iris_centers(self, img: np.ndarray) -> List[Tuple[int, int]]:
# get all potential points for search (from `find_iris()`)
h = img.shape[0]
        # we will look only at dots within the central 1/3 of the image
single_axis_range = range(int(h / 3), h - int(h / 3), self.points_step)
all_points = list(itertools.product(single_axis_range, single_axis_range))
return all_points
def _get_unique_color_for_each_point(self, all_points: List[Tuple[int, int]]) -> List[Tuple[int, int, int]]:
colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for i in self.all_points]
return colors
def plot_all_potential_iris_centers(self) -> np.ndarray:
# plot all potential points
img_dot = self.img.copy()
for point, color in zip(self.all_points, self.colors):
cv2.circle(img_dot, point, 0, color, -1)
_ = plt.imshow(img_dot[::, ::, ::-1])
return img_dot
def plot_circles_for_one_center(self, img_dot: np.ndarray, dot_idx=0) -> np.ndarray:
img_circles = img_dot.copy()
# within circles in radii range from 10px to 1/4 of image side
# plot the chosen potential point
cv2.circle(img_circles, list(self.all_points)[dot_idx], 0, self.colors[dot_idx], 1)
# plot all circle candidates for the single potential point
img_circles = self._draw_circles(img_circles, self.all_points[dot_idx], self.colors[dot_idx],
start_r=self.start_r, end_r=self.end_r, step=self.circle_step)
_ = plt.imshow(img_circles[::, ::, ::-1])
return img_circles
def _draw_circles(self, img: np.ndarray,
center: Tuple[int, int], color: Tuple[int, int, int],
start_r: int, end_r: int, step: int,
alpha=0.5) -> np.ndarray:
""" Part of ``daugman()`` modified for presentation purposes
"""
# get separate coordinates
x, y = center
overlay = img.copy()
radii = list(range(start_r, end_r, step))
for r in radii:
cv2.circle(overlay, center, r, color, 1)
img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
return img
def plot_best_circle_for_single_potential_iris_center(self, img_dot: np.ndarray,
dot_idx: int, color=None, alpha=0.8) -> np.ndarray:
gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# get best circle
_, best_radius = daugman(gray_img, self.all_points[dot_idx],
self.start_r, self.end_r, self.circle_step)
# plot best circle
if not color:
color = self.colors[dot_idx]
overlay = img_dot.copy()
cv2.circle(overlay, self.all_points[dot_idx], best_radius, color, 1)
img_dot = cv2.addWeighted(overlay, alpha, img_dot, 1 - alpha, 0)
return img_dot
def plot_best_circle_for_a_few_potential_iris_centers(self, img_dot: np.ndarray,
idxs: Iterable[int]) -> np.ndarray:
img = img_dot.copy()
for idx in idxs:
img = self.plot_best_circle_for_single_potential_iris_center(img, idx)
_ = plt.imshow(img[::, ::, ::-1])
return img_dot
def find_iris(self, *, daugman_start, daugman_end, daugman_step, points_step) -> np.ndarray:
gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
answer = find_iris(gray_img, daugman_start=daugman_start, daugman_end=daugman_end,
daugman_step=daugman_step, points_step=points_step)
iris_center, iris_rad = answer
out = self.img.copy()
cv2.circle(out, iris_center, iris_rad, (0, 0, 255), 1)
_ = plt.imshow(out[::, ::, ::-1])
return out
def plot_pixel_intensity_delta_pic(self) -> None:
# white image
img = np.full([100, 100, 3], 255, dtype=np.uint8)
# black circle
img = cv2.circle(img, (50, 50), 20, [0, 0, 0], -1)
# yellow
img = cv2.circle(img, (50, 50), 10, [255, 255, 0], 1)
# green
img = cv2.circle(img, (50, 50), 15, [0, 255, 0], 1)
# red
img = cv2.circle(img, (50, 50), 20, [255, 0, 0], 1)
# blue
img = cv2.circle(img, (50, 50), 25, [0, 0, 255], 1)
_ = plt.imshow(img)
def find_iris_on_binary_image(self, *, daugman_start, daugman_end, daugman_step, points_step) -> None:
# create simple image
img = np.full([100, 100, 3], 255, dtype=np.uint8)
img = cv2.circle(img, (50, 50), 20, [0, 0, 0], -1)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
answer = find_iris(gray_img, daugman_start=daugman_start, daugman_end=daugman_end,
daugman_step=daugman_step, points_step=points_step)
iris_center, iris_rad = answer
cv2.circle(img, iris_center, iris_rad, (0, 0, 255), 1)
_ = plt.imshow(img[::, ::, ::-1])
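
# Usage sketch (the image path is hypothetical; matplotlib needs an
# interactive backend or a notebook to actually display the figures):
#
#   demo = DaugmanVisualExplanation('eye.jpg', start_r=10, end_r=30,
#                                   circle_step=2, points_step=3)
#   img_dot = demo.plot_all_potential_iris_centers()
#   demo.plot_circles_for_one_center(img_dot, dot_idx=0)
#   demo.find_iris(daugman_start=10, daugman_end=30, daugman_step=2, points_step=3)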
|
the-stack_0_14528 | import math
import torch.nn as nn
class VGG(nn.Module):
'''
VGG model
'''
def __init__(self, features):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 10),
)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11():
"""VGG 11-layer model (configuration "A")"""
return VGG(make_layers(cfg['A']))
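
# Quick shape check (illustrative addition): the classifier expects 32x32
# CIFAR-10-sized inputs, so a random batch verifies the forward pass.
if __name__ == '__main__':
    import torch
    model = vgg11()
    out = model(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 10])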
|
the-stack_0_14530 | """Very simple breakout clone. A circle shape serves as the paddle, then
breakable bricks constructed of Poly-shapes.
The code showcases several pymunk concepts such as elasitcity, impulses,
constant object speed, joints, collision handlers and post step callbacks.
"""
import math, sys, random
import os
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk
from pymunk import Vec2d
import pymunk.pygame_util
width, height = 600,600
collision_types = {
"ball": 1,
"brick": 2,
"bottom": 3,
"player": 4,
}
def spawn_ball(space, position, direction):
ball_body = pymunk.Body(1, pymunk.inf)
ball_body.position = position
ball_shape = pymunk.Circle(ball_body, 5)
ball_shape.color = THECOLORS["green"]
ball_shape.elasticity = 1.0
ball_shape.collision_type = collision_types["ball"]
ball_body.apply_impulse_at_local_point(Vec2d(direction))
# Keep ball velocity at a static value
def constant_velocity(body, gravity, damping, dt):
body.velocity = body.velocity.normalized() * 400
ball_body.velocity_func = constant_velocity
space.add(ball_body, ball_shape)
def setup_level(space, player_body):
# Remove balls and bricks
for s in space.shapes[:]:
if s.body.body_type == pymunk.Body.DYNAMIC and s.body not in [player_body]:
space.remove(s.body, s)
# Spawn a ball for the player to have something to play with
spawn_ball(space, player_body.position + (0,40), random.choice([(1,10),(-1,10)]))
# Spawn bricks
for x in range(0,21):
x = x * 20 + 100
for y in range(0,5):
y = y * 10 + 400
brick_body = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
brick_body.position = x, y
brick_shape = pymunk.Poly.create_box(brick_body, (20,10))
brick_shape.elasticity = 1.0
brick_shape.color = THECOLORS['blue']
brick_shape.group = 1
brick_shape.collision_type = collision_types["brick"]
space.add(brick_body, brick_shape)
# Make bricks be removed when hit by ball
def remove_brick(arbiter, space, data):
brick_shape = arbiter.shapes[0]
space.remove(brick_shape, brick_shape.body)
h = space.add_collision_handler(
collision_types["brick"],
collision_types["ball"])
h.separate = remove_brick
def main():
### PyGame init
pygame.init()
screen = pygame.display.set_mode((width,height))
clock = pygame.time.Clock()
running = True
font = pygame.font.SysFont("Arial", 16)
### Physics stuff
space = pymunk.Space()
draw_options = pymunk.pygame_util.DrawOptions(screen)
### Game area
# walls - the left-top-right walls
static_lines = [pymunk.Segment(space.static_body, (50, 50), (50, 550), 2)
,pymunk.Segment(space.static_body, (50, 550), (550, 550), 2)
,pymunk.Segment(space.static_body, (550, 550), (550, 50), 2)
]
for line in static_lines:
line.color = THECOLORS['lightgray']
line.elasticity = 1.0
space.add(static_lines)
# bottom - a sensor that removes anything touching it
bottom = pymunk.Segment(space.static_body, (50, 50), (550, 50), 2)
bottom.sensor = True
bottom.collision_type = collision_types["bottom"]
bottom.color = THECOLORS['red']
def remove_first(arbiter, space, data):
ball_shape = arbiter.shapes[0]
space.remove(ball_shape, ball_shape.body)
return True
h = space.add_collision_handler(
collision_types["ball"],
collision_types["bottom"])
h.begin = remove_first
space.add(bottom)
### Player ship
player_body = pymunk.Body(500, pymunk.inf)
player_body.position = 300,100
player_shape = pymunk.Segment(player_body, (-50,0), (50,0), 8)
player_shape.color = THECOLORS["red"]
player_shape.elasticity = 1.0
player_shape.collision_type = collision_types["player"]
def pre_solve(arbiter, space, data):
        # We want to update the collision normal to make the bounce direction
        # depend on where the ball hits the paddle. Note that this
        # calculation isn't perfect, but just a quick example.
set_ = arbiter.contact_point_set
if len(set_.points) > 0:
player_shape = arbiter.shapes[0]
width = (player_shape.b - player_shape.a).x
delta = (player_shape.body.position - set_.points[0].point_a.x).x
normal = Vec2d(0, 1).rotated(delta / width / 2)
set_.normal = normal
set_.points[0].distance = 0
arbiter.contact_point_set = set_
return True
h = space.add_collision_handler(
collision_types["player"],
collision_types["ball"])
h.pre_solve = pre_solve
# restrict movement of player to a straigt line
move_joint = pymunk.GrooveJoint(space.static_body, player_body, (100,100), (500,100), (0,0))
space.add(player_body, player_shape, move_joint)
global state
# Start game
setup_level(space, player_body)
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
elif event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):
running = False
elif event.type == KEYDOWN and event.key == K_p:
pygame.image.save(screen, "breakout.png")
elif event.type == KEYDOWN and event.key == K_LEFT:
player_body.velocity = (-600,0)
elif event.type == KEYUP and event.key == K_LEFT:
player_body.velocity = 0,0
elif event.type == KEYDOWN and event.key == K_RIGHT:
player_body.velocity = (600,0)
elif event.type == KEYUP and event.key == K_RIGHT:
player_body.velocity = 0,0
elif event.type == KEYDOWN and event.key == K_r:
setup_level(space, player_body)
elif event.type == KEYDOWN and event.key == K_SPACE:
spawn_ball(space, player_body.position + (0,40), random.choice([(1,10),(-1,10)]))
### Clear screen
screen.fill(THECOLORS["black"])
### Draw stuff
space.debug_draw(draw_options)
state = []
for x in space.shapes:
s = "%s %s %s" % (x, x.body.position, x.body.velocity)
state.append(s)
### Update physics
fps = 60
dt = 1./fps
space.step(dt)
### Info and flip screen
screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
screen.blit(font.render("Move with left/right arrows, space to spawn a ball", 1, THECOLORS["darkgrey"]), (5,height - 35))
screen.blit(font.render("Press R to reset, ESC or Q to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))
pygame.display.flip()
clock.tick(fps)
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_14531 | """
Module for Optuna hyperparameter optimization (optuna.org)
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
# required to make json saving work in Python 2/3
try:
to_unicode = unicode
except NameError:
to_unicode = str
import imp
import json
import logging
import datetime
import os
import signal
import glob
from copy import copy
from random import Random
from time import sleep, time
from itertools import product
from subprocess import Popen, PIPE
import importlib, types
import numpy.random as nr
import numpy as np
import pickle
from neuron import h
from netpyne import sim, specs
import optuna
from .utils import createFolder
from .utils import bashTemplate
from .utils import dcp, sigfig
pc = h.ParallelContext() # use bulletin board master/slave
# -------------------------------------------------------------------------------
# Optuna optimization
# -------------------------------------------------------------------------------
# func needs to be outside of class
def runJob(nrnCommand, script, cfgSavePath, netParamsSavePath, simDataPath):
"""
Function for/to <short description of `netpyne.batch.optuna_parallel.runJob`>
Parameters
----------
script : <type>
<Short description of script>
**Default:** *required*
cfgSavePath : <type>
<Short description of cfgSavePath>
**Default:** *required*
netParamsSavePath : <type>
<Short description of netParamsSavePath>
**Default:** *required*
simDataPath : <type>
<Short description of simDataPath>
**Default:** *required*
"""
import os
print('\nJob in rank id: ',pc.id())
command = '%s %s simConfig=%s netParams=%s' % (nrnCommand, script, cfgSavePath, netParamsSavePath)
print(command)
with open(simDataPath+'.run', 'w') as outf, open(simDataPath+'.err', 'w') as errf:
pid = Popen(command.split(' '), stdout=outf, stderr=errf, preexec_fn=os.setsid).pid
with open('./pids.pid', 'a') as file:
file.write(str(pid) + ' ')
def optunaOptim(self, pc):
"""
Function for/to <short description of `netpyne.batch.optuna_parallel.optunaOptim`>
Parameters
----------
self : <type>
<Short description of self>
**Default:** *required*
pc : <type>
<Short description of pc>
**Default:** *required*
"""
import sys
# -------------------------------------------------------------------------------
# Optuna optimization: Parallel evaluation
# -------------------------------------------------------------------------------
def objective(trial, args):
import os
ngen = trial.number
total_jobs = 0
# options slurm, mpi
type = args.get('type', 'mpi_direct')
# params
paramLabels = args.get('paramLabels', [])
minVals = args.get('minVals', [])
maxVals = args.get('maxVals', [])
# paths to required scripts
script = args.get('script', 'init.py')
netParamsSavePath = args.get('netParamsSavePath')
genFolderPath = self.saveFolder + '/trial_' + str(ngen)
# mpi command setup
nodes = args.get('nodes', 1)
coresPerNode = args.get('coresPerNode', 1)
mpiCommand = args.get('mpiCommand', 'mpiexec')
nrnCommand = args.get('nrnCommand', 'nrniv -python -mpi')
numproc = nodes*coresPerNode
# slurm setup
custom = args.get('custom', '')
folder = args.get('folder', '.')
email = args.get('email', '[email protected]')
walltime = args.get('walltime', '00:01:00')
reservation = args.get('reservation', None)
allocation = args.get('allocation', 'csd403') # NSG account
# fitness function
fitnessFunc = args.get('fitnessFunc')
fitnessFuncArgs = args.get('fitnessFuncArgs')
maxFitness = args.get('maxFitness')
# read params or set defaults
sleepInterval = args.get('sleepInterval', 0.2)
# create folder if it does not exist
createFolder(genFolderPath)
# --------------------------------------
# generate param values for optuna trial
candidate = []
for paramLabel, minVal, maxVal in zip(paramLabels, minVals, maxVals):
candidate.append(trial.suggest_uniform(str(paramLabel), minVal, maxVal))
# remember pids and jobids in a list
pids = []
jobids = {}
# create a job for the candidate
candidate_index = 0
sleep(sleepInterval) # required for slurm
# name and path
jobName = "trial_" + str(ngen)
jobPath = genFolderPath + '/' + jobName
# set initial cfg initCfg
if len(self.initCfg) > 0:
for paramLabel, paramVal in self.initCfg.items():
self.setCfgNestedParam(paramLabel, paramVal)
# modify cfg instance with candidate values
#print(paramLabels, candidate)
for label, value in zip(paramLabels, candidate):
print('set %s=%s' % (label, value))
self.setCfgNestedParam(label, value)
#self.setCfgNestedParam("filename", jobPath)
self.cfg.simLabel = jobName
self.cfg.saveFolder = genFolderPath
# save cfg instance to file
cfgSavePath = jobPath + '_cfg.json'
self.cfg.save(cfgSavePath)
if type=='mpi_bulletin':
# ----------------------------------------------------------------------
# MPI master-slaves
# ----------------------------------------------------------------------
pc.submit(runJob, nrnCommand, script, cfgSavePath, netParamsSavePath, jobPath)
print('-'*80)
else:
# ----------------------------------------------------------------------
# MPI job commnand
# ----------------------------------------------------------------------
if mpiCommand == '':
command = '%s %s simConfig=%s netParams=%s ' % (nrnCommand, script, cfgSavePath, netParamsSavePath)
else:
command = '%s -n %d %s %s simConfig=%s netParams=%s ' % (mpiCommand, numproc, nrnCommand, script, cfgSavePath, netParamsSavePath)
# ----------------------------------------------------------------------
# run on local machine with <nodes*coresPerNode> cores
# ----------------------------------------------------------------------
if type=='mpi_direct':
executer = '/bin/bash'
jobString = bashTemplate('mpi_direct') %(custom, folder, command)
# ----------------------------------------------------------------------
# run on HPC through slurm
# ----------------------------------------------------------------------
elif type=='hpc_slurm':
executer = 'sbatch'
res = '#SBATCH --res=%s' % (reservation) if reservation else ''
jobString = bashTemplate('hpc_slurm') % (jobName, allocation, walltime, nodes, coresPerNode, jobPath, jobPath, email, res, custom, folder, command)
# ----------------------------------------------------------------------
# run on HPC through PBS
# ----------------------------------------------------------------------
elif type=='hpc_torque':
executer = 'qsub'
queueName = args.get('queueName', 'default')
nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
jobString = bashTemplate('hpc_torque') % (jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)
# ----------------------------------------------------------------------
# save job and run
# ----------------------------------------------------------------------
print('Submitting job ', jobName)
print(jobString)
print('-'*80)
# save file
batchfile = '%s.sbatch' % (jobPath)
with open(batchfile, 'w') as text_file:
text_file.write("%s" % jobString)
if type == 'mpi_direct':
with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
else:
with open(jobPath+'.jobid', 'w') as outf, open(jobPath+'.err', 'w') as errf:
pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
#proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
sleep(0.1)
#read = proc.stdout.read()
if type == 'mpi_direct':
with open('./pids.pid', 'a') as file:
file.write(str(pids))
else:
with open(jobPath+'.jobid', 'r') as outf:
read=outf.readline()
print(read)
if len(read) > 0:
jobid = int(read.split()[-1])
jobids[candidate_index] = jobid
print('jobids', jobids)
total_jobs += 1
sleep(0.1)
# ----------------------------------------------------------------------
# gather data and compute fitness
# ----------------------------------------------------------------------
if type == 'mpi_bulletin':
# wait for pc bulletin board jobs to finish
try:
while pc.working():
sleep(1)
#pc.done()
except:
pass
num_iters = 0
jobs_completed = 0
fitness = [None] # just 1 candidate
# print outfilestem
print("Waiting for jobs from generation %d/%d ..." %(ngen, args.get('maxiters')))
# print "PID's: %r" %(pids)
# start fitness calculation
while jobs_completed < total_jobs:
unfinished = [i for i, x in enumerate(fitness) if x is None ]
for candidate_index in unfinished:
try: # load simData and evaluate fitness
jobNamePath = genFolderPath + "/trial_" + str(ngen)
if os.path.isfile(jobNamePath+'.json'):
with open('%s.json'% (jobNamePath)) as file:
simData = json.load(file)['simData']
fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
jobs_completed += 1
print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
elif os.path.isfile(jobNamePath+'.pkl'):
with open('%s.pkl'% (jobNamePath), 'rb') as file:
simData = pickle.load(file)['simData']
fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
jobs_completed += 1
print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
except Exception as e:
err = "There was an exception evaluating candidate %d:"%(candidate_index)
print(("%s \n %s"%(err,e)))
num_iters += 1
print('completed: %d' %(jobs_completed))
if num_iters >= args.get('maxiter_wait', 5000):
print("Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished)))
for candidate_index in unfinished:
fitness[candidate_index] = maxFitness # unfinished jobs get the default (max) fitness
jobs_completed += 1
try:
if 'scancelUser' in args:
os.system('scancel -u %s' % (args['scancelUser']))
else:
os.system('scancel %d' % (jobids[candidate_index])) # terminate unfinished job (resubmitted jobs not terminated!)
except:
pass
sleep(args.get('time_sleep', 1))
# kill all processes
if type == 'mpi_bulletin':
try:
with open("./pids.pid", 'r') as file: # read pids for mpi_bulletin
pids = [int(i) for i in file.read().split(' ')[:-1]]
with open("./pids.pid", 'w') as file: # delete content
pass
for pid in pids:
try:
os.killpg(os.getpgid(pid), signal.SIGTERM)
except:
pass
except:
pass
elif type == 'mpi_direct':
import psutil
PROCNAME = "nrniv"
for proc in psutil.process_iter():
# check whether the process name matches
try:
if proc.name() == PROCNAME:
proc.kill()
except:
pass
# don't want to do this for HPCs since jobs are running on compute nodes, not the master
print("-" * 80)
print(" Completed a generation ")
print("-" * 80)
return fitness[0] # single candidate for now
# -------------------------------------------------------------------------------
# Optuna optimization: Main code
# -------------------------------------------------------------------------------
import os
from time import sleep
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
except:
size = 1
rank = 0
# create main sim directory and save scripts
self.saveScripts()
global ngen
ngen = -1
# gather **kwargs
args = {}
args['popsize'] = self.optimCfg.get('popsize', 1)
args['minVals'] = [x['values'][0] for x in self.params]
args['maxVals'] = [x['values'][1] for x in self.params]
args['cfg'] = self.cfg # include here args/params to pass to evaluator function
args['paramLabels'] = [x['label'] for x in self.params]
args['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
args['maxiters'] = self.optimCfg['maxiters'] if 'maxiters' in self.optimCfg else 1000
args['maxtime'] = self.optimCfg['maxtime'] if 'maxtime' in self.optimCfg else None
args['fitnessFunc'] = self.optimCfg['fitnessFunc']
args['fitnessFuncArgs'] = self.optimCfg['fitnessFuncArgs']
args['maxiter_wait'] = self.optimCfg['maxiter_wait']
args['time_sleep'] = self.optimCfg['time_sleep']
args['maxFitness'] = self.optimCfg.get('maxFitness', 1000)
args['direction'] = self.optimCfg['direction'] if 'direction' in self.optimCfg else 'minimize'
for key, value in self.optimCfg.items():
args[key] = value
for key, value in self.runCfg.items():
args[key] = value
# if using pc bulletin board, initialize all workers
if self.runCfg.get('type', None) == 'mpi_bulletin':
for iworker in range(int(pc.nhost())):
pc.runworker()
# -------------------------------------------------------------------------------
# Run algorithm
# -------------------------------------------------------------------------------
sleep(rank) # each process waits a different time to avoid saturating the sqlite database
study = optuna.create_study(study_name=self.batchLabel, storage='sqlite:///%s/%s_storage.db' % (self.saveFolder, self.batchLabel),
load_if_exists=True, direction=args['direction'])
try:
study.optimize(lambda trial: objective(trial, args), n_trials=args['maxiters'], timeout=args['maxtime'])
except Exception as e:
print(e)
# print best and finish
if rank == size-1:
df = study.trials_dataframe(attrs=('number', 'value', 'params', 'state'))
importance = optuna.importance.get_param_importances(study=study)
print('\nBest trial: ', study.best_trial)
print('\nParameter importance: ', dict(importance))
print('\nBest Solution with fitness = %.4g: \n' % (study.best_value), study.best_params)
print('\nSaving to output.pkl...\n')
output = {'study': study, 'df': df, 'importance': importance}
with open('%s/%s_output.pkl' % (self.saveFolder, self.batchLabel), 'wb') as f:
pickle.dump(output, f)
sleep(1)
print("-" * 80)
print(" Completed Optuna parameter optimization ")
print("-" * 80)
sys.exit()
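# Illustrative sketch only (not from the original source): judging from the
# args.get(...) calls above, the optimCfg/runCfg driving this Optuna routine
# could look roughly like the following; the concrete values are assumptions.
#
#     self.optimCfg = {
#         'fitnessFunc': fitnessFunc,        # callable(simData, **fitnessFuncArgs) -> float
#         'fitnessFuncArgs': {},
#         'maxFitness': 1000,
#         'maxiters': 100,                   # number of Optuna trials
#         'maxtime': 3600,                   # seconds, or None for no timeout
#         'maxiter_wait': 5000,
#         'time_sleep': 1,
#         'direction': 'minimize',
#     }
#     self.runCfg = {
#         'type': 'mpi_direct',              # or 'mpi_bulletin', 'hpc_slurm', 'hpc_torque'
#         'script': 'init.py',
#         'mpiCommand': 'mpiexec',
#         'nrnCommand': 'nrniv -python -mpi',
#         'nodes': 1,
#         'coresPerNode': 4,
#     }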
|
the-stack_0_14532 | # Standard library imports
import sqlite3
from dataclasses import asdict
# Third party imports
import pandas as pd
from spotify_flows.spotify.artists import read_artists_from_id
from spotify_flows.database import SpotifyDatabase
# Main body
def main():
db = SpotifyDatabase("data/spotify.db", op_table="operations")
df_related, df_artists = db.table_contents(["related", "artists"])
df_related = df_related.drop_duplicates()
enriched_artist_ids = df_artists.loc[:, "id"].unique().tolist()
all_artist_ids = set(
df_related["artist_id"].tolist() + df_related["related_artist_id"].tolist()
)
artists_to_enrich = [id for id in all_artist_ids if id not in enriched_artist_ids]
remaining_artists = artists_to_enrich
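# Look up the remaining artists in chunks of at most 50 IDs per request
# (assumption: Spotify's "get several artists" endpoint accepts up to 50 IDs per call)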
while remaining_artists:
n = min(len(remaining_artists), 50)
artists = read_artists_from_id(artist_ids=remaining_artists[:n])
df_data = pd.DataFrame([asdict(artist) for artist in artists]).drop(
columns=["genres"]
)
db.enrich_database_table(df_data=df_data, table="artists")
remaining_artists = remaining_artists[n:]
if __name__ == "__main__":
raise SystemExit(main())
|
the-stack_0_14533 | import glob
import os
import pytest
from cli.src.helpers.build_io import get_build_path
from cli.src.helpers.data_loader import load_schema_obj, load_all_schema_objs, load_all_schema_objs_from_directory,\
load_template_file, load_json_obj, types, SCHEMA_DIR
from tests.unit.helpers.constants import CLUSTER_NAME_LOAD, NON_EXISTING_CLUSTER, TEST_DOCS, OUTPUT_PATH, TEST_INVENTORY, TEST_JSON,\
TEST_JSON_NAME, TEST_CLUSTER_MODEL
TEST_MINIMAL_CLUSTER_CONFIG = {
'kind': 'epiphany-cluster',
'title': 'Epiphany cluster Config',
'provider': 'aws',
'name': 'default',
'specification':
{
'name': 'name',
'prefix': 'prefix',
'admin_user':
{
'name': 'ubuntu',
'key_path': '/shared/.ssh/epiphany-operations/id_rsa'
},
'cloud':
{
'k8s_as_cloud_service': False,
'use_public_ips': False,
'credentials':
{
'key': 'XXXX-XXXX-XXXX',
'secret': 'XXXXXXXXXXXXXXXX'
},
'default_os_image': 'default'
},
'components':
{
'repository': {'count': 1},
'kubernetes_master': {'count': 1},
'kubernetes_node': {'count': 2},
'logging': {'count': 1},
'monitoring': {'count': 1},
'kafka': {'count': 2},
'postgresql': {'count': 1},
'load_balancer': {'count': 1},
'rabbitmq': {'count': 1}
}
}
}
def test_load_schema_obj():
yaml_obj = load_schema_obj(types.DEFAULT, 'aws', 'configuration/minimal-cluster-config')
assert yaml_obj == TEST_MINIMAL_CLUSTER_CONFIG
def test_load_all_schema_objs():
yaml_objs = load_all_schema_objs(types.DEFAULT, 'aws', 'configuration/minimal-cluster-config')
assert yaml_objs == [TEST_MINIMAL_CLUSTER_CONFIG]
def test_load_all_schema_objs_from_directory():
defaults = load_all_schema_objs_from_directory(types.DEFAULT, 'common', 'configuration')
directory_path = os.path.join(SCHEMA_DIR, 'common', types.DEFAULT, 'configuration')
assert len(defaults) == len(glob.glob(os.path.join(directory_path, '*.yml')))
def test_load_template_file():
template = load_template_file(types.ANSIBLE, '', 'inventory')
content = template.render(inventory=TEST_INVENTORY, cluster_model=TEST_CLUSTER_MODEL)
assert 'test-1 ansible_host=10.0.0.1' in content
assert 'test-2 ansible_host=10.0.0.2' in content
assert 'test-3 ansible_host=10.0.0.3' in content
assert 'test-4 ansible_host=10.0.0.4' in content
assert 'ansible_user=operations' in content
assert 'ansible_ssh_private_key_file=id_rsa' in content
def test_load_json_obj():
loaded_json = load_json_obj(os.path.join(OUTPUT_PATH, TEST_JSON_NAME))
assert loaded_json == TEST_JSON
def test_load_not_existing_manifest_docs():
build_path = get_build_path(NON_EXISTING_CLUSTER)
with pytest.raises(Exception):
load_manifest(build_path)
|
the-stack_0_14534 | import tempfile
from unittest import TestCase
from qtlayoutbuilder.lib.multiline_string_utils import MultilineString
from qtlayoutbuilder.lib.original_file_rewriter import OriginalFileReWriter
class TestOriginalFileReWriter(TestCase):
# Lower level functions first.
def test_add_backup_location_comment(self):
# First check that we get what we expect when the existing
# one_big_string, does not already have such a comment in.
one_big_string = 'just this text'
mock_backup_folder_string = 'mock_backup_folder'
output = OriginalFileReWriter._add_backup_location_comment(
mock_backup_folder_string, one_big_string)
output = MultilineString.normalise(output)
expected = MultilineString.normalise("""
# This file has been automatically re-formatted.
# Previous versions can be found here:
# mock_backup_folder
##
just this text
""")
self.assertEquals(output, expected)
# Now ensure that if we do it again - but this time with the
# new one_big_string that already has a comment in, the old comment
# gets replaced with the new.
previous_output = output
mock_backup_folder_string = 'DIFFERENT_mock_backup_folder'
new_output = \
OriginalFileReWriter._add_backup_location_comment(
mock_backup_folder_string, previous_output)
new_output = MultilineString.normalise(new_output)
expected = MultilineString.normalise("""
# This file has been automatically re-formatted.
# Previous versions can be found here:
# DIFFERENT_mock_backup_folder
##
just this text
""")
self.assertEquals(new_output, expected)
def test_make_backup_of_existing_file(self):
# Make a file that we will then back up.
orig_fd = tempfile.NamedTemporaryFile(delete=False)
orig_file_path = orig_fd.name
orig_fd.write('original file content')
orig_fd.close()
# Back it up
backup_folder, backup_file_path = \
OriginalFileReWriter._make_backup_of_existing_file(orig_file_path)
# Ensure that the backed up file has the expected content.
with open(backup_file_path, 'r') as read_fd:
content = read_fd.read()
self.assertEqual(content, 'original file content')
# Now at API level
def test_at_api_level(self):
# Make a file that we will then overwrite.
orig_fd = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
orig_file_path = orig_fd.name
content = MultilineString.shift_left("""
layout QHBoxLayout
widget QWidget
""")
orig_fd.write(content)
orig_fd.close()
# Mandate the overwrite
OriginalFileReWriter.overwrite_original(orig_file_path, 'new content')
# Check for both the presence of the new content, and the
# backup message.
with open(orig_file_path, 'r') as input_file:
content = input_file.read()
self.assertTrue('new content' in content)
self.assertTrue('has been' in content)
|
the-stack_0_14537 | from PIL import ImageGrab #Used to screenshots
# it takes a board number from 1 to 6
def screen_board(board_no):
grab_displays = ((10,50,630,380),
(650,50,1270,380),
(1290,50,1910,380),
(10,590,630,920),
(650,590,1270,920),
(1290,590,1910,920))
# Screenshot of the board region at index board_no - 1
return ImageGrab.grab(bbox=(grab_displays[board_no-1]))
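# Illustrative usage (not part of the original script):
#
#     board_img = screen_board(1)    # PIL Image of board 1's screen region
#     board_img.save('board1.png')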
# takes and returns a screenshot of the given coords (x1, y1, x2, y2)
def screenshot(xyxy):
return ImageGrab.grab(bbox=(xyxy)) |
the-stack_0_14538 | # -*- coding: utf-8 -*-
settings = {
'source': 'csv',
#'source': 'mongodb',
'data_path': './data',
'stock_commission': 3 / 10000.0,
'future_commission': 1 / 10000.0,
'tick_test': False,
}
class ConfigLog(object):
log_level = 'INFO'
log_to_file = True
log_to_console = True
log_path = './log'
__all__ = ['settings', 'ConfigLog']
|
the-stack_0_14539 | # _ _
# | | | |
# ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
# / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
# | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
# \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
#
"""
constants.py
various fixed values used elsewhere
"""
# place to store downloaded imaged
DOWNLOAD_DIRECTORY = './downloaded_images/'
# directory to save augmented images before sending to S3
TMP_SAVE_DIR = "./aug_img_tmp/"
# should reflect number of distinct transformations in transformations.py
NUM_POSSIBLE_TRANSFORMS = 6
# redis database host app name
HOST = "redis"
# redis database key for the jobs
JOB_NAME = "job2"
|
the-stack_0_14540 | import datetime
from typing import List
from unittest.mock import patch
import pytz
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from posthog.email import EmailMessage
from posthog.models import Event, Organization, Person, Team, User
from posthog.tasks.email import send_weekly_email_report
class TestEmail(TestCase):
def create_person(self, team: Team, base_distinct_id: str = "") -> Person:
person = Person.objects.create(team=team)
person.add_distinct_id(base_distinct_id)
return person
@freeze_time("2020-09-21")
def setUp(self):
super().setUp()
self.organization = Organization.objects.create()
self.team = Team.objects.create(organization=self.organization, name="The Bakery")
self.user = User.objects.create(email="[email protected]")
self.user2 = User.objects.create(email="[email protected]")
self.organization.members.add(self.user)
self.organization.members.add(self.user2)
last_week = datetime.datetime(2020, 9, 17, 3, 22, tzinfo=pytz.UTC)
two_weeks_ago = datetime.datetime(2020, 9, 8, 19, 54, tzinfo=pytz.UTC)
self.persons: List = [self.create_person(self.team, str(i)) for i in range(0, 7)]
# Resurrected
self.persons[0].created_at = timezone.now() - datetime.timedelta(weeks=3)
self.persons[0].save()
self.persons[1].created_at = timezone.now() - datetime.timedelta(weeks=4)
self.persons[1].save()
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=0)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=1)
# Retained
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=2)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=2)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=3)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=3)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=4)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=4)
# New
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
# Churned
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=6)
def test_cant_send_emails_if_not_properly_configured(self) -> None:
with self.settings(EMAIL_HOST=None):
with self.assertRaises(ImproperlyConfigured) as e:
EmailMessage("Subject", "template")
self.assertEqual(
str(e.exception), "Email settings not configured! Set at least the EMAIL_HOST environment variable.",
)
@freeze_time("2020-09-21")
def test_weekly_email_report(self) -> None:
with self.settings(
EMAIL_HOST="localhost", SITE_URL="http://localhost:9999",
):
send_weekly_email_report()
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].to, ["[email protected]"])
self.assertEqual(mail.outbox[1].to, ["[email protected]"])
self.assertEqual(
mail.outbox[0].subject, "PostHog weekly report for Sep 14, 2020 to Sep 20",
)
self.assertEqual(
mail.outbox[0].body, "",
) # no plain-text version support yet
html_message = mail.outbox[0].alternatives[0][0] # type: ignore
self.assertIn(
"http://localhost:9999/static/posthog-logo.png", html_message,
) # absolute URLs are used
self.assertIn('style="font-weight: 300"', html_message) # CSS is inlined
self.assertIn(
"Your PostHog weekly report is ready! Your team had 6 active users last week! 🎉", html_message,
) # preheader
@patch("posthog.tasks.email.EmailMessage")
@freeze_time("2020-09-21")
def test_weekly_email_report_content(self, mock_email_message):
with self.settings(EMAIL_HOST="localhost"):
send_weekly_email_report()
self.assertEqual(
mock_email_message.call_args[0][0], "PostHog weekly report for Sep 14, 2020 to Sep 20",
) # Email subject
self.assertEqual(mock_email_message.call_args[0][1], "weekly_report")
template_context = mock_email_message.call_args[0][2]
self.assertEqual(template_context["team"], "The Bakery")
self.assertEqual(
template_context["period_start"], datetime.datetime(2020, 9, 14, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["period_end"], datetime.datetime(2020, 9, 20, 23, 59, 59, 999999, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["active_users"], 6,
)
self.assertEqual(
template_context["active_users_delta"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["new"], 2), 0.17,
)
self.assertEqual(
template_context["user_distribution"]["retained"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["resurrected"], 2), 0.33,
)
self.assertEqual(
template_context["churned_users"], {"abs": 1, "ratio": 0.25, "delta": None},
)
|
the-stack_0_14542 | """Given a folder with subfolders, run the schizo test and report on the min,
max, etc statistics of each subfolder.
"""
import argparse
import collections
import multiprocessing
import os
import re
import subprocess
progdir = os.path.dirname(os.path.abspath(__file__))
mainscript = os.path.join(progdir, '../main.py')
def dir_arg(s):
if os.path.isdir(s):
return s
raise argparse.ArgumentTypeError(f'{s} is not a directory')
def get_rmse(fpath):
p = subprocess.Popen(['python3', mainscript, fpath],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
r = p.wait()
vals = {}
for m in re.finditer(rb'^Max RMSE, (.*) to (.*): ([^\n]+)', stderr, flags=re.M):
vals[(m.group(1), m.group(2))] = float(m.group(3))
return vals
def main():
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('folder', type=dir_arg)
args = ap.parse_args()
pool = multiprocessing.Pool()
all_files = []
for path, dirs, files in os.walk(args.folder):
for f in files:
fpath = os.path.join(path, f)
if os.path.isfile(fpath):
all_files.append(fpath)
results = pool.map(get_rmse, all_files)
rmses = sorted(zip(all_files, results), key=lambda k: k[0])
folders = []
class Tracker:
def __init__(self):
self.items = collections.defaultdict(list)
def __str__(self):
result = []
for (t1, t2), vals in self.items.items():
result.append(f'{t1} to {t2}: {sum(vals) / len(vals):.4f} ({min(vals):.4f} / {max(vals):.4f})')
return ', '.join(result)
def add(self, rmse_vals):
for k, v in rmse_vals.items():
self.items[k].append(v)
def cap(parts, rmse):
for fi in reversed(range(len(folders))):
if len(parts) <= fi or parts[fi] != folders[fi][0]:
fname = '/'.join([ff[0] for ff in folders])
fstr = "\n ".join(str(folders[fi][1]).split(", "))
print(f'{fname}\n {fstr}')
folders.pop()
else:
break
for fi in range(len(folders), len(parts)):
folders.append([parts[fi], Tracker()])
for f in folders:
f[1].add(rmse)
for f, r in rmses:
remainder = f
parts = []
while True:
remainder, tail = os.path.split(remainder)
if not tail:
break
parts.insert(0, tail)
if not remainder:
break
cap(parts, r)
cap([], None)
if __name__ == '__main__':
main()
|
the-stack_0_14543 | from __future__ import absolute_import, print_function
from django.conf.urls import include, patterns, url
from .endpoints.accept_project_transfer import AcceptProjectTransferEndpoint
from .endpoints.organization_dashboards import OrganizationDashboardsEndpoint
from .endpoints.relay_heartbeat import RelayHeartbeatEndpoint
from .endpoints.relay_projectconfigs import RelayProjectConfigsEndpoint
from .endpoints.relay_publickeys import RelayPublicKeysEndpoint
from .endpoints.relay_index import RelayIndexEndpoint
from .endpoints.relay_details import RelayDetailsEndpoint
from .endpoints.relay_register import RelayRegisterChallengeEndpoint, \
RelayRegisterResponseEndpoint
from .endpoints.api_applications import ApiApplicationsEndpoint
from .endpoints.api_application_details import ApiApplicationDetailsEndpoint
from .endpoints.api_authorizations import ApiAuthorizationsEndpoint
from .endpoints.api_tokens import ApiTokensEndpoint
from .endpoints.assistant import AssistantEndpoint
from .endpoints.auth_index import AuthIndexEndpoint
from .endpoints.authenticator_index import AuthenticatorIndexEndpoint
from .endpoints.broadcast_details import BroadcastDetailsEndpoint
from .endpoints.broadcast_index import BroadcastIndexEndpoint
from .endpoints.catchall import CatchallEndpoint
from .endpoints.chunk import ChunkUploadEndpoint
from .endpoints.event_attachment_details import EventAttachmentDetailsEndpoint
from .endpoints.event_attachments import EventAttachmentsEndpoint
from .endpoints.event_details import EventDetailsEndpoint
from .endpoints.event_owners import EventOwnersEndpoint
from .endpoints.event_apple_crash_report import EventAppleCrashReportEndpoint
from .endpoints.event_grouping_info import EventGroupingInfoEndpoint
from .endpoints.group_details import GroupDetailsEndpoint
from .endpoints.group_events import GroupEventsEndpoint
from .endpoints.group_events_latest import GroupEventsLatestEndpoint
from .endpoints.group_events_oldest import GroupEventsOldestEndpoint
from .endpoints.group_hashes import GroupHashesEndpoint
from .endpoints.group_integration_details import GroupIntegrationDetailsEndpoint
from .endpoints.group_integrations import GroupIntegrationsEndpoint
from .endpoints.group_notes import GroupNotesEndpoint
from .endpoints.group_notes_details import GroupNotesDetailsEndpoint
from .endpoints.group_participants import GroupParticipantsEndpoint
from .endpoints.group_external_issues import GroupExternalIssuesEndpoint
from .endpoints.group_external_issue_details import GroupExternalIssueDetailsEndpoint
from .endpoints.group_similar_issues import GroupSimilarIssuesEndpoint
from .endpoints.group_stats import GroupStatsEndpoint
from .endpoints.group_tags import GroupTagsEndpoint
from .endpoints.group_tagkey_details import GroupTagKeyDetailsEndpoint
from .endpoints.group_tagkey_values import GroupTagKeyValuesEndpoint
from .endpoints.group_tombstone_details import GroupTombstoneDetailsEndpoint
from .endpoints.group_tombstone import GroupTombstoneEndpoint
from .endpoints.group_user_reports import GroupUserReportsEndpoint
from .endpoints.organization_incident_details import OrganizationIncidentDetailsEndpoint
from .endpoints.organization_incident_seen import OrganizationIncidentSeenEndpoint
from .endpoints.index import IndexEndpoint
from .endpoints.internal_queue_tasks import InternalQueueTasksEndpoint
from .endpoints.internal_quotas import InternalQuotasEndpoint
from .endpoints.internal_stats import InternalStatsEndpoint
from .endpoints.monitor_checkins import MonitorCheckInsEndpoint
from .endpoints.monitor_checkin_details import MonitorCheckInDetailsEndpoint
from .endpoints.monitor_details import MonitorDetailsEndpoint
from .endpoints.monitor_stats import MonitorStatsEndpoint
from .endpoints.organization_access_request_details import OrganizationAccessRequestDetailsEndpoint
from .endpoints.organization_activity import OrganizationActivityEndpoint
from .endpoints.organization_auditlogs import OrganizationAuditLogsEndpoint
from .endpoints.organization_api_key_index import OrganizationApiKeyIndexEndpoint
from .endpoints.organization_api_key_details import OrganizationApiKeyDetailsEndpoint
from .endpoints.organization_auth_providers import OrganizationAuthProvidersEndpoint
from .endpoints.organization_auth_provider_details import OrganizationAuthProviderDetailsEndpoint
from .endpoints.organization_auth_provider_send_reminders import OrganizationAuthProviderSendRemindersEndpoint
from .endpoints.organization_avatar import OrganizationAvatarEndpoint
from .endpoints.organization_details import OrganizationDetailsEndpoint
from .endpoints.organization_discover_query import OrganizationDiscoverQueryEndpoint
from .endpoints.organization_discover_saved_queries import OrganizationDiscoverSavedQueriesEndpoint
from .endpoints.organization_discover_saved_query_detail import OrganizationDiscoverSavedQueryDetailEndpoint
from .endpoints.organization_events import OrganizationEventsEndpoint, OrganizationEventsMetaEndpoint, OrganizationEventsStatsEndpoint, OrganizationEventsHeatmapEndpoint
from .endpoints.organization_group_index import OrganizationGroupIndexEndpoint
from .endpoints.organization_dashboard_details import OrganizationDashboardDetailsEndpoint
from .endpoints.organization_dashboard_widget_details import OrganizationDashboardWidgetDetailsEndpoint
from .endpoints.organization_dashboard_widgets import OrganizationDashboardWidgetsEndpoint
from .endpoints.organization_health import OrganizationHealthTopEndpoint, OrganizationHealthGraphEndpoint
from .endpoints.organization_shortid import ShortIdLookupEndpoint
from .endpoints.organization_environments import OrganizationEnvironmentsEndpoint
from .endpoints.organization_eventid import EventIdLookupEndpoint
from .endpoints.organization_slugs import SlugsUpdateEndpoint
from .endpoints.organization_incident_activity_index import OrganizationIncidentActivityIndexEndpoint
from .endpoints.organization_incident_comment_index import OrganizationIncidentCommentIndexEndpoint
from .endpoints.organization_incident_comment_details import OrganizationIncidentCommentDetailsEndpoint
from .endpoints.organization_incident_index import OrganizationIncidentIndexEndpoint
from .endpoints.organization_incident_subscription_index import OrganizationIncidentSubscriptionIndexEndpoint
from .endpoints.organization_incident_suspects_index import OrganizationIncidentSuspectsIndexEndpoint
from .endpoints.organization_issues_new import OrganizationIssuesNewEndpoint
from .endpoints.organization_issues_resolved_in_release import OrganizationIssuesResolvedInReleaseEndpoint
from .endpoints.organization_member_details import OrganizationMemberDetailsEndpoint
from .endpoints.organization_member_index import OrganizationMemberIndexEndpoint
from .endpoints.organization_member_issues_assigned import OrganizationMemberIssuesAssignedEndpoint
from .endpoints.organization_member_issues_bookmarked import OrganizationMemberIssuesBookmarkedEndpoint
from .endpoints.organization_member_issues_viewed import OrganizationMemberIssuesViewedEndpoint
from .endpoints.organization_member_unreleased_commits import OrganizationMemberUnreleasedCommitsEndpoint
from .endpoints.organization_member_team_details import OrganizationMemberTeamDetailsEndpoint
from .endpoints.organization_monitors import OrganizationMonitorsEndpoint
from .endpoints.organization_onboarding_tasks import OrganizationOnboardingTaskEndpoint
from .endpoints.organization_index import OrganizationIndexEndpoint
from .endpoints.organization_pinned_searches import OrganizationPinnedSearchEndpoint
from .endpoints.organization_plugins import OrganizationPluginsEndpoint
from .endpoints.organization_processingissues import OrganizationProcessingIssuesEndpoint
from .endpoints.organization_projects import OrganizationProjectsEndpoint
from .endpoints.organization_recent_searches import OrganizationRecentSearchesEndpoint
from .endpoints.organization_releases import OrganizationReleasesEndpoint
from .endpoints.organization_release_details import OrganizationReleaseDetailsEndpoint
from .endpoints.organization_release_assemble import OrganizationReleaseAssembleEndpoint
from .endpoints.organization_release_files import OrganizationReleaseFilesEndpoint
from .endpoints.organization_release_file_details import OrganizationReleaseFileDetailsEndpoint
from .endpoints.organization_release_commits import OrganizationReleaseCommitsEndpoint
from .endpoints.organization_repositories import OrganizationRepositoriesEndpoint
from .endpoints.organization_integration_details import OrganizationIntegrationDetailsEndpoint
from .endpoints.organization_integration_repos import OrganizationIntegrationReposEndpoint
from .endpoints.organization_integrations import OrganizationIntegrationsEndpoint
from .endpoints.organization_config_integrations import OrganizationConfigIntegrationsEndpoint
from .endpoints.organization_config_repositories import OrganizationConfigRepositoriesEndpoint
from .endpoints.organization_repository_commits import OrganizationRepositoryCommitsEndpoint
from .endpoints.organization_repository_details import OrganizationRepositoryDetailsEndpoint
from .endpoints.organization_search_details import OrganizationSearchDetailsEndpoint
from .endpoints.organization_searches import OrganizationSearchesEndpoint
from .endpoints.organization_sentry_apps import OrganizationSentryAppsEndpoint
from .endpoints.organization_tagkey_values import OrganizationTagKeyValuesEndpoint
from .endpoints.organization_tags import OrganizationTagsEndpoint
from .endpoints.organization_user_reports import OrganizationUserReportsEndpoint
from .endpoints.organization_users import OrganizationUsersEndpoint
from .endpoints.organization_user_details import OrganizationUserDetailsEndpoint
from .endpoints.sentry_app_installations import SentryAppInstallationsEndpoint
from .endpoints.sentry_app_installation_details import SentryAppInstallationDetailsEndpoint
from .endpoints.sentry_app_installation_external_requests import SentryAppInstallationExternalRequestsEndpoint
from .endpoints.sentry_app_installation_external_issues import SentryAppInstallationExternalIssuesEndpoint
from .endpoints.organization_stats import OrganizationStatsEndpoint
from .endpoints.organization_teams import OrganizationTeamsEndpoint
from .endpoints.organization_user_issues import OrganizationUserIssuesEndpoint
from .endpoints.organization_user_issues_search import OrganizationUserIssuesSearchEndpoint
from .endpoints.project_avatar import ProjectAvatarEndpoint
from .endpoints.project_details import ProjectDetailsEndpoint
from .endpoints.project_transfer import ProjectTransferEndpoint
from .endpoints.project_create_sample import ProjectCreateSampleEndpoint
from .endpoints.project_docs_platform import ProjectDocsPlatformEndpoint
from .endpoints.project_environments import ProjectEnvironmentsEndpoint
from .endpoints.project_environment_details import ProjectEnvironmentDetailsEndpoint
from .endpoints.project_platforms import ProjectPlatformsEndpoint
from .endpoints.project_events import ProjectEventsEndpoint
from .endpoints.project_event_details import ProjectEventDetailsEndpoint, EventJsonEndpoint
from .endpoints.project_filters import ProjectFiltersEndpoint
from .endpoints.project_filter_details import ProjectFilterDetailsEndpoint
from .endpoints.project_group_index import ProjectGroupIndexEndpoint
from .endpoints.project_group_stats import ProjectGroupStatsEndpoint
from .endpoints.project_index import ProjectIndexEndpoint
from .endpoints.project_issues_resolved_in_release import ProjectIssuesResolvedInReleaseEndpoint
from .endpoints.project_keys import ProjectKeysEndpoint
from .endpoints.project_key_details import ProjectKeyDetailsEndpoint
from .endpoints.project_key_stats import ProjectKeyStatsEndpoint
from .endpoints.project_member_index import ProjectMemberIndexEndpoint
from .endpoints.project_ownership import ProjectOwnershipEndpoint
from .endpoints.project_plugins import ProjectPluginsEndpoint
from .endpoints.project_plugin_details import ProjectPluginDetailsEndpoint
from .endpoints.project_release_details import ProjectReleaseDetailsEndpoint
from .endpoints.project_release_files import ProjectReleaseFilesEndpoint
from .endpoints.project_release_file_details import ProjectReleaseFileDetailsEndpoint
from .endpoints.project_release_commits import ProjectReleaseCommitsEndpoint
from .endpoints.project_releases import ProjectReleasesEndpoint
from .endpoints.project_release_setup import ProjectReleaseSetupCompletionEndpoint
from .endpoints.project_releases_token import ProjectReleasesTokenEndpoint
from .endpoints.project_rules import ProjectRulesEndpoint
from .endpoints.project_rules_configuration import ProjectRulesConfigurationEndpoint
from .endpoints.project_rule_details import ProjectRuleDetailsEndpoint
from .endpoints.project_searches import ProjectSearchesEndpoint
from .endpoints.project_search_details import ProjectSearchDetailsEndpoint
from .endpoints.project_stats import ProjectStatsEndpoint
from .endpoints.project_tags import ProjectTagsEndpoint
from .endpoints.project_tagkey_details import ProjectTagKeyDetailsEndpoint
from .endpoints.project_tagkey_values import ProjectTagKeyValuesEndpoint
from .endpoints.project_team_details import ProjectTeamDetailsEndpoint
from .endpoints.project_teams import ProjectTeamsEndpoint
from .endpoints.project_processingissues import ProjectProcessingIssuesEndpoint, \
ProjectProcessingIssuesFixEndpoint, ProjectProcessingIssuesDiscardEndpoint
from .endpoints.project_reprocessing import ProjectReprocessingEndpoint
from .endpoints.project_servicehooks import ProjectServiceHooksEndpoint
from .endpoints.project_servicehook_details import ProjectServiceHookDetailsEndpoint
from .endpoints.project_servicehook_stats import ProjectServiceHookStatsEndpoint
from .endpoints.project_user_details import ProjectUserDetailsEndpoint
from .endpoints.project_user_reports import ProjectUserReportsEndpoint
from .endpoints.project_user_stats import ProjectUserStatsEndpoint
from .endpoints.project_users import ProjectUsersEndpoint
from .endpoints.prompts_activity import PromptsActivityEndpoint
from .endpoints.filechange import CommitFileChangeEndpoint
from .endpoints.release_deploys import ReleaseDeploysEndpoint
from .endpoints.debug_files import DebugFilesEndpoint, DifAssembleEndpoint, \
UnknownDebugFilesEndpoint, AssociateDSymFilesEndpoint
from .endpoints.sentry_apps import SentryAppsEndpoint
from .endpoints.sentry_app_features import SentryAppFeaturesEndpoint
from .endpoints.sentry_apps_stats import SentryAppsStatsEndpoint
from .endpoints.sentry_app_components import SentryAppComponentsEndpoint, \
OrganizationSentryAppComponentsEndpoint
from .endpoints.sentry_app_details import SentryAppDetailsEndpoint
from .endpoints.sentry_app_authorizations import SentryAppAuthorizationsEndpoint
from .endpoints.shared_group_details import SharedGroupDetailsEndpoint
from .endpoints.system_health import SystemHealthEndpoint
from .endpoints.system_options import SystemOptionsEndpoint
from .endpoints.team_avatar import TeamAvatarEndpoint
from .endpoints.team_details import TeamDetailsEndpoint
from .endpoints.team_groups_new import TeamGroupsNewEndpoint
from .endpoints.team_groups_trending import TeamGroupsTrendingEndpoint
from .endpoints.team_members import TeamMembersEndpoint
from .endpoints.team_projects import TeamProjectsEndpoint
from .endpoints.team_stats import TeamStatsEndpoint
from .endpoints.useravatar import UserAvatarEndpoint
from .endpoints.user_appearance import UserAppearanceEndpoint
from .endpoints.user_authenticator_index import UserAuthenticatorIndexEndpoint
from .endpoints.user_authenticator_enroll import UserAuthenticatorEnrollEndpoint
from .endpoints.user_authenticator_details import UserAuthenticatorDetailsEndpoint
from .endpoints.user_identity_details import UserIdentityDetailsEndpoint
from .endpoints.user_index import UserIndexEndpoint
from .endpoints.user_details import UserDetailsEndpoint
from .endpoints.user_emails import UserEmailsEndpoint
from .endpoints.user_emails_confirm import UserEmailsConfirmEndpoint
from .endpoints.user_ips import UserIPsEndpoint
from .endpoints.user_organizations import UserOrganizationsEndpoint
from .endpoints.user_notification_details import UserNotificationDetailsEndpoint
from .endpoints.user_password import UserPasswordEndpoint
from .endpoints.user_notification_fine_tuning import UserNotificationFineTuningEndpoint
from .endpoints.user_social_identities_index import UserSocialIdentitiesIndexEndpoint
from .endpoints.user_social_identity_details import UserSocialIdentityDetailsEndpoint
from .endpoints.user_subscriptions import UserSubscriptionsEndpoint
from .endpoints.event_file_committers import EventFileCommittersEndpoint
from .endpoints.setup_wizard import SetupWizard
from .endpoints.grouping_configs import GroupingConfigsEndpoint
from .endpoints.grouping_enhancements import GroupingEnhancementsEndpoint
from .endpoints.builtin_symbol_sources import BuiltinSymbolSourcesEndpoint
urlpatterns = patterns(
'',
# Relay
url(
r'^relays/$',
RelayIndexEndpoint.as_view(),
name='sentry-api-0-relays-index'
),
url(
r'^relays/register/challenge/$',
RelayRegisterChallengeEndpoint.as_view(),
name='sentry-api-0-relay-register-challenge'
),
url(
r'^relays/register/response/$',
RelayRegisterResponseEndpoint.as_view(),
name='sentry-api-0-relay-register-response'
),
url(
r'^relays/heartbeat/$',
RelayHeartbeatEndpoint.as_view(),
name='sentry-api-0-relay-heartbeat'
),
url(
r'^relays/projectconfigs/$',
RelayProjectConfigsEndpoint.as_view(),
name='sentry-api-0-relay-projectconfigs'
),
url(
r'^relays/publickeys/$',
RelayPublicKeysEndpoint.as_view(),
name='sentry-api-0-relay-publickeys'
),
url(
r'^relays/(?P<relay_id>[^\/]+)/$',
RelayDetailsEndpoint.as_view(),
name='sentry-api-0-relays-details'
),
# Api Data
url(
r'^assistant/$',
AssistantEndpoint.as_view(),
name='sentry-api-0-assistant',
),
url(
r'^api-applications/$',
ApiApplicationsEndpoint.as_view(),
name='sentry-api-0-api-applications'
),
url(
r'^api-applications/(?P<app_id>[^\/]+)/$',
ApiApplicationDetailsEndpoint.as_view(),
name='sentry-api-0-api-application-details'
),
url(
r'^api-authorizations/$',
ApiAuthorizationsEndpoint.as_view(),
name='sentry-api-0-api-authorizations'
),
url(r'^api-tokens/$', ApiTokensEndpoint.as_view(),
name='sentry-api-0-api-tokens'),
url(
r'^promptsactivity/$',
PromptsActivityEndpoint.as_view(),
name='sentry-api-0-promptsactivity',
),
# Auth
url(r'^auth/$', AuthIndexEndpoint.as_view(), name='sentry-api-0-auth'),
# List Authentiactors
url(r'^authenticators/$',
AuthenticatorIndexEndpoint.as_view(),
name='sentry-api-0-authenticator-index'),
# Broadcasts
url(r'^broadcasts/$', BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-broadcast-index'),
url(r'^broadcasts/(?P<broadcast_id>[^\/]+)/$', BroadcastDetailsEndpoint.as_view()),
# Project transfer
url(r'^accept-transfer/$', AcceptProjectTransferEndpoint.as_view(),
name='sentry-api-0-accept-project-transfer'),
# Monitors
url(r'^monitors/(?P<monitor_id>[^\/]+)/$', MonitorDetailsEndpoint.as_view()),
url(r'^monitors/(?P<monitor_id>[^\/]+)/checkins/$', MonitorCheckInsEndpoint.as_view()),
url(r'^monitors/(?P<monitor_id>[^\/]+)/checkins/(?P<checkin_id>[^\/]+)/$',
MonitorCheckInDetailsEndpoint.as_view()),
url(r'^monitors/(?P<monitor_id>[^\/]+)/stats/$', MonitorStatsEndpoint.as_view()),
# Users
url(r'^users/$', UserIndexEndpoint.as_view(), name='sentry-api-0-user-index'),
url(
r'^users/(?P<user_id>[^\/]+)/$',
UserDetailsEndpoint.as_view(),
name='sentry-api-0-user-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/avatar/$',
UserAvatarEndpoint.as_view(),
name='sentry-api-0-user-avatar'
),
url(
r'^users/(?P<user_id>[^\/]+)/appearance/$',
UserAppearanceEndpoint.as_view(),
name='sentry-api-0-user-appearance'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/$',
UserAuthenticatorIndexEndpoint.as_view(),
name='sentry-api-0-user-authenticator-index'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<interface_id>[^\/]+)/enroll/$',
UserAuthenticatorEnrollEndpoint.as_view(),
name='sentry-api-0-user-authenticator-enroll'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/(?P<interface_device_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-device-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/emails/$',
UserEmailsEndpoint.as_view(),
name='sentry-api-0-user-emails'
),
url(
r'^users/(?P<user_id>[^\/]+)/emails/confirm/$',
UserEmailsConfirmEndpoint.as_view(),
name='sentry-api-0-user-emails-confirm'
),
url(
r'^users/(?P<user_id>[^\/]+)/identities/(?P<identity_id>[^\/]+)/$',
UserIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-identity-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/ips/$',
UserIPsEndpoint.as_view(),
name='sentry-api-0-user-ips'
),
url(
r'^users/(?P<user_id>[^\/]+)/organizations/$',
UserOrganizationsEndpoint.as_view(),
name='sentry-api-0-user-organizations'
),
url(
r'^users/(?P<user_id>[^\/]+)/notifications/$',
UserNotificationDetailsEndpoint.as_view(),
name='sentry-api-0-user-notifications'
),
url(
r'^users/(?P<user_id>[^\/]+)/password/$',
UserPasswordEndpoint.as_view(),
name='sentry-api-0-user-password'
),
url(
r'^users/(?P<user_id>[^\/]+)/notifications/(?P<notification_type>[^\/]+)/$',
UserNotificationFineTuningEndpoint.as_view(),
name='sentry-api-0-user-notifications-fine-tuning'
),
url(
r'^users/(?P<user_id>[^\/]+)/social-identities/$',
UserSocialIdentitiesIndexEndpoint.as_view(),
name='sentry-api-0-user-social-identities-index'),
url(
r'^users/(?P<user_id>[^\/]+)/social-identities/(?P<identity_id>[^\/]+)/$',
UserSocialIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-social-identity-details'),
url(
r'^users/(?P<user_id>[^\/]+)/subscriptions/$',
UserSubscriptionsEndpoint.as_view(),
name='sentry-api-0-user-subscriptions'
),
# Incidents
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/activity/$',
OrganizationIncidentActivityIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-activity'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/comments/$',
OrganizationIncidentCommentIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-comments'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/comments/(?P<activity_id>[^\/]+)/$',
OrganizationIncidentCommentDetailsEndpoint.as_view(),
name='sentry-api-0-organization-incident-comment-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/$',
OrganizationIncidentDetailsEndpoint.as_view(),
name='sentry-api-0-organization-incident-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/$',
OrganizationIncidentIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/seen/$',
OrganizationIncidentSeenEndpoint.as_view(),
name='sentry-api-0-organization-incident-seen'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/subscriptions/$',
OrganizationIncidentSubscriptionIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-subscription-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/suspects/$',
OrganizationIncidentSuspectsIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-suspect-index'
),
# Organizations
url(
r'^organizations/(?P<organization_slug>[^\/]+)/chunk-upload/$',
ChunkUploadEndpoint.as_view(),
name='sentry-api-0-chunk-upload'
),
url(
r'^organizations/$', OrganizationIndexEndpoint.as_view(), name='sentry-api-0-organizations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/$',
OrganizationDetailsEndpoint.as_view(),
name='sentry-api-0-organization-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/discover/query/$',
OrganizationDiscoverQueryEndpoint.as_view(),
name='sentry-api-0-organization-discover-query'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/discover/saved/$',
OrganizationDiscoverSavedQueriesEndpoint.as_view(),
name='sentry-api-0-organization-discover-saved-queries'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/discover/saved/(?P<query_id>[^\/]+)/$',
OrganizationDiscoverSavedQueryDetailEndpoint.as_view(),
name='sentry-api-0-organization-discover-saved-query-detail'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/(?P<dashboard_id>[^\/]+)/$',
OrganizationDashboardDetailsEndpoint.as_view(),
name='sentry-api-0-organization-dashboard-details',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/$',
OrganizationDashboardsEndpoint.as_view(),
name='sentry-api-0-organization-dashboards'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/(?P<dashboard_id>[^\/]+)/widgets/$',
OrganizationDashboardWidgetsEndpoint.as_view(),
name='sentry-api-0-organization-dashboard-widgets',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/(?P<dashboard_id>[^\/]+)/widgets/(?P<widget_id>[^\/]+)$',
OrganizationDashboardWidgetDetailsEndpoint.as_view(),
name='sentry-api-0-organization-dashboard-widget-details',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/health/top/$',
OrganizationHealthTopEndpoint.as_view(),
name='sentry-api-0-organization-health-top',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/health/graph/$',
OrganizationHealthGraphEndpoint.as_view(),
name='sentry-api-0-organization-health-graph',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/shortids/(?P<short_id>[^\/]+)/$',
ShortIdLookupEndpoint.as_view(),
name='sentry-api-0-short-id-lookup'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/eventids/(?P<event_id>[^\/]+)/$',
EventIdLookupEndpoint.as_view(),
name='sentry-api-0-event-id-lookup'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/slugs/$',
SlugsUpdateEndpoint.as_view(),
name='sentry-api-0-short-ids-update'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-requests'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/(?P<request_id>\d+)/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-request-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/activity/$',
OrganizationActivityEndpoint.as_view(),
name='sentry-api-0-organization-activity'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/api-keys/$',
OrganizationApiKeyIndexEndpoint.as_view(),
name='sentry-api-0-organization-api-key-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/api-keys/(?P<api_key_id>[^\/]+)/$',
OrganizationApiKeyDetailsEndpoint.as_view(),
name='sentry-api-0-organization-api-key-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/audit-logs/$',
OrganizationAuditLogsEndpoint.as_view(),
name='sentry-api-0-organization-audit-logs'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-provider/$',
OrganizationAuthProviderDetailsEndpoint.as_view(),
name='sentry-api-0-organization-auth-provider'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-providers/$',
OrganizationAuthProvidersEndpoint.as_view(),
name='sentry-api-0-organization-auth-providers'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-provider/send-reminders/$',
OrganizationAuthProviderSendRemindersEndpoint.as_view(),
name='sentry-api-0-organization-auth-provider-send-reminders'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/avatar/$',
OrganizationAvatarEndpoint.as_view(),
name='sentry-api-0-organization-avatar'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/config/integrations/$',
OrganizationConfigIntegrationsEndpoint.as_view(),
name='sentry-api-0-organization-config-integrations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/config/repos/$',
OrganizationConfigRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-config-repositories'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events/$',
OrganizationEventsEndpoint.as_view(),
name='sentry-api-0-organization-events'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events-stats/$',
OrganizationEventsStatsEndpoint.as_view(),
name='sentry-api-0-organization-events-stats'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events-heatmap/$',
OrganizationEventsHeatmapEndpoint.as_view(),
name='sentry-api-0-organization-events-heatmap'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events-meta/$',
OrganizationEventsMetaEndpoint.as_view(),
name='sentry-api-0-organization-events-meta'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/issues/new/$',
OrganizationIssuesNewEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/issues/$',
OrganizationGroupIndexEndpoint.as_view(),
name='sentry-api-0-organization-group-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/$',
OrganizationIntegrationsEndpoint.as_view(),
name='sentry-api-0-organization-integrations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/(?P<integration_id>[^\/]+)/$',
OrganizationIntegrationDetailsEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/(?P<integration_id>[^\/]+)/repos/$',
OrganizationIntegrationReposEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/$',
OrganizationMemberIndexEndpoint.as_view(),
name='sentry-api-0-organization-member-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/monitors/$',
OrganizationMonitorsEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/pinned-searches/$',
OrganizationPinnedSearchEndpoint.as_view(),
name='sentry-api-0-organization-pinned-searches'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/recent-searches/$',
OrganizationRecentSearchesEndpoint.as_view(),
name='sentry-api-0-organization-recent-searches'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/searches/(?P<search_id>[^\/]+)/$',
OrganizationSearchDetailsEndpoint.as_view(),
name='sentry-api-0-organization-search-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/searches/$',
OrganizationSearchesEndpoint.as_view(),
name='sentry-api-0-organization-searches'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/issues/$',
OrganizationUserIssuesSearchEndpoint.as_view(),
name='sentry-api-0-organization-issue-search'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/(?P<user_id>[^\/]+)/issues/$',
OrganizationUserIssuesEndpoint.as_view(),
name='sentry-api-0-organization-user-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/resolved/$',
OrganizationIssuesResolvedInReleaseEndpoint.as_view(),
name='sentry-api-0-organization-release-resolved'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/$',
OrganizationMemberDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/unreleased-commits/$',
OrganizationMemberUnreleasedCommitsEndpoint.as_view(),
name='sentry-api-0-organization-member-unreleased-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/assigned/$',
OrganizationMemberIssuesAssignedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-assigned'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/bookmarked/$',
OrganizationMemberIssuesBookmarkedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-bookmarked'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/viewed/$',
OrganizationMemberIssuesViewedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-viewed'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
OrganizationMemberTeamDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-team-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/processingissues/$',
OrganizationProcessingIssuesEndpoint.as_view(),
name='sentry-api-0-organization-processing-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/projects/$',
OrganizationProjectsEndpoint.as_view(),
name='sentry-api-0-organization-projects'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/$',
OrganizationRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-repositories'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/$',
OrganizationRepositoryDetailsEndpoint.as_view(),
name='sentry-api-0-organization-repository-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/commits/$',
OrganizationRepositoryCommitsEndpoint.as_view(),
name='sentry-api-0-organization-repository-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/plugins/$',
OrganizationPluginsEndpoint.as_view(),
name='sentry-api-0-organization-plugins'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/$',
OrganizationReleasesEndpoint.as_view(),
name='sentry-api-0-organization-releases'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
OrganizationReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-organization-release-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/assemble/$',
OrganizationReleaseAssembleEndpoint.as_view(),
name='sentry-api-0-organization-release-assemble'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
OrganizationReleaseFilesEndpoint.as_view(),
name='sentry-api-0-organization-release-files'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
OrganizationReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-organization-release-file-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/commitfiles/$',
CommitFileChangeEndpoint.as_view(),
name='sentry-api-0-release-commitfilechange'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/deploys/$',
ReleaseDeploysEndpoint.as_view(),
name='sentry-api-0-organization-release-deploys'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
OrganizationReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-organization-release-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/user-feedback/$',
OrganizationUserReportsEndpoint.as_view(),
name='sentry-api-0-organization-user-feedback'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/$',
OrganizationUsersEndpoint.as_view(),
name='sentry-api-0-organization-users'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/(?P<user_id>[^\/]+)/$',
OrganizationUserDetailsEndpoint.as_view(),
name='sentry-api-0-organization-user-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/sentry-app-installations/$',
SentryAppInstallationsEndpoint.as_view(),
name='sentry-api-0-sentry-app-installations'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/$',
SentryAppInstallationDetailsEndpoint.as_view(),
name='sentry-api-0-sentry-app-installation-details'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/external-requests/$',
SentryAppInstallationExternalRequestsEndpoint.as_view(),
name='sentry-api-0-sentry-app-installation-external-requests'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/external-issues/$',
SentryAppInstallationExternalIssuesEndpoint.as_view(),
name='sentry-api-0-sentry-app-installation-external-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/sentry-apps/$',
OrganizationSentryAppsEndpoint.as_view(),
name='sentry-api-0-organization-sentry-apps'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/stats/$',
OrganizationStatsEndpoint.as_view(),
name='sentry-api-0-organization-stats'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/teams/$',
OrganizationTeamsEndpoint.as_view(),
name='sentry-api-0-organization-teams'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/tags/$',
OrganizationTagsEndpoint.as_view(),
name='sentry-api-0-organization-tags'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
OrganizationTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-organization-tagkey-values'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/onboarding-tasks/$',
OrganizationOnboardingTaskEndpoint.as_view(),
name='sentry-api-0-organization-onboardingtasks'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/environments/$',
OrganizationEnvironmentsEndpoint.as_view(),
name='sentry-api-0-organization-environments',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/broadcasts/$',
BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-organization-broadcasts'
),
# Teams
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/$',
TeamDetailsEndpoint.as_view(),
name='sentry-api-0-team-details'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/new/$',
TeamGroupsNewEndpoint.as_view(),
name='sentry-api-0-team-groups-new'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/trending/$',
TeamGroupsTrendingEndpoint.as_view(),
name='sentry-api-0-team-groups-trending'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/members/$',
TeamMembersEndpoint.as_view(),
name='sentry-api-0-team-members'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/projects/$',
TeamProjectsEndpoint.as_view(),
name='sentry-api-0-team-project-index'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/stats/$',
TeamStatsEndpoint.as_view(),
name='sentry-api-0-team-stats'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/avatar/$',
TeamAvatarEndpoint.as_view(),
name='sentry-api-0-team-avatar'
),
# Projects
url(r'^projects/$', ProjectIndexEndpoint.as_view(),
name='sentry-api-0-projects'),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/$',
ProjectDetailsEndpoint.as_view(),
name='sentry-api-0-project-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/avatar/$',
ProjectAvatarEndpoint.as_view(),
name='sentry-api-0-project-avatar'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/create-sample/$',
ProjectCreateSampleEndpoint.as_view(),
name='sentry-api-0-project-create-sample'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/docs/(?P<platform>[\w-]+)/$',
ProjectDocsPlatformEndpoint.as_view(),
name='sentry-api-0-project-docs-platform'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/$',
ProjectEnvironmentsEndpoint.as_view(),
name='sentry-api-0-project-environments'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/(?P<environment>[^/]+)/$',
ProjectEnvironmentDetailsEndpoint.as_view(),
name='sentry-api-0-project-environment-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/platforms/$',
ProjectPlatformsEndpoint.as_view(),
name='sentry-api-0-project-platform-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/$',
ProjectEventsEndpoint.as_view(),
name='sentry-api-0-project-events'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>(?:\d+|[A-Fa-f0-9]{32}))/$',
ProjectEventDetailsEndpoint.as_view(),
name='sentry-api-0-project-event-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/grouping-info/$',
EventGroupingInfoEndpoint.as_view(),
name='sentry-api-0-event-grouping-info'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/apple-crash-report$',
EventAppleCrashReportEndpoint.as_view(),
name='sentry-api-0-event-apple-crash-report'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/attachments/$',
EventAttachmentsEndpoint.as_view(),
name='sentry-api-0-event-attachments'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/attachments/(?P<attachment_id>[\w-]+)/$',
EventAttachmentDetailsEndpoint.as_view(),
name='sentry-api-0-event-attachment-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/committers/$',
EventFileCommittersEndpoint.as_view(),
name='sentry-api-0-event-file-committers'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/json/$',
EventJsonEndpoint.as_view(),
name='sentry-api-0-event-json'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/owners/$',
EventOwnersEndpoint.as_view(),
name='sentry-api-0-event-owners'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/$',
DebugFilesEndpoint.as_view(),
name='sentry-api-0-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/difs/assemble/$',
DifAssembleEndpoint.as_view(),
name='sentry-api-0-assemble-dif-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/unknown/$',
UnknownDebugFilesEndpoint.as_view(),
name='sentry-api-0-unknown-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/associate/$',
AssociateDSymFilesEndpoint.as_view(),
name='sentry-api-0-associate-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/$',
ProjectFiltersEndpoint.as_view(),
name='sentry-api-0-project-filters'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/(?P<filter_id>[\w-]+)/$',
ProjectFilterDetailsEndpoint.as_view(),
        name='sentry-api-0-project-filter-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/$',
ProjectServiceHooksEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/(?P<hook_id>[^\/]+)/$',
ProjectServiceHookDetailsEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/(?P<hook_id>[^\/]+)/stats/$',
ProjectServiceHookStatsEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/$',
ProjectGroupIndexEndpoint.as_view(),
name='sentry-api-0-project-group-index'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/stats/$',
ProjectGroupStatsEndpoint.as_view(),
name='sentry-api-0-project-group-stats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/$',
ProjectKeysEndpoint.as_view(),
name='sentry-api-0-project-keys'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/$',
ProjectKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-key-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/stats/$',
ProjectKeyStatsEndpoint.as_view()
),
url(
r'^projects/(?P<organization_slug>[^/]+)/(?P<project_slug>[^/]+)/members/$',
ProjectMemberIndexEndpoint.as_view(),
name='sentry-api-0-project-member-index'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/$',
ProjectReleasesEndpoint.as_view(),
name='sentry-api-0-project-releases'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/token/$',
ProjectReleasesTokenEndpoint.as_view(),
name='sentry-api-0-project-releases-token'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/completion/$',
ProjectReleaseSetupCompletionEndpoint.as_view(),
name='sentry-api-0-project-releases-completion-status'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
ProjectReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-project-release-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
ProjectReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-project-release-commits'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/resolved/$',
ProjectIssuesResolvedInReleaseEndpoint.as_view(),
name='sentry-api-0-project-release-resolved'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
ProjectReleaseFilesEndpoint.as_view(),
name='sentry-api-0-project-release-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
ProjectReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-project-release-file-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/$',
ProjectRulesEndpoint.as_view(),
name='sentry-api-0-project-rules'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/configuration/$',
ProjectRulesConfigurationEndpoint.as_view(),
name='sentry-api-0-project-rules-configuration'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/(?P<rule_id>[^\/]+)/$',
ProjectRuleDetailsEndpoint.as_view(),
name='sentry-api-0-project-rule-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/$',
ProjectSearchesEndpoint.as_view(),
name='sentry-api-0-project-searches'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/(?P<search_id>[^\/]+)/$',
ProjectSearchDetailsEndpoint.as_view(),
name='sentry-api-0-project-search-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/stats/$',
ProjectStatsEndpoint.as_view(),
name='sentry-api-0-project-stats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/$',
ProjectTagsEndpoint.as_view(),
name='sentry-api-0-project-tags'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/$',
ProjectTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-tagkey-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
ProjectTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-project-tagkey-values'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/teams/$',
ProjectTeamsEndpoint.as_view(),
name='sentry-api-0-project-teams'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
ProjectTeamDetailsEndpoint.as_view(),
name='sentry-api-0-project-team-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/transfer/$',
ProjectTransferEndpoint.as_view(),
name='sentry-api-0-project-transfer'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/$',
ProjectUsersEndpoint.as_view(),
name='sentry-api-0-project-users'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/(?P<user_hash>[^/]+)/$',
ProjectUserDetailsEndpoint.as_view(),
name='sentry-api-0-project-user-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:user-feedback|user-reports)/$',
ProjectUserReportsEndpoint.as_view(),
name='sentry-api-0-project-user-reports'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/user-stats/$',
ProjectUserStatsEndpoint.as_view(),
name='sentry-api-0-project-userstats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/$',
ProjectProcessingIssuesEndpoint.as_view(),
name='sentry-api-0-project-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/fix$',
ProjectProcessingIssuesFixEndpoint.as_view(),
name='sentry-api-0-project-fix-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/reprocessing/$',
ProjectReprocessingEndpoint.as_view(),
name='sentry-api-0-project-reprocessing'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/discard/$',
ProjectProcessingIssuesDiscardEndpoint.as_view(),
name='sentry-api-0-project-discard-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/ownership/$',
ProjectOwnershipEndpoint.as_view(),
name='sentry-api-0-project-ownership'
),
# Load plugin project urls
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/$',
ProjectPluginsEndpoint.as_view(),
name='sentry-api-0-project-plugins'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/(?P<plugin_id>[^\/]+)/$',
ProjectPluginDetailsEndpoint.as_view(),
name='sentry-api-0-project-plugin-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins?/',
include('sentry.plugins.base.project_api_urls')
),
# Groups
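    # Note: the non-capturing (?:issues|groups) group lets the same endpoints serve both the /issues/ and /groups/ URL spellings.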
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/$',
GroupDetailsEndpoint.as_view(),
name='sentry-api-0-group-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/$',
GroupEventsEndpoint.as_view(),
name='sentry-api-0-group-events'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/latest/$',
GroupEventsLatestEndpoint.as_view(),
name='sentry-api-0-group-events-latest'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/oldest/$',
GroupEventsOldestEndpoint.as_view(),
name='sentry-api-0-group-events-oldest'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/$',
GroupNotesEndpoint.as_view(),
name='sentry-api-0-group-notes'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/(?P<note_id>[^\/]+)/$',
GroupNotesDetailsEndpoint.as_view(),
name='sentry-api-0-group-notes-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/hashes/$',
GroupHashesEndpoint.as_view(),
        name='sentry-api-0-group-hashes'
),
url(
r'^issues/(?P<issue_id>\d+)/participants/$',
GroupParticipantsEndpoint.as_view(),
        name='sentry-api-0-group-participants'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/stats/$',
GroupStatsEndpoint.as_view(),
name='sentry-api-0-group-stats'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/$',
GroupTagsEndpoint.as_view(),
name='sentry-api-0-group-tags'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/(?P<key>[^/]+)/$',
GroupTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-group-tagkey-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/(?P<key>[^/]+)/values/$',
GroupTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-group-tagkey-values'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:user-feedback|user-reports)/$',
GroupUserReportsEndpoint.as_view(),
name='sentry-api-0-group-user-reports'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/similar/$',
GroupSimilarIssuesEndpoint.as_view(),
name='sentry-api-0-group-similar-issues'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/external-issues/$',
GroupExternalIssuesEndpoint.as_view(),
name='sentry-api-0-group-external-issues'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/external-issues/(?P<external_issue_id>\d+)/$',
GroupExternalIssueDetailsEndpoint.as_view(),
name='sentry-api-0-group-external-issue-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/integrations/$',
GroupIntegrationsEndpoint.as_view(),
name='sentry-api-0-group-integrations'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/integrations/(?P<integration_id>\d+)/$',
GroupIntegrationDetailsEndpoint.as_view(),
name='sentry-api-0-group-integration-details'
),
# Load plugin group urls
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/plugins?/',
include('sentry.plugins.base.group_api_urls')
),
url(
r'^shared/(?:issues|groups)/(?P<share_id>[^\/]+)/$',
SharedGroupDetailsEndpoint.as_view(),
name='sentry-api-0-shared-group-details'
),
# Tombstone
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tombstones/$',
GroupTombstoneEndpoint.as_view(),
name='sentry-api-0-group-tombstones'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tombstones/(?P<tombstone_id>\d+)/$',
GroupTombstoneDetailsEndpoint.as_view(),
name='sentry-api-0-group-tombstone-details'
),
# Events
url(
r'^events/(?P<event_id>\d+)/$',
EventDetailsEndpoint.as_view(),
name='sentry-api-0-event-details'
),
# Sentry Apps
url(
r'^sentry-apps/$',
SentryAppsEndpoint.as_view(),
name='sentry-api-0-sentry-apps'
),
url(
r'^sentry-apps-stats/$',
SentryAppsStatsEndpoint.as_view(),
name='sentry-api-0-sentry-apps-stats'
),
url(
r'^sentry-apps/(?P<sentry_app_slug>[^\/]+)/$',
SentryAppDetailsEndpoint.as_view(),
name='sentry-api-0-sentry-app-details'
),
url(
r'^sentry-apps/(?P<sentry_app_slug>[^\/]+)/features/$',
SentryAppFeaturesEndpoint.as_view(),
name='sentry-api-0-sentry-app-features'
),
url(
r'^sentry-apps/(?P<sentry_app_slug>[^\/]+)/components/$',
SentryAppComponentsEndpoint.as_view(),
name='sentry-api-0-sentry-app-components'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/sentry-app-components/$',
OrganizationSentryAppComponentsEndpoint.as_view(),
name='sentry-api-0-org-sentry-app-components'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/authorizations/$',
SentryAppAuthorizationsEndpoint.as_view(),
name='sentry-api-0-sentry-app-authorizations'
),
# Grouping configs
url(
r'^grouping-configs/$', GroupingConfigsEndpoint.as_view(),
name='sentry-api-0-grouping-configs'
),
url(
r'^grouping-enhancements/$', GroupingEnhancementsEndpoint.as_view(),
name='sentry-api-0-grouping-enhancements'
),
# Symbolicator Builtin Sources
url(
r'^builtin-symbol-sources/$', BuiltinSymbolSourcesEndpoint.as_view(),
name='sentry-api-0-builtin-symbol-sources',
),
# Internal
url(r'^internal/health/$', SystemHealthEndpoint.as_view(),
name='sentry-api-0-system-health'),
url(
r'^internal/options/$', SystemOptionsEndpoint.as_view(), name='sentry-api-0-system-options'
),
url(r'^internal/quotas/$', InternalQuotasEndpoint.as_view()),
url(r'^internal/queue/tasks/$', InternalQueueTasksEndpoint.as_view()),
url(r'^internal/stats/$', InternalStatsEndpoint.as_view(),
name='sentry-api-0-internal-stats'),
# Project Wizard
url(
r'^wizard/$',
SetupWizard.as_view(),
name='sentry-api-0-project-wizard-new'
),
url(
r'^wizard/(?P<wizard_hash>[^\/]+)/$',
SetupWizard.as_view(),
name='sentry-api-0-project-wizard'
),
# Catch all
url(r'^$', IndexEndpoint.as_view(), name='sentry-api-index'),
url(r'^', CatchallEndpoint.as_view(), name='sentry-api-catchall'),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
|
the-stack_0_14544 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import math
from typing import Callable, Dict, Iterator, List, Tuple, Union
import oneflow as flow
from oneflow.nn.optimizer.optimizer import Optimizer, ParamGroup
from oneflow.nn.parameter import Parameter
class AdamW(Optimizer):
"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
    This optimizer implements the Adam-weight-decay (AdamW) algorithm
    (more details in `Adam-weight-decay <https://www.fast.ai/2018/07/02/adam-weight-decay/>`_),
    which avoids the interaction between L2 regularization and the adaptive moment estimates.
    The parameter update equations are:
.. math::
& V_t = \\beta_1*V_{t-1} + (1-\\beta_1)*grad
& S_t = \\beta_2*S_{t-1} + (1-\\beta_2)*{grad} \\odot {grad}
& \\hat{g} = learning\\_rate*(\\frac{{V_t}}{\\sqrt{{S_t}}+\\epsilon}+\\lambda*param_{old})
& param_{new} = param_{old} - \\hat{g}
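    In the update above, the weight_decay coefficient λ multiplies the old parameter value directly, so the decay is decoupled from the moment estimates V_t and S_t.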
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (λ in the equations above; default: 0)
amsgrad (bool, optional): whether to use the AMSGrad variant of this algorithm. (default: False)
do_bias_correction (bool, optional): Whether do bias correction (default: True)
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
For example:
Example 1:
.. code-block:: python
# Assume net is a custom model.
adamw = flow.optim.AdamW(net.parameters(), lr=1e-3)
for epoch in range(epochs):
# Read data, Compute the loss and so on.
# ...
loss.backward()
adamw.step()
adamw.zero_grad()
Example 2:
.. code-block:: python
# Assume net is a custom model.
adamw = flow.optim.AdamW(
[
{
"params": net.parameters(),
"lr": learning_rate,
"clip_grad_max_norm": 0.5,
"clip_grad_norm_type": 2.0,
}
],
)
for epoch in range(epochs):
# Read data, Compute the loss and so on.
# ...
loss.backward()
adamw.clip_grad()
adamw.step()
adamw.zero_grad()
If you want to use clip_grad, you can refer this example.
For more details of `clip_grad_max_norm` and `clip_grad_norm_type`, you can refer to :func:`oneflow.nn.utils.clip_grad_norm_`.
"""
def __init__(
self,
params: Union[Iterator[Parameter], List[Dict]],
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0,
amsgrad: bool = False,
do_bias_correction: bool = True,
):
assert lr >= 0.0, f"Invalid learning rate: {lr}"
assert eps >= 0.0, f"Invalid epsilon value: {eps}"
assert (
betas[0] >= 0.0 and betas[0] < 1.0
), f"Invalid beta parameter at index 0: {betas[0]}"
assert (
betas[1] >= 0.0 and betas[1] < 1.0
), f"Invalid beta parameter at index 1: {betas[1]}"
assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
options = dict()
options["lr"] = lr
options["eps"] = eps
options["betas"] = betas
options["weight_decay"] = weight_decay
options["bias_correction1"] = 1.0
options["bias_correction2"] = 1.0
options["do_bias_correction"] = do_bias_correction
options["amsgrad"] = amsgrad
super().__init__(params, options)
for param_group in self.param_groups:
for param in param_group.parameters:
assert param.is_leaf, "parameters must be leaf tensor"
self._state[param] = dict()
self._op_with_amsgrad = (
flow.stateful_op("adam_update")
.Input("model")
.Input("model_diff")
.Input("m")
.Input("v")
.Input("max_v")
.Build()
)
self._op_without_amsgrad = (
flow.stateful_op("adam_update")
.Input("model")
.Input("model_diff")
.Input("m")
.Input("v")
.Build()
)
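        # Both stateful ops appear to map onto OneFlow's fused "adam_update" kernel; the first variant also takes the running max of v required by AMSGrad.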
def step(self, closure: Callable = None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
with flow.no_grad():
loss = None
if closure is not None:
loss = closure()
for param_group in self.param_groups:
if param_group["do_bias_correction"]:
param_group["bias_correction1"] = 1.0 - math.pow(
param_group["betas"][0], self._state["step"] + 1
)
param_group["bias_correction2"] = 1.0 - math.pow(
param_group["betas"][1], self._state["step"] + 1
)
kwargs = {
"learning_rate": param_group["lr"],
"bias_correction1": param_group["bias_correction1"],
"bias_correction2": param_group["bias_correction2"],
"weight_decay": param_group["weight_decay"],
"beta1": param_group["betas"][0],
"beta2": param_group["betas"][1],
"epsilon": param_group["eps"],
"do_bias_correction": param_group["do_bias_correction"],
"amsgrad": param_group["amsgrad"],
}
for param in param_group.parameters:
if param.grad is None:
continue
if "exp_avg" not in self._state[param]:
self._state[param]["exp_avg"] = flow.zeros_like(param)
if "exp_avg_sq" not in self._state[param]:
self._state[param]["exp_avg_sq"] = flow.zeros_like(param)
if param_group["amsgrad"]:
if "max_exp_avg_sq" not in self._state[param]:
self._state[param]["max_exp_avg_sq"] = flow.zeros_like(
param
)
m_tensor = self._state[param]["exp_avg"]
v_tensor = self._state[param]["exp_avg_sq"]
if param_group["amsgrad"]:
max_v_tensor = self._state[param]["max_exp_avg_sq"]
flow._C.dispatch_adam_update(
self._op_with_amsgrad,
(param, param.grad, m_tensor, v_tensor, max_v_tensor),
**kwargs,
)
else:
flow._C.dispatch_adam_update(
self._op_without_amsgrad,
(param, param.grad, m_tensor, v_tensor),
**kwargs,
)
self._state["step"] += 1
return loss
def _generate_conf_for_graph(self, train_conf, vars_conf):
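        # Builds one optimizer conf per param group for OneFlow's graph mode; judging by the attribute names, this mirrors the eager-mode settings used in step().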
new_opt_confs = []
for param_group in self.param_groups:
optimizer_conf = train_conf.mutable_optimizer_conf().Add()
lr = (
param_group["initial_lr"]
if "initial_lr" in param_group
else param_group["lr"]
)
weight_decay = param_group["weight_decay"]
beta1 = param_group["betas"][0]
beta2 = param_group["betas"][1]
epsilon = param_group["eps"]
do_bias_correction = param_group["do_bias_correction"]
amsgrad = param_group["amsgrad"]
optimizer_conf.set_base_learning_rate(lr)
optimizer_conf.mutable_adam_conf().set_beta1(beta1)
optimizer_conf.mutable_adam_conf().set_beta2(beta2)
optimizer_conf.mutable_adam_conf().set_epsilon(epsilon)
optimizer_conf.mutable_adam_conf().set_do_bias_correction(
do_bias_correction
)
optimizer_conf.mutable_adam_conf().set_amsgrad(amsgrad)
optimizer_conf.mutable_weight_decay_conf().set_weight_decay_rate(
weight_decay
)
self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf)
for param in param_group.parameters:
if param.requires_grad:
optimizer_conf.add_variable_op_names(vars_conf[param].name)
new_opt_confs.append(optimizer_conf)
return new_opt_confs
@property
def support_sparse(self):
"""Whether AdamW Optimizer support sparse update.
"""
return True
|
the-stack_0_14545 | import sys
import re
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from sklearn.preprocessing import scale
import pytest
from nibabel import Nifti1Image
from nilearn.input_data import NiftiMasker
from nilearn.interfaces.fmriprep import load_confounds
from nilearn.interfaces.fmriprep.load_confounds import _check_strategy
from nilearn._utils.fmriprep_confounds import _to_camel_case
from nilearn.interfaces.fmriprep.tests.utils import (
create_tmp_filepath, get_leagal_confound
)
def _simu_img(tmp_path, demean):
"""Simulate an nifti image based on confound file with some parts confounds
and some parts noise."""
file_nii, _ = create_tmp_filepath(tmp_path, copy_confounds=True)
# set the size of the image matrix
nx = 5
ny = 5
    # the actual number of slices will be double this,
    # as slices driven by confounds are stacked on top of slices of pure noise
nz = 2
# Load a simple 6 parameters motion models as confounds
# demean set to False just for simulating signal based on the original
# state
confounds, _ = load_confounds(
file_nii, strategy=("motion", ), motion="basic", demean=False
)
X = _handle_non_steady(confounds)
X = X.values
# the number of time points is based on the example confound file
nt = X.shape[0]
# initialize an empty 4D volume
vol = np.zeros([nx, ny, 2 * nz, nt])
vol_conf = np.zeros([nx, ny, 2 * nz])
vol_rand = np.zeros([nx, ny, 2 * nz])
# create random noise and a random mixture of confounds standardized
# to zero mean and unit variance
if sys.version_info < (3, 7): # fall back to random state for 3.6
np.random.RandomState(42)
beta = np.random.rand(nx * ny * nz, X.shape[1])
tseries_rand = scale(np.random.rand(nx * ny * nz, nt), axis=1)
else:
        random_state = np.random.default_rng(0)
        beta = random_state.random((nx * ny * nz, X.shape[1]))
        tseries_rand = scale(random_state.random((nx * ny * nz, nt)), axis=1)
# create the confound mixture
tseries_conf = scale(np.matmul(beta, X.transpose()), axis=1)
# fill the first half of the 4D data with the random mixture
vol[:, :, 0:nz, :] = tseries_conf.reshape(nx, ny, nz, nt)
vol_conf[:, :, 0:nz] = 1
# create random noise in the second half of the 4D data
vol[:, :, range(nz, 2 * nz), :] = tseries_rand.reshape(nx, ny, nz, nt)
vol_rand[:, :, range(nz, 2 * nz)] = 1
# Shift the mean to non-zero
vol = vol + 10
# create an nifti image with the data, and corresponding mask
img = Nifti1Image(vol, np.eye(4))
mask_conf = Nifti1Image(vol_conf, np.eye(4))
mask_rand = Nifti1Image(vol_rand, np.eye(4))
# generate the associated confounds for testing
test_confounds, _ = load_confounds(
file_nii, strategy=("motion",), motion="basic", demean=demean)
# match how we extend the length to increase the degree of freedom
test_confounds = _handle_non_steady(test_confounds)
sample_mask = np.arange(test_confounds.shape[0])[1:]
return img, mask_conf, mask_rand, test_confounds, sample_mask
def _handle_non_steady(confounds):
"""Simulate non steady state correctly while increase the length."""
X = confounds.values
# the first row is non-steady state, replace it with the input from the
# second row
non_steady = X[0, :]
X[0, :] = X[1, :]
# repeat X in length (axis = 0) 10 times to increase
# the degree of freedom for numerical stability
X = np.tile(X, (10, 1))
# put non-steady state volume back at the first sample
X[0, :] = non_steady
X = pd.DataFrame(X, columns=confounds.columns)
return X
def _regression(confounds, tmp_path):
"""Simple regression with NiftiMasker."""
# Simulate data
img, mask_conf, _, _, _ = _simu_img(tmp_path, demean=False)
confounds = _handle_non_steady(confounds)
# Do the regression
masker = NiftiMasker(mask_img=mask_conf, standardize=True)
tseries_clean = masker.fit_transform(
img, confounds=confounds, sample_mask=None
)
assert tseries_clean.shape[0] == confounds.shape[0]
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
"test_strategy,param",
[
(("motion", ), {}),
(("high_pass", ), {}),
(("wm_csf", ), {"wm_csf": "full"}),
(("global_signal", ), {"global_signal": "full"}),
(("high_pass", "compcor", ), {}),
(("high_pass", "compcor", ), {"compcor": "anat_separated"}),
(("high_pass", "compcor", ), {"compcor": "temporal"}),
(("ica_aroma", ), {"ica_aroma": "basic"}),
],
)
def test_nilearn_regress(tmp_path, test_strategy, param):
"""Try regressing out all motion types without sample mask."""
img_nii, _ = create_tmp_filepath(
tmp_path, copy_confounds=True, copy_json=True
)
confounds, _ = load_confounds(img_nii, strategy=test_strategy, **param)
_regression(confounds, tmp_path)
def _tseries_std(img, mask_img, confounds, sample_mask,
standardize_signal=False, standardize_confounds=True,
detrend=False):
"""Get the std of time series in a mask."""
masker = NiftiMasker(
mask_img=mask_img,
standardize=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend
)
tseries = masker.fit_transform(img,
confounds=confounds,
sample_mask=sample_mask)
return tseries.std(axis=0)
def _denoise(img, mask_img, confounds, sample_mask,
standardize_signal=False, standardize_confounds=True,
detrend=False):
"""Extract time series with and without confounds."""
masker = NiftiMasker(mask_img=mask_img,
standardize=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend)
tseries_raw = masker.fit_transform(img, sample_mask=sample_mask)
tseries_clean = masker.fit_transform(
img, confounds=confounds, sample_mask=sample_mask
)
return tseries_raw, tseries_clean
def _corr_tseries(tseries1, tseries2):
"""Compute the correlation between two sets of time series."""
corr = np.zeros(tseries1.shape[1])
for ind in range(tseries1.shape[1]):
corr[ind], _ = pearsonr(tseries1[:, ind], tseries2[:, ind])
return corr
@pytest.mark.filterwarnings("ignore")
def test_nilearn_standardize_false(tmp_path):
"""Test removing confounds with no standardization."""
# niftimasker default:
# standardize=False, standardize_confounds=True, detrend=False
# Simulate data; set demean to False as standardize_confounds=True
(img, mask_conf, mask_rand,
confounds, sample_mask) = _simu_img(tmp_path, demean=False)
# Check that most variance is removed
# in voxels composed of pure confounds
tseries_std = _tseries_std(img, mask_conf, confounds, sample_mask,
standardize_signal=False,
standardize_confounds=True,
detrend=False)
assert np.mean(tseries_std < 0.0001)
# Check that most variance is preserved
# in voxels composed of random noise
tseries_std = _tseries_std(img, mask_rand, confounds, sample_mask,
standardize_signal=False,
standardize_confounds=True,
detrend=False)
assert np.mean(tseries_std > 0.9)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("standardize_signal", ["zscore", "psc"])
@pytest.mark.parametrize("standardize_confounds,detrend", [(True, False),
(False, True),
(True, True)])
def test_nilearn_standardize(tmp_path, standardize_signal,
standardize_confounds, detrend):
"""Test confounds removal with logical parameters for processing signal."""
# demean is set to False to let signal.clean handle everything
(img, mask_conf, mask_rand, confounds, mask) = _simu_img(tmp_path,
demean=False)
# We now load the time series with vs without confounds
# in voxels composed of pure confounds
# the correlation before and after denoising should be very low
# as most of the variance is removed by denoising
tseries_raw, tseries_clean = _denoise(
img, mask_conf, confounds, mask,
standardize_signal=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend)
corr = _corr_tseries(tseries_raw, tseries_clean)
assert np.absolute(np.mean(corr)) < 0.2
# We now load the time series with zscore standardization
# with vs without confounds in voxels where the signal is uncorrelated
# with confounds. The correlation before and after denoising should be very
# high as very little of the variance is removed by denoising
tseries_raw, tseries_clean = _denoise(
img, mask_rand, confounds, mask,
standardize_signal=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend)
corr = _corr_tseries(tseries_raw, tseries_clean)
assert corr.mean() > 0.8
def test_confounds2df(tmp_path):
"""Check auto-detect of confonds from an fMRI nii image."""
img_nii, _ = create_tmp_filepath(tmp_path, copy_confounds=True)
confounds, _ = load_confounds(img_nii)
assert "trans_x" in confounds.columns
@pytest.mark.parametrize("strategy,message",
[(["string", ], "not a supported type of confounds."),
("error", "tuple or list of strings"),
((0, ), "not a supported type of confounds."),
(("compcor", ), "high_pass")])
def test_check_strategy(strategy, message):
"""Check that flawed strategy options generate meaningful error
messages."""
with pytest.raises(ValueError) as exc_info:
_check_strategy(strategy=strategy)
assert message in exc_info.value.args[0]
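# Suffixes of the expanded motion parameters in fMRIPrep confound files: the raw value, its temporal derivative, and their squares.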
SUFFIXES = np.array(["", "_derivative1", "_power2", "_derivative1_power2"])
@pytest.fixture
def expected_suffixes(motion):
expectation = {
"basic": slice(1),
"derivatives": slice(2),
"power2": np.array([True, False, True, False]),
"full": slice(4),
}
return SUFFIXES[expectation[motion]]
@pytest.mark.parametrize("motion", ["basic", "derivatives", "power2", "full"])
@pytest.mark.parametrize(
"param", ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]
)
def test_motion(tmp_path, motion, param, expected_suffixes):
img_nii, _ = create_tmp_filepath(tmp_path, copy_confounds=True)
conf, _ = load_confounds(
img_nii, strategy=("motion", ), motion=motion
)
for suff in SUFFIXES:
if suff in expected_suffixes:
assert f"{param}{suff}" in conf.columns
else:
assert f"{param}{suff}" not in conf.columns
@pytest.mark.parametrize("compcor,n_compcor,test_keyword,test_n",
[("anat_combined", 2, "a_comp_cor_", 2),
("anat_combined", "all", "a_comp_cor_", 57),
("temporal", "all", "t_comp_cor_", 6)])
def test_n_compcor(tmp_path, compcor, n_compcor, test_keyword, test_n):
img_nii, _ = create_tmp_filepath(
tmp_path, copy_confounds=True, copy_json=True
)
conf, _ = load_confounds(
img_nii, strategy=("high_pass", "compcor", ), compcor=compcor,
n_compcor=n_compcor
)
assert sum(True for col in conf.columns if test_keyword in col) == test_n
def test_not_found_exception(tmp_path):
"""Check various file or parameter missing scenario."""
# Create invalid confound file in temporary dir
img_missing_confounds, bad_conf = create_tmp_filepath(
tmp_path, copy_confounds=True, copy_json=False
)
missing_params = ["trans_y", "trans_x_derivative1", "rot_z_power2"]
missing_keywords = ["cosine"]
leagal_confounds = pd.read_csv(bad_conf, delimiter="\t", encoding="utf-8")
cosine = [
col_name
for col_name in leagal_confounds.columns
if "cosine" in col_name
]
aroma = [
col_name
for col_name in leagal_confounds.columns
if "aroma" in col_name
]
missing_confounds = leagal_confounds.drop(
columns=missing_params + cosine + aroma
)
missing_confounds.to_csv(bad_conf, sep="\t", index=False)
with pytest.raises(ValueError) as exc_info:
load_confounds(
img_missing_confounds,
strategy=("high_pass", "motion", "global_signal", ),
global_signal="full",
motion="full",
)
assert f"{missing_params}" in exc_info.value.args[0]
assert f"{missing_keywords}" in exc_info.value.args[0]
# loading anat compcor should also raise an error, because the json file is
# missing for that example dataset
with pytest.raises(ValueError):
load_confounds(
img_missing_confounds,
strategy=("high_pass", "compcor"),
compcor="anat_combined",
)
# catch invalid compcor option
with pytest.raises(KeyError):
load_confounds(
img_missing_confounds, strategy=("high_pass", "compcor"),
compcor="blah"
)
# Aggressive ICA-AROMA strategy requires
# default nifti and noise ICs in confound file
# correct nifti but missing noise regressor
with pytest.raises(ValueError) as exc_info:
load_confounds(
img_missing_confounds, strategy=("ica_aroma", ), ica_aroma="basic"
)
assert "aroma" in exc_info.value.args[0]
# Aggressive ICA-AROMA strategy requires
# default nifti
aroma_nii, _ = create_tmp_filepath(
tmp_path, image_type="ica_aroma", suffix="aroma"
)
with pytest.raises(ValueError) as exc_info:
load_confounds(
aroma_nii, strategy=("ica_aroma", ), ica_aroma="basic"
)
assert "Invalid file type" in exc_info.value.args[0]
# non aggressive ICA-AROMA strategy requires
# desc-smoothAROMAnonaggr nifti file
with pytest.raises(ValueError) as exc_info:
load_confounds(
img_missing_confounds, strategy=("ica_aroma", ), ica_aroma="full"
)
assert "desc-smoothAROMAnonaggr_bold" in exc_info.value.args[0]
# no confound files along the image file
(tmp_path / bad_conf).unlink()
with pytest.raises(ValueError) as exc_info:
load_confounds(img_missing_confounds)
assert "Could not find associated confound file." in exc_info.value.args[0]
def test_non_steady_state(tmp_path):
"""Warn when 'non_steady_state' is in strategy."""
# supplying 'non_steady_state' in strategy is not necessary
# check warning is correctly raised
img, conf = create_tmp_filepath(
tmp_path, copy_confounds=True
)
warning_message = (r"Non-steady state")
with pytest.warns(UserWarning, match=warning_message):
load_confounds(img, strategy=('non_steady_state', 'motion'))
def test_load_non_nifti(tmp_path):
"""Test non-nifti and invalid file type as input."""
# tsv file - unsupported input
_, tsv = create_tmp_filepath(tmp_path, copy_confounds=True, copy_json=True)
with pytest.raises(ValueError):
load_confounds(str(tsv))
# cifti file should be supported
cifti, _ = create_tmp_filepath(
tmp_path, image_type="cifti", copy_confounds=True, copy_json=True
)
conf, _ = load_confounds(cifti)
assert conf.size != 0
# gifti support
gifti, _ = create_tmp_filepath(
tmp_path, image_type="gifti", copy_confounds=True, copy_json=True
)
conf, _ = load_confounds(gifti)
assert conf.size != 0
def test_invalid_filetype(tmp_path):
"""Invalid file types/associated files for load method."""
bad_nii, bad_conf = create_tmp_filepath(tmp_path, copy_confounds=True)
conf, _ = load_confounds(bad_nii)
# more than one legal filename for confounds
add_conf = "test_desc-confounds_timeseries.tsv"
leagal_confounds, _ = get_leagal_confound()
leagal_confounds.to_csv(tmp_path / add_conf, sep="\t", index=False)
with pytest.raises(ValueError) as info:
load_confounds(bad_nii)
assert "more than one" in str(info.value)
(tmp_path / add_conf).unlink() # Remove for the rest of the tests to run
# invalid fmriprep version: confound file with no header (<1.0)
fake_confounds = np.random.rand(30, 20)
np.savetxt(bad_conf, fake_confounds, delimiter="\t")
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "The confound file contains no header." in str(error_log.value)
# invalid fmriprep version: old camel case header (<1.2)
leagal_confounds, _ = get_leagal_confound()
camel_confounds = leagal_confounds.copy()
camel_confounds.columns = [
_to_camel_case(col_name) for col_name in leagal_confounds.columns
]
camel_confounds.to_csv(bad_conf, sep="\t", index=False)
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "contains header in camel case." in str(error_log.value)
# create a empty nifti file with no associated confound file
# We only need the path to check this
no_conf = "no_confound_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz"
no_confound = tmp_path / no_conf
no_confound.touch()
with pytest.raises(ValueError):
load_confounds(bad_nii)
def test_ica_aroma(tmp_path):
"""Test ICA AROMA related file input."""
aroma_nii, _ = create_tmp_filepath(
tmp_path, image_type="ica_aroma", copy_confounds=True
)
regular_nii, _ = create_tmp_filepath(
tmp_path, image_type="regular", copy_confounds=True
)
# Aggressive strategy
conf, _ = load_confounds(
regular_nii, strategy=("ica_aroma", ), ica_aroma="basic"
)
for col_name in conf.columns:
# only aroma and non-steady state columns will be present
assert re.match("(?:aroma_motion_+|non_steady_state+)", col_name)
# Non-aggressive strategy
conf, _ = load_confounds(
aroma_nii, strategy=("ica_aroma", ), ica_aroma="full"
)
assert conf.size == 0
# invalid combination of strategy and option
with pytest.raises(ValueError) as exc_info:
conf, _ = load_confounds(
regular_nii, strategy=("ica_aroma", ), ica_aroma="invalid"
)
assert "Current input: invalid" in exc_info.value.args[0]
def test_sample_mask(tmp_path):
"""Test load method and sample mask."""
regular_nii, regular_conf = create_tmp_filepath(
tmp_path, image_type="regular", copy_confounds=True
)
reg, mask = load_confounds(
regular_nii, strategy=("motion", "scrub"), scrub=5, fd_threshold=0.15
)
# the current test data has 6 time points marked as motion outliers,
# and one nonsteady state (overlap with the first motion outlier)
    # 2 time points removed due to the "full" scrubbing strategy (remove segments
    # shorter than 5 volumes)
assert reg.shape[0] - len(mask) == 8
# nilearn requires unmasked confound regressors
assert reg.shape[0] == 30
# non steady state will always be removed
reg, mask = load_confounds(regular_nii, strategy=("motion", ))
assert reg.shape[0] - len(mask) == 1
# When no non-steady state volumes are present
conf_data, _ = get_leagal_confound(non_steady_state=False)
conf_data.to_csv(regular_conf, sep="\t", index=False) # save to tmp
reg, mask = load_confounds(regular_nii, strategy=("motion", ))
assert mask is None
    # When no volumes need removing (very liberal motion threshold)
reg, mask = load_confounds(
regular_nii, strategy=("motion", "scrub"), scrub=0, fd_threshold=4
)
assert mask is None
@pytest.mark.parametrize(
"image_type", ["regular", "ica_aroma", "gifti", "cifti"]
)
def test_inputs(tmp_path, image_type):
"""Test multiple images as input."""
# generate files
files = []
for i in range(2): # gifti edge case
nii, _ = create_tmp_filepath(
tmp_path,
suffix=f"img{i+1}",
image_type=image_type,
copy_confounds=True,
copy_json=True,
)
files.append(nii)
if image_type == "ica_aroma":
conf, _ = load_confounds(files, strategy=("ica_aroma", ))
else:
conf, _ = load_confounds(files)
assert len(conf) == 2
|
the-stack_0_14546 | # Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import print_function, division
import inspect
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from itertools import product
import cudf
from cudf import queryutils
from cudf.dataframe import DataFrame
_params_query_parser = []
_params_query_parser.append(('a > @b', ('a', '__CUDF_ENVREF__b')))
_params_query_parser.append(('(a + b) <= @c', ('a', 'b', '__CUDF_ENVREF__c')))
_params_query_parser.append(('a > b if a > 0 else b > a', ('a', 'b')))
@pytest.mark.parametrize('text,expect_args', _params_query_parser)
def test_query_parser(text, expect_args):
info = queryutils.query_parser(text)
fn = queryutils.query_builder(info, 'myfoo')
assert callable(fn)
argspec = inspect.getfullargspec(fn)
assert tuple(argspec.args) == tuple(expect_args)
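# Each case is a (nelem, seed) pair: several row counts crossed with two random seeds.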
params_query_data = list(product([1, 2, 7, 8, 9, 16, 100, 129], range(2)))
params_query_fn = [
(lambda a, b: a < b, 'a < b'),
(lambda a, b: a * 2 >= b, 'a * 2 >= b'),
(lambda a, b: 2 * (a + b) > (a + b) / 2, '2 * (a + b) > (a + b) / 2'),
]
@pytest.mark.parametrize('data,fn',
product(params_query_data, params_query_fn))
def test_query(data, fn):
# prepare
nelem, seed = data
expect_fn, query_expr = fn
np.random.seed(seed)
df = DataFrame()
df['a'] = aa = np.arange(nelem)
df['b'] = bb = np.random.random(nelem) * nelem
    # compute the expected mask on the host, then evaluate the same expression with DataFrame.query
expect_mask = expect_fn(aa, bb)
df2 = df.query(query_expr)
# check
assert len(df2) == np.count_nonzero(expect_mask)
np.testing.assert_array_almost_equal(df2['a'].to_array(), aa[expect_mask])
np.testing.assert_array_almost_equal(df2['b'].to_array(), bb[expect_mask])
params_query_env_fn = [
(lambda a, b, c, d: a * c > b + d,
'a * @c > b + @d'),
(lambda a, b, c, d: ((a / c) < d) | ((b ** c) > d),
'((a / @c) < @d) | ((b ** @c) > @d)')
]
@pytest.mark.parametrize('data,fn',
product(params_query_data, params_query_env_fn))
def test_query_ref_env(data, fn):
# prepare
nelem, seed = data
expect_fn, query_expr = fn
np.random.seed(seed)
df = DataFrame()
df['a'] = aa = np.arange(nelem)
df['b'] = bb = np.random.random(nelem) * nelem
c = 2.3
d = 1.2
    # compute the expected mask on the host, then evaluate the same expression with DataFrame.query
expect_mask = expect_fn(aa, bb, c, d)
print(expect_mask)
df2 = df.query(query_expr)
# check
assert len(df2) == np.count_nonzero(expect_mask)
np.testing.assert_array_almost_equal(df2['a'].to_array(), aa[expect_mask])
np.testing.assert_array_almost_equal(df2['b'].to_array(), bb[expect_mask])
def test_query_env_changing():
df = DataFrame()
df['a'] = aa = np.arange(100)
expr = 'a < @c'
# first attempt
c = 10
got = df.query(expr)
np.testing.assert_array_equal(aa[aa < c], got['a'].to_array())
# change env
c = 50
got = df.query(expr)
np.testing.assert_array_equal(aa[aa < c], got['a'].to_array())
def test_query_splitted_combine():
np.random.seed(0)
df = pd.DataFrame({'x': np.random.randint(0, 5, size=10),
'y': np.random.normal(size=10)})
gdf = DataFrame.from_pandas(df)
# Split the GDF
s1 = gdf[:5]
s2 = gdf[5:]
# Do the query
expr = 'x > 2'
q1 = s1.query(expr)
q2 = s2.query(expr)
# Combine
got = cudf.concat([q1, q2]).to_pandas()
# Should equal to just querying the original GDF
expect = gdf.query(expr).to_pandas()
assert_frame_equal(got, expect)
|
the-stack_0_14547 | #!/usr/bin/env python
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Produce messages to Confluent Cloud
# Using Confluent Python Client for Apache Kafka
#
# =============================================================================
from confluent_kafka import Producer, KafkaError
import json
import ccloud_lib
if __name__ == '__main__':
# Read arguments and configurations and initialize
args = ccloud_lib.parse_args()
config_file = args.config_file
topic = args.topic
conf = ccloud_lib.read_ccloud_config(config_file)
# Create Producer instance
producer = Producer({
'bootstrap.servers': conf['bootstrap.servers'],
'sasl.mechanisms': conf['sasl.mechanisms'],
'security.protocol': conf['security.protocol'],
'sasl.username': conf['sasl.username'],
'sasl.password': conf['sasl.password'],
})
# Create topic if needed
ccloud_lib.create_topic(conf, topic)
delivered_records = 0
# Optional per-message on_delivery handler (triggered by poll() or flush())
# when a message has been successfully delivered or
# permanently failed delivery (after retries).
def acked(err, msg):
global delivered_records
"""Delivery report handler called on
successful or failed delivery of message
"""
if err is not None:
print("Failed to deliver message: {}".format(err))
else:
delivered_records += 1
print("Produced record to topic {} partition [{}] @ offset {}"
.format(msg.topic(), msg.partition(), msg.offset()))
for n in range(10):
record_key = "alice"
record_value = json.dumps({'count': n})
print("Producing record: {}\t{}".format(record_key, record_value))
producer.produce(topic, key=record_key, value=record_value, on_delivery=acked)
# p.poll() serves delivery reports (on_delivery)
# from previous produce() calls.
producer.poll(0)
producer.flush()
print("{} messages were produced to topic {}!".format(delivered_records, topic)) |
the-stack_0_14548 | import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from datasets import *
from models import *
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs("images", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=8, help="size of the batches")
parser.add_argument("--dataset_name", type=str, default="img_align_celeba", help="name of the dataset")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--img_size", type=int, default=128, help="size of each image dimension")
parser.add_argument("--mask_size", type=int, default=32, help="size of random mask")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=500, help="interval between image sampling")
opt = parser.parse_args()
print(opt)
cuda = True if torch.cuda.is_available() else False
input_shape = (opt.channels, opt.img_size, opt.img_size)
# Loss function
adversarial_loss = torch.nn.MSELoss()
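# Using MSE as the adversarial criterion is the least-squares GAN (LSGAN) formulation.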
# Initialize generator and discriminator
generator = Generator(input_shape)
discriminator = Discriminator(input_shape)
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Dataset loader
transforms_ = [
transforms.Resize((opt.img_size, opt.img_size), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
transforms_lr = [
transforms.Resize((opt.img_size // 4, opt.img_size // 4), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
dataloader = DataLoader(
ImageDataset("../../data/%s" % opt.dataset_name, transforms_x=transforms_, transforms_lr=transforms_lr),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_cpu,
)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def apply_random_mask(imgs):
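    """Cut a random mask_size x mask_size patch out of each image and fill it with -1, i.e. black after the [-1, 1] normalization."""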
idx = np.random.randint(0, opt.img_size - opt.mask_size, (imgs.shape[0], 2))
masked_imgs = imgs.clone()
for i, (y1, x1) in enumerate(idx):
y2, x2 = y1 + opt.mask_size, x1 + opt.mask_size
masked_imgs[i, :, y1:y2, x1:x2] = -1
return masked_imgs
def save_sample(saved_samples):
# Generate inpainted image
gen_imgs = generator(saved_samples["masked"], saved_samples["lowres"])
# Save sample
sample = torch.cat((saved_samples["masked"].data, gen_imgs.data, saved_samples["imgs"].data), -2)
save_image(sample, "images/%d.png" % batches_done, nrow=5, normalize=True)
saved_samples = {}
for epoch in range(opt.n_epochs):
for i, batch in enumerate(dataloader):
imgs = batch["x"]
imgs_lr = batch["x_lr"]
masked_imgs = apply_random_mask(imgs)
# Adversarial ground truths
valid = Variable(Tensor(imgs.shape[0], *discriminator.output_shape).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(imgs.shape[0], *discriminator.output_shape).fill_(0.0), requires_grad=False)
if cuda:
imgs = imgs.type(Tensor)
imgs_lr = imgs_lr.type(Tensor)
masked_imgs = masked_imgs.type(Tensor)
real_imgs = Variable(imgs)
imgs_lr = Variable(imgs_lr)
masked_imgs = Variable(masked_imgs)
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
gen_imgs = generator(masked_imgs, imgs_lr)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(discriminator(gen_imgs), valid)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(discriminator(real_imgs), valid)
fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
d_loss = 0.5 * (real_loss + fake_loss)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())
)
# Save first ten samples
if not saved_samples:
saved_samples["imgs"] = real_imgs[:1].clone()
saved_samples["masked"] = masked_imgs[:1].clone()
saved_samples["lowres"] = imgs_lr[:1].clone()
elif saved_samples["imgs"].size(0) < 10:
saved_samples["imgs"] = torch.cat((saved_samples["imgs"], real_imgs[:1]), 0)
saved_samples["masked"] = torch.cat((saved_samples["masked"], masked_imgs[:1]), 0)
saved_samples["lowres"] = torch.cat((saved_samples["lowres"], imgs_lr[:1]), 0)
batches_done = epoch * len(dataloader) + i
if batches_done % opt.sample_interval == 0:
save_sample(saved_samples)
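# Not part of the original script: a minimal, hedged sketch for persisting the trained
# weights once the loop above finishes. The output filenames are assumptions.
torch.save(generator.state_dict(), "generator_final.pth")
torch.save(discriminator.state_dict(), "discriminator_final.pth")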
|
the-stack_0_14549 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 14:23:32 2018
The csv module handles parsing better than naive string splitting, since the delimiter can also appear inside the data
@author: dongrp2
"""
import csv
with open('names.csv','r') as names_csv:
csv_reader = csv.reader(names_csv)
    next(csv_reader)  # skip the first line, which is the header row
    for line in csv_reader:
        # print(line)  # each row is returned as a list of strings
        print(line[2])  # field at index 2 (the email column)
"""Creating a tab delimited file"""
with open('names.csv','r') as names_csv:
csv_reader = csv.reader(names_csv)
with open('new_names.csv','w') as new_names:
csv_writer = csv.writer(new_names, delimiter='\t')
for line in csv_reader:
csv_writer.writerow(line)
# with open('new_names.csv','r') as read_new_csv:
# csv_new_reader = csv.reader(read_new_csv,delimiter='\t')
# for line in csv_new_reader:
# print(line)
"""Using Dictionary Reader where field names are keys of the values"""
with open('names.csv','r') as names_csv:
csv_reader = csv.DictReader(names_csv)
for line in csv_reader:
        print(line['email'])  # accessing the value under the 'email' key
"""Using Dictionary Writer have to provide the field names"""
with open('names.csv','r') as names_csv:
csv_reader = csv.DictReader(names_csv)
with open('new_names_dict.csv','w') as new_names_dict:
fieldnames = ['first_name','last_name','email']
csv_writer = csv.DictWriter(new_names_dict, fieldnames=fieldnames, delimiter='\t')
csv_writer.writeheader()
for line in csv_reader:
            # del line['email']  # uncomment to write only the first and last name columns
csv_writer.writerow(line)
#for line in csv_reader:
# print(line['email']) #accesing the key 'email'
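# A small follow-up sketch (not in the original tutorial): read the tab-delimited
# DictWriter output back in to confirm the delimiter round-trips correctly.
with open('new_names_dict.csv','r') as check_csv:
    csv_check_reader = csv.DictReader(check_csv, delimiter='\t')
    for line in csv_check_reader:
        print(line['first_name'], line['email'])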
|
the-stack_0_14551 | # -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.slk import Slovak
class SlovakTestCase(HangulizeTestCase):
lang = Slovak()
def test_people(self):
self.assert_examples({
'Ján Bahýľ': '얀 바힐',
'Štefan Banič': '슈테판 바니치',
'Anton Bernolák': '안톤 베르놀라크',
'Peter Bondra': '페테르 본드라',
'Zdeno Chára': '즈데노 하라',
'Dominika Cibulková': '도미니카 치불코바',
'Ján Čarnogurský': '얀 차르노구르스키',
'Štefan Marko Daxner': '슈테판 마르코 닥스네르',
'Pavol Demitra': '파볼 데미트라',
'Alexander Dubček': '알렉산데르 둡체크',
'Mikuláš Dzurinda': '미쿨라시 주린다',
'Marián Gáborík': '마리안 가보리크',
'Marek Hamšík': '마레크 함시크',
'Daniela Hantuchová': '다니엘라 한투호바',
'Andrej Hlinka': '안드레이 흘린카',
'Milan Hodža': '밀란 호자',
'Marian Hossa': '마리안 호사',
'Dominik Hrbatý': '도미니크 흐르바티',
'Pavol Hurajt': '파볼 후라이트',
'Jozef Miloslav Hurban': '요제프 밀로슬라우 후르반',
'Gustáv Husák': '구스타우 후사크',
'Hviezdoslav': '흐비에즈도슬라우',
'Dionýz Ilkovič': '디오니스 일코비치',
'Elena Kaliská': '엘레나 칼리스카',
'Michaela Kocianová': '미하엘라 코치아노바',
'Karol Kučera': '카롤 쿠체라',
'Anastasiya Kuzmina': '아나스타시야 쿠즈미나',
'Michal Martikán': '미할 마르티칸',
'Janko Matúška': '얀코 마투슈카',
'Vladimír Mečiar': '블라디미르 메치아르',
'Martina Moravcová': '마르티나 모라우초바',
'Jozef Murgaš': '요제프 무르가시',
'Natália Prekopová': '나탈리아 프레코포바',
'Jozef Roháček': '요제프 로하체크',
'Magdaléna Rybáriková': '마그달레나 리바리코바',
'Zuzana Sekerová': '주자나 세케로바',
'Aurel Stodola': '아우렐 스토돌라',
'Eugen Suchoň': '에우겐 수혼',
'Martin Škrtel': '마르틴 슈크르텔',
'Milan Rastislav Štefánik': '밀란 라스티슬라우 슈테파니크',
'Zuzana Štefečeková': '주자나 슈테페체코바',
'Peter Šťastný': '페테르 슈탸스트니',
'Ľudovít Štúr': '류도비트 슈투르',
'Jozef Tiso': '요제프 티소',
'Vavrinec': '바우리네츠',
'Rudolf Vrba': '루돌프 브르바',
'Vladimír Weiss': '블라디미르 베이스',
})
def test_places(self):
self.assert_examples({
'Banská Bystrica': '반스카 비스트리차',
'Bardejov': '바르데요우',
'Bratislava': '브라티슬라바',
'Komárno': '코마르노',
'Košice': '코시체',
'Manínska tiesňava': '마닌스카 티에스냐바',
'Martin': '마르틴',
'Michalovce': '미할로우체',
'Nitra': '니트라',
'Poprad': '포프라트',
'Považská': '포바슈스카',
'Prešov': '프레쇼우',
'Rožňava': '로주냐바',
'Slavín': '슬라빈',
'Spiš': '스피시',
'Trenčín': '트렌친',
'Trnava': '트르나바',
'Váh': '바흐',
'Vlkolínec': '블콜리네츠',
'Vydrica': '비드리차',
'Zvolen': '즈볼렌',
'Žilina': '질리나',
'Žehra': '제흐라',
})
def test_miscellaneous(self):
self.assert_examples({
'deväť': '데베티',
'jahôd': '야후오트',
'mäkčeň': '멕첸',
'pätnásť': '페트나스티',
}) |
the-stack_0_14552 | import urllib
import itertools
import json
import jinja2
from datasette.plugins import pm
from datasette.database import QueryInterrupted
from datasette.utils import (
CustomRow,
MultiParams,
append_querystring,
compound_keys_after_sql,
escape_sqlite,
filters_should_redirect,
is_url,
path_from_row_pks,
path_with_added_args,
path_with_removed_args,
path_with_replaced_args,
sqlite3,
to_css_class,
urlsafe_components,
value_as_boolean,
)
from datasette.utils.asgi import NotFound
from datasette.filters import Filters
from .base import DataView, DatasetteError, ureg
from .database import QueryView
LINK_WITH_LABEL = (
'<a href="{base_url}{database}/{table}/{link_id}">{label}</a> <em>{id}</em>'
)
LINK_WITH_VALUE = '<a href="{base_url}{database}/{table}/{link_id}">{id}</a>'
class Row:
def __init__(self, cells):
self.cells = cells
def __iter__(self):
return iter(self.cells)
def __getitem__(self, key):
for cell in self.cells:
if cell["column"] == key:
return cell["raw"]
raise KeyError
def display(self, key):
for cell in self.cells:
if cell["column"] == key:
return cell["value"]
return None
def __str__(self):
d = {
key: self[key]
for key in [
c["column"] for c in self.cells if not c.get("is_special_link_column")
]
}
return json.dumps(d, default=repr, indent=2)
class RowTableShared(DataView):
async def sortable_columns_for_table(self, database, table, use_rowid):
db = self.ds.databases[database]
table_metadata = self.ds.table_metadata(database, table)
if "sortable_columns" in table_metadata:
sortable_columns = set(table_metadata["sortable_columns"])
else:
sortable_columns = set(await db.table_columns(table))
if use_rowid:
sortable_columns.add("rowid")
return sortable_columns
async def expandable_columns(self, database, table):
# Returns list of (fk_dict, label_column-or-None) pairs for that table
expandables = []
db = self.ds.databases[database]
for fk in await db.foreign_keys_for_table(table):
label_column = await db.label_column_for_table(fk["other_table"])
expandables.append((fk, label_column))
return expandables
async def display_columns_and_rows(
self, database, table, description, rows, link_column=False, truncate_cells=0
):
"Returns columns, rows for specified table - including fancy foreign key treatment"
db = self.ds.databases[database]
table_metadata = self.ds.table_metadata(database, table)
sortable_columns = await self.sortable_columns_for_table(database, table, True)
columns = [
{"name": r[0], "sortable": r[0] in sortable_columns} for r in description
]
pks = await db.primary_keys(table)
column_to_foreign_key_table = {
fk["column"]: fk["other_table"]
for fk in await db.foreign_keys_for_table(table)
}
cell_rows = []
base_url = self.ds.config("base_url")
for row in rows:
cells = []
# Unless we are a view, the first column is a link - either to the rowid
# or to the simple or compound primary key
if link_column:
is_special_link_column = len(pks) != 1
pk_path = path_from_row_pks(row, pks, not pks, False)
cells.append(
{
"column": pks[0] if len(pks) == 1 else "Link",
"value_type": "pk",
"is_special_link_column": is_special_link_column,
"raw": pk_path,
"value": jinja2.Markup(
'<a href="{base_url}{database}/{table}/{flat_pks_quoted}">{flat_pks}</a>'.format(
base_url=base_url,
database=database,
table=urllib.parse.quote_plus(table),
flat_pks=str(jinja2.escape(pk_path)),
flat_pks_quoted=path_from_row_pks(row, pks, not pks),
)
),
}
)
for value, column_dict in zip(row, columns):
column = column_dict["name"]
if link_column and len(pks) == 1 and column == pks[0]:
# If there's a simple primary key, don't repeat the value as it's
# already shown in the link column.
continue
# First let the plugins have a go
# pylint: disable=no-member
plugin_display_value = pm.hook.render_cell(
value=value,
column=column,
table=table,
database=database,
datasette=self.ds,
)
if plugin_display_value is not None:
display_value = plugin_display_value
elif isinstance(value, bytes):
display_value = jinja2.Markup(
"<Binary data: {} byte{}>".format(
len(value), "" if len(value) == 1 else "s"
)
)
elif isinstance(value, dict):
# It's an expanded foreign key - display link to other row
label = value["label"]
value = value["value"]
# The table we link to depends on the column
other_table = column_to_foreign_key_table[column]
link_template = (
LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
)
display_value = jinja2.Markup(
link_template.format(
database=database,
base_url=base_url,
table=urllib.parse.quote_plus(other_table),
link_id=urllib.parse.quote_plus(str(value)),
id=str(jinja2.escape(value)),
label=str(jinja2.escape(label)),
)
)
elif value in ("", None):
display_value = jinja2.Markup(" ")
elif is_url(str(value).strip()):
display_value = jinja2.Markup(
'<a href="{url}">{url}</a>'.format(
url=jinja2.escape(value.strip())
)
)
elif column in table_metadata.get("units", {}) and value != "":
# Interpret units using pint
value = value * ureg(table_metadata["units"][column])
# Pint uses floating point which sometimes introduces errors in the compact
# representation, which we have to round off to avoid ugliness. In the vast
# majority of cases this rounding will be inconsequential. I hope.
value = round(value.to_compact(), 6)
display_value = jinja2.Markup(
"{:~P}".format(value).replace(" ", " ")
)
else:
display_value = str(value)
if truncate_cells and len(display_value) > truncate_cells:
display_value = display_value[:truncate_cells] + u"\u2026"
cells.append(
{
"column": column,
"value": display_value,
"raw": value,
"value_type": "none"
if value is None
else str(type(value).__name__),
}
)
cell_rows.append(Row(cells))
if link_column:
# Add the link column header.
# If it's a simple primary key, we have to remove and re-add that column name at
# the beginning of the header row.
if len(pks) == 1:
columns = [col for col in columns if col["name"] != pks[0]]
columns = [
{"name": pks[0] if len(pks) == 1 else "Link", "sortable": len(pks) == 1}
] + columns
return columns, cell_rows
class TableView(RowTableShared):
name = "table"
async def post(self, request, db_name, table_and_format):
# Handle POST to a canned query
canned_query = await self.ds.get_canned_query(
db_name, table_and_format, request.actor
)
assert canned_query, "You may only POST to a canned query"
return await QueryView(self.ds).data(
request,
db_name,
None,
canned_query["sql"],
metadata=canned_query,
editable=False,
canned_query=table_and_format,
named_parameters=canned_query.get("params"),
write=bool(canned_query.get("write")),
)
async def data(
self,
request,
database,
hash,
table,
default_labels=False,
_next=None,
_size=None,
):
canned_query = await self.ds.get_canned_query(database, table, request.actor)
if canned_query:
return await QueryView(self.ds).data(
request,
database,
hash,
canned_query["sql"],
metadata=canned_query,
editable=False,
canned_query=table,
named_parameters=canned_query.get("params"),
write=bool(canned_query.get("write")),
)
db = self.ds.databases[database]
is_view = bool(await db.get_view_definition(table))
table_exists = bool(await db.table_exists(table))
if not is_view and not table_exists:
raise NotFound("Table not found: {}".format(table))
await self.check_permission(request, "view-instance")
await self.check_permission(request, "view-database", database)
await self.check_permission(request, "view-table", (database, table))
private = not await self.ds.permission_allowed(
None, "view-table", (database, table), default=True
)
pks = await db.primary_keys(table)
table_columns = await db.table_columns(table)
select_columns = ", ".join(escape_sqlite(t) for t in table_columns)
use_rowid = not pks and not is_view
if use_rowid:
select = "rowid, {}".format(select_columns)
order_by = "rowid"
order_by_pks = "rowid"
else:
select = select_columns
order_by_pks = ", ".join([escape_sqlite(pk) for pk in pks])
order_by = order_by_pks
if is_view:
order_by = ""
# Ensure we don't drop anything with an empty value e.g. ?name__exact=
args = MultiParams(
urllib.parse.parse_qs(request.query_string, keep_blank_values=True)
)
# Special args start with _ and do not contain a __
# That's so if there is a column that starts with _
# it can still be queried using ?_col__exact=blah
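        # e.g. "_size" or "_next" are treated as special args, while "_col__exact"
        # is still interpreted as a filter on a column literally named "_col"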
special_args = {}
other_args = []
for key in args:
if key.startswith("_") and "__" not in key:
special_args[key] = args[key]
else:
for v in args.getlist(key):
other_args.append((key, v))
# Handle ?_filter_column and redirect, if present
redirect_params = filters_should_redirect(special_args)
if redirect_params:
return self.redirect(
request,
path_with_added_args(request, redirect_params),
forward_querystring=False,
)
# Spot ?_sort_by_desc and redirect to _sort_desc=(_sort)
if "_sort_by_desc" in special_args:
return self.redirect(
request,
path_with_added_args(
request,
{
"_sort_desc": special_args.get("_sort"),
"_sort_by_desc": None,
"_sort": None,
},
),
forward_querystring=False,
)
table_metadata = self.ds.table_metadata(database, table)
units = table_metadata.get("units", {})
filters = Filters(sorted(other_args), units, ureg)
where_clauses, params = filters.build_where_clauses(table)
extra_wheres_for_ui = []
# Add _where= from querystring
if "_where" in request.args:
if not await self.ds.permission_allowed(
request.actor, "execute-sql", resource=database, default=True,
):
raise DatasetteError("_where= is not allowed", status=403)
else:
where_clauses.extend(request.args.getlist("_where"))
extra_wheres_for_ui = [
{
"text": text,
"remove_url": path_with_removed_args(request, {"_where": text}),
}
for text in request.args.getlist("_where")
]
# Support for ?_through={table, column, value}
extra_human_descriptions = []
if "_through" in request.args:
for through in request.args.getlist("_through"):
through_data = json.loads(through)
through_table = through_data["table"]
other_column = through_data["column"]
value = through_data["value"]
outgoing_foreign_keys = await db.foreign_keys_for_table(through_table)
try:
fk_to_us = [
fk for fk in outgoing_foreign_keys if fk["other_table"] == table
][0]
except IndexError:
raise DatasetteError(
"Invalid _through - could not find corresponding foreign key"
)
param = "p{}".format(len(params))
where_clauses.append(
"{our_pk} in (select {our_column} from {through_table} where {other_column} = :{param})".format(
through_table=escape_sqlite(through_table),
our_pk=escape_sqlite(fk_to_us["other_column"]),
our_column=escape_sqlite(fk_to_us["column"]),
other_column=escape_sqlite(other_column),
param=param,
)
)
params[param] = value
extra_human_descriptions.append(
'{}.{} = "{}"'.format(through_table, other_column, value)
)
# _search support:
fts_table = special_args.get("_fts_table")
fts_table = fts_table or table_metadata.get("fts_table")
fts_table = fts_table or await db.fts_table(table)
fts_pk = special_args.get("_fts_pk", table_metadata.get("fts_pk", "rowid"))
search_args = dict(
pair for pair in special_args.items() if pair[0].startswith("_search")
)
search = ""
search_mode_raw = special_args.get("_searchmode") == "raw"
if fts_table and search_args:
if "_search" in search_args:
# Simple ?_search=xxx
search = search_args["_search"]
where_clauses.append(
"{fts_pk} in (select rowid from {fts_table} where {fts_table} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
fts_pk=escape_sqlite(fts_pk),
match_clause=":search"
if search_mode_raw
else "escape_fts(:search)",
)
)
extra_human_descriptions.append('search matches "{}"'.format(search))
params["search"] = search
else:
# More complex: search against specific columns
for i, (key, search_text) in enumerate(search_args.items()):
search_col = key.split("_search_", 1)[1]
if search_col not in await db.table_columns(fts_table):
raise DatasetteError("Cannot search by that column", status=400)
where_clauses.append(
"rowid in (select rowid from {fts_table} where {search_col} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
search_col=escape_sqlite(search_col),
match_clause=":search_{}".format(i)
if search_mode_raw
else "escape_fts(:search_{})".format(i),
)
)
extra_human_descriptions.append(
'search column "{}" matches "{}"'.format(
search_col, search_text
)
)
params["search_{}".format(i)] = search_text
sortable_columns = set()
sortable_columns = await self.sortable_columns_for_table(
database, table, use_rowid
)
# Allow for custom sort order
sort = special_args.get("_sort")
sort_desc = special_args.get("_sort_desc")
if not sort and not sort_desc:
sort = table_metadata.get("sort")
sort_desc = table_metadata.get("sort_desc")
if sort and sort_desc:
raise DatasetteError("Cannot use _sort and _sort_desc at the same time")
if sort:
if sort not in sortable_columns:
raise DatasetteError("Cannot sort table by {}".format(sort))
order_by = escape_sqlite(sort)
if sort_desc:
if sort_desc not in sortable_columns:
raise DatasetteError("Cannot sort table by {}".format(sort_desc))
order_by = "{} desc".format(escape_sqlite(sort_desc))
from_sql = "from {table_name} {where}".format(
table_name=escape_sqlite(table),
where=("where {} ".format(" and ".join(where_clauses)))
if where_clauses
else "",
)
# Copy of params so we can mutate them later:
from_sql_params = dict(**params)
count_sql = "select count(*) {}".format(from_sql)
_next = _next or special_args.get("_next")
offset = ""
if _next:
if is_view:
# _next is an offset
offset = " offset {}".format(int(_next))
else:
components = urlsafe_components(_next)
# If a sort order is applied, the first of these is the sort value
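                # e.g. _next="Smith,4" -> sort value "Smith", then primary key 4 as the tie-breaker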
if sort or sort_desc:
sort_value = components[0]
                    # Special case: if the non-urlencoded first token was $null, the sort value is None
if _next.split(",")[0] == "$null":
sort_value = None
components = components[1:]
# Figure out the SQL for next-based-on-primary-key first
next_by_pk_clauses = []
if use_rowid:
next_by_pk_clauses.append("rowid > :p{}".format(len(params)))
params["p{}".format(len(params))] = components[0]
else:
# Apply the tie-breaker based on primary keys
if len(components) == len(pks):
param_len = len(params)
next_by_pk_clauses.append(
compound_keys_after_sql(pks, param_len)
)
for i, pk_value in enumerate(components):
params["p{}".format(param_len + i)] = pk_value
# Now add the sort SQL, which may incorporate next_by_pk_clauses
if sort or sort_desc:
if sort_value is None:
if sort_desc:
# Just items where column is null ordered by pk
where_clauses.append(
"({column} is null and {next_clauses})".format(
column=escape_sqlite(sort_desc),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} is not null or ({column} is null and {next_clauses}))".format(
column=escape_sqlite(sort),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} {op} :p{p}{extra_desc_only} or ({column} = :p{p} and {next_clauses}))".format(
column=escape_sqlite(sort or sort_desc),
op=">" if sort else "<",
p=len(params),
extra_desc_only=""
if sort
else " or {column2} is null".format(
column2=escape_sqlite(sort or sort_desc)
),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
params["p{}".format(len(params))] = sort_value
order_by = "{}, {}".format(order_by, order_by_pks)
else:
where_clauses.extend(next_by_pk_clauses)
where_clause = ""
if where_clauses:
where_clause = "where {} ".format(" and ".join(where_clauses))
if order_by:
order_by = "order by {} ".format(order_by)
extra_args = {}
# Handle ?_size=500
page_size = _size or request.args.get("_size") or table_metadata.get("size")
if page_size:
if page_size == "max":
page_size = self.ds.max_returned_rows
try:
page_size = int(page_size)
if page_size < 0:
raise ValueError
except ValueError:
raise DatasetteError("_size must be a positive integer", status=400)
if page_size > self.ds.max_returned_rows:
raise DatasetteError(
"_size must be <= {}".format(self.ds.max_returned_rows), status=400
)
extra_args["page_size"] = page_size
else:
page_size = self.ds.page_size
sql_no_limit = "select {select} from {table_name} {where}{order_by}".format(
select=select,
table_name=escape_sqlite(table),
where=where_clause,
order_by=order_by,
)
sql = "{sql_no_limit} limit {limit}{offset}".format(
sql_no_limit=sql_no_limit.rstrip(), limit=page_size + 1, offset=offset
)
if request.args.get("_timelimit"):
extra_args["custom_time_limit"] = int(request.args.get("_timelimit"))
results = await db.execute(sql, params, truncate=True, **extra_args)
# Number of filtered rows in whole set:
filtered_table_rows_count = None
if (
not db.is_mutable
and self.ds.inspect_data
and count_sql == "select count(*) from {} ".format(table)
):
try:
filtered_table_rows_count = self.ds.inspect_data[database]["tables"][
table
]["count"]
except KeyError:
pass
if count_sql and filtered_table_rows_count is None:
try:
count_rows = list(await db.execute(count_sql, from_sql_params))
filtered_table_rows_count = count_rows[0][0]
except QueryInterrupted:
pass
# facets support
if not self.ds.config("allow_facet") and any(
arg.startswith("_facet") for arg in request.args
):
raise DatasetteError("_facet= is not allowed", status=400)
# pylint: disable=no-member
facet_classes = list(
itertools.chain.from_iterable(pm.hook.register_facet_classes())
)
facet_results = {}
facets_timed_out = []
facet_instances = []
for klass in facet_classes:
facet_instances.append(
klass(
self.ds,
request,
database,
sql=sql_no_limit,
params=params,
table=table,
metadata=table_metadata,
row_count=filtered_table_rows_count,
)
)
for facet in facet_instances:
(
instance_facet_results,
instance_facets_timed_out,
) = await facet.facet_results()
facet_results.update(instance_facet_results)
facets_timed_out.extend(instance_facets_timed_out)
# Figure out columns and rows for the query
columns = [r[0] for r in results.description]
rows = list(results.rows)
# Expand labeled columns if requested
expanded_columns = []
expandable_columns = await self.expandable_columns(database, table)
columns_to_expand = None
try:
all_labels = value_as_boolean(special_args.get("_labels", ""))
except ValueError:
all_labels = default_labels
# Check for explicit _label=
if "_label" in request.args:
columns_to_expand = request.args.getlist("_label")
if columns_to_expand is None and all_labels:
# expand all columns with foreign keys
columns_to_expand = [fk["column"] for fk, _ in expandable_columns]
if columns_to_expand:
expanded_labels = {}
for fk, _ in expandable_columns:
column = fk["column"]
if column not in columns_to_expand:
continue
expanded_columns.append(column)
# Gather the values
column_index = columns.index(column)
values = [row[column_index] for row in rows]
# Expand them
expanded_labels.update(
await self.ds.expand_foreign_keys(database, table, column, values)
)
if expanded_labels:
# Rewrite the rows
new_rows = []
for row in rows:
new_row = CustomRow(columns)
for column in row.keys():
value = row[column]
if (column, value) in expanded_labels and value is not None:
new_row[column] = {
"value": value,
"label": expanded_labels[(column, value)],
}
else:
new_row[column] = value
new_rows.append(new_row)
rows = new_rows
# Pagination next link
next_value = None
next_url = None
if len(rows) > page_size and page_size > 0:
if is_view:
next_value = int(_next or 0) + page_size
else:
next_value = path_from_row_pks(rows[-2], pks, use_rowid)
# If there's a sort or sort_desc, add that value as a prefix
if (sort or sort_desc) and not is_view:
prefix = rows[-2][sort or sort_desc]
if isinstance(prefix, dict) and "value" in prefix:
prefix = prefix["value"]
if prefix is None:
prefix = "$null"
else:
prefix = urllib.parse.quote_plus(str(prefix))
next_value = "{},{}".format(prefix, next_value)
added_args = {"_next": next_value}
if sort:
added_args["_sort"] = sort
else:
added_args["_sort_desc"] = sort_desc
else:
added_args = {"_next": next_value}
next_url = self.ds.absolute_url(
request, path_with_replaced_args(request, added_args)
)
rows = rows[:page_size]
# Detect suggested facets
suggested_facets = []
if (
self.ds.config("suggest_facets")
and self.ds.config("allow_facet")
and not _next
):
for facet in facet_instances:
suggested_facets.extend(await facet.suggest())
# human_description_en combines filters AND search, if provided
human_description_en = filters.human_description_en(
extra=extra_human_descriptions
)
if sort or sort_desc:
sorted_by = "sorted by {}{}".format(
(sort or sort_desc), " descending" if sort_desc else ""
)
human_description_en = " ".join(
[b for b in [human_description_en, sorted_by] if b]
)
async def extra_template():
nonlocal sort
display_columns, display_rows = await self.display_columns_and_rows(
database,
table,
results.description,
rows,
link_column=not is_view,
truncate_cells=self.ds.config("truncate_cells_html"),
)
metadata = (
(self.ds.metadata("databases") or {})
.get(database, {})
.get("tables", {})
.get(table, {})
)
self.ds.update_with_inherited_metadata(metadata)
form_hidden_args = []
for arg in ("_fts_table", "_fts_pk"):
if arg in special_args:
form_hidden_args.append((arg, special_args[arg]))
if request.args.get("_where"):
for where_text in request.args.getlist("_where"):
form_hidden_args.append(("_where", where_text))
# if no sort specified AND table has a single primary key,
# set sort to that so arrow is displayed
if not sort and not sort_desc:
if 1 == len(pks):
sort = pks[0]
elif use_rowid:
sort = "rowid"
return {
"supports_search": bool(fts_table),
"search": search or "",
"use_rowid": use_rowid,
"filters": filters,
"display_columns": display_columns,
"filter_columns": columns,
"display_rows": display_rows,
"facets_timed_out": facets_timed_out,
"sorted_facet_results": sorted(
facet_results.values(),
key=lambda f: (len(f["results"]), f["name"]),
reverse=True,
),
"extra_wheres_for_ui": extra_wheres_for_ui,
"form_hidden_args": form_hidden_args,
"is_sortable": any(c["sortable"] for c in display_columns),
"path_with_replaced_args": path_with_replaced_args,
"path_with_removed_args": path_with_removed_args,
"append_querystring": append_querystring,
"request": request,
"sort": sort,
"sort_desc": sort_desc,
"disable_sort": is_view,
"custom_table_templates": [
"_table-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table-table-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table.html",
],
"metadata": metadata,
"view_definition": await db.get_view_definition(table),
"table_definition": await db.get_table_definition(table),
}
return (
{
"database": database,
"table": table,
"is_view": is_view,
"human_description_en": human_description_en,
"rows": rows[:page_size],
"truncated": results.truncated,
"filtered_table_rows_count": filtered_table_rows_count,
"expanded_columns": expanded_columns,
"expandable_columns": expandable_columns,
"columns": columns,
"primary_keys": pks,
"units": units,
"query": {"sql": sql, "params": params},
"facet_results": facet_results,
"suggested_facets": suggested_facets,
"next": next_value and str(next_value) or None,
"next_url": next_url,
"private": private,
"allow_execute_sql": await self.ds.permission_allowed(
request.actor, "execute-sql", database, default=True
),
},
extra_template,
(
"table-{}-{}.html".format(to_css_class(database), to_css_class(table)),
"table.html",
),
)
class RowView(RowTableShared):
name = "row"
async def data(self, request, database, hash, table, pk_path, default_labels=False):
pk_values = urlsafe_components(pk_path)
await self.check_permission(request, "view-instance")
await self.check_permission(request, "view-database", database)
await self.check_permission(request, "view-table", (database, table))
db = self.ds.databases[database]
pks = await db.primary_keys(table)
use_rowid = not pks
select = "*"
if use_rowid:
select = "rowid, *"
pks = ["rowid"]
wheres = ['"{}"=:p{}'.format(pk, i) for i, pk in enumerate(pks)]
sql = "select {} from {} where {}".format(
select, escape_sqlite(table), " AND ".join(wheres)
)
params = {}
for i, pk_value in enumerate(pk_values):
params["p{}".format(i)] = pk_value
results = await db.execute(sql, params, truncate=True)
columns = [r[0] for r in results.description]
rows = list(results.rows)
if not rows:
raise NotFound("Record not found: {}".format(pk_values))
async def template_data():
display_columns, display_rows = await self.display_columns_and_rows(
database,
table,
results.description,
rows,
link_column=False,
truncate_cells=0,
)
for column in display_columns:
column["sortable"] = False
return {
"foreign_key_tables": await self.foreign_key_tables(
database, table, pk_values
),
"display_columns": display_columns,
"display_rows": display_rows,
"custom_table_templates": [
"_table-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table-row-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table.html",
],
"metadata": (self.ds.metadata("databases") or {})
.get(database, {})
.get("tables", {})
.get(table, {}),
}
data = {
"database": database,
"table": table,
"rows": rows,
"columns": columns,
"primary_keys": pks,
"primary_key_values": pk_values,
"units": self.ds.table_metadata(database, table).get("units", {}),
}
if "foreign_key_tables" in (request.args.get("_extras") or "").split(","):
data["foreign_key_tables"] = await self.foreign_key_tables(
database, table, pk_values
)
return (
data,
template_data,
(
"row-{}-{}.html".format(to_css_class(database), to_css_class(table)),
"row.html",
),
)
async def foreign_key_tables(self, database, table, pk_values):
if len(pk_values) != 1:
return []
db = self.ds.databases[database]
all_foreign_keys = await db.get_all_foreign_keys()
foreign_keys = all_foreign_keys[table]["incoming"]
if len(foreign_keys) == 0:
return []
sql = "select " + ", ".join(
[
"(select count(*) from {table} where {column}=:id)".format(
table=escape_sqlite(fk["other_table"]),
column=escape_sqlite(fk["other_column"]),
)
for fk in foreign_keys
]
)
try:
rows = list(await db.execute(sql, {"id": pk_values[0]}))
except sqlite3.OperationalError:
# Almost certainly hit the timeout
return []
foreign_table_counts = dict(
zip(
[(fk["other_table"], fk["other_column"]) for fk in foreign_keys],
list(rows[0]),
)
)
foreign_key_tables = []
for fk in foreign_keys:
count = (
foreign_table_counts.get((fk["other_table"], fk["other_column"])) or 0
)
foreign_key_tables.append({**fk, **{"count": count}})
return foreign_key_tables
|
the-stack_0_14553 | from datetime import datetime
import time
if __name__ == "__main__":
    # get the current time
    now = datetime.now()
    # convert the datetime into a POSIX timestamp
    time_start = datetime.timestamp(now)
    print("now is", now, "<<<>", time_start)
    now_end = datetime.now()
    time_end = now_end.timestamp()
    print('elapsed seconds ===>', int(time_end - time_start))
seconds = 100
minutes, seconds = divmod(seconds,60)
hours, minutes = divmod(minutes, 60)
print("%02d:%02d:%02d" % (hours, minutes, seconds)) |
the-stack_0_14554 | import numpy as np
import matplotlib.pyplot as plt
Nx = 81
Nz = 81
Lx = 91.42
Lz = 100.0
xn = np.linspace(0,Lx,Nx)
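# Liton is the interface depth profile: a gentle cosine perturbation around -0.8*Lz,
# then scaled by 1000 (presumably converting km to m) before being written to the file.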
Liton = -0.8*Lz + 0.02*Lz*np.cos(np.pi*xn/Lx)
Liton = Liton*1000
f = open("interfaces_creep.txt","w")
f.write("C 1.0 1.0\n")
f.write("rho -1000. 0.\n")
f.write("H 0.0E-12 0.0E-12\n")
f.write("A 0.0 0.0\n")
f.write("n 0.0 0.0\n")
f.write("Q 0.0 0.0\n")
f.write("V 0.0 0.0\n")
for i in np.arange(Nx):
f.write("%lf\n"%(Liton[i]))
f.close()
|
the-stack_0_14555 | import json
import re
import time
import typing
import warnings
import inspect
import numpy as np
import zmq
from weakref import WeakSet
import threading
import copy
import sys
class DataSocket:
"""
    Wrapper for ZMQ socket that sends and receives dictionaries
"""
def __init__(self, context, port, type, debug, ip_address="127.0.0.1"):
# request reply socket
self._socket = context.socket(type)
self._debug = debug
        # store these as weakrefs so that circular refs don't prevent garbage collection
self._java_objects = WeakSet()
# try:
if type == zmq.PUSH:
if debug:
print("binding {}".format(port))
self._socket.bind("tcp://{}:{}".format(ip_address, port))
else:
if debug:
print("connecting {}".format(port))
self._socket.connect("tcp://{}:{}".format(ip_address, port))
# except Exception as e:
# print(e.__traceback__)
# raise Exception('Couldnt connect or bind to port {}'.format(port))
def _register_java_object(self, object):
self._java_objects.add(object)
def __del__(self):
# make sure all shadow objects have signaled to Java side to release references before they shut down
for java_object in self._java_objects:
java_object._close()
def _convert_np_to_python(self, d):
"""
recursively search dictionary and convert any values from numpy floats/ints to
python floats/ints so they can be json serialized
:return:
"""
if type(d) != dict:
return
for k, v in d.items():
if isinstance(v, dict):
self._convert_np_to_python(v)
elif type(v) == list:
for e in v:
self._convert_np_to_python(e)
elif np.issubdtype(type(v), np.floating):
d[k] = float(v)
elif np.issubdtype(type(v), np.integer):
d[k] = int(v)
def _make_array_identifier(self, entry):
"""
        Make a string to replace bytes data or a numpy array in the message; the string encodes the data type if it is a numpy array
"""
# make up a random 32 bit int as the identifier
# TODO: change to simple counting
identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]
# '@{some_number}_{bytes_per_pixel}'
        # if it's a numpy array, include bytes per pixel; otherwise just interpret it as raw bytes
return identifier, "@" + str(int(identifier)) + "_" + str(
0 if isinstance(entry, bytes) else entry.dtype.itemsize
)
def _remove_bytes(self, bytes_data, structure):
if isinstance(structure, list):
for i, entry in enumerate(structure):
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[i] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, entry)
elif isinstance(structure, dict):
for key in structure.keys():
entry = structure[key]
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[key] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, structure[key])
def send(self, message, timeout=0):
if message is None:
message = {}
# make sure any np types convert to python types so they can be json serialized
self._convert_np_to_python(message)
        # Send binary data in separate messages so it doesn't need to be JSON serialized
bytes_data = []
self._remove_bytes(bytes_data, message)
message_string = json.dumps(message)
if self._debug:
print("DEBUG, sending: {}".format(message))
# convert keys to byte array
key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]
message_parts = [bytes(message_string, "iso-8859-1")] + [
item for keyval in key_vals for item in keyval
]
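        # Wire format: [json message, identifier_1, bytes_1, identifier_2, bytes_2, ...]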
if timeout == 0:
self._socket.send_multipart(message_parts)
else:
start = time.time()
while 1000 * (time.time() - start) < timeout:
try:
self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)
return True
except zmq.ZMQError:
pass # ignore, keep trying
return False
def _replace_bytes(self, dict_or_list, hash, value):
"""
Replace placeholders for byte arrays in JSON message with their actual values
"""
if isinstance(dict_or_list, dict):
for key in dict_or_list:
if isinstance(dict_or_list[key], str) and "@" in dict_or_list[key]:
hash_in_message = int(
dict_or_list[key].split("@")[1], 16
) # interpret hex hash string
if hash == hash_in_message:
dict_or_list[key] = value
return
elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):
self._replace_bytes(dict_or_list[key], hash, value)
elif isinstance(dict_or_list, list):
for i, entry in enumerate(dict_or_list):
                if isinstance(entry, str) and "@" in entry:
hash_in_message = int(entry.split("@")[1], 16) # interpret hex hash string
if hash == hash_in_message:
dict_or_list[i] = value
return
elif isinstance(entry, list) or isinstance(entry, dict):
self._replace_bytes(entry, hash, value)
def receive(self, timeout=0):
if timeout == 0:
reply = self._socket.recv_multipart()
else:
start = time.time()
reply = None
while 1000 * (time.time() - start) < timeout:
try:
reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
if reply is not None:
break
except zmq.ZMQError:
pass # ignore, keep trying
if reply is None:
return reply
message = json.loads(reply[0].decode("iso-8859-1"))
# replace any byte data placeholders with the byte data itself
for i in np.arange(1, len(reply), 2):
# messages come in pairs: first is hash, second it byte data
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
value = reply[i + 1]
self._replace_bytes(message, identity_hash, value)
if self._debug:
print("DEBUG, recieved: {}".format(message))
self._check_exception(message)
return message
def _check_exception(self, response):
if "type" in response and response["type"] == "exception":
raise Exception(response["value"])
def close(self):
self._socket.close()
class Bridge:
"""
Create an object which acts as a client to a corresponding server running within micro-manager.
This enables construction and interaction with arbitrary java objects
"""
_DEFAULT_PORT = 4827
_EXPECTED_ZMQ_SERVER_VERSION = "4.0.0"
thread_local = threading.local()
def __new__(cls, *args, **kwargs):
"""
Only one instance of Bridge per a thread
"""
if hasattr(Bridge.thread_local, "bridge"):
return Bridge.thread_local.bridge
else:
return super(Bridge, cls).__new__(cls)
def __init__(
self, port=_DEFAULT_PORT, convert_camel_case=True, debug=False, ip_address="127.0.0.1"
):
"""
Parameters
----------
port : int
The port on which the bridge operates
convert_camel_case : bool
If True, methods for Java objects that are passed across the bridge
will have their names converted from camel case to underscores. i.e. class.methodName()
becomes class.method_name()
debug : bool
If True print helpful stuff for debugging
"""
self._ip_address = ip_address
if not hasattr(self, "_context"):
Bridge._context = zmq.Context()
if hasattr(self.thread_local, "bridge"):
return
self.thread_local.bridge = self # cache a thread-local version of the bridge
self._convert_camel_case = convert_camel_case
self._debug = debug
self._master_socket = DataSocket(
self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address
)
self._master_socket.send({"command": "connect", "debug": debug})
self._class_factory = _JavaClassFactory()
reply_json = self._master_socket.receive(timeout=500)
if reply_json is None:
raise TimeoutError(
"Socket timed out after 500 milliseconds. Is Micro-Manager running and is the ZMQ server option enabled?"
)
if reply_json["type"] == "exception":
raise Exception(reply_json["message"])
if "version" not in reply_json:
reply_json["version"] = "2.0.0" # before version was added
if reply_json["version"] != self._EXPECTED_ZMQ_SERVER_VERSION:
warnings.warn(
"Version mistmatch between Java ZMQ server and Python client. "
"\nJava ZMQ server version: {}\nPython client expected version: {}"
"\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build".format(
reply_json["version"], self._EXPECTED_ZMQ_SERVER_VERSION
)
)
def get_class(self, serialized_object) -> typing.Type["JavaObjectShadow"]:
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)
def construct_java_object(self, classpath, new_socket=False, args=None):
"""
        Create a new instance of an object on the Java side. Returns a Python "Shadow" of the object, which behaves
just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at
runtime using iPython autocomplete
Parameters
----------
classpath : str
Full classpath of the java object
new_socket : bool
If True, will create new java object on a new port so that blocking calls will not interfere
            with the bridge's master port
args : list
list of arguments to the constructor, if applicable
Returns
-------
Python "Shadow" to the Java object
"""
if args is None:
args = []
# classpath_minus_class = '.'.join(classpath.split('.')[:-1])
# query the server for constructors matching this classpath
message = {"command": "get-constructors", "classpath": classpath}
self._master_socket.send(message)
constructors = self._master_socket.receive()["api"]
methods_with_name = [m for m in constructors if m["name"] == classpath]
if len(methods_with_name) == 0:
raise Exception("No valid java constructor found with classpath {}".format(classpath))
valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)
# Calling a constructor, rather than getting return from method
message = {
"command": "constructor",
"classpath": classpath,
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
"arguments": _package_arguments(valid_method_spec, args),
}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def _connect_push(self, port):
"""
Connect a push socket on the given port
:param port:
:return:
"""
return DataSocket(
self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address
)
def _connect_pull(self, port):
"""
Connect to a pull socket on the given port
:param port:
:return:
"""
return DataSocket(
self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address
)
def get_magellan(self):
"""
return an instance of the Micro-Magellan API
"""
return self.construct_java_object("org.micromanager.magellan.api.MagellanAPI")
def get_core(self):
"""
Connect to CMMCore and return object that has its methods
:return: Python "shadow" object for micromanager core
"""
if hasattr(self, "core"):
return getattr(self, "core")
self.core = self.construct_java_object("mmcorej.CMMCore")
return self.core
def get_studio(self):
"""
return an instance of the Studio object that provides access to micro-manager Java APIs
"""
return self.construct_java_object("org.micromanager.Studio")
class _JavaClassFactory:
"""
This class is responsible for generating subclasses of JavaObjectShadow. Each generated class is kept in a `dict`.
    If a given class has already been generated once, it will be returned from the cache rather than re-generated.
"""
def __init__(self):
self.classes = {}
def create(
self, serialized_obj: dict, convert_camel_case: bool = True
) -> typing.Type["JavaObjectShadow"]:
"""Create a class (or return a class from the cache) based on the contents of `serialized_object` message."""
if serialized_obj["class"] in self.classes.keys(): # Return a cached class
return self.classes[serialized_obj["class"]]
else: # Generate a new class since it wasn't found in the cache.
_java_class: str = serialized_obj["class"]
python_class_name_translation = _java_class.replace(
".", "_"
) # Having periods in the name would be problematic.
_interfaces = serialized_obj["interfaces"]
static_attributes = {"_java_class": _java_class, "_interfaces": _interfaces}
fields = {} # Create a dict of field names with getter and setter funcs.
for field in serialized_obj["fields"]:
fields[field] = property(
fget=lambda instance, Field=field: instance._access_field(Field),
fset=lambda instance, val, Field=field: instance._set_field(Field, val),
)
methods = {} # Create a dict of methods for the class by name.
methodSpecs = serialized_obj["api"]
method_names = set([m["name"] for m in methodSpecs])
# parse method descriptions to make python stand ins
for method_name in method_names:
params, methods_with_name, method_name_modified = _parse_arg_names(
methodSpecs, method_name, convert_camel_case
)
return_type = methods_with_name[0]["return-type"]
fn = lambda instance, *args, signatures_list=tuple(
methods_with_name
): instance._translate_call(signatures_list, args)
fn.__name__ = method_name_modified
fn.__doc__ = "{}.{}: A dynamically generated Java method.".format(
_java_class, method_name_modified
)
sig = inspect.signature(fn)
params = [
inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)
] + params # Add `self` as the first argument.
return_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]
if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE
else return_type
)
fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)
methods[method_name_modified] = fn
newclass = type( # Dynamically create a class to shadow a java class.
python_class_name_translation, # Name, based on the original java name
(JavaObjectShadow,), # Inheritance
{
"__init__": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(
instance, socket, serialized_object, bridge
),
**static_attributes,
**fields,
**methods,
},
)
self.classes[_java_class] = newclass
return newclass
class JavaObjectShadow:
"""
Generic class for serving as a python interface for a micromanager class using a zmq server backend
"""
_interfaces = (
None # Subclasses should fill these out. This class should never be directly instantiated.
)
_java_class = None
def __init__(self, socket, serialized_object, bridge: Bridge):
self._socket = socket
self._hash_code = serialized_object["hash-code"]
self._bridge = bridge
# register objects with bridge so it can tell Java side to release them before socket shuts down
socket._register_java_object(self)
self._closed = False
# atexit.register(self._close)
def _close(self):
if self._closed:
return
if not hasattr(self, "_hash_code"):
return # constructor didnt properly finish, nothing to clean up on java side
message = {"command": "destructor", "hash-code": self._hash_code}
if self._bridge._debug:
print("closing: {}".format(self))
self._socket.send(message)
reply_json = self._socket.receive()
if reply_json["type"] == "exception":
raise Exception(reply_json["value"])
self._closed = True
def __del__(self):
"""
Tell java side this object is garbage collected so it can do the same if needed
"""
self._close()
def _access_field(self, name):
"""
Return a python version of the field with a given name
:return:
"""
message = {"command": "get-field", "hash-code": self._hash_code, "name": name}
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _set_field(self, name, value):
"""
Return a python version of the field with a given name
:return:
"""
message = {
"command": "set-field",
"hash-code": self._hash_code,
"name": name,
"value": _serialize_arg(value),
}
self._socket.send(message)
reply = self._deserialize(self._socket.receive())
def _translate_call(self, method_specs, fn_args: tuple):
"""
Translate to appropriate Java method, call it, and return converted python version of its result
Parameters
----------
        method_specs :
            list of dictionaries describing the possible (overloaded) method signatures
        fn_args :
            the positional arguments passed in from Python, possibly polymorphic
"""
# args that are none are placeholders to allow for polymorphism and not considered part of the spec
# fn_args = [a for a in fn_args if a is not None]
valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)
# args are good, make call through socket, casting the correct type if needed (e.g. int to float)
message = {
"command": "run-method",
"hash-code": self._hash_code,
"name": valid_method_spec["name"],
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
}
message["arguments"] = _package_arguments(valid_method_spec, fn_args)
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _deserialize(self, json_return):
"""
        json_return :
            JSON dictionary (with any byte placeholders already substituted) describing the Java return value
Returns
-------
An appropriate python type of the converted value
"""
if json_return["type"] == "exception":
raise Exception(json_return["value"])
elif json_return["type"] == "null":
return None
elif json_return["type"] == "primitive":
return json_return["value"]
elif json_return["type"] == "string":
return json_return["value"]
elif json_return["type"] == "list":
return [self._deserialize(obj) for obj in json_return["value"]]
elif json_return["type"] == "object":
if json_return["class"] == "JSONObject":
return json.loads(json_return["value"])
else:
raise Exception("Unrecognized return class")
elif json_return["type"] == "unserialized-object":
# inherit socket from parent object
return self._bridge.get_class(json_return)(
socket=self._socket, serialized_object=json_return, bridge=self._bridge
)
else:
return deserialize_array(json_return)
def deserialize_array(json_return):
"""
Convert a serialized java array to the appropriate numpy type
Parameters
----------
json_return
"""
if json_return["type"] in ["byte-array", "int-array", "short-array", "float-array"]:
decoded = json_return["value"]
if json_return["type"] == "byte-array":
return np.frombuffer(decoded, dtype="=u1").copy()
elif json_return["type"] == "double-array":
return np.frombuffer(decoded, dtype="=f8").copy()
elif json_return["type"] == "int-array":
return np.frombuffer(decoded, dtype="=u4").copy()
elif json_return["type"] == "short-array":
return np.frombuffer(decoded, dtype="=u2").copy()
elif json_return["type"] == "float-array":
return np.frombuffer(decoded, dtype="=f4").copy()
def _package_arguments(valid_method_spec, fn_args):
"""
Serialize function arguments and also include description of their Java types
Parameters
----------
valid_method_spec:
fn_args :
"""
arguments = []
for arg_type, arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
arguments.append(_serialize_arg(arg_val))
elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:
arguments.append(_serialize_arg(arg_val))
elif arg_val is None:
arguments.append(_serialize_arg(arg_val))
elif isinstance(arg_val, np.ndarray):
arguments.append(_serialize_arg(arg_val))
else:
arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))
return arguments
def _serialize_arg(arg):
if arg is None:
return None
if type(arg) in [bool, str, int, float]:
return arg # json handles serialization
elif type(arg) == np.ndarray:
return arg.tobytes()
elif isinstance(arg, JavaObjectShadow):
return {"hash-code": arg._hash_code}
else:
raise Exception("Unknown argumetn type")
def _check_single_method_spec(method_spec, fn_args):
"""
    Check if a single method specification is compatible with the arguments the function received
Parameters
----------
method_spec :
fn_args :
"""
if len(method_spec["arguments"]) != len(fn_args):
return False
for arg_java_type, arg_val in zip(method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
if arg_java_type not in arg_val._interfaces:
# check that it shadows object of the correct type
return False
elif type(arg_val) == np.ndarray:
# For ND Arrays, need to make sure data types match
if (
arg_java_type != "java.lang.Object"
and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]
):
return False
elif not any(
[
isinstance(arg_val, acceptable_type)
for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]
]
) and not (
arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES
): # could be null if its an object
# if a type that gets converted
return False
return True
def _check_method_args(method_specs, fn_args):
"""
Compare python arguments to java arguments to find correct function to call
Parameters
----------
method_specs :
fn_args :
Returns
-------
one of the method_specs that is valid
"""
valid_method_spec = None
for method_spec in method_specs:
if _check_single_method_spec(method_spec, fn_args):
valid_method_spec = method_spec
break
if valid_method_spec is None:
raise Exception(
"Incorrect arguments. \nExpected {} \nGot {}".format(
" or ".join([", ".join(method_spec["arguments"]) for method_spec in method_specs]),
", ".join([str(type(a)) for a in fn_args]),
)
)
# subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects
valid_method_spec = copy.deepcopy(valid_method_spec)
deserialize_types = []
for java_arg_class, python_arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(python_arg_val, np.ndarray):
deserialize_types.append(
[
ja
for ja, npdt in zip(
_JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()
)
if python_arg_val.dtype.type == npdt
][0]
)
else:
deserialize_types.append(java_arg_class)
return valid_method_spec, deserialize_types
def _parse_arg_names(methods, method_name, convert_camel_case):
method_name_modified = (
_camel_case_2_snake_case(method_name) if convert_camel_case else method_name
)
# all methods with this name and different argument lists
methods_with_name = [m for m in methods if m["name"] == method_name]
min_required_args = (
0
if len(methods_with_name) == 1 and len(methods_with_name[0]["arguments"]) == 0
else min([len(m["arguments"]) for m in methods_with_name])
)
# sort with largest number of args last so lambda at end gets max num args
methods_with_name.sort(key=lambda val: len(val["arguments"]))
method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.
params = []
unique_argument_names = []
for arg_index, typ in enumerate(method["arguments"]):
hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else "object"
python_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ
)
if hint in unique_argument_names: # append numbers to end so arg hints have unique names
i = 1
while hint + str(i) in unique_argument_names:
i += 1
arg_name = hint + str(i)
else:
arg_name = hint
unique_argument_names.append(arg_name)
# this is how overloading is handled for now, by making default arguments as none, but
# it might be better to explicitly compare argument types
if arg_index >= min_required_args:
default_arg_value = None
else:
default_arg_value = inspect.Parameter.empty
params.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_arg_value,
annotation=python_type,
)
)
return params, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
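    # e.g. "getExposure" -> "get_exposure", "snapImage" -> "snap_image"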
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
_CLASS_NAME_MAPPING = {
"boolean": "boolean",
"byte[]": "uint8array",
"double": "float",
"double[]": "float64_array",
"float": "float",
"int": "int",
"int[]": "uint32_array",
"java.lang.String": "string",
"long": "int",
"short": "int",
"void": "void",
}
_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {
"byte[]": np.uint8,
"short[]": np.uint16,
"double[]": np.float64,
"int[]": np.int32,
}
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {
"boolean": bool,
"double": float,
"float": float,
"byte[]": np.ndarray,
"short[]": np.ndarray,
"double[]": np.ndarray,
"int[]": np.ndarray,
"int": int,
"java.lang.String": str,
"long": int,
"short": int,
"char": int,
"byte": int,
"void": None,
"java.lang.Object": object,
}
# type conversions that allow for autocasting
_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {
"boolean": {bool},
"byte[]": {np.ndarray},
"double": {float, int},
"double[]": {np.ndarray},
"float": {float},
"int": {int},
"int[]": {np.ndarray},
"java.lang.String": {str},
"long": {int},
"short": {int},
"char": {int},
"byte": {int},
"void": {None},
"java.lang.Object": {object},
}
_JAVA_NON_PRIMITIVES = {"byte[]", "double[]", "int[]", "java.lang.String", "java.lang.Object"}
if __name__ == "__main__":
# Test basic bridge operations
import traceback
b = Bridge()
try:
s = b.get_studio()
except:
traceback.print_exc()
try:
c = b.get_core()
except:
traceback.print_exc()
a = 1
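    # A hedged follow-up sketch: if the core connected above, query something simple from it.
    try:
        print("MMCore version:", c.get_version_info())
    except Exception:
        traceback.print_exc()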
|
the-stack_0_14558 | # pylint: disable=no-self-use,invalid-name
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data.iterators import BucketIterator
from allennlp.tests.data.iterators.basic_iterator_test import IteratorTest
class TestBucketIterator(IteratorTest):
# pylint: disable=protected-access
def test_create_batches_groups_correctly(self):
iterator = BucketIterator(batch_size=2, padding_noise=0, sorting_keys=[('text', 'num_tokens')])
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
grouped_instances = [batch.instances for batch in batches]
assert grouped_instances == [[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]]]
def test_create_batches_groups_correctly_with_max_instances(self):
# If we knew all the instances, the correct order is 4 -> 2 -> 0 -> 1 -> 3.
# Here max_instances_in_memory is 3, so we load instances [0, 1, 2]
# and then bucket them by size into batches of size 2 to get [2, 0] -> [1].
# Then we load the remaining instances and bucket them by size to get [4, 3].
iterator = BucketIterator(batch_size=2,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
max_instances_in_memory=3)
iterator.index_with(self.vocab)
for test_instances in (self.instances, self.lazy_instances):
batches = list(iterator._create_batches(test_instances, shuffle=False))
grouped_instances = [batch.instances for batch in batches]
assert grouped_instances == [[self.instances[2], self.instances[0]],
[self.instances[1]],
[self.instances[4], self.instances[3]]]
def test_biggest_batch_first_works(self):
iterator = BucketIterator(batch_size=2,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
biggest_batch_first=True)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
grouped_instances = [batch.instances for batch in batches]
assert grouped_instances == [[self.instances[3]],
[self.instances[0], self.instances[1]],
[self.instances[4], self.instances[2]]]
def test_from_params(self):
# pylint: disable=protected-access
params = Params({})
with pytest.raises(ConfigurationError):
iterator = BucketIterator.from_params(params)
sorting_keys = [("s1", "nt"), ("s2", "nt2")]
params['sorting_keys'] = sorting_keys
iterator = BucketIterator.from_params(params)
assert iterator._sorting_keys == sorting_keys
assert iterator._padding_noise == 0.1
assert not iterator._biggest_batch_first
assert iterator._batch_size == 32
assert not iterator._skip_smaller_batches
params = Params({
"sorting_keys": sorting_keys,
"padding_noise": 0.5,
"biggest_batch_first": True,
"batch_size": 100,
"skip_smaller_batches": True
})
iterator = BucketIterator.from_params(params)
assert iterator._sorting_keys == sorting_keys
assert iterator._padding_noise == 0.5
assert iterator._biggest_batch_first
assert iterator._batch_size == 100
assert iterator._skip_smaller_batches
def test_bucket_iterator_maximum_samples_per_batch(self):
iterator = BucketIterator(
batch_size=3,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
maximum_samples_per_batch=['num_tokens', 9]
)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
stats = self.get_batches_stats(batches)
# ensure all instances are in a batch
assert stats['total_instances'] == len(self.instances)
# ensure correct batch sizes
assert stats['batch_lengths'] == [2, 2, 1]
# ensure correct sample sizes (<= 9)
assert stats['sample_sizes'] == [6, 8, 9]
def test_maximum_samples_per_batch_packs_tightly(self):
token_counts = [10, 4, 3]
test_instances = self.create_instances_from_token_counts(token_counts)
iterator = BucketIterator(
batch_size=3,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
maximum_samples_per_batch=['num_tokens', 11]
)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(test_instances, shuffle=False))
stats = self.get_batches_stats(batches)
# ensure all instances are in a batch
assert stats['total_instances'] == len(test_instances)
# ensure correct batch sizes
assert stats['batch_lengths'] == [2, 1]
# ensure correct sample sizes (<= 11)
assert stats['sample_sizes'] == [8, 10]
def test_skip_smaller_batches_works(self):
iterator = BucketIterator(batch_size=2, padding_noise=0, sorting_keys=[('text', 'num_tokens')],
skip_smaller_batches=True)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
stats = self.get_batches_stats(batches)
# all batches have length batch_size
assert all(batch_len == 2 for batch_len in stats['batch_lengths'])
# we should have lost one instance by skipping the last batch
assert stats['total_instances'] == len(self.instances) - 1
|
the-stack_0_14559 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import satchmo_utils.fields
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomProduct',
fields=[
('product', models.OneToOneField(primary_key=True, serialize=False, to='product.Product', verbose_name='Product')),
('downpayment', models.IntegerField(default=20, verbose_name='Percent Downpayment')),
('deferred_shipping', models.BooleanField(default=False, help_text='Do not charge shipping at checkout for this item.', verbose_name='Deferred Shipping')),
('option_group', models.ManyToManyField(to='product.OptionGroup', verbose_name='Option Group', blank=True)),
],
options={
'verbose_name': 'Custom Product',
'verbose_name_plural': 'Custom Products',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CustomTextField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=40, verbose_name='Custom field name')),
('slug', models.SlugField(help_text='Auto-generated from name if blank', verbose_name='Slug', blank=True)),
('sort_order', models.IntegerField(default=0, help_text='The display order for this group.', verbose_name='Sort Order')),
('price_change', satchmo_utils.fields.CurrencyField(null=True, verbose_name='Price Change', max_digits=14, decimal_places=6, blank=True)),
('products', models.ForeignKey(related_name='custom_text_fields', verbose_name='Custom Fields', to='custom.CustomProduct')),
],
options={
'ordering': ('sort_order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CustomTextFieldTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('languagecode', models.CharField(max_length=10, verbose_name='language', choices=[(b'en', b'English')])),
('name', models.CharField(max_length=255, verbose_name='Translated Custom Text Field Name')),
('version', models.IntegerField(default=1, verbose_name='version')),
('active', models.BooleanField(default=True, verbose_name='active')),
('customtextfield', models.ForeignKey(related_name='translations', to='custom.CustomTextField')),
],
options={
'ordering': ('customtextfield', 'name', 'languagecode'),
'verbose_name': 'CustomTextField Translation',
'verbose_name_plural': 'CustomTextField Translations',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='customtextfieldtranslation',
unique_together=set([('customtextfield', 'languagecode', 'version')]),
),
migrations.AlterUniqueTogether(
name='customtextfield',
unique_together=set([('slug', 'products')]),
),
]
|
the-stack_0_14563 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTetoolkit(PythonPackage):
"""TEToolkit is a software package that utilizes both unambiguously
(uniquely) and ambiguously (multi-) mapped reads to perform
differential enrichment analyses from high throughput sequencing
experiments."""
homepage = "http://hammelllab.labsites.cshl.edu/software"
pypi = "TEToolkit/TEToolkit-1.5.1.tar.gz"
version('2.0.3', sha256='1d0f5928b30c6cd9dbef8e092ae0c11e9e707faf92a19af8eed3e360da7d4e46')
version('1.5.1', sha256='22c13ca45bccc89e9d9bf48d59ae6db1fa4c634def64fc56ba9bffd23aa689ac')
depends_on('py-setuptools')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('r-deseq', when='@:1.5.1', type=('build', 'run'))
depends_on('r-deseq2', when='@2.0.0:', type=('build', 'run'))
|
the-stack_0_14567 | import json
import os
import pycspr
# A known casper test-net node address.
_NODE_ADDRESS = os.getenv("CASPER_NODE_ADDRESS", "3.136.227.9")
# A known block hash.
_BLOCK_HASH: bytes = bytes.fromhex("c7148e1e2e115d8fba357e04be2073d721847c982dc70d5c36b5f6d3cf66331c")
# A known block height.
_BLOCK_HEIGHT: int = 20652
def main():
"""Retrieves on-chain auction information.
"""
# Set client.
client: pycspr.NodeClient = pycspr.NodeClient(pycspr.NodeConnectionInfo(host=_NODE_ADDRESS))
# Set auction info scoped by current era.
auction_info_1: dict = client.queries.get_auction_info()
# Set auction info scoped by known hash.
auction_info_2: dict = client.queries.get_auction_info(_BLOCK_HASH)
# Set auction info scoped by known height.
auction_info_3: dict = client.queries.get_auction_info(_BLOCK_HEIGHT)
# Verify.
assert auction_info_1 != auction_info_2
assert auction_info_2 == auction_info_3
print("-----------------------------------------------------------------------------------------------------")
print(f"QUERIED TEST-NET NODE {_NODE_ADDRESS}")
print("-----------------------------------------------------------------------------------------------------")
print(f"Auction information = {json.dumps(auction_info_1, indent=4)}")
print("-----------------------------------------------------------------------------------------------------")
if __name__ == "__main__":
try:
main()
except Exception as err:
print(f"API ERROR @ NODE {_NODE_ADDRESS} :: {err}") |
the-stack_0_14568 | from __future__ import absolute_import
from selenium import webdriver
from shishito.runtime.environment.shishito import ShishitoEnvironment
class ControlEnvironment(ShishitoEnvironment):
""" Local control environment. """
def get_capabilities(self, config_section):
""" Return dictionary of capabilities for specific config combination.
:param str config_section: section in platform/environment.properties config
:return: dict with capabilities
"""
get_opt = self.shishito_support.get_opt
default_capabilities = super().get_capabilities(config_section)
capabilities = {
'marionette': str(get_opt('firefox_marionette')).lower() == 'true',
}
return {**default_capabilities, **capabilities}
def start_driver(self, browser_type, capabilities, config_section=None):
""" Prepare selenium webdriver.
:param browser_type: type of browser for which prepare driver
:param capabilities: capabilities used for webdriver initialization
"""
# get browser profile
browser_profile = self.get_browser_profile(browser_type, capabilities, config_section)
# starts local browser
if browser_type == "firefox":
from selenium.webdriver.firefox.options import Options
firefox_options = Options()
for arg in self.get_browser_arguments(config_section):
firefox_options.add_argument(arg)
driver = webdriver.Firefox(browser_profile, desired_capabilities=capabilities,
firefox_options=firefox_options)
elif browser_type == "chrome":
driver = webdriver.Chrome(desired_capabilities=capabilities, chrome_options=browser_profile)
elif browser_type == "ie":
driver = webdriver.Ie(capabilities=capabilities)
elif browser_type == "phantomjs":
driver = webdriver.PhantomJS(desired_capabilities=capabilities)
elif browser_type == "opera":
driver = webdriver.Opera(desired_capabilities=capabilities)
# SafariDriver bindings for Python not yet implemented
# elif browser == "Safari":
# self.driver = webdriver.SafariDriver()
else:
raise ValueError('Unknown type of browser.')
return driver
def call_browser(self, config_section):
""" Start webdriver for given config section. Prepare capabilities for the browser, set browser resolution.
:param str config_section: section in platform/environment.properties config
:return: created webdriver
"""
# get browser capabilities
capabilities = self.get_capabilities(config_section)
# get browser type
browser_type = self.shishito_support.get_opt(config_section, 'browser').lower()
# get driver
driver = self.start_driver(browser_type, capabilities, config_section=config_section)
if browser_type.lower() == 'chrome':
self.set_download_path(driver)
# set browser size is defined
browser_size = self.shishito_support.get_opt(config_section, 'resolution')
if browser_size:
# default size --> leave it on webdriver
width, height = browser_size.split('x')
driver.set_window_size(width, height)
return driver
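# Illustrative platform/environment.properties section consumed by call_browser()
# above. The key names come from the get_opt() calls in this module; the section
# name and the values are examples only:
#
#   [Chrome]
#   browser=Chrome
#   resolution=1280x1024
#
#   [Firefox]
#   browser=Firefox
#   resolution=1280x1024
#   firefox_marionette=true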
|
the-stack_0_14569 | # stdlib
import sys
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
import pandas as pd
# syft relative
from ....core.pointer.pointer import Pointer
from ....decorators import syft_decorator
from ....lib import create_lib_ast
from ....logger import critical
from ....logger import debug
from ....logger import error
from ....logger import traceback_and_raise
from ....proto.core.node.common.client_pb2 import Client as Client_PB
from ....proto.core.node.common.metadata_pb2 import Metadata as Metadata_PB
from ....util import get_fully_qualified_name
from ...common.message import EventualSyftMessageWithoutReply
from ...common.message import ImmediateSyftMessageWithReply
from ...common.message import ImmediateSyftMessageWithoutReply
from ...common.message import SignedEventualSyftMessageWithoutReply
from ...common.message import SignedImmediateSyftMessageWithReply
from ...common.message import SignedImmediateSyftMessageWithoutReply
from ...common.message import SyftMessage
from ...common.serde.deserialize import _deserialize
from ...common.uid import UID
from ...io.location import Location
from ...io.location import SpecificLocation
from ...io.route import Route
from ...io.route import SoloRoute
from ...io.virtual import VirtualClientConnection
from ...node.common.service.obj_search_service import ObjectSearchMessage
from ..abstract.node import AbstractNodeClient
from .action.exception_action import ExceptionMessage
from .service.child_node_lifecycle_service import RegisterChildNodeMessage
class Client(AbstractNodeClient):
"""Client is an incredibly powerful abstraction in Syft. We assume that,
no matter where a client is, it can figure out how to communicate with
the Node it is supposed to point to. If I send you a client I have
with all of the metadata in it, you should have all the information
you need to know to interact with a node (although you might not
have permissions - clients should not store private keys)."""
@syft_decorator(typechecking=True)
def __init__(
self,
name: Optional[str],
routes: List[Route],
network: Optional[Location] = None,
domain: Optional[Location] = None,
device: Optional[Location] = None,
vm: Optional[Location] = None,
signing_key: Optional[SigningKey] = None,
verify_key: Optional[VerifyKey] = None,
):
name = f"{name} Client" if name is not None else None
super().__init__(
name=name, network=network, domain=domain, device=device, vm=vm
)
self.routes = routes
self.default_route_index = 0
# create a signing key if one isn't provided
if signing_key is None:
self.signing_key = SigningKey.generate()
else:
self.signing_key = signing_key
# if verify key isn't provided, get verify key from signing key
if verify_key is None:
self.verify_key = self.signing_key.verify_key
else:
self.verify_key = verify_key
self.install_supported_frameworks()
self.store = StoreClient(client=self)
@property
def icon(self) -> str:
icon = "📡"
sub = []
if self.vm is not None:
sub.append("🍰")
if self.device is not None:
sub.append("📱")
if self.domain is not None:
sub.append("🏰")
if self.network is not None:
sub.append("🔗")
if len(sub) > 0:
icon = f"{icon} ["
for s in sub:
icon += s
icon += "]"
return icon
@staticmethod
def deserialize_client_metadata_from_node(
metadata: Metadata_PB,
) -> Tuple[SpecificLocation, str, UID]:
# string of bytes
meta = _deserialize(blob=metadata)
return meta.node, meta.name, meta.id
def install_supported_frameworks(self) -> None:
self.lib_ast = create_lib_ast(client=self)
# first time we want to register for future updates
self.lib_ast.register_updates(self)
if self.lib_ast is not None:
for attr_name, attr in self.lib_ast.attrs.items():
setattr(self, attr_name, attr)
# shortcut syft.lib.python to just python
if hasattr(self.lib_ast, "syft"):
try:
lib_attr = getattr(self.lib_ast.syft, "lib", None)
if lib_attr is not None:
python_attr = getattr(lib_attr, "python", None)
setattr(self, "python", python_attr)
except Exception as e:
critical(f"Failed to set python attribute on client. {e}")
def add_me_to_my_address(self) -> None:
traceback_and_raise(NotImplementedError)
@syft_decorator(typechecking=True)
def register_in_memory_client(self, client: AbstractNodeClient) -> None:
# WARNING: Gross hack
route_index = self.default_route_index
# this ID should be unique but persistent so that lookups are universal
route = self.routes[route_index]
if isinstance(route, SoloRoute):
connection = route.connection
if isinstance(connection, VirtualClientConnection):
connection.server.node.in_memory_client_registry[
client.address.target_id.id
] = client
else:
traceback_and_raise(
Exception(
"Unable to save client reference without VirtualClientConnection"
)
)
else:
traceback_and_raise(
Exception("Unable to save client reference without SoloRoute")
)
@syft_decorator(typechecking=True)
def register(self, client: AbstractNodeClient) -> None:
debug(f"> Registering {client.pprint} with {self.pprint}")
self.register_in_memory_client(client=client)
msg = RegisterChildNodeMessage(
lookup_id=client.id,
child_node_client_address=client.address,
address=self.address,
)
if self.network is not None:
client.network = (
self.network
if self.network is not None # type: ignore # nested "is not None"
else client.network
)
# QUESTION
# if the client is a network and the domain is not none this will set it
# on the network causing an exception
# but we can't check if the client is a NetworkClient here because
# this is a superclass of NetworkClient
# Remove: if self.domain is not None:
# then see the test line node_test.py:
# bob_network_client.register(client=bob_domain_client)
if self.domain is not None:
client.domain = (
self.domain
if self.domain is not None # type: ignore # nested "is not None"
else client.domain
)
if self.device is not None:
client.device = (
self.device
if self.device is not None # type: ignore # nested "is not None"
else client.device
)
assert self.device == client.device
if self.vm is not None:
client.vm = self.vm
self.send_immediate_msg_without_reply(msg=msg)
@property
    def id(self) -> UID:
        """This client points to a node; this returns the id of that node."""
traceback_and_raise(NotImplementedError)
# TODO fix the msg type but currently tensor needs SyftMessage
@syft_decorator(typechecking=True)
def send_immediate_msg_with_reply(
self,
msg: Union[SignedImmediateSyftMessageWithReply, ImmediateSyftMessageWithReply],
route_index: int = 0,
) -> SyftMessage:
route_index = route_index or self.default_route_index
if isinstance(msg, ImmediateSyftMessageWithReply):
output = (
f"> {self.pprint} Signing {msg.pprint} with "
+ f"{self.key_emoji(key=self.signing_key.verify_key)}"
)
debug(output)
msg = msg.sign(signing_key=self.signing_key)
response = self.routes[route_index].send_immediate_msg_with_reply(msg=msg)
if response.is_valid:
# check if we have an ExceptionMessage to trigger a local exception
# from a remote exception that we caused
if isinstance(response.message, ExceptionMessage):
exception_msg = response.message
exception = exception_msg.exception_type(exception_msg.exception_msg)
error(str(exception))
traceback_and_raise(exception)
else:
return response.message
traceback_and_raise(
Exception("Response was signed by a fake key or was corrupted in transit.")
)
# TODO fix the msg type but currently tensor needs SyftMessage
@syft_decorator(typechecking=True)
def send_immediate_msg_without_reply(
self,
msg: Union[
SignedImmediateSyftMessageWithoutReply, ImmediateSyftMessageWithoutReply
],
route_index: int = 0,
) -> None:
route_index = route_index or self.default_route_index
if isinstance(msg, ImmediateSyftMessageWithoutReply):
output = (
f"> {self.pprint} Signing {msg.pprint} with "
+ f"{self.key_emoji(key=self.signing_key.verify_key)}"
)
debug(output)
msg = msg.sign(signing_key=self.signing_key)
debug(f"> Sending {msg.pprint} {self.pprint} ➡️ {msg.address.pprint}")
self.routes[route_index].send_immediate_msg_without_reply(msg=msg)
@syft_decorator(typechecking=True)
def send_eventual_msg_without_reply(
self, msg: EventualSyftMessageWithoutReply, route_index: int = 0
) -> None:
route_index = route_index or self.default_route_index
output = (
f"> {self.pprint} Signing {msg.pprint} with "
+ f"{self.key_emoji(key=self.signing_key.verify_key)}"
)
debug(output)
signed_msg: SignedEventualSyftMessageWithoutReply = msg.sign(
signing_key=self.signing_key
)
self.routes[route_index].send_eventual_msg_without_reply(msg=signed_msg)
@syft_decorator(typechecking=True)
def __repr__(self) -> str:
return f"<Client pointing to node with id:{self.id}>"
@syft_decorator(typechecking=True)
def register_route(self, route: Route) -> None:
self.routes.append(route)
@syft_decorator(typechecking=True)
def set_default_route(self, route_index: int) -> None:
        self.default_route_index = route_index
@syft_decorator(typechecking=True)
def _object2proto(self) -> Client_PB:
obj_type = get_fully_qualified_name(obj=self)
routes = [route.serialize() for route in self.routes]
network = self.network._object2proto() if self.network is not None else None
domain = self.domain._object2proto() if self.domain is not None else None
device = self.device._object2proto() if self.device is not None else None
vm = self.vm._object2proto() if self.vm is not None else None
client_pb = Client_PB(
obj_type=obj_type,
id=self.id.serialize(),
name=self.name,
routes=routes,
has_network=self.network is not None,
network=network,
has_domain=self.domain is not None,
domain=domain,
has_device=self.device is not None,
device=device,
has_vm=self.vm is not None,
vm=vm,
)
return client_pb
@staticmethod
def _proto2object(proto: Client_PB) -> "Client":
module_parts = proto.obj_type.split(".")
klass = module_parts.pop()
obj_type = getattr(sys.modules[".".join(module_parts)], klass)
network = (
SpecificLocation._proto2object(proto.network) if proto.has_network else None
)
domain = (
SpecificLocation._proto2object(proto.domain) if proto.has_domain else None
)
device = (
SpecificLocation._proto2object(proto.device) if proto.has_device else None
)
vm = SpecificLocation._proto2object(proto.vm) if proto.has_vm else None
routes = [SoloRoute._proto2object(route) for route in proto.routes]
obj = obj_type(
name=proto.name,
routes=routes,
network=network,
domain=domain,
device=device,
vm=vm,
)
if type(obj) != obj_type:
traceback_and_raise(
TypeError(
f"Deserializing Client. Expected type {obj_type}. Got {type(obj)}"
)
)
return obj
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
return Client_PB
@property
def keys(self) -> str:
verify = (
self.key_emoji(key=self.signing_key.verify_key)
if self.signing_key is not None
else "🚫"
)
keys = f"🔑 {verify}"
return keys
class StoreClient:
def __init__(self, client: Client) -> None:
self.client = client
@property
def store(self) -> List[Pointer]:
msg = ObjectSearchMessage(
address=self.client.address, reply_to=self.client.address
)
results = self.client.send_immediate_msg_with_reply(msg=msg).results
# This is because of a current limitation in Pointer where we cannot
# serialize a client object. TODO: Fix limitation in Pointer so that we don't need this.
for result in results:
result.gc_enabled = False
result.client = self.client
return results
def __len__(self) -> int:
"""Return the number of items in the object store we're allowed to know about"""
return len(self.store)
def __getitem__(self, key: Union[str, int]) -> Pointer:
if isinstance(key, str):
matches = 0
match_obj: Optional[Pointer] = None
for obj in self.store:
if key in str(obj.id_at_location.value).replace("-", ""):
return obj
if key in obj.tags:
matches += 1
match_obj = obj
if matches == 1 and match_obj is not None:
return match_obj
elif matches > 1:
traceback_and_raise(KeyError("More than one item with tag:" + str(key)))
traceback_and_raise(KeyError("No such request found for id:" + str(key)))
if isinstance(key, int):
return self.store[key]
else:
traceback_and_raise(KeyError("Please pass in a string or int key"))
def __repr__(self) -> str:
return repr(self.store)
@property
def pandas(self) -> pd.DataFrame:
obj_lines: List[Dict[str, Any]] = list()
for obj in self.store:
obj_lines.append(
{
"ID": obj.id_at_location,
"Tags": obj.tags,
"Description": obj.description,
"object_type": obj.object_type,
}
)
return pd.DataFrame(obj_lines)
|
the-stack_0_14570 | #!/usr/bin/env python3
"""websocket cmd client for wssrv.py example."""
import argparse
import asyncio
import signal
import sys
import aiohttp
async def start_client(loop: asyncio.AbstractEventLoop, url: str) -> None:
name = input("Please enter your name: ")
# input reader
def stdin_callback() -> None:
line = sys.stdin.buffer.readline().decode("utf-8")
if not line:
loop.stop()
else:
            # send_str() is a coroutine in modern aiohttp, so schedule it as a task
            loop.create_task(ws.send_str(name + ": " + line))
loop.add_reader(sys.stdin.fileno(), stdin_callback)
async def dispatch() -> None:
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
print("Text: ", msg.data.strip())
elif msg.type == aiohttp.WSMsgType.BINARY:
print("Binary: ", msg.data)
elif msg.type == aiohttp.WSMsgType.PING:
await ws.pong()
elif msg.type == aiohttp.WSMsgType.PONG:
print("Pong received")
else:
if msg.type == aiohttp.WSMsgType.CLOSE:
await ws.close()
elif msg.type == aiohttp.WSMsgType.ERROR:
print("Error during receive %s" % ws.exception())
elif msg.type == aiohttp.WSMsgType.CLOSED:
pass
break
# send request
async with aiohttp.ClientSession() as client:
async with client.ws_connect(url, autoclose=False, autoping=False) as ws:
await dispatch()
ARGS = argparse.ArgumentParser(
description="websocket console client for wssrv.py example."
)
ARGS.add_argument(
"--host", action="store", dest="host", default="127.0.0.1", help="Host name"
)
ARGS.add_argument(
"--port", action="store", dest="port", default=8080, type=int, help="Port number"
)
if __name__ == "__main__":
args = ARGS.parse_args()
if ":" in args.host:
args.host, port = args.host.split(":", 1)
args.port = int(port)
url = f"http://{args.host}:{args.port}"
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGINT, loop.stop)
loop.create_task(start_client(loop, url))
loop.run_forever()
|
the-stack_0_14573 | import os
import signal
import psutil
from rest.api.loghelpers.message_dumper import MessageDumper
from rest.service.fluentd import Fluentd
class ProcessUtils:
def __init__(self, logger):
self.logger = logger
self.fluentd_utils = Fluentd(logger)
self.message_dumper = MessageDumper()
def on_terminate(self, proc):
self.fluentd_utils.emit(tag="process", msg=self.message_dumper.dump_message(
{"proc": str(proc), "returncode": proc.returncode}))
def log_process_err(self, proc, err=""):
self.fluentd_utils.emit(tag="process", msg=self.message_dumper.dump_message(
{"proc": str(proc), "error": err}))
@staticmethod
def find_procs_by_name(name):
""" Return a list of processes matching 'name' """
ls = []
for p in psutil.process_iter(["name", "exe", "cmdline"]):
if name == p.info['name'] or \
p.info['exe'] and os.path.basename(p.info['exe']) == name or \
p.info['cmdline'] and p.info['cmdline'][0] == name:
ls.append(p)
return ls
    def kill_proc_tree(self, pid=os.getpid(), sig=signal.SIGTERM, include_parent=True, timeout=5):
        """Kill a process tree (including grandchildren) with signal
        "sig" and return a (gone, still_alive) tuple.
        The instance's on_terminate() method is used as the callback and is
        invoked as soon as a child terminates.
        """
if pid == os.getpid():
include_parent = False
parent = psutil.Process(pid=pid)
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
for p in children:
try:
p.send_signal(sig)
except Exception as e:
self.log_process_err(proc=p, err=e.__str__())
gone, alive = psutil.wait_procs(children, timeout=timeout, callback=self.on_terminate)
return gone, alive
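# Minimal usage sketch (assumes a logger compatible with Fluentd/MessageDumper;
# the process name "fluentd" is only an example):
#
#   utils = ProcessUtils(logger)
#   for proc in ProcessUtils.find_procs_by_name("fluentd"):
#       gone, alive = utils.kill_proc_tree(pid=proc.pid, sig=signal.SIGTERM, timeout=5)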
|
the-stack_0_14575 | import discord
import sqlite3
import re
from datetime import datetime
from discord import Message, TextChannel, Member, PartialEmoji
from discord.ext import commands
class Music(commands.Cog, name="Please don't stop the music"):
def __init__(self, client):
self.client = client
@commands.command(aliases=['history'])
@commands.is_owner()
async def gethistory(self, ctx):
count = 0
async for message in ctx.channel.history(limit = None, oldest_first = True):
# if message.author == self.client.user or message.channel.id != 399477609559490560:
# print('returned')
# return
            if 'http' in message.content and message.author != self.client.user:
main = sqlite3.connect('music.db')
cursor = main.cursor()
sql = ("INSERT or IGNORE INTO links(link, date_posted, author, jump_link, count) VALUES(?,?,?,?,?)")
val = (str(message.content), str(message.created_at.date()), str(message.author), str(message.jump_url), count)
cursor.execute(sql, val)
main.commit()
cursor.close()
main.close()
else:
pass
@commands.Cog.listener()
async def on_message(self, message:discord.Message):
if message.author == self.client.user:
return
else:
main = sqlite3.connect('music.db')
cursor = main.cursor()
if message.channel.id == 399477609559490560 and 'http' in message.content:
cursor.execute('SELECT link, date_posted, author, jump_link FROM links WHERE link LIKE ?', (message.content,))
result = cursor.fetchone()
if not result:
sql = ("INSERT INTO links(link, date_posted, author, jump_link) VALUES(?,?,?,?)")
val = (str(message.content), str(message.created_at.date()), str(message.author), str(message.jump_url))
cursor.execute(sql, val)
else:
embed = discord.Embed(colour = 0x7ed321, description = "This song/link was already posted!")
embed.timestamp = datetime.utcnow()
embed.set_author(name="Jungle Jive")
embed.set_footer(text=f'{self.client.user.name}', icon_url=f'{self.client.user.avatar_url}')
embed.add_field(name="Original Poster", value=f"{result[2]}")
embed.add_field(name="Link to original post", value=f"[Click here]({result[3]})")
embed.add_field(name="Date of original post", value=f"{result[1]}")
await message.channel.send(embed=embed)
main.commit()
cursor.close()
main.close()
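    # Illustrative schema assumed by the SELECT/INSERT statements above; the real
    # music.db may differ (the 'count' column is only written by gethistory):
    #
    #   CREATE TABLE IF NOT EXISTS links (
    #       link TEXT PRIMARY KEY,
    #       date_posted TEXT,
    #       author TEXT,
    #       jump_link TEXT,
    #       count INTEGER
    #   );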
@commands.command(aliases=['musicchat', 'chat'])
async def music_chat(self, ctx):
await ctx.send(file=discord.File('music_channel.png'))
def setup(client):
client.add_cog(Music(client))
print('Music Cog loaded') |
the-stack_0_14576 | # -*- coding: utf-8 -*-
"""
simulation script for benchmark data
"""
#%%
import sys
import os
sys.path.insert(0, ".." + os.sep + ".." + os.sep)
from benchmarking.benchmarking_tools import SurfaceCodeBenchmarkingTool
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import pauli_error, depolarizing_error
from surface_code.fitters import GraphDecoder
from surface_code.circuits import SurfaceCodeLogicalQubit
from qiskit import QuantumCircuit, execute, QuantumRegister, ClassicalRegister, Aer
from tqdm import tqdm
import multiprocessing as mp
# Noise Model Function
def get_noise_model(p_err):
error_gate1 = pauli_error([("X", p_err / 2), ("Z", p_err / 2), ("I", 1 - p_err)])
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(error_gate1, "id")
return noise_model
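# For example, get_noise_model(0.01) attaches an error channel to every identity
# ("id") gate that applies X with probability 0.005, Z with probability 0.005,
# and leaves the state untouched with probability 0.99.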
if __name__ == "__main__":
decoder_keys = [(d, 1) for d in range(3, 11, 2)]
benchmarking_tools = []
for decoder_key in tqdm(decoder_keys):
d = decoder_key[0]
T = decoder_key[1]
qubit = SurfaceCodeLogicalQubit(d)
qubit.stabilize()
qubit.identity_data()
qubit.stabilize()
qubit.readout_z()
benchmarking_tools.append(
SurfaceCodeBenchmarkingTool(
decoder=GraphDecoder(d=d, T=T),
readout_circuit=qubit,
noise_model_func=get_noise_model,
)
)
print("\nDONE SETTING UP DECODERS!\n")
for benchmarking_tool in benchmarking_tools:
print(
"\nSIMULATE: (d={},T={})\n".format(benchmarking_tool.d, benchmarking_tool.T)
)
correct_logical_value = 0
noise_values = [
5e-5,
1e-4,
2e-4,
5e-4,
1e-3,
2e-3,
4e-3,
5e-3,
6e-3,
7e-3,
8e-3,
9e-3,
1e-2,
2e-2,
]
benchmarking_tool.simulate_readout_mp(
correct_logical_value=correct_logical_value, noise_values=noise_values
)
# %%
|
the-stack_0_14577 | import requests
from bs4 import BeautifulSoup
import pytest
import time
import hashlib
def find_playstation_price(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price = soup.find(class_='psw-t-title-m').text
return price
def find_apple_store_price(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price = soup.find_all(
class_='inline-list__item inline-list__item--bulleted app-header__list__item--price')[0].get_text()
return price
def find_pioneer_firmware_versions(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price_str = soup.find(class_='version').text
price_arr = price_str.split()
price = price_arr[1]
return price
def find_wacom_price(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price = soup.find(class_='price').text
return price
def check_logsdon(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
resp = soup.find(class_='_1Cfot')
hashed = hashlib.md5(str(resp).encode())
return hashed.hexdigest()
class Common:
def setup(self):
pass
def teardown(self):
time.sleep(5) # Sleep for 5 seconds
class TestPioneer(Common):
def test_pioneer_software_download_updated_firmware(self):
url = 'https://www.pioneerelectronics.com/PUSA/Car/NEX/DMH-W4600NEX'
expected_firmware = '1.31'
dmh_4600_next_fw = find_pioneer_firmware_versions(url)
assert expected_firmware in dmh_4600_next_fw
class TestLogsdon(Common):
def test_logsdon_website_for_any_changes(self):
url = 'https://www.farmhousebeer.com/'
expected_hashes = ['6adf97f83acf6453d4a6a4b1070f3754', '534952d2d7451c0709c8d0263a50005f']
actual_hash = check_logsdon(url)
assert actual_hash in expected_hashes
class TestPlaystation(Common):
def test_actraiser_renaissance(self):
url = 'https://store.playstation.com/en-us/product/UP0082-CUSA25035_00-ACTPS4APPNA00001/'
expected_price = '$29.99'
price = find_playstation_price(url)
assert expected_price == price
|
the-stack_0_14578 | import os
import threading
import queue
import asyncio
def convert_video(Q, file):
    # Convert a single video into 720p and 480p copies with ffmpeg.
    # Note: os.system() blocks the event loop, so the two coroutines below
    # effectively run one after the other rather than truly concurrently.
    if not Q.empty():
        async def convert_720p():
            os.system('ffmpeg -i ' + file + ' -r 30 -b 2M -s 1280x720 ' + file + '_720.mp4')
            print(threading.currentThread())
            return '720P converted successfully'
        async def convert_480p():
            os.system('ffmpeg -i ' + file + ' -r 30 -b 1M -s 720x480 ' + file + '_480.mp4')
            print(threading.currentThread())
            return '480P converted successfully'
        coroutine1 = convert_720p()
        coroutine2 = convert_480p()
        task_list = [
            asyncio.ensure_future(coroutine1),
            asyncio.ensure_future(coroutine2),
        ]
        loop = asyncio.get_event_loop()
        loop.run_until_complete(asyncio.wait(task_list))
        for task in task_list:
            print('task result: ', task.result())
def main():
Q = queue.Queue()
path = 'D:\EC500\Exercise2'
for file in os.listdir(path):
if file.endswith('.mp4'):
Q.put(file)
convert_video(Q,file)
if __name__ == '__main__':
main()
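# Hedged sketch (not part of the original exercise): because os.system() blocks,
# the two conversions above do not overlap. A genuinely concurrent variant could
# use asyncio's subprocess API, roughly:
#
#   async def convert(src, size, bitrate, suffix):
#       proc = await asyncio.create_subprocess_exec(
#           'ffmpeg', '-i', src, '-r', '30', '-b:v', bitrate, '-s', size,
#           src + '_' + suffix + '.mp4')
#       await proc.wait()
#
#   async def convert_both(src):
#       await asyncio.gather(convert(src, '1280x720', '2M', '720'),
#                            convert(src, '720x480', '1M', '480'))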
|
the-stack_0_14579 | import click
import py42.sdk.queries.alerts.filters as f
from py42.exceptions import Py42NotFoundError
from py42.sdk.queries.alerts.alert_query import AlertQuery
from py42.sdk.queries.alerts.filters import AlertState
from py42.sdk.queries.alerts.filters import RuleType
from py42.sdk.queries.alerts.filters import Severity
from py42.util import format_dict
import code42cli.cmds.search.options as searchopt
import code42cli.errors as errors
import code42cli.options as opt
from code42cli.bulk import generate_template_cmd_factory
from code42cli.bulk import run_bulk_process
from code42cli.click_ext.groups import OrderedGroup
from code42cli.cmds.search import SendToCommand
from code42cli.cmds.search.cursor_store import AlertCursorStore
from code42cli.cmds.search.options import server_options
from code42cli.cmds.util import convert_to_or_query
from code42cli.cmds.util import create_time_range_filter
from code42cli.cmds.util import try_get_default_header
from code42cli.date_helper import convert_datetime_to_timestamp
from code42cli.date_helper import limit_date_range
from code42cli.enums import JsonOutputFormat
from code42cli.enums import OutputFormat
from code42cli.file_readers import read_csv_arg
from code42cli.options import format_option
from code42cli.output_formats import OutputFormatter
from code42cli.util import hash_event
from code42cli.util import parse_timestamp
from code42cli.util import warn_interrupt
ALERTS_KEYWORD = "alerts"
ALERT_PAGE_SIZE = 25
begin = opt.begin_option(
ALERTS_KEYWORD,
callback=lambda ctx, param, arg: convert_datetime_to_timestamp(
limit_date_range(arg, max_days_back=90)
),
)
end = opt.end_option(ALERTS_KEYWORD)
checkpoint = opt.checkpoint_option(ALERTS_KEYWORD)
advanced_query = searchopt.advanced_query_option(ALERTS_KEYWORD)
severity_option = click.option(
"--severity",
multiple=True,
type=click.Choice(Severity.choices()),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.Severity),
help="Filter alerts by severity. Defaults to returning all severities.",
)
filter_state_option = click.option(
"--state",
multiple=True,
type=click.Choice(AlertState.choices()),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.AlertState),
help="Filter alerts by status. Defaults to returning all statuses.",
)
actor_option = click.option(
"--actor",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.Actor),
help="Filter alerts by including the given actor(s) who triggered the alert. "
"Arguments must match the actor's cloud alias exactly.",
)
actor_contains_option = click.option(
"--actor-contains",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.contains_filter(f.Actor),
help="Filter alerts by including actor(s) whose cloud alias contains the given string.",
)
exclude_actor_option = click.option(
"--exclude-actor",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.Actor),
help="Filter alerts by excluding the given actor(s) who triggered the alert. "
"Arguments must match actor's cloud alias exactly.",
)
exclude_actor_contains_option = click.option(
"--exclude-actor-contains",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_contains_filter(f.Actor),
help="Filter alerts by excluding actor(s) whose cloud alias contains the given string.",
)
rule_name_option = click.option(
"--rule-name",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.RuleName),
help="Filter alerts by including the given rule name(s).",
)
exclude_rule_name_option = click.option(
"--exclude-rule-name",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.RuleName),
help="Filter alerts by excluding the given rule name(s).",
)
rule_id_option = click.option(
"--rule-id",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.RuleId),
help="Filter alerts by including the given rule id(s).",
)
exclude_rule_id_option = click.option(
"--exclude-rule-id",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.RuleId),
help="Filter alerts by excluding the given rule id(s).",
)
rule_type_option = click.option(
"--rule-type",
multiple=True,
type=click.Choice(RuleType.choices()),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.RuleType),
help="Filter alerts by including the given rule type(s).",
)
exclude_rule_type_option = click.option(
"--exclude-rule-type",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.RuleType),
help="Filter alerts by excluding the given rule type(s).",
)
description_option = click.option(
"--description",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.contains_filter(f.Description),
help="Filter alerts by description. Does fuzzy search by default.",
)
send_to_format_options = click.option(
"-f",
"--format",
type=click.Choice(JsonOutputFormat(), case_sensitive=False),
help="The output format of the result. Defaults to json format.",
default=JsonOutputFormat.RAW,
)
alert_id_arg = click.argument("alert-id")
note_option = click.option("--note", help="A note to attach to the alert.")
update_state_option = click.option(
"--state",
help="The state to give to the alert.",
type=click.Choice(AlertState.choices()),
)
def _get_default_output_header():
return {
"id": "Id",
"name": "RuleName",
"actor": "Username",
"createdAt": "ObservedDate",
"state": "State",
"severity": "Severity",
"description": "Description",
}
def search_options(f):
f = checkpoint(f)
f = advanced_query(f)
f = end(f)
f = begin(f)
return f
def filter_options(f):
f = actor_option(f)
f = actor_contains_option(f)
f = exclude_actor_option(f)
f = exclude_actor_contains_option(f)
f = rule_name_option(f)
f = exclude_rule_name_option(f)
f = rule_id_option(f)
f = exclude_rule_id_option(f)
f = rule_type_option(f)
f = exclude_rule_type_option(f)
f = description_option(f)
f = severity_option(f)
f = filter_state_option(f)
return f
@click.group(cls=OrderedGroup)
@opt.sdk_options(hidden=True)
def alerts(state):
"""Get and send alert data."""
# store cursor getter on the group state so shared --begin option can use it in validation
state.cursor_getter = _get_alert_cursor_store
@alerts.command()
@click.argument("checkpoint-name")
@opt.sdk_options()
def clear_checkpoint(state, checkpoint_name):
"""Remove the saved alert checkpoint from `--use-checkpoint/-c` mode."""
_get_alert_cursor_store(state.profile.name).delete(checkpoint_name)
@alerts.command()
@filter_options
@search_options
@click.option(
"--or-query", is_flag=True, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
@opt.sdk_options()
@click.option(
"--include-all",
default=False,
is_flag=True,
help="Display simple properties of the primary level of the nested response.",
)
@format_option
def search(
cli_state,
format,
begin,
end,
advanced_query,
use_checkpoint,
or_query,
include_all,
**kwargs,
):
"""Search for alerts."""
output_header = try_get_default_header(
include_all, _get_default_output_header(), format
)
formatter = OutputFormatter(format, output_header)
cursor = _get_alert_cursor_store(cli_state.profile.name) if use_checkpoint else None
if use_checkpoint:
checkpoint_name = use_checkpoint
checkpoint = cursor.get(checkpoint_name)
if checkpoint is not None:
begin = checkpoint
query = _construct_query(cli_state, begin, end, advanced_query, or_query)
alerts_gen = cli_state.sdk.alerts.get_all_alert_details(query)
if use_checkpoint:
checkpoint_name = use_checkpoint
# update checkpoint to alertId of last event retrieved
alerts_gen = _dedupe_checkpointed_events_and_store_updated_checkpoint(
cursor, checkpoint_name, alerts_gen
)
alerts_list = []
for alert in alerts_gen:
alerts_list.append(alert)
if not alerts_list:
click.echo("No results found.")
return
formatter.echo_formatted_list(alerts_list)
def _construct_query(state, begin, end, advanced_query, or_query):
if advanced_query:
state.search_filters = advanced_query
else:
if begin or end:
state.search_filters.append(
create_time_range_filter(f.DateObserved, begin, end)
)
if or_query:
state.search_filters = convert_to_or_query(state.search_filters)
query = AlertQuery(*state.search_filters)
query.page_size = ALERT_PAGE_SIZE
query.sort_direction = "asc"
query.sort_key = "CreatedAt"
return query
def _dedupe_checkpointed_events_and_store_updated_checkpoint(
cursor, checkpoint_name, alerts_gen
):
"""De-duplicates events across checkpointed runs. Since using the timestamp of the last event
processed as the `--begin` time of the next run causes the last event to show up again in the
next results, we hash the last event(s) of each run and store those hashes in the cursor to
filter out on the next run. It's also possible that two events have the exact same timestamp, so
`checkpoint_events` needs to be a list of hashes so we can filter out everything that's actually
been processed.
"""
checkpoint_alerts = cursor.get_alerts(checkpoint_name)
new_timestamp = None
new_alerts = []
for alert in alerts_gen:
event_hash = hash_event(alert)
if event_hash not in checkpoint_alerts:
if alert[f.DateObserved._term] != new_timestamp:
new_timestamp = alert[f.DateObserved._term]
new_alerts.clear()
new_alerts.append(event_hash)
yield alert
ts = parse_timestamp(new_timestamp)
cursor.replace(checkpoint_name, ts)
cursor.replace_alerts(checkpoint_name, new_alerts)
@alerts.command(cls=SendToCommand)
@filter_options
@search_options
@click.option(
"--or-query", is_flag=True, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
@opt.sdk_options()
@server_options
@click.option(
"--include-all",
default=False,
is_flag=True,
help="Display simple properties of the primary level of the nested response.",
)
@send_to_format_options
def send_to(cli_state, begin, end, advanced_query, use_checkpoint, or_query, **kwargs):
"""Send alerts to the given server address.
HOSTNAME format: address:port where port is optional and defaults to 514.
"""
cursor = _get_cursor(cli_state, use_checkpoint)
if use_checkpoint:
checkpoint_name = use_checkpoint
checkpoint = cursor.get(checkpoint_name)
if checkpoint is not None:
begin = checkpoint
query = _construct_query(cli_state, begin, end, advanced_query, or_query)
alerts_gen = cli_state.sdk.alerts.get_all_alert_details(query)
if use_checkpoint:
checkpoint_name = use_checkpoint
alerts_gen = _dedupe_checkpointed_events_and_store_updated_checkpoint(
cursor, checkpoint_name, alerts_gen
)
with warn_interrupt():
alert = None
for alert in alerts_gen:
cli_state.logger.info(alert)
if alert is None: # generator was empty
click.echo("No results found.")
def _get_cursor(state, use_checkpoint):
return _get_alert_cursor_store(state.profile.name) if use_checkpoint else None
def _get_alert_cursor_store(profile_name):
return AlertCursorStore(profile_name)
@alerts.command()
@opt.sdk_options()
@alert_id_arg
@click.option(
"--include-observations", is_flag=True, help="View observations of the alert."
)
def show(state, alert_id, include_observations):
"""Display the details of a single alert."""
formatter = OutputFormatter(OutputFormat.TABLE, _get_default_output_header())
try:
response = state.sdk.alerts.get_details(alert_id)
except Py42NotFoundError:
raise errors.Code42CLIError(f"No alert found with ID '{alert_id}'.")
alert = response["alerts"][0]
formatter.echo_formatted_list([alert])
# Show note details
note = alert.get("note")
if note:
click.echo("\nNote:\n")
click.echo(format_dict(note))
if include_observations:
observations = alert.get("observations")
if observations:
click.echo("\nObservations:\n")
click.echo(format_dict(observations))
else:
click.echo("\nNo observations found.")
@alerts.command()
@opt.sdk_options()
@alert_id_arg
@update_state_option
@note_option
def update(cli_state, alert_id, state, note):
"""Update alert information."""
_update_alert(cli_state.sdk, alert_id, state, note)
@alerts.group(cls=OrderedGroup)
@opt.sdk_options(hidden=True)
def bulk(state):
"""Tools for executing bulk alert actions."""
pass
UPDATE_ALERT_CSV_HEADERS = ["id", "state", "note"]
update_alerts_generate_template = generate_template_cmd_factory(
group_name=ALERTS_KEYWORD,
commands_dict={"update": UPDATE_ALERT_CSV_HEADERS},
help_message="Generate the CSV template needed for bulk alert commands.",
)
bulk.add_command(update_alerts_generate_template)
@bulk.command(
name="update",
help=f"Bulk update alerts using a CSV file with format: {','.join(UPDATE_ALERT_CSV_HEADERS)}",
)
@opt.sdk_options()
@read_csv_arg(headers=UPDATE_ALERT_CSV_HEADERS)
def bulk_update(cli_state, csv_rows):
"""Bulk update alerts."""
sdk = cli_state.sdk
def handle_row(id, state, note):
_update_alert(sdk, id, state, note)
run_bulk_process(
handle_row, csv_rows, progress_label="Updating alerts:",
)
def _update_alert(sdk, alert_id, alert_state, note):
if alert_state:
sdk.alerts.update_state(alert_state, [alert_id], note=note)
elif note:
sdk.alerts.update_note(alert_id, note)
|
the-stack_0_14581 | """
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
import base64
import datetime
import json
import re
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes
from django.utils.module_loading import import_string
_SEP_UNSAFE = re.compile(r'^[A-z0-9-_=]*$')
class BadSignature(Exception):
"""Signature does not match."""
pass
class SignatureExpired(BadSignature):
"""Signature timestamp is older than required max_age."""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest()).decode()
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_string(settings.SIGNING_BACKEND)
key = force_bytes(settings.SECRET_KEY) # SECRET_KEY may be str or bytes.
return Signer(b'django.http.cookies' + key, salt=salt)
class JSONSerializer:
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Return URL-safe, hmac/SHA1 signed base64 compressed JSON string. If key is
None, use settings.SECRET_KEY instead.
If compress is True (not the default), check if compressing using zlib can
save some space. Prepend a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data).decode()
if is_compressed:
base64d = '.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raise BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign() returns str but base64 and zlib compression
# operate on bytes.
base64d = TimestampSigner(key, salt=salt).unsign(s, max_age=max_age).encode()
decompress = base64d[:1] == b'.'
if decompress:
# It's compressed; uncompress it first
base64d = base64d[1:]
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
class Signer:
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.key = key or settings.SECRET_KEY
self.sep = sep
if _SEP_UNSAFE.match(self.sep):
raise ValueError(
'Unsafe Signer separator: %r (cannot be empty or consist of '
'only A-z0-9-_=)' % sep,
)
self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
def signature(self, value):
return base64_hmac(self.salt + 'signer', value, self.key)
def sign(self, value):
return '%s%s%s' % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return value
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = '%s%s%s' % (value, self.sep, self.timestamp())
return super().sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super().unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value
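# Usage sketch (illustrative; the salt and values are examples):
#
#   signer = TimestampSigner(salt='example.salt')
#   token = signer.sign('hello')                 # 'hello:<base62 timestamp>:<signature>'
#   value = signer.unsign(token, max_age=3600)   # raises SignatureExpired when too old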
|
the-stack_0_14582 | import os
import sys
import miind.include as include
import miind.algorithms as algorithms
import miind.nodes as nodes
import miind.connections as connections
import miind.simulation as simulation
import miind.variables as variables
import xml.etree.ElementTree as ET
import argparse
import miind.directories as directories
XML_EXTENSION = '.xml'
# Nothing too fancy for the weight type
WEIGHTTYPES = ['double', 'DelayedConnection']
def generate_preamble(outfile):
outfile.write('//Machine-generated by miind.py. Edit at your own risk.\n\n')
for inc in include.includes:
outfile.write(inc +'\n')
return
def generate_closing(outfile, steps, type, t_step):
outfile.write('\tnetwork.configureSimulation(par_run);\n')
# outfile.write('\tstd::thread t1(TwoDLib::Display::stat_runthreaded);\n')
# outfile.write('\tt1.detach();\n')
# outfile.write('\tnetwork.evolve();\n')
if type == "DelayedConnection":
s = "MPILib::" + type
else:
s = "double"
outfile.write('\tstd::vector<MPILib::NodeId> nodes;\n')
outfile.write('\tnodes.push_back(0);\n')
outfile.write('\tTwoDLib::Display::getInstance()->animate(true, nodes,' + t_step + ');\n')
outfile.write('\tnetwork.startSimulation();\n')
outfile.write('\tMPILib::utilities::ProgressBar *pb = new MPILib::utilities::ProgressBar(' + steps + ');\n')
outfile.write('\tlong count = 0;\n')
outfile.write('\twhile(count < ' + steps + ') {\n')
outfile.write('\t\tnetwork.evolveSingleStep(std::vector<MPILib::ActivityType>());\n')
outfile.write('\t\tnetwork.reportNodeActivities(nodes);\n')
outfile.write('\t\tTwoDLib::Display::getInstance()->updateDisplay(count);\n')
outfile.write('\t\tTwoDLib::GridReport<TwoDLib::GridAlgorithm<DelayedConnection>>::getInstance()->reportDensity();\n')
outfile.write('\t\t(*pb)++;\n')
outfile.write('\t\tcount++;\n')
outfile.write('\t}\n')
outfile.write('\tnetwork.endSimulation();\n')
outfile.write('\t} catch(std::exception& exc){\n')
outfile.write('\t\tstd::cout << exc.what() << std::endl;\n')
outfile.write('#ifdef ENABLE_MPI\n')
outfile.write('\t//Abort the MPI environment in the correct way :\n')
outfile.write('\tenv.abort(1);\n')
outfile.write('#endif\n')
outfile.write('\t}\n\n')
outfile.write('\tMPILib::utilities::MPIProxy().barrier();\n')
outfile.write('\tt.stop();\n')
outfile.write('\tif (MPILib::utilities::MPIProxy().getRank() == 0) {\n')
outfile.write('\n\t\tstd::cout << \"Overall time spend\\n\";\n')
outfile.write('\t\tt.report();\n')
outfile.write('\t}\n')
outfile.write('\treturn 0;\n}\n')
for t in algorithms.RATEFUNCTIONS:
outfile.write(t)
return
def define_network_type(outfile, type):
if type == "DelayedConnection":
s = "MPILib::" + type
else:
s = "double"
return 'typedef MPILib::MPINetwork<' + s + ', MPILib::utilities::CircularDistribution> Network;\n'
def parse_xml(infile, outfile):
tree=ET.fromstring(infile.read())
m=tree.find('WeightType')
s = m.text
return define_network_type(outfile,s), tree
def generate_opening(outfile):
outfile.write('int main(int argc, char *argv[]){\n\tNetwork network;\n')
outfile.write('\tboost::timer::auto_cpu_timer t;\n\n')
outfile.write('#ifdef ENABLE_MPI\n')
outfile.write('\t// initialise the mpi environment this cannot be forwarded to a class\n')
outfile.write('\tboost::mpi::environment env(argc, argv);\n')
outfile.write('#endif\n\n')
outfile.write('\ttry {')
def model_name(fn):
    '''Identify the model files mentioned in an XML file, e.g. so that the right
    model files can be placed in the same directory as the XML file.'''
infile = open(fn)
tree=ET.fromstring(infile.read())
ma = tree.findall('Algorithms/Algorithm')
modelnames = []
for a in ma:
if a.attrib['type'] == 'MeshAlgorithm' or a.attrib['type'] == 'GridAlgorithm':
modelnames.append(a.attrib['modelfile'])
return modelnames
def matrix_transform_name(fn):
    '''Identify the matrix transform files mentioned in an XML file, e.g. so that the right
    transform files can be placed in the same directory as the XML file.'''
infile = open(fn)
tree=ET.fromstring(infile.read())
ma = tree.findall('Algorithms/Algorithm')
tmatnames = []
for a in ma:
if a.attrib['type'] == 'GridAlgorithm':
tmatnames.append(a.attrib['transformfile'])
return tmatnames
def matrix_names(fn):
'''Find the file names of all MatrixFiles, mentioned in an XML file.'''
infile = open(fn)
tree=ET.fromstring(infile.read())
ma = tree.findall('Algorithms/Algorithm/MatrixFile')
matrixnames = []
for a in ma:
matrixnames.append(a.text)
return matrixnames
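# Illustrative helper (an assumption, not part of the original script): the three
# look-up functions above are typically used together to collect every auxiliary
# file an XML simulation refers to, e.g. before copying them next to the XML file.
# 'xml_path' is whatever .xml file the caller wants to inspect.
def _collect_referenced_files(xml_path):
    '''Return the model, transform and matrix file names referenced by an XML file.'''
    return {
        'models': model_name(xml_path),
        'transforms': matrix_transform_name(xml_path),
        'matrices': matrix_names(xml_path),
    }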
def generate_outputfile(infile, outfile, enable_root):
generate_preamble(outfile)
nettype, tree = parse_xml(infile,outfile)
outfile.write(nettype)
outfile.write('\t// defining variables\n') # whatever variables are use are global
variable_list = tree.findall('Variable')
variables.parse_variables(variable_list,outfile)
algies = tree.findall('Algorithms')
if len(algies) != 1:
raise ValueError
alg_list = algies[0].findall('Algorithm')
weighttype = tree.find('WeightType')
generate_opening(outfile)
outfile.write('\t// generating algorithms\n')
algorithms.parse_algorithms(alg_list,weighttype,outfile)
node_list = tree.findall('Nodes/Node')
outfile.write('\t// generating nodes\n')
nodes.parse_nodes(node_list,weighttype,outfile)
outfile.write('\t// generating connections\n')
connection_list = tree.findall('Connections/Connection')
connections.parse_connections(connection_list,weighttype,outfile)
outfile.write('\t// generation simulation parameter\n')
simhand = tree.find('SimulationIO')
simulation.parse_simulation(simhand,outfile,enable_root)
simpar = tree.find('SimulationRunParameter')
simulation.parse_parameter(simpar,outfile)
t_begin = tree.find('SimulationRunParameter/t_begin')
t_end = tree.find('SimulationRunParameter/t_end')
t_step = tree.find('SimulationRunParameter/t_step')
m=tree.find('WeightType')
s = m.text
generate_closing(outfile, '(' + t_end.text + ' - ' + t_begin.text + ') / ' + t_step.text , s , t_step.text)
algorithms.reset_algorithms()
nodes.reset_nodes()
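# Minimal driver sketch (an assumption, not from the original source): how
# generate_outputfile() is typically invoked to turn a MIIND XML description
# into a generated C++ source file, e.g.
# _generate_cpp_from_xml('simulation.xml', 'simulation.cpp') with hypothetical
# file names.
def _generate_cpp_from_xml(xml_path, cpp_path, enable_root=True):
    with open(xml_path) as infile, open(cpp_path, 'w') as outfile:
        generate_outputfile(infile, outfile, enable_root)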
|
the-stack_0_14583 | # -*- coding: utf-8 -*-
"""
Code source: https://github.com/KaiyangZhou/deep-person-reid
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from itertools import repeat
from collections import namedtuple, defaultdict
import torch
__all__ = ['compute_model_complexity']
"""
Utility
"""
def _ntuple(n):
def parse(x):
if isinstance(x, int):
return tuple(repeat(x, n))
return x
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
"""
Convolution
"""
def hook_convNd(m, x, y):
k = torch.prod(torch.Tensor(m.kernel_size)).item()
cin = m.in_channels
flops_per_ele = k * cin # + (k*cin-1)
if m.bias is not None:
flops_per_ele += 1
flops = flops_per_ele * y.numel() / m.groups
return int(flops)
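# Worked example (illustrative): with the counting convention implemented above
# (one FLOP per multiplication; the commented-out term would also count the
# additions of the dot product), a Conv2d with a 3x3 kernel, in_channels=64,
# bias and groups=1 costs 3*3*64 + 1 = 577 FLOPs per output element, so an
# output tensor with 32*112*112 = 401,408 elements costs
# 577 * 401,408 ≈ 2.3e8 FLOPs for the layer.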
"""
Pooling
"""
def hook_maxpool1d(m, x, y):
flops_per_ele = m.kernel_size - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_maxpool2d(m, x, y):
k = _pair(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
# ops: compare
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_maxpool3d(m, x, y):
k = _triple(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_avgpool1d(m, x, y):
flops_per_ele = m.kernel_size
flops = flops_per_ele * y.numel()
return int(flops)
def hook_avgpool2d(m, x, y):
k = _pair(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_avgpool3d(m, x, y):
k = _triple(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapmaxpool1d(m, x, y):
x = x[0]
out_size = m.output_size
k = math.ceil(x.size(2) / out_size)
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapmaxpool2d(m, x, y):
x = x[0]
out_size = _pair(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapmaxpool3d(m, x, y):
x = x[0]
out_size = _triple(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapavgpool1d(m, x, y):
x = x[0]
out_size = m.output_size
k = math.ceil(x.size(2) / out_size)
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapavgpool2d(m, x, y):
x = x[0]
out_size = _pair(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapavgpool3d(m, x, y):
x = x[0]
out_size = _triple(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
"""
Non-linear activations
"""
def hook_relu(m, x, y):
# eq: max(0, x)
num_ele = y.numel()
return int(num_ele)
def hook_leakyrelu(m, x, y):
# eq: max(0, x) + negative_slope*min(0, x)
num_ele = y.numel()
flops = 3 * num_ele
return int(flops)
"""
Normalization
"""
def hook_batchnormNd(m, x, y):
num_ele = y.numel()
flops = 2 * num_ele # mean and std
if m.affine:
flops += 2 * num_ele # gamma and beta
return int(flops)
def hook_instancenormNd(m, x, y):
return hook_batchnormNd(m, x, y)
def hook_groupnorm(m, x, y):
return hook_batchnormNd(m, x, y)
def hook_layernorm(m, x, y):
num_ele = y.numel()
flops = 2 * num_ele # mean and std
if m.elementwise_affine:
flops += 2 * num_ele # gamma and beta
return int(flops)
"""
Linear
"""
def hook_linear(m, x, y):
flops_per_ele = m.in_features # + (m.in_features-1)
if m.bias is not None:
flops_per_ele += 1
flops = flops_per_ele * y.numel()
return int(flops)
__generic_flops_counter = {
# Convolution
'Conv1d': hook_convNd,
'Conv2d': hook_convNd,
'Conv3d': hook_convNd,
# Pooling
'MaxPool1d': hook_maxpool1d,
'MaxPool2d': hook_maxpool2d,
'MaxPool3d': hook_maxpool3d,
'AvgPool1d': hook_avgpool1d,
'AvgPool2d': hook_avgpool2d,
'AvgPool3d': hook_avgpool3d,
'AdaptiveMaxPool1d': hook_adapmaxpool1d,
'AdaptiveMaxPool2d': hook_adapmaxpool2d,
'AdaptiveMaxPool3d': hook_adapmaxpool3d,
'AdaptiveAvgPool1d': hook_adapavgpool1d,
'AdaptiveAvgPool2d': hook_adapavgpool2d,
'AdaptiveAvgPool3d': hook_adapavgpool3d,
# Non-linear activations
'ReLU': hook_relu,
'ReLU6': hook_relu,
'LeakyReLU': hook_leakyrelu,
# Normalization
'BatchNorm1d': hook_batchnormNd,
'BatchNorm2d': hook_batchnormNd,
'BatchNorm3d': hook_batchnormNd,
'InstanceNorm1d': hook_instancenormNd,
'InstanceNorm2d': hook_instancenormNd,
'InstanceNorm3d': hook_instancenormNd,
'GroupNorm': hook_groupnorm,
'LayerNorm': hook_layernorm,
# Linear
'Linear': hook_linear,
}
__conv_linear_flops_counter = {
# Convolution
'Conv1d': hook_convNd,
'Conv2d': hook_convNd,
'Conv3d': hook_convNd,
# Linear
'Linear': hook_linear,
}
def _get_flops_counter(only_conv_linear):
if only_conv_linear:
return __conv_linear_flops_counter
return __generic_flops_counter
def compute_model_complexity(model, input_size, verbose=False, only_conv_linear=True):
"""Returns number of parameters and FLOPs.
.. note::
(1) this function only provides an estimate of the theoretical time complexity
rather than the actual running time which depends on implementations and hardware,
and (2) the FLOPs is only counted for layers that are used at test time. This means
that redundant layers such as person ID classification layer will be ignored as it
is discarded when doing feature extraction. Note that the inference graph depends on
how you construct the computations in ``forward()``.
Args:
model (nn.Module): network model.
input_size (tuple): input size, e.g. (1, 3, 256, 128).
verbose (bool, optional): shows detailed complexity of
each module. Default is False.
only_conv_linear (bool, optional): only considers convolution
and linear layers when counting flops. Default is True.
If set to False, flops of all layers will be counted.
"""
registered_handles = []
layer_list = []
layer = namedtuple('layer', ['class_name', 'params', 'flops'])
def _add_hooks(m):
def _has_submodule(m):
return len(list(m.children())) > 0
def _hook(m, x, y):
params = sum(p.numel() for p in m.parameters())
class_name = str(m.__class__.__name__)
flops_counter = _get_flops_counter(only_conv_linear)
if class_name in flops_counter:
flops = flops_counter[class_name](m, x, y)
else:
flops = 0
layer_list.append(layer(class_name=class_name, params=params, flops=flops))
# only consider the very basic nn layer
if _has_submodule(m):
return
handle = m.register_forward_hook(_hook)
registered_handles.append(handle)
default_train_mode = model.training
model.eval().apply(_add_hooks)
input = torch.rand(input_size)
if next(model.parameters()).is_cuda:
input = input.cuda()
model(input) # forward
for handle in registered_handles:
handle.remove()
model.train(default_train_mode)
if verbose:
per_module_params = defaultdict(list)
per_module_flops = defaultdict(list)
total_params, total_flops = 0, 0
for layer in layer_list:
total_params += layer.params
total_flops += layer.flops
if verbose:
per_module_params[layer.class_name].append(layer.params)
per_module_flops[layer.class_name].append(layer.flops)
if verbose:
num_udscore = 55
print(' {}'.format('-' * num_udscore))
print(' Model complexity with input size {}'.format(input_size))
print(' {}'.format('-' * num_udscore))
for class_name in per_module_params:
params = int(np.sum(per_module_params[class_name]))
flops = int(np.sum(per_module_flops[class_name]))
print(' {} (params={:,}, flops={:,})'.format(class_name, params, flops))
print(' {}'.format('-' * num_udscore))
print(' Total (params={:,}, flops={:,})'.format(total_params, total_flops))
print(' {}'.format('-' * num_udscore))
return total_params, total_flops
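# Minimal usage sketch (an assumption, not part of the original file): measuring
# a torchvision ResNet-50. The model choice and the (1, 3, 224, 224) input size
# are hypothetical examples; any nn.Module with a standard forward() works the
# same way.
def _example_resnet50_complexity():
    import torchvision
    model = torchvision.models.resnet50()
    num_params, flops = compute_model_complexity(
        model, (1, 3, 224, 224), verbose=True, only_conv_linear=True
    )
    print('ResNet-50: params={:,}, flops={:,}'.format(num_params, flops))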
|
the-stack_0_14586 | import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
BASE_DIR = PACKAGE_ROOT
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "dev.db",
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = "yc)7(o27pavgtr&$3i3si@d8zulc&0b#1kpof7f%jo@&+vlc$g"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"account.context_processors.account",
"pinax_theme_bootstrap.context_processors.theme",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
ROOT_URLCONF = "rakesh.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "rakesh.wsgi.application"
TEMPLATE_DIRS = [
os.path.join(PACKAGE_ROOT, "templates"),
]
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"pinax_theme_bootstrap",
# external
"account",
"eventlog",
"metron",
"easy_thumbnails",
"kaleo",
"teams",
"wiki",
# project
"rakesh",
"rakesh.profiles",
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False
ACCOUNT_LOGIN_REDIRECT_URL = "home"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
WIKI_HOOKSET = "rakesh.hooks.ProjectWikiHookset"
WIKI_BINDERS = [
"rakesh.binders.UserBinder",
"rakesh.binders.TeamBinder"
]
AUTHENTICATION_BACKENDS = [
"account.auth_backends.UsernameAuthenticationBackend",
"django.contrib.auth.backends.ModelBackend"
]
AUTH_PROFILE_MODULE = "profiles.Profile"
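# Optional override hook (an assumption, not part of the original settings):
# a common Django pattern is to let a per-machine module override the values
# above; 'local_settings' is a hypothetical module living next to this file.
try:
    from .local_settings import *  # noqa: F401,F403
except ImportError:
    pass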
|
the-stack_0_14587 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
try:
import frida
except ImportError:
sys.exit('install frida\nsudo python3 -m pip install frida')
def err(msg):
sys.stderr.write(msg + '\n')
def on_message(message, data):
if message['type'] == 'error':
err('[!] ' + message['stack'])
elif message['type'] == 'send':
print('[+] ' + message['payload'])
else:
print(message)
def kill_process(target_process):
cmd = 'adb shell pm clear {} 1> /dev/null'.format(target_process)
os.system(cmd)
def main():
target_process = sys.argv[1]
#kill_process(target_process)
device = frida.get_usb_device()
try:
started = False
session = device.attach(target_process)
except frida.ProcessNotFoundError:
print('Starting process {}...\n'.format(target_process))
started = True
try:
pid = device.spawn([target_process])
except frida.NotSupportedError:
            sys.exit('An error occurred while attaching to the process\n')
session = device.attach(pid)
script = session.create_script("""
Java.perform(function () {
var Log = Java.use("android.util.Log");
Log.e.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.e( ' + tag + ', ' + entry + ' )');
console.log('');
return this.e.apply(this, arguments);
}
Log.w.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.w( ' + tag + ', ' + entry + ' )');
console.log('');
return this.w.apply(this, arguments);
}
Log.i.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.i( ' + tag + ', ' + entry + ' )');
console.log('');
return this.i.apply(this, arguments);
}
Log.d.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.d( ' + tag + ', ' + entry + ' )');
console.log('');
return this.d.apply(this, arguments);
}
});
""")
script.on('message', on_message)
print('[!] Press <Enter> at any time to detach from instrumented program.\n\n')
script.load()
if started:
device.resume(pid)
input()
session.detach()
if __name__ == '__main__':
if len(sys.argv) != 2:
usage = 'usage {} <process name or PID>\n\n'.format(__file__)
sys.exit(usage)
main()
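# Example invocation (illustrative; 'com.example.app' is a hypothetical package
# name). The device must be reachable over adb and running frida-server for
# frida.get_usb_device() to succeed:
#
#   python3 <this_script>.py com.example.app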
|
the-stack_0_14590 | import asyncio
import base64
import binascii
import hashlib
import json
import logging
import os
import random
import requests
import sys
import time
from urllib.parse import urlparse
from qrcode import QRCode
from aiohttp import ClientError
from uuid import uuid4
from datetime import date
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # noqa
from runners.support.agent import DemoAgent, default_genesis_txns
from runners.support.utils import (
log_json,
log_msg,
log_status,
log_timer,
prompt,
prompt_loop,
require_indy,
)
CRED_PREVIEW_TYPE = (
"did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview"
)
SELF_ATTESTED = os.getenv("SELF_ATTESTED")
LOGGER = logging.getLogger(__name__)
TAILS_FILE_COUNT = int(os.getenv("TAILS_FILE_COUNT", 100))
class VSWAgent(DemoAgent):
def __init__(
self,
http_port: int,
admin_port: int,
tails_server_base_url: str = None,
**kwargs,
):
super().__init__(
"VSW.Agent",
http_port,
admin_port,
prefix="VSW",
tails_server_base_url=tails_server_base_url,
extra_args=["--auto-accept-invites", "--auto-accept-requests"],
**kwargs,
)
self.connection_id = None
self._connection_ready = asyncio.Future()
self.cred_state = {}
self.cred_done = asyncio.Future()
# TODO define a dict to hold credential attributes
# based on credential_definition_id
self.cred_attrs = {}
self.proof_done = asyncio.Future()
async def detect_connection(self):
await self._connection_ready
async def credential_complete(self):
await self.cred_done
async def proof_complete(self):
await self.proof_done
@property
def connection_ready(self):
return self._connection_ready.done() and self._connection_ready.result()
async def handle_connections(self, message):
if message["connection_id"] == self.connection_id:
if message["state"] == "active" and not self._connection_ready.done():
self.log("Connected")
self._connection_ready.set_result(True)
async def handle_present_proof(self, message):
state = message["state"]
presentation_exchange_id = message["presentation_exchange_id"]
self.log(
"Presentation: state =",
state,
", presentation_exchange_id =",
presentation_exchange_id,
)
if state == "presentation_received":
log_status("#27 Process the proof provided by X")
log_status("#28 Check if proof is valid")
proof = await self.admin_POST(
f"/present-proof/records/{presentation_exchange_id}/verify-presentation"
)
self.log("Proof =", proof["verified"])
# if presentation is a vsw schema (app publication),
# check the values received
pres = message["presentation"]
self.log("pres:", pres)
name = pres['requested_proof']['revealed_attrs']['0_name_uuid']['raw']
url = pres['requested_proof']['revealed_attrs']['0_url_uuid']['raw']
digest = pres['requested_proof']['revealed_attrs']['0_digest_uuid']['raw']
response = requests.get(url, allow_redirects=True)
if response.status_code != 200:
print("Failed to download file from URL")
sys.exit(1)
computed = hashlib.sha256(response.content).hexdigest()
if computed != digest:
print("SHA does not match")
print(computed)
sys.exit(1)
else:
open(f'vsw/{name}.wasm', 'wb').write(response.content)
self.log("SUCCESS")
self.proof_done.set_result(True)
async def handle_basicmessages(self, message):
self.log("Received message:", message["content"])
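# Illustrative helper (an assumption, not part of the original runner): the same
# download-and-verify step performed in handle_present_proof above, factored out
# so it can be exercised without a running agent. 'url' and 'digest' correspond
# to the revealed 'url' and 'digest' attributes of the vsw schema credential.
def _download_and_verify(url, digest):
    response = requests.get(url, allow_redirects=True)
    if response.status_code != 200:
        raise RuntimeError('Failed to download file from URL')
    computed = hashlib.sha256(response.content).hexdigest()
    if computed != digest:
        raise RuntimeError('SHA-256 mismatch, got {}'.format(computed))
    return response.content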
async def main(
start_port: int,
name: str,
show_timing: bool = False,
):
with open('/home/indy/vsw/.config.json') as f:
config = json.load(f)
genesis = await default_genesis_txns()
if not genesis:
print("Error retrieving ledger genesis transactions")
sys.exit(1)
agent = None
try:
log_status("#1 Provision an agent and wallet, get back configuration details")
agent = VSWAgent(
start_port,
start_port + 1,
genesis_data=genesis,
timing=show_timing,
)
await agent.listen_webhooks(start_port + 2)
# FIXME: This user should not have to publish their DID, but if I remove the next line it fails
await agent.register_did()
with log_timer("Startup duration:"):
await agent.start_process()
log_msg("Admin URL is at:", agent.admin_url)
log_msg("Endpoint URL is at:", agent.endpoint)
# Connect to repo
log_status("#9 Connect to repo")
connection = await agent.admin_POST("/connections/receive-invitation", config['invitation'])
agent.connection_id = connection["connection_id"]
log_json(connection, label="Invitation response:")
await agent.detect_connection()
log_status("#20 Request app credential from repo")
req_attrs = [
{
"name": "name",
"value": name,
"restrictions": [{"schema_name": "vsw schema"}]
},
{
"name": "url",
"restrictions": [{"schema_name": "vsw schema"}]
},
{
"name": "digest",
"restrictions": [{"schema_name": "vsw schema"}]
}
]
req_preds = []
indy_proof_request = {
"name": "Retrieve by Name",
"version": "1.0",
"nonce": str(uuid4().int),
"requested_attributes": {
f"0_{req_attr['name']}_uuid": req_attr
for req_attr in req_attrs
},
"requested_predicates": {}
}
proof_request_web_request = {
"connection_id": agent.connection_id,
"proof_request": indy_proof_request
}
# this sends the request to our agent, which forwards it to the repo
# (based on the connection_id)
await agent.admin_POST(
"/present-proof/send-request",
proof_request_web_request
)
await agent.proof_complete()
finally:
terminated = True
try:
if agent:
await agent.terminate()
except Exception:
LOGGER.exception("Error terminating agent:")
terminated = False
await asyncio.sleep(0.1)
if not terminated:
os._exit(1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs a VSW agent.")
parser.add_argument(
"-p",
"--port",
type=int,
default=8050,
metavar=("<port>"),
help="Choose the starting port number to listen on",
)
parser.add_argument(
"--timing", action="store_true", help="Enable timing information"
)
parser.add_argument("name", type=str, help="name of app to install")
args = parser.parse_args()
ENABLE_PYDEVD_PYCHARM = os.getenv("ENABLE_PYDEVD_PYCHARM", "").lower()
ENABLE_PYDEVD_PYCHARM = ENABLE_PYDEVD_PYCHARM and ENABLE_PYDEVD_PYCHARM not in (
"false",
"0",
)
PYDEVD_PYCHARM_HOST = os.getenv("PYDEVD_PYCHARM_HOST", "localhost")
PYDEVD_PYCHARM_CONTROLLER_PORT = int(
os.getenv("PYDEVD_PYCHARM_CONTROLLER_PORT", 5001)
)
if ENABLE_PYDEVD_PYCHARM:
try:
import pydevd_pycharm
print(
"VSW remote debugging to "
f"{PYDEVD_PYCHARM_HOST}:{PYDEVD_PYCHARM_CONTROLLER_PORT}"
)
pydevd_pycharm.settrace(
host=PYDEVD_PYCHARM_HOST,
port=PYDEVD_PYCHARM_CONTROLLER_PORT,
stdoutToServer=True,
stderrToServer=True,
suspend=False,
)
except ImportError:
print("pydevd_pycharm library was not found")
require_indy()
try:
asyncio.get_event_loop().run_until_complete(
main(
args.port,
args.name,
args.timing,
)
)
except KeyboardInterrupt:
os._exit(1)
|
the-stack_0_14593 | # -*- coding: utf-8 -*-
"""
module to do uiauto
"""
import json
import re
import codecs
import time
from urllib2 import URLError
from appium import webdriver
from selenium.common.exceptions import WebDriverException
from logger import logger
from emulator import ADB
from db import DB
from smartmonkey import Navigator, Stabilizer
from myexceptions import PathNotDefinedInConfig, IdPHandlingException, EmulatorActionException,\
TestInitException
RP_REQUEST_TIMEOUT = 20
IDP_STATUS_TIMEOUT = 20
class UIAction(object):
"""class to simplify ui action process"""
def __init__(self, driver, emulator=None, idp=None, config_file=None, package=None, version=None):
if 'udid' in driver.desired_capabilities and driver.desired_capabilities['udid']:
self.adb = ADB(serial=driver.desired_capabilities['udid'])
elif emulator and emulator.serial:
self.adb = ADB(serial=emulator.serial)
else:
self.adb = ADB()
if not package:
package = self.adb.current_package()
version = self.adb.current_version(package)
self.emulator = emulator
self.package = package
self.driver = driver
self.idp = idp
self.app_style = 'international' if self.idp == 'fb' else 'chinese'
self.config_file = config_file
self.config = {}
if config_file:
self.loaded = self.load_config_from_file(config_file)
else:
if not idp:
raise Exception("IdP not specified")
self.loaded = self.load_config_from_db(package, idp, version=version)
if self.loaded:
if 'home_activity' in self.config and self.config['home_activity']:
self.has_home_activity = True
else:
self.has_home_activity = False
self.stabilizer = Stabilizer(self.driver, package=self.package, app_style=self.app_style)
def load_config_from_db(self, package, idp, version=None):
"""load configuration from database"""
config = DB().fetch_config(package, idp, version=version)
if not config:
return False
else:
logger.debug(u'Config for %s loaded from DB', package)
return self.set_config(config)
def load_config_from_file(self, filename):
"""load configuration from config file"""
try:
with open(filename, 'r') as config_f:
result = self.set_config(config_f.read())
logger.debug(u'Config for %s loaded from %s', self.config['package'], filename)
return result
except EnvironmentError:
logger.error(u'Read file error: %s', filename)
return False
def set_config(self, config):
"""initialize configuration from json"""
try:
if isinstance(config, str):
config = json.loads(config)
# check required objects
package_name = config['package']
package_version = config['version']
installed_version = self.adb.current_version(package_name)
config_version = package_version
if installed_version != config_version:
logger.warning(u'Version inconsistent - Installed: %s, Config: %s',\
installed_version, config_version)
self.config = config
return True
except ValueError:
logger.error(u'Invalid path format')
raise
def login(self):
"""perform navigation to get to login page"""
assert self.loaded
login_path = self.path_for('login')
origin = self.origin_for('login')
if origin:
self.stabilizer.better_start_activity(origin)
else:
self.start_home_activity()
logger.info(u"[+] Navigate for login")
loginer = Navigator(self.driver, path=login_path, package=self.config['package'],\
app_style=self.app_style)
return loginer.navigate()
def login_from_snapshot(self, tag):
"""restore session from snapshot"""
# check emulator
if not self.emulator:
raise EmulatorActionException('No emulator instance is specified')
# check snapshot
tags_in_use = [ x['tag'] for x in self.emulator.list_snapshot()]
if tag not in tags_in_use:
raise EmulatorActionException('No snapshot with tag {}'.format(tag))
# try to load snapshot
if not self.emulator.load_snapshot(tag):
raise EmulatorActionException('Fail to load snapshot {}'.format(tag))
# try to restore appium session
desired_caps = self.driver.capabilities
desired_caps['autoLaunch'] = False
try:
self.driver = webdriver.Remote(self.driver.command_executor._url, desired_caps)
except URLError:
raise TestInitException('appium is not running')
# try to login
        # Only the facebook flow needs an explicit login step after restoring a
        # snapshot here; other IdPs fall back to 'Uncertain' (an assumed default,
        # the original left status undefined) so idp_handler() can probe the state.
        if self.idp == 'fb':
            status = 'IdpNeedLogin'
        else:
            status = 'Uncertain'
        return status
def logout(self, reset=False):
"""perform logout action"""
assert self.loaded
self.clear_sdcard()
if reset:
logger.info(u"[+] App reset")
return self.driver.reset()
else:
logout_path = self.path_for('logout')
origin = self.origin_for('logout')
if origin:
self.stabilizer.better_start_activity(origin)
else:
self.start_home_activity()
logger.info(u"[+] Navigate for logout")
logoutter = Navigator(self.driver, path=logout_path, package=self.config['package'],\
app_style=self.app_style)
return logoutter.navigate()
def clear_sdcard(self):
"""clear sdcard and keep only essential files"""
files = self.adb.ls('/sdcard/')
files_reserved = ['Android', 'DCIM', 'Download', 'Movies', 'Music', 'Notifications', 'Pictures',
'Podcasts', 'Ringtones', 'UxinSDK', 'backups', 'sina', 'tencent']
for fname in files:
if fname in files_reserved:
continue
self.adb.rm('/sdcard/{}'.format(fname))
def user_info(self):
"""retrieve user info"""
assert self.loaded
info_path = self.path_for('user_info')
origin = self.origin_for('user_info')
if origin:
self.stabilizer.better_start_activity(origin)
else:
self.start_home_activity()
logger.info(u"[+] Navigate for user info")
user_getter = Navigator(self.driver, path=info_path, package=self.config['package'],\
app_style=self.app_style)
status = user_getter.navigate()
if status == 'LoggedIn':
identities = self.config['paths']['user_info']['identities']
for (k, val) in identities.items():
if re.search(val, self.page_source, re.I):
return k
return 'Others'
else:
return status
# ----------------- Single Destination -------------------
# match = re.search(self.config['paths']['user_info']['identity_regex'],
# self.page_source)
# if len(match.groups()) > 0:
# return match.group(1)
# else:
# return match.group(0)
# [ example_regex: "(?=<[^<]*user_name[^<]*>)<.*?text=\"(.*?)\".*?>" ]
def landing(self):
"""land on home activity"""
home_activity = self.stabilizer.get_home_activity()
if self.loaded:
if self.has_home_activity and self.config['home_activity'] != home_activity:
logger.warning(u'home_activity already exists in config, skip record update\n'
u'\tstored: %s, new: %s', self.config['home_activity'],\
home_activity)
else:
self.has_home_activity = True
self.config['home_activity'] = home_activity
if self.config_file:
self.config['home_activity'] = home_activity
with open(self.config_file, 'wb') as config_f:
config_f = codecs.getwriter('utf-8')(config_f)
json.dump(self.config, config_f, indent=4, sort_keys=True,\
ensure_ascii=False)
else:
result = DB().update_config(self.config['package'], self.config['idp'],\
{'home_activity': home_activity}, version=self.config['version'])
if result:
logger.info(u'home_activity:%s stored into config', home_activity)
else:
logger.info(u'Landed on %s', home_activity)
return home_activity
def start_home_activity(self, is_retry=False):
"""better start home activity"""
if self.loaded and self.has_home_activity:
home_activity = self.config['home_activity']
else:
logger.debug(u'home_activity not defined in DB')
home_activity = self.landing()
if self.stabilizer.better_start_activity(home_activity):
return True
else:
if is_retry:
logger.warning('uiaction: start_home_activity mismatch')
return False
else:
self.stabilizer.skip_irrelevant()
if self.driver.current_activity == home_activity:
return True
else:
return self.start_home_activity(is_retry=True)
def origin_for(self, action):
"""find origin of the action"""
if action not in self.config['paths']:
return False
if 'origin' in self.config['paths'][action]:
return self.config['paths'][action]['origin']
else:
return False
def path_for(self, action):
"""find path to the action"""
if action in self.config['paths'] and self.config['paths'][action]:
return json.dumps(self.config['paths'][action])
else:
raise PathNotDefinedInConfig(u"%s not configured for %s - %s"
% (action, self.config['package'], self.config['version']))
def fblite_login_handler(self, stab, account=None, password=None):
"""handler for fblite login"""
if not account or not password:
account = "[email protected]"
password = "evessotest"
# very ugly wait for status change
logger.debug(u'Wait for status change')
time.sleep(IDP_STATUS_TIMEOUT)
# if session is stored
if self.driver.current_activity != 'com.facebook.browser.lite.BrowserLiteActivity':
logger.debug(u'Session is stored')
return True
# click continue
logger.debug(u'Try to click continue')
stab.find_elements_by_keyword(u'Continue', clickable_only=True,\
exact=False)[-1].click()
# wait for getting out of fblite
count = 0
while self.driver.current_activity == 'com.facebook.browser.lite.BrowserLiteActivity':
time.sleep(1)
count += 1
assert count <= IDP_STATUS_TIMEOUT
logger.debug(u'Get out of fblite')
return True
def fb_login_handler(self, stab, account=None, password=None):
"""handler for facebook webview login"""
if not account or not password:
account = "[email protected]"
password = "evessotest"
keywords = [u"Enter email", u"请输入邮箱", u"輸入電郵"]
err_keywords = [u'Error', u'Invalid', u'应用编号无效']
block_keywords = [u'Secure Account']
# wait for correct activity
logger.debug(u'Wait for facebook login activity')
count = 0
activity = self.driver.current_activity
while activity != 'com.facebook.FacebookActivity' and activity != 'com.facebook.LoginActivity':
count += 1
activity = self.driver.current_activity
assert count <= IDP_STATUS_TIMEOUT
# wait for input boxes appear
logger.debug(u'Wait for input boxes appear')
count = 0
while not self.driver.find_elements_by_class_name('android.widget.EditText'):
# in case app does not support fb login at all
if any(kw in self.page_source for kw in err_keywords):
raise IdPHandlingException('This app does not support facebook login')
time.sleep(1)
count += 1
assert count <= IDP_STATUS_TIMEOUT
# input email and password
source = self.page_source
logger.debug(u'Try to input email and password')
if any(kw in source for kw in keywords):
self.driver.find_elements_by_class_name('android.widget.EditText')[0]\
.set_text(account)
self.driver.find_elements_by_class_name('android.widget.Button')[-1].click()
self.driver.find_elements_by_class_name('android.widget.EditText')[-1]\
.set_text(password)
self.driver.find_elements_by_class_name('android.widget.Button')[-1].click()
elif any(kw in source for kw in err_keywords):
raise IdPHandlingException('This app does not support facebook login')
else:
inputs = self.driver.find_elements_by_class_name('android.widget.EditText')
inputs[0].set_text(account)
inputs[-1].set_text(password)
self.driver.find_elements_by_class_name('android.widget.Button')[-1].click()
# wait for status change
logger.debug(u'Wait for status change')
status_keywords = [u'身分繼續', u'Continue', u'would like to'] + err_keywords + block_keywords
count = 0
while not any(kw in self.page_source for kw in status_keywords):
time.sleep(1)
count += 1
assert count <= IDP_STATUS_TIMEOUT
# handle pages after status change
count = 0
logger.debug(u'Try to handle pages after status change')
while self.driver.current_activity == 'com.facebook.FacebookActivity'\
or self.driver.current_activity == 'com.facebook.LoginActivity':
count += 1
source = self.page_source
# in case of any error
if any(kw in source for kw in err_keywords):
logger.debug(u'Error keywords received')
raise IdPHandlingException('This app does not support facebook login')
# in case of account has been blocked
elif any(kw in self.page_source for kw in block_keywords):
raise IdPHandlingException('This account has been blocked!')
# in case of continue appears
elif 'Continue' in source:
logger.debug(u'Try to click Continue')
stab.find_elements_by_keyword(u'Continue', clickable_only=True,\
exact=False)[-1].click()
# give all possible permisson to the app
elif 'would like to' in source:
logger.debug(u'Try to offer permission by clicking OK')
stab.find_elements_by_keyword(u'OK', clickable_only=True, exact=True)[-1].click()
time.sleep(1)
assert count <= IDP_STATUS_TIMEOUT
logger.debug(u'Get out of facebook login webview')
return True
def idp_handler(self, status, account=None, password=None):
"""handler idp login process"""
logger.info(u"RP login status: %s, idp: %s", status, self.idp)
stab = Stabilizer(self.driver)
if status == "Uncertain":
if self.idp == 'sina' and stab.wait_for_activities('.SSOAuthorizeActivity'):
status = "IdpNeedConfirm"
elif self.idp == 'wechat' and stab.wait_for_keyword(u'微信登录|登录后应用将获得以下权限', timeout=60):
status = "IdpNeedConfirm"
elif self.idp == 'fb' and stab.wait_for_keyword(u'登录 Facebook 帐户', timeout=60):
status = "IdpNeedLogin"
else:
return
if status == "IdpNeedConfirm" and self.idp == 'sina':
if self.driver.current_activity == '.SSOAuthorizeActivity' and \
stab.wait_for_keyword(u'确定', timeout=60):
stab.tap_keyword(u'确定', siblings_on=False)
elif status == "IdpNeedConfirm" and self.idp == 'wechat':
if stab.wait_for_keyword(u'确认登录', timeout=60):
stab.tap_keyword(u'确认登录', siblings_on=False)
elif status == "IdpNeedLogin" and self.idp == 'fb':
self.fb_login_handler(stab, account=account, password=password)
elif status == "LoggedIn":
pass
else:
logger.warning("Cannot handle: status - %s, IdP - %s", status, self.idp)
time.sleep(RP_REQUEST_TIMEOUT)
def idp_set_session(self, idp, path):
"""
Set IdP to specific user session by file copying
:param idp: fb, sina or wechat
:param path: path to the folder or file containing session data.
For wechat, no coexisting sessions are allowed, and sessions are bind to device information.
:return: True for success
"""
# make sure adb has root permission, if not exception will be raised
self.adb.root()
# On-device location of session file for different IdP
PKG = {
'fb': 'com.facebook.lite',
'sina': 'com.sina.weibo',
'wechat': 'com.tencent.mm'
}
DST = {
'fb': '/data/data/com.facebook.lite/shared_prefs/',
'sina': '/data/data/com.sina.weibo/databases/sina_weibo',
'wechat': '/data/data/com.tencent.mm/MicroMsg/'
}
self.adb.force_stop(PKG[idp])
self.adb.rm(DST[idp])
self.adb.push(path, DST[idp])
self.adb.chmod(DST[idp])
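    # Usage sketch (illustrative, not part of the original module): restoring a
    # previously captured Sina Weibo session before driving the RP app. The
    # local path is a hypothetical example and must contain session data pulled
    # earlier from the same rooted device:
    #
    #   action = UIAction(driver, idp='sina', package='com.example.rp')
    #   action.idp_set_session('sina', '/tmp/sessions/sina_weibo')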
@property
def page_source(self):
"""wrapper around appium page_source to catch exception"""
        source = None
        last_exc = None
        for _ in range(3):
            try:
                source = self.driver.page_source
                if source:
                    break
            except WebDriverException as exc:
                # keep a reference: the 'as exc' name is unbound once the
                # except block ends, so it cannot be re-raised directly below
                last_exc = exc
                time.sleep(1)
                continue
        else:
            raise WebDriverException(last_exc)
        return source
|
the-stack_0_14594 | from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read().split('\n')
setup(
author='Philipp Bode, Christian Warmuth',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description=(
'neurogaze provides wrapper functionality to record and '
'visualize eye tracking and application usage data.'
),
install_requires=requirements,
license='MIT license',
long_description=readme,
include_package_data=True,
name='neurogaze',
packages=find_packages(include=['neurogaze']),
url='https://github.com/christianwarmuth/neurogaze',
version='0.0.1',
)
|
the-stack_0_14597 | # coding: utf-8
import os
import re
from time import gmtime, localtime, strftime, time
from django import forms
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.core.files.storage import (DefaultStorage, FileSystemStorage,
default_storage)
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import HttpResponse, render
from django.template import RequestContext as Context
from django.urls import get_resolver, get_urlconf, reverse
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from filebrowser import signals
# Default actions
from filebrowser.actions import (flip_horizontal, flip_vertical,
rotate_90_clockwise,
rotate_90_counterclockwise, rotate_180)
from filebrowser.base import FileListing, FileObject
from filebrowser.decorators import file_exists, path_exists
from filebrowser.settings import (ADMIN_THUMBNAIL, ADMIN_VERSIONS,
CONVERT_FILENAME, DEFAULT_PERMISSIONS,
DEFAULT_SORTING_BY, DEFAULT_SORTING_ORDER,
DIRECTORY, EXCLUDE, EXTENSION_LIST,
EXTENSIONS, LIST_PER_PAGE, MAX_UPLOAD_SIZE,
NORMALIZE_FILENAME, OVERWRITE_EXISTING,
SEARCH_TRAVERSE, SELECT_FORMATS,
UPLOAD_TEMPDIR, VERSIONS, VERSIONS_BASEDIR)
from filebrowser.storage import FileSystemStorageMixin
from filebrowser.templatetags.fb_tags import query_helper
from filebrowser.utils import convert_filename
try:
import json
except ImportError:
from django.utils import simplejson as json
# Add some required methods to FileSystemStorage
if FileSystemStorageMixin not in FileSystemStorage.__bases__:
FileSystemStorage.__bases__ += (FileSystemStorageMixin,)
# This cache contains all *instantiated* FileBrowser sites
_sites_cache = {}
def get_site_dict(app_name='filebrowser'):
"""
Return a dict with all *deployed* FileBrowser sites that have
a given app_name.
"""
if app_name not in _sites_cache:
return {}
# Get names of all deployed filebrowser sites with a give app_name
deployed = get_resolver(get_urlconf()).app_dict[app_name]
# Get the deployed subset from the cache
return dict((k, v) for k, v in _sites_cache[app_name].items() if k in deployed)
def register_site(app_name, site_name, site):
"""
Add a site into the site dict.
"""
if app_name not in _sites_cache:
_sites_cache[app_name] = {}
_sites_cache[app_name][site_name] = site
def get_default_site(app_name='filebrowser'):
"""
Returns the default site. This function uses Django's url resolution method to
obtain the name of the default site.
"""
# Get the name of the default site:
resolver = get_resolver(get_urlconf())
name = 'filebrowser'
# Django's default name resolution method (see django.core.urlresolvers.reverse())
app_list = resolver.app_dict[app_name]
if name not in app_list:
name = app_list[0]
return get_site_dict()[name]
def get_breadcrumbs(query, path):
"""
Get breadcrumbs.
"""
breadcrumbs = []
dir_query = ""
if path:
for item in path.split(os.sep):
dir_query = os.path.join(dir_query, item)
breadcrumbs.append([item, dir_query])
return breadcrumbs
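# Example (illustrative, assuming a POSIX-style os.sep): get_breadcrumbs(query, 'uploads/2014/07')
# returns [['uploads', 'uploads'], ['2014', 'uploads/2014'], ['07', 'uploads/2014/07']],
# i.e. one [label, cumulative-path] pair per path segment.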
def get_filterdate(filter_date, date_time):
"""
Get filterdate.
"""
returnvalue = ''
date_year = strftime("%Y", gmtime(date_time))
date_month = strftime("%m", gmtime(date_time))
date_day = strftime("%d", gmtime(date_time))
if filter_date == 'today' and int(date_year) == int(localtime()[0]) and int(date_month) == int(localtime()[1]) and int(date_day) == int(localtime()[2]):
returnvalue = 'true'
elif filter_date == 'thismonth' and date_time >= time() - 2592000:
returnvalue = 'true'
elif filter_date == 'thisyear' and int(date_year) == int(localtime()[0]):
returnvalue = 'true'
elif filter_date == 'past7days' and date_time >= time() - 604800:
returnvalue = 'true'
elif filter_date == '':
returnvalue = 'true'
return returnvalue
def get_settings_var(directory=DIRECTORY):
"""
Get settings variables used for FileBrowser listing.
"""
settings_var = {}
# Main
# Extensions/Formats (for FileBrowseField)
settings_var['EXTENSIONS'] = EXTENSIONS
settings_var['SELECT_FORMATS'] = SELECT_FORMATS
# Versions
settings_var['ADMIN_VERSIONS'] = ADMIN_VERSIONS
settings_var['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
# FileBrowser Options
settings_var['MAX_UPLOAD_SIZE'] = MAX_UPLOAD_SIZE
# Normalize Filenames
settings_var['NORMALIZE_FILENAME'] = NORMALIZE_FILENAME
# Convert Filenames
settings_var['CONVERT_FILENAME'] = CONVERT_FILENAME
# Traverse directories when searching
settings_var['SEARCH_TRAVERSE'] = SEARCH_TRAVERSE
return settings_var
def handle_file_upload(path, file, site):
"""
Handle File Upload.
"""
uploadedfile = None
try:
file_path = os.path.join(path, file.name)
uploadedfile = site.storage.save(file_path, file)
except Exception as inst:
raise inst
return uploadedfile
def filebrowser_view(view):
"Only let staff browse the files"
return staff_member_required(never_cache(view))
class FileBrowserSite(object):
"""
A filebrowser.site defines admin views for browsing your servers media files.
"""
filelisting_class = FileListing
def __init__(self, name=None, app_name='filebrowser', storage=default_storage):
self.name = name
self.app_name = app_name
self.storage = storage
self._actions = {}
self._global_actions = self._actions.copy()
# Register this site in the global site cache
register_site(self.app_name, self.name, self)
# Per-site settings:
self.directory = DIRECTORY
def _directory_get(self):
"Set directory"
return self._directory
def _directory_set(self, val):
"Get directory"
self._directory = val
directory = property(_directory_get, _directory_set)
def get_urls(self):
"URLs for a filebrowser.site"
from django.conf.urls import url
# filebrowser urls (views)
urlpatterns = [
url(r'^browse/$', path_exists(self, filebrowser_view(self.browse)), name="fb_browse"),
url(r'^createdir/', path_exists(self, filebrowser_view(self.createdir)), name="fb_createdir"),
url(r'^upload/', path_exists(self, filebrowser_view(self.upload)), name="fb_upload"),
url(r'^delete_confirm/$', file_exists(self, path_exists(self, filebrowser_view(self.delete_confirm))), name="fb_delete_confirm"),
url(r'^delete/$', file_exists(self, path_exists(self, filebrowser_view(self.delete))), name="fb_delete"),
url(r'^detail/$', file_exists(self, path_exists(self, filebrowser_view(self.detail))), name="fb_detail"),
url(r'^version/$', file_exists(self, path_exists(self, filebrowser_view(self.version))), name="fb_version"),
url(r'^upload_file/$', staff_member_required(csrf_exempt(self._upload_file)), name="fb_do_upload"),
]
return urlpatterns
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
# Check/create short description
if not hasattr(action, 'short_description'):
action.short_description = action.__name__.replace("_", " ").capitalize()
# Check/create applies-to filter
if not hasattr(action, 'applies_to'):
action.applies_to = lambda x: True
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitally get a registered global action wheather it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
def applicable_actions(self, fileobject):
"""
Return a list of tuples (name, action) of actions applicable to a given fileobject.
"""
res = []
for name, action in self.actions:
if action.applies_to(fileobject):
res.append((name, action))
return res
@property
def actions(self):
"""
Get all the enabled actions as a list of (name, func). The list
is sorted alphabetically by actions names
"""
res = list(self._actions.items())
res.sort(key=lambda name_func: name_func[0])
return res
@property
def urls(self):
"filebrowser.site URLs"
return self.get_urls(), self.app_name, self.name
def browse(self, request):
"Browse Files/Directories."
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
# do not filter if VERSIONS_BASEDIR is being used
if not VERSIONS_BASEDIR:
for k, v in VERSIONS.items():
exp = (r'_%s(%s)$') % (k, '|'.join(EXTENSION_LIST))
filter_re.append(re.compile(exp, re.IGNORECASE))
def filter_browse(item):
"Defining a browse filter"
filtered = item.filename.startswith('.')
for re_prefix in filter_re:
if re_prefix.search(item.filename):
filtered = True
if filtered:
return False
return True
query = request.GET.copy()
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
filelisting = self.filelisting_class(
path,
filter_func=filter_browse,
sorting_by=query.get('o', DEFAULT_SORTING_BY),
sorting_order=query.get('ot', DEFAULT_SORTING_ORDER),
site=self)
files = []
if SEARCH_TRAVERSE and query.get("q"):
listing = filelisting.files_walk_filtered()
else:
listing = filelisting.files_listing_filtered()
# If we do a search, precompile the search pattern now
do_search = query.get("q")
if do_search:
re_q = re.compile(query.get("q").lower(), re.M)
filter_type = query.get('filter_type')
filter_date = query.get('filter_date')
filter_format = query.get('type')
for fileobject in listing:
# date/type filter, format filter
append = False
if (not filter_type or fileobject.filetype == filter_type) and \
(not filter_date or get_filterdate(filter_date, fileobject.date or 0)) and \
(not filter_format or filter_format in fileobject.format):
append = True
# search
if do_search and not re_q.search(fileobject.filename.lower()):
append = False
# always show folders with popups
# otherwise, one is not able to select/filter files within subfolders
if fileobject.filetype == "Folder":
append = True
# MODIFIED: Hide folders in search
if do_search:
append = False
# append
if append:
files.append(fileobject)
filelisting.results_total = len(listing)
filelisting.results_current = len(files)
p = Paginator(files, LIST_PER_PAGE)
page_nr = request.GET.get('p', '1')
try:
page = p.page(page_nr)
except (EmptyPage, InvalidPage):
page = p.page(p.num_pages)
request.current_app = self.name
return render(request, 'filebrowser/index.html', {
'p': p,
'page': page,
'filelisting': filelisting,
'query': query,
'title': _(u'FileBrowser'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': "",
'filebrowser_site': self
})
def createdir(self, request):
"Create Directory"
from filebrowser.forms import CreateDirForm
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
if request.method == 'POST':
form = CreateDirForm(path, request.POST, filebrowser_site=self)
if form.is_valid():
server_path = os.path.join(path, form.cleaned_data['name'])
try:
signals.filebrowser_pre_createdir.send(sender=request, path=server_path, name=form.cleaned_data['name'], site=self)
self.storage.makedirs(server_path)
signals.filebrowser_post_createdir.send(sender=request, path=server_path, name=form.cleaned_data['name'], site=self)
messages.add_message(request, messages.SUCCESS, _('The Folder %s was successfully created.') % form.cleaned_data['name'])
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
return HttpResponseRedirect(redirect_url)
except OSError as e:
errno = e.args[0]
if errno == 13:
form.errors['name'] = forms.utils.ErrorList([_('Permission denied.')])
else:
form.errors['name'] = forms.utils.ErrorList([_('Error creating folder.')])
else:
form = CreateDirForm(path, filebrowser_site=self)
request.current_app = self.name
return render(request, 'filebrowser/createdir.html', {
'form': form,
'query': query,
'title': _(u'New Folder'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'New Folder'),
'filebrowser_site': self
})
def upload(self, request):
"Multipe File Upload."
query = request.GET
request.current_app = self.name
return render(request, 'filebrowser/upload.html', {
'query': query,
'title': _(u'Select files to upload'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'Upload'),
'filebrowser_site': self
})
def delete_confirm(self, request):
"Delete existing File/Directory."
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if fileobject.filetype == "Folder":
filelisting = self.filelisting_class(
os.path.join(path, fileobject.filename),
sorting_by=query.get('o', 'filename'),
sorting_order=query.get('ot', DEFAULT_SORTING_ORDER),
site=self)
filelisting = filelisting.files_walk_total()
if len(filelisting) > 100:
additional_files = len(filelisting) - 100
filelisting = filelisting[:100]
else:
additional_files = None
else:
filelisting = None
additional_files = None
request.current_app = self.name
return render(request, 'filebrowser/delete_confirm.html', {
'fileobject': fileobject,
'filelisting': filelisting,
'additional_files': additional_files,
'query': query,
'title': _(u'Confirm delete'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'Confirm delete'),
'filebrowser_site': self
})
def delete(self, request):
"Delete existing File/Directory."
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if request.GET:
try:
signals.filebrowser_pre_delete.send(sender=request, path=fileobject.path, name=fileobject.filename, site=self)
fileobject.delete_versions()
fileobject.delete()
signals.filebrowser_post_delete.send(sender=request, path=fileobject.path, name=fileobject.filename, site=self)
messages.add_message(request, messages.SUCCESS, _('Successfully deleted %s') % fileobject.filename)
except OSError:
# TODO: define error-message
pass
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
def detail(self, request):
"""
Show detail page for a file.
Rename existing File/Directory (deletes existing Image Versions/Thumbnails).
"""
from filebrowser.forms import ChangeForm
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if request.method == 'POST':
form = ChangeForm(request.POST, path=path, fileobject=fileobject, filebrowser_site=self)
if form.is_valid():
new_name = form.cleaned_data['name']
action_name = form.cleaned_data['custom_action']
try:
action_response = None
if action_name:
action = self.get_action(action_name)
# Pre-action signal
signals.filebrowser_actions_pre_apply.send(sender=request, action_name=action_name, fileobject=[fileobject], site=self)
# Call the action to action
action_response = action(request=request, fileobjects=[fileobject])
# Post-action signal
signals.filebrowser_actions_post_apply.send(sender=request, action_name=action_name, fileobject=[fileobject], result=action_response, site=self)
if new_name != fileobject.filename:
signals.filebrowser_pre_rename.send(sender=request, path=fileobject.path, name=fileobject.filename, new_name=new_name, site=self)
fileobject.delete_versions()
self.storage.move(fileobject.path, os.path.join(fileobject.head, new_name))
signals.filebrowser_post_rename.send(sender=request, path=fileobject.path, name=fileobject.filename, new_name=new_name, site=self)
messages.add_message(request, messages.SUCCESS, _('Renaming was successful.'))
if isinstance(action_response, HttpResponse):
return action_response
if "_continue" in request.POST:
redirect_url = reverse("filebrowser:fb_detail", current_app=self.name) + query_helper(query, "filename=" + new_name, "filename")
else:
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "", "filename")
return HttpResponseRedirect(redirect_url)
except OSError:
form.errors['name'] = forms.utils.ErrorList([_('Error.')])
else:
form = ChangeForm(initial={"name": fileobject.filename}, path=path, fileobject=fileobject, filebrowser_site=self)
request.current_app = self.name
return render(request, 'filebrowser/detail.html', {
'form': form,
'fileobject': fileobject,
'query': query,
'title': u'%s' % fileobject.filename,
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': u'%s' % fileobject.filename,
'filebrowser_site': self
})
def version(self, request):
"""
Version detail.
This just exists in order to select a version with a filebrowser popup.
"""
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
request.current_app = self.name
return render(request, 'filebrowser/version.html', {
'fileobject': fileobject,
'query': query,
'settings_var': get_settings_var(directory=self.directory),
'filebrowser_site': self
})
def _upload_file(self, request):
"""
Upload file to the server.
If temporary is true, we upload to UPLOAD_TEMPDIR, otherwise
we upload to site.directory
"""
if request.method == "POST":
folder = request.GET.get('folder', '')
temporary = request.GET.get('temporary', '')
temp_filename = None
if len(request.FILES) == 0:
return HttpResponseBadRequest('Invalid request! No files included.')
if len(request.FILES) > 1:
return HttpResponseBadRequest('Invalid request! Multiple files included.')
filedata = list(request.FILES.values())[0]
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("filebrowser:fb_upload", current_app=self.name))
folder = fb_uploadurl_re.sub('', folder)
# temporary upload folder should be outside self.directory
if folder == UPLOAD_TEMPDIR and temporary == "true":
path = folder
else:
path = os.path.join(self.directory, folder)
# we convert the filename before uploading in order
# to check for existing files/folders
file_name = convert_filename(filedata.name)
filedata.name = file_name
file_path = os.path.join(path, file_name)
file_already_exists = self.storage.exists(file_path)
# construct the temporary filename by prepending the upload folder, because
# otherwise we have no way of knowing whether the file was uploaded
# temporarily or not
if folder == UPLOAD_TEMPDIR and temporary == "true":
temp_filename = os.path.join(folder, file_name)
# Check for name collision with a directory
if file_already_exists and self.storage.isdir(file_path):
ret_json = {'success': False, 'filename': file_name}
return HttpResponse(json.dumps(ret_json))
signals.filebrowser_pre_upload.send(sender=request, path=folder, file=filedata, site=self)
uploadedfile = handle_file_upload(path, filedata, site=self)
if file_already_exists and OVERWRITE_EXISTING:
old_file = smart_str(file_path)
new_file = smart_str(uploadedfile)
self.storage.move(new_file, old_file, allow_overwrite=True)
full_path = FileObject(smart_str(old_file), site=self).path_full
else:
file_name = smart_str(uploadedfile)
filedata.name = os.path.relpath(file_name, path)
full_path = FileObject(smart_str(file_name), site=self).path_full
# set permissions
if DEFAULT_PERMISSIONS is not None:
os.chmod(full_path, DEFAULT_PERMISSIONS)
f = FileObject(smart_str(file_name), site=self)
signals.filebrowser_post_upload.send(sender=request, path=folder, file=f, site=self)
# let Ajax Upload know whether we saved it or not
ret_json = {'success': True, 'filename': f.filename, 'temp_filename': temp_filename}
return HttpResponse(json.dumps(ret_json), content_type="application/json")
storage = DefaultStorage()
# Default FileBrowser site
site = FileBrowserSite(name='filebrowser', storage=storage)
site.add_action(flip_horizontal)
site.add_action(flip_vertical)
site.add_action(rotate_90_clockwise)
site.add_action(rotate_90_counterclockwise)
site.add_action(rotate_180)
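# Illustrative wiring sketch (not part of the original module): the default
# "site" defined above is typically exposed by including its URLs in the
# project's URLconf, along the lines of:
#
#     from filebrowser.sites import site
#     urlpatterns = [
#         url(r'^admin/filebrowser/', include(site.urls)),
#     ]
#
# The exact import path, URL prefix and URL helper (url/path) depend on the
# Django and django-filebrowser versions in use.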
|
the-stack_0_14598 | '''
Module for gathering and managing network information
'''
# Import python libs
import logging
# Import salt libs
from salt.utils.socket_util import sanitize_host
__outputter__ = {
'dig': 'txt',
'ping': 'txt',
'netstat': 'txt',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on Windows, a specific file module exists:
if __grains__['os'] in ('Windows',):
return False
return 'network'
def _cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
netmask = ''
for n in range(4):
if n:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256-(2**(8-cidr_bits)))
cidr_bits = 0
return netmask
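# Illustrative examples (added for clarity; values follow directly from the
# loop above):
#   _cidr_to_ipv4_netmask(24) -> '255.255.255.0'
#   _cidr_to_ipv4_netmask(20) -> '255.255.240.0'
#   _cidr_to_ipv4_netmask(0)  -> '0.0.0.0'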
def _number_of_set_bits_to_ipv4_netmask(set_bits):
'''
Returns an IPv4 netmask from the integer representation of that mask.
Ex. 0xffffff00 -> '255.255.255.0'
'''
return _cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
def _number_of_set_bits(x):
'''
Returns the number of bits that are set in a 32bit int
'''
#Taken from http://stackoverflow.com/a/4912729. Many thanks!
x -= (x >> 1) & 0x55555555
x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
x = ((x >> 4) + x) & 0x0f0f0f0f
x += x >> 8
x += x >> 16
return x & 0x0000003f
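# Worked example (added for clarity): 0xffffff00 has 24 bits set, so
# _number_of_set_bits(0xffffff00) == 24 and therefore
# _number_of_set_bits_to_ipv4_netmask(0xffffff00) == '255.255.255.0'.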
def _interfaces_ip(out):
'''
Uses ip to return a dictionary of interfaces with various information about
each (up/down state, ip address, netmask, and hwaddr)
'''
import re
ret = dict()
def parse_network(value, cols):
'''
Return a tuple of ip, netmask, broadcast
based on the current set of cols
'''
brd = None
if '/' in value: # we have a CIDR in this address
ip, cidr = value.split('/')
else:
ip = value
cidr = 32
if type == 'inet':
mask = _cidr_to_ipv4_netmask(int(cidr))
if 'brd' in cols:
brd = cols[cols.index('brd')+1]
elif type == 'inet6':
mask = cidr
return (ip, mask, brd)
groups = re.compile('\r?\n\d').split(out)
for group in groups:
iface = None
data = dict()
for line in group.splitlines():
if not ' ' in line:
continue
m = re.match('^\d*:\s+([\w.]+)(?:@)?(\w+)?:\s+<(.+)>', line)
if m:
iface, parent, attrs = m.groups()
if 'UP' in attrs.split(','):
data['up'] = True
else:
data['up'] = False
if parent:
data['parent'] = parent
continue
cols = line.split()
if len(cols) >= 2:
type, value = tuple(cols[0:2])
iflabel = cols[-1:][0]
if type in ('inet', 'inet6'):
if 'secondary' not in cols:
ipaddr, netmask, broadcast = parse_network(value, cols)
if type == 'inet':
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['netmask'] = netmask
addr_obj['broadcast'] = broadcast
addr_obj['label'] = iflabel
data['inet'].append(addr_obj)
elif type == 'inet6':
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['prefixlen'] = netmask
data['inet6'].append(addr_obj)
else:
if 'secondary' not in data:
data['secondary'] = list()
ip, mask, brd = parse_network(value, cols)
data['secondary'].append({
'type': type,
'address': ip,
'netmask': mask,
'broadcast': brd,
'label': iflabel,
})
del ip, mask, brd
elif type.startswith('link'):
data['hwaddr'] = value
if iface:
ret[iface] = data
del iface, data
return ret
def _interfaces_ifconfig(out):
'''
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
'''
import re
ret = dict()
piface = re.compile('^(\S+):?')
pmac = re.compile('.*?(?:HWaddr|ether) ([0-9a-fA-F:]+)')
pip = re.compile('.*?(?:inet addr:|inet )(.*?)\s')
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
pmask = re.compile('.*?(?:Mask:|netmask )(?:(0x[0-9a-fA-F]{8})|([\d\.]+))')
pmask6 = re.compile('.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*')
pupdown = re.compile('UP')
pbcast = re.compile('.*?(?:Bcast:|broadcast )([\d\.]+)')
groups = re.compile('\r?\n(?=\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = pmask.match(line)
if mmask:
if mmask.group(1):
mmask = _number_of_set_bits_to_ipv4_netmask(
int(mmask.group(1), 16))
else:
mmask = mmask.group(2)
addr_obj['netmask'] = mmask
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1) or mip6.group(2)
mmask6 = pmask6.match(line)
if mmask6:
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
data['inet6'].append(addr_obj)
data['up'] = updown
ret[iface] = data
del data
return ret
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
CLI Example::
salt '*' network.interfaces
'''
ifaces = dict()
if __salt__['cmd.has_exec']('ip'):
cmd1 = __salt__['cmd.run']('ip link show')
cmd2 = __salt__['cmd.run']('ip addr show')
ifaces = _interfaces_ip(cmd1 + '\n' + cmd2)
elif __salt__['cmd.has_exec']('ifconfig'):
cmd = __salt__['cmd.run']('ifconfig -a')
ifaces = _interfaces_ifconfig(cmd)
return ifaces
def _get_net_start(ipaddr, netmask):
ipaddr_octets = ipaddr.split('.')
netmask_octets = netmask.split('.')
net_start_octets = [str(int(ipaddr_octets[x]) & int(netmask_octets[x]))
for x in range(0, 4)]
return '.'.join(net_start_octets)
def _get_net_size(mask):
binary_str = ''
for octet in mask.split('.'):
binary_str += bin(int(octet))[2:].zfill(8)
return len(binary_str.rstrip('0'))
def _calculate_subnet(ipaddr, netmask):
return '{0}/{1}'.format(_get_net_start(ipaddr, netmask),
_get_net_size(netmask))
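# Worked example (added for clarity):
#   _calculate_subnet('192.168.1.130', '255.255.255.0') -> '192.168.1.0/24'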
def _ipv4_to_bits(ipaddr):
'''
Accepts an IPv4 dotted quad and returns a string representing its binary
counterpart
'''
return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')])
def subnets():
'''
Returns a list of subnets to which the host belongs
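CLI Example::
salt '*' network.subnets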
'''
ifaces = interfaces()
subnets = []
for ipv4_info in ifaces.values():
for ipv4 in ipv4_info.get('inet', []):
if ipv4['address'] == '127.0.0.1': continue
network = _calculate_subnet(ipv4['address'], ipv4['netmask'])
subnets.append(network)
return subnets
def in_subnet(cidr):
'''
Returns True if host is within specified subnet, otherwise False
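CLI Example::
salt '*' network.in_subnet 10.0.0.0/16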
'''
try:
netstart, netsize = cidr.split('/')
netsize = int(netsize)
except (AttributeError, ValueError):
log.error('Invalid CIDR \'{0}\''.format(cidr))
return False
netstart_bin = _ipv4_to_bits(netstart)
if netsize < 32 and len(netstart_bin.rstrip('0')) > netsize:
log.error('Invalid network starting IP \'{0}\' in CIDR '
'\'{1}\''.format(netstart, cidr))
return False
netstart_leftbits = netstart_bin[0:netsize]
for ip_addr in ip_addrs():
if netsize == 32:
if netstart == ip_addr: return True
else:
ip_leftbits = _ipv4_to_bits(ip_addr)[0:netsize]
if netstart_leftbits == ip_leftbits: return True
return False
def ip_addrs(interface=None, include_loopback=False):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
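CLI Example::
salt '*' network.ip_addrs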
'''
ret = []
ifaces = interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k,v) for k,v in ifaces.iteritems()
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ipv4_info in target_ifaces.values():
for ipv4 in ipv4_info.get('inet',[]):
if include_loopback \
or (not include_loopback and ipv4['address'] != '127.0.0.1'):
ret.append(ipv4['address'])
return ret
def ip_addrs6(interface=None, include_loopback=False):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
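CLI Example::
salt '*' network.ip_addrs6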
'''
ret = []
ifaces = interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k,v) for k,v in ifaces.iteritems()
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ipv6_info in target_ifaces.values():
for ipv6 in ipv6_info.get('inet6',[]):
if include_loopback \
or (not include_loopback and ipv6['address'] != '::1'):
ret.append(ipv6['address'])
return ret
def ping(host):
'''
Performs a ping to a host
CLI Example::
salt '*' network.ping archlinux.org
'''
cmd = 'ping -c 4 {0}'.format(sanitize_host(host))
return __salt__['cmd.run'](cmd)
# FIXME: Does not work with: netstat 1.42 (2001-04-15) from net-tools 1.6.0 (Ubuntu 10.10)
def netstat():
'''
Return information on open ports and states
CLI Example::
salt '*' network.netstat
'''
ret = []
cmd = 'netstat -tulpnea'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split()
if line.startswith('tcp'):
ret.append({
'inode': comps[7],
'local-address': comps[3],
'program': comps[8],
'proto': comps[0],
'recv-q': comps[1],
'remote-address': comps[4],
'send-q': comps[2],
'state': comps[5],
'user': comps[6]})
if line.startswith('udp'):
ret.append({
'inode': comps[6],
'local-address': comps[3],
'program': comps[7],
'proto': comps[0],
'recv-q': comps[1],
'remote-address': comps[4],
'send-q': comps[2],
'user': comps[5]})
return ret
# FIXME: This is broken on: Modern traceroute for Linux, version 2.0.14, May 10 2010 (Ubuntu 10.10)
# FIXME: traceroute is deprecated, make this fall back to tracepath
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
CLI Example::
salt '*' network.traceroute archlinux.org
'''
ret = []
cmd = 'traceroute {0}'.format(sanitize_host(host))
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if not ' ' in line:
continue
if line.startswith('traceroute'):
continue
comps = line.split()
result = {
'count': comps[0],
'hostname': comps[1],
'ip': comps[2],
'ms1': comps[4],
'ms2': comps[6],
'ms3': comps[8],
'ping1': comps[3],
'ping2': comps[5],
'ping3': comps[7]}
ret.append(result)
return ret
def dig(host):
'''
Performs a DNS lookup with dig
CLI Example::
salt '*' network.dig archlinux.org
'''
cmd = 'dig {0}'.format(sanitize_host(host))
return __salt__['cmd.run'](cmd)
|
the-stack_0_14601 |
from nintendo.nex import backend, authentication, friends, nintendo_notification
from nintendo import account
import rpc
import time
client_id = '472185292636291082'
rpc_obj = rpc.DiscordIpcClient.for_platform(client_id)
print("RPC connection successful.")
# Wii U Console Details
DEVICE_ID = 1111111111
SERIAL_NUMBER = "xxxxxxxxxxxx"
SYSTEM_VERSION = 0x230 # 5.5.2E
REGION = 4 # Europe (PAL)
COUNTRY = "GB" # United Kingdom (Great Britain)
# Wii U Secondary User/Account Details
USERNAME = "PutSecondaryNNIDUsernameHere"
PASSWORD = "PutSecondaryNNIDPasswordHere"
# Wii U Main User/Account NNID
MAINID = "PutMainNNIDUsernameHere"
class NotificationHandler(nintendo_notification.NintendoNotificationHandler):
def __init__(self):
self.name_cache = {}
def process_notification_event(self, event):
pid = event.pid
if pid not in self.name_cache:
self.name_cache[pid] = api.get_nnid(pid)
name = self.name_cache[pid]
if event.type == nintendo_notification.NotificationType.LOGOUT:
if name == MAINID:
print("Peace!")
activity = {
}
rpc_obj.set_activity(activity)
elif event.type == nintendo_notification.NotificationType.PRESENCE_CHANGE:
presence = event.data
if name == MAINID:
print("Gotcha!")
title_id = "%016X" %(event.data.game_key.title_id)
if title_id == "0000000000000000":
title_name = "Wii U Menu"
elif title_id == "000500001010ED00":
title_name = "MARIO KART 8"
elif title_id == "000500001010CD00":
title_name = "MARIO KART 8"
elif title_id == "0005000010176A00":
title_name = "Splatoon"
elif title_id == "00050000101C9500":
title_name = "Breath of the Wild"
elif title_id == "0005000010180700":
title_name = "Captain Toad: Treasure Tracker"
elif title_id == "0005000010199500":
title_name = "Super Mario 64"
elif title_id == "0005000010195B00":
title_name = "NEW SUPER MARIO BROS."
elif title_id == "0005000010172700":
title_name = "BAYONETTA 2"
elif title_id == "000500301001420A":
title_name = "Nintendo eShop"
elif title_id == "000500301001620A":
title_name = "Miiverse"
elif title_id == "000500301001220A":
title_name = "Internet Browser"
elif title_id == "000500101004A200":
title_name = "Mii Maker"
elif title_id == "000500101005A200":
title_name = "Wii U Chat"
elif title_id == "0005000010105A00":
title_name = "Netflix"
elif title_id == "0005000010105700":
title_name = "YouTube"
elif title_id == "0005000010102F00":
title_name = "Amazon / LOVEFiLM"
elif title_id == "0005000010101E00":
title_name = "New SUPER MARIO BROS. U"
elif title_id == "000500001014B800":
title_name = "New SUPER MARIO BROS. U + New SUPER LUIGI U"
elif title_id == "0005000010145D00":
title_name = "SUPER MARIO 3D WORLD"
elif title_id == "000500001018DD00":
title_name = "Super Mario Maker"
else:
title_name = title_id
#idDash = title_id[:8] + "-" + title_id[8:]
#print("idDash: " + idDash)
start_time = time.time()
print(title_id + " / " + title_name)
activity = {
"details": title_name,
"timestamps": {
"start": start_time
},
"assets": {
"small_text": MAINID,
"small_image": "nn",
"large_text": title_name,
"large_image": title_id.lower()
}
}
rpc_obj.set_activity(activity)
else:
print("Unknown notification type %i (from %s)" %(event.type, name))
api = account.AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.set_title(friends.FriendsTitle.TITLE_ID_EUR, friends.FriendsTitle.LATEST_VERSION)
api.login(USERNAME, PASSWORD)
nex_token = api.get_nex_token(friends.FriendsTitle.GAME_SERVER_ID)
backend = backend.BackEndClient(
friends.FriendsTitle.ACCESS_KEY,
friends.FriendsTitle.NEX_VERSION,
backend.Settings("friends.cfg")
)
backend.connect(nex_token.host, nex_token.port)
backend.login(
nex_token.username, nex_token.password, None,
authentication.NintendoLoginData(nex_token.token)
)
backend.nintendo_notification_server.handler = NotificationHandler()
input("Press enter to disconnect and exit\n")
backend.close()
|
the-stack_0_14602 | """The Met Office integration."""
import asyncio
import logging
import datapoint
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
METOFFICE_COORDINATES,
METOFFICE_DAILY_COORDINATOR,
METOFFICE_HOURLY_COORDINATOR,
METOFFICE_NAME,
MODE_3HOURLY,
MODE_DAILY,
)
from .data import MetOfficeData
from .helpers import fetch_data, fetch_site
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a Met Office entry."""
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
api_key = entry.data[CONF_API_KEY]
site_name = entry.data[CONF_NAME]
connection = datapoint.connection(api_key=api_key)
site = await hass.async_add_executor_job(
fetch_site, connection, latitude, longitude
)
if site is None:
raise ConfigEntryNotReady()
async def async_update_3hourly() -> MetOfficeData:
return await hass.async_add_executor_job(
fetch_data, connection, site, MODE_3HOURLY
)
async def async_update_daily() -> MetOfficeData:
return await hass.async_add_executor_job(
fetch_data, connection, site, MODE_DAILY
)
metoffice_hourly_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Hourly Coordinator for {site_name}",
update_method=async_update_3hourly,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_daily_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"MetOffice Daily Coordinator for {site_name}",
update_method=async_update_daily,
update_interval=DEFAULT_SCAN_INTERVAL,
)
metoffice_hass_data = hass.data.setdefault(DOMAIN, {})
metoffice_hass_data[entry.entry_id] = {
METOFFICE_HOURLY_COORDINATOR: metoffice_hourly_coordinator,
METOFFICE_DAILY_COORDINATOR: metoffice_daily_coordinator,
METOFFICE_NAME: site_name,
METOFFICE_COORDINATES: f"{latitude}_{longitude}",
}
# Fetch initial data so we have data when entities subscribe
await asyncio.gather(
metoffice_hourly_coordinator.async_config_entry_first_refresh(),
metoffice_daily_coordinator.async_config_entry_first_refresh(),
)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
|
the-stack_0_14603 | # Copyright 2016-2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Views module for prngmgr API."""
from django.db.models import F, Q
from django_peeringdb.models import concrete as pdb_models
from prngmgr import models as prngmgr_models
from prngmgr.api import datatables, serializers
from rest_framework import permissions, viewsets
from rest_framework.decorators import list_route
from rest_framework.response import Response
class OrganizationViewSet(viewsets.ReadOnlyModelViewSet):
"""Organization view set."""
queryset = pdb_models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
permission_classes = (permissions.IsAuthenticated,)
class FacilityViewSet(viewsets.ReadOnlyModelViewSet):
"""Facility view set."""
queryset = pdb_models.Facility.objects.all()
serializer_class = serializers.FacilitySerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkProxyViewSet(viewsets.ReadOnlyModelViewSet):
"""Network proxy view set."""
queryset = prngmgr_models.NetworkProxy.objects.all()
serializer_class = serializers.NetworkSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'Network Name',
'data': 'name',
'name': 'name'},
{'title': 'Primary ASN',
'data': 'asn',
'name': 'asn'},
{'title': 'IRR Record',
'data': 'irr_as_set',
'name': 'irr_as_set'},
{'title': 'Looking Glass',
'data': 'looking_glass',
'name': 'looking_glass'},
{'title': 'Peering Policy',
'data': 'policy_general',
'name': 'policy_general'},
{'title': 'Possible Sessions',
'data': 'possible_sessions',
'name': 'possible_sessions',
'orderable': False,
'searchable': False},
{'title': 'Provisioned Sessions',
'data': 'provisioned_sessions',
'name': 'provisioned_sessions',
'orderable': False,
'searchable': False},
{'title': 'Established Sessions',
'data': 'established_sessions',
'name': 'established_sessions',
'orderable': False,
'searchable': False},
]
definition = datatables.TableDefView(columns=columns)
return definition.response
class InternetExchangeProxyViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP proxy view set."""
queryset = prngmgr_models.InternetExchangeProxy.objects.all()
serializer_class = serializers.InternetExchangeSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'IXP Name',
'data': 'name',
'name': 'name',
'path': 'value'},
{'title': 'Country',
'data': 'country',
'name': 'country'},
{'title': 'Region',
'data': 'region_continent',
'name': 'region_continent'},
{'title': 'Participants',
'data': 'participants',
'name': 'participants',
'orderable': True,
'searchable': False},
{'title': 'Possible Sessions',
'data': 'possible_sessions',
'name': 'possible_sessions',
'orderable': False,
'searchable': False},
{'title': 'Provisioned Sessions',
'data': 'provisioned_sessions',
'name': 'provisioned_sessions',
'orderable': False,
'searchable': False},
{'title': 'Established Sessions',
'data': 'established_sessions',
'name': 'established_sessions',
'orderable': False,
'searchable': False},
]
definition = datatables.TableDefView(columns=columns)
return definition.response
class InternetExchangeFacilityViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP Facility proxy view set."""
queryset = pdb_models.InternetExchangeFacility.objects.all()
serializer_class = serializers.InternetExchangeFacilitySerializer
permission_classes = (permissions.IsAuthenticated,)
class IXLanViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP LAN view set."""
queryset = pdb_models.IXLan.objects.all()
serializer_class = serializers.IXLanSerializer
permission_classes = (permissions.IsAuthenticated,)
class IXLanPrefixViewSet(viewsets.ReadOnlyModelViewSet):
"""IXP LAN prefix view set."""
queryset = pdb_models.IXLanPrefix.objects.all()
serializer_class = serializers.IXLanPrefixSerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkContactViewSet(viewsets.ReadOnlyModelViewSet):
"""Network contact view set."""
queryset = pdb_models.NetworkContact.objects.all()
serializer_class = serializers.NetworkContactSerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkFacilityViewSet(viewsets.ReadOnlyModelViewSet):
"""Network facility view set."""
queryset = pdb_models.NetworkFacility.objects.all()
serializer_class = serializers.NetworkFacilitySerializer
permission_classes = (permissions.IsAuthenticated,)
class NetworkIXLanViewSet(viewsets.ReadOnlyModelViewSet):
"""Network IX LAN view set."""
queryset = pdb_models.NetworkIXLan.objects.all()
serializer_class = serializers.NetworkIXLanSerializer
permission_classes = (permissions.IsAuthenticated,)
class PeeringRouterViewSet(viewsets.ModelViewSet):
"""Peering router view set."""
queryset = prngmgr_models.PeeringRouter.objects.all()
serializer_class = serializers.PeeringRouterSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'Hostname',
'data': 'hostname',
'name': 'hostname'},
{'title': 'Peering Interfaces',
'data': 'peering_interfaces',
'name': 'peering_interfaces'},
{'title': 'Possible Sessions',
'data': 'possible_sessions',
'name': 'possible_sessions',
'orderable': False,
'searchable': False},
{'title': 'Provisioned Sessions',
'data': 'provisioned_sessions',
'name': 'provisioned_sessions',
'orderable': False,
'searchable': False},
{'title': 'Established Sessions',
'data': 'established_sessions',
'name': 'established_sessions',
'orderable': False,
'searchable': False},
]
definition = datatables.TableDefView(columns=columns)
return definition.response
class PeeringRouterIXInterfaceViewSet(viewsets.ModelViewSet):
"""Peering router IX interface view set."""
queryset = prngmgr_models.PeeringRouterIXInterface.objects.all()
serializer_class = serializers.PeeringRouterIXInterfaceSerializer
permission_classes = (permissions.IsAuthenticated,)
class PeeringSessionViewSet(viewsets.ModelViewSet):
"""Peering session view set."""
model_manager = prngmgr_models.PeeringSession.objects
queryset = model_manager.all()
serializer_class = serializers.PeeringSessionSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route()
def status_summary(self, *args, **kwargs):
"""Render status summary response."""
summary = self.model_manager.status_summary()
return Response(summary)
@list_route()
def state_changes(self, request, *args, **kwargs):
"""Render state changes query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params,
static_exclude=Q(**{"session_state": F("previous_state")}),
static_order='-state_changed',
)
return query.response
@list_route()
def datatable(self, request, *args, **kwargs):
"""Render datatable query response."""
query_params = datatables.QueryParams(request)
query = datatables.QueryView(
query_set=self.queryset,
serializer_class=self.serializer_class,
query_params=query_params
)
return query.response
@list_route()
def tabledef(self, *args, **kwargs):
"""Render datatable table definition."""
columns = [
{'title': 'IXP',
'data': 'ixp_name',
'name': 'ixp_name',
'responsivePriority': 5},
{'title': 'Peer Name',
'data': 'remote_network_name',
'name': 'remote_network_name',
'responsivePriority': 1},
{'title': 'Peer AS',
'data': 'remote_network_asn',
'name': 'remote_network_asn',
'responsivePriority': 2},
{'title': 'Address Family',
'data': 'address_family',
'name': 'address_family',
'responsivePriority': 3},
{'title': 'Peer Address',
'data': 'remote_address',
'name': 'remote_address'},
{'title': 'Router',
'data': 'router_hostname',
'name': 'router_hostname'},
{'title': 'State',
'data': 'session_state',
'name': 'session_state',
'responsivePriority': 4},
{'title': 'Accepted Prefixes',
'data': 'accepted_prefixes',
'name': 'accepted_prefixes',
'responsivePriority': 6}
]
definition = datatables.TableDefView(columns=columns)
return definition.response
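# Illustrative wiring sketch (not part of the original module): these viewsets
# would typically be registered with a DRF router elsewhere in the project,
# along the lines of:
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'peering-sessions', PeeringSessionViewSet)
#     router.register(r'peering-routers', PeeringRouterViewSet)
#     # urlpatterns += [url(r'^api/', include(router.urls))]
#
# The actual route prefixes and URL layout depend on the project's urls module.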
|
the-stack_0_14605 | import dbProvider as db
import json
from operator import itemgetter
from flask import Flask, url_for, render_template, abort, make_response, redirect
app = Flask(__name__)
serverName = '146.185.179.193:5000'
# serverName = 'otkachkaseptika.ru'
def getStaticPath(relativePath):
# return '/static/' + relativePath
return url_for('static', filename=relativePath)
app.config['SERVER_NAME'] = serverName
# Helpers
@app.context_processor
def utility_processor():
def getLinkForRegionService(regionId = None, serviceId = None, order = False, subdomain = None):
if regionId == None:
if serviceId == None:
return url_for("RegionNoService", subdomain = 'www' if subdomain == None else subdomain)
else:
service = db.getServiceById(serviceId)
return url_for("RegionService", routeString = service['nameTranslit'], subdomain = 'www' if subdomain == None else subdomain)
else:
region = db.getRegionById(regionId)
subdomain = db.getSubdomainByMainRegionId(regionId)
if subdomain == None:
isMainRegion = False
subdomain = db.getSubdomainByRegionId(regionId)
else:
isMainRegion = True
if serviceId == None:
if isMainRegion:
return url_for("RegionNoService", subdomain = subdomain)
else:
return url_for("RegionService", routeString = getPathForRegionId(regionId), subdomain = subdomain)
else:
service = db.getServiceById(serviceId)
if isMainRegion:
return url_for("RegionService", routeString = service['nameTranslit'], subdomain = subdomain)
else:
if not (region['hasChildren']):
order = True
if order:
routeString = service['nameTranslit'] + "-v-" + region['dativeTranslit']
return url_for("RegionService", routeString = routeString, subdomain = subdomain)
else:
routeString = service['nameTranslit'] + getPathForRegionId(regionId)
return url_for("RegionService", routeString = routeString, subdomain = subdomain)
def getLen(array):
return len(array)
return dict(getLinkForRegionService=getLinkForRegionService, getLen=getLen, getServiceImgUrl=getServiceImgUrl)
def getPathForRegionId(regionId):
path = ""
parents = db.getRegionParentsSorted(regionId)
for parent in parents:
path += "/" + parent["nameTranslit"]
region = db.getRegionById(regionId)
path += "/" + region["nameTranslit"]
return path
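# Illustrative example (added for clarity): for a region with one parent this
# returns "/<parent.nameTranslit>/<region.nameTranslit>", i.e. the translit
# names of all ancestors followed by the region itself, each prefixed by "/".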
def getRegionByPathAndParentId(path, parentId):
regions = path.split('/')
for regionName in regions:
region = db.getRegionByNameTranslitAndParentId(regionName, parentId)
if region == None:
return None
parentId = region['id']
return region
def getServiceImgUrl(service, region, size = None):
imgNumber = db.getServiceRandomImgNumber(service, region['id'])
if imgNumber == None:
service = db.getServiceById(5)
imgNumber = db.getServiceRandomImgNumber(service, region['id'])
sizeStr = ''
if size != None:
sizeStr = '-' + size
return getStaticPath('img/' + service['nameTranslit'] + '/' + service['nameTranslit'] + '-' + str(imgNumber) + sizeStr + '.jpg')
def replaceDataInContent(content, region, service):
result = []
for block in content:
imgUrl = getServiceImgUrl(service, region)
replaced = block.replace('{N}', region['dativeCaseName']).replace('{imgSrc}', imgUrl).replace('{imgAlt}', service['name'] + ' в ' + region['dativeCaseName'])
result.append(replaced)
return result
# Redirects from no subdomains to www
# @app.route('/')
# def Redirect():
# return redirect("http://www." + serverName + "/", code=301)
# @app.route('/<path:routeString>')
# def RedirectWithPath(routeString):
# return redirect("http://www." + serverName + "/" + routeString, code=301)
# With subdomain
@app.route('/')
@app.route('/', subdomain="<subdomain>")
def RegionNoService(subdomain = ''):
print('Subdomain ' + subdomain)
region = db.getRegionBySubdomain(subdomain)
if region == None:
region = db.getRegionById(0)
return render_template('selectServiceForRegion.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(region['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(region['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(region['id'])['phoneNormal'],
subdomain = subdomain,
title = db.getText("regionNoService", "title").format(region['dativeCaseName']),
description = db.getText("regionNoService", "description").format(region['dativeCaseName']),
keywords = db.getText("regionNoService", "keywords").format(region['dativeCaseName']),
h1 = db.getText("regionNoService", "h1").format(region['dativeCaseName']),
copyright = db.getText("footer", "copyright"),
services = db.getServices(),
region = region,
parentRegions = db.getRegionParentsSorted(region['id']),
regions = db.getRegionsTree(parentIds=[region['id']])
)
@app.route('/<path:routeString>', subdomain = '')
@app.route('/<path:routeString>', subdomain="<subdomain>")
def RegionService(routeString, subdomain):
print('Subdomain ' + subdomain)
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = db.getRegionById(0)
serviceAndRegion = routeString.split("/")
service = db.getServiceByNameTranslit(serviceAndRegion[0])
if service != None:
regionPath = routeString.replace(service['nameTranslit'] + "/", "")
region = getRegionByPathAndParentId(path=regionPath, parentId=mainRegion['id'])
dativeRegionName = mainRegion['dativeCaseName']
parentIds=[mainRegion['id']]
parentRegions = db.getRegionParentsSorted(mainRegion['id'])
regionOrMainRegion = mainRegion
if region != None:
dativeRegionName = region['dativeCaseName']
parentIds=[region['id']]
parentRegions = db.getRegionParentsSorted(region['id'])
regionOrMainRegion = region
return render_template('selectRegionForService.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(mainRegion['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(mainRegion['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(mainRegion['id'])['phoneNormal'],
subdomain = subdomain,
title = db.getText("mainRegionService", "title").format(service['name'], dativeRegionName),
description = db.getText("mainRegionService", "description").format(service['name'], dativeRegionName, service['description']),
keywords = db.getText("mainRegionService", "keywords").format(service['name'], dativeRegionName),
h1 = db.getText("mainRegionService", "h1").format(service['name'], dativeRegionName),
service = service,
parentRegions = parentRegions,
copyright = db.getText("footer", "copyright"),
regions = db.getRegionsTree(parentIds, 2),
region = regionOrMainRegion
)
else:
serviceAndRegion = routeString.split("-v-")
service = db.getServiceByNameTranslit(serviceAndRegion[0])
if service == None:
region = getRegionByPathAndParentId(serviceAndRegion[0], mainRegion['id'])
if region == None:
region = db.getRegionById(0)
return render_template('selectServiceForRegion.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(mainRegion['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(mainRegion['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(mainRegion['id'])['phoneNormal'],
subdomain = subdomain,
title = db.getText("regionNoService", "title").format(region['dativeCaseName']),
description = db.getText("regionNoService", "description").format(region['dativeCaseName']),
keywords = db.getText("regionNoService", "keywords").format(region['dativeCaseName']),
h1 = db.getText("regionNoService", "h1").format(region['dativeCaseName']),
copyright = db.getText("footer", "copyright"),
services = db.getServices(),
region = region,
parentRegions = db.getRegionParentsSorted(region['id']),
regions = db.getRegionsTree(parentIds = [mainRegion['id']])
)
if len(serviceAndRegion) > 1:
region = db.getRegionByDativeTranslitAndMainRegion(serviceAndRegion[1], mainRegion['id'])
if region == None:
region = db.getRegionById(0)
services = db.getServices()[:]
services.remove(service)
content = db.getRandomizedTexts("orderService", subdomain, str(service['id']), region['id'])
content = replaceDataInContent(content, region, service)
return render_template('orderService.html',
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(mainRegion['dativeCaseName']),
mainPhone = db.getPhoneByRegionId(mainRegion['id'])['phoneString'],
mainPhoneLink = db.getPhoneByRegionId(mainRegion['id'])['phoneNormal'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
subdomain = subdomain,
title = db.getText("orderService", "title").format(service['name'], region['dativeCaseName']),
description = db.getText("orderService", "description").format(service['name'], region['dativeCaseName']),
keywords = db.getText("orderService", "keywords").format(service['name'], region['dativeCaseName']),
h1 = db.getText("orderService", "h1").format(service['name'], region['dativeCaseName']),
copyright = db.getText("footer", "copyright"),
services = services,
region = region,
service = service,
parentRegions = db.getRegionParentsSorted(region['id']),
regions = db.getRegionsTree(parentIds = [mainRegion['id']]),
otherServicesHeader = "Другие услуги в {}".format(region['dativeCaseName']),
contentBlocks = content,
imgUrl = getServiceImgUrl(service, region)
)
#robots.txt
@app.route('/robots.txt', subdomain="<subdomain>")
def Robots(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
robots = 'User-agent: *\nAllow: /\nHost:' + subdomain + '.' + serverName + '\nsitemap: http://' + subdomain + '.' + serverName + '/sitemap.xml'
response= make_response(robots)
response.headers["Content-Type"] = "text/plain"
return response
#sitemap.xml
sitemapCount = 50
lastMod = '2017-07-16'
@app.route('/sitemap.xml', subdomain="<subdomain>")
def SitemapIndex(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
sitemapIndex = render_template('sitemapindex.xml',
urlRoot='http://' + subdomain + '.' + serverName,
sitemapCount = sitemapCount,
lastMod = lastMod)
response= make_response(sitemapIndex)
response.headers["Content-Type"] = "application/xml"
return response
@app.route('/sitemap<index>.xml', subdomain="<subdomain>")
def Sitemap(index, subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
index = int(index)
if index > sitemapCount:
abort(404)
services = db.getServices()
regions = db.getAllChildrenRegionIds(mainRegion['id'])
start = (index - 1) * len(regions)/sitemapCount
if start < 0:
abort(404)
if start > len(regions):
start = len(regions)
end = index * len(regions)/sitemapCount
if end > len(regions):
end = len(regions)
start = int(start)
end = int(end)
sitemapTemplate = 'sitemap1.xml'
if index == 1:
sitemapTemplate = 'sitemap.xml'
sitemapXml = render_template(sitemapTemplate,
urlRoot='http://' + subdomain + '.' + serverName,
services = services,
regions = regions[start:end],
lastMod = lastMod,
subdomain = subdomain)
response= make_response(sitemapXml)
response.headers["Content-Type"] = "application/xml"
return response
#verification
@app.route('/google450d69197dedc081.html', subdomain="<subdomain>")
def GoogleVerification(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return 'google-site-verification: google450d69197dedc081.html'
@app.route('/df439bf5423b.html', subdomain="<subdomain>")
def YandexVerificationMsk(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return 'd085889e17e4'
@app.route('/yandex_d6b8a19aaea0ecfe.html', subdomain="<subdomain>")
def YandexVerificationSpb(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('yandex_d6b8a19aaea0ecfe.html')
@app.route('/wmail_557011f651d368ddfb70a33d8e147a72.html', subdomain="<subdomain>")
def MailVerificationSpb(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('wmail_557011f651d368ddfb70a33d8e147a72.html')
@app.route('/yandex_fb5d169c5c36f5d3.html', subdomain="<subdomain>")
def YandexVerificationKrasnodar(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('yandex_fb5d169c5c36f5d3.html')
@app.route('/wmail_076dfddb21e2e2bdee0afae71729a13a.html', subdomain="<subdomain>")
def MailVerificationKrasnodar(subdomain):
mainRegion = db.getRegionBySubdomain(subdomain)
if mainRegion == None:
mainRegion = 0
return render_template('wmail_076dfddb21e2e2bdee0afae71729a13a.html')
# Error handling
@app.errorhandler(404)
def page_not_found(error):
region = db.getRegionById(0)
return render_template('404.html',
mainPhone = db.getPhoneByRegionId(region['id'])['phoneString'],
mainPhoneMeta = db.getText("phoneDescription", "other"),
mainPhoneLink = db.getPhoneByRegionId(region['id'])['phoneNormal'],
siteName = db.getText("header", "siteName"),
motto = db.getText("header", "motto").format(region['dativeCaseName']),
subdomain = db.getSubdomainByMainRegionId(region['id']),
title = "Страница не найдена",
copyright = db.getText("footer", "copyright")),404
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
|
the-stack_0_14609 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2019 Chaintope Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
super(NodeNetworkLimitedTest, self).setup_network()
self.disconnect_all()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generate(292, self.signblockprivkey)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
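# 1028 == NODE_BLOOM (4) | NODE_NETWORK_LIMITED (1024), i.e. the expected_services value above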
assert_equal(node1.firstAddrnServices, 1028)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10, self.signblockprivkey)
# connect node1 (non pruned) with node0 (pruned) and check if they can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
|
the-stack_0_14610 | import os
import argparse
import numpy as np
from algos import construct_classifier, classifier_types
from utils.data_utils import get_dataset, dataset_names
from utils.misc import increment_path
from utils.tf_utils import launch_tensorboard
from utils.vis_utils import plot_embeddings
masterdir = "/tmp/fairml-farm/"
base_datadir = masterdir + "data/"
base_logdir = masterdir + "logs/"
parser = argparse.ArgumentParser(description="Evaluate an individual fairness "
"algorithm.\nNOTE: classifier-specific "
"arguments should be specified in area "
"in the script itself.")
parser.add_argument("--experiment-name", default="default",
help="Name for the experiment base directory, "
"used as the extension relative to {}".format(base_logdir))
parser.add_argument("--load-dir",
help="Path to a previous experiment subdirectory, used to "
"load model weights, relative to {}.".format(base_logdir))
parser.add_argument("--train", action="store_true",
help="train the classifier")
parser.add_argument("-epochs", type=int, default=20)
parser.add_argument("--visualize", action="store_true", help="visualize "
"learned latent space")
parser.add_argument("-classifier", choices=[c.name for c in classifier_types],
default="simplenn",
help="Name of the type of fairness algorithm to use.")
parser.add_argument("-dataset", choices=dataset_names,
default="adult",
help="Name of dataset to train on.")
args = parser.parse_args()
loaddir = None
if args.load_dir is not None:
loaddir = os.path.join(base_logdir, args.load_dir)
logdir = increment_path(os.path.join(base_logdir, args.experiment_name, "run"))
os.makedirs(logdir, exist_ok=True)
print("Logging data to {}".format(logdir))
print("Loading {} dataset...".format(args.dataset))
train_dataset, validation_dataset = get_dataset(args.dataset,
base_datadir=base_datadir)
print("Launching Tensorboard.\nTo visualize, navigate to "
"http://0.0.0.0:6006/\nTo close Tensorboard,"
" press ctrl+C")
tensorboard_process = launch_tensorboard(logdir)
# ===== SPECIFY HYPERPARAMETERS (INCLUDING CLASSIFIER-TYPE) =====
inputsize = train_dataset["data"].shape[1]
layersizes = [100]
classifier_type = "paritynn"
hparams = {
"classifier_type": classifier_type,
"layersizes": layersizes,
"inputsize": inputsize,
}
# ===============================================================
print("Initializing classifier...")
classifier = construct_classifier(hparams, loaddir=loaddir)
if args.train:
print("Training network...")
classifier.train(train_dataset, logdir, epochs=args.epochs,
validation_dataset=validation_dataset)
savepath = classifier.save_model(logdir)
if args.visualize: # Plot out the learned embedding space
n = validation_dataset["label"].shape[0]
# get an equal number of male and female points
n_males = sum(validation_dataset["label"])
limiting_gender = n_males > n - n_males # 1 if men, 0 if women
n_limiting_gender = sum(validation_dataset["label"] == limiting_gender)
max_points_per_gender = 500
n_per_gender = min(max_points_per_gender, n_limiting_gender)
inds = np.concatenate([
np.where(validation_dataset["label"] == limiting_gender)[0][:n_per_gender],
np.where(validation_dataset["label"] != limiting_gender)[0][:n_per_gender]],
axis=0)
vis_dataset = {k:v[inds, ...] for k, v in validation_dataset.items()}
val_embeddings = classifier.compute_embedding(vis_dataset["data"])
plot_embeddings(val_embeddings,
vis_dataset["label"],
vis_dataset["protected"],
plot3d=True,
subsample=False,
label_names=["income<=50k", "income>50k"],
protected_names=["female", "male"])
tensorboard_process.join()
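# Example invocation (illustrative; the script filename below is an assumption,
# the flags match the argparse definitions above):
#   python evaluate_fairness.py --train -epochs 20 -dataset adult --visualize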
|
the-stack_0_14612 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module'
__author__ = 'DANTE FUNG'
import sys
def test():
args = sys.argv
if len(args) == 1:
print('Hello world!')
elif len(args) == 2:
print('Hello, %s!' % args[1])
else:
print('Too many arguments!')
if __name__ == '__main__':
test() |
the-stack_0_14613 | from typing import List, Any
def quick_sort(array: List[Any], arr_length: int) -> List[Any]:
def __quick_sort(start: int, end: int) -> None:
if start >= end:
return
pivot = array[(start + end) // 2]
left, right = start, end
while left <= right:
while array[left] < pivot:
left += 1
while array[right] > pivot:
right -= 1
if left <= right:
array[left], array[right] = array[right], array[left]
left += 1
right -= 1
__quick_sort(start, right)
__quick_sort(left, end)
__quick_sort(0, arr_length - 1)
return array
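# Minimal usage sketch (illustrative, not part of the original file):
if __name__ == "__main__":
    sample = [5, 3, 8, 1, 9, 2]
    print(quick_sort(sample, len(sample)))  # -> [1, 2, 3, 5, 8, 9]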
|
the-stack_0_14614 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from abbyjergerContigFilter.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'abbyjergerContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from abbyjergerContigFilter.abbyjergerContigFilterImpl import abbyjergerContigFilter # noqa @IgnorePep8
impl_abbyjergerContigFilter = abbyjergerContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
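# Example (added for clarity): sets and frozensets are serialized as JSON lists,
# e.g. json.dumps({"ids": {1, 2, 3}}, cls=JSONObjectEncoder) -> '{"ids": [1, 2, 3]}'
# (list order follows the set's iteration order).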
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a Python
object instead of a JSON string. It is mainly useful for debugging
purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
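# Illustrative note (not from the original source): the client address is
# resolved in priority order X-Forwarded-For, then X-Real-IP, then REMOTE_ADDR,
# and only the first (client-most) entry of a comma-separated X-Forwarded-For
# list is used, e.g.
#   getIPAddress({'HTTP_X_FORWARDED_FOR': '10.0.0.1, 172.16.0.2'}) -> '10.0.0.1'
# unless the config sets dont_trust_x_ip_headers to 'true'.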
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'abbyjergerContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_abbyjergerContigFilter.run_abbyjergerContigFilter,
name='abbyjergerContigFilter.run_abbyjergerContigFilter',
types=[dict])
self.method_authentication['abbyjergerContigFilter.run_abbyjergerContigFilter'] = 'required' # noqa
self.rpc_service.add(impl_abbyjergerContigFilter.run_abbyjergerContigFilter_max,
name='abbyjergerContigFilter.run_abbyjergerContigFilter_max',
types=[dict])
self.method_authentication['abbyjergerContigFilter.run_abbyjergerContigFilter_max'] = 'required' # noqa
self.rpc_service.add(impl_abbyjergerContigFilter.status,
name='abbyjergerContigFilter.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'abbyjergerContigFilter ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
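    # Illustrative note (not from the original source): now_in_utc() returns the
    # local time in ISO format with the local UTC offset appended as hours:minutes,
    # e.g. '2021-06-01T12:00:00.000000+2:00' on a host two hours ahead of UTC
    # (the '%+02d' format does not zero-pad the hour component to two digits).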
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, start the server on localhost on a system-assigned port in the
    main thread; execution stays in the server main loop until interrupted.
    To run the server in a separate process (which allows stop_server() to be
    called and the port number to be returned), set newprocess=True.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
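# Illustrative usage sketch (assumptions: run outside uwsgi; port=0 requests an
# ephemeral port chosen by the OS):
#   port = start_server(host='localhost', port=0, newprocess=True)
#   ... POST JSON-RPC requests to http://localhost:<port> ...
#   stop_server()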
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
the-stack_0_14617 | # Copyright (c) University of Utah
from IPython.display import display
from traitlets import Bool, Dict, HasTraits, Instance, Int, List, Tuple, Unicode, observe, Set, link
from ipywidgets import HBox, VBox, IntRangeSlider, FloatRangeSlider
import ipywidgets as widgets
from . import BaseTreeView
from .filters import AttrFilter, Trigger, GroupUIFilter
class TreeView(BaseTreeView):
options = List(Unicode(), ['span',
'fitness', 'parent_fitness', 'child_fitness', 'shared_fitness',
'coef_change', 'coef_similarity',
'inv_fitness',
'min', 'max', 'unique_max', 'unique_min',
'dim_parent', 'dim_child',
'dim_min', 'dim_max',
'q_fitness',
])
x_value = Tuple(default_value=(0, 1))
y_value = Tuple(default_value=(0, 1))
def __init__(self, src=None, auto=True, x=None, y=None, **kwargs):
super().__init__(**kwargs)
self._filters = {}
self.box = VBox()
self._links = []
self._group_filter = GroupUIFilter()
self._trigger = Trigger(self._group_filter, func=self._apply_filter)
self._filters = {}
self._auto = auto
self._auto_filter = None
self.show_measure = False
# setup controls
self._ctrls = HBox()
self._menu = widgets.Dropdown(
options=self.options,
description='Attribute:',
value=self.attr,
disabled=False,
)
self.x_slider = IntRangeSlider(min=0, max=1, value=(0, 1), description='Points:')
self.y_slider = FloatRangeSlider(min=0, max=1, value=(0,1), description='Persistence:', step=0.001)
self._ctrls = HBox([self._menu, self.y_slider, self.x_slider])
link((self, 'x_value'), (self.x_slider, 'value'))
link((self, 'y_value'), (self.y_slider, 'value'))
self._auto_filter = AttrFilter(attr=self._menu.value)
if self._auto:
self._group_filter.add(self._auto_filter, name='auto')
widgets.link((self, 'attr'), (self._menu, 'value'))
self.observe(self._auto_update, names=['attr'])
# setup view
self._links = [
widgets.link((self, 'x'), (self.x_slider, 'value')),
widgets.link((self, 'y'), (self.y_slider, 'value')),
]
self._update_children()
if src is not None:
self.src = src
if x is not None:
self.x = x
if y is not None:
self.y = y
def _apply_filter(self):
if self.tree is not None:
self.show = self.tree.filter(self._group_filter)
def _auto_update(self, change):
self._auto_filter.attr = self.attr
self._auto_filter.update_range(self.tree)
@observe('tree')
def tree_view_tree_changed(self, change):
if self.tree is None:
self.x_slider.value = (self.x_slider.min, self.x_slider.max)
else:
reset = self.x_slider.value[1] == self.x_slider.max
self.x_slider.max = self.tree.regulus.pts.size()
if reset:
self.x_slider.value = self.x_slider.value[0], self.x_slider.max
@property
def filters(self):
return self._group_filter
@filters.setter
def filters(self, f):
if f == self._group_filter:
return
self._trigger.remove(self._group_filter)
self._group_filter = f
if self._auto:
self._group_filter.insert(0, self._auto_filter, name='auto')
self._trigger.add(self._group_filter)
self._update_children()
@property
def opts(self):
return self._menu.options
@opts.setter
def opts(self, opts):
self._menu.options = opts
def add_option(self, attr):
if attr not in self._menu.options:
self._menu.options = list(self.options) + [attr]
self.attr = attr
def remove_option(self, attr):
if attr in self._menu.options:
opts = list(self._menu.options)
opts.remove(attr)
self._menu.options = opts
if self.attr == attr:
self.attr = opts[0] if len(opts) > 0 else None
def _update_children(self):
children = [self._ctrls, self, self._group_filter]
self.box.children = children
def find_filter(self, name):
return self._group_filter.find(name)
def add_filter(self, *args, **kwargs):
f = self._group_filter.add(*args, **kwargs)
if self.tree and hasattr(f, 'update_range'):
f.update_range(self.tree)
return f
def insert_filter(self, idx, *args, **kwargs):
f = self._group_filter.insert(idx, *args, **kwargs)
if self.tree and hasattr(f, 'update_range'):
f.update_range(self.tree)
def remove_filter(self, item):
self._group_filter.remove(item)
@property
def auto(self):
return self._auto
@auto.setter
def auto(self, value):
if value != self._auto:
self._auto = value
if self._auto:
self._group_filter.insert(0, self._auto_filter)
else:
self._group_filter.remove(self._auto_filter)
def _ipython_display_(self, **kwargs):
display(self.box)
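# Illustrative usage sketch (hypothetical names; `topo` stands in for whatever
# regulus topology object the surrounding project expects as `src`):
#   view = TreeView(src=topo)
#   view.add_filter(AttrFilter(attr='fitness'))  # extra filter alongside the 'auto' one
#   view                                         # renders the controls, tree and filters in the notebook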
the-stack_0_14619 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opteryx.storage import BasePartitionScheme
def _safe_get_next_element(lst, item):
"""get the element from a list which follows a given element"""
try:
index = lst.index(item)
return lst[index + 1]
except IndexError:
return None
def _extract_as_at(path):
for part in path.split("/"):
if part.startswith("as_at_"):
return part
return ""
def _extract_by(path):
for part in path.split("/"):
if part.startswith("by_"):
return part
_is_complete = lambda blobs, as_at: any(
blob for blob in blobs if as_at + "/frame.complete" in blob
)
_is_invalid = lambda blobs, as_at: any(
blob for blob in blobs if (as_at + "/frame.ignore" in blob)
)
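# Illustrative example (hypothetical blob path, not from the original source):
# a Mabel-partitioned blob may look like
#   "bucket/dataset/year_2022/month_01/day_01/by_hour/hour=00/as_at_1638000000/frame.complete"
# _extract_by() would return "by_hour" and _extract_as_at() "as_at_1638000000";
# the frame.complete / frame.ignore markers are what _is_complete and _is_invalid
# test for when choosing the active frame.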
class MabelPartitionScheme(BasePartitionScheme):
"""
Handle reading data using the Mabel partition scheme.
"""
def partition_format(self):
return "year_{yyyy}/month_{mm}/day_{dd}"
def _inner_filter_blobs(self, list_of_blobs, statistics):
# The segments are stored in folders with the prefix 'by_', as in,
# segments **by** field name
list_of_segments = {
_extract_by(blob) for blob in list_of_blobs if "/by_" in blob
}
chosen_segment = ""
# If we have multiple 'by_' segments, pick one deterministically (the last in
# sorted order) until we start making cost-based decisions
if list_of_segments:
list_of_segments = sorted(list_of_segments)
chosen_segment = list_of_segments.pop()
# Do the pruning
list_of_blobs = [
blob for blob in list_of_blobs if f"/{chosen_segment}/" in blob
]
# build a list of the segments we're going to read, for example, if we have
# data which are segmented by hour, this will be the hour=00 part
if chosen_segment == "":
segmented_folders = {""}
else:
segmented_folders = {
_safe_get_next_element(blob.split("/"), chosen_segment)
for blob in list_of_blobs
}
# count the segments we're planning to read
statistics.segments_scanned += len(segmented_folders)
# go through the list of segments, getting the active frame for each
for segment_folder in segmented_folders:
# we get the blobs for this segment by looking for the path to contain
# a combination of the segment and the segmented folder
segment_blobs = [
blob
for blob in list_of_blobs
if f"{chosen_segment}/{segment_folder}" in blob
]
# work out if there's an as_at part
as_ats = {
_extract_as_at(blob) for blob in segment_blobs if "as_at_" in blob
}
if as_ats:
as_ats = sorted(as_ats)
as_at = as_ats.pop()
while not _is_complete(segment_blobs, as_at) or _is_invalid(
segment_blobs, as_at
):
if len(as_ats) > 0:
as_at = as_ats.pop()
else:
return []
# get_logger().debug(f"Reading Frame `{as_at}`")
yield from (blob for blob in segment_blobs if (as_at in blob))
else:
yield from list_of_blobs
def filter_blobs(self, list_of_blobs, statistics):
return list(self._inner_filter_blobs(list_of_blobs, statistics))
the-stack_0_14621 | from typing import TypeVar, Optional
T = TypeVar("T")
def ensure(value: Optional[T]) -> T:
if value is None:
raise RuntimeError("Expected a non-None value to be present.")
return value
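# Illustrative usage (hypothetical helper, not from the original source):
#   maybe_user: Optional[str] = lookup_user()   # may return None
#   user: str = ensure(maybe_user)              # raises RuntimeError if None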
the-stack_0_14622 | import collections.abc
import copy
import datetime
import decimal
import operator
import uuid
import warnings
from base64 import b64decode, b64encode
from functools import partialmethod, total_ordering
from django import forms
from django.apps import apps
from django.conf import settings
from django.core import checks, exceptions, validators
from django.db import connection, connections, router
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin
from django.utils import timezone
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.duration import duration_microseconds, duration_string
from django.utils.functional import Promise, cached_property
from django.utils.ipv6 import clean_ipv6_address
from django.utils.itercompat import is_iterable
from django.utils.text import capfirst
from django.utils.translation import gettext_lazy as _
__all__ = [
'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField',
'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField',
'DateField', 'DateTimeField', 'DecimalField', 'DurationField',
'EmailField', 'Empty', 'Field', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveBigIntegerField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField',
'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField',
]
class Empty:
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
def return_None():
return None
@total_ordering
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
descriptor_class = DeferredAttribute
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=(),
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.remote_field = rel
self.is_relation = self.remote_field is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
if isinstance(choices, collections.abc.Iterator):
choices = list(choices)
self.choices = choices
self.help_text = help_text
self.db_index = db_index
self.db_column = db_column
self._db_tablespace = db_tablespace
self.auto_created = auto_created
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = list(validators) # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
"""
Return "app_label.model_label.field_name" for fields attached to
models.
"""
if not hasattr(self, 'model'):
return super().__str__()
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""Display the module, class, and name of the field."""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_choices(),
*self._check_db_index(),
*self._check_null_allowed_for_primary_keys(),
*self._check_backend_specific_checks(**kwargs),
*self._check_validators(),
*self._check_deprecation_details(),
]
def _check_field_name(self):
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
elif LOOKUP_SEP in self.name:
return [
checks.Error(
'Field names must not contain "%s".' % (LOOKUP_SEP,),
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=self,
id='fields.E003',
)
]
else:
return []
@classmethod
def _choices_is_value(cls, value):
return isinstance(value, (str, Promise)) or not is_iterable(value)
def _check_choices(self):
if not self.choices:
return []
if not is_iterable(self.choices) or isinstance(self.choices, str):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
obj=self,
id='fields.E004',
)
]
choice_max_length = 0
# Expect [group_name, [value, display]]
for choices_group in self.choices:
try:
group_name, group_choices = choices_group
except (TypeError, ValueError):
# Containing non-pairs
break
try:
if not all(
self._choices_is_value(value) and self._choices_is_value(human_name)
for value, human_name in group_choices
):
break
if self.max_length is not None and group_choices:
choice_max_length = max([
choice_max_length,
*(len(value) for value, _ in group_choices if isinstance(value, str)),
])
except (TypeError, ValueError):
# No groups, choices in the form [value, display]
value, human_name = group_name, group_choices
if not self._choices_is_value(value) or not self._choices_is_value(human_name):
break
if self.max_length is not None and isinstance(value, str):
choice_max_length = max(choice_max_length, len(value))
# Special case: choices=['ab']
if isinstance(choices_group, str):
break
else:
if self.max_length is not None and choice_max_length > self.max_length:
return [
checks.Error(
"'max_length' is too small to fit the longest value "
"in 'choices' (%d characters)." % choice_max_length,
obj=self,
id='fields.E009',
),
]
return []
return [
checks.Error(
"'choices' must be an iterable containing "
"(actual value, human readable name) tuples.",
obj=self,
id='fields.E005',
)
]
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
app_label = self.model._meta.app_label
for db in connections:
if router.allow_migrate(db, app_label, model_name=self.model._meta.model_name):
return connections[db].validation.check_field(self, **kwargs)
return []
def _check_validators(self):
errors = []
for i, validator in enumerate(self.validators):
if not callable(validator):
errors.append(
checks.Error(
"All 'validators' must be callable.",
hint=(
"validators[{i}] ({repr}) isn't a function or "
"instance of a validator class.".format(
i=i, repr=repr(validator),
)
),
obj=self,
id='fields.E008',
)
)
return errors
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be
used by Django.
"""
return sql, params
def deconstruct(self):
"""
Return enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class() has
been run.
* The import path of the field, including the class, e.g.
django.db.models.IntegerField. This should be the most portable
version, so less specific may be better.
* A list of positional arguments.
* A dict of keyword arguments.
Note that the positional or keyword arguments must contain values of
the following types (including inner values of collection types):
* None, bool, str, int, float, complex, set, frozenset, list, tuple,
dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their
full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this
time, just ensure that the resulting field is the same - prefer keyword
arguments over positional ones, and omit parameters with their default
values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": None,
"help_text": '',
"db_column": None,
"db_tablespace": None,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
"db_tablespace": "_db_tablespace",
}
equals_comparison = {"choices", "validators"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.abc.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
elif path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
elif path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
elif path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (self.name, path, [], keywords)
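    # Illustrative note (not part of Django's source): once the field has been
    # attached to a model, the returned 4-tuple looks roughly like
    #   ('title', 'django.db.models.CharField', [], {'max_length': 100})
    # where only keyword arguments that differ from their defaults appear in the
    # final dict.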
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.remote_field:
obj.remote_field = copy.copy(self.remote_field)
if hasattr(self.remote_field, 'field') and self.remote_field.field is self:
obj.remote_field.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
state = self.__dict__.copy()
# The _get_default cached_property can't be pickled due to lambda
# usage.
state.pop('_get_default', None)
return _empty, (self.__class__,), state
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
something else than None, then the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Convert the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Return the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
"""
Some validators can't be created at field initialization time.
This method provides a way to delay their creation until required.
"""
return [*self.default_validators, *self._validators]
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validate value and raise ValidationError if necessary. Subclasses
should override this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self.choices is not None and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python() and validate() are propagated. Return the correct
value if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type_parameters(self, connection):
return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_')
def db_check(self, connection):
"""
Return the database column check constraint for this field, for the
provided connection. Works the same way as db_type() for the case that
get_internal_type() does not map to a preexisting model field.
"""
data = self.db_type_parameters(connection)
try:
return connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
return None
def db_type(self, connection):
"""
Return the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = self.db_type_parameters(connection)
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. For example, this method is called by ForeignKey and OneToOneField
to determine its data type.
"""
return self.db_type(connection)
def cast_db_type(self, connection):
"""Return the data type to use in the Cast() function."""
db_type = connection.ops.cast_data_types.get(self.get_internal_type())
if db_type:
return db_type % self.db_type_parameters(connection)
return self.db_type(connection)
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return values
(type, checks). This will look at db_type(), allowing custom model
fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
@property
def db_tablespace(self):
return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
@property
def db_returning(self):
"""
Private API intended only to be used by Django itself. Currently only
the PostgreSQL backend supports returning multiple fields on a model.
"""
return False
def set_attributes_from_name(self, name):
self.name = self.name or name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, private_only=False):
"""
Register the field with the model class it belongs to.
If private_only is True, create a separate instance of this field
for every subclass of cls, even if cls is not an abstract model.
"""
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self, private=private_only)
if self.column:
# Don't override classmethods with the descriptor. This means that
# if you have a classmethod and a field with the same name, then
# such fields can't be deferred (we don't have a check for this).
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, self.descriptor_class(self))
if self.choices is not None:
if not hasattr(cls, 'get_%s_display' % self.name):
setattr(
cls,
'get_%s_display' % self.name,
partialmethod(cls._get_FIELD_display, field=self),
)
def get_filter_kwargs_for_object(self, obj):
"""
Return a dict that when passed as kwargs to self.model.filter(), would
yield all instances having the same value for this field as obj has.
"""
return {self.name: getattr(obj, self.attname)}
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""Return the default value for this field."""
return self._get_default()
@cached_property
def _get_default(self):
if self.has_default():
if callable(self.default):
return self.default
return lambda: self.default
if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls:
return return_None
return str # return empty string
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()):
"""
Return choices with a default blank choices included, for use
as <select> choices for this field.
"""
if self.choices is not None:
choices = list(self.choices)
if include_blank:
blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices)
if not blank_defined:
choices = blank_choice + choices
return choices
rel_model = self.remote_field.model
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
choice_func = operator.attrgetter(
self.remote_field.get_related_field().attname
if hasattr(self.remote_field, 'get_related_field')
else 'pk'
)
qs = rel_model._default_manager.complex_filter(limit_choices_to)
if ordering:
qs = qs.order_by(*ordering)
return (blank_choice if include_blank else []) + [
(choice_func(x), str(x)) for x in qs
]
def value_to_string(self, obj):
"""
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return str(self.value_from_object(obj))
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
if self.choices is None:
return []
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
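    # Illustrative note (not part of Django's source): grouped choices such as
    #   [('Audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]), ('unknown', 'Unknown')]
    # flatten to
    #   [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')]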
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""Return a django.forms.Field instance for this field."""
defaults = {
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices is not None:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial', 'disabled'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be either True or False.'),
'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'),
}
description = _("Boolean (Either True or False)")
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if self.null and value in self.empty_values:
return None
if value in (True, False):
# 1/0 are equal to True/False. bool() converts former to latter.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid_nullable' if self.null else 'invalid'],
code='invalid',
params={'value': value},
)
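    # Illustrative note (not part of Django's source): to_python() accepts
    # True/False, 1/0, and the strings 't'/'f', 'True'/'False', '1'/'0'; with
    # null=True, empty values come back as None, and anything else raises
    # ValidationError.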
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return self.to_python(value)
def formfield(self, **kwargs):
if self.choices is not None:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
form_class = forms.NullBooleanField if self.null else forms.BooleanField
# In HTML checkboxes, 'required' means "must be checked" which is
# different from the choices case ("must select some value").
# required=False allows unchecked checkboxes.
defaults = {'form_class': form_class, 'required': False}
return super().formfield(**{**defaults, **kwargs})
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_attribute(**kwargs),
]
def _check_max_length_attribute(self, **kwargs):
if self.max_length is None:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
obj=self,
id='fields.E120',
)
]
elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or
self.max_length <= 0):
return [
checks.Error(
"'max_length' must be a positive integer.",
obj=self,
id='fields.E121',
)
]
else:
return []
def cast_db_type(self, connection):
if self.max_length is None:
return connection.ops.cast_char_field_without_max_length
return super().cast_db_type(connection)
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
# TODO: Handle multiple backends with different feature flags.
if self.null and not connection.features.interprets_empty_strings_as_nulls:
defaults['empty_value'] = None
defaults.update(kwargs)
return super().formfield(**defaults)
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
system_check_removed_details = {
'msg': (
'CommaSeparatedIntegerField is removed except for support in '
'historical migrations.'
),
'hint': (
'Use CharField(validators=[validate_comma_separated_integer_list]) '
'instead.'
),
'id': 'fields.E901',
}
class DateTimeCheckMixin:
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_mutually_exclusive_options(),
*self._check_fix_default_value(),
]
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()]
enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid date format. It must be '
'in YYYY-MM-DD format.'),
'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) '
'but it is an invalid date.'),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(
cls, 'get_next_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True)
)
setattr(
cls, 'get_previous_by_%s' % self.name,
partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False)
)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateField,
**kwargs,
})
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid format. It must be in '
'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'),
'invalid_date': _("“%(value)s” value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _('“%(value)s” value has the correct format '
'(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) '
'but it is an invalid date/time.'),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
def get_prep_value(self, value):
value = super().get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_datetimefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DateTimeField,
**kwargs,
})
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be a decimal number.'),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
digits_errors = [
*self._check_decimal_places(),
*self._check_max_digits(),
]
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
obj=self,
id='fields.E134',
)
]
return []
@cached_property
def validators(self):
return super().validators + [
validators.DecimalValidator(self.max_digits, self.decimal_places)
]
@cached_property
def context(self):
return decimal.Context(prec=self.max_digits)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, float):
return self.context.create_decimal_from_float(value)
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_save(self, value, connection):
return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
return super().formfield(**{
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
**kwargs,
})
class DurationField(Field):
"""
Store timedelta objects.
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid format. It must be in '
'[DD] [[HH:]MM:]ss[.uuuuuu] format.')
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
return duration_microseconds(value)
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super().get_db_converters(connection)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.DurationField,
**kwargs,
})
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs.setdefault('max_length', 254)
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# We do not exclude max_length if it matches default as we want to change
# the default in future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.EmailField,
**kwargs,
})
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs.setdefault('max_length', 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_allowing_files_or_folders(**kwargs),
]
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'path': self.path() if callable(self.path) else self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
**kwargs,
})
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be a float.'),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return float(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.FloatField,
**kwargs,
})
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value must be an integer.'),
}
description = _("Integer")
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_max_length_warning(),
]
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with %s." % self.__class__.__name__,
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
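        # For instance (editorial note; exact bounds are backend-specific), if
        # integer_field_range() reports (-2147483648, 2147483647) for
        # "IntegerField", a MinValueValidator/MaxValueValidator pair for that
        # range is appended below unless equally strict validators were already
        # supplied.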
validators_ = super().validators
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None and not any(
(
isinstance(validator, validators.MinValueValidator) and (
validator.limit_value()
if callable(validator.limit_value)
else validator.limit_value
) >= min_value
) for validator in validators_
):
validators_.append(validators.MinValueValidator(min_value))
if max_value is not None and not any(
(
isinstance(validator, validators.MaxValueValidator) and (
validator.limit_value()
if callable(validator.limit_value)
else validator.limit_value
) <= max_value
) for validator in validators_
):
validators_.append(validators.MaxValueValidator(max_value))
return validators_
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
try:
return int(value)
except (TypeError, ValueError) as e:
raise e.__class__(
"Field '%s' expected a number but got %r." % (self.name, value),
) from e
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.IntegerField,
**kwargs,
})
class BigIntegerField(IntegerField):
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT,
**kwargs,
})
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_removed_details = {
'msg': (
'IPAddressField has been removed except for support in '
'historical migrations.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.E900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def get_internal_type(self):
return "IPAddressField"
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super().__init__(verbose_name, name, *args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_blank_and_null_values(**kwargs),
]
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
'GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.',
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length") == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value is None:
return None
if not isinstance(value, str):
value = str(value)
value = value.strip()
if ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_ipaddressfield_value(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return str(value)
def formfield(self, **kwargs):
return super().formfield(**{
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
**kwargs,
})
class NullBooleanField(BooleanField):
default_error_messages = {
'invalid': _('“%(value)s” value must be either None, True or False.'),
'invalid_nullable': _('“%(value)s” value must be either None, True or False.'),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
class PositiveIntegerRelDbTypeMixin:
def rel_db_type(self, connection):
"""
Return the data type that a related field pointing to this field should
use. In most cases, a foreign key pointing to a positive integer
primary key will have an integer column data type but some databases
(e.g. MySQL) have an unsigned integer type. In that case
(related_fields_match_type=True), the primary key should return its
db_type.
"""
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return IntegerField().db_type(connection=connection)
class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _('Positive big integer')
def get_internal_type(self):
return 'PositiveBigIntegerField'
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
return super().formfield(**{
'min_value': 0,
**kwargs,
})
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs):
self.allow_unicode = allow_unicode
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
if self.allow_unicode is not False:
kwargs['allow_unicode'] = self.allow_unicode
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.SlugField,
'allow_unicode': self.allow_unicode,
**kwargs,
})
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if isinstance(value, str) or value is None:
return value
return str(value)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
return super().formfield(**{
'max_length': self.max_length,
**({} if self.choices is not None else {'widget': forms.Textarea}),
**kwargs,
})
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('“%(value)s” value has an invalid format. It must be in '
'HH:MM[:ss[.uuuuuu]] format.'),
'invalid_time': _('“%(value)s” value has the correct format '
'(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super().__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
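            # For example, datetime.datetime(2020, 1, 1, 9, 30) becomes
            # datetime.time(9, 30); the date part is silently dropped.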
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_timefield_value(value)
def value_to_string(self, obj):
val = self.value_from_object(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.TimeField,
**kwargs,
})
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs.setdefault('max_length', 200)
super().__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
return super().formfield(**{
'form_class': forms.URLField,
**kwargs,
})
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
super().__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
return [*super().check(**kwargs), *self._check_str_default_value()]
def _check_str_default_value(self):
if self.has_default() and isinstance(self.default, str):
return [
checks.Error(
"BinaryField's default cannot be a string. Use bytes "
"content instead.",
obj=self,
id='fields.E170',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.editable:
kwargs['editable'] = True
else:
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_placeholder(self, value, compiler, connection):
return connection.ops.binary_placeholder_sql(value)
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super().get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super().get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(self.value_from_object(obj)).decode('ascii')
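    # Round-trip sketch (editorial example): value_to_string() turns b'\x00\x01'
    # into 'AAE=', and to_python('AAE=') below returns a memoryview over the
    # original two bytes.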
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, str):
return memoryview(b64decode(value.encode('ascii')))
return value
class UUIDField(Field):
default_error_messages = {
'invalid': _('“%(value)s” is not a valid UUID.'),
}
description = _('Universally unique identifier')
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super().__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_prep_value(self, value):
value = super().get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
if not isinstance(value, uuid.UUID):
value = self.to_python(value)
if connection.features.has_native_uuid_field:
return value
return value.hex
def to_python(self, value):
if value is not None and not isinstance(value, uuid.UUID):
input_form = 'int' if isinstance(value, int) else 'hex'
try:
return uuid.UUID(**{input_form: value})
except (AttributeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
return super().formfield(**{
'form_class': forms.UUIDField,
**kwargs,
})
class AutoFieldMixin:
db_returning = True
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_primary_key(),
]
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.auto_field, (
"Model %s can't have more than one auto-generated field."
% cls._meta.label
)
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class AutoFieldMeta(type):
"""
Metaclass to maintain backward inheritance compatibility for AutoField.
It is intended that AutoFieldMixin become public API when it is possible to
create a non-integer automatically-generated field using column defaults
stored in the database.
In many areas Django also relies on using isinstance() to check for an
automatically-generated field as a subclass of AutoField. A new flag needs
to be implemented on Field to be used instead.
When these issues have been addressed, this metaclass could be used to
deprecate inheritance from AutoField and use of isinstance() with AutoField
for detecting automatically-generated fields.
"""
@property
def _subclasses(self):
return (BigAutoField, SmallAutoField)
def __instancecheck__(self, instance):
return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)
def __subclasscheck__(self, subclass):
return subclass in self._subclasses or super().__subclasscheck__(subclass)
class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta):
def get_internal_type(self):
return 'AutoField'
def rel_db_type(self, connection):
return IntegerField().db_type(connection=connection)
class BigAutoField(AutoFieldMixin, BigIntegerField):
def get_internal_type(self):
return 'BigAutoField'
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
class SmallAutoField(AutoFieldMixin, SmallIntegerField):
def get_internal_type(self):
return 'SmallAutoField'
def rel_db_type(self, connection):
return SmallIntegerField().db_type(connection=connection)
|
the-stack_0_14623 | data = (
'jjwaels', # 0x00
'jjwaelt', # 0x01
'jjwaelp', # 0x02
'jjwaelh', # 0x03
'jjwaem', # 0x04
'jjwaeb', # 0x05
'jjwaebs', # 0x06
'jjwaes', # 0x07
'jjwaess', # 0x08
'jjwaeng', # 0x09
'jjwaej', # 0x0a
'jjwaec', # 0x0b
'jjwaek', # 0x0c
'jjwaet', # 0x0d
'jjwaep', # 0x0e
'jjwaeh', # 0x0f
'jjoe', # 0x10
'jjoeg', # 0x11
'jjoegg', # 0x12
'jjoegs', # 0x13
'jjoen', # 0x14
'jjoenj', # 0x15
'jjoenh', # 0x16
'jjoed', # 0x17
'jjoel', # 0x18
'jjoelg', # 0x19
'jjoelm', # 0x1a
'jjoelb', # 0x1b
'jjoels', # 0x1c
'jjoelt', # 0x1d
'jjoelp', # 0x1e
'jjoelh', # 0x1f
'jjoem', # 0x20
'jjoeb', # 0x21
'jjoebs', # 0x22
'jjoes', # 0x23
'jjoess', # 0x24
'jjoeng', # 0x25
'jjoej', # 0x26
'jjoec', # 0x27
'jjoek', # 0x28
'jjoet', # 0x29
'jjoep', # 0x2a
'jjoeh', # 0x2b
'jjyo', # 0x2c
'jjyog', # 0x2d
'jjyogg', # 0x2e
'jjyogs', # 0x2f
'jjyon', # 0x30
'jjyonj', # 0x31
'jjyonh', # 0x32
'jjyod', # 0x33
'jjyol', # 0x34
'jjyolg', # 0x35
'jjyolm', # 0x36
'jjyolb', # 0x37
'jjyols', # 0x38
'jjyolt', # 0x39
'jjyolp', # 0x3a
'jjyolh', # 0x3b
'jjyom', # 0x3c
'jjyob', # 0x3d
'jjyobs', # 0x3e
'jjyos', # 0x3f
'jjyoss', # 0x40
'jjyong', # 0x41
'jjyoj', # 0x42
'jjyoc', # 0x43
'jjyok', # 0x44
'jjyot', # 0x45
'jjyop', # 0x46
'jjyoh', # 0x47
'jju', # 0x48
'jjug', # 0x49
'jjugg', # 0x4a
'jjugs', # 0x4b
'jjun', # 0x4c
'jjunj', # 0x4d
'jjunh', # 0x4e
'jjud', # 0x4f
'jjul', # 0x50
'jjulg', # 0x51
'jjulm', # 0x52
'jjulb', # 0x53
'jjuls', # 0x54
'jjult', # 0x55
'jjulp', # 0x56
'jjulh', # 0x57
'jjum', # 0x58
'jjub', # 0x59
'jjubs', # 0x5a
'jjus', # 0x5b
'jjuss', # 0x5c
'jjung', # 0x5d
'jjuj', # 0x5e
'jjuc', # 0x5f
'jjuk', # 0x60
'jjut', # 0x61
'jjup', # 0x62
'jjuh', # 0x63
'jjweo', # 0x64
'jjweog', # 0x65
'jjweogg', # 0x66
'jjweogs', # 0x67
'jjweon', # 0x68
'jjweonj', # 0x69
'jjweonh', # 0x6a
'jjweod', # 0x6b
'jjweol', # 0x6c
'jjweolg', # 0x6d
'jjweolm', # 0x6e
'jjweolb', # 0x6f
'jjweols', # 0x70
'jjweolt', # 0x71
'jjweolp', # 0x72
'jjweolh', # 0x73
'jjweom', # 0x74
'jjweob', # 0x75
'jjweobs', # 0x76
'jjweos', # 0x77
'jjweoss', # 0x78
'jjweong', # 0x79
'jjweoj', # 0x7a
'jjweoc', # 0x7b
'jjweok', # 0x7c
'jjweot', # 0x7d
'jjweop', # 0x7e
'jjweoh', # 0x7f
'jjwe', # 0x80
'jjweg', # 0x81
'jjwegg', # 0x82
'jjwegs', # 0x83
'jjwen', # 0x84
'jjwenj', # 0x85
'jjwenh', # 0x86
'jjwed', # 0x87
'jjwel', # 0x88
'jjwelg', # 0x89
'jjwelm', # 0x8a
'jjwelb', # 0x8b
'jjwels', # 0x8c
'jjwelt', # 0x8d
'jjwelp', # 0x8e
'jjwelh', # 0x8f
'jjwem', # 0x90
'jjweb', # 0x91
'jjwebs', # 0x92
'jjwes', # 0x93
'jjwess', # 0x94
'jjweng', # 0x95
'jjwej', # 0x96
'jjwec', # 0x97
'jjwek', # 0x98
'jjwet', # 0x99
'jjwep', # 0x9a
'jjweh', # 0x9b
'jjwi', # 0x9c
'jjwig', # 0x9d
'jjwigg', # 0x9e
'jjwigs', # 0x9f
'jjwin', # 0xa0
'jjwinj', # 0xa1
'jjwinh', # 0xa2
'jjwid', # 0xa3
'jjwil', # 0xa4
'jjwilg', # 0xa5
'jjwilm', # 0xa6
'jjwilb', # 0xa7
'jjwils', # 0xa8
'jjwilt', # 0xa9
'jjwilp', # 0xaa
'jjwilh', # 0xab
'jjwim', # 0xac
'jjwib', # 0xad
'jjwibs', # 0xae
'jjwis', # 0xaf
'jjwiss', # 0xb0
'jjwing', # 0xb1
'jjwij', # 0xb2
'jjwic', # 0xb3
'jjwik', # 0xb4
'jjwit', # 0xb5
'jjwip', # 0xb6
'jjwih', # 0xb7
'jjyu', # 0xb8
'jjyug', # 0xb9
'jjyugg', # 0xba
'jjyugs', # 0xbb
'jjyun', # 0xbc
'jjyunj', # 0xbd
'jjyunh', # 0xbe
'jjyud', # 0xbf
'jjyul', # 0xc0
'jjyulg', # 0xc1
'jjyulm', # 0xc2
'jjyulb', # 0xc3
'jjyuls', # 0xc4
'jjyult', # 0xc5
'jjyulp', # 0xc6
'jjyulh', # 0xc7
'jjyum', # 0xc8
'jjyub', # 0xc9
'jjyubs', # 0xca
'jjyus', # 0xcb
'jjyuss', # 0xcc
'jjyung', # 0xcd
'jjyuj', # 0xce
'jjyuc', # 0xcf
'jjyuk', # 0xd0
'jjyut', # 0xd1
'jjyup', # 0xd2
'jjyuh', # 0xd3
'jjeu', # 0xd4
'jjeug', # 0xd5
'jjeugg', # 0xd6
'jjeugs', # 0xd7
'jjeun', # 0xd8
'jjeunj', # 0xd9
'jjeunh', # 0xda
'jjeud', # 0xdb
'jjeul', # 0xdc
'jjeulg', # 0xdd
'jjeulm', # 0xde
'jjeulb', # 0xdf
'jjeuls', # 0xe0
'jjeult', # 0xe1
'jjeulp', # 0xe2
'jjeulh', # 0xe3
'jjeum', # 0xe4
'jjeub', # 0xe5
'jjeubs', # 0xe6
'jjeus', # 0xe7
'jjeuss', # 0xe8
'jjeung', # 0xe9
'jjeuj', # 0xea
'jjeuc', # 0xeb
'jjeuk', # 0xec
'jjeut', # 0xed
'jjeup', # 0xee
'jjeuh', # 0xef
'jjyi', # 0xf0
'jjyig', # 0xf1
'jjyigg', # 0xf2
'jjyigs', # 0xf3
'jjyin', # 0xf4
'jjyinj', # 0xf5
'jjyinh', # 0xf6
'jjyid', # 0xf7
'jjyil', # 0xf8
'jjyilg', # 0xf9
'jjyilm', # 0xfa
'jjyilb', # 0xfb
'jjyils', # 0xfc
'jjyilt', # 0xfd
'jjyilp', # 0xfe
'jjyilh', # 0xff
)
|
the-stack_0_14624 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
import hanshu
proapp=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测缺陷密度')
proapp0=hanshu.zyzh(proapp)
print(proapp0)
fig = plt.figure(5)
ax=fig.add_subplot(1,1,1,projection='3d')  # create a 3D axes
x=proapp['org_id']
y=proapp['yue'] # x-axis and y-axis data
z=proapp['rat'] # z-axis data
#
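# Editorial note (assumption about the data shape): plot_surface expects 2D
# grid arrays. If the three columns above are 1D, build a meshgrid first or
# use a triangulated surface instead, e.g.
#     ax.plot_trisurf(x, y, z, cmap=plt.cm.coolwarm, alpha=0.8)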
ax.plot_surface(x,y,z,rstride=2,cstride=1,cmap=plt.cm.coolwarm,alpha=0.8) # draw the 3D surface
ax.set_xlabel('x-name') # x-axis label
ax.set_ylabel('y-name') # y-axis label
ax.set_zlabel('z-name') # z-axis label
plt.savefig('12.png',dpi=400,bbox_inches='tight')
plt.show() |
the-stack_0_14625 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_dyn_principal_component_var [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_dyn_principal_component_var&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-dyn-pc-var).
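# Outline (editorial summary): simulate a bivariate VAR(1), compute its spectral
# density ktilde_x(omega), keep the top k_ principal components at each frequency,
# invert the resulting filter back to the time domain (h_f), and compare the
# filtered prediction x_pc_bar with the original path via an r-squared measure.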
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from arpym.statistics import simulate_var1, simulate_normal, multi_r2
from arpym.tools import transpose_square_root, add_logo
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-parameters)
n_ = 2 # number of target variables
k_ = 1 # number of factors
t_ = int(1e4) # length of VAR(1) process
j_ = int(1e2) # number of scenarios
delta_omega = 1e-3
sigma2 = np.eye(n_) # scale matrix
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step00): Setup parameters
# +
t_vec = np.arange(t_)
tau_vec = np.arange(-j_, j_+1)
omega_vec = np.arange(-np.pi, np.pi, delta_omega)
m_ = len(omega_vec)
gamma = (2 * np.random.rand(4) - 1) * 0.99
theta = gamma * np.pi / 2
b = np.array([[np.sin(theta[0]), 0],
[np.sin(theta[3])*np.sin(theta[2]),
np.sin(theta[3])*np.cos(theta[2])]])
mu_epsi = np.zeros(n_)
s_1 = np.cos(theta[0])
s_2 = np.cos(theta[3])
rho = np.sin(theta[1])
sigma2_epsi = np.array([[s_1**2, rho*s_1*s_2],
[rho*s_1*s_2, s_2**2]])
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step01): Simulate VAR(1) process
# +
mu_inf = np.linalg.solve(np.eye(n_) - b, mu_epsi)
sigma2_inf = np.linalg.solve(np.eye(n_**2) - np.kron(b, b),
sigma2.reshape(n_**2, 1)).reshape(n_, n_)
x_tnow = simulate_normal(mu_inf, sigma2_inf, 1).reshape(n_)
x = simulate_var1(x_tnow, b, mu_epsi, sigma2_epsi, t_, j_=1).squeeze()
mu_x = np.linalg.solve((np.eye(n_) - b), mu_epsi)
sigma2_x = np.linalg.solve(np.eye(n_ ** 2) - np.kron(b, b),
sigma2_epsi.reshape(n_ ** 2, 1)).reshape(n_, n_)
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step02): Compute spectral density
# +
ktilde_x = np.zeros((m_, n_, n_), dtype=complex)
sigma_epsi = transpose_square_root(sigma2_epsi)
for m in range(m_):
ktilde_x_temp = np.linalg.solve(np.eye(n_, dtype=complex) -
np.exp(-omega_vec[m]*1j) * b, sigma_epsi)
ktilde_x[m, :, :] = ktilde_x_temp @ ktilde_x_temp.conj().T
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step03): Principal components decomposition
# +
lam, e = np.linalg.eigh(ktilde_x)
lam_k = lam[:, -k_:][:, ::-1]
e_k = e[:, :, -k_:][:, :, ::-1]
sigma = transpose_square_root(sigma2)
beta_tilde_f = np.einsum('ij,ljk->lik', sigma, e_k)
gamma_tilde_f = np.einsum('ijk,kl->ijl',
e_k.conj().transpose((0, 2, 1)),
np.linalg.inv(sigma))
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step04): Computation of the filter h
# +
h_tilde_f = np.einsum('ijk,ikl->ijl', beta_tilde_f, gamma_tilde_f)
coef = np.exp(1j * np.outer(tau_vec, omega_vec))
h_f = np.real(np.tensordot(coef, h_tilde_f, axes=(1, 0)) *
delta_omega / (2 * np.pi))
gamma_f = np.tensordot(coef, gamma_tilde_f, axes=(1, 0)) * \
delta_omega / (2 * np.pi)
alpha_f = (np.eye(n_) - np.sum(h_f, axis=0)) @ mu_x
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step05): Compute the spectral density of predicted process
ktilde_x_pc_bar = np.einsum('ijk,ilk->ijl',
np.einsum('ijk,ikl->ijl', h_tilde_f, ktilde_x), h_tilde_f.conj())
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step06): Compute the principal components predicted process
# +
t_vec_pc = t_vec[tau_vec[-1]:-tau_vec[-1]]
t_pc = t_vec_pc.shape[0]
x_pc_bar = np.zeros((t_pc, n_), dtype=complex)
z_pc = np.zeros((t_pc, k_), dtype=complex)
for t in range(t_pc):
x_tau = x[t_vec_pc[t] + tau_vec, :][::-1, :]
x_pc_bar[t, :] = np.einsum('ijk,ik->j', h_f, x_tau) + alpha_f
z_pc[t, :] = np.einsum('ijk,ik->j', gamma_f, x_tau)
x_pc_bar = np.real(x_pc_bar)
z_pc = np.real(z_pc)
# -
# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step07): update times of original process x
x = x[t_vec_pc, :]
# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_dyn_principal_component_var-implementation-step08): Compute r-squared
u = x - x_pc_bar
sigma2_u = np.einsum('ijk,ilk->ijl',
np.einsum('ijk,ikl->ijl', np.eye(n_) - h_tilde_f, ktilde_x),
(np.eye(n_) - h_tilde_f).conj())
sigma2_u = np.sum(np.real(sigma2_u), axis=0) * delta_omega / (2 * np.pi)
r_2 = multi_r2(sigma2_u, sigma2_x, sigma2)
# ## Plots
# +
plt.style.use('arpm')
t_plot = t_vec_pc[1:150]
xlim = [t_plot[0], t_plot[-1]]
ylim = [-4, 4]
fig1, axes = plt.subplots(1, 2)
axes[0].plot(t_plot, x[t_plot, 0], 'b')
axes[0].plot(t_plot, x_pc_bar[t_plot, 0], 'r--')
axes[0].set_xlabel('$t$')
axes[0].set_ylabel('$x_1$')
axes[0].set_xlim(xlim)
axes[0].set_ylim(ylim)
axes[0].legend(['Process', 'Predicted process'])
axes[1].plot(t_plot, x[t_plot, 1], 'b')
axes[1].plot(t_plot, x_pc_bar[t_plot, 1], 'r--')
axes[1].set_xlabel('$t$')
axes[1].set_ylabel('$x_2$')
axes[1].set_xlim(xlim)
axes[1].set_ylim(ylim)
axes[1].legend(['Process', 'Predicted process'])
add_logo(fig1, size_frac_x=1/8)
plt.tight_layout()
fig2 = plt.figure()
gs = gridspec.GridSpec(1, 3, width_ratios=[1, 3, 1])
ax0 = plt.subplot(gs[0])
ax0.plot(ylim, ylim, 'k')
ax0.plot(x[t_plot, 0], x_pc_bar[t_plot, 0], 'r.')
ax0.set_xlabel('$x_1$')
ax0.set_ylabel('$\overline{x}_{1}^{pc}$')
ax0.set_xlim(ylim)
ax0.set_ylim(ylim)
ax1 = plt.subplot(gs[1])
ax1.plot(t_plot, z_pc[t_plot, 0], 'b')
ax1.set_xlabel('$t$')
ax1.set_ylabel('$Z^{pc}$')
ax1.set_xlim(xlim)
ax2 = plt.subplot(gs[2])
ax2.plot(ylim, ylim, 'k')
ax2.plot(x[t_plot, 1], x_pc_bar[t_plot, 1], 'r.')
ax2.set_xlabel('$x_2$')
ax2.set_ylabel('$\overline{x}_{2}^{pc}$')
ax2.set_xlim(ylim)
ax1.set_ylim(ylim)
add_logo(fig2, size_frac_x=1/4)
plt.tight_layout()
fig3, axes = plt.subplots(2, 4)
for i in range(2):
for j in range(2):
axes[i, j].plot(omega_vec, np.real(ktilde_x[:, i, j]), 'b')
axes[i, j].plot(omega_vec, np.imag(ktilde_x[:, i, j]), 'r')
axes[i, j].set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
axes[i, j].set_xticklabels(['$-\pi$', '$-\pi/2$',
'$0$', '$\pi$', '$\pi/2$'])
axes[i, j].set_ylabel(r'$[\tilde{k}_x(\omega)]_{'+str(i+1)+str(j+1)+'}$')
for j in range(2):
axes[i, j+2].plot(omega_vec, np.real(ktilde_x_pc_bar[:, i, j]), 'b')
axes[i, j+2].plot(omega_vec, np.imag(ktilde_x_pc_bar[:, i, j]), 'r')
axes[i, j+2].set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
axes[i, j+2].set_xticklabels(['$-\pi$', '$-\pi/2$',
'$0$', '$\pi$', '$\pi/2$'])
axes[i, j+2].set_ylabel(r'$[\tilde{k}_{\bar{x}}(\omega)]^{pc}_{'+str(i+1)+str(j+1)+'}$')
add_logo(fig3, size_frac_x=1/4, location=1)
plt.tight_layout()
|
the-stack_0_14630 | # Something, something words with digit N times
import re
pattern_word = re.compile(r'\b\w+\b')
pattern_sentence = re.compile(r'^[A-Z].*[\.\!\?]$')
criteria = list(input())
letter, times = criteria[0], int(criteria[1])
result = []
while True:
user_input = input()
if user_input == 'end':
break
sentence = re.search(pattern_sentence, user_input)
if sentence is not None:
words = re.finditer(pattern_word, sentence.group())
for word in words:
cur_word = word.group()
letter_count = cur_word.count(letter)
if letter_count == times:
result.append(cur_word)
print(*result, sep = ', ')
|
the-stack_0_14631 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: queriesOverTime.py #
# Tests: queries - Database
#
# Defect ID: none
#
# Programmer: Kathleen Bonnell
# Date: March 31, 2004
#
# Modifications:
#
# Hank Childs, Tue Apr 13 13:00:15 PDT 2004
# Rename surface area query.
#
# Kathleen Bonnell, Tue Apr 20 09:42:30 PDT 2004
# Added TestFilledBoundary.
#
# Kathleen Bonnell, Tue Apr 27 12:10:44 PDT 2004
# Added TestExpressions, TestOperators.
#
# Kathleen Bonnell, Thu Jun 24 09:49:35 PDT 2004
# Added TestTransientVariable.
#
# Kathleen Bonnell, Wed Jul 21 16:51:31 PDT 2004
# Added TestSpecifyTimeQueryWindow.
#
# Kathleen Bonnell, Wed Sep 8 10:53:58 PDT 2004
# Renamed 'WorldPick' as 'Pick'.
#
# Kathleen Bonnell, Mon Dec 20 15:54:04 PST 2004
# Changed 'Variable by Node' to 'PickByNode'.
#
# Kathleen Bonnell, Thu Jan 6 11:06:29 PST 2005
# Added TestTimeVaryingSIL.
#
# Kathleen Bonnell, Wed Mar 16 11:13:40 PST 2005
# Added TestQueryAfterQueryOverTime.
#
# Kathleen Bonnell, Wed Jul 6 16:21:34 PDT 2005
# Added TestMili.
#
# Kathleen Bonnell, Thu Nov 10 08:21:54 PST 2005
# Added TrajectoryByZone to TestMili.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
#
# Cyrus Harrison, Fri Feb 5 09:27:37 PST 2010
# Turn off color cycling to avoid possible propagation of error from
# one failed test to several.
#
# Kathleen Bonnell, Thu Mar 3 11:47:09 PST 2011
# Added MultiVarTimePick tests.
#
# Kathleen Biagas, Thu Jul 14 10:44:55 PDT 2011
# Use named arguments.
#
# Alister Maguire, Tue Oct 17 16:54:48 PDT 2017
# Added TestPickRangeTimeQuery
#
# Alister Maguire, Wed May 9 10:13:26 PDT 2018
# Added TestReturnValue.
#
# Alister Maguire, Wed May 30 14:16:28 PDT 2018
# Added tests for performing pick ranges over time with and
# without plotting and returning the curves.
#
# Alister Maguire, Wed May 22 08:49:30 PDT 2019
# Updated mili tests to reflect new plugin changes.
#
# Alister Maguire, Tue Oct 1 11:48:15 MST 2019
# Make sure to set use_actual_data to true when we want
# to use data from the pipeline output.
#
# Alister Maguire, Fri Oct 11 13:12:36 PDT 2019
# Added TestDirectDatabaseRoute. I also updated several tests to
# use actual data so that they continue to test the old QOT route.
#
# Kathleen Biagas, Thu Jan 30 13:37:50 MST 2020
# Added TestOperatorCreatedVar. (github bugs #2842, #3489).
#
# Alister Maguire, Tue Feb 25 13:46:24 PST 2020
# Added tests for handling vectors in the direct database route.
#
# Alister Maguire, Mon Mar 9 15:16:36 PDT 2020
# I've removed the use_actual_data flag for Pick queries as this
# is now handled internally.
#
# ----------------------------------------------------------------------------
RequiredDatabasePlugin(("PDB", "Mili", "SAMRAI"))
def InitAnnotation():
# Turn off most annotations
a = AnnotationAttributes()
a.axes2D.visible = 1
a.axes2D.xAxis.label.visible = 1
a.axes2D.yAxis.label.visible = 1
a.axes2D.xAxis.title.visible = 1
a.axes2D.yAxis.title.visible = 1
a.axes3D.triadFlag = 0
a.axes3D.bboxFlag = 0
a.userInfoFlag = 0
a.databaseInfoFlag = 0
a.legendInfoFlag = 0
a.backgroundMode = a.Solid
a.foregroundColor = (0, 0, 0, 255)
a.backgroundColor = (255, 255, 255, 255)
SetAnnotationAttributes(a)
def SetCurvePlotDefaults():
# Disable Color Cycling, default to a blue curve.
catts = CurveAttributes()
catts.lineWidth = 0
catts.color = (0, 0, 255, 255)
catts.showLabels = 1
catts.designator = ""
catts.showPoints = 0
catts.showLegend = 1
catts.cycleColors = 0
catts.renderMode = catts.RenderAsLines
SetDefaultPlotOptions(catts)
def TestAllTimeQueries():
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
# Do some database queries.
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("AllTimeQueries_01")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Volume")
SetActiveWindow(2);
Test("AllTimeQueries_02")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Min")
SetActiveWindow(2);
Test("AllTimeQueries_03")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Max")
SetActiveWindow(2);
Test("AllTimeQueries_04")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Variable Sum")
SetActiveWindow(2);
Test("AllTimeQueries_05")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Weighted Variable Sum")
SetActiveWindow(2);
Test("AllTimeQueries_06")
DeleteAllPlots()
SetActiveWindow(1)
pa = GetPickAttributes()
pa.doTimeCurve = 1
pa.timePreserveCoord = 0
SetPickAttributes(pa)
PickByNode(15947)
# reset some defaults
pa.doTimeCurve = 0
pa.timePreserveCoord = 1
SetPickAttributes(pa)
SetActiveWindow(2);
Test("AllTimeQueries_07")
# delete window 2
DeleteWindow()
# remove plots from window 1
DeleteAllPlots()
def TestFilledBoundary():
# bug '4708
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("FilledBoundary", "Material")
DrawPlots()
TurnMaterialsOff(("1 barrier", "2 water"))
SetActiveWindow(1)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("FBTimeQuery_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "pressure")
DrawPlots()
TurnMaterialsOff(("1 barrier", "2 water"))
QueryOverTime("3D surface area")
SetActiveWindow(2)
Test("FBTimeQuery_02")
# delete window 2
DeleteWindow()
# remove plots from window 1
TurnMaterialsOn()
DeleteAllPlots()
def TestOperators():
# bug '4818
OpenDatabase(silo_data_path("wave*.silo database"))
AddPlot("Pseudocolor", "pressure")
AddOperator("Isovolume")
iso = IsovolumeAttributes()
iso.lbound = 0.1
iso.ubound = 1.0
SetOperatorOptions(iso)
DrawPlots()
SetActiveWindow(1)
QueryOverTime("Volume", stride=10, use_actual_data=1)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_ops_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "mesh_quality/jacobian")
AddOperator("Slice")
slice = SliceAttributes()
slice.axisType = slice.Arbitrary
slice.normal = (-0.689, -0.0416, 0.7233)
slice.originType = slice.Point
slice.originPoint = (2.0011, -0.4084, -1.1279)
slice.upAxis = (-0.08584, 0.996007, -0.0245)
slice.project2d = 1
SetOperatorOptions(slice)
DrawPlots()
QueryOverTime("2D area", stride=10, use_actual_data=1)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_ops_02")
# prepare for next test-set
# delete plots from window 2 & l
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestExpressions():
#bug '4784
OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
AddPlot("Pseudocolor", "mesh/ireg")
pa = PseudocolorAttributes()
pa.minFlag = 1
pa.maxFlag = 1
pa.min = 1
pa.max = 4
SetPlotOptions(pa)
DrawPlots()
pt = (4., 3., 0.)
pick = GetPickAttributes()
pick.doTimeCurve = 1
SetPickAttributes(pick)
Pick(pt)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_expr_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
# test a scalar expression
OpenDatabase(silo_data_path("wave*.silo database"))
DefineScalarExpression("p2", "pressure*pressure")
AddPlot("Pseudocolor", "p2")
DrawPlots()
QueryOverTime("Variable Sum", stride=10)
SetActiveWindow(2)
Test("TimeQuery_expr_02")
# prepare for next test-set
# delete plots from window 2 & l
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("pdb_test_data/dbA00.pdb"))
DefineScalarExpression("m", "matvf(material, 1)")
AddPlot("Pseudocolor", "m")
DrawPlots()
QueryOverTime("Variable Sum")
SetActiveWindow(2)
Test("TimeQuery_expr_03")
# prepare for next test-set
# delete plots from window 2 & l
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestTransientVariable():
#bug '4906
# Do what is necessary to get access to the transient variable,
# because QueryOverTime requires an active drawn plot.
db = silo_data_path("wave_tv*.silo database")
OpenDatabase(db)
SetTimeSliderState(17)
ReOpenDatabase(db)
AddPlot("Pseudocolor", "transient")
DrawPlots()
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Timestep
SetQueryOverTimeAttributes(qt)
QueryOverTime("Variable Sum")
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_trans_01")
DeleteAllPlots()
SetActiveWindow(1)
pick = GetPickAttributes()
pick.doTimeCurve = 1
pick.timePreserveCoord = 0
SetPickAttributes(pick)
PickByNode(327)
pick.doTimeCurve = 0
pick.timePreserveCoord = 1
SetPickAttributes(pick)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_trans_02")
# Prepare for next test
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestSpecifyTimeQueryWindow():
# bug '5163
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Timestep
SetQueryOverTimeAttributes(qt)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("SpecifyTimeQueryWindow_01")
DeleteAllPlots()
SetActiveWindow(1)
TurnMaterialsOff(("1 barrier"))
DrawPlots()
qot = GetQueryOverTimeAttributes()
qot.createWindow = 0
qot.windowId = 3
SetQueryOverTimeAttributes(qot)
QueryOverTime("3D surface area")
SetActiveWindow(3)
InitAnnotation()
Test("SpecifyTimeQueryWindow_02")
DeleteAllPlots()
SetActiveWindow(1)
TurnMaterialsOff(("2 water"))
DrawPlots()
qot.windowId = 2
SetQueryOverTimeAttributes(qot)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
Test("SpecifyTimeQueryWindow_03")
# Prepare for next test
DeleteAllPlots()
DeleteWindow()
SetActiveWindow(3)
DeleteWindow()
SetActiveWindow(1)
DeleteAllPlots()
def TestTimeVaryingSIL():
#bug '5473
OpenDatabase(data_path("samrai_test_data/sil_changes/dumps.visit"))
cfileName = "./temp.curve"
curveFile = open(cfileName, "wt")
curveFile.write("#3D surface area\n")
nframes = TimeSliderGetNStates()
for i in range(nframes):
Query("3D surface area")
val = GetQueryOutputValue()
curveFile.write("%g %g\n" % (i, val))
TimeSliderNextState()
curveFile.close()
AddWindow()
SetActiveWindow(2)
DeleteAllPlots()
OpenDatabase(cfileName)
AddPlot("Curve", "3D surface area")
DrawPlots()
SetActiveWindow(1)
# Go ahead and use default plot for now.
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Timestep
qt.createWindow = 0
qt.windowId = 2
SetQueryOverTimeAttributes(qt)
QueryOverTime("3D surface area")
SetActiveWindow(2)
InitAnnotation()
cv = GetViewCurve();
cv.domainCoords = (-0.534115, 10.5341)
cv.rangeCoords = (4029.87, 5856.13)
SetViewCurve(cv)
SetActivePlots((0, 1))
c = CurveAttributes()
c.showPoints = 1
SetPlotOptions(c)
Query("Area Between Curves")
s = GetQueryOutputString()
text = CreateAnnotationObject("Text2D")
text.text = s
text.height = 0.02
text.position = (0.55, 0.4)
Test("TimeQuery_sil_01")
text.Delete()
os.unlink(cfileName)
# Prepare for next test
DeleteAllPlots()
SetActiveWindow(2)
DeleteWindow()
SetActiveWindow(1)
DeleteAllPlots()
def TestQueryAfterQueryOverTime():
# bug '5823
OpenDatabase(silo_data_path("wave_tv.visit"))
SetTimeSliderState(17)
ReOpenDatabase(silo_data_path("wave_tv.visit"))
AddPlot("Pseudocolor", "transient")
DrawPlots()
QueryOverTime("Volume")
Query("Volume")
s = GetQueryOutputString()
QueryOverTime("Max")
Query("Max")
s = s + GetQueryOutputString()
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
# bug '6042
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
TurnMaterialsOn()
QueryOverTime("3D surface area", stride=10)
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
Query("3D surface area")
s = s + GetQueryOutputString()
TestText("QueryAfterQueryOverTime", s)
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestMili():
# bug '6430
OpenDatabase(data_path("mili_test_data/single_proc/m_plot.mili"))
AddPlot("Pseudocolor", "Primal/node/nodvel/vz")
DrawPlots()
ResetQueryOverTimeAttributes()
QueryOverTime("Volume")
SetActiveWindow(2)
ResetView()
InitAnnotation()
Test("TimeQuery_mili_01")
DeleteAllPlots()
SetActiveWindow(1)
QueryOverTime("Max")
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_mili_02")
DeleteAllPlots()
SetActiveWindow(1)
p = GetPickAttributes()
p.doTimeCurve = 1
p.timePreserveCoord = 0
SetPickAttributes(p)
NodePick(122, 161)
p.doTimeCurve = 0
SetPickAttributes(p)
SetActiveWindow(2)
InitAnnotation()
Test("TimeQuery_mili_03")
DeleteAllPlots()
SetActiveWindow(1)
qvars = ("Primal/shell/edv1", "Primal/shell/edv2")
QueryOverTime("TrajectoryByZone", element=242, vars=qvars)
SetActiveWindow(2)
ResetView()
InitAnnotation()
Test("TimeQuery_mili_04")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def MultiVarTimePick():
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "pressure")
DrawPlots()
pa = GetPickAttributes()
pa.doTimeCurve = 1
pa.timeCurveType = pa.Single_Y_Axis
SetPickAttributes(pa)
vars =("pressure", "v", "direction_magnitude")
PickByNode(8837, vars)
SetActiveWindow(2);
InitAnnotation()
Test("TimePick_MultiVar_01")
DeleteAllPlots()
SetActiveWindow(1)
pa.timeCurveType = pa.Multiple_Y_Axes
SetPickAttributes(pa)
PickByNode(8837, vars)
SetActiveWindow(2);
Test("TimePick_MultiVar_02")
DeleteAllPlots()
# remove plots from window 1
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/single_proc/m_plot.mili"))
AddPlot("Pseudocolor", "Primal/shell/inteng")
DrawPlots()
pa.timePreserveCoord = 0
pa.timeCurveType = pa.Single_Y_Axis
SetPickAttributes(pa)
vars = ("default", "Primal/shell/normal_magnitude")
PickByZone(233, vars)
SetActiveWindow(2);
Test("TimePick_MultiVar_03")
DeleteAllPlots()
SetActiveWindow(1)
pa.timeCurveType = pa.Multiple_Y_Axes
SetPickAttributes(pa)
PickByZone(233, vars)
SetActiveWindow(2);
Test("TimePick_MultiVar_04")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestPickRangeTimeQuery():
OpenDatabase(silo_data_path("wave_tv.visit"))
SetTimeSliderState(17)
AddPlot("Pseudocolor", "v")
DrawPlots()
pickAtts = GetPickAttributes()
pickAtts.doTimeCurve = 0
pickAtts.variables = ("default", "v")
pickAtts.timeCurveType = pickAtts.Single_Y_Axis
SetPickAttributes(pickAtts)
#
# Return the curves without plotting, and show
# highlights.
#
pickAtts.showPickHighlight = 1
SetPickAttributes(pickAtts)
options = {}
options["pick_range"] = "100-105, 100, 1"
options["do_time"] = 0
options["return_curves"] = 1
output_dict = PickByZone(options)
s = str(output_dict)
Test("TimePickRange_00")
TestText("TimePickRangeDict_00",s)
ClearPickPoints()
#
# Plot the curves, but don't return them.
#
pickAtts.showPickHighlight = 0
SetPickAttributes(pickAtts)
options = {}
options["pick_range"] = "100-105, 100, 1"
options["do_time"] = 1
options["return_curves"] = 0
options["start_time"] = 10
options["end_time"] = 14
options["stride"] = 2
output_dict = PickByNode(options)
s = str(output_dict)
SetActiveWindow(2)
Test("TimePickRange_01")
TestText("TimePickRangeDict_01",s)
ClearPickPoints()
SetActiveWindow(1)
#
# Plot the curves, and return them.
#
pickAtts.showPickHighlight = 0
SetPickAttributes(pickAtts)
options = {}
options["pick_range"] = "100-105"
options["do_time"] = 1
options["return_curves"] = 1
options["start_time"] = 20
options["end_time"] = 60
options["stride"] = 2
output_dict = PickByNode(options)
s = str(output_dict)
SetActiveWindow(2)
Test("TimePickRange_02")
TestText("TimePickRangeDict_02",s)
SetActiveWindow(1)
ClearPickPoints()
DeleteAllPlots()
ResetPickLetter()
SetActiveWindow(1)
ClearPickPoints()
DeleteAllPlots()
ResetPickLetter()
def TestReturnValue():
#
# There used to be a bug where the return value
# from previous picks would propagate to the following
# time query. Let's make sure this isn't re-introduced.
#
OpenDatabase(silo_data_path("wave.visit"))
AddPlot("Pseudocolor", "v")
DrawPlots()
pickAtts = GetPickAttributes()
pickAtts.doTimeCurve = 0
pickAtts.variables = ("default", "v")
pickAtts.timeCurveType = pickAtts.Single_Y_Axis
SetPickAttributes(pickAtts)
time1 = NodePick(coord=(3, .5, 3), do_time=1, start_time=0, end_time=70)
no_time = NodePick(coord=(2, .2, 2), do_time=0)
time2 = NodePick(coord=(3, .5, 3), do_time=1, start_time=0, end_time=70)
AssertEqual("Pick Updated", type(time1), type(time2))
ClearPickPoints()
DeleteAllPlots()
ResetPickLetter()
def TestDirectDatabaseRoute():
#
# Cleanup any plots that haven't been deleted yet.
#
SetActiveWindow(2)
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/single_proc/d3samp6_10_longrun.plt.mili"))
AddPlot("Pseudocolor", "Primal/Shared/edrate")
DrawPlots()
element = 116
domain = 0
preserve = 0
start = 0
stride = 1
stop = 10000
vars = ("default")
#
# First, let's time the query. This is hard to predict because it depends
# on the machine's architecture, but we can make an educated guess. The direct
# route should take under a second, and the old route should take at least
# 30 seconds. We'll give ourselves a threshold of 10 seconds to be safe.
#
import time
thresh = 10
timer_start = time.time()
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
timer_stop = time.time()
res = timer_stop - timer_start
AssertLTE("Timing Direct Database Query", res, thresh)
SetActiveWindow(2)
Test("Direct_Database_Route_00")
DeleteAllPlots()
SetActiveWindow(1)
#
# Like the original QOT, the direct route creates a clone, but this clone
# differs in that its resulting dataset will NOT match the original dataset.
# Let's make sure the active dataset is being updated to the old plot by
# performing a new pick (not through time).
#
PickByZone(do_time=0, domain=domain, element=element)
Test("Direct_Database_Route_01")
#
# Test basic range settings.
#
start = 100
stop = 900
stride = 10
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
stride = 1
start = 0
stop = 10000
SetActiveWindow(2)
Test("Direct_Database_Route_02")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
AddPlot("Pseudocolor", "Primal/node/nodacc/ax")
DrawPlots()
# This tests two things:
# 1. Plotting a node pick curve.
# 2. Using a direct route query on magnitude expression.
#
vars=("Primal/node/nodacc_magnitude")
PickByNode(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_03")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/single_proc/m_plot.mili"))
AddPlot("Pseudocolor", "Primal/brick/stress/sx")
DrawPlots()
#
# Test plotting multiple variables at once.
#
element = 489
vars=("Primal/brick/stress/sz", "Primal/brick/stress/sx")
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_04")
DeleteAllPlots()
SetActiveWindow(1)
#
# Testing the multi curve plot.
#
PickByZone(curve_plot_type=1, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_05")
DeleteAllPlots()
SetActiveWindow(1)
#
# Test multi-domain data.
#
DeleteAllPlots()
OpenDatabase(data_path("mili_test_data/multi_proc/d3samp6.plt.mili"))
AddPlot("Pseudocolor", "Primal/Shared/edrate")
DrawPlots()
domain = 1
element = 11
vars = ("default")
PickByZone(curve_plot_type=0, vars=vars, do_time=1, domain=domain, element=element,
preserve_coord=preserve, end_time=stop, start_time=start, stride=stride)
SetActiveWindow(2)
Test("Direct_Database_Route_06")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
#
# Now let's test a variable that is not defined on all
# timesteps.
#
db = silo_data_path("wave_tv*.silo database")
OpenDatabase(db)
SetTimeSliderState(17)
ReOpenDatabase(db)
AddPlot("Pseudocolor", "transient")
DrawPlots()
pick = GetPickAttributes()
pick.doTimeCurve = 1
pick.timePreserveCoord = 0
SetPickAttributes(pick)
PickByNode(element=327)
pick.doTimeCurve = 0
pick.timePreserveCoord = 1
SetPickAttributes(pick)
SetActiveWindow(2)
InitAnnotation()
Test("Direct_Database_Route_07")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
#
# Next, let's test a vector plot. The vectors should be reduced
# to their magnitudes.
#
AddPlot("Vector", "direction")
DrawPlots()
pick = GetPickAttributes()
pick.doTimeCurve = 1
pick.timePreserveCoord = 0
SetPickAttributes(pick)
PickByNode(element=10)
SetActiveWindow(2)
InitAnnotation()
Test("Direct_Database_Route_08")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
def TestOperatorCreatedVar():
OpenDatabase(silo_data_path("wave.visit"))
DefineVectorExpression("normals", "cell_surface_normal(quadmesh)")
AddPlot("Pseudocolor", "operators/Flux/quadmesh")
fluxAtts = FluxAttributes()
fluxAtts.flowField = "direction"
SetOperatorOptions(fluxAtts)
AddOperator("Slice")
sliceAtts = SliceAttributes()
sliceAtts.axisType = sliceAtts.Arbitrary
sliceAtts.normal = (0, 1, 0)
sliceAtts.originType = sliceAtts.Percent
sliceAtts.originPercent = 50
sliceAtts.project2d = 0
SetOperatorOptions(sliceAtts)
AddOperator("DeferExpression")
deferAtts = DeferExpressionAttributes()
deferAtts.exprs = ("normals")
SetOperatorOptions(deferAtts)
# we want slice before flux, so demote it
DemoteOperator(1)
DrawPlots()
qt = GetQueryOverTimeAttributes()
qt.timeType = qt.Cycle
SetQueryOverTimeAttributes(qt)
QueryOverTime("Weighted Variable Sum")
SetActiveWindow(2)
InitAnnotation()
Test("OperatorCreatedVar_01")
DeleteAllPlots()
SetActiveWindow(1)
DeleteAllPlots()
DeleteExpression("normals")
CloseDatabase(silo_data_path("wave.visit"))
def TimeQueryMain():
TestAllTimeQueries()
TestFilledBoundary()
TestOperators()
TestExpressions()
TestTransientVariable()
TestSpecifyTimeQueryWindow()
TestTimeVaryingSIL()
TestQueryAfterQueryOverTime()
TestMili()
MultiVarTimePick()
TestPickRangeTimeQuery()
TestReturnValue()
TestDirectDatabaseRoute()
TestOperatorCreatedVar()
# main
InitAnnotation()
SetCurvePlotDefaults()
TimeQueryMain()
Exit()
|
the-stack_0_14633 | from pydub import AudioSegment
import requests
import easygui
# get the stuff for making the mp3
text = easygui.enterbox(msg='Enter the text for the spooky man to say.', title='Damon, I love you!', default='', strip=True)
headers = {
'Connection': 'keep-alive',
'Accept': '*/*',
'Origin': 'https://fasthub.net',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://fasthub.net/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
}
# fasthub.net macintalk voice whisper
data = {
'text': text,
'lang': 'en-us en en-US',
'langTrans': 'en-us en en-US',
'voiceType': 'whisper',
'amplitude': '109',
'pitch': '51',
'speed': '80',
'repeat': '0'
}
response = requests.post('https://fasthub.net/plauder', headers=headers, data=data)
mp3stop = response.text.split('#')
mp3url = 'https://fasthub.net/speak/' + mp3stop[0] + '.mp3'
mp3 = requests.get(mp3url, allow_redirects=True)
with open('mp3ofVoice.mp3', 'wb') as mp3_file:
    mp3_file.write(mp3.content)
#Put it together
voice = easygui.fileopenbox(title='Choose speech audio')
mp3fromweb = AudioSegment.from_mp3("mp3ofVoice.mp3")
mp3voice = AudioSegment.from_mp3(voice)
mp3guitar = AudioSegment.from_mp3("guitarwail.mp3")
length=len(mp3voice)
combined = mp3guitar.overlay(mp3voice, gain_during_overlay=-12)
final = mp3fromweb + combined
gaming = final.export(text+".mp3", format="mp3") |
the-stack_0_14634 | #!/usr/bin/env python
# encoding: utf-8
# Modifications copyright Amazon.com, Inc. or its affiliates.
# Carlos Rafael Giani, 2006 (dv)
# Tamas Pal, 2007 (folti)
# Nicolas Mercier, 2009
# Matt Clarkson, 2012
import os, sys, re, tempfile
from waflib import Utils, Task, Logs, Options, Errors
from waflib.Logs import debug, warn
from waflib.Tools import c_preproc, ccroot, c, cxx, ar
from waflib.Configure import conf
from waflib.TaskGen import feature, after, after_method, before_method
import waflib.Node
# The compiler will issue a warning if some flags are specified more than once.
# The command is constructed from subsets that may have conflicting flags
# This list of lists contains all the set of flags that are made unique
UNIQUE_FLAGS_LIST = [
["/arch:IA32", "/arch:SSE", "/arch:SSE2", "/arch:AVX", "/arch:AVX2"], # code gen for arch
["/clr", "/clr:initialAppDomain", "/clr:noAssembly", "/clr:nostdlib", "/clr:pure", "/clr:safe"], # common language runtime
["/EHs", "/EHa", "/EHac", "/EHsc"], # exception handling
["/errorReport:none", "/errorReport:prompt", "/errorReport:queue", "/errorReport:send"], # report internal compiler errors
["/favor:blend", "/favor:ATOM", "/favor:AMD64", "/favor:INTEL64"], # optimize for arch
["/fp:precise", "/fp:except", "/fp:except-", "/fp:fast", "/fp:strict"], # floating point behavior
["/Gd", "/Gr", "/Gv", "/Gz"], # calling convention
["/GL", "/GL-"], # whole program optimization
["/GR", "/GR-"], # runtime type information
["/GS", "/GS-"], # buffer security checks
["/Gs", "/Gs0", "/Gs4096"], # control stack checking calls
["/Gw", "/Gw-"], # global data optimization
["/Gy", "/Gy-"], # enable function level linking
["/O1", "/O2", "/Od", "/Ox"], # optimization level
["/Ob0", "/Ob1", "/Ob2"], # inline expansion
["/Oi", "/Oi-"], # intrinsics
["/Os", "/Ot"], # favor small code/ fast code
["/Oy", "/Oy-"], # frame pointer omission
["/MD", "/MT", "/LD", "/MDd", "/MTd", "/LDd"], # runtime library
["/RTC1","/RTCc","/RTCs","/RTCu"], # runtime error checks
["/volatile","/volatile:iso", "/volatile:ms"], # volatile keyword handling
["/vd0", "/vd1", "/vd2"], # disable construction displacements
["/ZW", "/ZW:nostdlib"], # windows runtime compilation
["/sdl", "/sdl-"], # enable additional security checks
["/vmb", "/vmg"], # always declare a class before using a pointer to it
["/vmm", "/vms", "/vmv"], # inheritance of yet-to-be-defined classes
["/W0", "/W1", "/W2", "/W3", "/W4"], # error level
["/WX", "/WX-", "/WX:NO"], # treat warnings as errors
["/Z7", "/Zi", "/ZI"], # debug information format
["/Za", "/Ze"], # disable language extensions
["/Zc:forScope", "/Zc:forScope-"], # for loop scope conformance
["/Zc:wchar_t", "/Zc:wchar_t-"], # wchar_t maps to __wchar_t
["/Zc:auto", "/Zc:auto-"], # deduce variable type
["/Zc:trigraphs", "/Zc:trigraphs-"], # character substitutions if character isn't in charpage
["/Zc:rvalueCast", "/Zc:rvalueCast-"], # enforce type conversion rules
["/Zc:strictStrings", "/Zc:strictStrings-"], # disable string literal type conversion
["/Zc:inline", "/Zc:inline-"], # remove unreferenced comdat sections
["/Zp", "/Zp:1", "/Zp:2", "/Zp:4", "/Zp:8", "/Zp:16"] # struct member alignment
]
# convert list of flags that must be unique to dictionary
UNIQUE_FLAGS_DICT = {}
for idx, flags in enumerate(UNIQUE_FLAGS_LIST):
assert(isinstance(flags,list))
for flag in flags:
UNIQUE_FLAGS_DICT[flag] = idx # all flags from the list have the same value, just using index as a dummy unique val
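# Illustrative note (not from the original tool): with the grouping above, lookups such as
# UNIQUE_FLAGS_DICT["/O1"] and UNIQUE_FLAGS_DICT["/O2"] return the same group index, which is
# what lets strip_all_but_last_dependent_options() further below collapse conflicting flags
# down to the last one that was appended.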
def exec_mf(self):
"""
Create the manifest file
"""
env = self.env
mtool = env['MT']
if not mtool:
return 0
self.do_manifest = False
outfile = self.outputs[0].abspath()
manifest = None
for out_node in self.outputs:
if out_node.name.endswith('.manifest'):
manifest = out_node.abspath()
break
if manifest is None:
# Should never get here. If we do, it means the manifest file was
# never added to the outputs list, thus we don't have a manifest file
# to embed, so we just return.
return 0
# embedding mode. Different for EXE's and DLL's.
# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
mode = ''
if 'cprogram' in self.generator.features or 'cxxprogram' in self.generator.features:
mode = '1'
elif 'cshlib' in self.generator.features or 'cxxshlib' in self.generator.features:
mode = '2'
debug('msvc: embedding manifest in mode %r' % mode)
lst = []
lst.append(env['MT'])
lst.extend(Utils.to_list(env['MTFLAGS']))
lst.extend(['-manifest',manifest])
if hasattr(self.generator, 'additional_manifests'):
if not isinstance(self.generator.additional_manifests, list): # the additional manifests could be a string
self.generator.additional_manifests = [self.generator.additional_manifests]
for element in self.generator.additional_manifests: # add each one with its own path
lst.append( self.generator.path.abspath() + '/' + element)
lst.append('-outputresource:%s;%s' % (outfile, mode))
# note that because we call exec_command and give it a list of params, these become the subprocess argv*
# and thus it is not necessary for us to escape them with quotes or anything like that.
lst = [lst]
return self.exec_command(*lst)
def quote_response_command(self, flag):
flag = flag.replace('\\', '\\\\') # escape any backslashes
flag = flag.replace('"', '\\"') # escape any quotes
if flag.find(' ') > -1:
for x in ('/LIBPATH:', '/IMPLIB:', '/OUT:', '/I'):
if flag.startswith(x):
flag = '%s"%s"' % (x, flag[len(x):])
break
else:
flag = '"%s"' % flag
return flag
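# Rough illustration (the path is made up): a flag like /LIBPATH:C:\Program Files\SDK\lib
# contains a space, so after the backslash/quote escaping it is returned as
# /LIBPATH:"C:\\Program Files\\SDK\\lib", quoted so the response file treats it as one
# argument; flags without spaces only get the escaping.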
def exec_response_command(self, cmd, **kw):
# not public yet
try:
tmp = None
if sys.platform.startswith('win') and isinstance(cmd, list) and len(' '.join(cmd)) >= 16384:
tmp_files_folder = self.generator.bld.get_bintemp_folder_node().make_node('TempFiles')
program = cmd[0] #unquoted program name, otherwise exec_command will fail
cmd = [self.quote_response_command(x) for x in cmd]
# Determine an appropriate filename for the output file (displayed by Incredibuild)
if self.outputs and len(self.outputs[0].abspath()):
tmp_file_name = os.path.basename(self.outputs[0].abspath())
else:
# strips quotes off the FRONT in case it's a string like '"something="somethingelse"' which would cause issues
out_file = os.path.split(cmd[-1].strip('"'))
tmp_file_name = out_file[1]
(fd, tmp) = tempfile.mkstemp(prefix=tmp_file_name, dir=tmp_files_folder.abspath())
os.write(fd, '\r\n'.join(cmd[1:]).encode())
os.close(fd)
cmd = [program, '@' + tmp]
# no return here, that's on purpose
ret = self.generator.bld.exec_command(cmd, **kw)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass # anti-virus and indexers can keep the files open -_-
return ret
########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
def exec_command_msvc(self, *k, **kw):
"""
Change the command-line execution for msvc programs.
Instead of quoting all the paths and keep using the shell, we can just join the options msvc is interested in
"""
# If bullseye coverage tool is in the environment, and we are executing the CXX or C compiler, then
# we prefix with the bullseye coverage tool. Otherwise, just run the regular tools.
# Ideally we need a way to do this cleanly on other platforms, but this implies a post hook of some kind to change
# the CXX task run_str, and I could not immediately see a clean way to do that, especially conditionally as
# we need to do below.
if 'BULL_COVC' in self.env:
excluded_modules = getattr(self.generator.bld.options,'bullseye_excluded_modules',"").replace(' ', '').split(',')
if "" in excluded_modules:
excluded_modules.remove("")
# Figure out which package we are building, and check if it is in the list of packages we want coverage for
# If the package list is empty, then we do coverage building for the whole project.
# This applies to the CC/CXX steps. We must always link with coverage if we are going to get measurements.
included_modules = getattr(self.generator.bld.options,'bullseye_included_modules',"").replace(' ', '').split(',')
if "" in included_modules:
included_modules.remove("")
if self.generator.name not in excluded_modules and (not included_modules or self.generator.name in included_modules):
if k[0][0] == self.env['CXX'] or k[0][0] == self.env['CC']:
k = ([self.env['BULL_COVC'], '--file', self.env['BULL_COV_FILE']] + k[0],)
# We must link with bullseye regardless of which way the project is set (with or without coverage) to avoid link errors with included libraries.
if 'BULL_COVLINK' in self.env and (k[0][0] == self.env['LINK'] or k[0][0] == self.env['LINK_CXX'] or k[0][0] == self.env['LINK_CC']):
k = ([self.env['BULL_COVLINK']] + k[0],)
# 1) Join options that carry no space are joined e.g. /Fo FilePath -> /FoFilePath
# 2) Join options that carry a ':' as last character : e.g. /OUT: FilePath -> /OUT:FilePath
if isinstance(k[0], list):
lst = []
carry = ''
join_with_next_list_item = ['/Fo', '/doc', '/Fi', '/Fa']
for a in k[0]:
if a in join_with_next_list_item or a[-1] == ':':
carry = a
else:
lst.append(carry + a)
carry = ''
k = [lst]
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
ret = self.exec_response_command(k[0], **kw)
if not ret and getattr(self, 'do_manifest', None):
ret = self.exec_mf()
return ret
def wrap_class(class_name):
"""
Manifest file processing and @response file workaround for command-line length limits on Windows systems
The indicated task class is replaced by a subclass to prevent conflicts in case the class is wrapped more than once
"""
cls = Task.classes.get(class_name, None)
if not cls:
return
derived_class = type(class_name, (cls,), {})
def exec_command(self, *k, **kw):
if self.env['CC_NAME'] == 'msvc':
return self.exec_command_msvc(*k, **kw)
else:
return super(derived_class, self).exec_command(*k, **kw)
# Chain-up monkeypatch needed since exec_command() is in base class API
derived_class.exec_command = exec_command
# No chain-up behavior needed since the following methods aren't in
# base class API
derived_class.exec_response_command = exec_response_command
derived_class.quote_response_command = quote_response_command
derived_class.exec_command_msvc = exec_command_msvc
derived_class.exec_mf = exec_mf
return derived_class
for k in 'c cxx cprogram cxxprogram cshlib cxxshlib cstlib cxxstlib'.split():
wrap_class(k)
@feature('cxxprogram', 'cxxshlib', 'cprogram', 'cshlib', 'cxx', 'c')
@after_method('apply_incpaths')
@after_method('add_pch_to_dependencies')
def set_pdb_flags(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
if not self.bld.is_option_true('generate_debug_info'):
return
# find the last debug symbol type of [/Z7, /Zi, /ZI] applied in cxxflags.
last_debug_option = ''
for opt in reversed(self.env['CXXFLAGS']):
if opt in ['/Z7', '/Zi', '/ZI']:
last_debug_option = opt
break
if last_debug_option in ['/Zi', '/ZI']:
# Compute PDB file path
pdb_folder = self.path.get_bld().make_node(str(self.idx))
pdb_cxxflag = '/Fd{}'.format(pdb_folder.abspath())
# Make sure the PDB folder exists
pdb_folder.mkdir()
# Add CXX and C Flags
for t in getattr(self, 'compiled_tasks', []):
t.env.append_value('CXXFLAGS', pdb_cxxflag)
t.env.append_value('CFLAGS', pdb_cxxflag)
# Add PDB also to Precompiled header. pch_task is not in compiled_tasks
if getattr(self, 'pch_task', None):
self.pch_task.env.append_value('CXXFLAGS', pdb_cxxflag)
self.pch_task.env.append_value('CFLAGS', pdb_cxxflag)
def is_node_qt_rc_generated(self,node):
if node.is_child_of(self.bld.bldnode):
raw_name = os.path.splitext(os.path.basename(node.abspath()))[0]
if raw_name.endswith('_rc'):
return True
return False
@feature('cxx')
@before_method('process_source')
def add_pch_msvc(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
if Utils.unversioned_sys_platform() != 'win32':
return
# Create Task to compile PCH
if not getattr(self, 'pch', ''):
return
if not self.bld.is_option_true('use_precompiled_header'):
return
# Always assume only one PCH File
pch_source = self.to_nodes(self.pch)[0]
self.pch_header = pch_source.change_ext('.h')
self.pch_header_name = os.path.split(self.pch_header.abspath())[1]
# Generate PCH per target project idx
# Avoids the case where two project have the same PCH output path but compile the PCH with different compiler options i.e. defines, includes, ...
self.pch_file = pch_source.change_ext('.%d.pch' % self.idx)
self.pch_object = pch_source.change_ext('.%d.obj' % self.idx)
# Create PCH Task
self.pch_task = pch_task = self.create_task('pch_msvc', pch_source, [self.pch_object, self.pch_file])
pch_task.env.append_value('PCH_NAME', self.pch_header_name)
pch_task.env.append_value('PCH_FILE', '/Fp' + self.pch_file.abspath())
pch_task.env.append_value('PCH_OBJ', self.pch_object.abspath())
@feature('cxx')
@after_method('apply_incpaths')
def add_pch_to_dependencies(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
# Create Task to compile PCH
if not getattr(self, 'pch_object', ''):
return
pch_abs_path = self.pch_file.abspath()
pch_flag = '/Fp' + pch_abs_path
pch_header = '/Yu' + self.pch_header_name
# Append PCH File to each compile task
for t in getattr(self, 'compiled_tasks', []):
input_file = t.inputs[0].abspath()
file_specific_settings = self.file_specifc_settings.get(input_file, None)
if file_specific_settings and 'disable_pch' in file_specific_settings and file_specific_settings['disable_pch'] == True:
continue # Don't append PCH to files for which we don't use them
if getattr(t, 'disable_pch', False) == True:
continue # Don't append PCH to files for which we don't use them
if t.__class__.__name__ in ['cxx','qxx']: #Is there a better way to ensure cpp only?
if is_node_qt_rc_generated(self,t.inputs[0]):
t.env.append_value('CXXFLAGS', '/Y-')
else:
t.env.append_value('CXXFLAGS', pch_header)
t.env.append_value('CXXFLAGS', pch_flag)
# Append PCH to task input to ensure correct ordering
t.dep_nodes.append(self.pch_object)
# Append the pch object to the link task
if getattr(self, 'link_task', None):
self.link_task.inputs.append(self.pch_object)
class pch_msvc(waflib.Task.Task):
run_str = '${CXX} ${PCH_CREATE_ST:PCH_NAME} ${CXXFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${SRC} ${CXX_TGT_F}${PCH_OBJ} ${PCH_FILE}'
scan = c_preproc.scan
color = 'BLUE'
def exec_command(self, *k, **kw):
return exec_command_msvc(self, *k, **kw)
def exec_response_command(self, *k, **kw):
return exec_response_command(self, *k, **kw)
def quote_response_command(self, *k, **kw):
return quote_response_command(self, *k, **kw)
def exec_mf(self, *k, **kw):
return exec_mf(self, *k, **kw)
def strip_all_but_last_dependent_options(flags):
seen = set()
delete = []
for idx, flag in enumerate(reversed(flags)):
try:
val = UNIQUE_FLAGS_DICT[flag]
if val not in seen:
seen.add(val)
continue
# mark for delete
delete.append(len(flags) -1 -idx)
except:
pass
for idx in reversed(delete):
del flags[idx]
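# Hedged example (flag values are assumed, not taken from a real build): for
# flags = ["/W3", "/MD", "/O2", "/MT"], /MD and /MT share the runtime-library group, so
# strip_all_but_last_dependent_options(flags) deletes the earlier /MD in place and leaves
# ["/W3", "/O2", "/MT"]; /W3 and /O2 are the only members of their groups and are kept.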
def verify_options_common(env):
strip_all_but_last_dependent_options(env.CFLAGS)
strip_all_but_last_dependent_options(env.CXXFLAGS)
@feature('c', 'cxx')
@after_method('apply_link')
@after_method('add_pch_to_dependencies')
def verify_compiler_options_msvc(self):
if not 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
return
if Utils.unversioned_sys_platform() != 'win32':
return
# Verify compiler options (strip all but last for dependent options)
for t in getattr(self, 'compiled_tasks', []):
verify_options_common(t.env)
# Verify pch_task options (strip all but last for dependent options)
if hasattr(self, 'pch_task'):
verify_options_common(self.pch_task.env)
# Strip unsupported ARCH linker option
if hasattr(self, 'link_task'):
del self.link_task.env['ARCH']
#############################################################################
# Code for auto-recognition of Visual Studio Compiler and Windows SDK Path
# Taken from the original WAF code
#############################################################################
all_msvc_platforms = [ ('x64', 'amd64')]
"""List of msvc platforms"""
@conf
def auto_detect_msvc_compiler(conf, version, target, windows_kit):
conf.env['MSVC_VERSIONS'] = [version]
conf.env['MSVC_TARGETS'] = [target]
conf.autodetect(windows_kit, True)
conf.find_msvc()
@conf
def autodetect(conf, windows_kit, arch = False):
v = conf.env
if arch:
compiler, version, path, includes, libdirs, arch = conf.detect_msvc(windows_kit, True)
v['DEST_CPU'] = arch
else:
compiler, version, path, includes, libdirs = conf.detect_msvc(windows_kit)
v['PATH'] = path
v['INCLUDES'] = includes
v['LIBPATH'] = libdirs
v['MSVC_COMPILER'] = compiler
try:
v['MSVC_VERSION'] = float(version)
except Exception:
v['MSVC_VERSION'] = float(version[:-3])
@conf
def detect_msvc(conf, windows_kit, arch = False):
versions = get_msvc_versions(conf, windows_kit)
return setup_msvc(conf, versions, arch)
def setup_msvc(conf, versions, arch = False):
platforms = getattr(Options.options, 'msvc_targets', '').split(',')
if platforms == ['']:
platforms=Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms]
desired_versions = getattr(Options.options, 'msvc_version', '').split(',')
if desired_versions == ['']:
desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
versiondict = dict(versions)
for version in desired_versions:
try:
targets = dict(versiondict [version])
for target in platforms:
try:
arch,(p1,p2,p3) = targets[target]
compiler,revision = version.rsplit(' ', 1)
if arch:
return compiler,revision,p1,p2,p3,arch
else:
return compiler,revision,p1,p2,p3
except KeyError: continue
except KeyError: continue
conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
MSVC_INSTALLED_VERSIONS = {}
@conf
def get_msvc_versions(conf, windows_kit):
"""
:return: list of compilers installed
:rtype: list of string
"""
global MSVC_INSTALLED_VERSIONS
if not windows_kit in MSVC_INSTALLED_VERSIONS:
MSVC_INSTALLED_VERSIONS[windows_kit] = ''
if len(MSVC_INSTALLED_VERSIONS[windows_kit]) == 0:
lst = []
conf.gather_wsdk_versions(windows_kit, lst)
conf.gather_msvc_versions(windows_kit, lst)
MSVC_INSTALLED_VERSIONS[windows_kit] = lst
return MSVC_INSTALLED_VERSIONS[windows_kit]
def gather_msvc_detected_versions():
#Detected MSVC versions!
version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$')
detected_versions = []
for vcver,vcvar in [('VCExpress','Exp'), ('VisualStudio','')]:
try:
prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
except WindowsError:
try:
prefix = 'SOFTWARE\\Microsoft\\'+vcver
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
except WindowsError:
continue
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
match = version_pattern.match(version)
if not match:
continue
else:
versionnumber = float(match.group(1))
detected_versions.append((versionnumber, version+vcvar, prefix+"\\"+version))
def fun(tup):
return tup[0]
detected_versions.sort(key = fun)
return detected_versions
@conf
def gather_msvc_versions(conf, windows_kit, versions):
vc_paths = []
for (v,version,reg) in gather_msvc_detected_versions():
try:
try:
msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC")
except WindowsError:
msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\Microsoft Visual C++")
path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir')
vc_paths.append((version, os.path.abspath(str(path))))
except WindowsError:
continue
for version,vc_path in vc_paths:
vs_path = os.path.dirname(vc_path)
conf.gather_msvc_targets(versions, version, windows_kit, vc_path)
pass
@conf
def gather_msvc_targets(conf, versions, version, windows_kit, vc_path):
#Looking for normal MSVC compilers!
targets = []
if os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')):
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, windows_kit, os.path.join(vc_path, 'vcvarsall.bat')))))
except conf.errors.ConfigurationError:
pass
elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', windows_kit, os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')))))
except conf.errors.ConfigurationError:
pass
elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, '', windows_kit, os.path.join(vc_path, 'Bin', 'vcvars32.bat')))))
except conf.errors.ConfigurationError:
pass
if targets:
versions.append(('msvc '+ version, targets))
def _get_prog_names(conf, compiler):
if compiler=='intel':
compiler_name = 'ICL'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conf
def get_msvc_version(conf, compiler, version, target, windows_kit, vcvars):
"""
Create a bat file to obtain the location of the libraries
:param compiler: compiler family being configured ('msvc', 'wsdk' or 'intel')
:param version: compiler version string, e.g. '14.0'
:param target: architecture argument passed to the environment script, e.g. 'x64' or '/x64'
:param windows_kit: Windows SDK version to pair with the compiler
:param vcvars: path to the vcvarsall.bat / SetEnv.cmd script to call
:return: the location of msvc, the location of include dirs, and the library paths
:rtype: tuple of strings
"""
debug('msvc: get_msvc_version: %r %r %r %r', compiler, version, target, windows_kit)
batfile = conf.bldnode.make_node('waf-print-msvc.bat')
batfile.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%;%%LIBPATH%%
""" % (vcvars,target,windows_kit))
sout = conf.cmd_and_log(['cmd', '/E:on', '/V:on', '/C', batfile.abspath()])
lines = sout.splitlines()
if not lines[0]:
lines.pop(0)
MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None
for line in lines:
if line.startswith('PATH='):
path = line[5:]
MSVC_PATH = path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR = [i for i in line[8:].split(';') if i]
elif line.startswith('LIB='):
MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
if not MSVC_PATH or not MSVC_INCDIR or not MSVC_LIBDIR:
conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)')
# Check if the compiler is usable at all.
# The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
env = dict(os.environ)
env.update(PATH = path)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
cxx = conf.find_program(compiler_name, path_list=MSVC_PATH, silent_output=True)
cxx = conf.cmd_to_list(cxx)
# delete CL if it exists, because it could contain parameters which can change cl's behaviour rather catastrophically.
if 'CL' in env:
del(env['CL'])
try:
try:
conf.cmd_and_log(cxx + ['/help'], env=env)
except Exception as e:
debug('msvc: get_msvc_version: %r %r %r %r -> failure' % (compiler, version, target, windows_kit))
debug(str(e))
conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
else:
debug('msvc: get_msvc_version: %r %r %r %r -> OK', compiler, version, target, windows_kit)
finally:
conf.env[compiler_name] = ''
# vcvarsall does not always resolve the windows sdk path with VS2015 + Win10, but we know where it is
winsdk_path = _get_win_sdk_path(windows_kit, target)
if winsdk_path:
MSVC_PATH.append(winsdk_path)
return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
def _get_win_sdk_path(windows_kit, arch):
path = _find_win_sdk_root(windows_kit)
if path:
is_valid, version, bin_path = _is_valid_win_sdk(path, windows_kit.startswith('10'), windows_kit)
if is_valid:
if version == windows_kit:
return str(os.path.join(bin_path, arch))
else:
Logs.debug('winsdk: Found a working windows SDK (%s), but it does not match the requested version (%s)' % (version, windows_kit))
return ''
def _is_valid_win_sdk(path, is_universal_versioning, desired_version=''):
# Successfully installed windows kits have rc.exe. This file is a downstream dependency of vcvarsall.bat.
def _check_for_rc_file(path):
rc_x64 = os.path.join(path, 'x64\\rc.exe')
rc_x86 = os.path.join(path, 'x86\\rc.exe')
return os.path.isfile(rc_x64) or os.path.isfile(rc_x86)
bin_dir = os.path.join(path, 'bin')
include_dir = os.path.join(path, 'include')
if is_universal_versioning:
potential_sdks = [desired_version] if desired_version else []
if os.path.isdir(include_dir):
# lexically sort the 10.xxx versions in reverse so that latest/highest is first
potential_sdks += sorted(os.listdir(include_dir), reverse=True)
sdk10_versions = [entry for entry in potential_sdks if entry.startswith('10.')]
for sub_version in sdk10_versions:
sub_version_folder = os.path.join(include_dir, sub_version)
if os.path.isdir(os.path.join(sub_version_folder, 'um')):
# check for rc.exe in the sub_version folder's bin, or in the root 10 bin, we just need at least one
for bin_path in (os.path.join(os.path.join(path, 'bin'), sub_version), bin_dir):
if _check_for_rc_file(bin_path):
return True, sub_version, bin_path
else:
if _check_for_rc_file(bin_dir):
version = path.split('\\')[-2]
return True, version, bin_dir
return False, '', ''
def _find_win_sdk_root(winsdk_hint):
"""
Use winreg to find a valid installed windows kit.
Returns empty string if no valid version was found.
See visual studio compatibility charts here:
https://www.visualstudio.com/en-us/productinfo/vs2015-compatibility-vs
"""
try:
installed_roots = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows Kits\\Installed Roots')
except WindowsError:
try:
installed_roots = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots')
except WindowsError:
return ''
if winsdk_hint.startswith('10'):
try:
path, type = Utils.winreg.QueryValueEx(installed_roots, 'KitsRoot10')
return path
except WindowsError:
pass
elif winsdk_hint.startswith('8'):
try:
path, type = Utils.winreg.QueryValueEx(installed_roots, 'KitsRoot81')
return path
except WindowsError:
pass
return ''
@conf
def find_valid_wsdk_version(conf):
path = _find_win_sdk_root("10")
if path:
is_valid, version, bin_path = _is_valid_win_sdk(path, True)
if is_valid:
return version
# No root for sdk 10 found, try 8.1
path = _find_win_sdk_root("8.1")
if path:
is_valid, version, bin_path = _is_valid_win_sdk(path, False)
if is_valid:
return version
return ''
@conf
def gather_wsdk_versions(conf, windows_kit, versions):
"""
Use winreg to add the msvc versions to the input list
:param versions: list to modify
:type versions: list
"""
version_pattern = re.compile(r'^v..?.?\...?.?')
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
return
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = Utils.winreg.OpenKey(all_versions, version)
path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder')
except WindowsError:
continue
if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
targets = []
for target,arch in all_msvc_platforms:
try:
targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, windows_kit, os.path.join(path, 'bin', 'SetEnv.cmd')))))
except conf.errors.ConfigurationError:
pass
versions.append(('wsdk ' + version[1:], targets))
pass
@conf
def find_msvc(conf):
"""Due to path format limitations, limit operation only to native Win32. Yeah it sucks."""
if sys.platform == 'cygwin':
conf.fatal('MSVC module does not work under cygwin Python!')
# the autodetection is supposed to be performed before entering in this method
v = conf.env
path = v['PATH']
compiler = v['MSVC_COMPILER']
version = v['MSVC_VERSION']
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11)
# compiler
cxx = None
if v['CXX']: cxx = v['CXX']
elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
cxx = conf.find_program(compiler_name, var='CXX', path_list=path, silent_output=True)
cxx = conf.cmd_to_list(cxx)
# before setting anything, check if the compiler is really msvc
env = dict(conf.environ)
if path: env.update(PATH = ';'.join(path))
if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env):
conf.fatal('the msvc compiler could not be identified')
# c/c++ compiler
v['CC'] = v['CXX'] = cxx[0]
v['CC_NAME'] = v['CXX_NAME'] = 'msvc'
# Bullseye code coverage
if conf.is_option_true('use_bullseye_coverage'):
# TODO: Error handling for this is opaque. This will fail the MSVS 2015 tool check,
# and not say anything about bullseye being missing.
try:
covc = conf.find_program('covc',var='BULL_COVC',path_list = path, silent_output=True)
covlink = conf.find_program('covlink',var='BULL_COVLINK',path_list = path, silent_output=True)
covselect = conf.find_program('covselect',var='BULL_COVSELECT',path_list = path, silent_output=True)
v['BULL_COVC'] = covc
v['BULL_COVLINK'] = covlink
v['BULL_COV_FILE'] = conf.CreateRootRelativePath(conf.options.bullseye_cov_file)
# Update the coverage file with the region selections detailed in the settings regions parameters
# NOTE: should we clear other settings at this point, or allow them to accumulate?
# Maybe we need a flag for that in the setup?
regions = conf.options.bullseye_coverage_regions.replace(' ','').split(',')
conf.cmd_and_log(([covselect] + ['--file', v['BULL_COV_FILE'], '-a'] + regions))
except:
Logs.error('Could not find the Bullseye Coverage tools on the path, or coverage tools are not correctly installed. Coverage build disabled.')
# linker
if not v['LINK_CXX']:
link = conf.find_program(linker_name, path_list=path, silent_output=True)
if link: v['LINK_CXX'] = link
else: conf.fatal('%s was not found (linker)' % linker_name)
v['LINK'] = link
if not v['LINK_CC']:
v['LINK_CC'] = v['LINK_CXX']
# staticlib linker
if not v['AR']:
stliblink = conf.find_program(lib_name, path_list=path, var='AR', silent_output=True)
if not stliblink: return
v['ARFLAGS'] = ['/NOLOGO']
# manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
if v.MSVC_MANIFEST:
conf.find_program('MT', path_list=path, var='MT', silent_output=True)
v['MTFLAGS'] = ['/NOLOGO']
# call configure on the waflib winres module to setup the environment for configure
# conf.load('winres') caches the environment as part of the module load key, and we just modified
# the environment, causing the cache to miss, and extra calls import/load the module
# winres is loaded
try:
module = sys.modules['waflib.Tools.winres']
func = getattr(module,'configure',None)
if func:
func(conf)
except Exception as e:
warn('Resource compiler not found. Compiling resource file is disabled')
|
the-stack_0_14635 | import logging
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from .datasets import CIFAR10_truncated
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# generate the non-IID distribution for all methods
def read_data_distribution(filename='./data_preprocessing/non-iid-distribution/CIFAR10/distribution.txt'):
distribution = {}
with open(filename, 'r') as data:
for x in data.readlines():
if '{' != x[0] and '}' != x[0]:
tmp = x.split(':')
if '{' == tmp[1].strip():
first_level_key = int(tmp[0])
distribution[first_level_key] = {}
else:
second_level_key = int(tmp[0])
distribution[first_level_key][second_level_key] = int(tmp[1].strip().replace(',', ''))
return distribution
def read_net_dataidx_map(filename='./data_preprocessing/non-iid-distribution/CIFAR10/net_dataidx_map.txt'):
net_dataidx_map = {}
with open(filename, 'r') as data:
for x in data.readlines():
if '{' != x[0] and '}' != x[0] and ']' != x[0]:
tmp = x.split(':')
if '[' == tmp[-1].strip():
key = int(tmp[0])
net_dataidx_map[key] = []
else:
tmp_array = x.split(',')
net_dataidx_map[key] = [int(i.strip()) for i in tmp_array]
return net_dataidx_map
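# Format sketch (the index values are invented): each client entry in the file is expected to
# span three lines, e.g.
# 0: [
# 12, 45, 981
# ]
# which this parser turns into {0: [12, 45, 981], ...}, a client id -> sample index list map.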
def record_net_data_stats(y_train, net_dataidx_map):
net_cls_counts = {}
for net_i, dataidx in net_dataidx_map.items():
unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True)
tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
net_cls_counts[net_i] = tmp
logging.debug('Data statistics: %s' % str(net_cls_counts))
return net_cls_counts
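# Shape sketch (the counts below are invented): the returned mapping looks like
# {0: {0: 512, 3: 498, 7: 41}, 1: {1: 602, 2: 17}, ...}, i.e. one inner dict per client id
# giving how many samples of each class ended up in that client's partition.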
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
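# Usage sketch (tensor values are arbitrary): Cutout(16) expects a CHW tensor such as the output
# of ToTensor(), e.g. Cutout(16)(torch.rand(3, 32, 32)); it picks a random centre and zeroes a
# 16x16 square (clipped at the borders) across all channels.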
def _data_transforms_cifar10():
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
train_transform.transforms.append(Cutout(16))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
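# Usage sketch (shapes follow the CIFAR-10 arrays loaded below): train_transform starts with
# ToPILImage, so it is applied to one HWC uint8 image at a time, e.g. a 32x32x3 row of X_train,
# and yields a normalized 3x32x32 float tensor with a random 16x16 Cutout hole.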
def load_cifar10_data(datadir):
train_transform, test_transform = _data_transforms_cifar10()
cifar10_train_ds = CIFAR10_truncated(datadir, train=True, download=True, transform=train_transform)
cifar10_test_ds = CIFAR10_truncated(datadir, train=False, download=True, transform=test_transform)
X_train, y_train = cifar10_train_ds.data, cifar10_train_ds.target
X_test, y_test = cifar10_test_ds.data, cifar10_test_ds.target
return (X_train, y_train, X_test, y_test)
def partition_data(dataset, datadir, partition, n_nets, alpha):
logging.info("*********partition data***************")
X_train, y_train, X_test, y_test = load_cifar10_data(datadir)
n_train = X_train.shape[0]
# n_test = X_test.shape[0]
if partition == "homo":
total_num = n_train
idxs = np.random.permutation(total_num)
batch_idxs = np.array_split(idxs, n_nets)
net_dataidx_map = {i: batch_idxs[i] for i in range(n_nets)}
elif partition == "hetero":
min_size = 0
K = 10
N = y_train.shape[0]
logging.info("N = " + str(N))
net_dataidx_map = {}
while min_size < 10:
idx_batch = [[] for _ in range(n_nets)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(y_train == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, n_nets))
## Balance
proportions = np.array([p * (len(idx_j) < N / n_nets) for p, idx_j in zip(proportions, idx_batch)])
proportions = proportions / proportions.sum()
proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(n_nets):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
elif partition == "hetero-fix":
dataidx_map_file_path = './data_preprocessing/non-iid-distribution/CIFAR10/net_dataidx_map.txt'
net_dataidx_map = read_net_dataidx_map(dataidx_map_file_path)
if partition == "hetero-fix":
distribution_file_path = './data_preprocessing/non-iid-distribution/CIFAR10/distribution.txt'
traindata_cls_counts = read_data_distribution(distribution_file_path)
else:
traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map)
return X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts
# for centralized training
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None):
return get_dataloader_CIFAR10(datadir, train_bs, test_bs, dataidxs)
# for local devices
def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test):
return get_dataloader_test_CIFAR10(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test)
def get_dataloader_CIFAR10(datadir, train_bs, test_bs, dataidxs=None):
dl_obj = CIFAR10_truncated
transform_train, transform_test = _data_transforms_cifar10()
train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=True)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=True)
return train_dl, test_dl
def get_dataloader_test_CIFAR10(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None):
dl_obj = CIFAR10_truncated
transform_train, transform_test = _data_transforms_cifar10()
train_ds = dl_obj(datadir, dataidxs=dataidxs_train, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, dataidxs=dataidxs_test, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=True)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=True)
return train_dl, test_dl
def load_partition_data_distributed_cifar10(process_id, dataset, data_dir, partition_method, partition_alpha,
client_number, batch_size):
X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(dataset,
data_dir,
partition_method,
client_number,
partition_alpha)
class_num = len(np.unique(y_train))
logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = sum([len(net_dataidx_map[r]) for r in range(client_number)])
# get global test data
if process_id == 0:
train_data_global, test_data_global = get_dataloader(dataset, data_dir, batch_size, batch_size)
logging.info("train_dl_global number = " + str(len(train_data_global)))
logging.info("test_dl_global number = " + str(len(test_data_global)))
train_data_local = None
test_data_local = None
local_data_num = 0
else:
# get local dataset
dataidxs = net_dataidx_map[process_id - 1]
local_data_num = len(dataidxs)
logging.info("rank = %d, local_sample_number = %d" % (process_id, local_data_num))
# training batch size = 64; algorithms batch size = 32
train_data_local, test_data_local = get_dataloader(dataset, data_dir, batch_size, batch_size,
dataidxs)
logging.info("process_id = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
process_id, len(train_data_local), len(test_data_local)))
train_data_global = None
test_data_global = None
return train_data_num, train_data_global, test_data_global, local_data_num, train_data_local, test_data_local, class_num
def load_partition_data_cifar10(dataset, data_dir, partition_method, partition_alpha, client_number, batch_size):
X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(dataset,
data_dir,
partition_method,
client_number,
partition_alpha)
class_num = len(np.unique(y_train))
logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = sum([len(net_dataidx_map[r]) for r in range(client_number)])
train_data_global, test_data_global = get_dataloader(dataset, data_dir, batch_size, batch_size)
logging.info("train_dl_global number = " + str(len(train_data_global)))
logging.info("test_dl_global number = " + str(len(test_data_global)))
test_data_num = len(test_data_global)
# get local dataset
data_local_num_dict = dict()
train_data_local_dict = dict()
test_data_local_dict = dict()
for client_idx in range(client_number):
dataidxs = net_dataidx_map[client_idx]
local_data_num = len(dataidxs)
data_local_num_dict[client_idx] = local_data_num
logging.info("client_idx = %d, local_sample_number = %d" % (client_idx, local_data_num))
# training batch size = 64; algorithms batch size = 32
train_data_local, test_data_local = get_dataloader(dataset, data_dir, batch_size, batch_size,
dataidxs)
logging.info("client_idx = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
client_idx, len(train_data_local), len(test_data_local)))
train_data_local_dict[client_idx] = train_data_local
test_data_local_dict[client_idx] = test_data_local
return train_data_num, test_data_num, train_data_global, test_data_global, \
data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num, traindata_cls_counts
|
the-stack_0_14637 | # pylint: disable=protected-access, unused-argument
import os
import glob
import radical.utils as ru
from .test_common import setUp
from radical.pilot.agent.launch_method.jsrun import JSRUN
try:
import mock
except ImportError:
from unittest import mock
# ------------------------------------------------------------------------------
#
def tearDown():
rs = glob.glob('%s/*.rs' % os.getcwd())
for fold in rs:
os.remove(fold)
# ------------------------------------------------------------------------------
#
@mock.patch.object(JSRUN, '__init__', return_value=None)
@mock.patch.object(JSRUN, '_configure',return_value='jsrun')
@mock.patch('radical.utils.raise_on')
def test_create_resource_set_file(mocked_init, mocked_configure, mocked_raise_on):
test_cases = setUp('lm', 'jsrun')
component = JSRUN(name=None, cfg=None, session=None)
for unit, _, resource_file, _ in test_cases:
slot = unit['slots']
uid = unit['uid']
component._create_resource_set_file(slots=slot, uid=uid, sandbox='.')
print(uid)
with open('%s.rs' % uid) as rs_layout:
assert rs_layout.readlines() == resource_file
tearDown()
# ------------------------------------------------------------------------------
#
@mock.patch.object(JSRUN, '__init__', return_value=None)
@mock.patch.object(JSRUN, '_configure', return_value='jsrun')
@mock.patch('radical.utils.raise_on')
def test_construct_command(mocked_init, mocked_configure, mocked_raise_on):
test_cases = setUp('lm', 'jsrun')
component = JSRUN(name=None, cfg=None, session=None)
component._create_resource_set_file = mock.Mock()
component._log = ru.Logger('dummy')
component.launch_command = 'jsrun'
for unit, result, _ , resource_filename in test_cases:
component._create_resource_set_file.return_value = resource_filename
command, hop = component.construct_command(unit, None)
assert([command, hop] == result)
# ------------------------------------------------------------------------------
|
the-stack_0_14638 | from setuptools import setup
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
## Edit the variables below as per your requirements
REPO_NAME = "Movie-Recommender-System"
AUTHOR_USER_NAME = "Nitin"
SRC_REPO = "src"
LIST_OF_REQUIREMENTS = ['streamlit']
setup(
name=SRC_REPO,
version="0.0.1",
author=AUTHOR_USER_NAME,
description="A small package for Movie Recommender System",
long_description=long_description,
long_description_content_type="text/markdown",
url=f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}",
author_email="[email protected]",
packages=[SRC_REPO],
license="MIT",
python_requires=">=3.7",
install_requires=LIST_OF_REQUIREMENTS
)
|
the-stack_0_14640 | import os
import pathlib
import subprocess
from sphinx.ext.doctest import (Any, Dict, DocTestBuilder, TestcodeDirective,
TestoutputDirective, doctest, sphinx)
from sphinx.locale import __
class JavaDocTestBuilder(DocTestBuilder):
"""
Runs java test snippets in the documentation.
"""
name = "javadoctest"
epilog = __(
"Java testing of doctests in the sources finished, look at the "
"results in %(outdir)s/output.txt."
)
def compile(
self, code: str, name: str, type: str, flags: Any, dont_inherit: bool
) -> Any:
# go to project that contains all your arrow maven dependencies
path_arrow_project = pathlib.Path(__file__).parent.parent / "source" / "demo"
# create list of all arrow jar dependencies
subprocess.check_call(
[
"mvn",
"-q",
"dependency:build-classpath",
"-DincludeTypes=jar",
"-Dmdep.outputFile=.cp.tmp",
],
cwd=path_arrow_project,
text=True,
)
if not (path_arrow_project / ".cp.tmp").exists():
raise RuntimeError(
__("invalid process to create jshell dependencies library")
)
# get list of all arrow jar dependencies
with open(path_arrow_project / ".cp.tmp") as f:
stdout_dependency = f.read()
if not stdout_dependency:
raise RuntimeError(
__("invalid process to list jshell dependencies library")
)
# execute the java test code through jshell and read its output
# JDK 11 supports '-' for stdin, which lets the pipe work as expected without requiring a shell;
# we use /dev/stdin instead so that JDK 9+ is also supported
proc_jshell_process = subprocess.Popen(
["jshell", "--class-path", stdout_dependency, "-s", "/dev/stdin"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
text=True,
)
out_java_arrow, err_java_arrow = proc_jshell_process.communicate(code)
if err_java_arrow:
raise RuntimeError(__("invalid process to run jshell"))
# wrap the cleaned java output in a print() call so the regular doctest output comparison can validate it
output = f"print('''{self.clean_output(out_java_arrow)}''')"
# continue with sphinx default logic
return compile(output, name, self.type, flags, dont_inherit)
def clean_output(self, output: str):
if output[-3:] == '-> ':
output = output[:-3]
if output[-1:] == '\n':
output = output[:-1]
output = (4*' ').join(output.split('\t'))
return output
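# Rough example (the jshell output is paraphrased): for a captured string like "col1\tcol2\n-> ",
# the trailing "-> " prompt and the final newline are stripped and each tab becomes four spaces,
# leaving "col1    col2" with the columns separated by spaces.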
def setup(app) -> Dict[str, Any]:
app.add_directive("testcode", TestcodeDirective)
app.add_directive("testoutput", TestoutputDirective)
app.add_builder(JavaDocTestBuilder)
# this config value adds to sys.path
app.add_config_value("doctest_path", [], False)
app.add_config_value("doctest_test_doctest_blocks", "default", False)
app.add_config_value("doctest_global_setup", "", False)
app.add_config_value("doctest_global_cleanup", "", False)
app.add_config_value(
"doctest_default_flags",
doctest.DONT_ACCEPT_TRUE_FOR_1
| doctest.ELLIPSIS
| doctest.IGNORE_EXCEPTION_DETAIL,
False,
)
return {"version": sphinx.__display_version__, "parallel_read_safe": True}
|
the-stack_0_14641 | import numpy as np
k = lambda x:3*np.sin(x)*np.exp(np.sqrt(x))/(2*x)
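# cordes_para below is the fixed-slope ("methode des cordes" / chord) variant of Newton's method:
# x_{n+1} = x_n - f(x_n)/gamma, with the constant gamma standing in for f'(x_n). It returns the
# last iterate and a boolean saying whether the final step was smaller than epsilon.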
def cordes_para(f,x0,epsilon,gamma,maxiter=50):
xn = x0
for i in range(maxiter):
xn_ = xn
xn = xn-f(xn)/gamma
print(xn, f(xn))
return (xn, np.abs(xn_-xn)<epsilon)
print(cordes_para(lambda x:k(x)-0.25, 3.5, 1, 5))
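# Hedged worked example (inputs chosen for illustration, not part of the original exercise):
# cordes_para(lambda x: x**2 - 2, 1.0, 1e-6, 3) drives x toward sqrt(2) ~ 1.41421, since the fixed
# slope 3 is close to f'(sqrt(2)) = 2*sqrt(2) ~ 2.83 and each step x - (x**2 - 2)/3 contracts the error.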
|
the-stack_0_14644 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hydroengine documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import hydroengine
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hydro-engine'
copyright = u"2018, Gennadii Donchyts"
author = u"Gennadii Donchyts"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = hydroengine.__version__
# The full version, including alpha/beta/rc tags.
release = hydroengine.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'hydroenginedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hydroengine.tex',
u'hydro-engine Documentation',
u'Gennadii Donchyts', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hydroengine',
u'hydro-engine Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hydroengine',
u'hydro-engine Documentation',
author,
'hydroengine',
'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_14645 | import sys
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
def trt_inc_dir():
return "/usr/include/aarch64-linux-gnu"
def trt_lib_dir():
return "/usr/lib/aarch64-linux-gnu"
ext_modules = []
exclude_dir = ["torch2trt/contrib","torch2trt/contrib.*"]
plugins_ext_module = CUDAExtension(
name='plugins',
sources=[
'torch2trt/plugins/plugins.cpp'
],
include_dirs=[
trt_inc_dir()
],
library_dirs=[
trt_lib_dir()
],
libraries=[
'nvinfer'
],
extra_compile_args={
        # note: lexicographic version comparison (e.g. "1.10" also sorts before "1.5")
        'cxx': ['-DUSE_DEPRECATED_INTLIST'] if torch.__version__ < "1.5" else [],
'nvcc': []
}
)
if '--plugins' in sys.argv:
ext_modules.append(plugins_ext_module)
sys.argv.remove('--plugins')
if '--contrib' in sys.argv:
exclude_dir=[]
sys.argv.remove('--contrib')
setup(
name='torch2trt',
version='0.3.0',
description='An easy to use PyTorch to TensorRT converter',
packages=find_packages(exclude=exclude_dir),
ext_package='torch2trt',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension}
)
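# Usage note (added, hedged): the optional flags are consumed before setup() runs,
# so a build/install might look like
#
#   python setup.py install              # core converter only
#   python setup.py install --plugins    # also compile the TensorRT plugins extension
#   python setup.py install --contrib    # keep the torch2trt.contrib packages
#
# The hard-coded include/library paths above assume an aarch64 (Jetson) TensorRT install.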
|
the-stack_0_14646 | from train import CoordParser
def cluster(file_list, output, n_clusters=None, max_files=None):
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from mpl_toolkits.basemap import Basemap
import numpy as np
if n_clusters is None: n_clusters = 100
# Parse the coordinates
parser = CoordParser()
c = np.array([parser(l) for l in open(file_list,'r')])
# Create the basemap parameters
bnd = 0
basemap_params = dict(projection='merc',llcrnrlat=np.min(c[:,0])-bnd,urcrnrlat=np.max(c[:,0])+bnd, llcrnrlon=np.min(c[:,1])-bnd,urcrnrlon=np.max(c[:,1])+bnd)
# Select a subset of the coordinates to cluster
if max_files is None:
max_files = 100000
np.random.shuffle(c)
c = c[:max_files]
# Project the coordinates into x, y coordinates
m = Basemap(**basemap_params)
x,y = m(c[:,1],c[:,0])
from sklearn import cluster
km = cluster.MiniBatchKMeans(n_clusters=n_clusters).fit(np.concatenate((x[:,None],y[:,None]),axis=1))
np.save(output,(basemap_params,km.cluster_centers_))
def main():
from argparse import ArgumentParser
from time import time
parser = ArgumentParser()
parser.add_argument('--file-list', type=str, default='/fastdata/finder/streetview_train.txt', help='path to the streetview training file')
parser.add_argument('-n', '--n-clusters', type=int, default=100, help='number of cluster')
parser.add_argument('--max-files', type=int, help='maximum number of files to cluster')
parser.add_argument('output', type=str, help='output file (e.g. clusters.npy)')
args = parser.parse_args()
cluster(args.file_list, args.output, args.n_clusters, args.max_files)
if __name__ == "__main__":
main()
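# Example invocation (a sketch; the script filename "cluster.py" and the output
# filename are assumptions, the options come from the argparse setup above):
#
#   python cluster.py --file-list /fastdata/finder/streetview_train.txt \
#       -n 100 --max-files 100000 clusters.npy
#
# The saved .npy holds the (basemap_params, cluster_centers) tuple written by np.save above.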
|
the-stack_0_14648 | #
# Licensed to Dagda under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Dagda licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import requests
import json
import traceback
from threading import Thread
from analysis.static.os import os_info_extractor
from analysis.static.dependencies import dep_info_extractor
from analysis.static.av import malware_extractor
from api.internal.internal_server import InternalServer
from log.dagda_logger import DagdaLogger
from analysis.static.util.utils import extract_filesystem_bundle
from analysis.static.util.utils import clean_up
# Analyzer class
class Analyzer:
# -- Public methods
# Analyzer Constructor
def __init__(self, dagda_server_url=None):
super(Analyzer, self).__init__()
self.is_remote = False
if dagda_server_url is not None:
self.dagda_server_url = dagda_server_url
self.is_remote = True
else:
self.mongoDbDriver = InternalServer.get_mongodb_driver()
self.dockerDriver = InternalServer.get_docker_driver()
# Evaluate image from image name or container id
def evaluate_image(self, image_name, container_id, file_path):
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"ENTRY to the method for analyzing a docker image"
)
# Init
data = {}
# -- Static analysis
        if not file_path:
            # resolve the image name from the container id when only the latter is given
            image_name = (
                self.dockerDriver.get_docker_image_name_by_container_id(container_id)
                if container_id
                else image_name
            )
os_packages = []
malware_binaries = []
dependencies = []
temp_dir = None
try:
# Get OS packages
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"Retrieving OS packages from the docker image ..."
)
if file_path:
# no OS packages to scan because not contained in a docker image
temp_dir = extract_filesystem_bundle(
image_name=image_name,
image_path=file_path,
)
elif container_id is None: # Scans the docker image
os_packages = os_info_extractor.get_soft_from_docker_image(
docker_driver=self.dockerDriver, image_name=image_name
)
temp_dir = extract_filesystem_bundle(
docker_driver=self.dockerDriver, image_name=image_name
)
else: # Scans the docker container
os_packages = os_info_extractor.get_soft_from_docker_container_id(
docker_driver=self.dockerDriver, container_id=container_id
)
temp_dir = extract_filesystem_bundle(
docker_driver=self.dockerDriver, container_id=container_id
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"OS packages from the docker image retrieved"
)
# Get malware binaries in a parallel way
malware_thread = Thread(
target=Analyzer._threaded_malware,
args=(self.dockerDriver, temp_dir, malware_binaries),
)
malware_thread.start()
# Get programming language dependencies in a parallel way
dependencies_thread = Thread(
target=Analyzer._threaded_dependencies,
args=(self.dockerDriver, image_name, temp_dir, dependencies),
)
dependencies_thread.start()
# Waiting for the threads
malware_thread.join()
dependencies_thread.join()
except Exception as ex:
message = "Unexpected exception of type {0} occurred: {1!r}".format(
type(ex).__name__,
ex.get_message() if type(ex).__name__ == "DagdaError" else ex.args,
)
DagdaLogger.get_logger().error(message)
if InternalServer.is_debug_logging_enabled():
traceback.print_exc()
data["status"] = message
# -- Cleanup
if temp_dir is not None:
clean_up(temporary_dir=temp_dir)
# -- Prepare output
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug("Preparing analysis output ...")
if "status" not in data or data["status"] is None:
data["status"] = "Completed"
data["image_name"] = image_name
data["timestamp"] = datetime.datetime.now().timestamp()
data["static_analysis"] = self.generate_static_analysis(
image_name, os_packages, dependencies, malware_binaries
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug("Analysis output completed")
# -- Return
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"EXIT from the method for analyzing a docker image"
)
return data
# Generates the result of the static analysis
def generate_static_analysis(
self, image_name, os_packages, dependencies, malware_binaries
):
data = {}
data["os_packages"] = self.generate_os_report(image_name, os_packages)
data["prog_lang_dependencies"] = self.generate_dependencies_report(
image_name, dependencies
)
data["malware_binaries"] = malware_binaries
return data
# Generates dependencies report
def generate_dependencies_report(self, image_name, dependencies):
data = {}
dep_details = {}
dep_details["java"] = []
dep_details["python"] = []
dep_details["nodejs"] = []
dep_details["js"] = []
dep_details["ruby"] = []
dep_details["php"] = []
fp_count = 0
for dependency in dependencies:
d = {}
splitted_dep = dependency.split("#")
d["product"] = splitted_dep[1]
d["version"] = splitted_dep[2]
d["product_file_path"] = splitted_dep[3]
d["vulnerabilities"] = self.get_vulnerabilities(d["product"], d["version"])
# purpose of this code is to not throw away the cve_id reported by 3grander/4depcheck container process
dep_check_cve_id = splitted_dep[4]
# DagdaLogger.get_logger().debug(f"dep_check_cve_id: {dep_check_cve_id}")
included_vuln_ids = []
for vuln in d["vulnerabilities"]:
                # collect the vulnerability ids already present for this dependency
                included_vuln_ids.extend(list(vuln.keys()))
# DagdaLogger.get_logger().debug(
# f"included_vuln_ids: {json.dumps(included_vuln_ids)}"
# )
if not dep_check_cve_id in included_vuln_ids:
info = {}
cve_info = {}
cve_data = self.mongoDbDriver.db.cve_info.find_one(
{"cveid": dep_check_cve_id}
)
# DagdaLogger.get_logger().debug(f"cve_data: {cve_data}")
if cve_data is not None:
cve_info = cve_data.copy()
cve_info["mod_date"] = cve_data["mod_date"].strftime("%d-%m-%Y")
cve_info["pub_date"] = cve_data["pub_date"].strftime("%d-%m-%Y")
del cve_info["_id"]
info[dep_check_cve_id] = cve_info
# DagdaLogger.get_logger().debug(f"info: {json.dumps(info)}")
d["vulnerabilities"].append(info)
# DagdaLogger.get_logger().debug(
# f"d['vulnerabilities']: {json.dumps(d['vulnerabilities'])}"
# )
d["is_vulnerable"] = True
d["is_false_positive"] = self.is_fp(image_name, d["product"], d["version"])
if d["is_false_positive"]:
fp_count += 1
dep_details[splitted_dep[0]].append(d)
# Prepare output
data["vuln_dependencies"] = (
len(dep_details["java"])
+ len(dep_details["python"])
+ len(dep_details["nodejs"])
+ len(dep_details["js"])
+ len(dep_details["ruby"])
+ len(dep_details["php"])
- fp_count
)
data["dependencies_details"] = dep_details
# Return
return data
# Generates os report
def generate_os_report(self, image_name, os_packages):
data = {}
products_status = []
vuln_products = 0
fp_count = 0
for package in os_packages:
p = {}
p["product"] = package["product"]
p["version"] = package["version"]
p["vulnerabilities"] = self.get_vulnerabilities(
package["product"], package["version"]
)
if len(p["vulnerabilities"]) > 0:
p["is_vulnerable"] = True
vuln_products += 1
else:
p["is_vulnerable"] = False
p["is_false_positive"] = self.is_fp(
image_name, package["product"], package["version"]
)
if p["is_false_positive"]:
fp_count += 1
products_status.append(p)
# Prepare output
vuln_products -= fp_count
data["total_os_packages"] = len(products_status)
data["vuln_os_packages"] = vuln_products
data["ok_os_packages"] = data["total_os_packages"] - vuln_products
data["os_packages_details"] = products_status
# Return
return data
# Gets vulnerabilities by product and version
def get_vulnerabilities(self, product, version):
if not self.is_remote:
return self.mongoDbDriver.get_vulnerabilities(product, version)
else:
if product is not None:
product += "/" + version
r = requests.get(self.dagda_server_url + "/vuln/products/" + product)
if r.status_code == 200:
return json.loads(r.content.decode("utf-8"))
return []
# Check if it is a false positive
def is_fp(self, image_name, product, version):
if not self.is_remote:
return self.mongoDbDriver.is_fp(image_name, product, version)
else:
if product is not None:
product += "/" + version
r = requests.get(
self.dagda_server_url + "/history/" + image_name + "/fp/" + product
)
return r.status_code == 204
# Get malware binaries thread
@staticmethod
def _threaded_malware(dockerDriver, temp_dir, malware_binaries):
# Get malware binaries
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
f'Retrieving malware files from the docker image in "{temp_dir}"...'
)
malware_binaries.extend(
malware_extractor.get_malware_included_in_docker_image(
docker_driver=dockerDriver, temp_dir=temp_dir
)
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"Malware files from the docker image retrieved"
)
# Get programming language dependencies thread
@staticmethod
def _threaded_dependencies(dockerDriver, image_name, temp_dir, dependencies):
# Get programming language dependencies
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
f'Retrieving dependencies from the docker image in "{temp_dir}...'
)
dependencies.extend(
dep_info_extractor.get_dependencies_from_docker_image(
docker_driver=dockerDriver, image_name=image_name, temp_dir=temp_dir
)
)
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug(
"Dependencies from the docker image retrieved"
)
# DagdaLogger.get_logger().debug(f"dependencies: {json.dumps(dependencies)}")
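# Minimal usage sketch (added; assumes the Dagda runtime -- MongoDB and a local
# Docker daemon initialised through InternalServer -- is already available):
#
#   analyzer = Analyzer()                                          # internal mode
#   report = analyzer.evaluate_image("jboss/wildfly", None, None)  # image name, no container id, no tar path
#   print(report["status"], report["static_analysis"]["os_packages"]["vuln_os_packages"])
#
# Passing dagda_server_url="http://localhost:5000" instead makes get_vulnerabilities()
# and is_fp() query a remote Dagda REST API rather than the local MongoDB driver.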
|
the-stack_0_14652 | import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from src.PanelMethod import *
import numpy as np
from scipy.special import exp1
def wave_source(x,y,xs,ys,K):
"Source plus generated free surface waves"
r2 = (x-xs)**2+(y-ys)**2 # source square-distance
m2 = (x-xs)**2+(y+ys)**2 # mirror sink square-distance
Z = K*(y+ys+1j*abs(x-xs)) # wave number scaled complex vector
eZ = np.exp(Z) # propagating wave potential
fZ = np.real(eZ*exp1(Z)) # standing wave potential
return 0.5*np.log(r2/m2)-2j*np.pi*eZ-2*fZ
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt  # used by wave_video below; make the dependency explicit
def wave_video(x,y,q,XY,G=wave_source,args=(4,),size=(16,6)):
"Animate the induced flow over a cycle of motion"
# Get complex velocity
def uv(i): return q[i]*velocity(*XY, x[i], y[i], x[i+1], y[i+1], G, args)
UV = sum(uv(i) for i in range(len(x)-1))
# Plot flow and segments
fig, ax = plt.subplots(1,1,figsize=size)
Q = ax.quiver(*XY, *UV)#, pivot='mid')
ax.plot(x,y,c='b')
ax.set_ylim(None,0.5)
ax.set_aspect('equal', adjustable='box')
plt.close()
# run through a wave period
def update_quiver(num, Q):
Q.set_UVC(*np.real(UV*np.exp(-2j*np.pi*num/101)))
return Q,
# create the animation
return FuncAnimation(fig, update_quiver, fargs=(Q,), interval=50)
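# Quick visual sanity check of wave_source (added sketch, independent of the
# src.PanelMethod helpers; grid extents and K=4 are arbitrary choices):
#
#   X, Y = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-3, -0.05, 120))
#   phi = wave_source(X, Y, xs=0.0, ys=-1.0, K=4)
#   plt.contourf(X, Y, np.real(phi), levels=30)
#   plt.colorbar(label='Re(phi)')
#   plt.show()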
|
the-stack_0_14653 | """Support for Tellstick sensors."""
from collections import namedtuple
import logging
from tellcore import telldus
import tellcore.constants as tellcore_constants
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_ID, CONF_NAME, CONF_PROTOCOL, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DatatypeDescription = namedtuple("DatatypeDescription", ["name", "unit"])
CONF_DATATYPE_MASK = "datatype_mask"
CONF_ONLY_NAMED = "only_named"
CONF_TEMPERATURE_SCALE = "temperature_scale"
CONF_MODEL = "model"
DEFAULT_DATATYPE_MASK = 127
DEFAULT_TEMPERATURE_SCALE = TEMP_CELSIUS
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_TEMPERATURE_SCALE, default=DEFAULT_TEMPERATURE_SCALE
): cv.string,
vol.Optional(
CONF_DATATYPE_MASK, default=DEFAULT_DATATYPE_MASK
): cv.positive_int,
vol.Optional(CONF_ONLY_NAMED, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PROTOCOL): cv.string,
vol.Optional(CONF_MODEL): cv.string,
}
)
],
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tellstick sensors."""
sensor_value_descriptions = {
tellcore_constants.TELLSTICK_TEMPERATURE: DatatypeDescription(
"temperature", config.get(CONF_TEMPERATURE_SCALE)
),
tellcore_constants.TELLSTICK_HUMIDITY: DatatypeDescription("humidity", "%"),
tellcore_constants.TELLSTICK_RAINRATE: DatatypeDescription("rain rate", ""),
tellcore_constants.TELLSTICK_RAINTOTAL: DatatypeDescription("rain total", ""),
tellcore_constants.TELLSTICK_WINDDIRECTION: DatatypeDescription(
"wind direction", ""
),
tellcore_constants.TELLSTICK_WINDAVERAGE: DatatypeDescription(
"wind average", ""
),
tellcore_constants.TELLSTICK_WINDGUST: DatatypeDescription("wind gust", ""),
}
try:
tellcore_lib = telldus.TelldusCore()
except OSError:
_LOGGER.exception("Could not initialize Tellstick")
return
sensors = []
datatype_mask = config.get(CONF_DATATYPE_MASK)
if config[CONF_ONLY_NAMED]:
named_sensors = {}
for named_sensor in config[CONF_ONLY_NAMED]:
name = named_sensor[CONF_NAME]
proto = named_sensor.get(CONF_PROTOCOL)
model = named_sensor.get(CONF_MODEL)
id_ = named_sensor[CONF_ID]
if proto is not None:
if model is not None:
named_sensors["{}{}{}".format(proto, model, id_)] = name
else:
named_sensors["{}{}".format(proto, id_)] = name
else:
named_sensors[id_] = name
for tellcore_sensor in tellcore_lib.sensors():
if not config[CONF_ONLY_NAMED]:
sensor_name = str(tellcore_sensor.id)
else:
proto_id = "{}{}".format(tellcore_sensor.protocol, tellcore_sensor.id)
proto_model_id = "{}{}{}".format(
tellcore_sensor.protocol, tellcore_sensor.model, tellcore_sensor.id
)
if tellcore_sensor.id in named_sensors:
sensor_name = named_sensors[tellcore_sensor.id]
elif proto_id in named_sensors:
sensor_name = named_sensors[proto_id]
elif proto_model_id in named_sensors:
sensor_name = named_sensors[proto_model_id]
else:
continue
for datatype in sensor_value_descriptions:
if datatype & datatype_mask and tellcore_sensor.has_value(datatype):
sensor_info = sensor_value_descriptions[datatype]
sensors.append(
TellstickSensor(sensor_name, tellcore_sensor, datatype, sensor_info)
)
add_entities(sensors)
class TellstickSensor(Entity):
"""Representation of a Tellstick sensor."""
def __init__(self, name, tellcore_sensor, datatype, sensor_info):
"""Initialize the sensor."""
self._datatype = datatype
self._tellcore_sensor = tellcore_sensor
self._unit_of_measurement = sensor_info.unit or None
self._value = None
self._name = f"{name} {sensor_info.name}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Update tellstick sensor."""
self._value = self._tellcore_sensor.value(self._datatype).value
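# Example configuration.yaml snippet this platform reads (illustrative only; the
# id, name, protocol and model values are made up, the keys come from
# PLATFORM_SCHEMA above):
#
#   sensor:
#     - platform: tellstick
#       temperature_scale: "°C"
#       datatype_mask: 127
#       only_named:
#         - id: 135
#           name: Outside
#           protocol: fineoffset
#           model: temperaturehumidity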
|
the-stack_0_14654 | """
Author: Alex Kiernan
Desc: Fact model
"""
from app import db
class Fact(db.Model):
__tablename__ = 'prediction_facts'
pf_date = db.Column('pf_date', db.Date, primary_key=True)
pf_time_of_day = db.Column('pf_time_of_day', db.Integer, primary_key=True)
user_id = db.Column('user_id', db.Integer, primary_key=True)
bg_value = db.Column('bg_value', db.Float)
ins_value = db.Column('ins_value', db.Float)
food_value = db.Column('food_value', db.Integer)
exercise_value = db.Column('exercise_value', db.Integer)
def serialize(self):
return {
'pf_date' : self.pf_date.strftime("%A, %d %b %Y"),
'pf_time_of_day' : self.pf_time_of_day,
'user_id' : self.user_id,
'bg_value' : self.bg_value,
'ins_value': self.ins_value,
'food_value': self.food_value,
'exercise_value': self.exercise_value
}
def fact_serialize(self):
return {
'timestamp': self.pf_time_of_day,
'bg_value': self.bg_value,
'carbs': self.food_value,
'exercise': self.exercise_value,
'insulin_dosage': self.ins_value
}
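# Usage sketch (added; assumes the Flask-SQLAlchemy session from `app` is set up
# and the prediction_facts table is populated):
#
#   facts = Fact.query.filter_by(user_id=1).order_by(Fact.pf_date).all()
#   payload = [f.fact_serialize() for f in facts]  # compact form for the predictor
#   detail = [f.serialize() for f in facts]        # includes human-readable dates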
|
the-stack_0_14655 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# Copyright (C) 2005-2006 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
#
# Author: Christian Boos <[email protected]>
import re
from trac.config import ConfigSection
from trac.core import *
from trac.util.html import Element, Fragment, find_element, tag
from trac.util.translation import N_, _, tag_
from trac.web.api import IRequestHandler
from trac.wiki.api import IWikiMacroProvider
from trac.wiki.formatter import extract_link
class InterTracDispatcher(Component):
"""InterTrac dispatcher."""
implements(IRequestHandler, IWikiMacroProvider)
is_valid_default_handler = False
intertrac_section = ConfigSection('intertrac',
"""This section configures InterTrac prefixes. Option names in
this section that contain a `.` are of the format
`<name>.<attribute>`. Option names that don't contain a `.` define
an alias.
The `.url` attribute is mandatory and is used for locating the
other Trac. This can be a relative path when the other Trac
environment is located on the same server.
The `.title` attribute is used for generating a tooltip when the
cursor is hovered over an InterTrac link.
Example configuration:
{{{#!ini
[intertrac]
# -- Example of setting up an alias:
t = trac
# -- Link to an external Trac:
genshi.title = Edgewall's Trac for Genshi
genshi.url = https://genshi.edgewall.org
}}}
""")
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'^/intertrac/(.*)', req.path_info)
if match:
if match.group(1):
req.args['link'] = match.group(1)
return True
def process_request(self, req):
link = req.args.get('link', '')
parts = link.split(':', 1)
if len(parts) > 1:
resolver, target = parts
if target[:1] + target[-1:] not in ('""', "''"):
link = '%s:"%s"' % (resolver, target)
from trac.web.chrome import web_context
link_frag = extract_link(self.env, web_context(req), link)
if isinstance(link_frag, (Element, Fragment)):
elt = find_element(link_frag, 'href')
if elt is None:
raise TracError(
_("Can't view %(link)s. Resource doesn't exist or "
"you don't have the required permission.", link=link))
href = elt.attrib.get('href')
else:
href = req.href(link.rstrip(':'))
req.redirect(href)
# IWikiMacroProvider methods
def get_macros(self):
yield 'InterTrac'
def get_macro_description(self, name):
return 'messages', N_("Provide a list of known InterTrac prefixes.")
def expand_macro(self, formatter, name, content):
intertracs = {}
for key, value in self.intertrac_section.options():
idx = key.rfind('.')
if idx > 0: # 0 itself doesn't help much: .xxx = ...
prefix, attribute = key[:idx], key[idx+1:]
intertrac = intertracs.setdefault(prefix, {})
try:
intertrac[attribute] = value
except TypeError: # alias
pass
else:
intertracs[key] = value # alias
intertracs.setdefault('trac', {'title': _('The Trac Project'),
'url': 'https://trac.edgewall.org'})
def generate_prefix(prefix):
intertrac = intertracs[prefix]
if isinstance(intertrac, basestring):
yield tag.tr(tag.td(tag.strong(prefix)),
tag.td(tag_("Alias for %(name)s",
name=tag.strong(intertrac))))
else:
url = intertrac.get('url')
if url:
title = intertrac.get('title', url)
yield tag.tr(tag.td(tag.a(tag.strong(prefix),
href=url + '/timeline')),
tag.td(tag.a(title, href=url)))
return tag.table(class_="wiki intertrac")(
tag.tr(tag.th(tag.em(_("Prefix"))),
tag.th(tag.em(_("Trac Site")))),
[generate_prefix(p) for p in sorted(intertracs)])
|
the-stack_0_14657 | from tridesclous import get_dataset
from tridesclous.peakdetector import get_peak_detector_class
import time
import itertools
import scipy.signal
import numpy as np
import sklearn.metrics.pairwise
from matplotlib import pyplot
from tridesclous.tests.test_signalpreprocessor import offline_signal_preprocessor
from tridesclous.peakdetector import make_sum_rectified, detect_peaks_in_rectified, get_mask_spatiotemporal_peaks
from tridesclous.peakdetector import HAVE_PYOPENCL
import matplotlib.pyplot as plt
def get_normed_sigs(chunksize=None):
# get sigs
sigs, sample_rate = get_dataset(name='olfactory_bulb')
#~ sigs = np.tile(sigs, (1, 20)) #for testing large channels num
if sigs.shape[0] % chunksize >0:
sigs = sigs[:-(sigs.shape[0] % chunksize), :]
nb_channel = sigs.shape[1]
#~ print('nb_channel', nb_channel)
geometry = np.zeros((nb_channel, 2))
geometry[:, 0] = np.arange(nb_channel) * 50 # um spacing
# normalize sigs
highpass_freq = 300.
preprocess_params = dict(
highpass_freq=highpass_freq,
common_ref_removal=True,
backward_chunksize=chunksize+chunksize//4,
output_dtype='float32')
normed_sigs = offline_signal_preprocessor(sigs, sample_rate, **preprocess_params)
return sigs, sample_rate, normed_sigs, geometry
def offline_peak_detect_global(normed_sigs, sample_rate, geometry,
peak_sign='-',relative_threshold = 5, peak_span_ms=0.5, smooth_radius_um=None):
n_span = int(sample_rate * peak_span_ms / 1000.)//2
if smooth_radius_um is None:
spatial_matrix = None
else:
d = sklearn.metrics.pairwise.euclidean_distances(geometry)
spatial_matrix = np.exp(-d/smooth_radius_um)
spatial_matrix[spatial_matrix<0.01] = 0.
sum_rectified = make_sum_rectified(normed_sigs, relative_threshold, peak_sign, spatial_matrix)
mask_peaks = detect_peaks_in_rectified(sum_rectified, n_span, relative_threshold, peak_sign)
ind_peaks, = np.nonzero(mask_peaks)
ind_peaks += n_span
return ind_peaks, sum_rectified
def offline_peak_detect_geometrical(normed_sigs, sample_rate, geometry,
peak_sign='-',relative_threshold = 5, peak_span_ms=0.5,
adjacency_radius_um=None, smooth_radius_um=None):
assert smooth_radius_um is None
assert adjacency_radius_um is not None
nb_channel = normed_sigs.shape[1]
n_span = int(sample_rate * peak_span_ms / 1000.)//2
d = sklearn.metrics.pairwise.euclidean_distances(geometry)
neighbour_mask = d<=adjacency_radius_um
nb_neighbour_per_channel = np.sum(neighbour_mask, axis=0)
nb_max_neighbour = np.max(nb_neighbour_per_channel)
nb_max_neighbour = nb_max_neighbour
neighbours = np.zeros((nb_channel, nb_max_neighbour), dtype='int32')
neighbours[:] = -1
for c in range(nb_channel):
neighb, = np.nonzero(neighbour_mask[c, :])
neighbours[c, :neighb.size] = neighb
peak_mask = get_mask_spatiotemporal_peaks(normed_sigs, n_span, relative_threshold, peak_sign, neighbours)
peaks, chan_inds = np.nonzero(peak_mask)
return peaks
def test_compare_offline_online_engines():
#~ HAVE_PYOPENCL = True
engine_names = [
('global', 'numpy'),
('geometrical', 'numpy'),
('geometrical', 'numba'),
]
if HAVE_PYOPENCL:
#~ engine_names += [('global', 'opencl'),
#~ ('geometrical', 'opencl')]
engine_names += [('geometrical', 'opencl')]
chunksize=1024
sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
#params
peak_sign = '-'
relative_threshold = 8
peak_span_ms = 0.9
smooth_radius_um = None
adjacency_radius_um = 200.
nb_channel = sigs.shape[1]
#~ print('n_span', n_span)
nloop = sigs.shape[0]//chunksize
print('sig duration', sigs.shape[0]/sample_rate)
offline_peaks = {}
t1 = time.perf_counter()
peaks, rectified_sum = offline_peak_detect_global(sigs, sample_rate, geometry,
peak_sign=peak_sign, relative_threshold=relative_threshold, peak_span_ms=peak_span_ms,
smooth_radius_um=smooth_radius_um)
t2 = time.perf_counter()
print('offline global', 'process time', t2-t1)
offline_peaks['global', 'numpy'] = peaks
offline_peaks['global', 'opencl'] = peaks
t1 = time.perf_counter()
peaks = offline_peak_detect_geometrical(sigs, sample_rate, geometry,
peak_sign=peak_sign, relative_threshold=relative_threshold, peak_span_ms=peak_span_ms,
smooth_radius_um=smooth_radius_um, adjacency_radius_um=adjacency_radius_um)
t2 = time.perf_counter()
print('offline geometrical', 'process time', t2-t1)
offline_peaks['geometrical', 'numpy'] = peaks
offline_peaks['geometrical', 'numba'] = peaks
offline_peaks['geometrical', 'opencl'] = peaks
online_peaks = {}
for method, engine in engine_names:
print(engine)
EngineClass = get_peak_detector_class(method, engine)
#~ buffer_size = chunksize*4
peakdetector = EngineClass(sample_rate, nb_channel, chunksize, 'float32', geometry)
peakdetector.change_params(peak_sign=peak_sign, relative_threshold=relative_threshold,
peak_span_ms=peak_span_ms, smooth_radius_um=smooth_radius_um,
adjacency_radius_um=adjacency_radius_um)
all_online_peaks = []
t1 = time.perf_counter()
for i in range(nloop):
#~ print(i)
pos = (i+1)*chunksize
chunk = sigs[pos-chunksize:pos,:]
time_ind_peaks, chan_peak_index, peak_val_peaks = peakdetector.process_buffer_stream(pos, chunk)
#~ print(n_peaks)
if time_ind_peaks is not None:
#~ all_online_peaks.append(chunk_peaks['index'])
all_online_peaks.append(time_ind_peaks)
online_peaks[method, engine] = np.concatenate(all_online_peaks)
t2 = time.perf_counter()
print(engine, 'process time', t2-t1, 'size', online_peaks[method, engine].size)
# remove peaks on border for comparison
for method, engine in engine_names:
peaks = online_peaks[method, engine]
peaks = peaks[(peaks>chunksize) & (peaks<sigs.shape[0]-chunksize)]
online_peaks[method, engine] = peaks
peaks = offline_peaks[method, engine]
peaks = peaks[(peaks>chunksize) & (peaks<sigs.shape[0]-chunksize)]
offline_peaks[method, engine] = peaks
# compare
for method, engine in engine_names:
print('compare', method, engine)
onlinepeaks = online_peaks[method, engine]
offlinepeaks = offline_peaks[method, engine]
print(onlinepeaks.size, offlinepeaks.size)
# TODO
#~ assert offlinepeaks.size==onlinepeaks.size, '{} nb_peak {} instead {}'.format(engine, offlinepeaks.size, onlinepeaks.size)
#~ assert np.array_equal(offlinepeaks, onlinepeaks)
def test_detect_geometrical_peaks():
chunksize=1024
sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
nb_channel = sigs.shape[1]
n_span = 4
thresh = 5
peak_sign = '-'
d = sklearn.metrics.pairwise.euclidean_distances(geometry)
nb_neighbour = 4
neighbours = np.zeros((nb_channel, nb_neighbour+1), dtype='int64')
for c in range(nb_channel):
nearest = np.argsort(d[c, :])
#~ print(c, nearest)
neighbours[c, :] = nearest[:nb_neighbour+1] # include itself
#~ print(neighbours)
mask = get_mask_spatiotemporal_peaks(normed_sigs, n_span, thresh, peak_sign, neighbours)
peak_inds, chan_inds = np.nonzero(mask)
peak_inds += n_span
print(peak_inds.size)
#~ fig, ax = plt.subplots()
#~ plot_sigs = normed_sigs.copy()
#~ for c in range(nb_channel):
#~ plot_sigs[:, c] += c*30
#~ ax.plot(plot_sigs, color='k')
#~ ampl = plot_sigs[peak_inds, chan_inds]
#~ ax.scatter(peak_inds, ampl, color='r')
#~ plt.show()
# test two way
mask_neg = get_mask_spatiotemporal_peaks(normed_sigs, n_span, thresh, '-', neighbours)
mask_pos = get_mask_spatiotemporal_peaks(-normed_sigs, n_span, thresh, '+', neighbours)
assert np.array_equal(mask_neg, mask_pos)
#~ print(peak_inds)
#~ print(chan_inds)
def benchmark_speed():
chunksize=1024
#~ chunksize=1025
#~ chunksize= 1024 + 256
#~ chunksize=2048
#~ chunksize = 1024 * 10
#~ chunksize=950
sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
#~ sigs = np
#***for testing large channels num***
sigs = np.tile(sigs, (1, 20))
normed_sigs = np.tile(normed_sigs, (1, 20))
geometry = np.zeros((sigs.shape[1], 2), dtype='float64')
geometry[:, 0] = np.arange(sigs.shape[1]) * 50.
#***
nb_channel = sigs.shape[1]
print('nb_channel', nb_channel)
engine_names = [
#~ ('global', 'numpy'),
#~ ('geometrical', 'numpy'),
('geometrical', 'numba'),
]
if HAVE_PYOPENCL:
engine_names += [
#~ ('global', 'opencl'),
('geometrical', 'opencl'),
]
args = (sample_rate, nb_channel, chunksize, 'float32', geometry)
params = dict(peak_span_ms = 0.9,
relative_threshold = 5,
peak_sign = '-')
online_peaks = {}
for method, engine in engine_names:
peakdetector = get_peak_detector_class(method, engine)(*args)
peakdetector.change_params(**params)
#~ print(peakdetector.n_span, peakdetector.dtype)
nloop = normed_sigs.shape[0]//chunksize
peak_inds = []
peak_chans = []
t1 = time.perf_counter()
for i in range(nloop):
pos = (i+1)*chunksize
chunk = normed_sigs[pos-chunksize:pos,:]
time_ind_peaks, chan_peak_index, peak_val_peaks = peakdetector.process_buffer_stream(pos, chunk)
if time_ind_peaks is not None:
peak_inds.append(time_ind_peaks)
if chan_peak_index is not None:
peak_chans.append(chan_peak_index)
t2 = time.perf_counter()
peak_inds = np.concatenate(peak_inds)
if len(peak_chans) > 0:
peak_chans = np.concatenate(peak_chans)
else:
peak_chans = np.argmin(normed_sigs[peak_inds, :], axis=1)
online_peaks[method, engine] = peak_inds
print(method, engine, ':' , peak_inds.size)
print(method, engine, 'process time', t2-t1)
#~ fig, ax = plt.subplots()
#~ plot_sigs = normed_sigs.copy()
#~ for c in range(nb_channel):
#~ plot_sigs[:, c] += c*30
#~ ax.plot(plot_sigs, color='k')
#~ ampl = plot_sigs[peak_inds, peak_chans]
#~ ax.scatter(peak_inds, ampl, color='r')
#~ plt.show()
def test_peak_sign_symetry():
chunksize=1024
raw_sigs, sample_rate, normed_sigs, geometry = get_normed_sigs(chunksize=chunksize)
nb_channel = normed_sigs.shape[1]
#~ print('nb_channel', nb_channel)
args = (sample_rate, nb_channel, chunksize, 'float32', geometry)
params = dict(peak_span_ms = 0.9,
relative_threshold = 5)
engine_names = [
('global', 'numpy'),
('geometrical', 'numpy'),
('geometrical', 'numba'),
]
if HAVE_PYOPENCL:
engine_names += [
('global', 'opencl'),
('geometrical', 'opencl'),
]
online_peaks = {}
for method, engine in engine_names:
peakdetector = get_peak_detector_class(method, engine)(*args)
for peak_sign in ['-', '+']:
if peak_sign=='-':
sigs = normed_sigs
elif peak_sign=='+':
sigs = -normed_sigs
peakdetector.change_params(peak_sign=peak_sign, **params)
nloop = normed_sigs.shape[0]//chunksize
peaks = []
t1 = time.perf_counter()
for i in range(nloop):
#~ print(i)
pos = (i+1)*chunksize
chunk = sigs[pos-chunksize:pos,:]
#~ print(chunk.shape)
time_ind_peaks, chan_peak_index, peak_val_peaks = peakdetector.process_buffer_stream(pos, chunk)
#~ print(n_peaks)
#~ print(chunk_peaks)
if time_ind_peaks is not None:
#~ all_online_peaks.append(chunk_peaks['index'])
peaks.append(time_ind_peaks)
peak_inds = np.concatenate(peaks)
online_peaks[method, engine, peak_sign] = peak_inds
t2 = time.perf_counter()
print(method, engine, 'peak_sign', peak_sign,':' , peak_inds.size, 'unique peak size', np.unique(peak_inds).size)
#~ print(name, 'process time', t2-t1)
assert np.array_equal(online_peaks[method, engine, '-'], online_peaks[method, engine, '+'])
if HAVE_PYOPENCL:
assert np.array_equal(online_peaks['global', 'numpy', '-'], online_peaks['global', 'opencl', '-'])
assert np.array_equal(online_peaks['geometrical', 'numpy', '-'], online_peaks['geometrical', 'numba', '-'])
# TODO this should be totally equal
assert np.array_equal(online_peaks['geometrical', 'numpy', '-'], online_peaks['geometrical', 'opencl', '-'])
assert np.array_equal(online_peaks['geometrical', 'numba', '-'], online_peaks['geometrical', 'opencl', '-'])
if __name__ == '__main__':
test_compare_offline_online_engines()
#~ test_detect_geometrical_peaks()
#~ benchmark_speed()
#~ test_peak_sign_symetry()
|
the-stack_0_14658 | from urllib.request import urlopen, Request
import os
import src.util as util
def save_image_tag(bs_object, conf):
# Header for passing header checker
if conf['site_name'] == conf['comic_sites'][0]:
headers = conf['headers']['m']
elif conf['site_name'] == conf['comic_sites'][1]:
headers = conf['headers']['w']
    # select image sources
    targetString = 'https://'
    targetlen = len(targetString)
    # prefix used to skip thumbnail images
    thumbnailString = conf['thumbnail_link']
    thumbnaillen = len(thumbnailString)
    # prefix used to skip other unnecessary image files
    otherString = conf['unnecessary_link']
    otherStringlen = len(otherString)
    # collect every img tag found by BeautifulSoup's find_all into img_data
img_data = bs_object.find_all("img")
num_comic_img = 2
img_idx = 1
'''
structure of img tag(prop list)
1. src
2. data-....
3. style
'''
for img_tag in img_data:
# print(list(img_tag.attrs.keys()))
attr_list = list(img_tag.attrs.keys())
# if lenght of attribute is less than 3
# it isn't comic image
if len(attr_list) < 2:
continue
# print(attr_list)
isComicImg = False
# if it is comic image,
# attribute list must contain 'data class'
for attr in attr_list:
if attr[:4] == 'data':
isComicImg = True
data_tag = attr
# some image tag contains 'itemprop' class
if conf['site_name'] == conf['comic_sites'][0]:
if 'itemprop' in attr_list:
isComicImg = True
data_tag = 'content'
elif conf['site_name'] == conf['comic_sites'][1]:
if 'alt' in attr_list:
isComicImg = True
data_tag = 'src'
if not isComicImg:
continue
print(img_idx, img_tag.attrs[data_tag])
srcString = img_tag.attrs[data_tag]
        # skip thumbnails
if srcString[:thumbnaillen] == thumbnailString:
print("pass thumbnail")
continue
if 'assets' in srcString:
print("pass img of assets")
continue
        # skip saving if the image is hosted on the site's own server;
        # all comic images are stored on external servers
print("img index=", img_idx)
if (srcString[:otherStringlen] == otherString):
print("break othrestring")
continue
        # handle comic image files stored on Google Drive or other external servers
        if srcString[0:targetlen] == targetString:
            # pass the headers dictionary along with the request
imgReq = Request(url=img_tag.attrs[data_tag], headers=headers)
try:
imageDownload = urlopen(imgReq).read()
except:
continue
            # build the file name
            filename = "image"+str(img_idx).zfill(2)+'.jpg'
            folder_path = os.path.join(conf['comic_name'], str(conf['number']))
            # create the folder
            path = util.create_dir(conf['comic_path'], folder_path)
            # full path of the file to create
            filepath = os.path.join(path, filename)
            # write the file
with open(filepath,"wb") as f:
f.write(imageDownload)
print('save => "' + path + "'/" + str(conf['number']) + '"')
            img_idx += 1
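# Sketch of the conf dict this function expects (key names are taken from the
# code above; the values shown are placeholders, not real site data):
#
#   conf = {
#       'site_name': 'siteA',
#       'comic_sites': ['siteA', 'siteB'],
#       'headers': {'m': {'User-Agent': '...'}, 'w': {'User-Agent': '...'}},
#       'thumbnail_link': 'https://example.com/thumb/',
#       'unnecessary_link': 'https://example.com/banner/',
#       'comic_name': 'my_comic',
#       'number': 1,
#       'comic_path': './downloads',
#   }
#   save_image_tag(bs_object, conf)  # bs_object: a BeautifulSoup-parsed episode page
|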