# problem name: Minimum Absolute Sum Difference
# problem link: https://leetcode.com/contest/weekly-contest-235/problems/minimum-absolute-sum-difference/
# contest link: https://leetcode.com/contest/weekly-contest-235/
# time: (?)
# author: reyad
# other_tags: sorting
# difficulty_level: beginner
from typing import List
class Solution:
    def minAbsoluteSumDiff(self, nums1: List[int], nums2: List[int]) -> int:
        # Per-index absolute differences; the answer is their sum after
        # replacing at most one element of nums1 with another element of nums1.
        d = [abs(nums1[i] - nums2[i]) for i in range(len(nums1))]
        b = sorted(nums1)
        best_gain = 0
        for i in range(len(nums1)):
            # Binary search sorted nums1 for the largest value below nums2[i].
            s, e, p = 0, len(b) - 1, -1
            while s <= e:
                m = (s + e) // 2
                if b[m] < nums2[i]:
                    p = m
                    s = m + 1
                else:
                    e = m - 1
            # The closest value is either that one or its right neighbour.
            if p < len(b) - 1 and abs(b[p + 1] - nums2[i]) < abs(b[p] - nums2[i]):
                p += 1
            # Gain from replacing nums1[i] with the closest available value.
            best_gain = max(best_gain, d[i] - abs(b[p] - nums2[i]))
        return (sum(d) - best_gain) % 1000000007
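# Quick sanity check using what I believe are the values of LeetCode's first
# example for this problem (an assumption, not part of the submission above):
# replacing the 7 in nums1 with 5 gives |1-2| + |5-3| + |5-5| = 3.
if __name__ == '__main__':
    print(Solution().minAbsoluteSumDiff([1, 7, 5], [2, 3, 5]))  # expected: 3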
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 18:09:17 2019.
@author: mtageld
"""
import numpy as np
from histomicstk.utils import (
convert_image_to_matrix, convert_matrix_to_image)
from histomicstk.preprocessing.color_conversion import (
rgb_to_sda, sda_to_rgb)
from histomicstk.preprocessing.color_deconvolution import (
color_deconvolution_routine)
def perturb_stain_concentration(
StainsFloat, W, I_0=None, mask_out=None, sigma1=0.9, sigma2=0.9):
u"""Perturb stain concentrations in SDA space and return augmented image.
This is an implementation of the method described in Tellez et
al, 2018 (see below). The SDA matrix is perturbed by multiplying each
channel independently by a value chosen from a random uniform distribution
    in the range [1 - sigma1, 1 + sigma1], then adding a value chosen from another
random uniform distribution in the range [-sigma2, sigma2].
Parameters
------------
StainsFloat : array_like
An intensity image (m, n, 3) of deconvolved stains that is unbounded,
suitable for reconstructing color images of deconvolved stains
with color_convolution.
W : array_like
A 3x3 complemented stain matrix.
I_0 : float or array_like, optional
        A float or a 3-vector containing background RGB intensities.
If unspecified, use the old OD conversion.
mask_out : array_like, default is None
if not None, should be (m x n) boolean numpy array.
        This parameter ensures that masked-out areas are excluded from the
        perturbation. This is relevant because elements like blood, sharpie
        marker, white space, etc. cannot be simply modeled as a mix of two
        stains.
sigma1 : float
parameter, see beginning of this docstring.
sigma2 : float
parameter, see beginning of this docstring.
Returns
--------
array_like
Color augmented RGB image (m x n x 3)
References
----------
.. [#] Tellez, David, Maschenka Balkenhol, Irene Otte-Höller,
Rob van de Loo, Rob Vogels, Peter Bult, Carla Wauters et al.
"Whole-slide mitosis detection in H&E breast histology using PHH3
as a reference to train distilled stain-invariant convolutional
networks." IEEE transactions on medical imaging 37, no. 9
(2018): 2126-2136.
.. [#] Tellez, David, Geert Litjens, Peter Bandi, Wouter Bulten,
John-Melle Bokhorst, Francesco Ciompi, and Jeroen van der Laak.
"Quantifying the effects of data augmentation and stain color
normalization in convolutional neural networks for computational
pathology." arXiv preprint arXiv:1902.06543 (2019).
.. [#] Implementation inspired by Peter Byfield StainTools repository. See
https://github.com/Peter554/StainTools/blob/master/LICENSE.txt
for copyright license (MIT license).
"""
    # if mask_out is None, perturb everything; otherwise exclude masked-out pixels
if mask_out is None:
keep_mask = np.zeros(StainsFloat.shape[:2]) == 0
else:
keep_mask = np.equal(mask_out, False)
keep_mask = np.tile(keep_mask[..., None], (1, 1, 3))
keep_mask = convert_image_to_matrix(keep_mask)
# transform 3D input stain image to 2D stain matrix format
m = convert_image_to_matrix(StainsFloat)
# transform input stains to optical density values
sda_fwd = rgb_to_sda(
m, 255 if I_0 is not None else None, allow_negatives=True)
# perturb concentrations in SDA space
augmented_sda = sda_fwd.copy()
for i in range(3):
alpha = np.random.uniform(1 - sigma1, 1 + sigma1)
beta = np.random.uniform(-sigma2, sigma2)
augmented_sda[i, keep_mask[i, :]] *= alpha
augmented_sda[i, keep_mask[i, :]] += beta
# convolve with stains column vectors and convert to RGB
sda_conv = np.dot(W, augmented_sda)
sda_inv = sda_to_rgb(sda_conv, I_0)
# reshape output, transform type
augmented_rgb = (
convert_matrix_to_image(sda_inv, StainsFloat.shape)
.clip(0, 255).astype(np.uint8))
return augmented_rgb
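# Tiny numeric illustration of the perturbation rule described above (a
# standalone sketch, not used by this module): with example draws
# alpha = 1.1 from [1 - sigma1, 1 + sigma1] and beta = -0.05 from
# [-sigma2, sigma2], an SDA channel [0.2, 0.5, 0.9] becomes
# [0.2, 0.5, 0.9] * 1.1 + (-0.05) = [0.17, 0.50, 0.94].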
def rgb_perturb_stain_concentration(
im_rgb, stain_unmixing_routine_params=None, **kwargs):
"""Apply wrapper that calls perturb_stain_concentration() on RGB.
Parameters
------------
im_rgb : array_like
        An RGB image (m x n x 3) to color augment
stain_unmixing_routine_params : dict
kwargs to pass as-is to the color_deconvolution_routine().
kwargs : k,v pairs
Passed as-is to perturb_stain_concentration()
Returns
--------
array_like
Color augmented RGB image (m x n x 3)
"""
stain_unmixing_routine_params = {
'stains': ['hematoxylin', 'eosin'],
'stain_unmixing_method': 'macenko_pca',
} if stain_unmixing_routine_params is None else stain_unmixing_routine_params
_, StainsFloat, W_source = color_deconvolution_routine(
im_rgb, W_source=None, **stain_unmixing_routine_params)
return perturb_stain_concentration(StainsFloat, W_source, **kwargs)
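# A minimal usage sketch (hedged): assumes an H&E RGB tile `im_rgb` is already
# loaded as an (m, n, 3) uint8 array; parameter values are illustrative only.
#     augmented = rgb_perturb_stain_concentration(
#         im_rgb,
#         stain_unmixing_routine_params={
#             'stains': ['hematoxylin', 'eosin'],
#             'stain_unmixing_method': 'macenko_pca',
#         },
#         sigma1=0.5, sigma2=0.5)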
|
# -*- coding: utf-8 -*-
a = 1
def getA():
return a
def modA(input_v):
global a
a = input_v
|
import numpy as np
# Load matlab's file
import scipy.io
# Build-in K-means clustering model of sklearn lib
from sklearn.cluster import KMeans
from findClosestCentroids import find_closest_centroid
from computeCentroidMean import compute_centroid_mean
from KmeansAlgo import kmeans_algo
############### (1) Find the closest centroids ####################
print(f"Find the closest centroids")
# Loading the example dataset
data2 = scipy.io.loadmat("data/ex7data2.mat")
# Find the closest centroid with initial K=3
K = 3
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
idx = find_closest_centroid(data2["X"], initial_centroids)
print(f"Closest centroids of the 1st 3 examples: {idx[:3].T}")
print(f"The closet centroids should be 1, 3, 2 respectively")
input("Pause program, Press enter to continue")
############## (2) Compute the centroids means ################
print(f"\nCompute means of data point closed to each associated centroid")
centroids = compute_centroid_mean(data2["X"], idx, K)
print(f"Centroids computed after initial finding of closest centroid:\n {centroids}\n")
print(f"(the centroids should be\n [ 2.428301 3.157924 ]\n[ 5.813503 2.633656 ]\n[ 7.119387 3.616684 ]")
input("Pause program, Press enter to continue")
################ (3) K-means clustering ######################
print(f"\nRunning K-means clustering on example dataset")
# Initial setting
K = 3
max_iter = 10
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
# Run the K-means clustering algorithm. A "True" value for the last parameter plots the progress of K-means
centroids, idx = kmeans_algo(data2["X"], initial_centroids, max_iter, True)
input("Pause program, Press enter to continue")
############# (4) K-means clustering using sklearn lib ###############
# from sklearn.cluster import KMeans
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
# Create KMeans model
kmeans_model = KMeans(n_clusters=K, max_iter=max_iter, init=initial_centroids)
# Fit data to the model
kmeans_model.fit(data2["X"])
# Return trained centroids
kmeans_model_centroids = kmeans_model.cluster_centers_
# Comparing centroids of 2 ways
print(f"Trained cluster centroid of sklearn model:\n"
f"{kmeans_model_centroids[0]}\n"
f"{kmeans_model_centroids[1]}\n"
f"{kmeans_model_centroids[2]}\n")
print(f"Trained cluster centroid of manual generation:\n"
f"{centroids[0]}\n"
f"{centroids[1]}\n"
f"{centroids[2]}\n")
input("Pause program, Press enter to continue")
|
import os
import dask.dataframe as dd
from utils.config import Config, mappings
from utils.es_wrapper import ElasticWrapper
def get_docs_json(index, docs_df):
for _, row in docs_df.iterrows():
yield {
"_index": index,
"_id": row[Config.DOCID_KEY],
"_source": {
Config.DOCID_KEY: row[Config.DOCID_KEY],
"url": row["url"],
"query": row["title"],
"title": row["title"],
"body": row["body"]
}
}
def bulk_index_docs(es_wrapper, docs_df, index, mapping):
es_wrapper.create_index(index, mapping, recreate_exist=True)
es_wrapper.bulk_index(index, get_docs_json(index, docs_df))
def index_docs(doc_path=None):
if not doc_path:
doc_path = os.path.join(Config.SUBSAMPLED_ROOT, Config.DOCS_FILE_SAMPLED)
docs_df = dd.read_csv(doc_path, keep_default_na=False)
es_wrapper = ElasticWrapper()
bulk_index_docs(es_wrapper, docs_df, Config.VSM_INDEX_KEY, mappings.VSM_MAPPING)
bulk_index_docs(es_wrapper, docs_df, Config.BM25_INDEX_KEY, mappings.BM25_MAPPING)
es_wrapper.close() |
def do_mul():
    a = 2
    b = 4
    print(a * b)
do_mul()
#aaa
|
"Placing Callbacks on Queues"
'''
Luckily, queues support much more than just strings—any type of Python object can
be placed on a queue. Perhaps the most general of these is a callable object: by placing
a function or other callable object on the queue, a producer thread can tell the GUI
how to handle the message in a very direct way. The GUI simply calls the objects it
pulls off the queue. Since threads all run within the same process and memory space,
any type of callable object works on a queue—simple functions, lambdas, and even
bound methods that combine a function with an implied subject object that gives access
to state information and methods. Any updates performed by the callback object update
state shared across the entire process.
'''
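# A minimal, non-GUI sketch of the idea (standalone; the names here are
# illustrative and not part of the module below): producer threads put
# (callable, args) pairs on a queue, and the main thread pulls and calls them.
#
#     import queue, threading
#     q = queue.Queue()
#     def worker(n):
#         q.put((print, ('worker %s finished' % n,)))    # any callable works
#     for n in range(3):
#         threading.Thread(target=worker, args=(n,)).start()
#     for _ in range(3):
#         callback, args = q.get()                        # blocks until available
#         callback(*args)                                 # dispatched in main thread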
"""
#################################################################################
System-wide thread interface utilities for GUIs.
Implements a single thread callback queue and checker timer loop shared by
all the windows in a program; worker threads queue their exit and progress
actions to be run in the main thread; this doesn't block the GUI - it just
spawns operations and manages and dispatches exits and progress; worker
threads can overlap freely with the main thread, and with other workers.
Using a queue of callback functions and arguments is more useful than a
simple data queue if there can be many kinds of threads running at the
same time - each kind may have different implied exit actions.
Because GUI API is not completely thread-safe, instead of calling GUI
update callbacks directly after thread main action, place them on a shared
queue, to be run from a timer loop in the main thread, not a child thread;
this also makes GUI update points less random and unpredictable; requires
threads to be split into main action, exit actions, and progress action.
Assumes threaded action raises an exception on failure, and has a 'progress'
callback argument if it supports progress updates; also assumes callbacks
are either short-lived or update as they run, and that queue will contain
callback functions (or other callables) for use in a GUI app - requires a
widget in order to schedule and catch 'after' event loop callbacks; to use
this model in non-GUI contexts, could use simple thread timer instead.
#################################################################################
"""
# run even if no threads # in standard lib now
try: # raise ImportError to
import _thread as thread # run with GUI blocking
except ImportError: # if threads not available
import _dummy_thread as thread # same interface, no threads
# shared cross-thread queue
# named in shared global scope, lives in shared object memory
import queue, sys
threadQueue = queue.Queue(maxsize=0) # infinite size
#################################################################################
# IN MAIN THREAD - periodically check thread completions queue; run implied GUI
# actions on queue in this main GUI thread; one consumer (GUI), and multiple
# producers (load, del, send); a simple list may suffice too: list.append and
# pop atomic?; 4E: runs at most N actions per timer event: looping through all
# queued callbacks on each timer event may block GUI indefinitely, but running
# only one can take a long time or consume CPU for timer events (e.g., progress);
# assumes callback is either short-lived or updates display as it runs: after a
# callback run, the code here reschedules and returns to event loop and updates;
# because this perpetual loop runs in main thread, does not stop program exit;
#################################################################################
def threadChecker(widget, delayMsecs=100, perEvent=1): # 10x/sec, 1/timer
for i in range(perEvent): # pass to set speed
try:
(callback, args) = threadQueue.get(block=False) # run <= N callbacks
except queue.Empty:
break # anything ready?
else:
callback(*args) # run callback here
widget.after(delayMsecs, # reset timer event
lambda: threadChecker(widget, delayMsecs, perEvent)) # back to event loop
#################################################################################
# IN A NEW THREAD - run action, manage thread queue puts for exits and progress;
# run action with args now, later run on* calls with context; calls added to
# queue here are dispatched in main thread only, to avoid parallel GUI updates;
# allows action to be fully ignorant of use in a thread here; avoids running
# callbacks in thread directly: may update GUI in thread, since passed func in
# shared memory called in thread; progress callback just adds callback to queue
# with passed args; don't update in-progress counters here: not finished till
# exit actions taken off queue and dispatched in main thread by threadChecker;
#################################################################################
def threaded(action, args, context, onExit, onFail, onProgress):
try:
if not onProgress: # wait for action in this thread
action(*args) # assume raises exception if fails
else:
def progress(*any):
threadQueue.put((onProgress, any + context))
action(progress=progress, *args)
except:
threadQueue.put((onFail, (sys.exc_info(), ) + context))
else:
threadQueue.put((onExit, context))
def startThread(action, args, context, onExit, onFail, onProgress=None):
thread.start_new_thread(
threaded, (action, args, context, onExit, onFail, onProgress))
#################################################################################
# a thread-safe counter or flag: useful to avoid operation overlap if threads
# update other shared state beyond that managed by the thread callback queue
#################################################################################
class ThreadCounter:
def __init__(self):
self.count = 0
self.mutex = thread.allocate_lock() # or use Threading.semaphore
def incr(self):
self.mutex.acquire() # or with self.mutex:
self.count += 1
self.mutex.release()
def decr(self):
self.mutex.acquire()
self.count -= 1
self.mutex.release()
def __len__(self): return self.count # True/False if used as a flag
#################################################################################
# self-test code: split thread action into main, exits, progress
#################################################################################
if __name__ == '__main__': # self-test code when run
import time
from tkinter.scrolledtext import ScrolledText # or PP4E.Gui.Tour.scrolledtext
def onEvent(i): # code that spawns thread
myname = 'thread-%s' % i
startThread(action = threadaction,
args = (i, 3),
context = (myname,),
onExit = threadexit,
onFail = threadfail,
onProgress = threadprogress)
# thread's main action
def threadaction(id, reps, progress): # what the thread does
for i in range(reps):
time.sleep(1)
if progress: progress(i) # progress callback: queued
if id % 2 == 1: raise Exception # odd numbered: fail
# thread exit/progress callbacks: dispatched off queue in main thread
def threadexit(myname):
text.insert('end', '%s\texit\n' % myname)
text.see('end')
def threadfail(exc_info, myname):
text.insert('end', '%s\tfail\t%s\n' % (myname, exc_info[0]))
text.see('end')
def threadprogress(count, myname):
text.insert('end', '%s\tprog\t%s\n' % (myname, count))
text.see('end')
text.update() # works here: run in main thread
# make enclosing GUI and start timer loop in main thread
# spawn batch of worker threads on each mouse click: may overlap
text = ScrolledText()
text.pack()
threadChecker(text) # start thread loop in main thread
text.bind('<Button-1>', # 3.x need list for map, range ok
lambda event: list(map(onEvent, range(6))) )
text.mainloop() # pop-up window, enter tk event loop
|
# -*- coding: utf-8 -*-
thislist = ["apple", "banana", "cherry"]
thislist.insert(1, "orange")
print(thislist)
|
from torch import nn
import torch.nn.functional as F
class BasicNet(nn.Module):
def __init__(self):
super(BasicNet, self).__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
|
import unittest
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from config import OptimizationConfigEuRoC
from utils import to_quaternion, to_rotation, Isometry3d
from feature import Feature
from msckf import CAMState
class TestFeature(unittest.TestCase):
def test_feature_initialization(self):
"""
Test feature initialization.
"""
optimization_config = OptimizationConfigEuRoC()
Feature.R_cam0_cam1 = np.identity(3)
Feature.t_cam0_cam1 = np.zeros(3)
# feature = np.array([0.5, 0., 0.])
feature = np.random.random(3) * 0.5
# Add 6 camera poses, all of which are able to see the
# feature at the origin. For simplicity, the six camera
        # views are located at the six intersections between a
# unit sphere and the coordinate system. And the z axes
# of the camera frames are facing the origin.
cam_poses = [
Isometry3d(np.array([
[0., 0., -1.],
[1., 0., 0.],
[0., -1., 0.]]
), np.array([1., 0., 0.])),
Isometry3d(np.array([
[-1., 0., 0.],
[0., 0., -1.],
[0., -1., 0.]]
), np.array([0., 1., 0.])),
Isometry3d(np.array([
[0., 0., 1.],
[-1., 0., 0.],
[0., -1., 0.]]
), np.array([-1., 0., 0.])),
Isometry3d(np.array([
[1., 0., 0.],
[0., 0., 1.],
[0., -1., 0.]]
), np.array([0., -1., 0.])),
Isometry3d(np.array([
[0., -1., 0.],
[-1., 0., 0.],
[0., 0., -1.]]
), np.array([0., 0., 1.])),
Isometry3d(np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]
), np.array([0., 0., -1.])),
]
# Set the camera states
cam_states = dict()
for i in range(6):
cam_state = CAMState(i)
cam_state.timestamp = i
cam_state.orientation = cam_poses[i].R
cam_state.position = cam_poses[i].t
cam_states[i] = cam_state
# Compute measurements.
measurements = []
for i in range(6):
cam_pose_inv = cam_poses[i].inverse()
p = cam_pose_inv.R @ feature + cam_pose_inv.t
u, v = p[:2] / p[2] + np.random.randn(2) * 0.01
measurements.append(np.array([u, v, u, v]))
# Initialize a feature object.
feature_object = Feature(0, optimization_config=optimization_config)
for i in range(6):
feature_object.observations[i] = measurements[i]
# Compute the 3d position of the feature.
status = feature_object.initialize_position(cam_states)
# Check the difference between the computed 3d
        # feature position and the ground truth.
print('status:', status)
print('ground truth position:\n', feature)
print('estimated position:\n', feature_object.position)
e = np.linalg.norm(feature - feature_object.position)
print('error norm:', e)
self.assertTrue(e < 0.05)
if __name__ == '__main__':
unittest.main() |
def get_arg_types(pred:str):
move_list=["north","south","east","west","northwest","northeast","southeast","southwest"]
if pred in move_list or pred == 'get':
return ['cell']
elif pred == 'cell':
return ['cell']
elif pred == 'deepwater':
return ['cell']
elif pred == 'at' or pred == 'not at':
return ['cell', 'xcoord', 'ycoord']
elif pred == 'agentat' or pred == 'not agentat':
return ['cell']
elif pred == 'wall' or pred == 'not wall':
return ['cell']
elif pred == '=':
#if '-' in self.args or '+' in self.args:
return ['int', 'int', 'operator', 'constant']
#else:
#return ['int', 'int']
elif pred == 'throw': #####################Non-Deterministic
return ['cell', 'xcoord','ycoord']
elif pred == 'get':
return ['cell']
elif pred == 'open_door': #TODO:Implement Below
return ['cell']
elif pred == 'closed_door':
return ['cell']
elif pred == 'drop':
return ['cell']
elif pred == 'wait':
return ['cell']
elif pred == 'itemtype':
return ['item_type']
elif pred == 'inv_item':
return ['item_type', 'quantity']
elif pred == 'inv_id':
return ['id', 'item']
elif pred == 'item':
return ['cell','item_type','quantity']
class Term:
def __init__(self,predicate_str:str,arg_strs:[str]):
self.pred_str = predicate_str
self.args = arg_strs
self.arg_types = None
self.human_readable_args = []
self._set_arg_types(predicate_str)
def get_pred_str(self):
return self.pred_str
def _set_arg_types(self, pred:str):
#print("pred is {}".format(pred))
self.arg_types=get_arg_types(pred)
def set_human_readable_args(self, args:[str]):
#print("args={}".format(args))
assert len(args) == len(self.arg_types)
self.human_readable_args = args
def substitute_args(self,curr:[str],new:[str]):
new_args = []
for a in self.human_readable_args:
new_args.append(curr.index(a))
self.human_readable_args = new_args
def __eq__(self, other):
        return self.pred_str == other.pred_str and self.human_readable_args == other.human_readable_args
def __hash__(self):
return hash(self.__str__())
def __str__(self):
upper_args = map(lambda s: s.upper(), self.args)
if self.pred_str == '=':
return '{} = {} {} {}'.format(*upper_args)
else:
s = '{}('.format(self.pred_str)
for a in upper_args:
s += '{},'.format(a)
s = s[0:-1] + ')'
return s
def __repr__(self):
return self.__str__() |
import pymysql
import settings
from dao.DAOUsuario import DAOUsuario
class DAOCart:
def connect(self):
host = settings.MYSQL_HOST
user = settings.MYSQL_USER
password = settings.MYSQL_PASSWORD
db = settings.MYSQL_DB
return pymysql.connect(host, user, password, db)
def getUsers(self):
con = DAOCart.connect(self)
cur = con.cursor()
try:
cur.execute("SELECT id_usuario FROM cart")
users = list(dict.fromkeys(cur.fetchall()))
if not users:
return []
db = DAOUsuario()
return db.readUsingIdList(users)
except Exception as e:
print("Exception occured in DAOCart:{}".format(e))
return
finally:
con.close()
|
# lightning imports
from pytorch_lightning import LightningModule, LightningDataModule, Callback, Trainer
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning import seed_everything
# hydra imports
from omegaconf import DictConfig
from hydra.utils import log
import hydra
# normal imports
from typing import List, Optional
# src imports
from src.utils import template_utils
def train(config: DictConfig) -> Optional[float]:
"""Contains training pipeline.
Instantiates all PyTorch Lightning objects from config.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
# Set seed for random number generators in pytorch, numpy and python.random
if "seed" in config:
seed_everything(config.seed)
# Init Lightning datamodule
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
# Init Lightning model
log.info(f"Instantiating model <{config.model._target_}>")
model: LightningModule = hydra.utils.instantiate(
config.model, _recursive_=False
)
# Init Lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config["callbacks"].items():
if "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init Lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config["logger"].items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init Lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(
config.trainer, callbacks=callbacks, logger=logger, _convert_="partial"
)
# Send some parameters from config to all lightning loggers
log.info("Logging hyperparameters!")
template_utils.log_hyperparameters(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Train the model
log.info("Starting training!")
trainer.fit(model=model, datamodule=datamodule)
# Evaluate model on test set after training
if not config.trainer.get("fast_dev_run"):
log.info("Starting testing!")
trainer.test()
# Make sure everything closed properly
log.info("Finalizing!")
template_utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Return metric score for Optuna optimization
optimized_metric = config.get("optimized_metric")
if optimized_metric:
return trainer.callback_metrics[optimized_metric] |
import pickle
import os
import sys
import numpy as np
import pc_util
import scene_util
class ScannetDataset():
def __init__(self, root, npoints=8192, split='train'):
self.npoints = npoints
self.root = root
self.split = split
self.data_filename = os.path.join(self.root, 'scannet_%s.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
if split=='train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
elif split=='test':
self.labelweights = np.ones(21)
def __getitem__(self, index):
point_set = self.scene_points_list[index]
semantic_seg = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set,axis=0)
coordmin = np.min(point_set,axis=0)
smpmin = np.maximum(coordmax-[1.5,1.5,3.0], coordmin)
smpmin[2] = coordmin[2]
smpsz = np.minimum(coordmax-smpmin,[1.5,1.5,3.0])
smpsz[2] = coordmax[2]-coordmin[2]
isvalid = False
for i in range(10):
curcenter = point_set[np.random.choice(len(semantic_seg),1)[0],:]
curmin = curcenter-[0.75,0.75,1.5]
curmax = curcenter+[0.75,0.75,1.5]
curmin[2] = coordmin[2]
curmax[2] = coordmax[2]
curchoice = np.sum((point_set>=(curmin-0.2))*(point_set<=(curmax+0.2)),axis=1)==3
cur_point_set = point_set[curchoice,:]
cur_semantic_seg = semantic_seg[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set>=(curmin-0.01))*(cur_point_set<=(curmax+0.01)),axis=1)==3
vidx = np.ceil((cur_point_set[mask,:]-curmin)/(curmax-curmin)*[31.0,31.0,62.0])
vidx = np.unique(vidx[:,0]*31.0*62.0+vidx[:,1]*62.0+vidx[:,2])
isvalid = np.sum(cur_semantic_seg>0)/len(cur_semantic_seg)>=0.7 and len(vidx)/31.0/31.0/62.0>=0.02
if isvalid:
break
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
point_set = cur_point_set[choice,:]
semantic_seg = cur_semantic_seg[choice]
mask = mask[choice]
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask
return point_set, semantic_seg, sample_weight
def __len__(self):
return len(self.scene_points_list)
class ScannetDatasetWholeScene():
def __init__(self, root, npoints=8192, split='train'):
self.npoints = npoints
self.root = root
self.split = split
self.data_filename = os.path.join(self.root, 'scannet_%s.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
if split=='train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
elif split=='test':
self.labelweights = np.ones(21)
def __getitem__(self, index):
point_set_ini = self.scene_points_list[index]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set_ini,axis=0)
coordmin = np.min(point_set_ini,axis=0)
nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/1.5).astype(np.int32)
nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/1.5).astype(np.int32)
point_sets = list()
semantic_segs = list()
sample_weights = list()
isvalid = False
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin+[i*1.5,j*1.5,0]
curmax = coordmin+[(i+1)*1.5,(j+1)*1.5,coordmax[2]-coordmin[2]]
curchoice = np.sum((point_set_ini>=(curmin-0.2))*(point_set_ini<=(curmax+0.2)),axis=1)==3
cur_point_set = point_set_ini[curchoice,:]
cur_semantic_seg = semantic_seg_ini[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set>=(curmin-0.001))*(cur_point_set<=(curmax+0.001)),axis=1)==3
choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
point_set = cur_point_set[choice,:] # Nx3
semantic_seg = cur_semantic_seg[choice] # N
mask = mask[choice]
if sum(mask)/float(len(mask))<0.01:
continue
sample_weight = self.labelweights[semantic_seg]
sample_weight *= mask # N
point_sets.append(np.expand_dims(point_set,0)) # 1xNx3
semantic_segs.append(np.expand_dims(semantic_seg,0)) # 1xN
sample_weights.append(np.expand_dims(sample_weight,0)) # 1xN
point_sets = np.concatenate(tuple(point_sets),axis=0)
semantic_segs = np.concatenate(tuple(semantic_segs),axis=0)
sample_weights = np.concatenate(tuple(sample_weights),axis=0)
return point_sets, semantic_segs, sample_weights
def __len__(self):
return len(self.scene_points_list)
class ScannetDatasetVirtualScan():
def __init__(self, root, npoints=8192, split='train'):
self.npoints = npoints
self.root = root
self.split = split
self.data_filename = os.path.join(self.root, 'scannet_%s.pickle'%(split))
with open(self.data_filename,'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
if split=='train':
labelweights = np.zeros(21)
for seg in self.semantic_labels_list:
tmp,_ = np.histogram(seg,range(22))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights/np.sum(labelweights)
self.labelweights = 1/np.log(1.2+labelweights)
elif split=='test':
self.labelweights = np.ones(21)
def __getitem__(self, index):
point_set_ini = self.scene_points_list[index]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
sample_weight_ini = self.labelweights[semantic_seg_ini]
point_sets = list()
semantic_segs = list()
sample_weights = list()
for i in range(8):
smpidx = scene_util.virtual_scan(point_set_ini,mode=i)
if len(smpidx)<300:
continue
point_set = point_set_ini[smpidx,:]
semantic_seg = semantic_seg_ini[smpidx]
sample_weight = sample_weight_ini[smpidx]
choice = np.random.choice(len(semantic_seg), self.npoints, replace=True)
point_set = point_set[choice,:] # Nx3
semantic_seg = semantic_seg[choice] # N
sample_weight = sample_weight[choice] # N
point_sets.append(np.expand_dims(point_set,0)) # 1xNx3
semantic_segs.append(np.expand_dims(semantic_seg,0)) # 1xN
sample_weights.append(np.expand_dims(sample_weight,0)) # 1xN
point_sets = np.concatenate(tuple(point_sets),axis=0)
semantic_segs = np.concatenate(tuple(semantic_segs),axis=0)
sample_weights = np.concatenate(tuple(sample_weights),axis=0)
return point_sets, semantic_segs, sample_weights
def __len__(self):
return len(self.scene_points_list)
if __name__=='__main__':
d = ScannetDatasetWholeScene(root = './data', split='test', npoints=8192)
labelweights_vox = np.zeros(21)
for ii in range(len(d)):
        print(ii)
ps,seg,smpw = d[ii]
for b in range(ps.shape[0]):
_, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(ps[b,smpw[b,:]>0,:], seg[b,smpw[b,:]>0], res=0.02)
tmp,_ = np.histogram(uvlabel,range(22))
labelweights_vox += tmp
    print(labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32)))
exit()
|
from PIL import Image
import os
import argparse
def rescale_images(directory):
    for img in os.listdir(directory):
        path = os.path.join(directory, img)
        im = Image.open(path)
        # Image.ANTIALIAS was removed in newer Pillow releases; LANCZOS is the same filter.
        im_resized = im.resize((800, 600), Image.LANCZOS)
        im_resized.save(path)
rescale_images("test/") |
# -*- coding: utf-8 -*-
""" Category model for Product Catalog """
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from mptt.managers import TreeManager
from mptt.models import MPTTModel
from mptt.models import TreeForeignKey
from product_catalog.managers import ProductsRelatedPublishedManager
from product_catalog.managers import product_published
@python_2_unicode_compatible
class Category(MPTTModel):
"""
Simple model for categorizing products.
"""
title = models.CharField(
_('title'), max_length=255)
slug = models.SlugField(
_('slug'), unique=True, max_length=255, db_index=True,
help_text=_("Used to build the category's URL."))
description = models.TextField(
_('description'), blank=True)
parent = TreeForeignKey(
'self',
related_name='children',
null=True, blank=True,
on_delete=models.SET_NULL,
verbose_name=_('parent category'))
objects = TreeManager()
published = ProductsRelatedPublishedManager()
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the category's URL
        based on its tree path.
"""
return 'product_catalog:category_detail', (self.slug,)
def __str__(self):
return self.title
def product_published(self):
"""
Returns category's published products.
"""
return product_published(self.products)
class Meta:
"""
Category's meta information.
"""
ordering = ['title']
verbose_name = _('category')
verbose_name_plural = _('categories')
class MPTTMeta:
"""
Category MPTT's meta information.
"""
order_insertion_by = ['title']
|
# Slixmpp: The Slick XMPP Library
# Copyright (C) 2010 Nathanael C. Fritz, Lance J.T. Stout
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from slixmpp.plugins.base import register_plugin
from slixmpp.plugins.xep_0092 import stanza
from slixmpp.plugins.xep_0092.stanza import Version
from slixmpp.plugins.xep_0092.version import XEP_0092
register_plugin(XEP_0092)
|
from typing import Any, Dict, List, Union
# Common properties which can be used at any schema
class CommonProperties:
def __init__(self, title: str, description: str, type: str, nullable: bool, deprecated: bool, readonly: bool):
self.title = title
self.description = description
self.type = type
self.nullable = nullable
self.deprecated = deprecated
self.readonly = readonly
from typing import Any
class ScalarProperties:
    def __init__(self, format: str, enum: Union['StringArray', 'NumberArray'], default: Union[str, float, bool]):
self.format = format
self.enum = enum
self.default = default
from typing import Any
from typing import Dict
# Properties of a schema
class Properties(Dict[str, 'PropertyValue']):
    pass
from typing import Any
# Properties specific for a container
class ContainerProperties:
def __init__(self, type: str):
self.type = type
from typing import Any
# Struct specific properties
class StructProperties:
def __init__(self, properties: Properties, required: List[str]):
self.properties = properties
self.required = required
from typing import Any
# Map specific properties
class MapProperties:
    def __init__(self, additionalProperties: Union['BooleanType', 'NumberType', 'StringType', 'ArrayType', 'CombinationType', 'ReferenceType', 'GenericType'], maxProperties: int, minProperties: int):
self.additionalProperties = additionalProperties
self.maxProperties = maxProperties
self.minProperties = minProperties
from typing import Any
# Array properties
class ArrayProperties:
    def __init__(self, type: str, items: Union['BooleanType', 'NumberType', 'StringType', 'ReferenceType', 'GenericType'], maxItems: int, minItems: int, uniqueItems: bool):
self.type = type
self.items = items
self.maxItems = maxItems
self.minItems = minItems
self.uniqueItems = uniqueItems
from typing import Any
# Boolean properties
class BooleanProperties:
def __init__(self, type: str):
self.type = type
from typing import Any
# Number properties
class NumberProperties:
def __init__(self, type: str, multipleOf: float, maximum: float, exclusiveMaximum: bool, minimum: float, exclusiveMinimum: bool):
self.type = type
self.multipleOf = multipleOf
self.maximum = maximum
self.exclusiveMaximum = exclusiveMaximum
self.minimum = minimum
self.exclusiveMinimum = exclusiveMinimum
from typing import Any
# String properties
class StringProperties:
def __init__(self, type: str, maxLength: int, minLength: int, pattern: str):
self.type = type
self.maxLength = maxLength
self.minLength = minLength
self.pattern = pattern
from typing import Any
from typing import Dict
# An object to hold mappings between payload values and schema names or references
class DiscriminatorMapping(Dict[str, str]):
    pass
from typing import Any
# Adds support for polymorphism. The discriminator is an object name that is used to differentiate between other schemas which may satisfy the payload description
class Discriminator:
def __init__(self, propertyName: str, mapping: DiscriminatorMapping):
self.propertyName = propertyName
self.mapping = mapping
from typing import Any
from typing import List
# An intersection type combines multiple schemas into one
class AllOfProperties:
    def __init__(self, description: str, allOf: List['OfValue']):
self.description = description
self.allOf = allOf
from typing import Any
from typing import List
# An union type can contain one of the provided schemas
class OneOfProperties:
    def __init__(self, description: str, discriminator: Discriminator, oneOf: List['OfValue']):
self.description = description
self.discriminator = discriminator
self.oneOf = oneOf
from typing import Any
from typing import Dict
class TemplateProperties(Dict[str, 'ReferenceType']):
    pass
from typing import Any
# Represents a reference to another schema
class ReferenceType:
def __init__(self, ref: str, template: TemplateProperties):
self.ref = ref
self.template = template
from typing import Any
# Represents a generic type
class GenericType:
def __init__(self, generic: str):
self.generic = generic
from typing import Any
from typing import Dict
# Schema definitions which can be reused
class Definitions(Dict[str, 'DefinitionValue']):
    pass
from typing import Any
from typing import Dict
# Contains external definitions which are imported. The imported schemas can be used via the namespace
class Import(Dict[str, str]):
    pass
from typing import Any
# TypeSchema meta schema which describes a TypeSchema
class TypeSchema:
    # 'import' is a reserved word in Python, so the attribute is named _import here
    def __init__(self, _import: Import, title: str, description: str, type: str, definitions: Definitions, properties: Properties, required: List[str]):
        self._import = _import
self.title = title
self.description = description
self.type = type
self.definitions = definitions
self.properties = properties
self.required = required
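# A small illustrative instantiation of two of the generated property classes
# above (values are arbitrary examples, not taken from any real schema):
#     name_prop = StringProperties(type='string', maxLength=64, minLength=1,
#                                  pattern='^[A-Za-z]+$')
#     age_prop = NumberProperties(type='integer', multipleOf=1, maximum=150,
#                                 exclusiveMaximum=False, minimum=0,
#                                 exclusiveMinimum=False)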
|
import math
from random import randint
from numerus import is_numeric
from omnicanvas import Canvas, colors
from .series import Series, LineSeries, ScatterSeries
class Chart:
"""The base class for all charts. It controls the attributes common to all
charts - namely dimensions and title.
:param str title: The chart's title. This will be displayed at the top of\
the chart.
:param width: The width in pixels of the chart.
:param height: The height in pixels of the chart."""
def __init__(self, title="", width=700, height=500):
if not isinstance(title, str):
raise TypeError("title must be str, not '%s'" % str(title))
self._title = title
if not is_numeric(width):
raise TypeError("width must be numeric, not '%s'" % str(width))
self._width = width
if not is_numeric(height):
raise TypeError("height must be numeric, not '%s'" % str(height))
self._height = height
def __repr__(self):
return "<Chart (%i×%i)>" % (self._width, self._height)
def title(self, title=None):
"""Returns or sets (if a value is provided) the chart's title.
:param str title: If given, the chart's title will be set to this.
:rtype: ``str``"""
if title is None:
return self._title
else:
if not isinstance(title, str):
raise TypeError("title must be str, not '%s'" % str(title))
self._title = title
def width(self, width=None):
"""Returns or sets (if a value is provided) the chart's width.
:param width: If given, the chart's width will be set to this."""
if width is None:
return self._width
else:
if not is_numeric(width):
raise TypeError("width must be numeric, not '%s'" % str(width))
self._width = width
def height(self, height=None):
"""Returns or sets (if a value is provided) the chart's height.
:param height: If given, the chart's height will be set to this."""
if height is None:
return self._height
else:
if not is_numeric(height):
raise TypeError("height must be numeric, not '%s'" % str(height))
self._height = height
def create(self):
"""Renders the chart to an OmniCanvas `canvas <https://omnicanvas.readt\
hedocs.io/en/latest/api/canvas.html#omnicanvas.canvas.Canvas>`_. This
object can then be `saved <https://omnicanvas.readthedocs.io/en/latest/\
api/canvas.html#omnicanvas.canvas.Canvas.save>`_ or `rendered <https://\
omnicanvas.readthedocs.io/en/latest/api/canvas.html#omnicanvas.canvas.C\
anvas.render>`_ as SVG."""
canvas = Canvas(self.width(), self.height())
canvas.add_text(
self.width() / 2, 0, self.title(),
vertical_align="bottom", name="title"
)
return canvas
class AxisChart(Chart):
"""Base class: :py:class:`Chart`
A chart with axes, onto which series can be shown. Line charts, scatter
charts, bar charts etc. are all AxisCharts with the relevant series. An
AxisChart can have multiple series associated with it, and they will all be
drawn onto the chart.
AxisCharts have the usual properties relating to axes, such as axes labels
and ticks.
:param Series \*series: One or more :py:class:`.Series` objects to be\
associated with the chart.
:param str title: The chart's title. This will be displayed at the top of\
the chart.
:param width: The width in pixels of the chart.
:param height: The height in pixels of the chart.
:param str x_label: The label for the x-axis.
:param str y_label: The label for the y-axis.
:raises ValueError: If no series are given."""
def __init__(self, *series, x_label="", y_label="", **kwargs):
Chart.__init__(self, **kwargs)
for s in series:
if not isinstance(s, Series):
raise TypeError("'%s' is not a Series" % str(s))
if len(series) == 0:
raise ValueError("AxisChart needs at least one series")
self._all_series = list(series)
for s in series:
s._chart = self
if not isinstance(x_label, str):
raise TypeError("x_label must be str, not '%s'" % str(x_label))
self._x_label = x_label
if not isinstance(y_label, str):
raise TypeError("y_label must be str, not '%s'" % str(y_label))
self._y_label = y_label
self._horizontal_padding = 0.1
self._vertical_padding = 0.1
self._x_lower_limit = None
self._x_upper_limit = None
self._y_lower_limit = None
self._y_upper_limit = None
self._x_ticks = None
self._y_ticks = None
self._x_grid = True
self._y_grid = True
def __repr__(self):
return "<AxisChart (%i series)>" % len(self._all_series)
def all_series(self):
"""Returns a ``list`` of all the :py:class:`.Series` objects associated
with the chart.
:rtype: ``list``"""
return list(self._all_series)
def series(self):
"""Returns the first :py:class:`.Series` objects associated with the
chart.
:rtype: :py:class:`.Series`"""
return self._all_series[0]
def add_series(self, series):
"""Adds a :py:class:`.Series` to the chart.
:param Series series: The :py:class:`.Series` to add."""
if not isinstance(series, Series):
raise TypeError("'%s' is not a Series" % str(series))
self._all_series.append(series)
series._chart = self
def remove_series(self, series):
"""Removes a :py:class:`.Series` from the chart.
:param Series series: The :py:class:`.Series` to remove.
:raises ValueError: if you try to remove the last\
:py:class:`.Series`."""
if len(self.all_series()) == 1:
raise ValueError("Cannot remove last series from %s" % str(self))
self._all_series.remove(series)
series._chart = None
def next_color(self):
current_colors = [series.color() for series in self.all_series()]
for color in colors:
if color not in current_colors:
return color
return "#%02x%02x%02x" % (randint(0,255), randint(0,255), randint(0,255))
def line(self, *args, **kwargs):
"""Adds a :py:class:`.LineSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param str linestyle: The line pattern. See\
`OmniCanvas docs <https://omnicanvas.readthedocs.io/en/latest/api/graph\
ics.html#omnicanvas.graphics.ShapeGraphic.line_style>`_ for acceptable \
values.
:param Number linewidth: The width in pixels of the line.
:raises ValueError: if the size and length of the data doesn't match\
either format."""
if "color" not in kwargs:
kwargs["color"] = self.next_color()
series = LineSeries(*args, **kwargs)
self.add_series(series)
def scatter(self, *args, **kwargs):
"""Adds a :py:class:`.ScatterSeries` to the chart.
:param \*data: The data for the series as either (x,y) values or two big\
tuples/lists of x and y values respectively.
:param str name: The name to be associated with the series.
:param str color: The hex colour of the line.
:param Number size: The size of each data point - generally the diameter.
:param Number linewidth: The width in pixels of the data points' edge.
:raises ValueError: if the size and length of the data doesn't match\
either format."""
if "color" not in kwargs:
kwargs["color"] = self.next_color()
series = ScatterSeries(*args, **kwargs)
self.add_series(series)
def get_series_by_name(self, name):
"""Returns the first :py:class:`.Series` of a given name, or ``None``.
:param str name: The name to search by."""
if not isinstance(name, str):
raise TypeError(
"Can only search series by str name, not '%s'" % str(name)
)
for series in self.all_series():
if series.name() == name:
return series
def x_label(self, x_label=None):
"""Returns or sets (if a value is provided) the chart's x-axis label.
:param str x_label: If given, the chart's x_label will be set to this.
:rtype: ``str``"""
if x_label is None:
return self._x_label
else:
if not isinstance(x_label, str):
raise TypeError("x_label must be str, not '%s'" % str(x_label))
self._x_label = x_label
def y_label(self, y_label=None):
"""Returns or sets (if a value is provided) the chart's y-axis label.
:param str y_label: If given, the chart's y_label will be set to this.
:rtype: ``str``"""
if y_label is None:
return self._y_label
else:
if not isinstance(y_label, str):
raise TypeError("y_label must be str, not '%s'" % str(y_label))
self._y_label = y_label
def horizontal_padding(self, padding=None):
"""Returns or sets (if a value is provided) the chart's horizontal
padding. This determines how much space will be on either side of the
display area, as a proportion of overall width, and should be a value
between 0 and 0.5
:param float padding: If given, the chart's horizontal_padding\
will be set to this.
:raises ValueError: if a value outside of 0 < n < 0.5 is given.
:rtype: float"""
if padding is None:
return self._horizontal_padding
else:
if not isinstance(padding, float):
raise TypeError("padding must be float, not '%s'" % str(padding))
if not 0 < padding < 0.5:
raise ValueError(
"padding must be between 0 and 0.5 (not inclusive), not '%s'" % str(padding)
)
self._horizontal_padding = padding
def vertical_padding(self, padding=None):
"""Returns or sets (if a value is provided) the chart's vertical
padding. This determines how much space will be above and below the
display area, as a proportion of overall height, and should be a value
between 0 and 0.5
:param float padding: If given, the chart's vertical_padding\
will be set to this.
:raises ValueError: if a value outside of 0 < n < 0.5 is given.
:rtype: float"""
if padding is None:
return self._vertical_padding
else:
if not isinstance(padding, float):
raise TypeError("padding must be float, not '%s'" % str(padding))
if not 0 < padding < 0.5:
raise ValueError(
"padding must be between 0 and 0.5 (not inclusive), not '%s'" % str(padding)
)
self._vertical_padding = padding
def smallest_x(self):
"""Returns the smallest x-value in all the :py:class:`.Series`
associated with the chart."""
return min(
[series.data()[0][0] for series in self.all_series()]
)
def largest_x(self):
"""Returns the largest x-value in all the :py:class:`.Series`
associated with the chart."""
return max(
[series.data()[-1][0] for series in self.all_series()]
)
def smallest_y(self):
"""Returns the smallest y-value in all the :py:class:`.Series`
associated with the chart."""
return min(
[series.smallest_y() for series in self.all_series()]
)
def largest_y(self):
"""Returns the largest y-value in all the :py:class:`.Series`
associated with the chart."""
return max(
[series.largest_y() for series in self.all_series()]
)
def x_lower_limit(self, limit=None):
"""Returns or sets (if a value is provided) the value at which the
x-axis should start. By default this is zero (unless there are negative
values).
:param limit: If given, the chart's x_lower_limit will be set to this.
:raises ValueError: if you try to make the lower limit larger than the\
upper limit."""
if limit is None:
if self._x_lower_limit is None:
if self.smallest_x() < 0:
if self.smallest_x() == self.largest_x():
return int(self.smallest_x() - 1)
else:
return self.smallest_x()
else:
return 0
else:
return self._x_lower_limit
else:
if not is_numeric(limit):
raise TypeError(
"lower x limit must be numeric, not '%s'" % str(limit)
)
if limit >= self.largest_x():
raise ValueError(
"lower x limit must be less than upper limit (%s), not %s" % (
str(self.largest_x()), str(limit)
)
)
self._x_lower_limit = limit
def y_lower_limit(self, limit=None):
"""Returns or sets (if a value is provided) the value at which the
y-axis should start. By default this is zero (unless there are negative
values).
:param limit: If given, the chart's y_lower_limit will be set to this.
:raises ValueError: if you try to make the lower limit larger than the\
upper limit."""
if limit is None:
if self._y_lower_limit is None:
if self.smallest_y() < 0:
if self.smallest_y() == self.largest_y():
return int(self.smallest_y() - 1)
else:
return self.smallest_y()
else:
return 0
else:
return self._y_lower_limit
else:
if not is_numeric(limit):
raise TypeError(
"lower y limit must be numeric, not '%s'" % str(limit)
)
if limit >= self.largest_y():
raise ValueError(
"lower y limit must be less than upper limit (%s), not %s" % (
str(self.largest_y()), str(limit)
)
)
self._y_lower_limit = limit
def x_upper_limit(self, limit=None):
"""Returns or sets (if a value is provided) the value at which the
x-axis should end. By default this is the highest x value in the
associated series.
:param limit: If given, the chart's x_upper_limit will be set to this.
:raises ValueError: if you try to make the upper limit smaller than the\
lower limit."""
if limit is None:
if self._x_upper_limit is None:
if self.smallest_x() == self.largest_x():
if int(self.largest_x()) == float(self.largest_x()):
return self.largest_x() + 1
else:
return math.ceil(self.largest_x())
else:
return self.largest_x()
else:
return self._x_upper_limit
else:
if not is_numeric(limit):
raise TypeError(
"upper x limit must be numeric, not '%s'" % str(limit)
)
if limit <= self.smallest_x():
raise ValueError(
"upper x limit must be greater than lower limit (%s), not %s" % (
str(self.smallest_x()), str(limit)
)
)
self._x_upper_limit = limit
def y_upper_limit(self, limit=None):
"""Returns or sets (if a value is provided) the value at which the
y-axis should end. By default this is the highest y value in the
associated series.
:param limit: If given, the chart's y_upper_limit will be set to this.
:raises ValueError: if you try to make the upper limit smaller than the\
lower limit."""
if limit is None:
if self._y_upper_limit is None:
if self.smallest_y() == self.largest_y():
if int(self.largest_y()) == float(self.largest_y()):
return self.largest_y() + 1
else:
return math.ceil(self.largest_y())
else:
return self.largest_y()
else:
return self._y_upper_limit
else:
if not is_numeric(limit):
raise TypeError(
"upper y limit must be numeric, not '%s'" % str(limit)
)
if limit <= self.smallest_y():
raise ValueError(
"upper y limit must be greater than lower limit (%s), not %s" % (
str(self.smallest_y()), str(limit)
)
)
self._y_upper_limit = limit
def x_ticks(self, *ticks):
"""The points on the x-axis for which there are markers and grid lines.
There are default ticks, but you can pass values to this method to
override the defaults. Otherwise the method will return the ticks.
:param \*ticks: if given, these will be chart's x-ticks.
:rtype: ``tuple``"""
if ticks:
for tick in ticks:
if not is_numeric(tick):
raise TypeError("'%s' is not a numeric tick" % str(tick))
self._x_ticks = tuple(sorted(ticks))
else:
if self._x_ticks:
return self._x_ticks
else:
return determine_ticks(self.x_lower_limit(), self.x_upper_limit())
def y_ticks(self, *ticks):
"""The points on the y-axis for which there are markers and grid lines.
There are default ticks, but you can pass values to this method to
override the defaults. Otherwise the method will return the ticks.
    :param \*ticks: if given, these will be chart's y-ticks.
:rtype: ``tuple``"""
if ticks:
for tick in ticks:
if not is_numeric(tick):
raise TypeError("'%s' is not a numeric tick" % str(tick))
self._y_ticks = tuple(sorted(ticks))
else:
if self._y_ticks:
return self._y_ticks
else:
return determine_ticks(self.y_lower_limit(), self.y_upper_limit())
def x_grid(self, grid=None):
"""The horizontal lines that run accross the chart from the x-ticks.
If a boolean value is given, these gridlines will be turned on or off.
Otherwise, the method will return their current state.
:param bool grid: Turns the gridlines on or off.
:rtype: ``bool``"""
if grid is None:
return self._x_grid
else:
if not isinstance(grid, bool):
raise TypeError("grid must be boolean, not '%s'" % grid)
self._x_grid = grid
def y_grid(self, grid=None):
"""The vertical lines that run accross the chart from the y-ticks.
If a boolean value is given, these gridlines will be turned on or off.
Otherwise, the method will return their current state.
:param bool grid: Turns the gridlines on or off.
:rtype: ``bool``"""
if grid is None:
return self._y_grid
else:
if not isinstance(grid, bool):
raise TypeError("grid must be boolean, not '%s'" % grid)
self._y_grid = grid
def grid(self, grid):
"""Turns all gridlines on or off
:param bool grid: turns the gridlines on if ``True``, off if ``False``"""
if not isinstance(grid, bool):
raise TypeError("grid must be boolean, not '%s'" % grid)
self._x_grid = self._y_grid = grid
def create(self):
"""Renders the chart to an OmniCanvas `canvas <https://omnicanvas.readt\
hedocs.io/en/latest/api/canvas.html#omnicanvas.canvas.Canvas>`_. This
object can then be `saved <https://omnicanvas.readthedocs.io/en/latest/\
api/canvas.html#omnicanvas.canvas.Canvas.save>`_ or `rendered <https://\
omnicanvas.readthedocs.io/en/latest/api/canvas.html#omnicanvas.canvas.C\
anvas.render>`_ as SVG."""
canvas = Chart.create(self)
for index, series in enumerate(self.all_series(), start=1):
series.write_to_canvas(canvas, "series%i" % index)
canvas.add_rectangle(
0, 0, self.horizontal_padding() * canvas.width(), canvas.height(),
opacity=1,
line_width=0,
name="block-w"
)
canvas.add_rectangle(
0, 0, canvas.width(), self.vertical_padding() * canvas.height(),
opacity=1,
line_width=0,
name="block-n"
)
canvas.add_rectangle(
canvas.width() - (self.horizontal_padding() * canvas.width()), 0,
self.horizontal_padding() * canvas.width(), canvas.height(),
opacity=1,
line_width=0,
name="block-e"
)
canvas.add_rectangle(
0, canvas.height() - (self.vertical_padding() * canvas.height()),
canvas.width(), self.vertical_padding() * canvas.height(),
opacity=1,
line_width=0,
name="block-s"
)
title = canvas.graphics()[0]
while canvas.graphics().index(title) != len(canvas.graphics()) - 1:
canvas.move_graphic_forward(title)
canvas.graphics()[-1].y(self.vertical_padding() * canvas.height() * 0.5)
canvas.graphics()[-1].vertical_align("center")
axes = canvas.add_rectangle(
self.horizontal_padding() * canvas.width(),
self.vertical_padding() * canvas.height(),
canvas.width() - (2 * self.horizontal_padding() * canvas.width()),
canvas.height() - (2 * self.vertical_padding() * canvas.height()),
name="axes",
opacity=0
)
if self.x_label():
canvas.add_text(
canvas.width() / 2,
canvas.height() - (self.vertical_padding() * canvas.height() * 0.25),
self.x_label(),
name="x_label"
)
y_label_x = self.horizontal_padding() * canvas.width() * 0.25
if self.y_label():
canvas.add_text(
y_label_x,
canvas.height() * 0.5,
self.y_label(),
rotation=(y_label_x, canvas.height() * 0.5, 270),
name="y_label"
)
x_tick_series = Series(*[(tick, 0) for tick in self.x_ticks()])
x_tick_series._chart = self
x_tick_points = x_tick_series.canvas_points()
for index, tick in enumerate(x_tick_series.data()):
canvas.add_text(
x_tick_points[index][0],
canvas.height() - (self.vertical_padding() * canvas.height() * 0.75),
str(tick[0]),
name="xtick"
)
if self.x_grid():
line = canvas.add_line(
x_tick_points[index][0], canvas.height() * (1 - self.vertical_padding()),
x_tick_points[index][0], canvas.height() * self.vertical_padding(),
line_style="..",
line_color="#333333",
name="xgrid"
)
while canvas.graphics()[0] is not line:
canvas.move_graphic_backward(line)
y_tick_series = Series(*[(0, tick) for tick in self.y_ticks()])
y_tick_series._chart = self
y_tick_points = y_tick_series.canvas_points()
for index, tick in enumerate(y_tick_series.data()):
canvas.add_text(
self.horizontal_padding() * canvas.width() * 0.75,
y_tick_points[index][1],
str(tick[1]),
name="ytick"
)
if self.y_grid():
line = canvas.add_line(
canvas.width() * self.horizontal_padding(), y_tick_points[index][1],
canvas.width() * (1 - self.horizontal_padding()), y_tick_points[index][1],
line_style="..",
line_color="#333333",
name="ygrid"
)
while canvas.graphics()[0] is not line:
canvas.move_graphic_backward(line)
return canvas
def determine_ticks(low, high):
"""The function used to auto-generate ticks for an axis, based on its
range of values.
:param Number low: The lower bound of the axis.
:param Number high: The upper bound of the axis.
:rtype: ``tuple``"""
range_ = high - low
tick_difference = 10 ** math.floor(math.log10(range_ / 1.25))
low_tick = math.floor(low / tick_difference) * tick_difference
ticks = [low_tick + tick_difference] if low_tick < low else [low_tick]
while ticks[-1] + tick_difference <= high:
ticks.append(ticks[-1] + tick_difference)
return tuple(ticks)
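# Illustrative example (relies on this module's math import): a range of 0 to 47
# gives a spacing of 10 ** floor(log10(47 / 1.25)) == 10, so
# determine_ticks(0, 47) returns (0, 10, 20, 30, 40).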
|
CELERY_RESULT_BACKEND = "django-db" |
"""DEPRECATED 2014-03-01 by cam.
This csv stuff is now all done through compute engine. I'll leave this file
around in case we want to know how to interact with cloud storage from app
engine in the future.
Note 2013-04-11 by ajb:
Adding a delete file classmethod. This will be used to clean out our backup
buckets with a cron job, to keep them from getting too gross.
TODO: If we want to do more stuff with GCS, we should clean up this class
because it's getting a little smelly. Currently it's geared around one specific
bucket--perts_prod/--but we should generalize it to make it more useful.
(Also, it doesn't really make sense that the file's called csv_file.)
"""
import cloudstorage as gcs
import json
import util
# The demo code in this library download is very useful. This particular
# download is for SDK version 1.8.8
# https://code.google.com/p/appengine-gcs-client/downloads/detail?name=appengine-gcs-client-python-r127.zip&can=2&q=
# Some documentation for the GCS client library:
# https://developers.google.com/appengine/docs/python/googlecloudstorageclient/
# https://developers.google.com/appengine/docs/python/googlecloudstorageclient/functions
class CsvFile:
"""Uses Google Cloud Storage to manage csv files with certain presets."""
mime_type = 'text/plain'
if util.is_development():
bucket = 'perts_dev'
else:
bucket = 'perts_prod'
    # You can specify some parameters here, see the commented example.
retry_params = gcs.RetryParams()
# retry_params = gcs.RetryParams(initial_delay=0.2,
# max_delay=5.0,
# backoff_factor=2,
# max_retry_period=15)
# Only used during writes. Controls permissions. For types of permission,
# see https://developers.google.com/storage/docs/accesscontrol#extension
write_options = {
'x-goog-acl': 'project-private', # default permission
# 'x-goog-meta-foo': 'foo', # arbitrary meta data
# 'x-goog-meta-bar': 'bar',
}
@classmethod
def list_bucket_files(klass, bucket_name):
iterable = gcs.listbucket('/' + bucket_name + '/')
return [f.filename for f in iterable]
def __init__(self, filename):
self.relative_path = filename
self.absolute_path = '/{}/{}'.format(self.bucket, self.relative_path)
# create the file, if it doesn't exist already
# This doesn't work.
try:
gcs_file = gcs.open(self.absolute_path, mode='r',
retry_params=self.retry_params)
gcs_file.close()
        except gcs.NotFoundError:
self.write(json.dumps([]))
def read(self):
gcs_file = gcs.open(self.absolute_path, mode='r',
retry_params=self.retry_params)
string = gcs_file.read()
gcs_file.close()
return string
def write(self, string):
gcs_file = gcs.open(self.absolute_path, mode='w',
content_type=self.mime_type,
retry_params=self.retry_params,
options=self.write_options)
# unicode not allowed here
gcs_file.write(str(string))
gcs_file.close()
@classmethod
    def delete(cls, filename):
"""Delete method will remove a file from GCS, provided an absolute
file path."""
try:
gcs.delete(filename)
return "{} deleted.".format(filename)
except gcs.NotFoundError:
return 'GCS File Not Found'
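# Rough usage sketch (file and bucket names below are hypothetical):
#   f = CsvFile('backups/example.json')                 # opens, creating the object if missing
#   f.write(json.dumps([{'id': 1}]))                    # overwrites the contents
#   data = json.loads(f.read())
#   CsvFile.delete('/perts_prod/backups/example.json')  # delete() expects the absolute path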
|
from unittest import TestCase
import PyObjectTree
class TestNode(TestCase):
def test_Node(self):
pass
|
from sympy.multipledispatch import dispatch, Dispatcher
from sympy.core import Basic, Expr, Function, Add, Mul, Pow, Dummy, Integer
from sympy import Min, Max, Set, sympify, Lambda, symbols, exp, log, S, oo
from sympy.core.numbers import Infinity, NegativeInfinity
from sympy.sets import (imageset, Interval, FiniteSet, Union, ImageSet,
ProductSet, EmptySet, Intersection)
from sympy.core.function import FunctionClass
from sympy.logic.boolalg import And, Or, Not, true, false
_x, _y = symbols("x y")
@dispatch(Basic, Basic)
def _set_add(x, y):
return None
@dispatch(Expr, Expr)
def _set_add(x, y):
return x+y
@dispatch(Interval, Interval)
def _set_add(x, y):
"""
Additions in interval arithmetic
https://en.wikipedia.org/wiki/Interval_arithmetic
"""
return Interval(x.start + y.start, x.end + y.end,
x.left_open or y.left_open, x.right_open or y.right_open)
@dispatch(Interval, Infinity)
def _set_add(x, y):
if x.start == S.NegativeInfinity:
return Interval(-oo, oo)
    return FiniteSet(S.Infinity)
@dispatch(Interval, NegativeInfinity)
def _set_add(x, y):
if x.end == S.Infinity:
return Interval(-oo, oo)
    return FiniteSet(S.NegativeInfinity)
@dispatch(Basic, Basic)
def _set_sub(x, y):
return None
@dispatch(Expr, Expr)
def _set_sub(x, y):
return x-y
@dispatch(Interval, Interval)
def _set_sub(x, y):
"""
Subtractions in interval arithmetic
https://en.wikipedia.org/wiki/Interval_arithmetic
"""
return Interval(x.start - y.end, x.end - y.start,
x.left_open or y.right_open, x.right_open or y.left_open)
@dispatch(Interval, Infinity)
def _set_sub(x, y):
    if x.start is S.NegativeInfinity:
return Interval(-oo, oo)
return FiniteSet(-oo)
@dispatch(Interval, NegativeInfinity)
def _set_sub(x, y):
    if x.start is S.NegativeInfinity:
return Interval(-oo, oo)
return FiniteSet(-oo)
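# Illustrative results of the interval handlers above:
#   _set_add(Interval(0, 1), Interval(2, 3)) -> Interval(2, 4)
#   _set_sub(Interval(0, 1), Interval(2, 3)) -> Interval(-3, -1)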
|
BASE_URL = "https://www.duolingo.com/stories"
|
from gui import *
class MainFrame(MyFrame):
def __init__(self,master):
super().__init__(master)
self.panel1 = MyPanel(self)
self.panel2 = MyPanel(self,orient=tk.HORIZONTAL)
self.panel3 = MyPanel(self,orient=tk.HORIZONTAL)
self.label1 = MyLabel(self,"Word Similarity Checker",f_size=30)
self.entry = MyEntry(self,text="Word you want to check")
self.entry2 = MyEntry(self,text="Word to check with")
self.go_btn = MyBtn(self,"Go")
self.ans = MyLabel(self,"",f_size=20,fg="darkblue")
self.binding()
self.entry.focus_force()
def binding(self):
self.go_btn.bind('<Button-1>',self.respond)
def show(self):
self.pack(pady=150)
self.panel1.pack()
self.panel1.add(self.label1)
self.panel1.add(self.panel2,pady=50)
self.panel1.add(self.panel3,pady=50)
self.panel2.add(self.entry)
self.panel2.add(self.entry2)
self.panel2.add(self.go_btn)
self.panel3.add(self.ans)
def respond(self,x):
self.ans.configure(text=s.score(self.entry.get(),self.entry2.get()))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 (http://hl7.org/fhir/StructureDefinition/DataRequirement) on 2020-02-03.
# 2020, SMART Health IT.
import sys
from dataclasses import dataclass, field
from typing import ClassVar, Optional, List
from .codeableconcept import CodeableConcept
from .coding import Coding
from .duration import Duration
from .element import Element
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .period import Period
@dataclass
class DataRequirementCodeFilter(Element):
""" What codes are expected.
Code filters specify additional constraints on the data, specifying the
value set of interest for a particular element of the data. Each code
filter defines an additional constraint on the data, i.e. code filters are
AND'ed, not OR'ed.
"""
resource_type: ClassVar[str] = "DataRequirementCodeFilter"
path: Optional[str] = None
searchParam: Optional[str] = None
valueSet: Optional[str] = None
code: Optional[List[Coding]] = None
@dataclass
class DataRequirementDateFilter(Element):
""" What dates/date ranges are expected.
Date filters specify additional constraints on the data in terms of the
applicable date range for specific elements. Each date filter specifies an
additional constraint on the data, i.e. date filters are AND'ed, not OR'ed.
"""
resource_type: ClassVar[str] = "DataRequirementDateFilter"
path: Optional[str] = None
searchParam: Optional[str] = None
valueDateTime: Optional[FHIRDate] = field(default=None, metadata=dict(one_of_many='value',))
valuePeriod: Optional[Period] = field(default=None, metadata=dict(one_of_many='value',))
valueDuration: Optional[Duration] = field(default=None, metadata=dict(one_of_many='value',))
@dataclass
class DataRequirementSort(Element):
""" Order of the results.
Specifies the order of the results to be returned.
"""
resource_type: ClassVar[str] = "DataRequirementSort"
path: str = None
direction: str = None
@dataclass
class DataRequirement(Element):
""" Describes a required data item.
Describes a required data item for evaluation in terms of the type of data,
and optional code or date-based filters of the data.
"""
resource_type: ClassVar[str] = "DataRequirement"
type: str = None
profile: Optional[List[str]] = None
subjectCodeableConcept: Optional[CodeableConcept] = field(default=None, metadata=dict(one_of_many='subject',))
subjectReference: Optional[FHIRReference] = field(default=None, metadata=dict(one_of_many='subject',))
mustSupport: Optional[List[str]] = None
codeFilter: Optional[List[DataRequirementCodeFilter]] = None
dateFilter: Optional[List[DataRequirementDateFilter]] = None
limit: Optional[int] = None
sort: Optional[List[DataRequirementSort]] = None |
null = "null"
# from BlueTest.logInit import *
import BlueTest,time,random,asyncio
def test():
with open(".//srcdata//test.json.postman_collection","w") as file:
a = {
"id": "3560d742-c3da-4ad7-32c1-222",
"name": "test",
"requests": [
{
"id": "49c2b90f-8ea7-c53e-910b-222",
"headers": "Origin: https://blog.csdn.net\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\nContent-Type: text/plain\nAccept: */*\nReferer: https://blog.csdn.net/qq_37159430/article/details/79970518\nAccept-Encoding: gzip, deflate, br\nAccept-Language: zh-CN,zh;q=0.8",
"url": "https://nbrecsys.4paradigm.com/action/api/log?requestID=Abac6ban&clientToken=1f9d3d10b0ab404e86c2e61a935d3888",
"pathVariables": {},
"preRequestScript": null,
"method": "POST",
"collectionId": "3560d742-c3da-4ad7-32c1-222",
"data": [],
"dataMode": "raw",
"name": "testapi",
"description": "",
"descriptionFormat": "html",
"time": 1542777522436,
"version": 2,
"responses": [],
"tests": null,
"currentHelper": "normal",
"helperAttributes": {},
"rawModeData": "{\"date\":\"2018-11-04 10:21:06\",\"actions\":[{\"requestID\":\"2222\",\"actionTime\":1542248466944,\"action\":\"show\",\"sceneId\":420,\"userId\":\"xubyCjC6zO\",\"itemId\":\"user_define\",\"itemSetId\":\"39\",\"uuid_tt_dd\":\"10_28867322960-222-222\",\"specialType\":\"csdn_net_alliance_ads\",\"ads\":1}]}"
}
],
"order": [
"49c2b90f-8ea7-c53e-910b-05a3c7014269"
],
"timestamp": 1542777522437
}
file.write(str(a))
    # Log message translation: "test data generated: .//srcdata//test.json.postman_collection"
    BlueTest.log.logger.info("测试数据生成 .//srcdata//test.json.postman_collection")
# time.sleep(1)
BlueTest.initPostMan("test")
BlueTest.testByCsvData("test")
# BlueTest.testByCsvData("test", limit_check=False, extras_check=True)
def presstest():
temp = ["temp1", "temp2", "temp3"]
class press_2(BlueTest.SoloPress):
def setup(self):
self.num = temp[0]
self.count = 20
def runcase(self):
            response = random.choice(["成功","失败"])  # "成功" = success, "失败" = failure
self.file_write(str(self.num), response, BlueTest.toolbox.responseAssert(response))
press = BlueTest.Press(200)
press.run(press_2)
press.dataReduction()
def pressTestByCsv():
a = BlueTest.Csv2Dict(path="./srcdata/test.csv")
a = a.run()
b = BlueTest.apiTest(a[0])
    b.error_list = ["error", "Error", "False", "false", "失败", "错误", "异常", "禁止"]  # the last four keywords: failure, error, exception, forbidden
temp = ["temp1", "temp2", "temp3"]
class press_2(BlueTest.SoloPress):
def setup(self):
self.num = temp[self.index-1]
self.count = 20
def runcase(self):
response = b.soloRequest()
#dothing ....
self.file_write(str(self.num),response,b.responseAssert(response))
# print (self.name,self.num)
press= BlueTest.Press(2)
press.run(press_2)
press.dataReduction()
def pressAioTest():
class Test(BlueTest.SoloPressAsync):
async def setup(self, **kwargs):
kwargs["data"]["code"] = await self.queue.get()
return kwargs
def newQueue(self):
temp_list = []
for i in range(0, 1000000):
if len(str(i)) < 6:
i = "0" * (6 - len(str(i))) + str(i)
temp_list.append(str(i))
queue = asyncio.Queue()
[queue.put_nowait(temp) for temp in temp_list]
self.queue = queue
new = Test("http://hq.sinajs.cn/list=sh600001", "Get", headers={"requestTime": "test","User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50 IE 9.0"},
vuser=500, total_num=2000,
data={"code": "", "phone": "13111111111"})
new.mainrun()
new.dataReduction()
def rsafileTest():
from BlueTest.toolbox import ToolBox as toolbox
toolbox.RsaFile.creatKeys()
    toolbox.RsaFile.rsaEncrypt('public.pem', 'a.xlsx')  # public.pem: generated public key; a.xlsx: file to encrypt; encrypted output defaults to "Encrypt"
    toolbox.RsaFile.rsaDecrypt('private.pem', "Encrypt", "dfc.xlsx")  # private.pem: private key file; "Encrypt": file to decrypt; dfc.xlsx: decrypted output
if __name__ == '__main__':
    # The async press Test class above is local to pressAioTest(), so it is not
    # visible here; run that scenario through the helper instead.
    pressAioTest() |
# Write your solution here:
class Person:
def __init__(self, name: str):
self.name = name
def return_first_name(self):
first_name = self.name.split(" ")[0]
return first_name
def return_last_name(self):
last_name = self.name.split(" ")[1]
return last_name
if __name__ == "__main__":
peter = Person("Peter Pythons")
print(peter.return_first_name())
print(peter.return_last_name())
paula = Person("Paula Pythonnen")
print(paula.return_first_name())
print(paula.return_last_name())
|
import requests
from bs4 import BeautifulSoup
# Hardcode URL for testing purposes, will integrate input later
url = "" # needs to be verified as valid URL
try:
# downloading HTML page
target = requests.get(url)
# initializing bs4 object
soup = BeautifulSoup(target.content, 'html.parser')
status = target.status_code
# contains names or ids of text fields only; final output of the program
field = []
    for form in soup.find_all('input', type='text'):
        # Build the input field list here; only text inputs are recorded.
        field.append({'id': form.get('id'), 'name': form.get('name')})
    # field is stored as a list of dictionaries, each containing the 'id' and
    # 'name' attributes of the respective input text field. If an attribute
    # isn't present, its value is stored as None.
except Exception as exception:
print(exception)
print('URL DOES NOT EXIST!')
|
#!/usr/bin/env python3
import sys
import ipaddress
for line in sys.stdin:
line = line.rstrip("\r\n")
if not line: break
ip = ipaddress.IPv6Address(line)
print(ip.reverse_pointer + ".\tPTR")
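# Example: for the input line "2001:db8::1" this prints the 32 nibbles in
# reverse ("1.0.0.0. ... .8.b.d.0.1.0.0.2.ip6.arpa.") followed by a tab and
# "PTR", i.e. one record name/type pair per line for a DNS reverse-zone file.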
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench
========================
"""
# Imports
from pycompss.api.task import task
from pycompss.api.binary import binary
@binary(binary="date")
@task()
def myDate(dprefix, param):
pass
def main():
from pycompss.api.api import compss_barrier
myDate("-d", "next friday")
compss_barrier()
print("Finished")
if __name__ == '__main__':
main()
|
# Copyright 2017-2021 object_database Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from object_database.web.cells.cell import Cell
from object_database.web.cells.leaves import Octicon
from object_database.web.cells.session_state import sessionState
class Expands(Cell):
def __init__(self, closed, open, closedIcon=None, openedIcon=None):
super().__init__()
self.closed = closed
self.open = open
self.openedIcon = openedIcon or Octicon("diff-removed")
self.closedIcon = closedIcon or Octicon("diff-added")
@property
def isExpanded(self):
isExpandedSlot = sessionState().slotFor(self.identityPath + ("ExpandsState",))
return isExpandedSlot.get() or False
@isExpanded.setter
def isExpanded(self, isExpanded):
isExpandedSlot = sessionState().slotFor(self.identityPath + ("ExpandsState",))
isExpandedSlot.set(bool(isExpanded))
def sortsAs(self):
if self.isExpanded:
return self.open.sortsAs()
return self.closed.sortsAs()
def recalculate(self):
self.children.addFromDict(
{
"content": self.open if self.isExpanded else self.closed,
"icon": self.openedIcon if self.isExpanded else self.closedIcon,
}
)
self.exportData["isOpen"] = self.isExpanded
for c in self.children.allChildren:
if c.cells is not None:
c.prepareForReuse()
def onMessage(self, msgFrame):
self.isExpanded = not self.isExpanded
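# Rough usage sketch (the child cells here are hypothetical):
#   cell = Expands(closed=Octicon("chevron-right"), open=SomeDetailPanel())
# A click message toggles isExpanded (persisted via the session-state slot),
# and recalculate() then swaps both the displayed child and the icon.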
|
import ipyvuetify as v
import pandas as pd
from ipywidgets import Output
from matplotlib import pyplot as plt
from component import parameter as cp
class LayerFull(v.Layout):
COLORS = cp.gradient(5) + ["grey"]
def __init__(self, layer_name, values, aoi_names, colors):
# read the layer list and find the layer information based on the layer name
layer_list = pd.read_csv(cp.layer_list).fillna("")
layer_row = layer_list[layer_list.layer_name == layer_name]
if len(layer_row) != 1:
raise IndexError(
f"The layer {layer_name} is not part of the existing layers of the application. Please contact our maintainer."
)
# build the internal details
details = v.ExpansionPanels(
xs12=True,
class_="mt-3",
children=[
v.ExpansionPanel(
children=[
v.ExpansionPanelHeader(
children=["Details"],
expand_icon="mdi-help-circle-outline",
disable_icon_rotate=True,
),
v.ExpansionPanelContent(
children=[layer_row.layer_info.values[0]]
),
]
)
],
)
# create a title with the layer name
title = v.Html(
class_="mt-2 mb-2",
xs12=True,
tag="h3",
children=[f"{layer_name} ({layer_row.unit.values[0]})"],
)
# taken from https://stackoverflow.com/questions/579310/formatting-long-numbers-as-strings-in-python
def human_format(num, round_to=2):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num = round(num / 1000.0, round_to)
return "{:.{}f}{}".format(
round(num, round_to), round_to, ["", "K", "M", "G", "T", "P"][magnitude]
)
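        # e.g. human_format(1234567) -> "1.23M" with the default round_to=2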
# create a matplotlib stack horizontal bar chart
chart = Output()
with chart:
# change pyplot style
plt.style.use("dark_background")
# create the chart
fig, ax = plt.subplots(
figsize=[50, len(values) * 2], facecolor=((0, 0, 0, 0))
)
# set the datas
max_value = max(values)
norm_values = [v / max_value * 100 for v in reversed(values)]
human_values = [f"{human_format(val)}" for val in reversed(values)]
colors = [
colors[i - 1] if i else v.theme.themes.dark.primary
for i in range(len(values))
][::-1]
# add the axes
ax.barh(aoi_names, norm_values, color=colors)
# add the text
for i, (norm, name, val, color) in enumerate(
zip(norm_values, aoi_names, human_values, colors)
):
ax.text(norm + 1, i, val, fontsize=40, color=color)
# cosmetic tuning
ax.set_xlim(0, 110)
ax.tick_params(axis="y", which="major", pad=30, labelsize=40, left=False)
ax.tick_params(axis="x", bottom=False, labelbottom=False)
ax.set_frame_on(False)
plt.show()
super().__init__(
class_="ma-5",
row=True,
children=[
v.Flex(xs12=True, children=[title]),
v.Flex(xs12=True, children=[chart]),
v.Flex(xs12=True, children=[details]),
],
)
class LayerPercentage(v.Layout):
def __init__(self, layer_name, pcts, colors):
# read the layer list and find the layer information based on the layer name
layer_list = pd.read_csv(cp.layer_list).fillna("")
layer_row = layer_list[layer_list.layer_name == layer_name]
if len(layer_row) != 1 and layer_name not in cp.criterias:
raise IndexError(
f"The layer {layer_name} is not part of the existing layers of the application. Please contact our maintainer."
)
if len(layer_row) != 1 and layer_name in cp.criterias:
layer_row = pd.DataFrame(
[["", "", layer_name, "", layer_name, "HA"]],
columns=[
"theme",
"subtheme",
"layer_name",
"gee_asset",
"layer_info",
"unit",
],
)
# add the title
title = v.Html(tag="h4", children=[layer_name])
# build the internal details
details = v.ExpansionPanels(
xs12=True,
class_="mt-3",
children=[
v.ExpansionPanel(
children=[
v.ExpansionPanelHeader(
children=["Details"],
expand_icon="mdi-help-circle-outline",
disable_icon_rotate=True,
),
v.ExpansionPanelContent(
children=[layer_row.layer_info.values[0]]
),
]
)
],
)
# create the list of value
spans = []
for i, val in enumerate(pcts):
# if val != 0: #TODO: do we need this still?
spans.append(
v.Html(
tag="span",
class_="ml-1 mr-1",
style_=f"color: {colors[i-1] if i else v.theme.themes.dark.primary}",
children=[f"{round(val,2)}%"],
)
)
super().__init__(
class_="ma-5",
row=True,
children=[
v.Flex(xs12=True, children=[title] + spans),
v.Flex(xs12=True, children=[details]),
],
)
|
import numpy as np
import torch
import torch.nn as nn
from diff_models import diff_CSDI
class CSDI_base(nn.Module):
def __init__(self, target_dim, config, device):
super().__init__()
self.device = device
self.target_dim = target_dim
self.emb_time_dim = config["model"]["timeemb"]
self.emb_feature_dim = config["model"]["featureemb"]
self.is_unconditional = config["model"]["is_unconditional"]
self.target_strategy = config["model"]["target_strategy"]
self.emb_total_dim = self.emb_time_dim + self.emb_feature_dim
if self.is_unconditional == False:
self.emb_total_dim += 1 # for conditional mask
self.embed_layer = nn.Embedding(
num_embeddings=self.target_dim, embedding_dim=self.emb_feature_dim
)
config_diff = config["diffusion"]
config_diff["side_dim"] = self.emb_total_dim
input_dim = 1 if self.is_unconditional == True else 2
self.diffmodel = diff_CSDI(config_diff, input_dim)
# parameters for diffusion models
self.num_steps = config_diff["num_steps"]
if config_diff["schedule"] == "quad":
self.beta = np.linspace(
config_diff["beta_start"] ** 0.5, config_diff["beta_end"] ** 0.5, self.num_steps
) ** 2
elif config_diff["schedule"] == "linear":
self.beta = np.linspace(
config_diff["beta_start"], config_diff["beta_end"], self.num_steps
)
self.alpha_hat = 1 - self.beta
self.alpha = np.cumprod(self.alpha_hat)
self.alpha_torch = torch.tensor(self.alpha).float().to(self.device).unsqueeze(1).unsqueeze(1)
def time_embedding(self, pos, d_model=128):
pe = torch.zeros(pos.shape[0], pos.shape[1], d_model).to(self.device)
position = pos.unsqueeze(2)
div_term = 1 / torch.pow(
10000.0, torch.arange(0, d_model, 2).to(self.device) / d_model
)
pe[:, :, 0::2] = torch.sin(position * div_term)
pe[:, :, 1::2] = torch.cos(position * div_term)
return pe
def get_randmask(self, observed_mask):
rand_for_mask = torch.rand_like(observed_mask) * observed_mask
rand_for_mask = rand_for_mask.reshape(len(rand_for_mask), -1)
for i in range(len(observed_mask)):
sample_ratio = np.random.rand() # missing ratio
num_observed = observed_mask[i].sum().item()
num_masked = round(num_observed * sample_ratio)
rand_for_mask[i][rand_for_mask[i].topk(num_masked).indices] = -1
cond_mask = (rand_for_mask > 0).reshape(observed_mask.shape).float()
return cond_mask
def get_hist_mask(self, observed_mask, for_pattern_mask=None):
if for_pattern_mask is None:
for_pattern_mask = observed_mask
if self.target_strategy == "mix":
rand_mask = self.get_randmask(observed_mask)
cond_mask = observed_mask.clone()
for i in range(len(cond_mask)):
mask_choice = np.random.rand()
if self.target_strategy == "mix" and mask_choice > 0.5:
cond_mask[i] = rand_mask[i]
else: # draw another sample for histmask (i-1 corresponds to another sample)
cond_mask[i] = cond_mask[i] * for_pattern_mask[i - 1]
return cond_mask
def get_side_info(self, observed_tp, cond_mask):
B, K, L = cond_mask.shape
time_embed = self.time_embedding(observed_tp, self.emb_time_dim) # (B,L,emb)
time_embed = time_embed.unsqueeze(2).expand(-1, -1, K, -1)
feature_embed = self.embed_layer(
torch.arange(self.target_dim).to(self.device)
) # (K,emb)
feature_embed = feature_embed.unsqueeze(0).unsqueeze(0).expand(B, L, -1, -1)
side_info = torch.cat([time_embed, feature_embed], dim=-1) # (B,L,K,*)
side_info = side_info.permute(0, 3, 2, 1) # (B,*,K,L)
if self.is_unconditional == False:
side_mask = cond_mask.unsqueeze(1) # (B,1,K,L)
side_info = torch.cat([side_info, side_mask], dim=1)
return side_info
def calc_loss_valid(
self, observed_data, cond_mask, observed_mask, side_info, is_train
):
loss_sum = 0
for t in range(self.num_steps): # calculate loss for all t
loss = self.calc_loss(
observed_data, cond_mask, observed_mask, side_info, is_train, set_t=t
)
loss_sum += loss.detach()
return loss_sum / self.num_steps
def calc_loss(
self, observed_data, cond_mask, observed_mask, side_info, is_train, set_t=-1
):
B, K, L = observed_data.shape
if is_train != 1: # for validation
t = (torch.ones(B) * set_t).long().to(self.device)
else:
t = torch.randint(0, self.num_steps, [B]).to(self.device)
current_alpha = self.alpha_torch[t] # (B,1,1)
noise = torch.randn_like(observed_data)
noisy_data = (current_alpha ** 0.5) * observed_data + (1.0 - current_alpha) ** 0.5 * noise
total_input = self.set_input_to_diffmodel(noisy_data, observed_data, cond_mask)
predicted = self.diffmodel(total_input, side_info, t) # (B,K,L)
target_mask = observed_mask - cond_mask
residual = (noise - predicted) * target_mask
num_eval = target_mask.sum()
loss = (residual ** 2).sum() / (num_eval if num_eval > 0 else 1)
return loss
def set_input_to_diffmodel(self, noisy_data, observed_data, cond_mask):
if self.is_unconditional == True:
total_input = noisy_data.unsqueeze(1) # (B,1,K,L)
else:
cond_obs = (cond_mask * observed_data).unsqueeze(1)
noisy_target = ((1 - cond_mask) * noisy_data).unsqueeze(1)
total_input = torch.cat([cond_obs, noisy_target], dim=1) # (B,2,K,L)
return total_input
def impute(self, observed_data, cond_mask, side_info, n_samples):
B, K, L = observed_data.shape
imputed_samples = torch.zeros(B, n_samples, K, L).to(self.device)
for i in range(n_samples):
# generate noisy observation for unconditional model
if self.is_unconditional == True:
noisy_obs = observed_data
noisy_cond_history = []
for t in range(self.num_steps):
noise = torch.randn_like(noisy_obs)
noisy_obs = (self.alpha_hat[t] ** 0.5) * noisy_obs + self.beta[t] ** 0.5 * noise
noisy_cond_history.append(noisy_obs * cond_mask)
current_sample = torch.randn_like(observed_data)
for t in range(self.num_steps - 1, -1, -1):
if self.is_unconditional == True:
diff_input = cond_mask * noisy_cond_history[t] + (1.0 - cond_mask) * current_sample
diff_input = diff_input.unsqueeze(1) # (B,1,K,L)
else:
cond_obs = (cond_mask * observed_data).unsqueeze(1)
noisy_target = ((1 - cond_mask) * current_sample).unsqueeze(1)
diff_input = torch.cat([cond_obs, noisy_target], dim=1) # (B,2,K,L)
predicted = self.diffmodel(diff_input, side_info, torch.tensor([t]).to(self.device))
coeff1 = 1 / self.alpha_hat[t] ** 0.5
coeff2 = (1 - self.alpha_hat[t]) / (1 - self.alpha[t]) ** 0.5
current_sample = coeff1 * (current_sample - coeff2 * predicted)
if t > 0:
noise = torch.randn_like(current_sample)
sigma = (
(1.0 - self.alpha[t - 1]) / (1.0 - self.alpha[t]) * self.beta[t]
) ** 0.5
current_sample += sigma * noise
imputed_samples[:, i] = current_sample.detach()
return imputed_samples
def forward(self, batch, is_train=1):
(
observed_data,
observed_mask,
observed_tp,
gt_mask,
for_pattern_mask,
_,
) = self.process_data(batch)
if is_train == 0:
cond_mask = gt_mask
elif self.target_strategy != "random":
cond_mask = self.get_hist_mask(
observed_mask, for_pattern_mask=for_pattern_mask
)
else:
cond_mask = self.get_randmask(observed_mask)
side_info = self.get_side_info(observed_tp, cond_mask)
loss_func = self.calc_loss if is_train == 1 else self.calc_loss_valid
return loss_func(observed_data, cond_mask, observed_mask, side_info, is_train)
def evaluate(self, batch, n_samples):
(
observed_data,
observed_mask,
observed_tp,
gt_mask,
_,
cut_length,
) = self.process_data(batch)
with torch.no_grad():
cond_mask = gt_mask
target_mask = observed_mask - cond_mask
side_info = self.get_side_info(observed_tp, cond_mask)
samples = self.impute(observed_data, cond_mask, side_info, n_samples)
for i in range(len(cut_length)): # to avoid double evaluation
target_mask[i, ..., 0 : cut_length[i].item()] = 0
return samples, observed_data, target_mask, observed_mask, observed_tp
class CSDI_PM25(CSDI_base):
def __init__(self, config, device, target_dim=36):
super(CSDI_PM25, self).__init__(target_dim, config, device)
def process_data(self, batch):
observed_data = batch["observed_data"].to(self.device).float()
observed_mask = batch["observed_mask"].to(self.device).float()
observed_tp = batch["timepoints"].to(self.device).float()
gt_mask = batch["gt_mask"].to(self.device).float()
cut_length = batch["cut_length"].to(self.device).long()
for_pattern_mask = batch["hist_mask"].to(self.device).float()
observed_data = observed_data.permute(0, 2, 1)
observed_mask = observed_mask.permute(0, 2, 1)
gt_mask = gt_mask.permute(0, 2, 1)
for_pattern_mask = for_pattern_mask.permute(0, 2, 1)
return (
observed_data,
observed_mask,
observed_tp,
gt_mask,
for_pattern_mask,
cut_length,
)
class CSDI_Physio(CSDI_base):
def __init__(self, config, device, target_dim=35):
super(CSDI_Physio, self).__init__(target_dim, config, device)
def process_data(self, batch):
observed_data = batch["observed_data"].to(self.device).float()
observed_mask = batch["observed_mask"].to(self.device).float()
observed_tp = batch["timepoints"].to(self.device).float()
gt_mask = batch["gt_mask"].to(self.device).float()
observed_data = observed_data.permute(0, 2, 1)
observed_mask = observed_mask.permute(0, 2, 1)
gt_mask = gt_mask.permute(0, 2, 1)
cut_length = torch.zeros(len(observed_data)).long().to(self.device)
for_pattern_mask = observed_mask
return (
observed_data,
observed_mask,
observed_tp,
gt_mask,
for_pattern_mask,
cut_length,
)
|
import random
try:
from urlparse import urlsplit
except ImportError:
# try python3 then
from urllib.parse import urlsplit
class HostIterator(object):
"""An iterator that returns selected hosts in order.
A host is guaranteed to not be selected twice unless there is only
one host in the collection.
"""
def __init__(self, hosts):
self.hosts = hosts
def __iter__(self):
for host in self.hosts[:]:
yield host
def __len__(self):
return len(self.hosts)
class RandomHostIterator(HostIterator):
"""An iterator that returns a randomly selected host."""
def __iter__(self):
hostslist = self.hosts[:]
random.shuffle(hostslist)
for host in hostslist:
yield host
def collect_hosts(hosts, randomize=True):
"""Collect a set of hosts and an optional chroot from a string."""
host_ports, chroot = hosts.partition("/")[::2]
chroot = "/" + chroot if chroot else None
result = []
for host_port in host_ports.split(","):
# put all complexity of dealing with
# IPv4 & IPv6 address:port on the urlsplit
res = urlsplit("xxx://" + host_port)
host = res.hostname
port = int(res.port) if res.port else 2181
result.append((host.strip(), port))
if randomize:
return (RandomHostIterator(result), chroot)
return (HostIterator(result), chroot)
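# Illustrative example (host names are placeholders):
#   collect_hosts("zk1:2181,zk2/app")
#   -> (iterator over [("zk1", 2181), ("zk2", 2181)], "/app")
# where the second host falls back to the default port 2181.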
|
from random import randint
matriz = []
cartela = []
cont = contador = 0
while contador < 5:
num = randint(0, 100)
if num not in cartela:
cartela.append(num)
cont = cont + 1
    if cont == 5:
        # each completed group of five unique numbers becomes one row of the card
        matriz.append(cartela[contador * 5:(contador + 1) * 5])
        cont = 0
        contador = contador + 1
print(cartela)
print(matriz)
|
class Stock_IEX:
def __init__(self,key):
self.key = key
import requests
import json
from pandas.io.json import json_normalize
import pandas as pd
import numpy as np
def get_quote(self,ticker,range = 'YTD'):
"""
Returns the historic daily stock prices for one ticker
API Documentation
https://iexcloud.io/docs/api/#historical-prices
        Arguments
-- ticker -> One ticker (STR)
-- range (Optional, default = YTD, STR)
Options: max, 5y, 2y, 1y, ytd, 6m, 3m, 1m, 1mm, 5d, 5dm
"""
temp = self.pd.DataFrame()
base_url = 'https://sandbox.iexapis.com/'+'stable/stock/'
url = base_url + ticker + '/chart/' + range
company = self.requests.get(url, params = {"token": self.key})
if company.status_code != 200: print(company) #Error Checking
company = company.json()
company = self.pd.DataFrame(company)
temp = self.pd.concat([temp,company]).drop_duplicates()
return temp
def get_daily_stocks(self,tickers,range = 'ytd',return_value = 'close'):
"""
Returns daily stock data for entered tickers.
-- Tickers -> List[] of stock tickers
-- Range -> time frame (Optional, default = YTD, STR)
Options: max, 5y, 2y, 1y, ytd, 6m, 3m, 1m, 1mm, 5d, 5dm
-- Return_value -> (Optional,default = close, STR)
Options: open, close, high, low, volume, change, changePercent
"""
temp = self.pd.DataFrame()
for i,t in enumerate(tickers):
stock_temp = self.get_quote(t,range)
if i == 0:
temp['date'] = stock_temp['date']
temp[t] = stock_temp[return_value]
temp[t] = stock_temp[return_value]
temp.set_index('date', inplace = True)
temp.index = self.pd.to_datetime(temp.index, format='%Y-%m-%d').to_period('D')
temp = temp.sort_index()
return temp
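# Rough usage sketch (the token and tickers are placeholders):
#   iex = Stock_IEX('YOUR_SANDBOX_TOKEN')
#   closes = iex.get_daily_stocks(['AAPL', 'MSFT'], range='1m', return_value='close')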
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame, sys, random, time
from pygame.locals import *
pygame.init()
DISPLAYSURF = pygame.display.set_mode((960, 720))
pygame.display.set_caption('快乐小鸡')  # "Happy Little Chicken"
WHITE = (255, 255, 255)
DISPLAYSURF.fill(WHITE)
backdrop = pygame.image.load('images/backdrop.png')
chickens = [pygame.image.load('images/hmc.png'), pygame.image.load('images/hmc2.png')]
egg = pygame.image.load('images/egg.png')
chicken_position = (random.randint(0, 960-157), random.randint(0, 720-195))
chicken_number = 0
IMAGE_TIME = 20
egg_positions = []
meow = pygame.mixer.Sound('sounds/meow.wav')
clock = pygame.time.Clock()
while True: # main game loop
clock.tick(60)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYUP:
if event.key == K_SPACE:
egg_positions.append(chicken_position)
chicken_position = (random.randint(0, 960-157), random.randint(0, 720-195))
meow.play()
if IMAGE_TIME > 0:
IMAGE_TIME -= 1
else:
IMAGE_TIME = 20
chicken_number ^= 1
DISPLAYSURF.blit(backdrop, (0, 0))
for p in egg_positions:
DISPLAYSURF.blit(egg, p)
DISPLAYSURF.blit(chickens[chicken_number], chicken_position)
font = pygame.font.Font(None, 100)
count_text = font.render(str(len(egg_positions)), 1, WHITE)
DISPLAYSURF.blit(count_text, (19,19))
pygame.display.update()
|
"""Main entry point."""
import json
import random
import sys
from .culture import Culture
def main(culture_key, gender=None):
cultures = _get_cultures()
chosen_culture = None
for culture in cultures:
if culture.key == culture_key:
chosen_culture = culture
print(chosen_culture.get_name(gender))
def _get_cultures():
names_json = _read_file('./misc/names.json')
cultures = []
for culture_key, culture_dict in names_json.items():
new_culture = Culture(culture_key, culture_dict)
cultures.append(new_culture)
return cultures
def _read_file(file_path):
contents = None
with open(file_path, mode='r') as name_file:
contents = json.loads(name_file.read())
return contents
if __name__ == '__main__':
if len(sys.argv) == 3:
culture_key = sys.argv[1]
name_gender = sys.argv[2]
main(culture_key, name_gender)
elif len(sys.argv) == 2:
culture_key = sys.argv[1]
main(culture_key)
else:
print('Invalid number of arguments.')
print('Usage:')
print('\tname_randomiser culture_key gender')
|
from pymtl import *
from lizard.util.rtl.interface import Interface, UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.types import canonicalize_type
from lizard.util.rtl.case_mux import CaseMuxInterface, CaseMux
class LookupTableInterface(Interface):
def __init__(s, in_type, out_type):
s.In = canonicalize_type(in_type)
s.Out = canonicalize_type(out_type)
super(LookupTableInterface, s).__init__([
MethodSpec(
'lookup',
args={
'in_': s.In,
},
rets={
'out': s.Out,
'valid': Bits(1),
},
call=False,
rdy=False,
),
])
class LookupTable(Model):
def __init__(s, interface, mapping):
UseInterface(s, interface)
size = len(mapping)
# Sort by key to ensure it is deterministic
svalues, souts = zip(*[(key, mapping[key])
for key in sorted(mapping.keys())])
s.mux = CaseMux(
CaseMuxInterface(s.interface.Out, s.interface.In, size), svalues)
s.connect(s.mux.mux_default, 0)
s.connect(s.mux.mux_select, s.lookup_in_)
for i, sout in enumerate(souts):
s.connect(s.mux.mux_in_[i], int(sout))
s.connect(s.lookup_out, s.mux.mux_out)
s.connect(s.lookup_valid, s.mux.mux_matched)
def line_trace(s):
return "{} ->({}) {}".format(s.lookup_in_, s.lookup_valid, s.lookup_out)
|
from __future__ import absolute_import
import functools
import logging
import time
from datetime import timedelta
from hashlib import md5
from django.db.models import Q
from django.utils import timezone
from sentry import options, quotas
from sentry.api.event_search import convert_search_filter_to_snuba_query, InvalidSearchQuery
from sentry.api.paginator import DateTimePaginator, SequencePaginator, Paginator
from sentry.constants import ALLOWED_FUTURE_DELTA
from sentry.models import Group, Release, GroupEnvironment
from sentry.search.base import SearchBackend
from sentry.utils import snuba, metrics
logger = logging.getLogger("sentry.search.snuba")
datetime_format = "%Y-%m-%dT%H:%M:%S+00:00"
EMPTY_RESULT = Paginator(Group.objects.none()).get_result()
# mapping from query parameter sort name to underlying scoring aggregation name
sort_strategies = {
"date": "last_seen",
"freq": "times_seen",
"new": "first_seen",
"priority": "priority",
}
dependency_aggregations = {"priority": ["last_seen", "times_seen"]}
aggregation_defs = {
"times_seen": ["count()", ""],
"first_seen": ["multiply(toUInt64(min(timestamp)), 1000)", ""],
"last_seen": ["multiply(toUInt64(max(timestamp)), 1000)", ""],
# https://github.com/getsentry/sentry/blob/804c85100d0003cfdda91701911f21ed5f66f67c/src/sentry/event_manager.py#L241-L271
"priority": ["toUInt64(plus(multiply(log(times_seen), 600), last_seen))", ""],
# Only makes sense with WITH TOTALS, returns 1 for an individual group.
"total": ["uniq", "issue"],
}
issue_only_fields = set(
[
"query",
"status",
"bookmarked_by",
"assigned_to",
"unassigned",
"subscribed_by",
"active_at",
"first_release",
"first_seen",
]
)
class QuerySetBuilder(object):
def __init__(self, conditions):
self.conditions = conditions
def build(self, queryset, search_filters):
for search_filter in search_filters:
name = search_filter.key.name
if name in self.conditions:
condition = self.conditions[name]
queryset = condition.apply(queryset, search_filter)
return queryset
class Condition(object):
"""\
Adds a single filter to a ``QuerySet`` object. Used with
``QuerySetBuilder``.
"""
def apply(self, queryset, name, parameters):
raise NotImplementedError
class QCallbackCondition(Condition):
def __init__(self, callback):
self.callback = callback
def apply(self, queryset, search_filter):
value = search_filter.value.raw_value
q = self.callback(value)
if search_filter.operator not in ("=", "!="):
raise InvalidSearchQuery(
u"Operator {} not valid for search {}".format(search_filter.operator, search_filter)
)
queryset_method = queryset.filter if search_filter.operator == "=" else queryset.exclude
queryset = queryset_method(q)
return queryset
class ScalarCondition(Condition):
"""
Adds a scalar filter to a ``QuerySet`` object. Only accepts `SearchFilter`
instances
"""
OPERATOR_TO_DJANGO = {">=": "gte", "<=": "lte", ">": "gt", "<": "lt"}
def __init__(self, field, extra=None):
self.field = field
self.extra = extra
def _get_operator(self, search_filter):
django_operator = self.OPERATOR_TO_DJANGO.get(search_filter.operator, "")
if django_operator:
django_operator = "__{}".format(django_operator)
return django_operator
def apply(self, queryset, search_filter):
django_operator = self._get_operator(search_filter)
qs_method = queryset.exclude if search_filter.operator == "!=" else queryset.filter
q_dict = {"{}{}".format(self.field, django_operator): search_filter.value.raw_value}
if self.extra:
q_dict.update(self.extra)
return qs_method(**q_dict)
def assigned_to_filter(actor, projects):
from sentry.models import OrganizationMember, OrganizationMemberTeam, Team
if isinstance(actor, Team):
return Q(assignee_set__team=actor)
teams = Team.objects.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember__in=OrganizationMember.objects.filter(
user=actor, organization_id=projects[0].organization_id
),
is_active=True,
).values("team")
)
return Q(
Q(assignee_set__user=actor, assignee_set__project__in=projects)
| Q(assignee_set__team__in=teams)
)
def unassigned_filter(unassigned, projects):
from sentry.models.groupassignee import GroupAssignee
query = Q(
id__in=GroupAssignee.objects.filter(project_id__in=[p.id for p in projects]).values_list(
"group_id", flat=True
)
)
if unassigned:
query = ~query
return query
def get_search_filter(search_filters, name, operator):
"""
Finds the value of a search filter with the passed name and operator. If
multiple values are found, returns the most restrictive value
:param search_filters: collection of `SearchFilter` objects
:param name: Name of the field to find
:param operator: '<' or '>'
:return: The value of the field if found, else None
"""
assert operator in ("<", ">")
comparator = max if operator.startswith(">") else min
found_val = None
for search_filter in search_filters:
# Note that we check operator with `startswith` here so that we handle
# <, <=, >, >=
if search_filter.key.name == name and search_filter.operator.startswith(operator):
val = search_filter.value.raw_value
found_val = comparator(val, found_val) if found_val else val
return found_val
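# For example, given filters date>2019-01-01 and date>2019-06-01,
# get_search_filter(search_filters, "date", ">") returns the 2019-06-01 value,
# the more restrictive of the two lower bounds.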
class SnubaSearchBackend(SearchBackend):
def query(
self,
projects,
environments=None,
sort_by="date",
limit=100,
cursor=None,
count_hits=False,
paginator_options=None,
search_filters=None,
date_from=None,
date_to=None,
):
from sentry.models import Group, GroupStatus, GroupSubscription
search_filters = search_filters if search_filters is not None else []
# ensure projects are from same org
if len({p.organization_id for p in projects}) != 1:
raise RuntimeError("Cross organization search not supported")
if paginator_options is None:
paginator_options = {}
group_queryset = Group.objects.filter(project__in=projects).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
GroupStatus.PENDING_MERGE,
]
)
qs_builder_conditions = {
"status": QCallbackCondition(lambda status: Q(status=status)),
"bookmarked_by": QCallbackCondition(
lambda user: Q(bookmark_set__project__in=projects, bookmark_set__user=user)
),
"assigned_to": QCallbackCondition(
functools.partial(assigned_to_filter, projects=projects)
),
"unassigned": QCallbackCondition(
functools.partial(unassigned_filter, projects=projects)
),
"subscribed_by": QCallbackCondition(
lambda user: Q(
id__in=GroupSubscription.objects.filter(
project__in=projects, user=user, is_active=True
).values_list("group")
)
),
"active_at": ScalarCondition("active_at"),
}
group_queryset = QuerySetBuilder(qs_builder_conditions).build(
group_queryset, search_filters
)
# filter out groups which are beyond the retention period
retention = quotas.get_event_retention(organization=projects[0].organization)
if retention:
retention_window_start = timezone.now() - timedelta(days=retention)
else:
retention_window_start = None
# TODO: This could be optimized when building querysets to identify
# criteria that are logically impossible (e.g. if the upper bound
# for last seen is before the retention window starts, no results
# exist.)
if retention_window_start:
group_queryset = group_queryset.filter(last_seen__gte=retention_window_start)
# This is a punt because the SnubaSearchBackend (a subclass) shares so much that it
# seemed better to handle all the shared initialization and then handoff to the
# actual backend.
return self._query(
projects,
retention_window_start,
group_queryset,
environments,
sort_by,
limit,
cursor,
count_hits,
paginator_options,
search_filters,
date_from,
date_to,
)
def _query(
self,
projects,
retention_window_start,
group_queryset,
environments,
sort_by,
limit,
cursor,
count_hits,
paginator_options,
search_filters,
date_from,
date_to,
):
# TODO: It's possible `first_release` could be handled by Snuba.
if environments is not None:
environment_ids = [environment.id for environment in environments]
group_queryset = group_queryset.filter(
groupenvironment__environment_id__in=environment_ids
)
group_queryset = QuerySetBuilder(
{
"first_release": QCallbackCondition(
lambda version: Q(
# if environment(s) are selected, we just filter on the group
# environment's first_release attribute.
groupenvironment__first_release__organization_id=projects[
0
].organization_id,
groupenvironment__first_release__version=version,
groupenvironment__environment_id__in=environment_ids,
)
),
"first_seen": ScalarCondition(
"groupenvironment__first_seen",
{"groupenvironment__environment_id__in": environment_ids},
),
}
).build(group_queryset, search_filters)
else:
group_queryset = QuerySetBuilder(
{
"first_release": QCallbackCondition(
lambda release_version: Q(
# if no specific environments are supplied, we either choose any
# groups/issues whose first release matches the given release_version,
Q(
first_release_id__in=Release.objects.filter(
version=release_version,
organization_id=projects[0].organization_id,
)
)
|
# or we choose any groups whose first occurrence in any environment and the latest release at
# the time of the groups' first occurrence matches the given
# release_version
Q(
id__in=GroupEnvironment.objects.filter(
first_release__version=release_version,
first_release__organization_id=projects[0].organization_id,
environment__organization_id=projects[0].organization_id,
).values_list("group_id")
)
)
),
"first_seen": ScalarCondition("first_seen"),
}
).build(group_queryset, search_filters)
now = timezone.now()
end = None
end_params = filter(None, [date_to, get_search_filter(search_filters, "date", "<")])
if end_params:
end = min(end_params)
if not end:
end = now + ALLOWED_FUTURE_DELTA
# This search is for some time window that ends with "now",
# so if the requested sort is `date` (`last_seen`) and there
# are no other Snuba-based search predicates, we can simply
# return the results from Postgres.
if (
cursor is None
and sort_by == "date"
and not environments
and
# This handles tags and date parameters for search filters.
not [
sf
for sf in search_filters
if sf.key.name not in issue_only_fields.union(["date"])
]
):
group_queryset = group_queryset.order_by("-last_seen")
paginator = DateTimePaginator(group_queryset, "-last_seen", **paginator_options)
# When its a simple django-only search, we count_hits like normal
return paginator.get_result(limit, cursor, count_hits=count_hits)
# TODO: Presumably we only want to search back to the project's max
# retention date, which may be closer than 90 days in the past, but
# apparently `retention_window_start` can be None(?), so we need a
# fallback.
retention_date = max(filter(None, [retention_window_start, now - timedelta(days=90)]))
# TODO: We should try and consolidate all this logic together a little
# better, maybe outside the backend. Should be easier once we're on
# just the new search filters
start_params = [date_from, retention_date, get_search_filter(search_filters, "date", ">")]
start = max(filter(None, start_params))
end = max([retention_date, end])
if start == retention_date and end == retention_date:
# Both `start` and `end` must have been trimmed to `retention_date`,
# so this entire search was against a time range that is outside of
# retention. We'll return empty results to maintain backwards compatibility
# with Django search (for now).
return EMPTY_RESULT
if start >= end:
# TODO: This maintains backwards compatibility with Django search, but
# in the future we should find a way to notify the user that their search
# is invalid.
return EMPTY_RESULT
# Here we check if all the django filters reduce the set of groups down
# to something that we can send down to Snuba in a `group_id IN (...)`
# clause.
max_candidates = options.get("snuba.search.max-pre-snuba-candidates")
too_many_candidates = False
candidate_ids = list(group_queryset.values_list("id", flat=True)[: max_candidates + 1])
metrics.timing("snuba.search.num_candidates", len(candidate_ids))
if not candidate_ids:
# no matches could possibly be found from this point on
metrics.incr("snuba.search.no_candidates", skip_internal=False)
return EMPTY_RESULT
elif len(candidate_ids) > max_candidates:
# If the pre-filter query didn't include anything to significantly
# filter down the number of results (from 'first_release', 'query',
# 'status', 'bookmarked_by', 'assigned_to', 'unassigned',
# 'subscribed_by', 'active_at_from', or 'active_at_to') then it
# might have surpassed the `max_candidates`. In this case,
# we *don't* want to pass candidates down to Snuba, and instead we
# want Snuba to do all the filtering/sorting it can and *then* apply
# this queryset to the results from Snuba, which we call
# post-filtering.
metrics.incr("snuba.search.too_many_candidates", skip_internal=False)
too_many_candidates = True
candidate_ids = []
sort_field = sort_strategies[sort_by]
chunk_growth = options.get("snuba.search.chunk-growth-rate")
max_chunk_size = options.get("snuba.search.max-chunk-size")
chunk_limit = limit
offset = 0
num_chunks = 0
hits = None
paginator_results = EMPTY_RESULT
result_groups = []
result_group_ids = set()
max_time = options.get("snuba.search.max-total-chunk-time-seconds")
time_start = time.time()
if count_hits and (too_many_candidates or cursor is not None):
# If we had too many candidates to reasonably pass down to snuba,
# or if we have a cursor that bisects the overall result set (such
# that our query only sees results on one side of the cursor) then
# we need an alternative way to figure out the total hits that this
# query has.
# To do this, we get a sample of groups matching the snuba side of
# the query, and see how many of those pass the post-filter in
# postgres. This should give us an estimate of the total number of
# snuba matches that will be overall matches, which we can use to
# get an estimate for X-Hits.
# The sampling is not simple random sampling. It will return *all*
# matching groups if there are less than N groups matching the
# query, or it will return a random, deterministic subset of N of
# the groups if there are more than N overall matches. This means
# that the "estimate" is actually an accurate result when there are
# less than N matching groups.
# The number of samples required to achieve a certain error bound
# with a certain confidence interval can be calculated from a
# rearrangement of the normal approximation (Wald) confidence
# interval formula:
#
# https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
#
# Effectively if we want the estimate to be within +/- 10% of the
# real value with 95% confidence, we would need (1.96^2 * p*(1-p))
# / 0.1^2 samples. With a starting assumption of p=0.5 (this
# requires the most samples) we would need 96 samples to achieve
# +/-10% @ 95% confidence.
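            # Plugging in those numbers: (1.96 ** 2 * 0.5 * 0.5) / (0.1 ** 2)
            # = 0.9604 / 0.01, i.e. roughly 96 samples.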
sample_size = options.get("snuba.search.hits-sample-size")
snuba_groups, snuba_total = snuba_search(
start=start,
end=end,
project_ids=[p.id for p in projects],
environment_ids=environments and [environment.id for environment in environments],
sort_field=sort_field,
limit=sample_size,
offset=0,
get_sample=True,
search_filters=search_filters,
)
snuba_count = len(snuba_groups)
if snuba_count == 0:
return EMPTY_RESULT
else:
filtered_count = group_queryset.filter(
id__in=[gid for gid, _ in snuba_groups]
).count()
hit_ratio = filtered_count / float(snuba_count)
hits = int(hit_ratio * snuba_total)
# Do smaller searches in chunks until we have enough results
# to answer the query (or hit the end of possible results). We do
# this because a common case for search is to return 100 groups
# sorted by `last_seen`, and we want to avoid returning all of
# a project's groups and then post-sorting them all in Postgres
# when typically the first N results will do.
while (time.time() - time_start) < max_time:
num_chunks += 1
# grow the chunk size on each iteration to account for huge projects
# and weird queries, up to a max size
chunk_limit = min(int(chunk_limit * chunk_growth), max_chunk_size)
# but if we have candidate_ids always query for at least that many items
chunk_limit = max(chunk_limit, len(candidate_ids))
# {group_id: group_score, ...}
snuba_groups, total = snuba_search(
start=start,
end=end,
project_ids=[p.id for p in projects],
environment_ids=environments and [environment.id for environment in environments],
sort_field=sort_field,
cursor=cursor,
candidate_ids=candidate_ids,
limit=chunk_limit,
offset=offset,
search_filters=search_filters,
)
metrics.timing("snuba.search.num_snuba_results", len(snuba_groups))
count = len(snuba_groups)
more_results = count >= limit and (offset + limit) < total
offset += len(snuba_groups)
if not snuba_groups:
break
if candidate_ids:
# pre-filtered candidates were passed down to Snuba, so we're
# finished with filtering and these are the only results. Note
# that because we set the chunk size to at least the size of
# the candidate_ids, we know we got all of them (ie there are
# no more chunks after the first)
result_groups = snuba_groups
if count_hits and hits is None:
hits = len(snuba_groups)
else:
# pre-filtered candidates were *not* passed down to Snuba,
# so we need to do post-filtering to verify Sentry DB predicates
filtered_group_ids = group_queryset.filter(
id__in=[gid for gid, _ in snuba_groups]
).values_list("id", flat=True)
group_to_score = dict(snuba_groups)
for group_id in filtered_group_ids:
if group_id in result_group_ids:
# because we're doing multiple Snuba queries, which
# happen outside of a transaction, there is a small possibility
# of groups moving around in the sort scoring underneath us,
# so we at least want to protect against duplicates
continue
group_score = group_to_score[group_id]
result_group_ids.add(group_id)
result_groups.append((group_id, group_score))
# TODO do we actually have to rebuild this SequencePaginator every time
# or can we just make it after we've broken out of the loop?
paginator_results = SequencePaginator(
[(score, id) for (id, score) in result_groups], reverse=True, **paginator_options
).get_result(limit, cursor, known_hits=hits)
# break the query loop for one of three reasons:
# * we started with Postgres candidates and so only do one Snuba query max
# * the paginator is returning enough results to satisfy the query (>= the limit)
# * there are no more groups in Snuba to post-filter
if candidate_ids or len(paginator_results.results) >= limit or not more_results:
break
# HACK: We're using the SequencePaginator to mask the complexities of going
# back and forth between two databases. This causes a problem with pagination
# because we're 'lying' to the SequencePaginator (it thinks it has the entire
# result set in memory when it does not). For this reason we need to make some
# best guesses as to whether the `prev` and `next` cursors have more results.
if len(paginator_results.results) == limit and more_results:
# Because we are going back and forth between DBs there is a small
# chance that we will hand the SequencePaginator exactly `limit`
# items. In this case the paginator will assume there are no more
# results, so we need to override the `next` cursor's results.
paginator_results.next.has_results = True
if cursor is not None and (not cursor.is_prev or len(paginator_results.results) > 0):
# If the user passed a cursor, and it isn't already a 0 result `is_prev`
# cursor, then it's worth allowing them to go back a page to check for
# more results.
paginator_results.prev.has_results = True
metrics.timing("snuba.search.num_chunks", num_chunks)
groups = Group.objects.in_bulk(paginator_results.results)
paginator_results.results = [groups[k] for k in paginator_results.results if k in groups]
return paginator_results
def snuba_search(
start,
end,
project_ids,
environment_ids,
sort_field,
cursor=None,
candidate_ids=None,
limit=None,
offset=0,
get_sample=False,
search_filters=None,
):
"""
This function doesn't strictly benefit from or require being pulled out of the main
query method above, but the query method is already large and this function at least
extracts most of the Snuba-specific logic.
Returns a tuple of:
* a sorted list of (group_id, group_score) tuples sorted descending by score,
* the count of total results (rows) available for this query.
"""
filters = {"project_id": project_ids}
if environment_ids is not None:
filters["environment"] = environment_ids
if candidate_ids:
filters["issue"] = sorted(candidate_ids)
conditions = []
having = []
for search_filter in search_filters:
if (
# Don't filter on issue fields here, they're not available
search_filter.key.name in issue_only_fields
or
# We special case date
search_filter.key.name == "date"
):
continue
converted_filter = convert_search_filter_to_snuba_query(search_filter)
        # Ensure that user-generated tags that clash with aggregation_defs are not added to having
if search_filter.key.name in aggregation_defs and not search_filter.key.is_tag:
having.append(converted_filter)
else:
conditions.append(converted_filter)
extra_aggregations = dependency_aggregations.get(sort_field, [])
required_aggregations = set([sort_field, "total"] + extra_aggregations)
for h in having:
alias = h[0]
required_aggregations.add(alias)
aggregations = []
for alias in required_aggregations:
aggregations.append(aggregation_defs[alias] + [alias])
if cursor is not None:
having.append((sort_field, ">=" if cursor.is_prev else "<=", cursor.value))
selected_columns = []
if get_sample:
query_hash = md5(repr(conditions)).hexdigest()[:8]
selected_columns.append(("cityHash64", ("'{}'".format(query_hash), "issue"), "sample"))
sort_field = "sample"
orderby = [sort_field]
referrer = "search_sample"
else:
# Get the top matching groups by score, i.e. the actual search results
# in the order that we want them.
orderby = ["-{}".format(sort_field), "issue"] # ensure stable sort within the same score
referrer = "search"
snuba_results = snuba.dataset_query(
dataset=snuba.Dataset.Events,
start=start,
end=end,
selected_columns=selected_columns,
groupby=["issue"],
conditions=conditions,
having=having,
filter_keys=filters,
aggregations=aggregations,
orderby=orderby,
referrer=referrer,
limit=limit,
offset=offset,
totals=True, # Needs to have totals_mode=after_having_exclusive so we get groups matching HAVING only
turbo=get_sample, # Turn off FINAL when in sampling mode
sample=1, # Don't use clickhouse sampling, even when in turbo mode.
)
rows = snuba_results["data"]
total = snuba_results["totals"]["total"]
if not get_sample:
metrics.timing("snuba.search.num_result_groups", len(rows))
return [(row["issue"], row[sort_field]) for row in rows], total
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Service to monitor a set of OPC UA nodes and send commands to
a socket service upon node value changes and, conversely, change
node values upon socket responses. The service parameters are
defined in a YAML file.
"""
import argparse
import logging
import logging.config
import yaml
import coloredlogs
from opc_client import OpcClient
from opcua import ua
from socket_client import SocketClient
from logiclibrary import opc_node_triggers
__author__ = "Giuseppe Cogoni"
__author__ = "Brent Maranzano"
__license__ = "MIT"
class OpcSocketRelay(object):
"""OPC - socket relay class
"""
def __init__(self, configuration_file):
"""Create logger, socket client and OPC UA client.
Args:
configuration_file (str): Filename containing the parameters \
for the socket client, the OPC client and the mapping to \
relay commands and values.
"""
self._setup_logger()
self._get_parameters(configuration_file)
self._create_socket_client()
self._create_opc_client()
self._getOPCnodes()
self._subscribe_to_opc_changes()
self._logic_handle = opc_node_triggers(self._OPCnodes,
self._socket,
self._parameters)
def _setup_logger(self, config_file="./configs/logger_conf.yml"):
"""Start the logger using the provided configuration file.
Args:
config_file (YAML file): logger configuration file.
"""
try:
with open(config_file, "rt") as file_obj:
config = yaml.safe_load(file_obj.read())
logging.config.dictConfig(config)
coloredlogs.install(level="DEBUG")
except Exception as e:
print(e)
self._logger = logging.getLogger(__name__)
self._logger.info("Logger started...")
def _get_parameters(self, configuration_file):
"""Read and parse the yaml file containing the
parameters to start the services. Sets the
object attribute self._parameters.
Args:
configuration_file (str): Filename containing parameters formatted as yaml.
"""
with open(configuration_file, "rt") as file_obj:
self._parameters = yaml.safe_load(file_obj.read())
def _create_socket_client(self):
"""Create the socket client.
"""
socket_parameters = self._parameters['socket']
self._socket = SocketClient(*socket_parameters.values())
def _create_opc_client(self):
"""Create an OPC UA client
"""
endpoint = self._parameters['opc']['endpoint']
self._opc_client = OpcClient(endpoint)
def _getOPCnodes(self):
"""Creates a dictionary with the OPC UA nodes and variat types from the
tag parameters entered in the configuration.yml file.
"""
nodes_dict = self._parameters['opc']['tags']
        self._logger.info('OPC tag parameters from configuration: {}'.format(nodes_dict))
self._OPCnodes = {}
for info in nodes_dict:
for name, tag_type in info.items():
tag_node = '{}{}'.format(self._parameters['opc']['root_node'],
name)
self._logger.info('Tag node {}'.format(tag_node))
self._OPCnodes[name] = {}
self._OPCnodes[name]['node'] = \
self._opc_client.client.get_node(tag_node)
self._OPCnodes[name]['vtype'] = getattr(ua.VariantType,
tag_type)
self._logger.info('OPC tags retrieved: {}'.format(self._OPCnodes))
def _subscribe_to_opc_changes(self):
"""Instantiate a subscription class that passes
self as the handler. Then subscribe to the nodes.
"""
subs = [self._parameters['mapping']['subscribed_tags']['configuration'][0],
self._parameters['mapping']['subscribed_tags']['watchdog'][0],
self._parameters['mapping']['subscribed_tags']['sequencing'][0]]
nodes = [self._OPCnodes[i]['node'] for i in subs]
self._subscription = self._opc_client.client.create_subscription(
500, self
)
self._subscription.subscribe_data_change(nodes)
def datachange_notification(self, node, val, data):
"""Main function to capture datachenges coming from DeltaV server.
The function is called for every datachange notification from DeltaV server subscribed tags.
Note:
The function executes specific logic associated to the changing subscribed node.
These logics are define within the `logiclibrary.py`.
"""
self._logger.debug(
"""OPC-server data change:
node:{}
value:{}
data: {}""".format(node, val, data))
self._logic_handle._logicBlock_1(node, val)
self._logic_handle._logicBlock_2(node, val)
self._logic_handle._logicBlock_3(node, val)
#Main routine
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Service to relay data and commands between"
" OPC client and socket client."
)
parser.add_argument(
"--configuration_file",
help="YAML file containing the configuration that define the services."
" See docstring for example.",
type=str,
default="./configs/configuration.yml"
)
args = parser.parse_args()
OpcSocketRelay(args.configuration_file)
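# Illustrative configuration sketch (an assumption inferred from how
# self._parameters is accessed above; the real configuration.yml may differ):
#
#   socket:            # values are passed positionally to SocketClient
#     host: localhost
#     port: 5000
#   opc:
#     endpoint: opc.tcp://localhost:4840
#     root_node: "ns=2;s="
#     tags:
#       - TagA: Float
#       - TagB: Boolean
#   mapping:
#     subscribed_tags:
#       configuration: [TagA]
#       watchdog: [TagB]
#       sequencing: [TagA]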
|
import re
import json
import math
import time
import datetime
from explorer.views import homepage
from explorer.settings import LOGGING
import logging
import logging.config
#from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED, FIRST_COMPLETED
logging.config.dictConfig(LOGGING)
logger = logging.getLogger('django.request')
from django.shortcuts import render
from django.db.models import Q
from django.http import HttpResponse
from .models import *
from .chain import Chain
##############################################
from django.template import loader
from pyecharts.charts import Line3D
import pyecharts.options as opts
def block_update_finished(request):
blocks_list = Block.objects.all()
context = {
'blocks': blocks_list,
}
return render(request, 'block/block_list.html', context)
def block_detail(request):
hash = request.GET.get('hash')
logger.info("hash: %s" %(hash))
ablock = Block.objects.get(hash=hash)
blmessages = BlsMessage.objects.filter(block=ablock)
context = {
'ablock': ablock,
'blmessages': blmessages
}
return render(request, 'block/block_detail.html', context)
def block_count(request):
to = request.GET.get('to')
method = request.GET.get('method')
logger.info("to: %s, method:%s" %(to, method))
    # Query set
blsmessages = BlsMessage.objects.filter(to=to,method=method)
blsmessages = blsmessages.order_by('block')
context = {
'to': to,
'method': method,
'blsmessages': blsmessages
}
return render(request, 'block/block_count.html', context)
def block_search(request):
search = request.GET.get('search')
blocks_list = Block.objects.all()
    # Filter the queryset with the search term
logger.info("search: %s" %(search))
if search:
blocks_list = blocks_list.filter(
Q(miner__icontains=search) |
Q(height__icontains=search) |
Q(hash__icontains=search)
)
else:
        # Reset the search parameter to an empty string
search = ''
    blocks_list = blocks_list.order_by('height')
context = {
'blocks': blocks_list,
'search': search,
}
return render(request, 'block/block_search.html', context)
# get blocklist with date
def block_list(request):
date = request.GET.get('date')
    if date is None:
date = datetime.date.today()
startTimestr = ("%s %s" %(date, " 00:00:00"))
startTimestamp = time.mktime(time.strptime(startTimestr, "%Y-%m-%d %H:%M:%S"))
endTimeStr = ("%s %s" %(date, " 23:59:59"))
endTimestamp = time.mktime(time.strptime(endTimeStr, "%Y-%m-%d %H:%M:%S"))
blocks_list = Block.objects.all().filter(timestamp__gte=startTimestamp, timestamp__lte=endTimestamp)
    blocks_list = blocks_list.order_by('height')
    # Context object passed to the template
context = {
'blocks': blocks_list,
'date': date,
'startTimestamp' : startTimestamp,
'endTimestamp' : endTimestamp,
'currentTimestamp' : startTimestamp,
}
    # render loads the template and returns it populated with the context object
return render(request, 'block/block_list.html', context)
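# Illustrative sketch (not one of the views above): how a date string maps to
# the [00:00:00, 23:59:59] Unix-timestamp window that block_list filters on.
def _example_day_window(date_str="2021-01-01"):
    start = time.mktime(time.strptime("%s 00:00:00" % date_str, "%Y-%m-%d %H:%M:%S"))
    end = time.mktime(time.strptime("%s 23:59:59" % date_str, "%Y-%m-%d %H:%M:%S"))
    return start, end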
|
"""
Quaternion - defined as (x, y, z, w) to be compatible with ROS and RealSense
ROS:
http://wiki.ros.org/tf2/Tutorials/Quaternions
ROS uses quaternions to track and apply rotations. A quaternion has 4
components (x,y,z,w). That's right, 'w' is last (but beware: some libraries
like Eigen put w as the first number!). The commonly-used unit quaternion that
yields no rotation about the x/y/z axes is (0,0,0,1)
https://www.andre-gaschler.com/rotationconverter/
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
"""
import math
def multiply(quaternion1, quaternion0):
x0, y0, z0, w0 = quaternion0
x1, y1, z1, w1 = quaternion1
return [ x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0,
-x1*x0 - y1*y0 - z1*z0 + w1*w0]
def conjugate(quaternion):
x, y, z, w = quaternion
return [-x, -y, -z, w]
def identity():
return [0, 0, 0, 1]
def rotate_vector(vector, quaternion):
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
qvector = vector + [0]
con = conjugate(quaternion)
part1 = multiply(quaternion, qvector)
return multiply(part1, con)[:-1]
def transform(vector, transformation):
translation, rotation = transformation
rotated = rotate_vector(vector, rotation)
return [sum(v) for v in zip(rotated, translation)]
def normalize(quaternion):
x0, y0, z0, w0 = quaternion
sqr_size = x0*x0 + y0*y0 + z0*z0 + w0*w0
if abs(sqr_size - 1.0) > 0.00001:
k = math.sqrt(sqr_size)
x0, y0, z0, w0 = x0/k, y0/k, z0/k, w0/k
return [x0, y0, z0, w0]
def euler_to_quaternion(yaw, pitch, roll):
qx = math.sin(roll/2) * math.cos(pitch/2) * math.cos(yaw/2) - math.cos(roll/2) * math.sin(pitch/2) * math.sin(yaw/2)
qy = math.cos(roll/2) * math.sin(pitch/2) * math.cos(yaw/2) + math.sin(roll/2) * math.cos(pitch/2) * math.sin(yaw/2)
qz = math.cos(roll/2) * math.cos(pitch/2) * math.sin(yaw/2) - math.sin(roll/2) * math.sin(pitch/2) * math.cos(yaw/2)
qw = math.cos(roll/2) * math.cos(pitch/2) * math.cos(yaw/2) + math.sin(roll/2) * math.sin(pitch/2) * math.sin(yaw/2)
return [qx, qy, qz, qw]
def euler_zyx(quaternion):
x0, y0, z0, w0 = normalize(quaternion)
ax = math.atan2(2*(w0*x0+y0*z0), 1-2*(x0*x0+y0*y0))
    ay = math.asin(max(-1, min(1, 2*(w0*y0-z0*x0))))  # clipping the range because of possible small overflows
az = math.atan2(2*(w0*z0+x0*y0), 1-2*(y0*y0+z0*z0))
return [az, ay, ax]
def heading(quaternion):
x0, y0, z0, w0 = quaternion
az = math.atan2(2*(w0*z0+x0*y0), 1-2*(y0*y0+z0*z0))
return az
def from_axis_angle(axis, angle):
ax, ay, az = axis
qx = ax * math.sin(angle / 2)
qy = ay * math.sin(angle / 2)
qz = az * math.sin(angle / 2)
qw = math.cos(angle / 2)
return [qx, qy, qz, qw]
def rotation_matrix(quaternion):
qx, qy, qz, qw = quaternion
r1 = [1 - 2*qy**2 - 2*qz**2, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw]
r2 = [2*qx*qy + 2*qz*qw, 1 - 2*qx**2 - 2*qz**2, 2*qy*qz - 2*qx*qw]
r3 = [2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx**2 - 2*qy**2]
return [r1, r2, r3]
def from_rotation_matrix(rotation_matrix):
# http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/index.htm
# https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
m00, m01, m02 = rotation_matrix[0]
m10, m11, m12 = rotation_matrix[1]
m20, m21, m22 = rotation_matrix[2]
qw = math.sqrt(max(0, 1 + m00 + m11 + m22)) / 2
qx = math.sqrt(max(0, 1 + m00 - m11 - m22)) / 2
qy = math.sqrt(max(0, 1 - m00 + m11 - m22)) / 2
qz = math.sqrt(max(0, 1 - m00 - m11 + m22)) / 2
qx = math.copysign(qx, m21 - m12)
qy = math.copysign(qy, m02 - m20)
qz = math.copysign(qz, m10 - m01)
return [qx, qy, qz, qw]
def angle_between(quaternion0, quaternion1):
# https://math.stackexchange.com/questions/90081/quaternion-distance
inner_product = sum(a * b for (a, b) in zip(quaternion0, quaternion1))
val = 2 * inner_product**2 - 1
return math.acos(max(-1.0, min(1.0, val)))
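# Minimal usage sketch (added for illustration, not part of the original module):
# rotate the unit X vector 90 degrees about Z and read back the heading.
if __name__ == "__main__":
    q = from_axis_angle([0, 0, 1], math.radians(90))
    print(rotate_vector([1, 0, 0], q))  # approximately [0, 1, 0]
    print(math.degrees(heading(q)))     # approximately 90.0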
# vim: expandtab sw=4 ts=4
|
from bfio import BioReader, BioWriter
import bioformats
import javabridge as jutil
import logging, traceback
from pathlib import Path
import os
import cv2
import numpy as np
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
def invert_binary(image, kernel=None, n=None):
"""
This function inverts the binary image.
The 0s get mapped to 1.
The 1s get mapped to 0.
"""
    invertedimg = 1 - image
return invertedimg
def dilate_binary(image, kernel=None, n=None):
"""
Increases the white region in the image, or the
foreground object increases.
Additional Arguments:
---------------------
n : int
(iterations) The number of times to apply the dilation.
"""
dilatedimg = cv2.dilate(image, kernel, iterations=n)
return dilatedimg
def erode_binary(image, kernel=None, n=None):
"""
Decreases the white region in the image, or the
foreground object decreases.
Additional Arguments:
---------------------
n : int
(iterations) The number of times to apply the erosion.
"""
erodedimg = cv2.erode(image, kernel, iterations=n)
return erodedimg
def open_binary(image, kernel=None, n=None):
"""
An opening operation is similar to applying an erosion
followed by a dilation. It removes small objects/noise in the
background of the images.
"""
openimg = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
return openimg
def close_binary(image, kernel=None, n=None):
"""
A closing operation is similar to applying a dilation
followed by an erosion. It is useful in closing small holes
inside the foreground objects, or small black points inside
the image.
"""
closeimg = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
return closeimg
def morphgradient_binary(image, kernel=None, n=None):
"""
This operation is the difference between dilation and
erosion of an image. It creates an outline of the
foreground object.
"""
mg = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel)
return mg
def fill_holes_binary(image, kernel=None, n=None):
"""
    This function fills holes inside segments. It finds contours in
    the image and draws each contour filled in, closing any holes
    inside the foreground objects.
https://stackoverflow.com/questions/10316057/filling-holes-inside-a-binary-object
"""
contour,hier = cv2.findContours(image,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
cv2.drawContours(image,[cnt],0,1,-1)
return image
def skeleton_binary(image, kernel=None, n=None):
"""
This operation reduces the foreground regions in a binary image
to a skeletal remnant that largely preserves the extent and
connectivity of the original region while throwing away most of
the original foreground pixels.
https://homepages.inf.ed.ac.uk/rbf/HIPR2/skeleton.htm
"""
done = False
size = np.size(image)
skel = np.zeros(image.shape,np.uint8)
while (not done):
erode = cv2.erode(image,kernel)
temp = cv2.dilate(erode,kernel)
temp = cv2.subtract(image,temp)
skel = cv2.bitwise_or(skel,temp)
image = erode.copy()
zeros = size - cv2.countNonZero(image)
if zeros==size:
done = True
return skel
def tophat_binary(image, kernel=None, n=None):
"""
It is the difference between input image and
opening of the image
"""
tophat = cv2.morphologyEx(image,cv2.MORPH_TOPHAT, kernel)
return tophat
def blackhat_binary(image, kernel=None, n=None):
"""
It is the difference between the closing of
the input image and input image.
"""
blackhat = cv2.morphologyEx(image,cv2.MORPH_BLACKHAT, kernel)
return blackhat
def areafiltering_remove_smaller_objects_binary(image, kernel=None, n=None):
"""
    Removes all objects in the image that have an area smaller than
    the threshold specified.
Additional Arguments
--------------------
n : int
Specifies the threshold.
"""
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=8)
sizes = stats[1:, -1]; nb_components = nb_components - 1
af = np.zeros((image.shape))
    count_kept = 0
    logger.info("{} ROI in tile".format(nb_components))
    for i in range(0, nb_components):
        if sizes[i] >= n:
            af[output == i+1] = 1
            count_kept = count_kept + 1
    logger.info("{} ROI kept in tile".format(count_kept))
return af
def areafiltering_remove_larger_objects_binary(image, kernel=None, n=None):
"""
    Removes all objects in the image that have an area larger than
    the threshold specified.
Additional Arguments
--------------------
n : int
Specifies the threshold.
"""
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=8)
sizes = stats[1:, -1]
nb_components = nb_components - 1
af = np.zeros((image.shape))
    count_kept = 0
    logger.info("{} ROI in tile".format(nb_components))
    for i in range(0, nb_components):
        if sizes[i] <= n:
            af[output == i+1] = 1
            count_kept = count_kept + 1
    logger.info("{} ROI kept in tile".format(count_kept))
return af
def binary_operation(image,
output,
function,
extra_arguments,
extra_padding,
kernel,
Tile_Size):
"""
This function goes through the images and calls the appropriate binary operation
Parameters
----------
image : str
Location of image
    function : callable
        The binary operation to dispatch on the image
extra_arguments : int
Extra argument(s) for the binary operation that is called
extra_padding : int
The extra padding around each tile so that
        binary operations are not skewed around the edges.
kernel : cv2 object
The kernel used for most binary operations
output : str
Location for BioWriter
Tile_Size : int
Tile Size for reading images
"""
# Start the javabridge with proper java logging
logger.info('Initializing the javabridge...')
log_config = Path(__file__).parent.joinpath("log4j.properties")
jutil.start_vm(args=["-Dlog4j.configuration=file:{}".format(str(log_config.absolute()))],class_path=bioformats.JARS)
try:
# Read the image
br = BioReader(image)
# Get the dimensions of the Image
br_x, br_y, br_z, br_c, br_t = br.num_x(), br.num_y(), br.num_z(), br.num_c(), br.num_t()
br_shape = (br_x, br_y, br_z, br_c, br_t)
datatype = br.pixel_type()
max_datatype_val = np.iinfo(datatype).max
logger.info("Original Datatype {}: ({})".format(datatype, max_datatype_val))
logger.info("Shape of Input (XYZCT): {}".format(br_shape))
# Initialize Output
bw = BioWriter(file_path=output, metadata=br.read_metadata())
# Initialize the Python Generators to go through each "tile" of the image
tsize = Tile_Size + (2*extra_padding)
logger.info("Tile Size {}x{}".format(tsize, tsize))
readerator = br.iterate(tile_stride=[Tile_Size, Tile_Size],tile_size=[tsize, tsize], batch_size=1)
writerator = bw.writerate(tile_size=[Tile_Size, Tile_Size], tile_stride=[Tile_Size, Tile_Size], batch_size=1)
next(writerator)
for images,indices in readerator:
# Extra tiles do not need to be calculated.
# Indices should range from -intkernel < index value < Image_Dimension + intkernel
if (indices[0][0][0] == br_x - extra_padding) or (indices[1][0][0] == br_y - extra_padding):
continue
logger.info(indices)
# Images are (1, Tile_Size, Tile_Size, 1)
# Need to convert to (Tile_Size, Tile_Size) to be able to do operation
images = np.squeeze(images)
images[images == max_datatype_val] = 1
# Initialize which function we are dispatching
if callable(function):
trans_image = function(images, kernel=kernel, n=extra_arguments)
trans_image = trans_image.astype(datatype)
trans_image[trans_image==1] = max_datatype_val
                # The image needs to be converted back to (1, Tile_Size, Tile_Size, 1) to write it
reshape_img = np.reshape(trans_image[extra_padding:-extra_padding,extra_padding:-extra_padding], (1, Tile_Size, Tile_Size, 1))
# Send it to the Writerator
writerator.send(reshape_img)
# Close the image
bw.close_image()
except:
traceback.print_exc()
# Always close the JavaBridge
finally:
jutil.kill_vm()
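# Illustrative usage (not part of the original plugin code): apply one of the
# operations above to a tiny in-memory binary array with a 3x3 OpenCV kernel.
if __name__ == "__main__":
    example = np.zeros((7, 7), dtype=np.uint8)
    example[3, 3] = 1
    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    print(dilate_binary(example, kernel=rect_kernel, n=1))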
|
#!/usr/bin/env python3
# -*- coding: utf-8; py-indent-offset:4 -*-
import sys
import os
import io
import socket
import logging
import numpy as np
import pandas as pd
import datetime as dt
import argparse
from influxdb import DataFrameClient as dfclient
from influxdb.exceptions import InfluxDBClientError
class IQFeedTool(object):
def __init__(self):
timeout = 10.0
self._dbhost = args.dbhost if args.dbhost else 'localhost'
self._dbport = args.dbport if args.dbport else 8086
self._username = args.username if args.username else None
self._password = args.password if args.password else None
self._database = args.database if args.database else 'instruments'
self._ticker = args.ticker
self._iqhost = args.iqhost if args.iqhost else 'localhost'
self._iqport = args.iqport if args.iqport else 9100
self._year = None
self._recv_buf = ""
self._ndf = pd.DataFrame()
# Open a streaming socket to the IQFeed daemon
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self._iqhost, self._iqport))
self._sock.settimeout(timeout)
self.dfdb = dfclient(self._dbhost, self._dbport,
self._username, self._password,
self._database)
if not args.fromdate:
self._start = str(dt.datetime.today().year)
        elif len(args.fromdate) == 4 or len(args.fromdate) == 10:
self._start = args.fromdate
else:
log.error('Starting date required in YYYY-MM-DD or YYYY format.')
sys.exit(-1)
if not args.todate:
self._stop = str(dt.datetime.today().year)
        elif len(args.todate) == 4 or len(args.todate) == 10:
self._stop = args.todate
else:
            log.error('Ending date required in YYYY-MM-DD or YYYY format.')
sys.exit(-1)
def _send_cmd(self, cmd: str):
"""Encode IQFeed API messages."""
self._sock.sendall(cmd.encode(encoding='latin-1', errors='strict'))
def iq_query(self, message: str):
"""Send data query to IQFeed API."""
end_msg = '!ENDMSG!'
recv_buffer = 4096
# Send the historical data request message and buffer the data
self._send_cmd(message)
chunk = ""
data = ""
while True:
chunk = self._sock.recv(recv_buffer).decode('latin-1')
data += chunk
if chunk.startswith('E,'): # error condition
if chunk.startswith('E,!NO_DATA!'):
log.warn('No data available for the given symbol or dates')
return
else:
raise Exception(chunk)
elif end_msg in chunk:
break
# Clean up the data.
data = data[:-1 * (len(end_msg) + 3)]
data = "".join(data.split("\r"))
data = data.replace(",\n", ",")[:-1]
data = data.split(",")
return data
def get_historical_minute_data(self, ticker: str):
"""Request historical 5 minute data from DTN."""
start = self._start
stop = self._stop
if len(stop) > 4:
stop = stop[:4]
if len(start) > 4:
start = start[:4]
for year in range(int(start), int(stop) + 1):
beg_time = ('%s0101000000' % year)
end_time = ('%s1231235959' % year)
msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % (ticker,
beg_time,
end_time)
try:
                data = self.iq_query(message=msg)
                self.add_data_to_df(data=data)
except Exception as err:
log.error('No data returned because %s', err)
try:
self.dfdb.write_points(self._ndf, ticker)
except InfluxDBClientError as err:
log.error('Write to database failed: %s' % err)
def add_data_to_df(self, data: np.array):
"""Build Pandas Dataframe in memory"""
col_names = ['high_p', 'low_p', 'open_p', 'close_p', 'volume', 'oi']
data = np.array(data).reshape(-1, len(col_names) + 1)
df = pd.DataFrame(data=data[:, 1:], index=data[:, 0],
columns=col_names)
df.index = pd.to_datetime(df.index)
# Sort the dataframe based on ascending dates.
df.sort_index(ascending=True, inplace=True)
# Convert dataframe columns to float and ints.
df[['high_p', 'low_p', 'open_p', 'close_p']] = df[
['high_p', 'low_p', 'open_p', 'close_p']].astype(float)
df[['volume', 'oi']] = df[['volume', 'oi']].astype(int)
if self._ndf.empty:
self._ndf = df
else:
self._ndf = self._ndf.append(df)
def get_tickers_from_file(self, filename):
"""Load ticker list from txt file"""
if not os.path.exists(filename):
log.error("Ticker List file does not exist: %s", filename)
tickers = []
with io.open(filename, 'r') as fd:
for ticker in fd:
tickers.append(ticker.rstrip())
return tickers
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Import IQFeed Historical Data to InfluxDB")
exoptgroup = parser.add_mutually_exclusive_group(required=True)
exoptgroup.add_argument("--ticker",
action='store', default='SPY',
help="Ticker to request data for.")
exoptgroup.add_argument('--ticker-list',
action='store', default=None,
                            help='Path to a file containing a list of tickers.')
parser.add_argument('--dbhost',
required=False, action='store',
default=None,
help='InfluxDB hostname.')
parser.add_argument('--dbport',
required=False, action='store',
default=None, type=int,
help='InfluxDB port number.')
parser.add_argument('--iqhost',
required=False, action='store',
default=None,
help='IQfeed Connect hostname.')
parser.add_argument('--iqport',
required=False, action='store',
default=None, type=int,
help='IQfeed Connect port number.')
parser.add_argument('--username',
required=False, action='store',
default=None,
help='InfluxDB username.')
parser.add_argument('--password',
required=False, action='store',
default=None,
help='InfluxDB password.')
parser.add_argument('--database',
required=False, action='store',
default=None,
help='InfluxDB database to use.')
parser.add_argument('--fromdate',
required=False, action='store', default=None,
type=str,
help=('Starting date for historical download '
'with format: YYYY[-MM-DDTHH:MM:SS].'))
parser.add_argument('--todate',
required=False, action='store', default=None,
type=str,
help=('Ending date for historical download '
'with format: YYYY[-MM-DDTHH:MM:SS].'))
parser.add_argument('--debug',
required=False, action='store_true',
help='Turn on debug logging level.')
parser.add_argument('--info',
required=False, action='store_true',
help='Turn on info logging level.')
args = parser.parse_args()
iq = IQFeedTool()
log = logging.getLogger()
log_console = logging.StreamHandler(sys.stdout)
log.addHandler(log_console)
tickers = []
if args.ticker_list:
tickers = iq.get_tickers_from_file(args.ticker_list)
else:
tickers.append(args.ticker.rstrip())
for (i, ticker) in enumerate(tickers):
try:
log.info("Processing %s (%d out of %d)", ticker, i+1,
len(tickers))
iq.get_historical_minute_data(ticker=ticker)
except Exception as err:
log.error('Error returned: %s', err)
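# Example invocation (illustrative; the script filename below is hypothetical,
# the flags are the ones defined above):
#   python iqfeed_to_influxdb.py --ticker SPY --fromdate 2019 --todate 2020 --dbhost localhost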
|
# Generated by Django 3.1.4 on 2021-10-26 17:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0022_auto_20211026_2254'),
]
operations = [
migrations.AlterField(
model_name='addroom',
name='image1',
field=models.FileField(upload_to='media/'),
),
migrations.AlterField(
model_name='bookingrooms',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2021, 10, 26, 23, 14, 21, 50545)),
),
]
|
from rest_framework.views import APIView
from rest_framework.response import Response
class SimpleAPI(APIView):
def get(self, request, format=None):
return Response('hidden')
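# Illustrative wiring (not part of this module): exposing the view in a urls.py.
#   from django.urls import path
#   urlpatterns = [path("simple/", SimpleAPI.as_view())]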
|
from compas.geometry import add_vectors
from compas.geometry import cross_vectors
from compas.geometry import normalize_vector
from compas.geometry import scale_vector
from compas.geometry import subtract_vectors
from numpy import array
from numpy import cos
from numpy import cross
from numpy import hstack
from numpy import newaxis
from numpy import ones
from numpy import sin
from numpy import tile
from numpy import zeros
from numpy.linalg import norm
from numba import float64
from numba import int64
from numba import jit
from compas.hpc import cross_vectors_numba
from compas.hpc import length_vector_numba
from time import time
__author__ = ['Andrew Liew <[email protected]>']
__copyright__ = 'Copyright 2017, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = '[email protected]'
def f(x, y):
return sin(x) * cos(y)
@jit(float64(float64, float64), nogil=True, nopython=True)
def fj(x, y):
return sin(x) * cos(y)
def python_normals(points, offset=0.1, delta=10**(-5)):
tic = time()
n = len(points)
normals = [0] * n
c = 0
for x, y, z in points:
zx = f(x + delta, y)
zy = f(x, y + delta)
xyz = [x, y, z]
vecx = subtract_vectors([x + delta, y, zx], xyz)
vecy = subtract_vectors([x, y + delta, zy], xyz)
vecn = normalize_vector(cross_vectors(vecx, vecy))
normals[c] = add_vectors(xyz, scale_vector(vecn, offset))
c += 1
toc = time() - tic
return normals, toc
def numpy_normals(points, offset=0.1, delta=10**(-5)):
tic = time()
xyz = array(points)
X = xyz[:, 0][:, newaxis]
Y = xyz[:, 1][:, newaxis]
Z = xyz[:, 2][:, newaxis]
delta_ = ones(X.shape) * delta
zeros_ = zeros(X.shape)
zx = f(X + delta_, Y)
zy = f(X, Y + delta_)
vecx = hstack([delta_, zeros_, zx - Z])
vecy = hstack([zeros_, delta_, zy - Z])
n_ = cross(vecx, vecy)
vecn = n_ / tile(norm(n_, ord=2, axis=1)[:, newaxis], (1, 3))
normals = xyz + offset * vecn
toc = time() - tic
return [list(i) for i in list(normals)], toc
def numba_normals(points, offset=0.1, delta=10**(-5)):
tic = time()
n = len(points)
normals = zeros((n, 3))
xyz = array(points)
X = xyz[:, 0]
Y = xyz[:, 1]
Z = xyz[:, 2]
normals = njit(normals, X, Y, Z, n, delta, offset)
toc = time() - tic
return [list(i) for i in list(normals)], toc
@jit(float64[:, :](float64[:, :], float64[:], float64[:], float64[:], int64, float64, float64), nogil=True, nopython=True)
def njit(normals, X, Y, Z, n, delta, offset):
vecx = array([delta, 0, 0])
vecy = array([0, delta, 0])
for i in range(n):
xi = X[i]
yi = Y[i]
zi = Z[i]
zx = fj(xi + delta, yi)
zy = fj(xi, yi + delta)
vecx[2] = zx - zi
vecy[2] = zy - zi
n_ = cross_vectors_numba(vecx, vecy)
vecn = n_ / length_vector_numba(n_)
normals[i, 0] = xi + offset * vecn[0]
normals[i, 1] = yi + offset * vecn[1]
normals[i, 2] = zi + offset * vecn[2]
return normals
# ==============================================================================
# Debugging
# ==============================================================================
if __name__ == '__main__':
points = [[1., 2., 3.], [2., 3., 4.], [5., 6., 7.]]
normals = numba_normals(points=points)
print(normals)
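    # Illustrative comparison (not in the original script): all three variants
    # return (normals, elapsed_seconds), so they can be timed side by side.
    #   _, t_py = python_normals(points)
    #   _, t_np = numpy_normals(points)
    #   _, t_nb = numba_normals(points)
    #   print(t_py, t_np, t_nb)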
|
from __future__ import unicode_literals
import frappe
import erpnext
from frappe import auth
import random
import datetime
import json, ast
from erpnext.accounts.utils import get_balance_on
from frappe.utils import (flt, getdate, get_url, now,
nowtime, get_time, today, get_datetime, add_days)
from frappe.utils import add_to_date, now, nowdate
from frappe.utils import cstr
from frappe.utils.make_random import get_random
@frappe.whitelist()
def general_service(doctype, filter1='%%', filter2='%%', filter3='%%', filter4='%%', filter5='%%', filter6='%%', filter7='%%', search_text='%%', cur_nam='%%', con_doc='%%', start=0, page_length=20):
############################################ LEAD ############################################
########################### Lead Full List & Search ############################
if doctype == "Lead" and con_doc == '%%':
conditions = {}
conditions1 = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["lead_name"] = ['like', search_text]
conditions1["company_name"] = ['like', search_text]
conditions1["mobile_no"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["lead_owner"] = filter2
if filter3 != '%%':
conditions["organization_lead"] = filter3
if filter4 != '%%':
conditions["creation"] = ['>=', filter4]
if filter5 != '%%':
conditions["creation"] = ['<=', filter5]
query = frappe.db.get_list('Lead',
or_filters=conditions1,
filters=conditions,
fields=["name", "lead_name", "company_name", "territory", "source", "market_segment", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Lead" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabLead`.name like '%{search_text}%' or `tabLead`.lead_name like '%{search_text}%' or `tabLead`.company_name like '%{search_text}%' or `tabLead`.mobile_no like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabLead`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabLead`.lead_owner = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabLead`.organization_lead = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and Date_Format(`tabLead`.creation,'%Y-%m-%d') >= '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and Date_Format(`tabLead`.creation,'%Y-%m-%d') <= '{filter5}' ".format(filter5=filter5)
query = frappe.db.sql(
""" select name, lead_name, company_name, territory, source, market_segment, status
from `tabLead`
where `tabLead`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
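    # Illustrative call (not part of the original API): fetching the first page
    # of open Leads owned by a given user via the whitelisted method above; the
    # filter values shown are hypothetical.
    #   general_service(doctype="Lead", filter1="Open",
    #                   filter2="user@example.com", start=0, page_length=20)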
########################### Quotations Connected With Lead & Search ############################
if doctype == "Quotation" and con_doc == "Lead":
connections = frappe.db.sql(
""" select name, quotation_to, customer_name, transaction_date, grand_total, status
from `tabQuotation` where `party_name` = '{cur_nam}'
and (`tabQuotation`.name like '%{search_text}%' or `tabQuotation`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Opportunities Connected With Lead & Search ############################
if doctype == "Opportunity" and con_doc == "Lead":
connections = frappe.db.sql(
""" select name,opportunity_from,customer_name,transaction_date,opportunity_type,sales_stage,status
from `tabOpportunity` where `party_name` = '{cur_nam}'
and (`tabOpportunity`.name like '%{search_text}%' or `tabOpportunity`.customer_name like '%{search_text}%' or `tabOpportunity`.party_name like '%{search_text}%' or `tabOpportunity`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
############################################ OPPORTUNITY ############################################
########################### Opportunity Full List & Search ############################
if doctype == "Opportunity" and con_doc == '%%':
conditions = {}
conditions1 = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["customer_name"] = ['like', search_text]
conditions1["party_name"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["opportunity_from"] = filter2
if filter3 != '%%':
conditions["party_name"] = filter3
if filter4 != '%%':
conditions["opportunity_type"] = filter4
if filter5 != '%%':
conditions["transaction_date"] = ['>=', filter5]
if filter6 != '%%':
conditions["transaction_date"] = ['<=', filter6]
query = frappe.db.get_list('Opportunity',
or_filters=conditions1,
filters=conditions,
fields=["name", "opportunity_from", "customer_name", "transaction_date",
"opportunity_type", "sales_stage", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Opportunity" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabOpportunity`.name like '%{search_text}%' or `tabOpportunity`.customer_name like '%{search_text}%' or `tabOpportunity`.party_name like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabOpportunity`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabOpportunity`.opportunity_from = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabOpportunity`.party_name = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabOpportunity`.opportunity_type = '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and `tabOpportunity`.transaction_date >= '{filter5}' ".format(filter5=filter5)
if filter6 != '%%':
conditions += " and `tabOpportunity`.transaction_date <= '{filter6}' ".format(filter6=filter6)
query = frappe.db.sql(
""" select name, opportunity_from, customer_name, transaction_date,
opportunity_type, sales_stage, status
from `tabOpportunity`
where `tabOpportunity`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
########################### Quotations Connected With Opportunity & Search ############################
if doctype == "Quotation" and con_doc == "Opportunity":
connections = frappe.db.sql(
""" select name, quotation_to, customer_name, transaction_date, grand_total, status
from `tabQuotation` where `opportunity` = '{cur_nam}'
and (`tabQuotation`.name like '%{search_text}%' or `tabQuotation`.customer_name like '%{search_text}%' or `tabQuotation`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Supplier Quotations Connected With Opportunity & Search ############################
if doctype == "Supplier Quotation" and con_doc == "Opportunity":
connections = frappe.db.sql(
""" select name,supplier,transaction_date,valid_till,grand_total,status
from `tabSupplier Quotation` where `opportunity` = '{cur_nam}'
and (`tabSupplier Quotation`.name like '%{search_text}%' or `tabSupplier Quotation`.supplier like '%{search_text}%' or `tabSupplier Quotation`.supplier_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
############################################ QUOTATION ############################################
########################### Quotation Full List & Search ############################
if doctype == "Quotation" and con_doc == '%%':
conditions = {}
conditions1 = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["customer_name"] = ['like', search_text]
conditions1["party_name"] = ['like', search_text]
conditions1["customer_address"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["quotation_to"] = filter2
if filter3 != '%%':
conditions["customer_name"] = filter3
if filter4 != '%%':
conditions["order_type"] = filter4
if filter5 != '%%':
conditions["transaction_date"] = ['>=', filter5]
if filter6 != '%%':
conditions["transaction_date"] = ['<=', filter6]
query = frappe.db.get_list('Quotation',
or_filters=conditions1,
filters=conditions,
fields=["name", "quotation_to", "customer_name", "transaction_date", "grand_total",
"status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Quotation" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabQuotation`.name like '%{search_text}%' or `tabQuotation`.customer_name like '%{search_text}%' or `tabQuotation`.party_name like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabQuotation`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabQuotation`.quotation_to = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabQuotation`.customer_name = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabQuotation`.order_type = '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and `tabQuotation`.transaction_date >= '{filter5}' ".format(filter5=filter5)
if filter6 != '%%':
conditions += " and `tabQuotation`.transaction_date <= '{filter6}' ".format(filter6=filter6)
query = frappe.db.sql(
""" select name, quotation_to, customer_name, transaction_date, grand_total,
status
from `tabQuotation`
where `tabQuotation`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
########################### Sales Orders Connected With Quotation & Search ############################
if doctype == "Sales Order" and con_doc == "Quotation":
connections = frappe.db.sql(
""" select distinct `tabSales Order`.name as name,`tabSales Order`.customer_name as customer_name,`tabSales Order`.customer_address as customer_address,
`tabSales Order`.transaction_date as transaction_date,`tabSales Order`.grand_total as grand_total,`tabSales Order`.status as status
from `tabSales Order` join `tabSales Order Item` on `tabSales Order`.name = `tabSales Order Item`.parent
where `tabSales Order Item`.prevdoc_docname = '{cur_nam}'
and (`tabSales Order`.name like '%{search_text}%' or `tabSales Order`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
############################################ CUSTOMER ############################################
########################### Customer Full List & Search ############################
if doctype == "Customer" and con_doc == '%%':
conditions1 = {}
conditions = {"disabled": ['=', 0]}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["customer_name"] = ['like', search_text]
conditions1["mobile_no"] = ['like', search_text]
if filter1 != '%%':
conditions["customer_group"] = filter1
if filter2 != '%%':
conditions["territory"] = filter2
if filter3 != '%%':
conditions["customer_type"] = filter3
if filter4 != '%%':
conditions["creation"] = ['>=', filter4]
if filter5 != '%%':
conditions["creation"] = ['<=', filter5]
query = frappe.db.get_list('Customer',
or_filters=conditions1,
filters=conditions,
fields=["name","customer_name","customer_group","customer_type","territory","mobile_no","tax_id","customer_primary_address","customer_primary_contact","default_currency","default_price_list","payment_terms","default_sales_partner"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Customer" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabCustomer`.name like '%{search_text}%' or `tabCustomer`.customer_name like '%{search_text}%' or `tabCustomer`.mobile_no like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabCustomer`.customer_group = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabCustomer`.territory = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabCustomer`.customer_type = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and Date_Format(`tabCustomer`.creation,'%Y-%m-%d') >= '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and Date_Format(`tabCustomer`.creation,'%Y-%m-%d') <= '{filter5}' ".format(filter5=filter5)
query = frappe.db.sql(
""" select name,customer_name,customer_group,customer_type,territory,mobile_no,tax_id,customer_primary_address,customer_primary_contact,default_currency,default_price_list,payment_terms,default_sales_partner
from `tabCustomer`
where `tabCustomer`.disabled = 0
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
########################### Quotations Connected With Customer & Search ############################
if doctype == "Quotation" and con_doc == "Customer":
connections = frappe.db.sql(
""" select name, quotation_to, customer_name, transaction_date, grand_total, status
from `tabQuotation` where `party_name` = '{cur_nam}'
and (`tabQuotation`.name like '%{search_text}%' or `tabQuotation`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Opportunities Connected With Customer & Search ############################
if doctype == "Opportunity" and con_doc == "Customer":
connections = frappe.db.sql(
""" select name,opportunity_from,customer_name,transaction_date,opportunity_type,sales_stage,status
from `tabOpportunity` where `party_name` = '{cur_nam}'
and (`tabOpportunity`.name like '%{search_text}%' or `tabOpportunity`.customer_name like '%{search_text}%' or `tabOpportunity`.party_name like '%{search_text}%' or `tabOpportunity`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Sales Orders Connected With Customer & Search ############################
if doctype == "Sales Order" and con_doc == "Customer":
connections = frappe.db.sql(
""" select name,customer_name,customer_address,transaction_date,grand_total,status
from `tabSales Order` where `customer` = '{cur_nam}'
and (`tabSales Order`.name like '%{search_text}%' or `tabSales Order`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Delivery Notes Connected With Customer & Search ############################
if doctype == "Delivery Note" and con_doc == "Customer":
connections = frappe.db.sql(
""" select name,customer,territory,posting_date,set_warehouse,status
from `tabDelivery Note` where `customer` = '{cur_nam}'
and (`tabDelivery Note`.name like '%{search_text}%' or `tabDelivery Note`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Sales Invoices Connected With Customer & Search ############################
if doctype == "Sales Invoice" and con_doc == "Customer":
connections = frappe.db.sql(
""" select name,customer_name,customer_address,posting_date,grand_total,status
from `tabSales Invoice` where `customer` = '{cur_nam}'
and (`tabSales Invoice`.name like '%{search_text}%' or `tabSales Invoice`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Payment Entries Connected With Customer & Search ############################
if doctype == "Payment Entry" and con_doc == "Customer":
connections = frappe.db.sql(
""" select name,party_name,payment_type,mode_of_payment,posting_date,paid_amount,status
from `tabPayment Entry` where `party` = '{cur_nam}'
and (`tabPayment Entry`.name like '%{search_text}%' or `tabPayment Entry`.mode_of_payment like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
############################################ SALES ORDER ############################################
########################### Sales Order Full List & Search ############################
if doctype == "Sales Order" and con_doc == '%%':
conditions1 = {}
conditions = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["customer_name"] = ['like', search_text]
conditions1["customer"] = ['like', search_text]
conditions1["customer_address"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["customer"] = filter2
if filter3 != '%%':
conditions["delivery_status"] = filter3
if filter4 != '%%':
conditions["billing_status"] = filter4
if filter5 != '%%':
conditions["transaction_date"] = ['>=', filter5]
if filter6 != '%%':
conditions["transaction_date"] = ['<=', filter6]
query = frappe.db.get_list('Sales Order',
or_filters=conditions1,
filters=conditions,
fields=["name", "customer_name", "customer_address", "transaction_date",
"grand_total", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Sales Order" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabSales Order`.name like '%{search_text}%' or `tabSales Order`.customer_name like '%{search_text}%' or `tabSales Order`.customer like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabSales Order`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabSales Order`.customer = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabSales Order`.delivery_status = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabSales Order`.billing_status = '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and `tabSales Order`.transaction_date >= '{filter5}' ".format(filter5=filter5)
if filter6 != '%%':
conditions += " and `tabSales Order`.transaction_date <= '{filter6}' ".format(filter6=filter6)
query = frappe.db.sql(
""" select name, customer_name, customer_address, transaction_date,
grand_total, status
from `tabSales Order`
where `tabSales Order`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
########################### Sales Invoices Connected With Sales Order & Search ############################
if doctype == "Sales Invoice" and con_doc == "Sales Order":
connections = frappe.db.sql(
""" select distinct `tabSales Invoice`.name as name,`tabSales Invoice`.customer_name as customer_name,`tabSales Invoice`.customer_address as customer_address,
`tabSales Invoice`.posting_date as posting_date,`tabSales Invoice`.grand_total as grand_total,`tabSales Invoice`.status as status
from `tabSales Invoice` join `tabSales Invoice Item` on `tabSales Invoice`.name = `tabSales Invoice Item`.parent
where `tabSales Invoice Item`.sales_order = '{cur_nam}'
and (`tabSales Invoice`.name like '%{search_text}%' or `tabSales Invoice`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Delivery Notes Connected With Sales Order & Search ############################
if doctype == "Delivery Note" and con_doc == "Sales Order":
connections = frappe.db.sql(
""" select distinct `tabDelivery Note`.name as name,`tabDelivery Note`.customer as customer,`tabDelivery Note`.territory as territory,
`tabDelivery Note`.posting_date as posting_date,`tabDelivery Note`.set_warehouse as set_warehouse,`tabDelivery Note`.status as status
from `tabDelivery Note` join `tabDelivery Note Item` on `tabDelivery Note`.name = `tabDelivery Note Item`.parent
where `tabDelivery Note Item`.against_sales_order = '{cur_nam}'
and (`tabDelivery Note`.name like '%{search_text}%' or `tabDelivery Note`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Material Requests Connected With Sales Order & Search ############################
if doctype == "Material Request" and con_doc == "Sales Order":
connections = frappe.db.sql(
""" select distinct `tabMaterial Request`.name as name,`tabMaterial Request`.material_request_type as material_request_type,
`tabMaterial Request`.transaction_date as transaction_date,`tabMaterial Request`.set_warehouse as set_warehouse,`tabMaterial Request`.status as status
from `tabMaterial Request` join `tabMaterial Request Item` on `tabMaterial Request`.name = `tabMaterial Request Item`.parent
where `tabMaterial Request Item`.sales_order = '{cur_nam}'
and (`tabMaterial Request`.name like '%{search_text}%' or `tabMaterial Request`.material_request_type like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Purchase Orders Connected With Sales Order & Search ############################
if doctype == "Purchase Order" and con_doc == "Sales Order":
connections = frappe.db.sql(
""" select distinct `tabPurchase Order`.name as name,`tabPurchase Order`.supplier as supplier, `tabPurchase Order`.grand_total as grand_total,
`tabPurchase Order`.transaction_date as transaction_date,`tabPurchase Order`.set_warehouse as set_warehouse,`tabPurchase Order`.status as status
from `tabPurchase Order` join `tabPurchase Order Item` on `tabPurchase Order`.name = `tabPurchase Order Item`.parent
where `tabPurchase Order Item`.sales_order = '{cur_nam}'
and (`tabPurchase Order`.name like '%{search_text}%' or `tabPurchase Order`.supplier_address like '%{search_text}%' or `tabPurchase Order`.supplier like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Quotations Connected With Sales Order & Search ############################
if doctype == "Quotation" and con_doc == "Sales Order":
connections = frappe.db.sql(
""" select distinct `tabQuotation`.name as name, `tabQuotation`.quotation_to as quotation_to, `tabQuotation`.customer_name as customer_name,
`tabQuotation`.transaction_date as transaction_date, `tabQuotation`.grand_total as grand_total, `tabQuotation`.status as status
from `tabQuotation` join `tabSales Order Item` on `tabQuotation`.name = `tabSales Order Item`.prevdoc_docname
where `tabSales Order Item`.parent = '{cur_nam}'
and (`tabQuotation`.name like '%{search_text}%' or `tabQuotation`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Payment Entries Connected With Sales Order & Search ############################
if doctype == "Payment Entry" and con_doc == "Sales Order":
connections = frappe.db.sql(
""" select distinct `tabPayment Entry`.name as name,`tabPayment Entry`.party_name as party_name,
`tabPayment Entry`.payment_type as payment_type,`tabPayment Entry`.mode_of_payment as mode_of_payment,
`tabPayment Entry`.posting_date as posting_date,`tabPayment Entry`.paid_amount as paid_amount,`tabPayment Entry`.status as status
from `tabPayment Entry` join `tabPayment Entry Reference` on `tabPayment Entry`.name = `tabPayment Entry Reference`.parent
where `tabPayment Entry Reference`.reference_name = '{cur_nam}'
and (`tabPayment Entry`.name like '%{search_text}%' or `tabPayment Entry`.mode_of_payment like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
############################################ SALES INVOICE ############################################
########################### Sales Invoice Full List & Search ############################
if doctype == "Sales Invoice" and con_doc == '%%':
conditions1 = {}
conditions = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["customer_name"] = ['like', search_text]
conditions1["customer"] = ['like', search_text]
conditions1["customer_address"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["customer"] = filter2
if filter3 != '%%':
conditions["posting_date"] = ['>=', filter3]
if filter4 != '%%':
conditions["posting_date"] = ['<=', filter4]
query = frappe.db.get_list('Sales Invoice',
or_filters=conditions1,
filters=conditions,
fields=["name", "customer_name", "customer_address", "posting_date", "grand_total",
"status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Sales Invoice" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabSales Invoice`.name like '%{search_text}%' or `tabSales Invoice`.customer_name like '%{search_text}%' or `tabSales Invoice`.customer like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabSales Invoice`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabSales Invoice`.customer = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabSales Invoice`.posting_date >= '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabSales Invoice`.posting_date <= '{filter4}' ".format(filter4=filter4)
query = frappe.db.sql(
""" select name, customer_name, customer_address, posting_date, grand_total,
status
from `tabSales Invoice`
where `tabSales Invoice`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
########################### Sales Orders Connected With Sales Invoice & Search ############################
if doctype == "Sales Order" and con_doc == "Sales Invoice":
connections = frappe.db.sql(
""" select distinct `tabSales Order`.name as name,`tabSales Order`.customer_name as customer_name,`tabSales Order`.customer_address as customer_address,
`tabSales Order`.transaction_date as transaction_date,`tabSales Order`.grand_total as grand_total,`tabSales Order`.status as status
from `tabSales Order` join `tabSales Invoice Item` on `tabSales Order`.name = `tabSales Invoice Item`.sales_order
where `tabSales Invoice Item`.parent = '{cur_nam}'
and (`tabSales Order`.name like '%{search_text}%' or `tabSales Order`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Delivery Notes Connected With Sales Invoice & Search ############################
if doctype == "Delivery Note" and con_doc == "Sales Invoice":
connections = frappe.db.sql(
""" select distinct `tabDelivery Note`.name as name,`tabDelivery Note`.customer as customer,`tabDelivery Note`.territory as territory,
`tabDelivery Note`.posting_date as posting_date,`tabDelivery Note`.set_warehouse as set_warehouse,`tabDelivery Note`.status as status
from `tabDelivery Note` join `tabDelivery Note Item` on `tabDelivery Note`.name = `tabDelivery Note Item`.parent
where `tabDelivery Note Item`.against_sales_invoice = '{cur_nam}'
and (`tabDelivery Note`.name like '%{search_text}%' or `tabDelivery Note`.customer_address like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
########################### Payment Entries Connected With Sales Invoice & Search ############################
if doctype == "Payment Entry" and con_doc == "Sales Invoice":
connections = frappe.db.sql(
""" select distinct `tabPayment Entry`.name as name,`tabPayment Entry`.party_name as party_name,
`tabPayment Entry`.payment_type as payment_type,`tabPayment Entry`.mode_of_payment as mode_of_payment,
`tabPayment Entry`.posting_date as posting_date,`tabPayment Entry`.paid_amount as paid_amount,`tabPayment Entry`.status as status
from `tabPayment Entry` join `tabPayment Entry Reference` on `tabPayment Entry`.name = `tabPayment Entry Reference`.parent
where `tabPayment Entry Reference`.reference_name = '{cur_nam}'
and (`tabPayment Entry`.name like '%{search_text}%' or `tabPayment Entry`.mode_of_payment like '%{search_text}%') LIMIT {start},{page_length}
""".format(start=start, page_length=page_length, cur_nam=cur_nam, search_text=search_text), as_dict=1)
if connections:
return connections
else:
return "لا يوجد روابط !"
############################################ PAYMENT ENTRY ############################################
########################### Payment Entry Full List & Search ############################
if doctype == "Payment Entry" and con_doc == '%%':
conditions1 = {}
conditions = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["party_name"] = ['like', search_text]
conditions1["mode_of_payment"] = ['like', search_text]
conditions1["party"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["payment_type"] = filter2
if filter3 != '%%':
conditions["mode_of_payment"] = filter3
if filter4 != '%%':
conditions["party_type"] = filter4
if filter5 != '%%':
conditions["party"] = filter5
if filter6 != '%%':
conditions["posting_date"] = ['>=', filter6]
if filter7 != '%%':
conditions["posting_date"] = ['<=', filter7]
query = frappe.db.get_list('Payment Entry',
or_filters=conditions1,
filters=conditions,
fields=["name", "party_name", "payment_type", "mode_of_payment", "posting_date",
"paid_amount", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Payment Entry" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabPayment Entry`.name like '%{search_text}%' or `tabPayment Entry`.party_name like '%{search_text}%' or `tabPayment Entry`.party like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabPayment Entry`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabPayment Entry`.payment_type = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabPayment Entry`.mode_of_payment = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabPayment Entry`.party_type = '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and `tabPayment Entry`.party = '{filter5}' ".format(filter5=filter5)
if filter6 != '%%':
conditions += " and `tabPayment Entry`.posting_date >= '{filter6}' ".format(filter6=filter6)
if filter7 != '%%':
conditions += " and `tabPayment Entry`.posting_date <= '{filter7}' ".format(filter7=filter7)
query = frappe.db.sql(
""" select name, party_name, payment_type, mode_of_payment, posting_date,
paid_amount, status
from `tabPayment Entry`
where `tabPayment Entry`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
############################################ LEAD SOURCE ############################################
########################### Lead Source Full List & Search ############################
if doctype == "Lead Source" and con_doc == '%%':
query = frappe.db.get_list('Lead Source',
or_filters=[{'name': ['like', search_text]},
{'source_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ PROJECT ############################################
########################### Project Segment Full List & Search ############################
if doctype == "Project" and con_doc == '%%':
query = frappe.db.get_list('Project',
filters=[{'is_active': ['=', 'Yes']}],
or_filters=[{'name': ['like', search_text]},
{'project_name': ['like', search_text]}],
fields=["name", "project_name", "status"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ PAYMENT TERMS TEMPLATE ############################################
########################### Payment Terms Template Full List & Search ############################
if doctype == "Payment Terms Template" and con_doc == '%%':
query = frappe.db.get_list('Payment Terms Template',
or_filters=[{'name': ['like', search_text]},
{'template_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ MARKET SEGEMENT ############################################
########################### Market Segment Full List & Search ############################
if doctype == "Market Segment" and con_doc == '%%':
query = frappe.db.get_list('Market Segment',
or_filters=[{'name': ['like', search_text]},
{'market_segment': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ TERRITORY ############################################
########################### Territory Full List & Search ############################
if doctype == "Territory" and con_doc == '%%':
query = frappe.db.get_list('Territory',
filters=[{'is_group': ['=', 0]}],
or_filters=[{'name': ['like', search_text]},
{'territory_name': ['like', search_text]}],
fields=["name", "parent_territory"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ WAREHOUSE ############################################
########################### Warehouse Full List & Search ############################
if doctype == "Warehouse" and con_doc == '%%':
query = frappe.db.get_list('Warehouse',
filters=[{'is_group': ['=', 0]}],
or_filters=[{'name': ['like', search_text]},
{'warehouse_name': ['like', search_text]}],
fields=["name", "warehouse_name", "warehouse_type", "parent_warehouse"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ COUNTRY ############################################
########################### Country Full List & Search ############################
if doctype == "Country" and con_doc == '%%':
query = frappe.db.get_list('Country',
or_filters=[{'name': ['like', search_text]},
{'country_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ OPPORTUNITY TYPE ############################################
########################### Opportunity Type Full List & Search ############################
if doctype == "Opportunity Type" and con_doc == '%%':
query = frappe.db.get_list('Opportunity Type',
or_filters=[{'name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ CUSTOMER GROUP ############################################
########################### Customer Group Full List & Search ############################
if doctype == "Customer Group" and con_doc == '%%':
query = frappe.db.get_list('Customer Group',
filters=[{'is_group': ['=', 0]}],
or_filters=[{'name': ['like', search_text]},
{'customer_group_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ ITEM GROUP ############################################
########################### Item Group Full List & Search ############################
if doctype == "Item Group" and con_doc == '%%':
query = frappe.db.get_list('Item Group',
or_filters=[{'name': ['like', search_text]},
{'item_group_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ BRAND ############################################
########################### Brand Full List & Search ############################
if doctype == "Brand" and con_doc == '%%':
query = frappe.db.get_list('Brand',
or_filters=[{'name': ['like', search_text]},
{'brand': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ UOM ############################################
########################### UOM Full List & Search ############################
if doctype == "UOM" and con_doc == '%%':
query = frappe.db.get_list('UOM',
filters=[{'enabled': 1}],
or_filters=[{'name': ['like', search_text]},
{'uom_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ User ############################################
########################### User Full List & Search ############################
if doctype == "User" and con_doc == '%%':
query = frappe.db.get_list('User',
filters=[{'enabled': 1}],
or_filters=[{'name': ['like', search_text]},
{'full_name': ['like', search_text]}],
fields=["name", "full_name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ Stock Entry Type ############################################
########################### Stock Entry Type Full List & Search ############################
if doctype == "Stock Entry Type" and con_doc == '%%':
query = frappe.db.get_list('Stock Entry Type',
or_filters=[{'name': ['like', search_text]},
{'purpose': ['like', search_text]}],
fields=["name", "purpose"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ CAMPAIGN ############################################
########################### Campaign Full List & Search ############################
if doctype == "Campaign" and con_doc == '%%':
query = frappe.db.get_list('Campaign',
or_filters=[{'name': ['like', search_text]},
{'campaign_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ INDUSTRY TYPE ############################################
########################### Industry Type Full List & Search ############################
if doctype == "Industry Type" and con_doc == '%%':
query = frappe.db.get_list('Industry Type',
or_filters=[{'name': ['like', search_text]},
{'industry': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ CURRENCY ############################################
########################### Currency Full List & Search ############################
if doctype == "Currency" and con_doc == '%%':
query = frappe.db.get_list('Currency',
or_filters=[{'name': ['like', search_text]},
{'currency_name': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ SALES PARTNER ############################################
########################### Sales Partner Full List & Search ############################
if doctype == "Sales Partner" and con_doc == '%%':
query = frappe.db.get_list('Sales Partner',
or_filters=[{'name': ['like', search_text]},
{'partner_name': ['like', search_text]}],
fields=["name","commission_rate"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ TERMS & CONDITIONS ############################################
########################### Terms and Conditions Full List & Search ############################
if doctype == "Terms and Conditions" and con_doc == '%%':
query = frappe.db.get_list('Terms and Conditions',
or_filters=[{'name': ['like', search_text]},
{'title': ['like', search_text]}],
fields=["name"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ MODE OF PAYMENT ############################################
########################### Mode of Payment Full List & Search ############################
if doctype == "Mode of Payment" and con_doc == '%%':
query = frappe.db.get_list('Mode of Payment',
or_filters=[{'name': ['like', search_text]},
{'mode_of_payment': ['like', search_text]}],
fields=["name", "type"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ PRICE LIST ############################################
########################### Price List Full List & Search ############################
if doctype == "Price List" and con_doc == '%%':
query = frappe.db.get_list('Price List',
or_filters=[{'name': ['like', search_text]},
{'price_list_name': ['like', search_text]}],
fields=["name", "currency"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ COST CENTER ############################################
########################### Cost Center Full List & Search ############################
if doctype == "Cost Center" and con_doc == '%%':
query = frappe.db.get_list('Cost Center',
filters=[{'is_group': ['=', 0]}],
or_filters=[{'name': ['like', search_text]},
{'cost_center_name': ['like', search_text]}],
fields=["name", "cost_center_name", "parent_cost_center"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ ACCOUNT ############################################
########################### Account Full List & Search ############################
if doctype == "Account" and con_doc == '%%':
query = frappe.db.get_list('Account',
filters=[{'is_group': ['=', 0]}],
or_filters=[{'name': ['like', search_text]},
{'account_name': ['like', search_text]},
{'account_number': ['like', search_text]}],
fields=["name", "account_type", "root_type", "account_currency", "parent_account"],
order_by='name asc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ ITEM ############################################
########################### Item Full List & Search ############################
if doctype == "Item" and con_doc == '%%':
conditions = {}
conditions1 = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["item_name"] = ['like', search_text]
conditions1["item_code"] = ['like', search_text]
if filter1 != '%%':
conditions["item_group"] = filter1
if filter2 != '%%':
conditions["brand"] = filter2
if filter3 != '%%':
conditions["is_stock_item"] = filter3
if filter4 != '%%':
conditions["stock_uom"] = filter4
query = frappe.db.get_list('Item',
or_filters=conditions1,
filters=conditions,
fields=["name", "item_name", "item_group", "stock_uom", "image"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Item" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabItem`.name like '%{search_text}%' or `tabItem`.item_name like '%{search_text}%' or `tabItem`.item_code like '%{search_text}%') ".format(search_text=search_text)
if filter1 != '%%':
conditions += " and `tabItem`.item_group = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabItem`.brand = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabItem`.is_stock_item = '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabItem`.stock_uom = '{filter4}' ".format(filter4=filter4)
query = frappe.db.sql(
""" select name, item_name, item_group, stock_uom, image
from `tabItem`
where `tabItem`.disabled = 0
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
############################################ MATERIAL REQUEST ############################################
########################### Material Request Full List & Search ############################
if doctype == "Material Request" and con_doc == '%%':
query = frappe.db.get_list('Material Request',
or_filters=[{'name': ['like', search_text]},
{'title': ['like', search_text]}],
fields=["name", "material_request_type", "transaction_date", "set_warehouse",
"status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ STOCK ENTRY ############################################
########################### Stock Entry Full List & Search ############################
if doctype == "Stock Entry" and con_doc == '%%':
conditions = {}
conditions1 = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
if filter1 != '%%':
conditions["docstatus"] = filter1
if filter2 != '%%':
conditions["stock_entry_type"] = filter2
if filter3 != '%%':
conditions["posting_date"] = ['>=', filter3]
if filter4 != '%%':
conditions["posting_date"] = ['<=', filter4]
if filter5 != '%%':
conditions["from_warehouse"] = filter5
if filter6 != '%%':
conditions["to_warehouse"] = filter6
query = frappe.db.get_list('Stock Entry',
or_filters=conditions1,
filters=conditions,
fields=["name", "stock_entry_type", "posting_date", "from_warehouse", "to_warehouse",
"docstatus"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Stock Entry" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`tabStock Entry`.name like '%{search_text}%' or `tabStock Entry`.title like '%{search_text}%') ".format(search_text=search_text)
if filter1 != '%%':
conditions += " and `tabStock Entry`.docstatus = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabStock Entry`.stock_entry_type = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabStock Entry`.posting_date >= '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabStock Entry`.posting_date <= '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and `tabStock Entry`.from_warehouse = '{filter5}' ".format(filter5=filter5)
if filter6 != '%%':
conditions += " and `tabStock Entry`.to_warehouse = '{filter6}' ".format(filter6=filter6)
query = frappe.db.sql(
""" select name, stock_entry_type, posting_date, from_warehouse, to_warehouse, docstatus
from `tabStock Entry`
where `tabStock Entry`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
############################################ PURCHASE RECEIPT ############################################
########################### Purchase Receipt Full List & Search ############################
if doctype == "Purchase Receipt" and con_doc == '%%':
query = frappe.db.get_list('Purchase Receipt',
or_filters=[{'name': ['like', search_text]},
{'title': ['like', search_text]},
{'supplier': ['like', search_text]},
{'supplier_name': ['like', search_text]}],
fields=["name", "supplier", "posting_date", "set_warehouse", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ DELIVERY NOTE ############################################
########################### Delivery Note Full List & Search ############################
if doctype == "Delivery Note" and con_doc == '%%':
conditions = {}
conditions1 = {}
if search_text != '%%':
conditions1["name"] = ['like', search_text]
conditions1["title"] = ['like', search_text]
conditions1["customer"] = ['like', search_text]
conditions1["customer_name"] = ['like', search_text]
if filter1 != '%%':
conditions["status"] = filter1
if filter2 != '%%':
conditions["customer"] = filter2
if filter3 != '%%':
conditions["posting_date"] = ['>=', filter3]
if filter4 != '%%':
conditions["posting_date"] = ['<=', filter4]
if filter5 != '%%':
conditions["set_warehouse"] = filter5
query = frappe.db.get_list('Delivery Note',
or_filters=conditions1,
filters=conditions,
fields=["name", "customer", "territory", "posting_date", "set_warehouse", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
'''
if doctype == "Delivery Note" and con_doc == '%%':
conditions = ""
if search_text != '%%':
conditions += " and (`taDelivery Note`.name like '%{search_text}%' or `taDelivery Note`.title like '%{search_text}%' or `tabDelivery Note`.customer_name like '%{search_text}%' or `tabDelivery Note`.customer like '%{search_text}%') ".format(
search_text=search_text)
if filter1 != '%%':
conditions += " and `tabDelivery Note`.status = '{filter1}' ".format(filter1=filter1)
if filter2 != '%%':
conditions += " and `tabDelivery Note`.customer = '{filter2}' ".format(filter2=filter2)
if filter3 != '%%':
conditions += " and `tabDelivery Note`.posting_date >= '{filter3}' ".format(filter3=filter3)
if filter4 != '%%':
conditions += " and `tabDelivery Note`.posting_date <= '{filter4}' ".format(filter4=filter4)
if filter5 != '%%':
conditions += " and `tabDelivery Note`.set_warehouse = '{filter5}' ".format(filter5=filter5)
query = frappe.db.sql(
""" select name, customer, territory, posting_date, set_warehouse, status
from `tabDelivery Note`
where `tabDelivery Note`.docstatus in (0, 1, 2)
{conditions}
order by modified desc
LIMIT {start},{page_length}
""".format(conditions=conditions, start=start, page_length=page_length), as_dict=1)
if query:
return query
else:
return "لا يوجد !"
'''
############################################ SUPPLIER ############################################
########################### Supplier Full List & Search ############################
if doctype == "Supplier" and con_doc == '%%':
query = frappe.db.get_list('Supplier',
or_filters=[{'name': ['like', search_text]},
{'supplier_name': ['like', search_text]},
{'mobile_no': ['like', search_text]}],
fields=["name", "supplier_name", "supplier_group", "supplier_type", "country",
"mobile_no"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ SUPPLIER QUOTATION ############################################
########################### Supplier Quotation Full List & Search ############################
if doctype == "Supplier Quotation" and con_doc == '%%':
query = frappe.db.get_list('Supplier Quotation',
or_filters=[{'name': ['like', search_text]},
{'supplier': ['like', search_text]}],
fields=["name", "supplier", "transaction_date", "valid_till", "grand_total",
"status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ PURCHASE ORDER ############################################
########################### Purchase Order Full List & Search ############################
if doctype == "Purchase Order" and con_doc == '%%':
query = frappe.db.get_list('Purchase Order',
or_filters=[{'name': ['like', search_text]},
{'supplier': ['like', search_text]}],
fields=["name", "supplier", "transaction_date", "set_warehouse", "grand_total",
"status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ PURCHASE INVOICE ############################################
########################### Purchase Invoice Full List & Search ############################
if doctype == "Purchase Invoice" and con_doc == '%%':
query = frappe.db.get_list('Purchase Invoice',
or_filters=[{'name': ['like', search_text]},
{'supplier': ['like', search_text]}],
fields=["name", "supplier", "posting_date", "grand_total", "status"],
order_by='modified desc',
start=start,
page_length=page_length
)
if query:
return query
else:
return "لا يوجد !"
############################################ ADDRESS ############################################
########################### Filtered Address List & Search ############################
if doctype == "Address" and con_doc == '%%':
addresses = frappe.db.get_list('Dynamic Link', filters={'link_name': cur_nam}, fields=['parent'])
result = []
for d in addresses:
query = frappe.db.sql(""" select name as name ,
address_title as address_title,
address_line1 as address_line1,
city as city,
phone as phone
from tabAddress where name = '{filtered}'
and (address_title like '{search_text}' or address_line1 like '{search_text}'
or city like '{search_text}' or phone like '{search_text}') LIMIT {start},{page_length}
""".format(filtered=d.parent, search_text=search_text, start=start, page_length=page_length,), as_dict=1)
for x in query:
data = {
'name': x.name,
'address_title': x.address_title,
'address_line1': x.address_line1,
'city': x.city,
'phone': x.phone
}
result.append(data)
if result:
return result
else:
return "لا يوجد !"
############################################ CONTACT ############################################
########################### Filtered Contact List & Search ############################
if doctype == "Contact" and con_doc == '%%':
contacts = frappe.db.get_list('Dynamic Link', filters={'link_name': cur_nam}, fields=['parent'])
result = []
for d in contacts:
query = frappe.db.sql(""" select name as name ,
email_id as email_id,
mobile_no as mobile_no,
phone as phone,
company_name as company_name
from tabContact where name = '{filtered}'
and (name like '{search_text}' or email_id like '{search_text}'
or mobile_no like '{search_text}' or phone like '{search_text}'
or company_name like '{search_text}') LIMIT {start},{page_length}
""".format(filtered=d.parent, search_text=search_text, start=start, page_length=page_length,), as_dict=1)
for x in query:
data = {
'name': x.name,
'email_id': x.email_id,
'mobile_no': x.mobile_no,
'company_name': x.company_name,
'phone': x.phone
}
result.append(data)
if result:
return result
else:
return "لا يوجد !"
|
import numpy as np
from subriemannian_qc.matrix_util import conj_transpose
'''
Input validation utility functions.
'''
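# is_matrix() below actually checks for a *square* 2-D array, which is what the unitarity test requires.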
def is_matrix(a: np.ndarray) -> bool:
return len(a.shape) == 2 and a.shape[0] == a.shape[1]
# Not very efficient, but gets the job done
def is_unitary_matrix(a: np.ndarray) -> bool:
if not is_matrix(a):
return False
return np.isclose(np.linalg.inv(a), conj_transpose(a)).all()
|
#Non-Boolean and operator
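# `x and y` short-circuits: it returns x if x is falsy, otherwise y,
# so the four prints below output 5, 0, 0 and 'python' respectively.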
print(10 and 5) # --A1
print(0 and 12) # -- A2
print(13 and 0) # --A3
print(12 and 'python') # --A4
|
from stanza.research.metrics import *
# TODO: define new metrics
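# Collect every public name pulled in by the star-import above (the 'np' alias is filtered out) into a name -> object mapping.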
METRICS = {
name: globals()[name]
for name in dir()
if (name not in ['np']
and not name.startswith('_'))
}
|
from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
import numpy as np
from sklearn.datasets import load_iris, load_breast_cancer, load_boston
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import cross_val_predict
from xcessiv import functions
from xcessiv.presets import metricsetting
clf = LogisticRegression(random_state=8)
multiclass_X, multiclass_y = load_iris(return_X_y=True)
multiclass_preds = cross_val_predict(clf, multiclass_X, multiclass_y, method='predict')
multiclass_probas = cross_val_predict(clf, multiclass_X, multiclass_y, method='predict_proba')
binary_X, binary_y = load_breast_cancer(return_X_y=True)
binary_preds = cross_val_predict(clf, binary_X, binary_y, method='predict')
binary_probas = cross_val_predict(clf, binary_X, binary_y, method='predict_proba')
regression_X, regression_y = load_boston(return_X_y=True)
reg = LinearRegression()
regression_preds = cross_val_predict(reg, regression_X, regression_y, method='predict')
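# Note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2, so these fixtures assume an older sklearn;
# the hard-coded expected scores below also depend on that environment.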
class TestAccuracyFromScores(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.accuracy_from_scores['source'])
assert np.round(module.metric_generator(binary_y, binary_probas), 2) == 0.95
assert np.round(module.metric_generator(multiclass_y, multiclass_probas), 2) == 0.95
del module
class TestAccuracyFromPreds(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.accuracy_from_preds['source'])
assert np.round(module.metric_generator(binary_y, binary_preds), 2) == 0.95
assert np.round(module.metric_generator(multiclass_y, multiclass_preds), 2) == 0.95
del module
class TestRecallFromScores(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.recall_from_scores['source'])
assert np.round(module.metric_generator(binary_y, binary_probas), 2) == 0.97
assert np.round(module.metric_generator(multiclass_y, multiclass_probas), 2) == 0.95
del module
class TestRecallFromPreds(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.recall_from_preds['source'])
assert np.round(module.metric_generator(binary_y, binary_preds), 2) == 0.97
assert np.round(module.metric_generator(multiclass_y, multiclass_preds), 2) == 0.95
del module
class TestPrecisionFromScores(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.precision_from_scores['source'])
assert np.round(module.metric_generator(binary_y, binary_probas), 2) == 0.95
assert np.round(module.metric_generator(multiclass_y, multiclass_probas), 2) == 0.95
del module
class TestPrecisionFromPreds(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.precision_from_preds['source'])
assert np.round(module.metric_generator(binary_y, binary_preds), 2) == 0.95
assert np.round(module.metric_generator(multiclass_y, multiclass_preds), 2) == 0.95
del module
class TestF1ScoreFromScores(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.f1_score_from_scores['source'])
assert np.round(module.metric_generator(binary_y, binary_probas), 2) == 0.96
assert np.round(module.metric_generator(multiclass_y, multiclass_probas), 2) == 0.95
del module
class TestF1ScoreFromPreds(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.f1_score_from_preds['source'])
assert np.round(module.metric_generator(binary_y, binary_preds), 2) == 0.96
assert np.round(module.metric_generator(multiclass_y, multiclass_preds), 2) == 0.95
del module
class TestROCAUCFromScores(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(
metricsetting.roc_auc_score_from_scores['source']
)
assert np.round(module.metric_generator(binary_y, binary_probas), 2) == 0.99
assert np.round(module.metric_generator(multiclass_y, multiclass_probas), 2) == 0.99
del module
class TestMAE(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.mae['source'])
assert np.round(module.metric_generator(regression_y, regression_preds), 2) == 6.99
del module
class TestMSE(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.mse['source'])
assert np.round(module.metric_generator(regression_y, regression_preds), 2) == 168.09
del module
class TestMedianAbsoluteError(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.median_absolute_error['source'])
assert np.round(module.metric_generator(regression_y, regression_preds), 2) == 3.72
del module
class TestR2Score(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.r2_score['source'])
assert np.round(module.metric_generator(regression_y, regression_preds), 2) == -0.99
del module
class TestExplainedVarianceScore(unittest.TestCase):
def test_source(self):
module = functions.import_string_code_as_module(metricsetting.explained_variance_score['source'])
assert np.round(module.metric_generator(regression_y, regression_preds), 2) == -0.89
del module
|
from __future__ import absolute_import, division, print_function
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_TRAIN_DOWNLOAD_URL = f"data/covid_info/train.json"
_VAL_DOWNLOAD_URL = f"data/covid_info/val.json"
class CovidDataConfig(datasets.BuilderConfig):
def __init__(
self,
**kwargs,
):
# self.second_choice=kwargs.pop("second_choice",None)
super(CovidDataConfig, self).__init__(version=datasets.Version("0.0.0", ""), **kwargs)
class CovidData(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
CovidDataConfig(
name="default",
description="",
),
]
"""customize dataset."""
# VERSION = datasets.Version("0.0.0")
def _info(self):
data_info = datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"source": datasets.Value("string"),
"target": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="#",
citation=_CITATION,
)
return data_info
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
]
def _generate_examples(self, filepath):
with open(filepath, encoding='utf-8') as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"source": data["text"],
"target": data["label"],
}
|
#!/usr/bin/env python
"""
This script accepts Kraken/Kaiju output and a file listing
all taxids, and gives a .csv file with taxid statistics
"""
__author__ = "Paul Donovan"
__maintainer__ = "Paul Donovan"
__email__ = "[email protected]"
import sys
from ete3 import NCBITaxa
import argparse
#Display help and usage
parser = argparse.ArgumentParser(description="Accepts Kraken/Kaiju output and a list of all taxids, and writes per-taxid statistics to a CSV")
parser.add_argument('KrakenOutput.tsv')
parser.add_argument('Taxids.txt')
parser.add_argument('Output.tsv')
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
ncbi = NCBITaxa()
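# NCBITaxa() uses a local copy of the NCBI taxonomy database and may download/build it on first use.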
ResultsTuple = tuple(line.strip().split("\t") for line in open(sys.argv[1]))
AllTaxidSet = tuple(line.strip() for line in open(sys.argv[2]))
Output = open(sys.argv[3], "w")
Output.write("#Taxon name,Taxid,Reads mapping to taxid,Reads mapping to children taxids,Pearson skewness score,Percent of pseudo-chromosomes with read hits\n")
AllTaxidDict = dict.fromkeys(AllTaxidSet, 0)
ResultTaxids = list()
TaxidPearsonDict = dict()
for line in ResultsTuple:
ResultTaxid = line[2]
Pearson = line[5] + "___" + line[6]
ResultTaxids.append(ResultTaxid)
if ResultTaxid in AllTaxidDict:
OldValue = int(AllTaxidDict[ResultTaxid])
AllTaxidDict[ResultTaxid] = OldValue + 1
if ResultTaxid in TaxidPearsonDict:
pass
else:
TaxidPearsonDict[ResultTaxid] = Pearson
for k, v in AllTaxidDict.items():
ChildrenCount = 0
    try:
        descendants = ncbi.get_lineage(str(k))
    except ValueError:
        # some taxids are absent from the local NCBI taxonomy; treat them as having no lineage
        descendants = []
taxid = list([k])
name = ncbi.get_taxid_translator(taxid)
for i in descendants:
if str(i) in AllTaxidDict:
ChildrenCount = ChildrenCount + int(AllTaxidDict[str(i)])
    PearsonSkewness = TaxidPearsonDict.get(k)
    if not PearsonSkewness:
        Output.write(str(next(iter(name.values()))) + "," + str(k) + "," + str(v) + "," + str(ChildrenCount) + ",N/A (Pearson score only calculated for leaf nodes)\n")
    else:
        Skewness = (PearsonSkewness.strip().split("___"))[0]
        HitDist = (PearsonSkewness.strip().split("___"))[1]
        Output.write(str(next(iter(name.values()))) + "," + str(k) + "," + str(v) + "," + str(ChildrenCount) + "," + str(Skewness) + "," + str(HitDist) + "\n")
Output.close()
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from typing import Type
def add_l2_regularization(optimizer_class: Type[tf.keras.optimizers.Optimizer],
l2_regularization: float) -> Type[tf.keras.optimizers.Optimizer]:
class L2Regularizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(L2Regularizer, self).__init__(*args, **kwargs)
self.l2_regularization = l2_regularization
def _resource_apply_dense(self, grad, var, apply_state):
return super()._resource_apply_dense(grad + var * self.l2_regularization, var, apply_state)
return L2Regularizer
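# Note: only _resource_apply_dense is overridden above, so sparse gradient updates are not L2-regularized.
# Illustrative usage (the optimizer choice and 0.01 coefficient are examples, not part of the original code):
#   RegularizedSGD = add_l2_regularization(tf.keras.optimizers.SGD, l2_regularization=0.01)
#   opt = RegularizedSGD(learning_rate=0.1)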
|
import unittest
from pychess.element.piecer import generate_pieces, Piece, get_piece_row_place
from pychess import constant as c
class TestPiecer(unittest.TestCase):
def test_sort_pieces(self):
import itertools
pieces = sorted([
Piece(pd[0], pd[1])
for pd in itertools.product(c.PieceType, c.Color)
])
expected_result = [
'<Piece(black pawn 0)>', '<Piece(white pawn 0)>',
'<Piece(black knight 0)>', '<Piece(white knight 0)>',
'<Piece(black bishop 0)>', '<Piece(white bishop 0)>',
'<Piece(black rook 0)>', '<Piece(white rook 0)>',
'<Piece(black queen 0)>', '<Piece(white queen 0)>',
'<Piece(black king 0)>', '<Piece(white king 0)>',
]
self.assertEqual([repr(p) for p in pieces], expected_result)
def test_generate_pieces(self):
pieces = generate_pieces()
w_pieces = [p for p in pieces if p.color == c.Color.white]
b_pieces = [p for p in pieces if p.color == c.Color.black]
self.assertTrue(len(w_pieces) == len(b_pieces) == 16)
w_pawns = [p for p in w_pieces if p.type == c.PieceType.pawn]
b_pawns = [p for p in b_pieces if p.type == c.PieceType.pawn]
self.assertTrue(len(w_pawns) == len(b_pawns) == 8)
w_knights = [p for p in w_pieces if p.type == c.PieceType.knight]
b_knights = [p for p in b_pieces if p.type == c.PieceType.knight]
self.assertTrue(len(w_knights) == len(b_knights) == 2)
w_bishops = [p for p in w_pieces if p.type == c.PieceType.bishop]
b_bishops = [p for p in b_pieces if p.type == c.PieceType.bishop]
self.assertTrue(len(w_bishops) == len(b_bishops) == 2)
w_rooks = [p for p in w_pieces if p.type == c.PieceType.rook]
b_rooks = [p for p in b_pieces if p.type == c.PieceType.rook]
self.assertTrue(len(w_rooks) == len(b_rooks) == 2)
w_queens = [p for p in w_pieces if p.type == c.PieceType.queen]
b_queens = [p for p in b_pieces if p.type == c.PieceType.queen]
self.assertTrue(len(w_queens) == len(b_queens) == 1)
w_kings = [p for p in w_pieces if p.type == c.PieceType.king]
b_kings = [p for p in b_pieces if p.type == c.PieceType.king]
self.assertTrue(len(w_kings) == len(b_kings) == 1)
def test_name(self):
expected_result = ['pawn', 'knight', 'bishop', 'rook', 'queen', 'king']
pieces = [Piece(p, c.Color.black) for p in c.PieceType]
names = [p.name for p in pieces]
self.assertEqual(names, expected_result)
def test_code(self):
expected_result = ['p', 'n', 'b', 'r', 'q', 'k']
pieces = [Piece(p, c.Color.black) for p in c.PieceType]
codes = [p.code for p in pieces]
self.assertEqual(codes, expected_result)
def test_color(self):
p = Piece(c.PieceType.pawn, color=c.Color.black)
self.assertEqual(p.color, c.Color.black)
p = Piece(c.PieceType.pawn, color=c.Color.white)
self.assertEqual(p.color, c.Color.white)
def test_color_code(self):
p = Piece(c.PieceType.pawn, color=c.Color.black)
self.assertEqual(p.color_code, 'b')
p = Piece(c.PieceType.pawn, color=c.Color.white)
self.assertEqual(p.color_code, 'w')
def test_worth(self):
expected_result = [1, 3, 3, 5, 9, 10]
pieces = [Piece(p, c.Color.black) for p in c.PieceType]
worths = [p.worth for p in pieces]
self.assertEqual(worths, expected_result)
def test_type(self):
pieces = [Piece(p, c.Color.black) for p in c.PieceType]
types = [p.type for p in pieces]
self.assertEqual(types, [t for t in c.PieceType])
def test_nb_pieces(self):
expected_result = [8, 2, 2, 2, 1, 1]
pieces = [Piece(p, c.Color.black) for p in c.PieceType]
nb_pieces = [p.nb_pieces for p in pieces]
self.assertEqual(nb_pieces, expected_result)
def test_order(self):
nb_pieces = {
c.PieceType.pawn: 8,
c.PieceType.knight: 2,
c.PieceType.bishop: 2,
c.PieceType.rook: 2,
c.PieceType.queen: 1,
c.PieceType.king: 1,
}
for piece_type in nb_pieces.keys():
pieces = []
orders = []
for i in range(nb_pieces[piece_type]):
pieces.append(Piece(piece_type, c.Color.black, order=i))
orders = [p.order for p in pieces]
expected_result = list(range(nb_pieces[piece_type]))
self.assertEqual(orders, expected_result)
def test_uid(self):
expected_result = [
'0bb', '0bw', '0kb', '0kw', '0nb', '0nw', '0pb', '0pw', '0qb',
'0qw', '0rb', '0rw', '1bb', '1bw', '1nb', '1nw', '1pb', '1pw',
'1rb', '1rw', '2pb', '2pw', '3pb', '3pw', '4pb', '4pw', '5pb',
'5pw', '6pb', '6pw', '7pb', '7pw'
]
pieces = generate_pieces()
uids = sorted([p.uid for p in pieces])
self.assertEqual(uids, expected_result)
def test_hash(self):
import itertools
piece_data = [d for d in itertools.product(c.PieceType, c.Color)]
expected_results = [
(d[0].value * 100) + (d[1].value * 10) + (0) # default order
for d in piece_data
]
pieces = [Piece(piece_type=d[0], color=d[1]) for d in piece_data]
for piece, expected_result in zip(pieces, expected_results):
try:
self.assertEqual(hash(piece), expected_result)
except AssertionError:
print(f'{piece} != {expected_result}')
raise
def test_equals(self):
p1 = Piece(c.PieceType.pawn, c.Color.black)
p2 = Piece(c.PieceType.pawn, c.Color.black)
self.assertTrue(p1 is not p2)
self.assertEqual(p1, p2)
def test_not_equals(self):
p1 = Piece(c.PieceType.rook, c.Color.black)
p2 = Piece(c.PieceType.rook, c.Color.black, order=1)
self.assertNotEqual(p1, p2)
def test_greater(self):
p1 = Piece(c.PieceType.king, c.Color.black)
p2 = Piece(c.PieceType.queen, c.Color.white)
self.assertGreater(p1, p2)
p1 = Piece(c.PieceType.king, c.Color.white)
p2 = Piece(c.PieceType.king, c.Color.black)
self.assertGreater(p1, p2)
def test_less(self):
p1 = Piece(c.PieceType.queen, c.Color.black)
p2 = Piece(c.PieceType.king, c.Color.white)
self.assertLess(p1, p2)
p1 = Piece(c.PieceType.queen, c.Color.black)
p2 = Piece(c.PieceType.queen, c.Color.white)
self.assertLess(p1, p2)
def test_greater_equals(self):
p1 = Piece(c.PieceType.king, c.Color.black)
p2 = Piece(c.PieceType.queen, c.Color.white)
self.assertGreaterEqual(p1, p2)
p1 = Piece(c.PieceType.king, c.Color.white)
p2 = Piece(c.PieceType.king, c.Color.white)
self.assertGreaterEqual(p1, p2)
def test_less_equals(self):
p1 = Piece(c.PieceType.queen, c.Color.black)
p2 = Piece(c.PieceType.king, c.Color.white)
self.assertLessEqual(p1, p2)
p1 = Piece(c.PieceType.king, c.Color.white)
p2 = Piece(c.PieceType.king, c.Color.white)
self.assertLessEqual(p1, p2)
def test_get_pieces_row_place(self):
places = {
(c.PieceType.rook, 0): 0,
(c.PieceType.knight, 0): 1,
(c.PieceType.bishop, 0): 2,
(c.PieceType.queen, 0): 3,
(c.PieceType.king, 0): 4,
(c.PieceType.bishop, 1): 5,
(c.PieceType.knight, 1): 6,
(c.PieceType.rook, 1): 7,
}
for piece in generate_pieces():
if piece.type == c.PieceType.pawn:
expected_result = piece.order
else:
expected_result = places[(piece.type, piece.order)]
row_place = get_piece_row_place(piece)
self.assertEqual(row_place, expected_result)
def test_first_row(self):
pieces = generate_pieces()
expected_result = None
for p in pieces:
if p.color == c.Color.black:
expected_result = 7
if p.type == c.PieceType.pawn:
expected_result = 6
else:
expected_result = 0
if p.type == c.PieceType.pawn:
expected_result = 1
self.assertEqual(p.first_row, expected_result)
def test_move_path(self):
expected_paths = {
c.PieceType.pawn: (((-1, 1), ), ((0, 1), ), ((1, 1), )),
c.PieceType.knight: (
((1, 2), ), ((1, -2), ), ((-1, 2), ), ((-1, -2), ),
((2, 1), ), ((2, -1), ), ((-2, 1), ), ((-2, -1), ),
),
c.PieceType.bishop: (
(
(-1, -1), (-2, -2), (-3, -3), (-4, -4), (-5, -5), (-6, -6),
(-7, -7),
),
(
(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7),
),
(
(-1, 1), (-2, 2), (-3, 3), (-4, 4), (-5, 5), (-6, 6),
(-7, 7),
),
(
(1, -1), (2, -2), (3, -3), (4, -4), (5, -5), (6, -6),
(7, -7),
),
),
c.PieceType.rook: (
((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7)),
(
(0, -1), (0, -2), (0, -3), (0, -4), (0, -5), (0, -6),
(0, -7)
),
((1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0)),
(
(-1, 0), (-2, 0), (-3, 0), (-4, 0), (-5, 0), (-6, 0),
(-7, 0)
),
),
c.PieceType.queen: (
(
(-1, -1), (-2, -2),
(-3, -3), (-4, -4), (-5, -5), (-6, -6),
(-7, -7),
),
(
(1, 1), (2, 2), (3, 3), (4, 4), (5, 5),
(6, 6), (7, 7),
),
(
(-1, 1), (-2, 2), (-3, 3), (-4, 4),
(-5, 5), (-6, 6),
(-7, 7),
),
(
(1, -1), (2, -2), (3, -3), (4, -4),
(5, -5), (6, -6),
(7, -7),
),
(
(0, 1), (0, 2), (0, 3), (0, 4), (0, 5),
(0, 6), (0, 7)
),
(
(0, -1), (0, -2), (0, -3), (0, -4), (0, -5),
(0, -6), (0, -7)
),
(
(1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
(6, 0), (7, 0)
),
(
(-1, 0), (-2, 0), (-3, 0), (-4, 0),
(-5, 0), (-6, 0), (-7, 0)
),
),
c.PieceType.king: (
((-1, 1), ), ((0, 1), ), ((1, 1), ), ((1, 0), ),
((1, -1), ), ((0, -1), ), ((-1, -1), ), ((-1, 0), ),
),
}
for p in generate_pieces():
expected_result = expected_paths[p.type]
self.assertEqual(p.move_paths, expected_result)
if __name__ == "__main__":
unittest.main()
|
# Author: Andreas Putz
# Copyright (c) 2013, PythonFCST
# License: TBD.
r"""
****************************************************************
:mod:`PythonFCST.mesh`: Mesh Generation Classes for PythonFCST
****************************************************************
.. module:: PythonFCST.mesh
Contents
--------
The PythonFCST package imports all the functions from the top level modules.
Import
------
>>> import PythonFCST as fcst
Submodules
----------
.. automodule:: PythonFCST.mesh.GridGenerator
:members:
:undoc-members:
:show-inheritance:
"""
from .GridGenerator import GridGenerator
from .PhaseGenerator import PhaseGenerator
from .distanceSD import distanceSD |
import FWCore.ParameterSet.Config as cms
HCALHighEnergyFilter = cms.EDFilter("HCALHighEnergyFilter",
JetTag = cms.InputTag("iterativeCone5CaloJets"),
JetThreshold = cms.double(20),
EtaCut = cms.double(3.0)
# CentralJets = cms.untracked.InputTag("hltL1extraParticles","Central"),
# TauJets = cms.untracked.InputTag("hltL1extraParticles","Tau")
)
|
import json
import os
from copy import deepcopy
from main import main, parse_args
from utils import get_stats
def load_config(path="./grid_search_config.json"):
with open(path, "r") as f:
return json.load(f)
def run_experiments(args):
res = []
for i in range(args.num_trials):
print("Trial {}/{}".format(i + 1, args.num_trials))
acc, _ = main(args)
res.append(acc)
mean, err_bd = get_stats(res, conf_interval=True)
return mean, err_bd
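# grid_search sweeps the cartesian product of the hyperparameters listed in the config (architecture, hidden size,
# pool ratio, learning rate, weight decay) for each dataset and writes the best setting and score to ./output/<dataset>.log.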
def grid_search(config:dict):
args = parse_args()
results = {}
for d in config["dataset"]:
args.dataset = d
best_acc, err_bd = 0., 0.
best_args = vars(args)
for arch in config["arch"]:
args.architecture = arch
for hidden in config["hidden"]:
args.hid_dim = hidden
for pool_ratio in config["pool_ratio"]:
args.pool_ratio = pool_ratio
for lr in config["lr"]:
args.lr = lr
for weight_decay in config["weight_decay"]:
args.weight_decay = weight_decay
acc, bd = run_experiments(args)
if acc > best_acc:
best_acc = acc
err_bd = bd
best_args = deepcopy(vars(args))
args.output_path = "./output"
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
args.output_path = "./output/{}.log".format(d)
result = {
"params": best_args,
"result": "{:.4f}({:.4f})".format(best_acc, err_bd)
}
with open(args.output_path, "w") as f:
json.dump(result, f, sort_keys=True, indent=4)
if __name__ == "__main__":
    grid_search(load_config())
|
#!/usr/bin/env python
import click
import os
import codecs
import shutil
from bs4 import UnicodeDammit
from functools import partial
from multiprocessing import Pool
from nlppln.utils import create_dirs, get_files, out_file_name
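# check_file reads a file as UTF-8; if that fails it reports the encoding guessed by UnicodeDammit and, with
# --convert, writes a UTF-8 copy of every input file to out_dir.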
def check_file(in_file, convert, out_dir):
fo = out_file_name(out_dir, in_file)
try:
with codecs.open(in_file, encoding='utf-8') as f:
text = f.read()
if convert:
# don't copy if it's the same file
if os.path.abspath(in_file) != fo:
shutil.copy2(in_file, fo)
except UnicodeDecodeError:
with codecs.open(in_file, 'rb') as f:
text = f.read()
dammit = UnicodeDammit(text)
print('{}: {}'.format(in_file, dammit.original_encoding))
if convert:
with codecs.open(fo, 'w', encoding='utf-8') as f:
f.write(dammit.unicode_markup)
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--convert/--no-convert', default=False)
@click.option('--processes', '-p', default=1)
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
def check_utf8(in_dir, convert, processes, out_dir):
create_dirs(out_dir)
in_files = get_files(in_dir)
check = partial(check_file, convert=convert, out_dir=out_dir)
pool = Pool(processes=processes)
pool.map(check, in_files)
if __name__ == '__main__':
check_utf8()
|
from django.urls import reverse
from django.views import generic as views
from testing_demos.web.models import Profile
class ProfileCreateView(views.CreateView):
model = Profile
fields = '__all__'
template_name = 'profiles/create.html'
def get_success_url(self):
return reverse('details profile', kwargs={'pk': self.object.pk})
class ProfilesListView(views.ListView):
model = Profile
template_name = 'profiles/list.html'
context_user_key = 'user'
no_logged_in_user_value = 'No user'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
if self.request.user.is_authenticated:
context[self.context_user_key] = self.request.user.username
else:
context[self.context_user_key] = self.no_logged_in_user_value
return context
# def get_queryset(self):
# return super().get_queryset() \
# .prefetch_related('') \
# .filter()
class ProfileDetailsView(views.DetailView):
model = Profile
template_name = 'profiles/details.html'
|
from django.db import models
from customer.models import Customer, Department
from engineer.models import Area, Engineer
from django.utils import timezone
from datetime import datetime, timedelta
from django.db.transaction import atomic
# Create your models here.
#print(datetime.now()+datetime)
speed_choices=((20,20),(30,30),(35,35),(45,45),(55,55),(65,65),(75,75),(90,90))
class MachineDetail(models.Model):
name= models.CharField(max_length=200)
    serial = models.IntegerField('standard serial', help_text='fill this if your serial is numbers only', unique=True, blank=True)
serial2 = models.CharField('non-standard serial', max_length=10, help_text="fill this if your serial consists of numbers and letters", unique=True, blank=True)
machine_model = models.CharField(max_length=100)
slug = models.SlugField(unique=True)
description = models.TextField()
added = models.DateTimeField(auto_now_add=True)
speed = models.IntegerField(blank = True, null=True, choices=speed_choices)
class Meta:
abstract=True
class Category(models.Model):
type_choice= (('black-white machine','B&W Machine'),('color machine', 'Colour Machine'),('White Format machine','White-Format machine'))
category_type = models.CharField(max_length=150, choices=type_choice, unique=True)
def __str__(self):
return self.category_type
class MachineManager(models.Manager):
def get_machines_by_customer(self,customer=None):
if customer:
return Machine.objects.filter(customer=customer)
else:
return None
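# Usage sketch (assumes an existing Customer instance named some_customer):
#   Machine.objects.get_machines_by_customer(some_customer)  # QuerySet of that customer's machines
#   Machine.objects.get_machines_by_customer()               # None when no customer is given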
class Machine(MachineDetail):
category = models.ForeignKey(Category, on_delete=models.CASCADE)
customer = models.ForeignKey(Customer, on_delete=models.CASCADE)
department = models.ForeignKey(Department, related_name='machines_dep', on_delete=models.CASCADE)
area = models.ForeignKey(Area, related_name='machines', on_delete=models.CASCADE, blank=True,null=True)
engineers = models.ManyToManyField(Engineer, related_name='machines', blank=True)
#call = models.OneToOneField()
objects = MachineManager()
def __str__(self):
return self.machine_model + '({})'.format(self.category)+" "+self.customer.name+" "+self.department.department_name
def default_call_end_date():
    # Callable default so the end date is computed per save (timezone-aware),
    # not evaluated once at import time as the previous expression default was.
    return timezone.now() + timedelta(hours=6)
class Call(models.Model):
    #from machine.models import Machine
    def customer_name(self):
        machine = Machine.objects.get(id=self.machine.id)
        customer = Customer.objects.get(id=machine.customer.id)
        return customer.name
    engineer = models.ForeignKey(Engineer, on_delete=models.CASCADE, blank=True, null=True)
    customer = models.ForeignKey(Customer, related_name='calls', on_delete=models.CASCADE, blank=True, null=True)
    machine = models.ForeignKey(to='Machine', related_name='calls', on_delete=models.CASCADE)
    assign_date = models.DateTimeField(auto_now_add=True)
    end_date = models.DateTimeField(help_text="the default value is 6 hours + assign_date", default=default_call_end_date)
notification_number = models.PositiveIntegerField(primary_key=True)
class Meta:
get_latest_by=['notification_number']
def __str__(self):
return (Engineer.objects.get(id=self.engineer.id).name)
'''class ex(models.Field):
def db_type(self, connection):
if connection.setting_dict['ENGINE']=='django.db.backends.mysql':
return 'datetime'
else:
return 'timestamp
def get_end_date(self):
#d = datetime.now()
d= self.assign_date
if d.hour+6 >= 24:
t = datetime(d.year, d.month, d.day+1, d.hour+6-24)
print(t)
return t
else:
t = datetime(d.year, d.month, d.day+1, d.hour+6)
return t
def save(self,*args,**kwargs):
with atomic():
#last_notification = Call.objects.latest('id').select_for_update(nowait=False).notification_number
if Call.objects.all():
self.notification_number = Call.objects.all().count()+1
self.save()
#self.notification_number += 1
else:
self.notification_number=1
                self.save()''' |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import time
import HTMLTestRunner
import xmlrunner
global DRIVER
global MS_ip
# Import test cases
##################################
from Login_and_Accounts import *
from Service_Offering import *
from TemplatesAndISO import *
from VM_lifeCycle import *
###################################
# Following are BVT Tests
# serialize the test cases
suite = unittest.TestSuite() # setup new test suite
####################################################################################################
# Following logs admin user in and creates test account then logs admin user out and logs in as test to run tests.
# You should leave this as is for all the tests.
suite.addTest(unittest.makeSuite(login)) #Login Admin
time.sleep(5)
suite.addTest(unittest.makeSuite(createAcc)) # Create an Account test. We will use test account for all our tests
time.sleep(5)
suite.addTest(unittest.makeSuite(logout)) #Logout Admin
time.sleep(5)
suite.addTest(unittest.makeSuite(login_test)) # Login Test
####################################################################################################
time.sleep(5)
suite.addTest(unittest.makeSuite(Disk_offering_Add))
time.sleep(5)
suite.addTest(unittest.makeSuite(Disk_offering_Edit))
time.sleep(5)
suite.addTest(unittest.makeSuite(Disk_offering_Delete))
time.sleep(5)
suite.addTest(unittest.makeSuite(Compute_offering_Add))
time.sleep(5)
suite.addTest(unittest.makeSuite(Compute_offering_Edit))
time.sleep(5)
suite.addTest(unittest.makeSuite(Compute_offering_Delete))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(deployVM))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(stopVM))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(startVM))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(destroyVM))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(restoreVM))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(Template_Add))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(Template_Edit))
# time.sleep(5)
# suite.addTest(unittest.makeSuite(Template_Delete))
####################################################################################################
# Following logs test user out and logs back in as Admin and tears down the test account.
# You should leave this as is for all the tests.
suite.addTest(unittest.makeSuite(logout)) #Logout test
time.sleep(5)
suite.addTest(unittest.makeSuite(login)) #Login Admin
time.sleep(5)
suite.addTest(unittest.makeSuite(tearAcc)) # Delete Account test
####################################################################################################
# If XML reports compatible with junit's XML output are desired then leave the following code as is.
# If HTML reports are desired follow instructions
#Comment following line for HTML and uncomment for XML
runner = xmlrunner.XMLTestRunner(output='test-reports')
#Comment following line for XML and uncomment for HTML
#runner = HTMLTestRunner.HTMLTestRunner()
#header is required for displaying the website
#Comment following line for XML and uncomment for HTML
#print "Content-Type: text/html\n"
# Leave following as is for either XML or HTML
runner.run(suite)
|
suitable_locations = land_use_residential * (dem_gent > 10) * (1 - roads_buffer_arr) |
from abc import ABC, abstractmethod
from pytz import timezone
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
It calls process agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
process_agenda takes one agenda document (for instance HTML document) data.
A processed agenda will have to process each of its items. Each agenda item might
be at a different location or contained within an agenda. If they are contained within
the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be
called with the location of the agenda_item.
The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id, meeting_id, meeting_time):
"""
The result of process agenda item will be a dict that can be stored by store_agenda_item
"""
pass
@abstractmethod
def _store_agenda(self, processed_agenda, committee):
"""
Calls to DB should be here for the main agenda content
"""
pass
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
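# Hypothetical example: a minimal concrete subclass sketch showing how the abstract
# hooks are meant to fit together. The class name, URL and trivial bodies below are
# illustrative assumptions, not part of this module.
class ExampleCouncilScraper(EngageScraper):
    def get_available_agendas(self):
        # A real scraper would crawl an index page for agenda URLs here.
        self.agenda_locations = ["https://example.org/agendas/2020-01-06.html"]
    def scrape(self):
        for location in self.agenda_locations:
            agenda_data = location  # placeholder for the downloaded document
            self._process_agenda(agenda_data, meeting_id=location)
    def _process_agenda(self, agenda_data, meeting_id):
        agenda = {"meeting_id": meeting_id, "items": [agenda_data]}
        self._store_agenda(agenda, committee="Council")
        self._store_agenda_items(agenda, agenda_saved=True)
    def _scrape_agenda_item(self, agenda_item_location):
        return self._process_agenda_item(agenda_item_location, None, None, None)
    def _process_agenda_item(self, agenda_item_data, agenda_item_id, meeting_id, meeting_time):
        return {"id": agenda_item_id, "data": agenda_item_data}
    def _store_agenda(self, processed_agenda, committee):
        pass  # database writes would go here
    def _store_agenda_items(self, agenda_dict, agenda_saved):
        pass  # database writes would go here
# e.g. ExampleCouncilScraper("America/Los_Angeles").get_available_agendas()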
|
import numpy as np
def target_transform(y: np.ndarray, increment: float = 0.01) -> np.ndarray:
    """
    Transform a non-negative array to R using np.log
    :param y: np.ndarray of non-negative values
    :param increment: float offset added before taking the log, to avoid log(0)
    :return: np.ndarray of log-transformed values
    """
    return np.log(y + increment)
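# Example round trip (values chosen arbitrarily):
#   y = np.array([0.0, 1.0, 10.0])
#   np.allclose(target_inverse_transform(target_transform(y)), y)  # -> True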
def target_inverse_transform(y_trn: np.ndarray, increment: float = 0.01) -> np.ndarray:
    """
    Inverse transform of an array in R back to a non-negative array
    :param y_trn: np.ndarray of log-transformed values
    :param increment: float offset that was added in target_transform
    :return: np.ndarray on the original non-negative scale
    """
    return np.exp(y_trn) - increment |
from os.path import abspath, dirname, join
from setuptools import find_packages, setup
basedir = abspath(dirname(__file__))
with open(join(basedir, "README.md"), encoding="utf-8") as f:
README = f.read()
with open(join(basedir, "fimed", "__init__.py"), "r") as f:
version_marker = "__version__ = "
for line in f:
if line.startswith(version_marker):
_, VERSION = line.split(version_marker)
VERSION = VERSION.strip().strip('"')
break
else:
        raise RuntimeError("Version not found in __init__.py")
install_requires = [
# routes
"fastapi",
"pydantic[dotenv]",
"uvicorn",
"aiofiles",
"pyjwt",
"passlib[bcrypt]",
"python-multipart",
"strconv",
"requests",
# database
"pymongo",
"cryptography",
#package
"numpy",
"pandas"
]
setup(
name="fimed",
version=VERSION,
description="FIMED 2.0",
long_description=README,
long_description_content_type="text/markdown",
author="Khaos Research",
author_email="",
maintainer="Daniel Doblas",
maintainer_email="",
license="MIT",
url="https://github.com/dandobjim/FIMED2.0",
packages=find_packages(exclude=["test_"]),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["fimed=fimed.__main__:cli",],},
install_requires=install_requires,
)
|
import os
import sys
import re
from pathlib import Path
from paraview.simple import *
PATH = Path(os.getenv('HOME')) / "SPHinXsys-build"/"tests"/"user_examples"/"2d_boulder_on_slop"
if sys.platform.startswith('linux'):
PATH = str(PATH) + "/bin/output/"
else:
PATH = str(PATH) + "\\src\\output\\"
def open_files():
wall_re = re.compile(r'SPHBody_Wall_[0-9.]+.vtp$')
boulder_re = re.compile(r'SPHBody_Boulder_[0-9.]+.vtp$')
wall_files = []
boulder_files = []
files = os.listdir(PATH)
for name in files:
if boulder_re.fullmatch(name):
boulder_files.append(PATH + name)
elif wall_re.fullmatch(name):
wall_files.append(PATH + name)
num_re = re.compile(r'_([0-9]+).vtp$')
boulder_files.sort(key=lambda x: int(num_re.findall(x)[0]))
return (OpenDataFile(wall_files), OpenDataFile(boulder_files),
len(boulder_files))
ResetSession()
# for x in GetSources().values():
# Delete(x[0])
view = GetActiveView()
wall, boulder, number_of_files = open_files()
scene = GetAnimationScene()
scene.PlayMode = 'Snap To TimeSteps'
RenameSource('Wall', wall)
RenameSource('Boulder', boulder)
wall_disp = GetDisplayProperties(wall, view)
boulder_disp = GetDisplayProperties(boulder, view)
for disp in (wall_disp, boulder_disp):
disp.SetRepresentationType('Point Gaussian')
disp.GaussianRadius = 0.0013
boulder_disp.DiffuseColor = [0.67, 0.33, 0.0]
for source in [wall, boulder]:
Show(source, view)
Render()
|
from colorama import init
from colorama import Fore, Back, Style
from menu import menu_one, menu_two, menu_three, menu_four
def show_menu():
'''Display menu on terminal'''
print('='*30)
print('=' + ' '*12 + 'menu' + ' '*12 + '=')
print('='*30)
print('1. Add new record')
print('2. Show All Apps/WebPages connected with Login/Email')
print('3. Show Login and Password for App/WebPage')
print('4. Generate New Password')
print('5. Exit')
print()
menu_respond()
def menu_respond():
'''Listener for user response'''
while True:
try:
response = int(input(Fore.CYAN + 'Choose menu option: ' + Style.RESET_ALL))
if response == 1:
menu_one()
show_menu()
elif response == 2:
menu_two()
show_menu()
elif response == 3:
menu_three()
show_menu()
elif response == 4:
menu_four()
show_menu()
elif response == 5:
print(Fore.GREEN + 'Exiting...' + Style.RESET_ALL)
exit()
break
else:
print(Fore.RED + 'Number must be one of available menu options!')
except ValueError:
print(Fore.RED + 'Expected integer value, not char!' + Style.RESET_ALL)
if __name__ == '__main__':
init()
show_menu()
|
#!/usr/bin/env python2
# Win32 named pipe helper
from __future__ import absolute_import
__all__ = ["Win32Pipe"]
import sys
import ctypes
import logging
import random
from ctypes import POINTER as PTR
from ctypes.wintypes import (BOOL, DWORD, HANDLE, LPCWSTR, LPVOID, LPCVOID)
class OVERLAPPED(ctypes.Structure):
class PointerUnion(ctypes.Union):
class OffsetStruct(ctypes.Structure):
_fields_ = [("Offset", DWORD),
("OffsetHigh", DWORD)]
_anonymous_ = ("s",)
_fields_ = [("s", OffsetStruct),
("Pointer", LPVOID)]
_anonymous_ = ("u",)
_fields_ = [("Internal", LPVOID),
("InternalHigh", LPVOID),
("u", PointerUnion),
("hEvent", HANDLE)]
def __init__(self):
super(OVERLAPPED, self).__init__(Offset=0, OffsetHigh=0,
Pointer=0, Internal=0,
InternalHigh=0, hEvent=None)
self.hEvent = CreateEvent(None, True, False, None)
LPOVERLAPPED = ctypes.POINTER(OVERLAPPED)
log = logging.getLogger("py.win32_named_pipe")
kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
def _decl(name, ret=None, args=()):
fn = getattr(kernel32, name)
fn.restype = ret
fn.argtypes = args
return fn
CloseHandle = _decl("CloseHandle", BOOL, (HANDLE,))
CreateEvent = _decl("CreateEventW", HANDLE, (LPVOID, BOOL, BOOL, LPCWSTR))
CreateFile = _decl("CreateFileW", HANDLE, (LPCWSTR, DWORD, DWORD,
LPVOID, DWORD, DWORD, HANDLE))
CreateNamedPipe = _decl("CreateNamedPipeW", HANDLE,
(LPCWSTR, DWORD, DWORD, DWORD, DWORD, DWORD,
DWORD, LPVOID))
ConnectNamedPipe = _decl("ConnectNamedPipe", BOOL, (HANDLE, LPOVERLAPPED))
WriteFile = _decl("WriteFile", BOOL, (HANDLE, LPCVOID, DWORD,
PTR(DWORD), PTR(OVERLAPPED)))
ReadFile = _decl("ReadFile", BOOL, (HANDLE, LPVOID, DWORD,
PTR(DWORD), PTR(OVERLAPPED)))
GetOverlappedResult = _decl("GetOverlappedResult", BOOL,
(HANDLE, PTR(OVERLAPPED), PTR(DWORD), BOOL))
ERROR_ACCESS_DENIED = 5
ERROR_IO_PENDING = 997
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
INVALID_HANDLE_VALUE = -1
OPEN_EXISTING = 3
PIPE_ACCESS_DUPLEX = 0x00000003
PIPE_READMODE_BYTE = 0x00000000
PIPE_REJECT_REMOTE_CLIENTS = 0x00000008
PIPE_TYPE_BYTE = 0x00000000
del _decl
class Win32Pipe(object):
"""This class implements a Win32 named pipe; it has a file-like API. It is
oriented towards byte streams.
Usage:
pipe = Win32Pipe() # Create a new pipe with a randomly generated name
pipe = Win32Pipe("name") # Create a new pipe, \\.\pipe\name
pipe = Win32Pipe("name", client=True) # Connect to an existing pipe
"""
name = None ###< The name of the pipe
_has_stream = False ###< Whether we have connected to the other end yet
_pipe = None ###< The underlying Win32 handle
pipe_prefix = None ###< Prefix to place before randomly generated pipe names
def __init__(self, name=None, client=False):
if client:
self._connect_to_existing(name=name)
else:
self._create(name=name)
def _create(self, name=None):
"""Create a new pipe as a server, with the given name"""
self._has_stream = False
flags = (PIPE_ACCESS_DUPLEX |
FILE_FLAG_FIRST_PIPE_INSTANCE |
FILE_FLAG_OVERLAPPED)
mode = PIPE_TYPE_BYTE | PIPE_READMODE_BYTE
# Windows XP, version (5, 1) doesn't support PIPE_REJECT_REMOTE_CLIENTS
# see bug 104569.
if sys.getwindowsversion() >= (5, 2):
mode |= PIPE_REJECT_REMOTE_CLIENTS
pipe_prefix = "\\\\.\\pipe\\"
if name is not None:
if not name.lower().startswith(pipe_prefix):
name = pipe_prefix + name
log.debug("Creating new named pipe %s", name)
self._pipe = CreateNamedPipe(name, flags, mode, 1, 0x1000, 0x1000,
0, None)
if self._pipe == INVALID_HANDLE_VALUE:
self._pipe = None
raise ctypes.WinError(ctypes.get_last_error())
else:
bits = min((256, (255 - len(pipe_prefix)) * 4))
start = random.getrandbits(bits)
log.debug("Trying to create pipe with randomness %s",
hex(start))
# Try a few variations on the name in case it's somehow taken
for i in range(1024):
name = (pipe_prefix + (self.pipe_prefix or "") +
hex(start + i)[2:-1])
assert len(name) <= 256
                # Unfortunately, it is more reliable to create a nowait pipe
                # and poll for it than it is to create a blocking pipe.
self._pipe = CreateNamedPipe(name, flags, mode, 1, 0x1000,
0x1000, 0, None)
if self._pipe != INVALID_HANDLE_VALUE:
break
self._pipe = None
errno = ctypes.get_last_error()
if errno != ERROR_ACCESS_DENIED:
# we get access denied on a name collision
raise ctypes.WinError(errno)
else:
raise ctypes.WinError(ctypes.get_last_error())
self.name = name
def _connect_to_existing(self, name):
self._has_stream = False
pipe_prefix = "\\\\.\\pipe\\"
if not name.lower().startswith(pipe_prefix):
name = pipe_prefix + name
log.debug("Connecting to existing named pipe %s", name)
self._pipe = CreateFile(name,
GENERIC_READ | GENERIC_WRITE,
0,
None,
OPEN_EXISTING,
FILE_FLAG_OVERLAPPED,
None)
if self._pipe == INVALID_HANDLE_VALUE:
self._pipe = None
error = ctypes.WinError(ctypes.get_last_error())
log.debug("Failed to open pipe %s: %s", name, error)
raise error
self._has_stream = True
def _ensure_stream(self, action="open"):
if self._pipe is None:
raise IOError("Cannot %s closed pipe" % (action,))
if self._has_stream:
return
overlapped = OVERLAPPED()
try:
if not ConnectNamedPipe(self._pipe, overlapped):
errno = ctypes.get_last_error()
if errno != ERROR_IO_PENDING:
raise ctypes.WinError(errno)
if not GetOverlappedResult(self._pipe, overlapped,
ctypes.byref(DWORD(0)), True):
raise ctypes.WinError(ctypes.get_last_error())
self._has_stream = True
finally:
CloseHandle(overlapped.hEvent)
def write(self, data):
self._ensure_stream("write to")
overlapped = OVERLAPPED()
try:
if not WriteFile(self._pipe, data, len(data), None,
ctypes.byref(overlapped)):
errno = ctypes.get_last_error()
if errno != ERROR_IO_PENDING:
raise ctypes.WinError(errno)
written = DWORD(0)
if not GetOverlappedResult(self._pipe,
ctypes.byref(overlapped),
ctypes.byref(written),
True):
raise ctypes.WinError(ctypes.get_last_error())
assert written.value == len(data), "Incomplete write"
finally:
CloseHandle(overlapped.hEvent)
def read(self, count):
self._ensure_stream("read from")
overlapped = OVERLAPPED()
try:
buf = ctypes.create_string_buffer(count)
if not ReadFile(self._pipe, ctypes.byref(buf), count,
None, ctypes.byref(overlapped)):
errno = ctypes.get_last_error()
if errno != ERROR_IO_PENDING:
raise ctypes.WinError(errno)
read = DWORD(0)
if not GetOverlappedResult(self._pipe,
ctypes.byref(overlapped),
ctypes.byref(read),
True):
raise ctypes.WinError(ctypes.get_last_error())
assert read.value == count
return buf.value
finally:
CloseHandle(overlapped.hEvent)
def close(self):
CloseHandle(self._pipe)
self._pipe = None
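# Usage sketch (hypothetical pipe name; in practice the two ends live in separate
# processes or threads, because the server blocks in ConnectNamedPipe until a
# client attaches on its first read/write):
#   server = Win32Pipe("my_pipe")               # server end, \\.\pipe\my_pipe
#   client = Win32Pipe("my_pipe", client=True)  # client end, from another process
#   client.write("hello")
#   data = server.read(5)                       # -> "hello"
#   client.close(); server.close()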
|
#!/usr/bin/env python
import argparse
import os
import sys
import requests
import json
import urllib.parse
import m3u8
from pathlib import Path
import re
import ffmpeg
import shutil
class TwitterDownloader:
video_player_prefix = 'https://twitter.com/i/videos/tweet/'
video_api = 'https://api.twitter.com/1.1/videos/tweet/config/'
tweet_data = {}
def __init__(self, tweet_url, output_dir, debug):
self.tweet_url = tweet_url
self.output_dir = output_dir
self.debug = debug
if debug > 2:
self.debug = 2
self.tweet_data['tweet_url'] = tweet_url.split('?', 1)[0]
self.tweet_data['user'] = self.tweet_data['tweet_url'].split('/')[3]
self.tweet_data['id'] = self.tweet_data['tweet_url'].split('/')[5]
self.downloaded_video_names = []
output_path = Path(output_dir)
storage_dir = output_path #/ self.tweet_data['user'] / self.tweet_data['id']
Path.mkdir(storage_dir, parents = True, exist_ok = True)
self.storage = str(storage_dir)
self.requests = requests.Session()
def download(self):
self.__debug('Tweet URL', self.tweet_data['tweet_url'])
# Get the bearer token
token = self.__get_bearer_token()
# Get the M3u8 file - this is where rate limiting has been happening
video_host, playlist = self.__get_playlist(token)
print('[+] Multiple resolutions found.')
for plist in playlist.playlists:
resolution = str(plist.stream_info.resolution[0]) + 'x' + str(plist.stream_info.resolution[1])
resolution_file = Path(self.storage) / Path(resolution + '_' + self.tweet_data['id'] + '_' + '.mp4')
self.downloaded_video_names.append(resolution_file)
print('[+] Downloading ' + resolution)
playlist_url = video_host + plist.uri
ts_m3u8_response = self.requests.get(playlist_url, headers = {'Authorization': None})
ts_m3u8_parse = m3u8.loads(ts_m3u8_response.text)
ts_list = []
ts_full_file_list = []
for ts_uri in ts_m3u8_parse.segments.uri:
#ts_list.append(video_host + ts_uri)
ts_file = requests.get(video_host + ts_uri)
fname = ts_uri.split('/')[-1]
ts_path = Path(self.storage) / Path(fname)
ts_list.append(ts_path)
ts_path.write_bytes(ts_file.content)
ts_full_file = Path(self.storage) / Path(resolution + '.ts')
ts_full_file = str(ts_full_file)
ts_full_file_list.append(ts_full_file)
# Shamelessly taken from https://stackoverflow.com/questions/13613336/python-concatenate-text-files/27077437#27077437
with open(str(ts_full_file), 'wb') as wfd:
for f in ts_list:
with open(f, 'rb') as fd:
shutil.copyfileobj(fd, wfd, 1024 * 1024 * 10)
for ts in ts_full_file_list:
print('\t[*] Doing the magic ...')
ffmpeg\
.input(ts)\
.output(str(resolution_file), acodec = 'copy', vcodec = 'libx264', format = 'mp4', loglevel = 'error')\
.overwrite_output()\
.run()
print('\t[+] Doing cleanup')
for ts in ts_list:
p = Path(ts)
p.unlink()
for ts in ts_full_file_list:
p = Path(ts)
p.unlink()
#else:
#print('[-] Sorry, single resolution video download is not yet implemented. Please submit a bug report with the link to the tweet.')
for x in range(0, len(self.downloaded_video_names)-1):
os.system("sudo rm " + str(self.downloaded_video_names[x]))
def __get_bearer_token(self):
video_player_url = self.video_player_prefix + self.tweet_data['id']
video_player_response = self.requests.get(video_player_url).text
self.__debug('Video Player Body', '', video_player_response)
js_file_url = re.findall('src="(.*js)', video_player_response)[0]
js_file_response = self.requests.get(js_file_url).text
self.__debug('JS File Body', '', js_file_response)
bearer_token_pattern = re.compile('Bearer ([a-zA-Z0-9%-])+')
bearer_token = bearer_token_pattern.search(js_file_response)
bearer_token = bearer_token.group(0)
self.requests.headers.update({'Authorization': bearer_token})
self.__debug('Bearer Token', bearer_token)
self.__get_guest_token()
return bearer_token
def __get_playlist(self, token):
player_config_req = self.requests.get(self.video_api + self.tweet_data['id'] + '.json')
player_config = json.loads(player_config_req.text)
if 'errors' not in player_config:
self.__debug('Player Config JSON', '', json.dumps(player_config))
m3u8_url = player_config['track']['playbackUrl']
else:
self.__debug('Player Config JSON - Error', json.dumps(player_config['errors']))
print('[-] Rate limit exceeded. Could not recover. Try again later.')
sys.exit(1)
# Get m3u8
m3u8_response = self.requests.get(m3u8_url)
self.__debug('M3U8 Response', '', m3u8_response.text)
m3u8_url_parse = urllib.parse.urlparse(m3u8_url)
video_host = m3u8_url_parse.scheme + '://' + m3u8_url_parse.hostname
m3u8_parse = m3u8.loads(m3u8_response.text)
return [video_host, m3u8_parse]
def __get_guest_token(self):
res = self.requests.post("https://api.twitter.com/1.1/guest/activate.json")
res_json = json.loads(res.text)
self.requests.headers.update({'x-guest-token': res_json.get('guest_token')})
def __debug(self, msg_prefix, msg_body, msg_body_full = ''):
if self.debug == 0:
return
if self.debug == 1:
print('[Debug] ' + '[' + msg_prefix + ']' + ' ' + msg_body)
if self.debug == 2:
print('[Debug+] ' + '[' + msg_prefix + ']' + ' ' + msg_body + ' - ' + msg_body_full)
if __name__ == '__main__':
import sys
if sys.version_info[0] == 2:
print('Python3 is required.')
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('tweet_url', help = 'The video URL on Twitter (https://twitter.com/<user>/status/<id>).')
parser.add_argument('-o', '--output', dest = 'output', default = './output', help = 'The directory to output to. The structure will be: <output>/<user>/<id>.')
parser.add_argument('-d', '--debug', default = 0, action = 'count', dest = 'debug', help = 'Debug. Add more to print out response bodies (maximum 2).')
args = parser.parse_args()
twitter_dl = TwitterDownloader(args.tweet_url, args.output, args.debug)
twitter_dl.download()
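# Example invocation (script name and URL are illustrative):
#   python twitter_video_dl.py "https://twitter.com/SomeUser/status/1234567890" -o ./output -dd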
|
import os
import shutil
import unittest
import torch
import torchvision
import torch.nn as nn
from neural_compressor.data import DATASETS
from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader
def build_fake_yaml():
fake_yaml = """
model:
name: imagenet_prune
framework: pytorch
pruning:
train:
start_epoch: 0
end_epoch: 4
iteration: 10
dataloader:
batch_size: 30
dataset:
dummy:
shape: [128, 3, 224, 224]
label: True
optimizer:
SGD:
learning_rate: 0.1
momentum: 0.1
nesterov: True
weight_decay: 0.1
criterion:
CrossEntropyLoss:
reduction: sum
approach:
weight_compression:
initial_sparsity: 0.0
target_sparsity: 0.97
start_epoch: 0
end_epoch: 4
pruners:
- !Pruner
start_epoch: 1
end_epoch: 3
prune_type: basic_magnitude
names: ['layer1.0.conv1.weight']
- !Pruner
target_sparsity: 0.6
prune_type: gradient_sensitivity
update_frequency: 2
names: ['layer1.0.conv2.weight']
evaluation:
accuracy:
metric:
topk: 1
dataloader:
batch_size: 30
dataset:
dummy:
shape: [128, 3, 224, 224]
label: True
"""
with open('fake.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
class TestPruning(unittest.TestCase):
model = torchvision.models.resnet18()
@classmethod
def setUpClass(cls):
build_fake_yaml()
@classmethod
def tearDownClass(cls):
os.remove('fake.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_pruning_internal(self):
from neural_compressor.experimental import Pruning, common
prune = Pruning('fake.yaml')
prune.model = self.model
_ = prune()
if __name__ == "__main__":
unittest.main()
|
# Downstream: bikeshare prediction with latent representation
# The model consists of a 3d cnn network that uses
# historical ST data to predict next hour bike demand
# as well as taking a latent feature map trained from
# an autoencoder that includes multiple urban features
# Treat latent representation as ordinary 3D dataset
# which will go through 3D CNN
import numpy as np
import tensorflow as tf
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from matplotlib import pyplot as plt
import math
import datetime
from datetime import timedelta
from utils import datetime_utils
import os
HEIGHT = 32
WIDTH = 20
TIMESTEPS = 168
# without exogenous data, the only channel is the # of trip starts
BIKE_CHANNEL = 1
LATENT_CHANNEL = 5 # or dim, dimension of latent features should be customized
BATCH_SIZE = 32
TRAINING_STEPS = 200
LEARNING_RATE = 0.005
def my_leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.2)
class generateData(object):
def __init__(self, input_data, timesteps, batchsize):
self.timesteps = timesteps
self.batchsize = batchsize
self.rawdata = input_data
self.train_batch_id = 0
X, y = self.load_data()
# x should be [batchsize, time_steps, height, width,channel]
self.X = X['train']
# y should be [batchsize, height, width, channel]
self.y = y['train']
# load raw data
def load_data(self):
data = self.rawdata
train_x = data[:self.timesteps, :, :, :]
train_y = data[self.timesteps:,:, :, :]
# reshape x to [None, time_steps, height, width,channel]
train_x = np.expand_dims(train_x, axis=4)
# transpose
train_x = np.swapaxes(train_x,0,1)
train_y = np.expand_dims(train_y, axis=4)
# transpose
train_y = np.swapaxes(train_y,0,1)
        # squeeze to [batch_size, height, width, channel]
train_y = np.squeeze(train_y, axis = 1)
return dict(train=train_x), dict(train = train_y)
def train_next(self):
""" Return a batch of data. When dataset end is reached, start over.
"""
if self.train_batch_id == len(self.X):
self.train_batch_id = 0
batch_data = (self.X[self.train_batch_id:min(self.train_batch_id +
self.batchsize, len(self.X))])
batch_labels = (self.y[self.train_batch_id:min(self.train_batch_id +
self.batchsize, len(self.y))])
self.train_batch_id = min(self.train_batch_id + self.batchsize, len(self.X))
return batch_data, batch_labels
class generateData_1d(object):
def __init__(self, input_data, timesteps, batchsize):
self.timesteps = timesteps
self.batchsize = batchsize
self.rawdata = input_data
self.train_batch_id = 0
X, y = self.load_data()
# x should be [batchsize, time_steps, height, width,channel]
self.X = X['train']
# y should be [batchsize, height, width, channel]
self.y = y['train']
def rnn_data(self, data, labels=False):
"""
creates new data frame based on previous observation
* example:
l = [1, 2, 3, 4, 5]
time_steps = 2
-> labels == False [[1, 2], [2, 3], [3, 4]] #Data frame for input with 2 timesteps
-> labels == True [3, 4, 5] # labels for predicting the next timestep
"""
rnn_df = []
for i in range(len(data) - self.timesteps):
if labels:
try:
rnn_df.append(data.iloc[i + self.timesteps].as_matrix())
except AttributeError:
rnn_df.append(data.iloc[i + self.timesteps])
else:
data_ = data.iloc[i: i + self.timesteps].as_matrix()
rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
return np.array(rnn_df, dtype=np.float32)
# load raw data
def load_data(self):
# (169, 1296, 1, 1, 3)
data = self.rawdata
train_x = data[:self.timesteps, :, :]
train_y = data[self.timesteps:,:, :]
train_x = np.swapaxes(train_x,0,1)
# transpose
train_y = np.swapaxes(train_y,0,1)
        # squeeze to [batch_size, height, width, channel]
train_y = np.squeeze(train_y, axis = 1)
return dict(train=train_x), dict(train = train_y)
# input train_x, train_y or test_x or test_y
def train_next(self):
""" Return a batch of data. When dataset end is reached, start over.
"""
if self.train_batch_id == len(self.X):
self.train_batch_id = 0
batch_data = (self.X[self.train_batch_id:min(self.train_batch_id +
self.batchsize, len(self.X))])
batch_labels = (self.y[self.train_batch_id:min(self.train_batch_id +
self.batchsize, len(self.y))])
self.train_batch_id = min(self.train_batch_id + self.batchsize, len(self.X))
return batch_data, batch_labels
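# Shape summary for the networks below (using the module-level defaults above):
# the 3D-CNN stream maps [batch, 168, 32, 20, 1] -> [batch, 32, 20, 1], the latent
# stream maps [batch, 168, 32, 20, LATENT_CHANNEL] -> [batch, 32, 20, LATENT_CHANNEL],
# and the fusion network concatenates the two along the channel axis and outputs
# [batch, 32, 20, 1].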
class Conv3DPredictor:
def __init__(self, intersect_pos_set,
demo_mask_arr, channel, time_steps, height, width):
self.time_steps = time_steps
self.width = width
self.height = height
self.channel = channel
self.x = tf.placeholder(tf.float32, shape=[None,time_steps, height, width, channel], name = 'x_input')
self.y = tf.placeholder(tf.float32, shape= [None, height, width, channel], name = 'y_input')
self.latent_fea = tf.placeholder(tf.float32, shape=[None, time_steps, height, width, LATENT_CHANNEL], name = 'latent')
self.is_training = tf.placeholder(tf.bool)
self.global_step = tf.Variable(0, trainable=False)
# for 3d cnn stream
def cnn_model(self, x_train_data, is_training, dim = 1, seed=None):
# output from 3d cnn (?, 168, 32, 20, 1) -> (?, 32, 20, 1)
with tf.name_scope("3d_layer_a"):
conv1 = tf.layers.conv3d(inputs=x_train_data, filters=16, kernel_size=[3,3,3], padding='same', activation=None)
conv1 = tf.layers.batch_normalization(conv1, training=is_training)
conv1 = tf.nn.leaky_relu(conv1, alpha=0.2)
conv2 = tf.layers.conv3d(inputs=conv1, filters=32, kernel_size=[3,3,3], padding='same', activation=None)
conv2 = tf.layers.batch_normalization(conv2, training=is_training)
conv2 = tf.nn.leaky_relu(conv2, alpha=0.2)
conv3 = tf.layers.conv3d(inputs=conv2, filters=1, kernel_size=[3,3,3], padding='same', activation=None)
conv3 = tf.layers.batch_normalization(conv3, training=is_training)
conv3 = tf.nn.leaky_relu(conv3, alpha=0.2)
cnn3d_bn_squeeze = tf.squeeze(conv3, axis = 4)
cnn3d_bn_squeeze = tf.transpose(cnn3d_bn_squeeze, perm=[0,2,3,1])
with tf.name_scope("3d_layer_b"):
conv5 = tf.layers.conv2d(
inputs=cnn3d_bn_squeeze,
filters=dim,
kernel_size=[1, 1],
padding="same",
activation=my_leaky_relu
)
with tf.name_scope("3d_batch_norm_b"):
conv5_bn = tf.layers.batch_normalization(inputs=conv5, training= is_training)
out = conv5_bn
# output size should be [None, height, width, channel]
return out
# fuse 3D CNN with latent representations
def model_fusion_latent_feature(self,prediction_3d, latent_feature, is_training):
fuse_feature = tf.concat([prediction_3d, latent_feature], 3)
with tf.name_scope("fusion_layer_a"):
conv1 = tf.layers.conv2d(fuse_feature, 16, 3, padding='same',activation=my_leaky_relu)
conv2 = tf.layers.conv2d(conv1, 16, 3, padding='same',activation=my_leaky_relu)
with tf.name_scope("fusion_batch_norm"):
cnn2d_bn = tf.layers.batch_normalization(inputs=conv2, training=is_training)
# output should be (?, 32, 20, 1)
with tf.name_scope("fusion_layer_b"):
conv3 = tf.layers.conv2d(
inputs=cnn2d_bn,
filters=1,
kernel_size=[1, 1],
padding="same",
activation=my_leaky_relu
)
out = conv3
# output size should be [batchsize, height, width, 1]
return out
def train_neural_network(self, x_train_data, y_train_data, x_test_data, y_test_data,
demo_mask_arr,
latent_train_series,latent_test_series,
save_folder_path,
resume_training = False, checkpoint_path = None,
epochs=10, batch_size=64):
with tf.device('/gpu:0'):
starter_learning_rate = LEARNING_RATE
learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step,
5000, 0.9, staircase=True)
# fusion model
prediction_3d = self.cnn_model(self.x, self.is_training, 1, seed=1)
latent_fea_output = self.cnn_model(self.latent_fea, self.is_training,
latent_train_series.shape[-1], seed=1)
# fusion
prediction = self.model_fusion_latent_feature(prediction_3d, latent_fea_output, self.is_training)
demo_mask_arr_expanded = tf.expand_dims(demo_mask_arr, 0) # [1, 2]
demo_mask_arr_expanded = tf.tile(demo_mask_arr_expanded, [tf.shape(prediction)[0],1,1,1])
weight = tf.cast(tf.greater(demo_mask_arr_expanded, 0), tf.float32)
acc_loss = tf.losses.absolute_difference(prediction, self.y, weight)
cost = acc_loss
with tf.name_scope("training"):
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost, global_step = self.global_step)
saver = tf.train.Saver()
test_result = list()
if not os.path.exists(save_folder_path):
os.makedirs(save_folder_path)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
# ---- if resume training -----
if resume_training:
if checkpoint_path is not None:
saver.restore(sess, checkpoint_path)
else:
saver.restore(sess, tf.train.latest_checkpoint(save_folder_path))
# check global step
print("global step: ", sess.run([self.global_step]))
print("Model restore finished, current globle step: %d" % self.global_step.eval())
# get new epoch num
print("int(len(x_train_data) / batch_size +1): ", int(len(x_train_data) / batch_size +1))
start_epoch_num = tf.div(self.global_step, int(len(x_train_data) / batch_size +1))
print("start_epoch_num: ", start_epoch_num.eval())
start_epoch = start_epoch_num.eval()
else:
start_epoch = 0
start_time = datetime.datetime.now()
if len(x_train_data)%batch_size ==0:
iterations = int(len(x_train_data)/batch_size)
else:
iterations = int(len(x_train_data)/batch_size) + 1
for epoch in range(start_epoch,epochs):
print('Epoch', epoch, 'started', end='')
epoch_loss = 0
epoch_fairloss = 0
epoch_accloss = 0
# mini batch
for itr in range(iterations):
mini_batch_x = x_train_data[itr*batch_size: (itr+1)*batch_size]
mini_batch_y = y_train_data[itr*batch_size: (itr+1)*batch_size]
mini_batch_latent = latent_train_series[itr*batch_size: (itr+1)*batch_size]
_optimizer, _cost, _acc_loss = sess.run([optimizer, cost, acc_loss], feed_dict={self.x: mini_batch_x, self.y: mini_batch_y,
self.latent_fea: mini_batch_latent,
self.is_training: True })
epoch_loss += _cost
epoch_accloss += _acc_loss
if itr % 10 == 0:
print('epoch: {}, step: {}, train err: {}, mae:{}'.format(epoch, itr, _cost, _acc_loss))
# report loss per epoch
epoch_loss = epoch_loss/ iterations
epoch_accloss = epoch_accloss / iterations
                print('epoch: ', epoch, 'Training Set Epoch total Cost: ',epoch_loss)
                print('epoch: ', epoch, 'Training Set Epoch accuracy Cost: ',epoch_accloss)
test_cost = 0
test_acc_loss = 0
final_output = list()
print('testing')
start_time_epoch = datetime.datetime.now()
itrs = int(len(x_test_data)/batch_size) + 1
for itr in range(itrs):
mini_batch_x_test = x_test_data[itr*batch_size: (itr+1)*batch_size]
mini_batch_y_test = y_test_data[itr*batch_size: (itr+1)*batch_size]
mini_batch_latent_test = latent_test_series[itr*batch_size: (itr+1)*batch_size]
test_cost += sess.run(cost, feed_dict={self.x: mini_batch_x_test, self.y: mini_batch_y_test,
self.latent_fea: mini_batch_latent_test,
self.is_training: True })
test_acc_loss += sess.run(acc_loss, feed_dict={self.x: mini_batch_x_test, self.y: mini_batch_y_test,
self.latent_fea: mini_batch_latent_test,
self.is_training: True})
batch_output = sess.run(prediction, feed_dict={self.x: mini_batch_x_test, self.y: mini_batch_y_test,
self.latent_fea: mini_batch_latent_test,
self.is_training: True})
# model fusion
final_output.extend(batch_output)
end_time_epoch = datetime.datetime.now()
test_time_per_epoch = end_time_epoch - start_time_epoch
test_time_per_sample = test_time_per_epoch/ len(x_test_data)
print(' Testing Set Cost:',test_cost/itrs, ' Time elapse: ', str(test_time_per_epoch), 'Time per sample: ', str(test_time_per_sample))
print(' Testing Set Accuracy Cost:',test_acc_loss/itrs, ' Time elapse: ', str(end_time_epoch - start_time_epoch))
save_path = saver.save(sess, save_folder_path +'latent_fea_model_' +str(epoch)+'.ckpt', global_step=self.global_step)
print('Model saved to {}'.format(save_path))
# save epoch statistics to csv
ecoch_res_df = pd.DataFrame([[epoch_loss, test_cost/itrs, epoch_accloss, test_acc_loss/itrs]],
columns=[ 'train_loss','test_loss', 'train_acc', 'test_acc'])
res_csv_path = save_folder_path + 'ecoch_res_df_' +'.csv'
with open(res_csv_path, 'a') as f:
ecoch_res_df.to_csv(f, header=f.tell()==0)
# save results to txt
txt_name = save_folder_path + 'latent_fea_df_' + '.txt'
with open(txt_name, 'w') as the_file:
#the_file.write('Only account for grids that intersect with city boundary \n')
the_file.write('epoch\n')
the_file.write(str(epoch)+'\n')
the_file.write(' Testing Set Cost:\n')
the_file.write(str(test_cost/itrs) + '\n')
the_file.write('Testing Set Accuracy Cost\n')
the_file.write(str(test_acc_loss/itrs)+ '\n')
the_file.write('total time of last test epoch\n')
the_file.write(str(test_time_per_epoch) + '\n')
the_file.write('time per sample\n')
the_file.write(str(test_time_per_sample) + '\n')
the_file.write('\n')
the_file.close()
if epoch == epochs-1:
test_result.extend(final_output)
# plot results
print('saving train_test plots')
train_test = pd.read_csv(save_folder_path + 'ecoch_res_df_' +'.csv')
# train_test = train_test.loc[:, ~train_test.columns.str.contains('^Unnamed')]
train_test[['train_loss', 'test_loss']].plot()
plt.savefig(save_folder_path + 'total_loss_inprogress.png')
train_test[['train_acc', 'test_acc']].plot()
plt.savefig(save_folder_path + 'acc_loss_inprogress.png')
plt.close()
output = np.array(test_result)
return output
class Conv3D:
def __init__(self, train_obj, train_arr, test_arr, intersect_pos_set,
train_latent_arr, test_latent_arr,
demo_mask_arr,
save_path,
HEIGHT, WIDTH, TIMESTEPS, BIKE_CHANNEL,
BATCH_SIZE, TRAINING_STEPS, LEARNING_RATE,
is_inference = False, checkpoint_path = None,
resume_training = False, train_dir = None
):
self.train_obj = train_obj
self.train_df = train_obj.train_df
self.test_df = train_obj.test_df
self.train_arr = train_arr
self.test_arr = test_arr
self.intersect_pos_set = intersect_pos_set
self.demo_mask_arr = demo_mask_arr
self.latent_train_series = train_latent_arr
self.latent_test_series = test_latent_arr
self.save_path = save_path
globals()['HEIGHT'] = HEIGHT
globals()['WIDTH'] = WIDTH
globals()['TIMESTEPS'] = TIMESTEPS
globals()['BIKE_CHANNEL'] = BIKE_CHANNEL
globals()['BATCH_SIZE'] = BATCH_SIZE
globals()['TRAINING_STEPS'] = TRAINING_STEPS
globals()['LEARNING_RATE'] = LEARNING_RATE
globals()['LATENT_CHANNEL'] = self.latent_test_series.shape[-1]
self.is_inference = is_inference
self.checkpoint_path = checkpoint_path
self.resume_training = resume_training
self.train_dir = train_dir
self.test_df_cut = self.test_df.loc[:,self.test_df.columns.isin(list(self.intersect_pos_set))]
if is_inference == False:
if resume_training == False:
# get prediction results
print('training from scratch, and get prediction results')
# predicted_vals: (552, 30, 30, 1)
self.predicted_vals = self.run_conv3d()
np.save(self.save_path +'prediction_arr.npy', self.predicted_vals)
else:
# resume training
print("resume training, and get prediction results")
self.predicted_vals = self.run_resume_training()
np.save(self.save_path +'resumed_prediction_arr.npy', self.predicted_vals)
else:
# inference only
print('get inference results')
self.predicted_vals = self.run_inference()
np.save(self.save_path +'inference_arr.npy', self.predicted_vals)
self.evaluation()
self.conv3d_predicted = self.arr_to_df()
# run training and testing together
def run_conv3d(self):
tf.reset_default_graph()
predictor = Conv3DPredictor(self.intersect_pos_set,
self.demo_mask_arr, channel=BIKE_CHANNEL, time_steps=TIMESTEPS, height=HEIGHT, width = WIDTH,
)
self.train_data = generateData(self.train_arr, TIMESTEPS, BATCH_SIZE)
self.test_data = generateData(self.test_arr, TIMESTEPS, BATCH_SIZE)
self.train_latent = generateData(self.latent_train_series, TIMESTEPS, BATCH_SIZE)
self.test_latent = generateData(self.latent_test_series, TIMESTEPS, BATCH_SIZE)
self.train_lat = np.squeeze(self.train_latent.X, axis = 4)
self.test_lat= np.squeeze(self.test_latent.X, axis = 4)
predicted_vals = predictor.train_neural_network(self.train_data.X, self.train_data.y,
self.test_data.X, self.test_data.y,
self.demo_mask_arr,
self.train_lat, self.test_lat,
self.save_path,
epochs=TRAINING_STEPS, batch_size=BATCH_SIZE)
predicted = predicted_vals.flatten()
y = self.test_data.y.flatten()
rmse = np.sqrt((np.asarray((np.subtract(predicted, y))) ** 2).mean())
mae = mean_absolute_error(predicted, y)
return predicted_vals
# run training and testing together
def run_resume_training(self):
tf.reset_default_graph()
predictor = Conv3DPredictor(self.intersect_pos_set,
self.demo_mask_arr, channel=BIKE_CHANNEL, time_steps=TIMESTEPS, height=HEIGHT, width = WIDTH,
)
self.train_data = generateData(self.train_arr, TIMESTEPS, BATCH_SIZE)
self.test_data = generateData(self.test_arr, TIMESTEPS, BATCH_SIZE)
self.train_latent = generateData(self.latent_train_series, TIMESTEPS, BATCH_SIZE)
self.test_latent = generateData(self.latent_test_series, TIMESTEPS, BATCH_SIZE)
self.train_lat = np.squeeze(self.train_latent.X, axis = 4)
self.test_lat= np.squeeze(self.test_latent.X, axis = 4)
predicted_vals = predictor.train_neural_network(self.train_data.X, self.train_data.y,
self.test_data.X, self.test_data.y,
self.demo_mask_arr,
self.train_lat, self.test_lat,
self.save_path,
                            resume_training=True, checkpoint_path=self.checkpoint_path,
epochs=TRAINING_STEPS, batch_size=BATCH_SIZE)
predicted = predicted_vals.flatten()
y = self.test_data.y.flatten()
rmse = np.sqrt((np.asarray((np.subtract(predicted, y))) ** 2).mean())
mae = mean_absolute_error(predicted, y)
return predicted_vals
# evaluate rmse and mae with grids that intersect with city boundary
def evaluation(self):
sample_pred_squeeze = np.squeeze(self.predicted_vals)
test_squeeze = np.squeeze(self.test_data.y)
pred_shape = self.predicted_vals.shape
mse = 0
mae = 0
count = 0
for i in range(0, pred_shape[0]):
temp_image = sample_pred_squeeze[i]
test_image = test_squeeze[i]
# rotate
temp_rot = np.rot90(temp_image, axes=(1,0))
test_rot= np.rot90(test_image, axes=(1,0))
for c in range(pred_shape[1]):
for r in range(pred_shape[2]):
temp_str = str(r)+'_'+str(c)
if temp_str in self.intersect_pos_set:
#print('temp_str: ', temp_str)
count +=1
mse += (test_rot[r][c] - temp_rot[r][c]) ** 2
mae += abs(test_rot[r][c] - temp_rot[r][c])
rmse = math.sqrt(mse / (pred_shape[0] * len(self.intersect_pos_set)))
mae = mae / (pred_shape[0] * len(self.intersect_pos_set))
# convert predicted result tensor back to pandas dataframe
def arr_to_df(self):
df_predicted = pd.DataFrame(np.nan,
index=self.test_df_cut[self.train_obj.predict_start_time: self.train_obj.predict_end_time].index,
columns=list(self.test_df_cut))
sample_pred_squeeze = np.squeeze(self.predicted_vals)
pred_shape = self.predicted_vals.shape
# loop through time stamps
for i in range(0, pred_shape[0]):
temp_image = sample_pred_squeeze[i]
temp_rot = np.rot90(temp_image, axes=(1,0))
dt = datetime_utils.str_to_datetime(self.train_obj.test_start_time) + datetime.timedelta(hours=i)
predicted_timestamp = dt+self.train_obj.window
predicted_timestamp_str = pd.to_datetime(datetime_utils.datetime_to_str(predicted_timestamp))
for c in range(pred_shape[1]):
for r in range(pred_shape[2]):
temp_str = str(r)+'_'+str(c)
if temp_str in self.intersect_pos_set:
df_predicted.loc[predicted_timestamp_str, temp_str] = temp_rot[r][c]
return df_predicted
|
import os
head = input("Enter beginning of file to be trimmed: ")
tail = input("Enter end of file to be trimmed (Do not include file extension) : ")
ext = input("Enter the file extension (.exe, .avi, .jpg, etc...): ")
if ext == "":
ext = ".txt"
if "." not in ext:
ext = "." + ext
for filename in os.listdir("."):
if filename.endswith(ext):
if filename.startswith(head):
os.rename(filename, filename[len(head):])
for filename in os.listdir("."):
if filename.endswith(ext):
if filename.endswith(tail + ext):
try:
os.rename(filename, filename[:-len(tail + ext)])
except FileNotFoundError:
print("Tried to create blank file name...")
                break
for filename in os.listdir("."):
if "." not in filename:
os.rename(filename, filename + ext)
for filename in os.listdir("."):
if filename.endswith(ext):
print(filename)
input("Press any key to exit...")
|
# A simple two state example, but with several separate decay rates and starting values, which are fitted individually
import StateModeling as stm
import numpy as np
NumMolecules = 16
M = stm.Model() # creates a new Model instance
M.addAxis("Different Molecules", NumMolecules)
sigma = 0.2
true_I0 = 1200 * np.random.normal(np.ones(NumMolecules), sigma)
I0 = M.newVariables({'I0': true_I0})
k = M.newVariables({'k': 0.02 * 1. / np.random.normal(np.ones(NumMolecules), sigma)})
M.newState(name='S0', axesInit={'Different Molecules': M.Axes['Different Molecules'].init(0)}) # ground state
M.newState(name='S1', axesInit={'Different Molecules': M.Axes['Different Molecules'].init(I0())}) # excited state. System starts in the excited state
M.addRate('S1', 'S0', 'k') # S1 --> S0 first order decay leading to a single exponential decay
M.addResult('detected', lambda State: State['S1']) # ('I', 'S')
M.toFit(['k', 'I0']) # fitting S1 works, but not fitting I0 !
# M.toFit(['k'])
# simulate data
Tmax = 80
measured = M.simulate('measured', {'detected': None}, Tmax=Tmax, applyPoisson=True)
# Fit with distorted starting values
M.relDistort({'k': 0.8, 'I0': 1.2})
distorted = M.simulate('distorted', {'detected': None}, Tmax=Tmax)
oparam={'noiseModel': 'Gaussian', "normFac": "max"} #
if True:
otype = "L-BFGS"
lossScale = None
else:
# lossScale = None
otype = "adagrad" # "adadelta" "SGD" "nesterov" "adam"
fittedVars, fittedRes = M.fit({'detected': measured}, Tmax, otype=otype, oparam=oparam, NIter=150, verbose=True, lossScale=lossScale)
M.compareFit()
M.showResults(ylabel='Intensity')
M.showStates()
|
Second Largest Value among N integers
The program must accept N integers and print the second largest value among the N integers.
Input Format:
The first line denotes the value of N.
Next N lines will contain the N integer values.
Output Format:
The first line contains the second largest integer.
Boundary Conditions:
2 <= N <= 100
The value of the integers will be from -999999 to 999999.
Example Input/Output 1:
Input:
3
100
2200
345
Output:
345
Example Input/Output 2:
Input:
6
-23
-256
-87
-90
-11019
-2
Output:
-23
n=int(input())
l=[]
for i in range(n):
    a=int(input())
    l.append(a)
l.sort()        # ascending order
print(l[-2])    # second largest: second element from the end of the sorted list
|
"""Script to plot 3-body ground state energies for the SRG run."""
import glob
import json
import matplotlib
import matplotlib.pyplot as plt
# matplotlib font configurations
matplotlib.rcParams["text.latex.preamble"] = [
# i need upright \micro symbols, but you need...
r"\usepackage{siunitx}",
# ...this to force siunitx to actually use your fonts
r"\sisetup{detect-all}",
# set the normal font here
r"\usepackage{helvet}",
# load up the sansmath so that math -> helvet
r"\usepackage{sansmath}",
# <- tricky! -- gotta actually tell tex to use!
r"\sansmath",
]
# Use tex for matplotlib
plt.rc("text", usetex=True)
# Set path for data
# pylint: disable=C0103
path = "./fig_data/{}"
# Height for figure
fig_height = 1.3
# Create figure
fig1, ax1 = plt.subplots()
# Load file
file = glob.glob(path.format("t_rel*eigen*.json"))[0]
with open(file) as f:
data = json.load(f)
# Get lambdas and values
lambdas = [float(key) for key in data]
vals = [data[key]["ho3"] for key in data]
# Plot data, once for points, one for the line
ax1.plot(lambdas, vals, "r.", label=r"$T_{rel}$")
ax1.plot(lambdas, vals, "r-")
# Load file
file = glob.glob(path.format("BlD*eigen*.json"))[0]
with open(file) as f:
data = json.load(f)
# Get lambdas and values
lambdas = [float(key) for key in data]
vals = [data[key]["ho3"] for key in data]
# Plot data, once for points, one for the line
ax1.plot(lambdas, vals, "b.", label=r"Block")
ax1.plot(lambdas, vals, "b-")
# Set x-axis scale and ticks
ax1.set_xscale("log")
ax1.set_xticks([1, 2, 5, 10, 20, 50])
ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
# Set axis labels and title
plt.xlabel(r"$\lambda$ (fm$^{-1}$)")
plt.ylabel(r"$E_3$ (MeV)")
plt.title("3-Body Binding Energy", x=0.4, y=1.05)
# Set y-axis limits
plt.ylim(-3.0, -2.5)
# Enable legend
plt.legend(loc="lower right", fontsize="small")
# Adjust margins
plt.gcf().subplots_adjust(left=0.30)
plt.gcf().subplots_adjust(right=0.99)
plt.gcf().subplots_adjust(top=0.85)
plt.gcf().subplots_adjust(bottom=0.20)
# Set size of plot
plt.gcf().set_size_inches(1.6 * fig_height, 1.6 * fig_height)
# Save as PDF
plt.savefig("eigenvalues.pdf")
|
import sys
sys.path.append('../Nets/')
from glob import glob
from os.path import join
from multiprocessing import Pool
from scipy.ndimage.interpolation import rotate
from keras.callbacks import ModelCheckpoint
from tqdm import tqdm
from functools import partial
from Nodule import *
from numpy import *
PATH = {
'DATA': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/TRAIN',
'DATA_OUT': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/TRAIN_OUT',
'CSV': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/CSV',
'LABELS': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/CSV/stage1_labels.csv',
'LUNA': '/fasthome/a.dobrenkii/LUNA',
'LUNA_DATA': '/fasthome/a.dobrenkii/LUNA/DATA',
'LUNA_SOBEL': '/fasthome/a.dobrenkii/LUNA/SOBEL_IMG',
'LUNA_LUNGS': '/fasthome/a.dobrenkii/LUNA/LUNGS_IMG',
'LUNA_MASKS': '/fasthome/a.dobrenkii/LUNA/MASKS',
'LUNA_CSV': '/fasthome/a.dobrenkii/LUNA/CSVFILES',
'LUNA_PRED': '/fasthome/a.dobrenkii/LUNA/PRED',
'PATCH_PATHS': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/LUNA/OUT/PATCHES',
'LUNA_NODULES': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/LUNA/OUT/PATCHES/NODULES',
'LUNA_VESSELS': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/LUNA/OUT/PATCHES/VESSELS',
'WEIGHTS': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/WEIGHTS',
'CHECKPOINTS': '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data'
}
CPU = 10
ANGLE = 35
SHIFT = 4
SHAPE = (8, 64, 64)
TRAIN_TEST = .2
NB_EPOCH = 50
model, bottle_neck = dim_concentration()
model.compile('adam', 'mse')
def augment_patch(patch, shape, angle=15, shift=4):
if angle:
shift = random.randint(-shift, shift, 3)
patch = rotate(patch, random.uniform(-angle, angle), axes=[1,2])
patch = rotate(patch, random.uniform(-angle, angle), axes=[0,1])
patch = rotate(patch, random.uniform(-angle, angle), axes=[0,2])
center = (array(patch.shape) // 2) + shift
left = array(shape) // 2
right = array(shape) - left
patch = patch[center[0] - left[0]:center[0] + right[0],
center[1] - left[1]:center[1] + right[1],
center[2] - left[2]:center[2] + right[2]]
mn = patch.min()
mx = patch.max()
if (mx - mn) != 0:
patch = (patch - mn) / (mx - mn)
else:
patch[:, :, :] = 0.
return patch
def batch_generator(patch_paths, batch_size, shape=(8, 64, 64), angle=15, shift=4, CPU=4):
number_of_batches = ceil(len(patch_paths) / batch_size)
counter = 0
random.shuffle(patch_paths)
while True:
batch_files = patch_paths[batch_size * counter:batch_size * (counter + 1)]
patch_list = [load(patch_path) for patch_path in batch_files]
augment = partial(augment_patch, shape=shape, angle=angle, shift=shift)
with Pool(CPU) as pool:
patch_list = pool.map(augment, patch_list)
counter += 1
yield expand_dims(array(patch_list), 1), expand_dims(array(patch_list), 1)
if counter == number_of_batches:
random.shuffle(patch_paths)
counter = 0
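# Each yielded batch is an (inputs, targets) pair of identical arrays shaped
# [batch, 1, 8, 64, 64]: the 3D convolutional autoencoder is trained to
# reconstruct its own input patch.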
# patch_paths = glob(join(PATH['LUNA_NODULES'], '*_patch.npy'))
# patch_paths += glob(join(PATH['LUNA_VESSELS'], '*_patch.npy'))
# shuffle(patch_paths)
# save(join(PATH['PATCH_PATHS'], 'LUNA'), array(patch_paths))
patch_paths = load(join(PATH['PATCH_PATHS'], 'LUNA.npy'))
train = patch_paths[int(len(patch_paths) * TRAIN_TEST):]
valid = patch_paths[:int(len(patch_paths) * TRAIN_TEST)]
SAMPLES_PER_EPOCH = len(train)
NB_VAL_SAMPLES = len(valid)
train_generator = batch_generator(train,
batch_size=32,
shape=SHAPE,
angle=ANGLE,
shift=SHIFT,
CPU=CPU)
valid_generator = batch_generator(valid,
batch_size=32,
shape=SHAPE,
angle=0,
shift=0,
CPU=CPU)
checkpoint = ModelCheckpoint(filepath=join(PATH['WEIGHTS'], '3DCAE_nodule_model'),
verbose=1,
save_best_only=True)
model.fit_generator(train_generator,
                    samples_per_epoch=SAMPLES_PER_EPOCH,
                    nb_epoch=NB_EPOCH,
                    callbacks=[checkpoint],
                    validation_data=valid_generator,
                    class_weight=None,
                    nb_val_samples=NB_VAL_SAMPLES,
                    nb_worker=1) |
"""Module for raceplan adapter."""
import logging
from typing import Any, List, Optional
class RaceplansAdapter:
"""Class representing an adapter for raceplans."""
@classmethod
async def get_all_raceplans(cls: Any, db: Any) -> List[dict]: # pragma: no cover
"""Get all raceplans function."""
raceplans: List = []
cursor = db.raceplans_collection.find()
for raceplan in await cursor.to_list(None):
raceplans.append(raceplan)
logging.debug(raceplan)
return raceplans
@classmethod
async def create_raceplan(
cls: Any, db: Any, raceplan: dict
) -> str: # pragma: no cover
"""Create raceplan function."""
result = await db.raceplans_collection.insert_one(raceplan)
return result
@classmethod
async def get_raceplan_by_id(
cls: Any, db: Any, id: str
) -> dict: # pragma: no cover
"""Get raceplan function."""
result = await db.raceplans_collection.find_one({"id": id})
return result
@classmethod
async def get_raceplan_by_event_id(
cls: Any, db: Any, event_id: str
) -> List[dict]: # pragma: no cover
"""Get raceplan by event_id function."""
raceplans: List = []
result = await db.raceplans_collection.find_one({"event_id": event_id})
if result:
raceplans.append(result)
return raceplans
@classmethod
async def update_raceplan(
cls: Any, db: Any, id: str, raceplan: dict
) -> Optional[str]: # pragma: no cover
"""Get raceplan function."""
result = await db.raceplans_collection.replace_one({"id": id}, raceplan)
return result
@classmethod
async def delete_raceplan(
cls: Any, db: Any, id: str
) -> Optional[str]: # pragma: no cover
"""Get raceplan function."""
result = await db.raceplans_collection.delete_one({"id": id})
return result
|
#!/usr/bin/env python
# Step 1 - Get a list of your Webex Teams Rooms with the token
import requests, os
from pprint import pprint
token = os.getenv('WEBEX_TOKEN')
headers = {'Content-Type':'application/json','Authorization':f'Bearer {token}'}
get_url = 'https://webexapis.com/v1/rooms'
get_response = requests.get(get_url, headers=headers)
pprint(get_response.json()) # display all rooms
print() # blank line between output
pprint(get_response.json()['items'][0]) # display the first room in the response
# Step 2 - Create a new Webex Teams Room
new_room = {'title':'New Python Room'}
post_url = 'https://webexapis.com/v1/rooms'
post_response = requests.post(post_url, headers=headers, json=new_room)
print(f'{post_response.status_code} {post_response.reason}')
pprint(post_response.json())
# Step 3 - Post a message to the new Webex Teams Room
new_msg = {'roomId':post_response.json()['id'],'text':'Hello from Python'}
msg_url = 'https://webexapis.com/v1/messages'
msg_response = requests.post(msg_url, headers=headers, json=new_msg)
print(f'{msg_response.status_code} {msg_response.reason}')
pprint(msg_response.json())
|
import time
from pathlib import Path
import superannotate as sa
from .common import upload_project
PROJECT_NAME1 = "test_get_exports1"
PROJECT_FOLDER = Path("tests") / "sample_project_vector"
def test_get_exports(tmpdir):
tmpdir = Path(tmpdir)
project = upload_project(
PROJECT_FOLDER,
PROJECT_NAME1,
'gg',
'Vector',
annotation_status='QualityCheck'
)
time.sleep(2)
# projects_found = sa.search_projects(PROJECT_NAME1, return_metadata=True)
# for pr in projects_found:
# sa.delete_project(pr)
# project = sa.create_project(PROJECT_NAME1, "gg", "Vector")
# sa.upload_images_from_folder_to_project(
# project,
# "./tests/sample_project_vector/",
# annotation_status="QualityCheck"
# )
# sa.create_annotation_classes_from_classes_json(
# project,
# "./tests/sample_project_vector/classes/classes.json",
# )
# sa.upload_annotations_from_folder_to_project(
# project,
# "./tests/sample_project_vector/",
# )
exports_old = sa.get_exports(project)
export = sa.prepare_export(project)
time.sleep(2)
sa.download_export(project, export["name"], tmpdir)
js = list(tmpdir.glob("*.json"))
assert len(js) == 4
exports_new = sa.get_exports(project)
assert len(exports_new) == len(exports_old) + 1
|
__version__ = '3.15.0dev'
|
from abc import ABC, abstractmethod
from dataclasses import dataclass
from math import radians, sin, cos
from math import pi as π
from typing import Tuple, Union
from numba import njit
import numpy as np
from zodipy._functions import interplanetary_temperature, blackbody_emission
@dataclass
class Component(ABC):
"""Base class for an Interplanetary Dust Component.
This class defines a method for getting the coordinates to a shell at
distance R around an observer in the primed coordinate system
(component-centric ecliptic cartesian coordinates).
Attributes
----------
x_0
        x-offset from the Sun in heliocentric ecliptic cartesian coordinates.
    y_0
        y-offset from the Sun in heliocentric ecliptic cartesian coordinates.
    z_0
        z-offset from the Sun in heliocentric ecliptic cartesian coordinates.
i
Inclination with respect to the ecliptic plane [deg].
Ω
Ascending node [deg].
"""
x_0: float
y_0: float
z_0: float
i: float
Ω: float
def __post_init__(self) -> None:
self.i = radians(self.i)
self.Ω = radians(self.Ω)
self.X_component = np.expand_dims([self.x_0, self.y_0, self.z_0], axis=1)
@abstractmethod
def get_density(
self, R_prime: np.ndarray, Z_prime: np.ndarray, *, θ_prime: np.ndarray
) -> np.ndarray:
"""Returns the dust density at a shell around the observer.
Parameters
----------
R_prime
Array of distances corresponding to discrete points along a
line-of-sight for a shell surrounding the observer in the primed
coordinates.
Z_prime
Heights above the midplane in primed coordinates of a component
corresponding to the distances in `R_prime`.
θ_prime
            Relative mean longitude between the discrete points along the
            line-of-sight described by `R_prime` and the Earth.
Returns
-------
Density of the component at the coordinates given by R_prime,
Z_prime, and θ_prime.
"""
@staticmethod
@njit
def get_coordinates(
R_comp: Union[float, np.ndarray],
X_observer: np.ndarray,
X_earth: np.ndarray,
X_unit: np.ndarray,
X_component: np.ndarray,
Ω_component: float,
i_component: float,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Returns a set of coordinates for a component.
Given a set of heliocentric ecliptic positions in space given by the
unit vectors `X_unit` and `R_comp`, we compute the `R_helio`, `R_prime`,
        `Z_prime`, and `θ_prime` coordinates as seen by an observer whose
        heliocentric ecliptic coordinates are given by `X_observer`. These are
        the coordinates required by the Interplanetary Dust Model to evaluate
        the density of a Zodiacal Component at the given positions.
Parameters
----------
R_comp
Distance R to a shell centered on the observer at which we want
to evaluate the Zodiacal emission at.
X_observer
Heliocentric ecliptic cartesian coordinates of the observer.
X_earth
Heliocentric ecliptic cartesian coordinates of the Earth.
X_unit
Heliocentric ecliptic cartesian unit vectors pointing to each
            position in space that we consider.
X_component
Heliocentric ecliptic cartesian off-set of the component
(x_0, y_0, z_0).
Ω_component
Ascending node of the component.
i_component
Inclination of the component.
Returns
-------
R_helio
Array of distances corresponding to discrete points along a
line-of-sight for a shell surrounding the observer in heliocentric
ecliptic coordinates.
R_prime
Array of distances corresponding to discrete points along a
line-of-sight for a shell surrounding the observer in the primed
coordinates.
Z_prime
Heights above the midplane in primed coordinates of a component
corresponding to the distances in `R_prime`.
θ_prime
            Relative mean longitude between the discrete points along the
            line-of-sight described by `R_prime` and the Earth.
"""
X_helio = R_comp * X_unit + X_observer
R_helio = np.sqrt(X_helio[0] ** 2 + X_helio[1] ** 2 + X_helio[2] ** 2)
X_prime = X_helio - X_component
R_prime = np.sqrt(X_prime[0] ** 2 + X_prime[1] ** 2 + X_prime[2] ** 2)
Z_prime = (
X_prime[0] * sin(Ω_component) * sin(i_component)
- X_prime[1] * cos(Ω_component) * sin(i_component)
+ X_prime[2] * cos(i_component)
)
X_earth_prime = X_earth - X_component[0]
θ_prime = np.arctan2(X_prime[1], X_prime[0]) - np.arctan2(
X_earth_prime[1], X_earth_prime[0]
)
return R_helio, R_prime, Z_prime, θ_prime
def get_emission(
self,
distance_to_shell: Union[float, np.ndarray],
observer_position: np.ndarray,
earth_position: np.ndarray,
unit_vectors: np.ndarray,
freq: float,
) -> np.ndarray:
"""Returns the emission at a shell of distance R from the observer.
        For a description of X_observer, X_earth, X_unit and R, please
        see the get_coordinates function.
Parameters
----------
distance_to_shell
Distance R to a shell centered on the observer for which we want
to evaluate the Zodiacal emission.
observer_position
Heliocentric ecliptic cartesian coordinates of the observer.
earth_position
Heliocentric ecliptic cartesian coordinates of the Earth.
unit_vectors
Heliocentric ecliptic cartesian unit vectors pointing to each
            position in space that we consider.
freq
Frequency at which to evaluate the Zodiacal emission.
Returns
-------
        emission
            Array containing the Zodiacal emission emitted from a shell at
            distance R from the observer. The shape is (len(R), `NPIX`).
"""
observer_position = np.expand_dims(observer_position, axis=1)
R_helio, R_prime, Z_prime, θ_prime = self.get_coordinates(
R_comp=distance_to_shell,
X_observer=observer_position,
X_earth=earth_position,
X_unit=unit_vectors,
X_component=self.X_component,
Ω_component=self.Ω,
i_component=self.i,
)
density = self.get_density(
R_prime=R_prime,
Z_prime=Z_prime,
θ_prime=θ_prime,
)
temperature = interplanetary_temperature(R=R_helio)
emission = blackbody_emission(T=temperature, ν=freq)
return emission * density
@dataclass
class Cloud(Component):
"""The Zodiacal Diffuse Cloud component.
This class represents the diffuse cloud in the K98 IPD model. It provides a
method to estimate the density of the diffuse cloud at a shell around the
observer.
Attributes
----------
n_0
Density at 1 AU.
α
Radial power-law exponent.
β
Vertical shape parameter.
γ
Vertical power-law exponent.
μ
Widening parameter for the modified fan.
"""
n_0: float
α: float
β: float
γ: float
μ: float
def __post_init__(self) -> None:
super().__post_init__()
def get_density(self, R_prime: np.ndarray, Z_prime: np.ndarray, **_) -> np.ndarray:
"""See base class for documentation."""
ζ = np.abs(Z_prime) / R_prime
μ = self.μ
g = np.zeros_like(ζ)
condition = ζ < μ
g[condition] = ζ[condition] ** 2 / (2 * μ)
g[~condition] = ζ[~condition] - (μ / 2)
        return self.n_0 * R_prime ** -self.α * np.exp(-self.β * g ** self.γ)
@dataclass
class Band(Component):
"""The Zodiacal Astroidal Band component.
This class represents the Astroidal Dust Band components in the K98 IPD
model. It provides a method to estimate the density of the dust bands at a
shell around the observer.
Attributes
----------
n_0
Density at 3 AU.
δ_ζ
Shape parameter [deg].
v
Shape parameter.
p
Shape parameter.
δ_r
Inner radial cutoff.
"""
n_0: float
δ_ζ: float
v: float
p: float
δ_r: float
def __post_init__(self) -> None:
super().__post_init__()
self.δ_ζ = radians(self.δ_ζ)
def get_density(self, R_prime: np.ndarray, Z_prime: np.ndarray, **_) -> np.ndarray:
"""See base class for documentation."""
ζ = np.abs(Z_prime) / R_prime
ζ_over_δ_ζ = ζ / self.δ_ζ
term1 = (3 * self.n_0 / R_prime) * np.exp(-((ζ_over_δ_ζ) ** 6))
term2 = 1 + ((ζ_over_δ_ζ) ** self.p) / self.v
term3 = 1 - np.exp(-((R_prime / self.δ_r) ** 20))
return term1 * term2 * term3
@dataclass
class Ring(Component):
"""The Zodiacal Circum-solar Ring component.
This class represents the Circum-solar Ring component in the K98 IPD model.
It provides a method to estimate the density of the Circum-solar Ring at a
shell around the observer.
Attributes
----------
n_0
Density at 1 AU.
R
Radius of the peak density.
σ_r
Radial dispersion.
σ_z
Vertical dispersion.
"""
n_0: float
R: float
σ_r: float
σ_z: float
def __post_init__(self) -> None:
super().__post_init__()
def get_density(self, R_prime: np.ndarray, Z_prime: np.ndarray, **_) -> np.ndarray:
"""See base class for documentation."""
term1 = -(((R_prime - self.R) / self.σ_r) ** 2)
term2 = np.abs(Z_prime) / self.σ_z
return self.n_0 * np.exp(term1 - term2)
@dataclass
class Feature(Component):
"""The Zodiacal Earth-trailing Feature component.
This class represents the Earth-trailing Feature component in the K98 IPD
model. It provides a method to estimate the density of the Earth-trailing
Feature at a shell around the observer.
Attributes
----------
n_0
Density at 1 AU.
R
Radius of the peak density.
σ_r
Radial dispersion.
σ_z
Vertical dispersion.
θ
Longitude with respect to Earth.
σ_θ
Longitude dispersion.
"""
n_0: float
R: float
σ_r: float
σ_z: float
θ: float
σ_θ: float
def __post_init__(self) -> None:
super().__post_init__()
self.θ = radians(self.θ)
self.σ_θ = radians(self.σ_θ)
def get_density(
self, R_prime: np.ndarray, Z_prime: np.ndarray, θ_prime: np.ndarray
) -> np.ndarray:
"""See base class for documentation."""
Δθ = θ_prime - self.θ
condition1 = Δθ < -π
condition2 = Δθ > π
Δθ[condition1] = Δθ[condition1] + 2 * π
Δθ[condition2] = Δθ[condition2] - 2 * π
term1 = -(((R_prime - self.R) / self.σ_r) ** 2)
term2 = np.abs(Z_prime) / self.σ_z
term3 = (Δθ / self.σ_θ) ** 2
return self.n_0 * np.exp(term1 - term2 - term3)
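# Minimal usage sketch: the parameter values below are illustrative placeholders
# (assumed, not the fitted K98 values) and only demonstrate how a component's
# density can be queried.
if __name__ == "__main__":
    cloud = Cloud(
        x_0=0.0, y_0=0.0, z_0=0.0, i=2.0, Ω=78.0,
        n_0=1e-7, α=1.3, β=4.1, γ=0.94, μ=0.19,
    )
    R_prime = np.linspace(0.5, 3.0, 5)  # radial distances in the primed frame [AU]
    Z_prime = np.zeros_like(R_prime)    # heights above the component midplane
    print(cloud.get_density(R_prime=R_prime, Z_prime=Z_prime))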
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 15:58:28 2020
@author: Andrea
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
from mpl_toolkits.basemap import Basemap
#from mpi4py import MPI
import pandapower as pp
import pandapower.networks
import sys
import time
import random
import itertools
import networkx as nx
import multiprocessing
import pandapower.plotting as pplt
import math
#import geopy.distance
#from geopy.point import Point
from cascade_power import OPA_model
import collections
import copy
###################################### INITIALIZE RAILWAY AND POWER NETWORKS #################
list_station = [
'Glasgow', 'Edinburgh', 'Newcastle', 'Tees Valley',
'Leeds', 'Sheffield', 'Nottingham', 'Cambridge',
'Stansted', 'London', 'Heathrow', 'Bristol',
'Cardiff', 'Birmingham', 'Manchester', 'Liverpool'
]
list_railway = [
(0, 1), (0, 14), (0, 13), (1, 2), (1, 13),
(2, 3), (3, 4), (4, 5), (4, 14),
(5, 6), (5, 14), (6, 7), (7, 8),
(7, 9), (8, 9), (9, 10), (9, 13),
(10, 11), (11, 12), (13, 14), (14,15)
]
net = pp.networks.GBreducednetwork()
G=pp.topology.create_nxgraph(net)
pp.replace_sgen_by_gen(net)
list_ext_grid = list(net.ext_grid.index)
initial_slack = pp.replace_ext_grid_by_gen(net, list_ext_grid, slack=True)
net.line.loc[:,'max_loading_percent'] = 100
net.trafo.loc[:,'max_loading_percent'] = 100
net.load.loc[:,'max_p_mw'] = net.load.p_mw
net.load.loc[:,'min_p_mw'] = 0
net.gen.loc[:,'max_p_mw'] = net.gen.max_p_mw/1.438
net.gen.loc[:,'p_mw'] = net.gen.max_p_mw
list_load_bus = list(net.load.bus)
list_load_bus.sort()
net.load.loc[:,'controllable'] = True
net.poly_cost.loc[:,'cp0_eur'] = 0
net.poly_cost.loc[:,'cp1_eur_per_mw'] = 1
net.poly_cost.loc[:,'cp2_eur_per_mw2'] = 0
list_load =list(net.load.index)
for load in list_load:
globals()['costload%s' %load] = pp.create_poly_cost(net, load, 'load',cp0_eur=0,
cp1_eur_per_mw = -100,cp2_eur_per_mw2=0)
initial_state_load = dict(net.load.loc[:,'p_mw'])
order_dict_load = collections.OrderedDict(sorted(initial_state_load.items()))
initial_load_power = list(order_dict_load.values())
# initial_interdep_power = power to subtract to each load when one of the corresponding station is failed
list_interdependencies = [4, 3, 9, 9, 14, 13, 16, 20, 20, 24, 24, 22, 28, 17, 12, 11]
set_interdependencies = list(set(list_interdependencies))
initial_interdep_power = np.zeros(len(initial_load_power))
for i,l in enumerate(initial_load_power):
counter = list_interdependencies.count(i)
if counter > 0:
initial_interdep_power[i] = l/counter
else:
initial_interdep_power[i] = 0
n_bus = len(list(net.bus.index))
n_line = len(list(net.line.index))
n_trafo = len(list(net.trafo.index))
n_comp_power = n_bus + n_line + n_trafo
list_components_power = list(range(n_comp_power))
n_station = 16
n_railway = 21
n_comp_rail = n_station + n_railway
list_components_rail = list(range(n_comp_rail))
# Define if the component n is a bus, a normal line or a line with a trafo
def power_failed_element(n, f_bus, f_line, f_trafo):
lim_1 = n_bus
lim_2 = lim_1 + n_line
if n < lim_1:
f_bus += [n]
elif n >= lim_1 and n < lim_2:
f_line += [n - lim_1]
elif n >= lim_2:
f_trafo += [n - lim_2]
return(f_bus, f_line, f_trafo)
# Define if the component n is a station or a railway
def rail_failed_element(n, f_station, f_rail):
lim_1 = n_station
if n < lim_1:
f_station += [n]
elif n >= lim_1:
f_rail += [n - lim_1]
return(f_station, f_rail)
# Adjacency matrix railway
adj_matrix_rail = np.zeros((len(list_station), len(list_station)))
for i in list_railway:
adj_matrix_rail[i[0]][i[1]] = 1
adj_matrix_rail[i[1]][i[0]] = 1
# This function computes the accessibility of a railway network given its adjacency matrix
def accessibility(matrix):
G = nx.convert_matrix.from_numpy_array(matrix)
if nx.is_connected(G):
C = 1
else:
conn = 0
nominal_conn = math.comb(len(matrix[0]), 2)
islands = list(nx.connected_components(G))
for i in islands:
#print(islands)
couples = math.comb(len(i), 2)
conn += couples
C = conn/nominal_conn
return(C)
power_threshold = 0.5
n_exp = 500
matrix_vulnerability_rail = np.zeros((11,11))
matrix_vulnerability_rail_std = np.zeros((11,11))
matrix_vulnerability_power = np.zeros((11,11))
matrix_vulnerability_power_std = np.zeros((11,11))
idx_frac_rail = 0
frazioni = list(np.arange(0.0,1.1,0.1))
for idx_frac_rail, frac_rail in enumerate(frazioni[:]):
for idx_frac_power, frac_power in enumerate(frazioni[:]):
vuln_frac_power = np.zeros(n_exp)
vuln_frac_rail = np.zeros(n_exp)
exp = 0
while exp < n_exp:
print(frac_rail, frac_power, exp)
# Define elements to remove in both the networks
n_failures_power = int(n_comp_power*frac_power)
list_failures_power = random.sample(list_components_power, k=n_failures_power)
n_failures_rail = int(n_comp_rail*frac_rail)
list_failures_rail = random.sample(list_components_rail, k=n_failures_rail)
f_bus = []; f_line = []; f_trafo = []; f_station = []; f_rail = []
for f in list_failures_power:
f_bus, f_line, f_trafo = power_failed_element(f, f_bus, f_line, f_trafo)
for f in list_failures_rail:
f_station, f_rail = rail_failed_element(f, f_station, f_rail)
# Initialize networks
new_matrix = np.copy(adj_matrix_rail)
net_GB = copy.deepcopy(net)
# Remove elements from railway network
for f in f_station:
new_matrix[f, :] = 0
new_matrix[:, f] = 0
for f in f_rail:
new_matrix[list_railway[f][0], list_railway[f][1]] = 0
new_matrix[list_railway[f][1], list_railway[f][0]] = 0
G = nx.convert_matrix.from_numpy_array(new_matrix)
# Cascading failures in railway network
islands = list(nx.connected_components(G))
for isl in islands:
isl = list(isl)
if len(isl) == 1:
if isl[0] not in f_station:
f_station += [isl[0]]
f_station = list(set(f_station))
# Adjust loads powers
for f in f_station:
load_sh = list_interdependencies[f]
net_GB.load.loc[load_sh, 'p_mw'] -= initial_interdep_power[load_sh]
net_GB.load.loc[load_sh, 'max_p_mw'] -= initial_interdep_power[load_sh]
if net_GB.load.loc[load_sh, 'p_mw'] < 0:
net_GB.load.loc[load_sh, 'p_mw'] = 0
if net_GB.load.loc[load_sh, 'max_p_mw'] < 0:
net_GB.load.loc[load_sh, 'max_p_mw'] = 0
# Run cascading failures in power networks
initial_state_load = dict(net_GB.load.loc[:,'p_mw'])
order_dict_load = collections.OrderedDict(sorted(initial_state_load.items()))
new_initial_load_power = list(order_dict_load.values())
for i,j in enumerate(new_initial_load_power):
if j == 0:
new_initial_load_power[i] = 0
g,l = OPA_model(net_GB, f_bus, [], [], f_line, f_trafo)
l = np.around(l, 2)
load_shedding = np.zeros(len(new_initial_load_power))
for i,j in enumerate(l):
if new_initial_load_power[i] > 0:
load_shedding[i] = j/new_initial_load_power[i]
else:
load_shedding[i] = 1
load_shedding = np.around(load_shedding, 2)
'''
print(sum(load_shedding)/len(load_shedding))
print(new_initial_load_power)
print(l)
'''
# Cascading failures between networks
for i,j in enumerate(list_interdependencies):
if load_shedding[j] < power_threshold or load_shedding[j] == 0:
if i not in f_station:
f_station += [i]
# Compute vulnerabilities
for f in f_station:
new_matrix[f, :] = 0
new_matrix[:, f] = 0
for f in f_rail:
new_matrix[list_railway[f][0], list_railway[f][1]] = 0
new_matrix[list_railway[f][1], list_railway[f][0]] = 0
delta_conn = 1 - accessibility(new_matrix)
vuln_frac_rail[exp] = delta_conn
if sum(new_initial_load_power) > 0:
vuln_power_shed = sum(l)/sum(new_initial_load_power)
else:
vuln_power_shed = 1
vuln_frac_power[exp] = 1-vuln_power_shed
exp += 1
v_rail = sum(vuln_frac_rail)/n_exp
v_power = sum(vuln_frac_power)/n_exp
matrix_vulnerability_rail[idx_frac_rail][idx_frac_power] = v_rail
matrix_vulnerability_power[idx_frac_rail][idx_frac_power] = v_power
matrix_vulnerability_rail_std[idx_frac_rail][idx_frac_power] = np.std(vuln_frac_rail)
matrix_vulnerability_power_std[idx_frac_rail][idx_frac_power] = np.std(vuln_frac_power)
'''
matrix_vulnerability_rail[-1,:] = 1
matrix_vulnerability_rail[:,-1] = 1
matrix_vulnerability_power[:,-1] = 1
'''
np.save(os.path.join('vuln_pr', 'matrix_vulnerability_rail_'+str(int(power_threshold*100))),
matrix_vulnerability_rail)
np.save(os.path.join('vuln_pr', 'matrix_vulnerability_power_'+str(int(power_threshold*100))),
matrix_vulnerability_power)
np.save(os.path.join('vuln_pr', 'std_matrix_vulnerability_rail_'+str(int(power_threshold*100))),
matrix_vulnerability_rail_std)
np.save(os.path.join('vuln_pr', 'std_matrix_vulnerability_power_'+str(int(power_threshold*100))),
matrix_vulnerability_power_std)
|
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
class _SimpleSegmentationModel(nn.Module):
__constants__ = ["aux_classifier"]
def __init__(self, backbone, classifier, aux_classifier=None):
super(_SimpleSegmentationModel, self).__init__()
self.backbone = backbone
self.classifier = classifier
self.aux_classifier = aux_classifier
def logits(self, **kargs):
x = kargs["x"].float()
if "extra" in kargs:
extra = kargs["extra"].float()
x = torch.cat([x, extra], axis=1)
x = 2 * x / 255 - 1
input_shape = x.shape[-2:]
# contract: features is a dict of tensors
features = self.backbone(x)
result = OrderedDict()
x = features["out"]
x = self.classifier(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)
result["out"] = x
if self.aux_classifier is not None:
x = features["aux"]
x = self.aux_classifier(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=False)
result["aux"] = x
return result
else:
return x
def forward(self, **kargs):
x = self.logits(**kargs)
x = (x + 1) / 2
y = kargs["y"].float() / 255
loss = F.mse_loss(x, y)
return loss
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import requests
import aiohttp
import asyncio
import datetime
import json
from os import getenv
from urllib.parse import urlparse, urlunparse
from urllib3.util import Retry
class DistributedApiTaskManager:
def __init__(self):
self.cache_connector_upsert_url = getenv('CACHE_CONNECTOR_UPSERT_URI')
self.cache_connector_get_url = getenv('CACHE_CONNECTOR_GET_URI')
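    # The synchronous methods below drive the aiohttp coroutines to completion with
    # run_until_complete and return the parsed JSON response as a dict.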
def AddTask(self):
ret = asyncio.get_event_loop().run_until_complete(self.AddTaskAsync())
return json.loads(ret)
async def AddTaskAsync(self):
async with aiohttp.ClientSession() as session:
async with session.post(self.cache_connector_upsert_url) as resp:
if resp.status != 200:
return '{"TaskId": "-1", "Status": "error"}'
else:
return await resp.text("UTF-8")
def _UpdateTaskStatus(self, taskId, status, backendStatus):
old_stat = self.GetTaskStatus(taskId)
endpoint = 'http://localhost'
if not old_stat['Endpoint']:
print("Cannot find task status. Creating")
else:
endpoint = old_stat['Endpoint']
ret = asyncio.get_event_loop().run_until_complete(self._UpdateTaskStatusAsync(taskId, status, backendStatus, endpoint))
return json.loads(ret)
async def _UpdateTaskStatusAsync(self, taskId, status, backendStatus, endpoint):
session = aiohttp.ClientSession()
resp = await session.post(self.cache_connector_upsert_url, json={'TaskId': taskId,
'Timestamp': datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"),
'Status': status,
'BackendStatus': backendStatus,
'Endpoint': endpoint,
'PublishToGrid': False
})
if resp.status != 200:
print("status code: " + str(resp.status_code))
resstr = '{"TaskId": "' + taskId + '", "Status": "not found"}'
else:
resstr = await resp.text("UTF-8")
await session.close()
return resstr
def CompleteTask(self, taskId, status):
return self._UpdateTaskStatus(taskId, status, 'completed')
def UpdateTaskStatus(self, taskId, status):
return self._UpdateTaskStatus(taskId, status, 'running')
def FailTask(self, taskId, status):
return self._UpdateTaskStatus(taskId, status, 'failed')
def AddPipelineTask(self, taskId, organization_moniker, version, api_name, body):
old_stat = self.GetTaskStatus(taskId)
if old_stat['Status'] == "not found":
print("Cannot find task status.")
return json.loads('{"TaskId": "-1", "Status": "error"}')
parsed_endpoint = urlparse(old_stat['Endpoint'])
path = '{}/{}/{}'.format(version, organization_moniker, api_name)
next_endpoint = '{}://{}/{}'.format(parsed_endpoint.scheme, parsed_endpoint.netloc, path)
print("Sending to next endpoint: " + next_endpoint)
asyncio.set_event_loop(asyncio.new_event_loop())
ret = asyncio.get_event_loop().run_until_complete(self.AddPipelineTaskAsync(taskId, next_endpoint, body))
return json.loads(ret)
async def AddPipelineTaskAsync(self, taskId, next_endpoint, body):
session = aiohttp.ClientSession()
resp = await session.post(self.cache_connector_upsert_url, json={'TaskId': taskId,
'Timestamp': datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"),
'Status': 'created',
'BackendStatus': 'created',
'Endpoint': next_endpoint,
'Body': body,
'PublishToGrid': True
})
if resp.status != 200:
print("status code: " + str(r.status_code))
resstr = '{"TaskId": "' + taskId + '", "Status": "not found"}'
else:
resstr = await resp.text("UTF-8")
await session.close()
return resstr
def GetTaskStatus(self, taskId):
asyncio.set_event_loop(asyncio.new_event_loop())
ret = asyncio.get_event_loop().run_until_complete(self.GetTaskStatusAsync(taskId))
return json.loads(ret)
async def GetTaskStatusAsync(self, taskId):
session = aiohttp.ClientSession()
resp = await session.get(self.cache_connector_get_url, params={'taskId': taskId})
if resp.status != 200:
resstr = '{"TaskId": "' + taskId + '", "Status": "not found"}'
else:
resstr = await resp.text("UTF-8")
await session.close()
return resstr |
import csv
input_path = "../resources/normalization/resources/rawdata/CL_20210810.csv"
output_path = "../resources/normalization/resources/dictionary/best_dict_CellType_20210810.txt"
cui2names = {}
with open(input_path) as f:
rdr = csv.reader(f)
for line in rdr:
class_id = line[0]
# only consider CL
if not class_id.split("/")[-1].startswith("CL"):
continue
cui = class_id.split("/")[-1]
name = line[1]
synonyms = line[2].split("|")
if line[2].strip() != '':
cui2names[cui] = '|'.join([name] + synonyms)
else:
cui2names[cui] = name
# save
with open(output_path, 'w') as f:
for cui, names in cui2names.items():
f.write(cui + "||" + names)
f.write("\n")
|
from network import *
from PIL import Image
import scipy.misc as misc
import os
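# DnCNN denoiser: the network predicts the noise residual of the input, and the
# denoised image is obtained by subtracting that residual from the noisy input.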
class DnCNN:
def __init__(self):
self.clean_img = tf.placeholder(tf.float32, [None, None, None, IMG_C])
self.noised_img = tf.placeholder(tf.float32, [None, None, None, IMG_C])
self.train_phase = tf.placeholder(tf.bool)
dncnn = net("DnCNN")
self.res = dncnn(self.noised_img, self.train_phase)
self.denoised_img = self.noised_img - self.res
self.loss = tf.reduce_mean(tf.reduce_sum(tf.square(self.res - (self.noised_img - self.clean_img)), [1, 2, 3]))
self.Opt = tf.train.AdamOptimizer(1e-3).minimize(self.loss)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def train(self):
filepath = "./TrainingSet//"
filenames = os.listdir(filepath)
saver = tf.train.Saver()
for epoch in range(50):
for i in range(filenames.__len__()//BATCH_SIZE):
cleaned_batch = np.zeros([BATCH_SIZE, IMG_H, IMG_W, IMG_C])
for idx, filename in enumerate(filenames[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE]):
cleaned_batch[idx, :, :, 0] = np.array(Image.open(filepath+filename))
noised_batch = cleaned_batch + np.random.normal(0, SIGMA, cleaned_batch.shape)
self.sess.run(self.Opt, feed_dict={self.clean_img: cleaned_batch, self.noised_img: noised_batch, self.train_phase: True})
if i % 10 == 0:
[loss, denoised_img] = self.sess.run([self.loss, self.denoised_img], feed_dict={self.clean_img: cleaned_batch, self.noised_img: noised_batch, self.train_phase: False})
print("Epoch: %d, Step: %d, Loss: %g"%(epoch, i, loss))
compared = np.concatenate((cleaned_batch[0, :, :, 0], noised_batch[0, :, :, 0], denoised_img[0, :, :, 0]), 1)
Image.fromarray(np.uint8(compared)).save("./TrainingResults//"+str(epoch)+"_"+str(i)+".jpg")
if i % 500 == 0:
saver.save(self.sess, "./save_para//DnCNN.ckpt")
np.random.shuffle(filenames)
def test(self, cleaned_path="./TestingSet//02.png"):
saver = tf.train.Saver()
saver.restore(self.sess, "./save_para/DnCNN.ckpt")
cleaned_img = np.reshape(np.array(misc.imresize(np.array(Image.open(cleaned_path)), [256, 256])), [1, 256, 256, 1])
noised_img = cleaned_img + np.random.normal(0, SIGMA, cleaned_img.shape)
[denoised_img] = self.sess.run([self.denoised_img], feed_dict={self.clean_img: cleaned_img, self.noised_img: noised_img, self.train_phase: False})
compared = np.concatenate((cleaned_img[0, :, :, 0], noised_img[0, :, :, 0], denoised_img[0, :, :, 0]), 1)
Image.fromarray(np.uint8(compared)).show()
if __name__ == "__main__":
dncnn = DnCNN()
dncnn.train()
|
def lex():
return raw_input().split()
def input_set(size):
a = map(int, lex())
assert len(a) == int(size)
return set(a)
def input_query():
cmd, size = lex()
return cmd.strip(), input_set(size)
s = input_set(raw_input())
for i in xrange(0, int(raw_input())):
cmd, t = input_query()
if cmd == 'update':
s |= t
elif cmd == 'intersection_update':
s &= t
elif cmd == 'difference_update':
s -= t
elif cmd == 'symmetric_difference_update':
s ^= t
print sum(s)
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import math
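# Rollout buffer holding the trajectories (states, actions, log-probs, rewards,
# terminal flags and GRU hidden states) collected between PPO updates.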
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
self.hidden = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
del self.hidden[:]
class ActorCritic(nn.Module):
def __init__(self, feature_dim, state_dim, hidden_state_dim=1024, policy_conv=True, action_std=0.1):
super(ActorCritic, self).__init__()
# encoder with convolution layer for MobileNetV3, EfficientNet and RegNet
if policy_conv:
self.state_encoder = nn.Sequential(
nn.Conv2d(feature_dim, 32, kernel_size=1, stride=1, padding=0, bias=False),
nn.ReLU(),
nn.Flatten(),
nn.Linear(int(state_dim * 32 / feature_dim), hidden_state_dim),
nn.ReLU()
)
# encoder with linear layer for ResNet and DenseNet
else:
self.state_encoder = nn.Sequential(
nn.Linear(state_dim, 2048),
nn.ReLU(),
nn.Linear(2048, hidden_state_dim),
nn.ReLU()
)
self.gru = nn.GRU(hidden_state_dim, hidden_state_dim, batch_first=False)
self.actor = nn.Sequential(
nn.Linear(hidden_state_dim, 2),
nn.Sigmoid())
self.critic = nn.Sequential(
nn.Linear(hidden_state_dim, 1))
self.action_var = torch.full((2,), action_std).cuda()
self.hidden_state_dim = hidden_state_dim
self.policy_conv = policy_conv
self.feature_dim = feature_dim
self.feature_ratio = int(math.sqrt(state_dim/feature_dim))
def forward(self):
raise NotImplementedError
def act(self, state_ini, memory, restart_batch=False, training=False):
if restart_batch:
del memory.hidden[:]
memory.hidden.append(torch.zeros(1, state_ini.size(0), self.hidden_state_dim).cuda())
if not self.policy_conv:
state = state_ini.flatten(1)
else:
state = state_ini
state = self.state_encoder(state)
state, hidden_output = self.gru(state.view(1, state.size(0), state.size(1)), memory.hidden[-1])
memory.hidden.append(hidden_output)
state = state[0]
action_mean = self.actor(state)
cov_mat = torch.diag(self.action_var).cuda()
dist = torch.distributions.multivariate_normal.MultivariateNormal(action_mean, scale_tril=cov_mat)
action = dist.sample().cuda()
if training:
action = F.relu(action)
action = 1 - F.relu(1 - action)
action_logprob = dist.log_prob(action).cuda()
memory.states.append(state_ini)
memory.actions.append(action)
memory.logprobs.append(action_logprob)
else:
action = action_mean
return action.detach()
def evaluate(self, state, action):
seq_l = state.size(0)
batch_size = state.size(1)
if not self.policy_conv:
state = state.flatten(2)
state = state.view(seq_l * batch_size, state.size(2))
else:
state = state.view(seq_l * batch_size, state.size(2), state.size(3), state.size(4))
state = self.state_encoder(state)
state = state.view(seq_l, batch_size, -1)
state, hidden = self.gru(state, torch.zeros(1, batch_size, state.size(2)).cuda())
state = state.view(seq_l * batch_size, -1)
action_mean = self.actor(state)
cov_mat = torch.diag(self.action_var).cuda()
dist = torch.distributions.multivariate_normal.MultivariateNormal(action_mean, scale_tril=cov_mat)
action_logprobs = dist.log_prob(torch.squeeze(action.view(seq_l * batch_size, -1))).cuda()
dist_entropy = dist.entropy().cuda()
state_value = self.critic(state)
return action_logprobs.view(seq_l, batch_size), \
state_value.view(seq_l, batch_size), \
dist_entropy.view(seq_l, batch_size)
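# Proximal Policy Optimization with a clipped surrogate objective; a separate copy of
# the policy (policy_old) is used for action sampling and synced after every update.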
class PPO:
def __init__(self, feature_dim, state_dim, hidden_state_dim, policy_conv,
action_std=0.1, lr=0.0003, betas=(0.9, 0.999), gamma=0.7, K_epochs=1, eps_clip=0.2):
self.lr = lr
self.betas = betas
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.policy = ActorCritic(feature_dim, state_dim, hidden_state_dim, policy_conv, action_std).cuda()
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)
self.policy_old = ActorCritic(feature_dim, state_dim, hidden_state_dim, policy_conv, action_std).cuda()
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def select_action(self, state, memory, restart_batch=False, training=True):
return self.policy_old.act(state, memory, restart_batch, training)
def update(self, memory):
rewards = []
discounted_reward = 0
for reward in reversed(memory.rewards):
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
rewards = torch.cat(rewards, 0).cuda()
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
old_states = torch.stack(memory.states, 0).cuda().detach()
old_actions = torch.stack(memory.actions, 0).cuda().detach()
old_logprobs = torch.stack(memory.logprobs, 0).cuda().detach()
for _ in range(self.K_epochs):
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
ratios = torch.exp(logprobs - old_logprobs.detach())
advantages = rewards - state_values.detach()
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
loss = -torch.min(surr1, surr2) + 0.5 * self.MseLoss(state_values, rewards) - 0.01 * dist_entropy
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.policy_old.load_state_dict(self.policy.state_dict())
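# Classification head: with fc_rnn=True a GRU accumulates per-step features before a
# single linear classifier; otherwise features are concatenated across steps and fed
# to step-specific linear layers (fc_2 ... fc_5).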
class Full_layer(torch.nn.Module):
def __init__(self, feature_num, hidden_state_dim=1024, fc_rnn=True, class_num=1000):
super(Full_layer, self).__init__()
self.class_num = class_num
self.feature_num = feature_num
self.hidden_state_dim = hidden_state_dim
self.hidden = None
self.fc_rnn = fc_rnn
# classifier with RNN for ResNet, DenseNet and RegNet
if fc_rnn:
self.rnn = nn.GRU(feature_num, self.hidden_state_dim)
self.fc = nn.Linear(self.hidden_state_dim, class_num)
# cascaded classifier for MobileNetV3 and EfficientNet
else:
self.fc_2 = nn.Linear(self.feature_num * 2, class_num)
self.fc_3 = nn.Linear(self.feature_num * 3, class_num)
self.fc_4 = nn.Linear(self.feature_num * 4, class_num)
self.fc_5 = nn.Linear(self.feature_num * 5, class_num)
def forward(self, x, restart=False):
if self.fc_rnn:
if restart:
output, h_n = self.rnn(x.view(1, x.size(0), x.size(1)), torch.zeros(1, x.size(0), self.hidden_state_dim).cuda())
self.hidden = h_n
else:
output, h_n = self.rnn(x.view(1, x.size(0), x.size(1)), self.hidden)
self.hidden = h_n
return self.fc(output[0])
else:
if restart:
self.hidden = x
else:
self.hidden = torch.cat([self.hidden, x], 1)
if self.hidden.size(1) == self.feature_num:
return None
elif self.hidden.size(1) == self.feature_num * 2:
return self.fc_2(self.hidden)
elif self.hidden.size(1) == self.feature_num * 3:
return self.fc_3(self.hidden)
elif self.hidden.size(1) == self.feature_num * 4:
return self.fc_4(self.hidden)
elif self.hidden.size(1) == self.feature_num * 5:
return self.fc_5(self.hidden)
else:
print(self.hidden.size())
exit() |
import pytest
import kleat.misc.settings as S
from kleat.misc.calc_genome_offset import calc_genome_offset
@pytest.mark.parametrize("ctg_clv, tail_side, skip_check_size, expected_gnm_offset", [
# [1, 'left', 0, 1],
# [1, 'right', 0, 1],
# [2, 'left', 0, 3],
# [2, 'right', 0, 3],
[1, 'left', 1, 3],
[1, 'right', 1, 1],
[2, 'left', 1, 3],
[2, 'right', 1, 1],
])
def test_clv_before_insertion(ctg_clv, tail_side, skip_check_size, expected_gnm_offset):
"""
TT AA <-bridge read tail
└C┘ <-bridge read, for visual convenience two cases for different tail sides are merged with only one base shown
       |      # blank line to separate the bridge read and the insertion
| AGC <-inserted sequence
| 345 <-contig offset coord for inserted sequence
| ┬
AT-G GT <-contig
01 2 678 <-contig offset coord
^ctg_clv
0123 456 <-genome offset coord
^gnm_offset
"""
ctg_cigartuples = (
(S.BAM_CMATCH, 2),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 1),
(S.BAM_CINS, 3),
(S.BAM_CMATCH, 2)
)
assert calc_genome_offset(ctg_cigartuples, ctg_clv, tail_side, skip_check_size) == expected_gnm_offset
@pytest.mark.parametrize("ctg_clv, tail_side, skip_check_size, expected_gnm_offset", [
[3, 'left', 0, 3],
[3, 'right', 0, 4],
[5, 'left', 0, 3],
[5, 'right', 0, 4],
[3, 'left', 1, 3],
[3, 'right', 1, 4],
[5, 'left', 1, 3],
[5, 'right', 1, 4],
[3, 'left', 2, 3],
# this case is considered fine for now
[3, 'right', 2, 4],
[2, 'left', 2, 3],
[2, 'right', 2, 1],
[5, 'left', 2, 3],
[5, 'right', 2, 4],
])
def test_clv_inside_insertion(ctg_clv, tail_side, skip_check_size, expected_gnm_offset):
"""
TT AA <-bridge read tail
└A┘ <-bridge read, for visual convenience two cases for different tail sides are merged with only one base shown
       |      # blank line to separate the bridge read and the insertion
AGC <-inserted sequence
345 <-contig offset coord for inserted sequence
ctg_clv^|
|┬
AT-G GT <-contig
01 2 678 <-contig offset coord
0123 456 <-genome offset coord
^gnm_offset
"""
ctg_cigartuples = (
(S.BAM_CMATCH, 2),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 1),
(S.BAM_CINS, 3),
(S.BAM_CMATCH, 2)
)
assert calc_genome_offset(ctg_cigartuples, ctg_clv, tail_side, skip_check_size) == expected_gnm_offset
@pytest.mark.parametrize("ctg_clv, tail_side, skip_check_size, expected_gnm_offset", [
[6, 'left', 0, 4],
[6, 'right', 0, 4],
[7, 'left', 0, 5],
[7, 'right', 0, 5],
[6, 'left', 2, 4],
[6, 'right', 2, 4],
[6, 'left', 3, 4],
[6, 'right', 3, 4],
[6, 'left', 4, 4],
[6, 'right', 4, 4],
[6, 'left', 5, 4],
[6, 'right', 5, 4],
[6, 'left', 6, 4],
[6, 'right', 6, 4],
    # varying skip_check_size won't change the result because the current
    # implementation only considers the last skip. Here the insertion is
    # between the skip and the match when tail_side is right
])
def test_clv_after_insertion(ctg_clv, tail_side, skip_check_size, expected_gnm_offset):
"""
TT AA <-bridge read tail
└A┘ <-bridge read, for visual convenience two cases for different tail sides are merged with only one base shown
       |      # blank line to separate the bridge read and the insertion
AGC <-inserted sequence
345 <-contig offset coord for inserted sequence
┬
AT-G GT <-contig
01 2 678 <-contig offset coord
^ctg_clv
0123 456 <-genome offset coord
^gnm_offset
"""
ctg_cigartuples = (
(S.BAM_CMATCH, 2),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 1),
(S.BAM_CINS, 3),
(S.BAM_CMATCH, 2)
)
assert calc_genome_offset(ctg_cigartuples, ctg_clv, tail_side, skip_check_size) == expected_gnm_offset
|
# -*- coding: utf-8 -*-
from flask import Flask, request, render_template
from gevent.pywsgi import WSGIServer
import numpy as np
import re
import requests
import os
app = Flask(__name__)
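# Send the submitted image URL to the RapidAPI image-to-text endpoint and return the
# recognised text.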
def check(output):
url = "https://image-to-text2.p.rapidapi.com/cloudVision/imageToText"
querystring = {"source":output,"sourceType":"url"}
payload = '''{\r\n \"source\": "'''+output+'''" ,\r\n \"sourceType\": \"url\"\r\n}'''
headers = {
'content-type': "application/json",
'x-rapidapi-key': "fbd4199c1amsh2c93fd54d73d474p1d7c4cjsn786fe34bf93d",
'x-rapidapi-host': "image-to-text2.p.rapidapi.com"
}
response = requests.request("POST", url, data=payload, headers=headers, params=querystring)
print(response.text)
return (response.json()['text'])
#home page
@app.route('/')
def home():
return render_template('base.html')
# Prediction page: runs image-to-text on the submitted image URL
@app.route('/predict',methods=['POST'])
def predict():
output=request.form['output']
text=check(output)
return render_template('base.html',output=text)
port = os.getenv('VCAP_APP_PORT', '8080')
if __name__ == "__main__":
app.secret_key= os.urandom(12)
app.run(debug=True, host='0.0.0.0', port=port) |
import queue
import numpy as np
from scipy import optimize as op
import matplotlib.pyplot as plt
import sys
import random
import math
MACHINE = 5
PER_GPU = 2
DBL_MIN = float('-Infinity')
JOB_TIME = 5
COM_TIME = 0
READ_INF = 0
PTA = 10
# real
# trace = [8, 8, 4, 16, 8, 16, 8, 4, 4, 4, 4, 16, 4, 4, 8, 8, 4, 4, 2, 2, 4, 8, 8, 4, 16, 8, 16, 32, 4, 8, 4, 2, 4, 8, 4, 4, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 4, 24, 4, 16, 2, 8, 16, 4, 8, 4, 4, 16, 8, 8, 8, 4, 8, 8, 4, 4, 8, 8, 4, 8, 4, 4, 4, 4, 8, 4, 8]
# trace = [5, 4, 2, 5, 4, 5, 5, 2, 3, 3, 2, 6, 3, 3, 3, 4, 3, 2, 1, 1, 3, 5, 5, 2, 6, 3, 7, 6, 2, 3, 2, 1, 3, 4, 3, 2, 5, 5, 4, 5, 4, 2, 4, 5, 3, 3, 1, 3, 4, 3, 3, 4, 3, 5, 3, 5, 3, 3, 4, 3, 5, 5, 3, 5, 2, 5, 4, 3, 3, 2, 5, 2, 5, 1, 3, 5, 3, 5, 3, 3, 7, 5, 3, 5, 2, 5, 3, 2, 2, 3, 5, 2, 4, 3, 3, 3, 2, 3, 2, 3]
# trace = [3, 3, 2, 4, 3, 6, 3, 2, 2, 3, 2, 4, 2, 2, 3, 4, 2, 2, 1, 1, 3, 3, 5, 3, 6, 5, 5, 7, 3, 3, 3, 1, 3, 5, 3, 2, 5, 5, 5, 5, 5, 3, 5, 3, 5, 4, 1, 5, 3, 5, 4, 3, 3, 3, 3, 3, 4, 5, 4, 5, 5, 4, 4, 3, 3, 4, 3, 3, 2, 3, 4, 2, 6, 1, 5, 6, 3, 5, 3, 3, 4, 4, 4, 5, 3, 5, 5, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 4, 3, 5]
# for i in range(len(trace)):
# base = int(math.log(trace[i], 2))
# trace[i] = random.randint(base, base*2-1)
# norm
# trace = [3, 7, 5, 3, 5, 4, 6, 1, 5, 4, 4, 8, 2, 7, 4, 3, 9, 2, 2, 7, 6, 3, 5, 2, 6, 6, 4, 7, 3, 5, 4, 7, 8, 8, 5, 5, 5, 5, 4, 6, 8, 4, 5, 9, 3, 8, 6, 5, 4, 5]
# trace = [5, 5, 4, 4, 3, 4, 2, 8, 3, 4, 1, 4, 7, 6, 4, 4, 1, 3, 2, 8, 5, 4, 0, 1, 1, 6, 5, 1, 4, 8, 5, 6, 6, 4, 1, 2, 4, 4, 2, 3, 3, 0, 3, 0, 6, 5, 3, 7, 0, 5, 7, 5, 8, 2, 6, 6, 0, 5, 6, 3, 6, 3, 4, 2, 4, 1, 3, 9, 1, 4, 7, 4, 5, 3, 4, 3, 5, 5, 1, 3, 5, 5, 2, 2, 4, 7, 1, 6, 2, 0, 3, 2, 1, 3, 4, 3, 6, 2, 2, 2]
# trace = np.random.normal(PER_GPU, 2, 100).astype(int)
# poisson
# trace = [6, 4, 6, 2, 4, 5, 9, 2, 4, 4, 3, 7, 4, 5, 2, 5, 4, 1, 3, 0, 4, 7, 3, 2, 1, 3, 4, 4, 3, 2, 3, 2, 4, 4, 0, 5, 7, 3, 0, 6, 5, 1, 4, 1, 4, 5, 4, 5, 3, 2, 2, 8, 4, 3, 3, 4, 5, 4, 4, 4, 6, 7, 7, 5, 4, 9, 2, 4, 1, 4, 2, 4, 4, 4, 3, 4, 6, 2, 4, 9, 1, 3, 2, 3, 0, 2, 6, 1, 0, 3, 4, 5, 6, 6, 0, 6, 4, 4, 6, 8]
# trace = [7, 3, 4, 1, 3, 4, 6, 2, 6, 2, 6, 1, 5, 5, 1, 3, 2, 2, 2, 5, 4, 5, 3, 3, 6, 7, 1, 4, 2, 10, 6, 3, 6, 5, 1, 5, 8, 1, 2, 3, 4, 4, 4, 8, 6, 4, 9, 1, 5, 1, 3, 4, 4, 0, 2, 3, 3, 1, 1, 4, 3, 5, 7, 4, 6, 6, 5, 6, 6, 5, 1, 5, 3, 4, 0, 1, 4, 2, 6, 1, 8, 5, 8, 5, 3, 5, 4, 5, 2, 2, 6, 3, 4, 6, 3, 3, 2, 3, 8, 2]
# trace = np.random.poisson(2, 20)
trace = [3,1,1,2,2,3,5,2,3,2,2,5,2,1,3,3,1,3,1,2]
# trace = [1, 1, 1, 2, 2, 1, 3, 2, 1, 1, 3, 2, 3, 1, 3, 1, 2, 1, 2, 2]
period = np.array([5 for x in range(100)])
# period = np.array([16.966666666666665, 3221.05, 3318.3333333333335, 3112.4333333333334, 14.266666666666667, 5726.25, 1698.5333333333333, 1694.3166666666666, 1689.5333333333333, 15144.35, 3759.2333333333336, 172.46666666666667, 5672.7, 1639.5333333333333, 2996.1833333333334, 1622.1166666666668, 1622.3, 669.7666666666667, 10024.716666666667, 1592.1333333333332, 5096.55, 5594.383333333333, 1534.8666666666666, 1527.9666666666667, 12722.566666666666, 26094.916666666668, 1513.1666666666667, 11185.216666666667, 3394.35, 1488.2333333333333, 161.06666666666666, 401.43333333333334, 15780.566666666666, 2956.2, 3115.733333333333, 1936.7666666666667, 1407.9666666666667, 5431.583333333333, 5416.116666666667, 5414.383333333333, 1387.6833333333334, 3145.4166666666665, 4894.15, 3398.9833333333336, 1330.3166666666666, 3113.733333333333, 1321.0666666666666, 3406.7666666666664, 1295.6833333333334, 5308.433333333333, 2929.1666666666665, 1264.6833333333334, 3197.0333333333333, 5643.116666666667, 9866.2, 3372.85, 2757.05, 2278.0166666666664, 4698.016666666666, 1151.3833333333334, 2798.8, 1142.65, 1653.2833333333333, 1094.0833333333333, 9703.716666666667, 3388.0833333333335, 45.03333333333333, 2433.5166666666664, 2054.0166666666664, 65.43333333333334, 962.5666666666667, 4466.266666666666, 45816.95, 32654.15, 33076.416666666664, 32653.933333333334, 32656.333333333332, 15705.3, 791.0833333333334, 789.0666666666667, 783.6833333333333, 782.4333333333333, 617.0166666666667, 14216.483333333334, 683.3833333333333, 2764.2333333333336, 2752.866666666667, 3183.2, 661.9333333333333, 1365.8833333333334, 3166.6666666666665, 655.0166666666667, 13519.233333333334, 2751.416666666667, 6299.95, 8833.616666666667, 6212.833333333333, 21.616666666666667, 3019.0333333333333, 6163.166666666667, 1624.0166666666667, 15.483333333333333, 15.366666666666667, 272.98333333333335, 254.36666666666667, 2885.266666666667, 1111.35, 1107.85, 13.85, 15.366666666666667, 49.333333333333336, 17.183333333333334, 16.05, 4369.716666666666, 17.6, 15.25, 1415.25, 736.2666666666667, 14.216666666666667, 3395.55, 223.45, 255.16666666666666, 255.28333333333333, 256.35, 249.93333333333334, 255.26666666666668, 160.68333333333334, 160.58333333333334, 205.7, 38.55, 118.55, 217.66666666666666, 22.333333333333332, 17853.35, 17.266666666666666, 2758.0666666666666, 987.4333333333333, 46.63333333333333, 979.1833333333333, 24.25, 12.05, 11.75, 10359.033333333333, 14.716666666666667, 4141.883333333333, 810.7166666666667, 5739.55, 3696.9166666666665, 2560.25, 20.233333333333334, 1400.0833333333333, 2819.0, 2497.15, 2478.15, 1469.85, 15427.183333333334, 99.95, 3056.866666666667, 2370.6666666666665, 3034.0333333333333, 327.43333333333334, 2336.35, 2841.083333333333, 1856.05, 2267.0666666666666, 835.4666666666667, 789.2666666666667, 2201.0333333333333, 775.4166666666666, 90.5, 709.3833333333333, 1322.6166666666666, 3361.383333333333, 1203.7333333333333, 48.916666666666664, 147.56666666666666, 1226.8666666666666, 50.0, 1200.7666666666667, 23602.216666666667, 2015.2333333333333, 207.33333333333334, 3506.616666666667, 1353.0333333333333, 1203.6833333333334, 1212.6333333333334, 625.85, 11.833333333333334, 16.6, 49.88333333333333, 2763.7666666666664, 3135.6, 2752.383333333333, 1744.0333333333333, 2304.366666666667, 893.5666666666667, 9329.983333333334, 1658.1, 2453.0, 1645.5666666666666])
# period = np.log(period).astype(int)
if type(trace) == list:
print(trace)
else:
print(trace.tolist())
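# Advance the simulation by one time step: progress every running job's compute and
# communication state, release the GPUs and links of finished jobs, and record their
# completion efficiency (ideal time / actual time).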
def update(server, link, job_trace, job_load, bandwidth, job_ps, ce):
release = 0
workload = 0
for i in range(len(bandwidth)):
job = job_trace.get()
load = job_load.get()
ps = job_ps.get()
load[2] += 1
if load[0] > 0 and load[1] <= 0:
load[0] -= 1
if load[0] >= 0:
workload += 1
load[1] = 1
else:
if bandwidth[i] == 0:
load[1] = 0
load[0] -= 1
if load[0] >= 0:
workload += 1
else:
load[1] -= bandwidth[i]
if load[0] <= 0 and load[1] <= 0:
for p in job:
server[p[0]] -= p[1]
release += p[1]
if len(job) > 1:
link[p[0]] -= 1
if ps != -1:
link[ps] -= 1
ce.append(load[3]/load[2])
else:
job_trace.put(job)
job_load.put(load)
job_ps.put(ps)
return release, workload
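# Estimate per-job bandwidth shares with a linear program: maximise the total allocated
# bandwidth subject to unit capacity on every server link, forcing single-server jobs
# to zero bandwidth.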
def linear_evaluation(server, link, job_trace, job_ps):
m = job_trace.qsize()
n = len(server)
bd = []
for x in range(m):
bd.append([0,1])
c = np.array([-1 for x in range(m)])
A_ub = [[0 for x in range(m)] for x in range(n)]
B_ub = [1 for x in range(n)]
A_eq = [[0 for x in range(m)]]
B_eq = [0]
job_link = [1 for x in range(m)]
for i in range(m):
job = job_trace.get()
job_trace.put(job)
ps = job_ps.get()
job_ps.put(ps)
if len(job) > 1:
for p in job:
A_ub[p[0]][i] += 1
job_link[i] = max(job_link[i], link[p[0]])
else:
job_link[i] = 0
if ps != -1:
A_ub[ps][i] += 1
job_link[i] = max(job_link[i], link[ps])
for i in range(m):
if job_link[i] > 0:
tmp = [0 for x in range(m)]
tmp[i] = -1*job_link[i]
A_ub.append(tmp)
B_ub.append(-1)
else:
tmp = [0 for x in range(m)]
tmp[i] = 1
A_eq.append(tmp)
B_eq.append(0)
res = op.linprog(c, np.array(A_ub), np.array(B_ub), np.array(A_eq), np.array(B_eq), bounds=bd)
# print(res['x'])
num = 0
sums = 0.0
for x in res['x']:
# if x > 0:
# sums += x
# num += 1
if x > 1:
print("warning!")
if x > 0:
sums += 1/x
num += 1
else:
sums += 0
num += 1
if num > 0:
avg = sums/num
else:
avg = 0
# print(avg)
return avg, res['x']
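# Two-stage bottleneck-based bandwidth allocation: stage 1 repeatedly saturates the most
# congested server and gives its jobs equal shares, stage 2 spreads the remaining PTA
# budget over the still-unallocated distributed jobs. Returns the average per-job
# bandwidth, the per-job bandwidths and the residual per-server bandwidth.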
def bw_evaluation(server, tmp_job, job_trace, job_ps):
m = job_trace.qsize()
n = len(server)
job_link = [[] for x in range(m)]
server_link = [[] for x in range(n)]
job_bw = [[0.0, 0.0] for x in range(m)]
server_bw = [1.0 for x in range(n)]
alloc = 0
pta = PTA
for i in range(m):
job = job_trace.get()
job_trace.put(job)
ps = job_ps.get()
job_ps.put(ps)
if ps != -1:
job_link[i].append(-1*ps-1)
server_link[ps].append(-1*i-1)
for p in job:
job_link[i].append(p[0])
server_link[p[0]].append(i)
else:
alloc += 1
if len(tmp_job) > 1:
m += 1
job_link.append([])
job_bw.append([0.0, 0.0])
for p in tmp_job:
job_link[m-1].append(p[0])
server_link[p[0]].append(m-1)
# stage 1
for i in range(n):
min_bw = 1.1
bottle = -1
for j in range(n):
if server_bw[j] > 0 and len(server_link[j]) > 0 and server_bw[j]/len(server_link[j]) < min_bw:
bottle = j
min_bw = server_bw[j]/len(server_link[j])
if bottle == -1:
break
bw = server_bw[bottle]/len(server_link[bottle])
if pta/(m-alloc) < bw: # step into stage two
break
for j in server_link[bottle]:
jdx = j
if jdx < 0:
jdx = -1*(jdx + 1)
if job_bw[jdx][0] > 0:
continue
job_bw[jdx][0] = bw
alloc += 1
pta -= bw
for s in job_link[jdx]:
sdx = s
if sdx < 0:
sdx = -1*(sdx + 1)
if sdx != bottle:
if s < 0:
server_link[sdx].remove(-1*jdx-1)
else:
server_link[sdx].remove(jdx)
server_bw[sdx] -= bw
server_link[bottle] = []
if alloc == m:
break
# stage 2
if alloc < m:
a = pta / (m - alloc)
for i in range(m):
if job_bw[i][0] == 0 and len(job_link[i]) > 1:
job_bw[i][0] = a
for s in job_link[i]:
sdx = s
if sdx < 0:
sdx = -1 * (sdx + 1)
server_bw[sdx] -= a
for i in range(n):
min_bw = 1.1
bottle = -1
for j in range(n):
x = 0
for k in server_link[j]:
if k >= 0:
x += 1
else:
kdx = -1*(k+1)
x += len(job_link[kdx]) - 1
if server_bw[j] > 0 and x > 0 and server_bw[j]/x < min_bw:
bottle = j
min_bw = server_bw[j]/x
if bottle == -1:
break
bw = min_bw
for j in server_link[bottle]:
jdx = j
if jdx < 0:
jdx = -1*(jdx + 1)
if job_bw[jdx][1] > 0:
continue
job_bw[jdx][1] = bw
alloc += 1
for s in job_link[jdx]:
sdx = s
if sdx < 0:
sdx = -1*(sdx + 1)
if s < 0:
if sdx != bottle:
server_link[sdx].remove(-1*jdx-1)
server_bw[sdx] -= bw * (len(job_link[jdx])-1)
else:
if sdx != bottle:
server_link[sdx].remove(jdx)
server_bw[sdx] -= bw
server_link[bottle] = []
if alloc == m:
break
bandwidth = [x[0]+x[1] for x in job_bw]
num = 0
sums = 0.0
for x in bandwidth:
# if x > 0:
# sums += x
# num += 1
if x > 1:
print("warning!")
if x > 0:
sums += x
num += 1
else:
sums += 0
# num += 1
if num > 0:
avg = sums / num
else:
avg = 0
# print(avg)
return avg, bandwidth, server_bw
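# Placement policy: fill the least-loaded servers first; if the job ends up spanning
# several servers, put its parameter server on the machine with the fewest links.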
def gpu_balance(server, link, gpus, job_trace, job_ps):
n = len(server)
ls = []
flag = 0
job = []
tmp = []
rest = gpus
for i in range(n):
tmp.append([i, server[i]])
tmp = sorted(tmp, key=lambda x: x[1])
# tmp.reverse()
for i in range(n):
usage = min(rest, PER_GPU - tmp[i][1])
server[tmp[i][0]] += usage
rest -= usage
if usage > 0:
job.append([tmp[i][0], usage])
if usage > 0 and usage != gpus:
link[tmp[i][0]] += 1
flag = 1
if rest == 0:
break
if flag == 1:
for i in range(n):
ls.append([i, link[i]])
ls = sorted(ls, key=lambda x: x[1])
job_ps.put(ls[0][0])
link[ls[0][0]] += 1
else:
job_ps.put(-1)
job_trace.put(job)
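# Tetris-style placement: score each server by a weighted sum of its GPU usage and link
# count, then fill the lowest-scoring servers first.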
def Tetris(server, link, gpus, job_trace, job_ps):
ls = []
n = len(server)
flag = 0
job = []
tmp = []
rest = gpus
for i in range(n):
tmp.append([i, server[i]*gpus/PER_GPU/PER_GPU+link[i]*1])
tmp = sorted(tmp, key=lambda x: x[1])
for i in range(n):
usage = min(rest, PER_GPU - server[tmp[i][0]])
server[tmp[i][0]] += usage
rest -= usage
if usage > 0:
job.append([tmp[i][0], usage])
if usage > 0 and usage != gpus:
link[tmp[i][0]] += 1
flag = 1
if rest == 0:
break
if flag == 1:
for i in range(n):
ls.append([i, link[i]])
ls = sorted(ls, key=lambda x: x[1])
job_ps.put(ls[0][0])
link[ls[0][0]] += 1
else:
job_ps.put(-1)
job_trace.put(job)
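# Optimus-style placement: choose the least-used servers that can host the job and
# spread its GPUs across them as evenly as possible (round-robin).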
def Optimus(server, link, gpus, job_trace, job_ps):
n = len(server)
ls = []
flag = 0
job = []
tmp = []
rest = gpus
for i in range(n):
tmp.append([i, server[i]])
tmp = sorted(tmp, key=lambda x: x[1])
# tmp.reverse()
idx = []
for i in range(n):
usage = min(rest, PER_GPU - tmp[i][1])
idx.append(tmp[i])
rest -= usage
if rest == 0:
break
allocate = [0 for x in range(len(idx))]
k = 0
for i in range(gpus):
while True:
if allocate[k % len(idx)] < PER_GPU - idx[k % len(idx)][1]:
allocate[k % len(idx)] += 1
k += 1
break
else:
k += 1
for i in range(len(idx)):
server[idx[i][0]] += allocate[i]
if allocate[i] > 0:
job.append([idx[i][0], allocate[i]])
if 0 < allocate[i] < gpus:
link[idx[i][0]] += 1
flag = 1
if flag == 1:
for i in range(n):
ls.append([i, link[i]])
ls = sorted(ls, key=lambda x: x[1])
job_ps.put(ls[0][0])
link[ls[0][0]] += 1
else:
job_ps.put(-1)
job_trace.put(job)
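# Placement policy: prefer servers with the fewest active links, filling each selected
# server up to its GPU capacity.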
def link_balance(server, link, gpus, job_trace, job_ps):
n = len(server)
tmp = []
ls = []
flag = 0
job = []
rest = gpus
for i in range(n):
tmp.append([i, link[i]])
tmp = sorted(tmp, key=lambda x: x[1])
for i in range(n):
usage = min(rest, PER_GPU - server[tmp[i][0]])
server[tmp[i][0]] += usage
rest -= usage
if usage > 0:
job.append([tmp[i][0], usage])
if usage > 0 and usage != gpus:
link[tmp[i][0]] += 1
flag = 1
if rest == 0:
break
if flag == 1:
for i in range(n):
ls.append([i, link[i]])
ls = sorted(ls, key=lambda x: x[1])
job_ps.put(ls[0][0])
link[ls[0][0]] += 1
else:
job_ps.put(-1)
job_trace.put(job)
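# Least-fragmentation policy: fill the most-utilised servers first so that the remaining
# free GPUs stay consolidated on as few machines as possible.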
def least_fragment(server, link, gpus, job_trace, job_ps):
n = len(server)
tmp = []
ls = []
flag = 0
job = []
rest = gpus
for i in range(n):
tmp.append([i, server[i]])
tmp = sorted(tmp, key=lambda x: x[1])
tmp.reverse()
for i in range(n):
usage = min(rest, PER_GPU - tmp[i][1])
server[tmp[i][0]] += usage
rest -= usage
if usage > 0:
job.append([tmp[i][0], usage])
if usage > 0 and usage != gpus:
link[tmp[i][0]] += 1
flag = 1
if rest == 0:
break
if flag == 1:
for i in range(n):
ls.append([i, link[i]])
ls = sorted(ls, key=lambda x: x[1])
job_ps.put(ls[0][0])
link[ls[0][0]] += 1
else:
job_ps.put(-1)
job_trace.put(job)
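# Proposed packing policy: single-server jobs go to the tightest feasible server (best
# fit, breaking ties by link count and spare bandwidth); multi-server jobs are placed
# with a knapsack-style DP over (link state, allocated GPUs) that trades bandwidth gain
# against extra links, and the parameter server goes on the machine with the most spare
# bandwidth.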
def packing(server, link, gpus, job_trace, job_ps):
n = len(server)
# shadow link computing
shadow_link = [0.0 for x in range(n)]
empty = 0
up_link = 0
for i in range(n):
shadow_link[i] = link[i]
if server[i] < PER_GPU:
up_link = max(up_link, link[i])
if link[i] == 0:
empty = 1
# m = job_trace.qsize()
# for i in range(m):
# job = job_trace.get()
# job_trace.put(job)
# ps = job_ps.get()
# job_ps.put(ps)
# if ps == -1:
# continue
# max_link = link[ps]
# for j in job:
# max_link = max(max_link, link[j[0]])
# for j in job:
# if link[j[0]] == 1 and max_link > 1:
# shadow_link[j[0]] = 0
avg, bandwidth, sbw = bw_evaluation(server, [], job_trace, job_ps)
ls = []
# connectionless solution
if gpus <= PER_GPU:
min_del = sys.maxsize
min_link = 0
min_bw = 0
idx = -1
for i in range(n):
if PER_GPU - server[i] == gpus:
idx = i
break
if PER_GPU - server[i] > gpus:
if min_del > PER_GPU - server[i] - gpus:
min_del = PER_GPU - server[i] - gpus
min_link = link[i]
min_bw = sbw[i]
idx = i
elif min_del == PER_GPU - server[i] - gpus:
if min_link < link[i]:
min_del = PER_GPU - server[i] - gpus
min_link = link[i]
min_bw = sbw[i]
idx = i
elif min_link == link[i]:
if min_bw < sbw[i]:
min_del = PER_GPU - server[i] - gpus
min_link = link[i]
min_bw = sbw[i]
idx = i
if idx != -1:
server[idx] += gpus
job_trace.put([[idx, gpus]])
job_ps.put(-1)
return
# connection-oriented solution
dp = [[DBL_MIN for x in range(gpus+PER_GPU+1)] for j in range(up_link+1)]
trace = [[[[-1, -1] for x in range(gpus+PER_GPU+1)]for x in range(n)]for j in range(up_link+1)]
for i in range(up_link+1):
dp[i][0] = 1/(i+1)
for i in range(n):
w = PER_GPU - server[i]
# v = -1 * (1 - 1 / (shadow_link[i] + 1))
# v = 0
# if shadow_link[i] > 0:
# v = -shadow_link[i]*(1/shadow_link[i]-1/(shadow_link[i]+1))
        # Value of adding server i: its bandwidth share minus the link cost it would add.
        v = sbw[i] - (1 / (link[i] + 1))
        lvl = link[i]
        for s in range(up_link + 1):
            for j in range(gpus + PER_GPU, -1, -1):
                trace[s][i][j] = trace[s][i-1][j]
        if w == 0:
            continue
        # Knapsack transition: take all free GPUs on server i, moving link state s to wl.
        for s in range(up_link, -1, -1):
            wl = max(s, lvl)
            for j in range(gpus + PER_GPU, w - 1, -1):
                if dp[s][j-w] == DBL_MIN:
                    continue
                gain = dp[s][j-w] + v - 1 / (s + 1) + 1 / (wl + 1)
                if dp[wl][j] < gain:
                    dp[wl][j] = gain
                    trace[wl][i][j] = [i, s]
# decide solution
ans = -1
level = DBL_MIN
state = 0
for s in range(up_link + 1):
if dp[s][gpus] > level: # exist exact solution
ans = gpus
level = dp[s][gpus]
state = s
    if ans == -1 or empty:  # no exact fit, or a link-free server exists: also consider over-allocating (trimmed below)
for s in range(up_link + 1):
for i in range(gpus+1, gpus+PER_GPU+1):
if dp[s][i] == DBL_MIN:
continue
tmp = dp[s][i]
if tmp > level:
ans = i
level = tmp
state = s
# print("gain: ", level)
    # Walk the trace table backwards to recover which servers were selected.
    job = []
    cur_w = ans
    row = n - 1
    while cur_w > 0:
idx = trace[state][row][cur_w][0]
pre_s = trace[state][row][cur_w][1]
usage = PER_GPU - server[idx]
job.append([idx, usage])
cur_w -= usage
state = pre_s
row = idx - 1
    if ans > gpus:  # over-allocated: give the surplus back from the largest single share
job = sorted(job, key=lambda x: x[1], reverse=True)
for i in range(len(job)):
if job[i][1] >= ans-gpus:
job[i][1] -= ans-gpus
break
for j in job:
server[j[0]] += j[1]
link[j[0]] += 1
# compute best ps
avg, bandwidth, sbw = bw_evaluation(server, job, job_trace, job_ps)
for i in range(n):
shadow_link[i] = link[i]
m = job_trace.qsize()
for i in range(m):
ejob = job_trace.get()
job_trace.put(ejob)
ps = job_ps.get()
job_ps.put(ps)
if ps == -1:
continue
max_link = link[ps]
for j in ejob:
max_link = max(max_link, link[j[0]])
for j in ejob:
if link[j[0]] == 1 and max_link > 1:
shadow_link[j[0]] = 0
if link[ps] == 1 and max_link > 1:
shadow_link[ps] = 0
for i in range(n):
ls.append([i, 1 - sbw[i], link[i]+shadow_link[i]])
ls = sorted(ls, key=lambda x: [x[1], x[2]])
job_ps.put(ls[0][0])
# print(ls[0][1])
link[ls[0][0]] += 1
job_trace.put(job)
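# Simulation driver: replay the arrival trace under each placement policy, updating
# running jobs every tick and recording bandwidth, communication load, and progress.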
monitor = []
gce = []
for fn in [packing, Optimus, Tetris, gpu_balance, link_balance, least_fragment]:
job_trace = queue.Queue()
job_load = queue.Queue()
job_ps = queue.Queue()
if READ_INF:
print(fn.__name__)
server = [0 for x in range(MACHINE)]
link = [0 for x in range(MACHINE)]
gpu_sum = MACHINE * PER_GPU
cur_sum = 0
res = []
term = 0
idx = 0
avg = 0
bandwidth = []
t = 0
workload = 0
progress = []
comm_load = []
band_sum = []
ce = []
while True:
while idx < len(trace) and trace[idx] == 0:
idx += 1
if idx < len(trace):
t = trace[idx]
a, b = update(server, link, job_trace, job_load, bandwidth, job_ps, ce)
cur_sum -= a
workload += b
progress.append(workload)
if idx < len(trace) and cur_sum + t <= gpu_sum:
fn(server, link, t, job_trace, job_ps)
job_load.put([period[idx], COM_TIME, 0, period[idx]])
cur_sum += t
idx += 1
# print(cur_sum, np.sum(np.array(server)))
if job_trace.qsize() > 0:
# avg, bandwidth = linear_evaluation(server, link, job_trace, job_ps)
avg, bandwidth, sbw = bw_evaluation(server, [], job_trace, job_ps)
if fn.__name__ == "packing":
if link[2]:
monitor.append(1-sbw[2])
else:
monitor.append(0)
band_sum.append(np.sum(bandwidth))
# print(link)
# print(bandwidth)
res.append(avg)
cur_load = 0.0
for i in range(job_load.qsize()):
ps = job_ps.get()
job_ps.put(ps)
load = job_load.get()
job_load.put(load)
if ps != -1:
cur_load += max(load[0], 0.0)+max(load[1], 0.0)
comm_load.append(cur_load)
if job_trace.qsize() == 0:
break
term += 1
# if READ_INF:
# print(band_sum)
gce.append(np.average(np.array(ce)))
# plt.plot(progress, label=fn.__name__)
# print(server)
# print(link)
print(term)
num = 0
sums = 0
avg = 0
for x in res:
if x > 0:
sums += x
num += 1
if num > 0:
avg = sums / num
else:
avg = 0
if READ_INF:
print(avg)
# print(job_trace)
# plt.bar([x.__name__ for x in [packing, gpu_balance, link_balance, least_fragment, Optimus, Tetris]], gce, color=['r','g','b', 'c', 'm', 'y'])
# plt.xticks(rotation=45)
monitor = [0] + monitor + [0]
plt.plot(monitor)
plt.legend()
plt.show()
|
from adobject import *
from pyadexceptions import InvalidObjectException, invalidResults
import aduser, adcomputer, addomain, adgroup, adobject, pyadconstants, adcontainer
def from_cn(common_name, search_base=None, options={}):
try:
q = ADObject.from_cn(common_name, search_base, options)
q.adjust_pyad_type()
return q
except invalidResults:
return None
def from_dn(distinguished_name, options={}):
try:
        q = ADObject.from_dn(distinguished_name, options)
q.adjust_pyad_type()
return q
except InvalidObjectException:
return None
def from_guid(guid, options={}):
    "Generates an ADObject based on its GUID"
try:
guid = "<GUID=%s>" % guid.strip('}').strip('{')
q = ADObject.from_dn(guid, options)
q.adjust_pyad_type()
return q
except InvalidObjectException:
return None
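# Hypothetical usage sketch (requires a reachable Active Directory domain; the common
# name below is only an illustrative placeholder, not part of the library):
#     user = from_cn("jdoe")
#     if user is not None:
#         print(user)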
|
# 3.1.1.5
n = int(input('Enter a value for n greater than or equal to 100: '))
print(n >= 100)
# 3.1.1.9
n1 = int(input('Enter the first number: '))
n2 = int(input('Enter the second number: '))
n3 = int(input('Enter the third number: '))
if n1 >= n2 and n1 >= n3:
    largest = n1
elif n2 >= n1 and n2 >= n3:
    largest = n2
else:
    largest = n3
print('The largest number is:', largest)
# 3.1.1.10
print('Largest with max():', max(n1, n2, n3))
# 3.1.1.11
# To be continued
|
import numpy as np
import matplotlib.pyplot as plt
class NSG:
    """Non-stationary k-armed bandit testbed: each arm's true value follows a random
    walk, and epsilon-greedy agents with sample-average (V) and constant-step-size (C)
    updates are compared over `delta` steps, averaged across `tasks` bandit problems."""
    def __init__(self, epsilon, alpha, delta, tasks, arms):
self.epsilon = epsilon
self.alpha = alpha
self.beta = 1
self.delta = delta
self.n = tasks
self.k = arms
self.Q = [([0]*self.k) for i in range(self.n)]
#self.Q2 = [[([0]*self.k) for i in range(self.n)] for y in range(self.delta)]
self.VQ = [([0]*self.k) for i in range(self.n)]
self.CQ = [([0]*self.k) for i in range(self.n)]
self.VN = [([0]*self.k) for i in range(self.n)]
self.CN = [([0]*self.k) for i in range(self.n)]
self.VR = [0]*self.delta
self.CR = [0]*self.delta
    def RW(self, x):
        # Random walk: shift every arm's true value by -1, 0, or +1.
        for i in range(len(x)):
            x[i] = x[i] + np.random.randint(-1, 2)
        return x
    def V(self):
        # Sample-average epsilon-greedy (step size 1/N), reward averaged over all tasks.
        for i in range(self.delta):
            for j in range(self.n):
                self.Q[j] = self.RW(self.Q[j])
                #self.Q2[i][j] = self.Q[j]
                if np.random.random() <= self.epsilon:
                    arm = np.random.randint(0, self.k)
                else:
                    arm = np.argmax(self.VQ[j])
                R = self.Q[j][arm]
                self.VN[j][arm] += 1
                self.VQ[j][arm] += (R - self.VQ[j][arm]) / self.VN[j][arm]
                self.VR[i] += R / self.n
            if i % 100 == 0:
                print(i)
    def C(self):
        # Constant-step-size epsilon-greedy (step size alpha), on a fresh random walk.
        self.Q = [([0] * self.k) for i in range(self.n)]
        for i in range(self.delta):
            for j in range(self.n):
                self.Q[j] = self.RW(self.Q[j])
                if np.random.random() <= self.epsilon:
                    arm = np.random.randint(0, self.k)
                else:
                    arm = np.argmax(self.CQ[j])
                R = self.Q[j][arm]
                self.CN[j][arm] += 1
                self.CQ[j][arm] += self.alpha * (R - self.CQ[j][arm])
                self.CR[i] += R / self.n
            if i % 100 == 0:
                print(i)
def plot(self):
plt.plot(self.VR, color='red', label='V')
plt.plot(self.CR, color='blue', label='C')
plt.legend()
plt.show()
if __name__ == "__main__":
r = NSG(0.1, 0.1, 10000, 500, 10)
r.V()
r.C()
r.plot()
|
from string import digits, ascii_lowercase, ascii_uppercase
from random import choice as rand_choice
from secrets import choice as sec_choice
def slug_generator(size: int = 10, char: str = digits + ascii_uppercase + ascii_lowercase) -> str:
return "".join(rand_choice(char) for _ in range(size))
def otp_generator(size: int = 6, char: str = digits) -> str:
return "".join(sec_choice(char) for _ in range(size))
|
#!/usr/bin/env python3
from aws_cdk import core
from {{cookiecutter.alexa_skill_name_slug}}_cdk.cdk_stack import AlexaCdkStack as CdkStack
app = core.App()
CdkStack(
app,
"{{cookiecutter.alexa_skill_cdk_stack_name}}",
    stack_name="{{cookiecutter.alexa_skill_cdk_stack_name}}",
    description="Lambda / DynamoDB stack for {{cookiecutter.alexa_skill_name}}"
)
app.synth()
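# After synthesis, the stack can be deployed with the CDK CLI (assumes AWS credentials
# and the CDK toolkit are configured), e.g. by running `cdk deploy` in the project root.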
|