code (string, 20-1.05M chars) | apis (list) | extract_api (string, 75-5.24M chars)
---|---|---|
import tensorflow as tf
import pandas as pd
import numpy as np
import sys
import time
from cflow import ConditionalFlow
from MoINN.modules.subnetworks import DenseSubNet
from utils import train_density_estimation, plot_loss, plot_tau_ratio
# import data
tau1_gen = np.reshape(np.load("../data/tau1s_Pythia_gen.npy"), (-1,1))
tau2_gen = np.reshape(np.load("../data/tau2s_Pythia_gen.npy"), (-1,1))
tau1_sim = np.reshape(np.load("../data/tau1s_Pythia_sim.npy"), (-1,1))
tau2_sim = np.reshape(np.load("../data/tau2s_Pythia_sim.npy"), (-1,1))
data_gen = tf.convert_to_tensor(np.concatenate([tau1_gen,tau2_gen], axis=-1), dtype=tf.float32)
data_sim = tf.convert_to_tensor(np.concatenate([tau1_sim,tau2_sim], axis=-1), dtype=tf.float32)
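# Split each dataset into two equal halves: the first for training, the second for testing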
train_gen, test_gen = np.split(data_gen, 2)
train_sim, test_sim = np.split(data_sim, 2)
# Get the flow
meta = {
"units": 16,
"layers": 4,
"initializer": "glorot_uniform",
"activation": "leakyrelu",
}
cflow = ConditionalFlow(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta, subnet_constructor=DenseSubNet)
# train the network
EPOCHS = 50
BATCH_SIZE = 1000
LR = 5e-3
DECAY_RATE=0.1
ITERS = len(train_gen)//BATCH_SIZE
DECAY_STEP=ITERS
#Prepare the tf.dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_gen, train_sim))
train_dataset = train_dataset.shuffle(buffer_size=500000).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(LR, DECAY_STEP, DECAY_RATE)
opt = tf.keras.optimizers.Adam(lr_schedule)
train_losses = []
#train_all = np.concatenate([train_gen, train_sim], axis=-1)
start_time = time.time()
for e in range(EPOCHS):
batch_train_losses = []
# Iterate over the batches of the dataset.
for step, (batch_gen, batch_sim) in enumerate(train_dataset):
batch_loss = train_density_estimation(cflow, opt, batch_gen, [batch_sim])
batch_train_losses.append(batch_loss)
train_loss = tf.reduce_mean(batch_train_losses)
train_losses.append(train_loss)
if (e + 1) % 1 == 0:
# Print metrics
print(
"Epoch #{}: Loss: {}, Learning_Rate: {}".format(
e + 1, train_losses[-1], opt._decayed_lr(tf.float32)
)
)
end_time = time.time()
print("--- Run time: %s hour ---" % ((end_time - start_time)/60/60))
print("--- Run time: %s mins ---" % ((end_time - start_time)/60))
print("--- Run time: %s secs ---" % ((end_time - start_time)))
# Make plots and sample
plot_loss(train_losses, name="Log-likelihood", log_axis=False)
detector = tf.constant(test_sim, dtype=tf.float32)
unfold_gen = cflow.sample(int(5e5),[detector])
plot_tau_ratio(test_gen, unfold_gen, detector, name="tau_ratio")
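# Draw 10 independent sample sets conditioned on the same detector-level data, stack them, and save the result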
unfold_gen = {}
for i in range(10):
unfold_gen[i] = cflow.sample(int(5e5),[detector])
unfold_pythia = np.stack([unfold_gen[i] for i in range(10)])
np.save("inn_pythia",unfold_pythia)
|
[
"cflow.ConditionalFlow",
"tensorflow.keras.optimizers.schedules.InverseTimeDecay",
"tensorflow.data.Dataset.from_tensor_slices",
"utils.train_density_estimation",
"tensorflow.keras.optimizers.Adam",
"numpy.split",
"utils.plot_tau_ratio",
"tensorflow.constant",
"numpy.concatenate",
"tensorflow.reduce_mean",
"numpy.load",
"time.time",
"numpy.save",
"utils.plot_loss"
] |
[((758, 779), 'numpy.split', 'np.split', (['data_gen', '(2)'], {}), '(data_gen, 2)\n', (766, 779), True, 'import numpy as np\n'), ((802, 823), 'numpy.split', 'np.split', (['data_sim', '(2)'], {}), '(data_sim, 2)\n', (810, 823), True, 'import numpy as np\n'), ((986, 1095), 'cflow.ConditionalFlow', 'ConditionalFlow', ([], {'dims_in': '[2]', 'dims_c': '[[2]]', 'n_blocks': '(12)', 'subnet_meta': 'meta', 'subnet_constructor': 'DenseSubNet'}), '(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta,\n subnet_constructor=DenseSubNet)\n', (1001, 1095), False, 'from cflow import ConditionalFlow\n'), ((1262, 1320), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_gen, train_sim)'], {}), '((train_gen, train_sim))\n', (1296, 1320), True, 'import tensorflow as tf\n'), ((1439, 1513), 'tensorflow.keras.optimizers.schedules.InverseTimeDecay', 'tf.keras.optimizers.schedules.InverseTimeDecay', (['LR', 'DECAY_STEP', 'DECAY_RATE'], {}), '(LR, DECAY_STEP, DECAY_RATE)\n', (1485, 1513), True, 'import tensorflow as tf\n'), ((1520, 1557), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr_schedule'], {}), '(lr_schedule)\n', (1544, 1557), True, 'import tensorflow as tf\n'), ((1651, 1662), 'time.time', 'time.time', ([], {}), '()\n', (1660, 1662), False, 'import time\n'), ((2280, 2291), 'time.time', 'time.time', ([], {}), '()\n', (2289, 2291), False, 'import time\n'), ((2516, 2578), 'utils.plot_loss', 'plot_loss', (['train_losses'], {'name': '"""Log-likelihood"""', 'log_axis': '(False)'}), "(train_losses, name='Log-likelihood', log_axis=False)\n", (2525, 2578), False, 'from utils import train_density_estimation, plot_loss, plot_tau_ratio\n'), ((2591, 2630), 'tensorflow.constant', 'tf.constant', (['test_sim'], {'dtype': 'tf.float32'}), '(test_sim, dtype=tf.float32)\n', (2602, 2630), True, 'import tensorflow as tf\n'), ((2678, 2742), 'utils.plot_tau_ratio', 'plot_tau_ratio', (['test_gen', 'unfold_gen', 'detector'], {'name': '"""tau_ratio"""'}), "(test_gen, unfold_gen, detector, name='tau_ratio')\n", (2692, 2742), False, 'from utils import train_density_estimation, plot_loss, plot_tau_ratio\n'), ((2896, 2932), 'numpy.save', 'np.save', (['"""inn_pythia"""', 'unfold_pythia'], {}), "('inn_pythia', unfold_pythia)\n", (2903, 2932), True, 'import numpy as np\n'), ((279, 318), 'numpy.load', 'np.load', (['"""../data/tau1s_Pythia_gen.npy"""'], {}), "('../data/tau1s_Pythia_gen.npy')\n", (286, 318), True, 'import numpy as np\n'), ((350, 389), 'numpy.load', 'np.load', (['"""../data/tau2s_Pythia_gen.npy"""'], {}), "('../data/tau2s_Pythia_gen.npy')\n", (357, 389), True, 'import numpy as np\n'), ((422, 461), 'numpy.load', 'np.load', (['"""../data/tau1s_Pythia_sim.npy"""'], {}), "('../data/tau1s_Pythia_sim.npy')\n", (429, 461), True, 'import numpy as np\n'), ((493, 532), 'numpy.load', 'np.load', (['"""../data/tau2s_Pythia_sim.npy"""'], {}), "('../data/tau2s_Pythia_sim.npy')\n", (500, 532), True, 'import numpy as np\n'), ((575, 620), 'numpy.concatenate', 'np.concatenate', (['[tau1_gen, tau2_gen]'], {'axis': '(-1)'}), '([tau1_gen, tau2_gen], axis=-1)\n', (589, 620), True, 'import numpy as np\n'), ((671, 716), 'numpy.concatenate', 'np.concatenate', (['[tau1_sim, tau2_sim]'], {'axis': '(-1)'}), '([tau1_sim, tau2_sim], axis=-1)\n', (685, 716), True, 'import numpy as np\n'), ((1979, 2013), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['batch_train_losses'], {}), '(batch_train_losses)\n', (1993, 2013), True, 'import tensorflow as tf\n'), ((1854, 1914), 
'utils.train_density_estimation', 'train_density_estimation', (['cflow', 'opt', 'batch_gen', '[batch_sim]'], {}), '(cflow, opt, batch_gen, [batch_sim])\n', (1878, 1914), False, 'from utils import train_density_estimation, plot_loss, plot_tau_ratio\n')]
|
# -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
import numpy as np
import camos.model.image as img
from camos.utils.apptools import getGui
class InputData:
"""The InputData object.
This behaves as a container for the data, as a numpy array, and the main
properties of interest for the object to be handled in visualization and analysis.
"""
def __init__(self, file=None, memoryPersist=None, name="New Layer"):
"""Initialization of the object
Args:
file ([str, numpy.ndarray], optional): Can be a numpy array containing any numeric data, or a path to a file. The opening plugin must support this. Defaults to None.
memoryPersist (bool, optional): whether the data must be loaded into memory at once or can be loaded from disk as required. Defaults to None, in which case the global RAM persistence setting is used.
stack (bool): if False, the file is interpreted as a stack; if True, various
files are interpreted as a single stack
"""
self.file = file
self.name = name
self._image = None
self.frames = 0
self.data = None
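# If no persistence flag is given, fall back to the RAM persistence setting in the GUI configuration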
if memoryPersist is None:
_persist = getGui().configuration.readConfiguration()[
"Performance/RAM_persistence"
]
self.memoryPersist = _persist
else:
self.memoryPersist = memoryPersist
self.max = 0
self.opacity = 50
self.brightness = 0
self.contrast = 0
self.colormap = "gray"
def image(self, index):
"""Returns the current frame for an image
Args:
index (int): index corresponding to the frame
Returns:
np.ndarray: current frame of the image, with shape (height, width, channels)
"""
return self._image[index]
def loadImage(self):
self._image = img.Stack(
self.file, dx=1, dz=1, units="nm", persistence=self.memoryPersist
)
self.frames = len(self._image)
self.max = self._image._imgs.max()
|
[
"camos.model.image.Stack",
"camos.utils.apptools.getGui"
] |
[((2021, 2097), 'camos.model.image.Stack', 'img.Stack', (['self.file'], {'dx': '(1)', 'dz': '(1)', 'units': '"""nm"""', 'persistence': 'self.memoryPersist'}), "(self.file, dx=1, dz=1, units='nm', persistence=self.memoryPersist)\n", (2030, 2097), True, 'import camos.model.image as img\n'), ((1329, 1337), 'camos.utils.apptools.getGui', 'getGui', ([], {}), '()\n', (1335, 1337), False, 'from camos.utils.apptools import getGui\n')]
|
import tarfile
import tempfile
from . import Task, TaskVar
class TaskReadFile(Task, name="read-file"):
"""
Read contents of a file in the image into a variable.
"""
class Schema:
path = TaskVar(help="Container file path to read data from")
var = TaskVar(help="Destination variable name to write file contents to")
def run_with_values(self, job, *, var, path):
container = job.create({})
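# The container API returns the file as a tar archive stream; spool it to a temporary file before extracting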
with tempfile.TemporaryFile() as tf:
tstream, tstat = container.get_archive(path)
for chunk in tstream:
tf.write(chunk)
tf.seek(0)
with tarfile.open(fileobj=tf, mode="r") as tar:
for item in tar.members:
data = tar.extractfile(item).read().decode('utf-8')
if data.endswith("\n"):
data = data[:-1]
job.set_var(var, data)
break
job.cancel()
|
[
"tempfile.TemporaryFile",
"tarfile.open"
] |
[((450, 474), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (472, 474), False, 'import tempfile\n'), ((646, 680), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'tf', 'mode': '"""r"""'}), "(fileobj=tf, mode='r')\n", (658, 680), False, 'import tarfile\n')]
|
from abc import ABC, abstractmethod
import numpy as np
class SwarmAlgorithm(ABC):
'''
A base abstract class for different swarm algorithms.
Parameters
----------
D : int
Search space dimension.
N : int
Population size.
fit_func : callable
Fitness (objective) function or a function returning multiple values
corresponding to different objectives (for multi-objective problems).
params : array_like
Model behavioral parameters.
bounds : ndarray
A 2 by D matrix containing lower and upper bounds of the search space
for each dimension.
seed : int, optional, default=None
Random generator seed.
max_iter : int, optional, default=100
Maximum number of iterations (generations).
stag_iter : int, optional, default=100
Specifies the allowed number of iterations without solution improvement
by equal or more than a given tolerance. If the number is exceeded,
the optimization process is considered stagnant and the algorithm stops.
e : float, optional, default=1e-5
Tolerance.
Attributes
----------
particles : ndarray
An N by D array representing the swarm of N particles.
scores : ndarray
An array of size N representing the value of the fitness function
for each particle.
gbest : ndarray
The D-dimensional vector representing the position of the current
global best particle.
gbest_score : float
The value of the fitness function for the current global best particle.
eval_num : int
The number of fitness function evaluations.
'''
def __init__(self, D, N, fit_func, params, bounds, seed=None, max_iter=100,
stag_iter=100, e=1e-5):
self.D = D
self.N = N
# Initialize problem parameters.
self.fit_func = fit_func
self.l_bounds = bounds[0]
self.u_bounds = bounds[1]
# Behavioural parameters' initialization.
self.set_params(params)
# Initializing the Numpy random numbers generator to reproduce results
# of the optimization processes.
self.seed = seed
# Stopping criteria.
self.max_iter = max_iter
self.stag_iter = stag_iter
self.e = e
self.reset()
@abstractmethod
def set_params(self, new_params):
'''
Initialize the algorithm with a strategy (vector of parameters).
Parameters
----------
new_params : array_like
Returns
-------
No value.
'''
pass
def reset(self):
'''
Resets the algorithm state.
Parameters
----------
No parameters.
Returns
-------
No value.
'''
if self.seed is not None:
np.random.seed(self.seed)
# Generate initial population and particles' velocities.
self.set_population([self.generate_particle()
for _ in range(self.N)])
def generate_particle(self):
'''
Generates a swarm particle within bounds.
Parameters
----------
No parameters.
Returns
-------
ndarray
A vector of size D representing particle's coordinates.
'''
coords_range = self.u_bounds - self.l_bounds
return self.l_bounds + np.random.uniform(size=self.D) * coords_range
def set_population(self, new_population):
'''
Sets a population with a pre-generated one.
Parameters
----------
new_population: array_like
A matrix with dimensions N by D, which represents the coordinates
of each particle.
Returns
-------
No value.
'''
self.eval_num = self.N
self.N = len(new_population)
self.particles = np.copy(new_population)
self.scores = np.array([self.fit_func(p) for p in self.particles])
# Initializing current best.
gbest_index = np.ndarray.argmin(self.scores)
self.gbest = np.copy(self.particles[gbest_index])
self.gbest_score = self.scores[gbest_index]
@abstractmethod
def optimize(self):
'''
Main loop of the algorithm.
Parameters
----------
No parameters.
Returns
-------
ndarray
The coordinates of the global best particle at the end of
the optimization process.
'''
pass
def update_best(self):
'''
Updates global best particle if needed.
Parameters
----------
No parameters.
Returns
-------
No value.
'''
current_best_index = np.argmin(self.scores)
current_best = self.particles[current_best_index]
current_best_score = self.scores[current_best_index]
if current_best_score < self.gbest_score:
self.gbest = np.copy(current_best)
self.gbest_score = current_best_score
def simplebounds(self, coords):
'''
Simple constraint rule for particles' positions
(in-place coordinate modification).
Parameters
----------
coords: ndarray
An array of particles to apply the rule to.
Returns
-------
No value.
'''
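# Clamp out-of-bounds coordinates to the corresponding boundary values (element-wise)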
l_bounds_tiled = np.tile(self.l_bounds, [coords.shape[0], 1])
u_bounds_tiled = np.tile(self.u_bounds, [coords.shape[0], 1])
lower_bound_indexes = coords < self.l_bounds
upper_bound_indexes = coords > self.u_bounds
coords[lower_bound_indexes] = l_bounds_tiled[lower_bound_indexes]
coords[upper_bound_indexes] = u_bounds_tiled[upper_bound_indexes]
def info(self):
'''
Returns basic information about the algorithm state in a
human-readable representation.
Parameters
----------
No parameters.
Returns
-------
str
Information about current best position, score and
current number of fitness-function evaluations.
'''
info = f'Algorithm: {type(self).__name__}\n'
info += f'Best position: {self.gbest}\n'
info += f'Best score: {self.gbest_score}\n'
info += f'Number of fitness function evaluations: {self.eval_num}'
return info
|
[
"numpy.copy",
"numpy.ndarray.argmin",
"numpy.tile",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.argmin"
] |
[((3954, 3977), 'numpy.copy', 'np.copy', (['new_population'], {}), '(new_population)\n', (3961, 3977), True, 'import numpy as np\n'), ((4113, 4143), 'numpy.ndarray.argmin', 'np.ndarray.argmin', (['self.scores'], {}), '(self.scores)\n', (4130, 4143), True, 'import numpy as np\n'), ((4165, 4201), 'numpy.copy', 'np.copy', (['self.particles[gbest_index]'], {}), '(self.particles[gbest_index])\n', (4172, 4201), True, 'import numpy as np\n'), ((4834, 4856), 'numpy.argmin', 'np.argmin', (['self.scores'], {}), '(self.scores)\n', (4843, 4856), True, 'import numpy as np\n'), ((5481, 5525), 'numpy.tile', 'np.tile', (['self.l_bounds', '[coords.shape[0], 1]'], {}), '(self.l_bounds, [coords.shape[0], 1])\n', (5488, 5525), True, 'import numpy as np\n'), ((5551, 5595), 'numpy.tile', 'np.tile', (['self.u_bounds', '[coords.shape[0], 1]'], {}), '(self.u_bounds, [coords.shape[0], 1])\n', (5558, 5595), True, 'import numpy as np\n'), ((2887, 2912), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2901, 2912), True, 'import numpy as np\n'), ((5052, 5073), 'numpy.copy', 'np.copy', (['current_best'], {}), '(current_best)\n', (5059, 5073), True, 'import numpy as np\n'), ((3458, 3488), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.D'}), '(size=self.D)\n', (3475, 3488), True, 'import numpy as np\n')]
|
from datetime import time
from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from vnpy.app.cta_strategy.base import (
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus,
)
from vnpy.app.cta_strategy.TSMtools import TSMArrayManager
import numpy as np
class TSMyoBiasAccuStrategy(CtaTemplate):
""""""
author = "TheSuperMyo"
# Intraday trading
exit_time = time(hour=14, minute=57)
# Session boundaries for markets with different trading hours
open_time_night = time(hour=21,minute=0)  # commodity night session
open_time_day_1 = time(hour=9,minute=0)  # commodities
open_time_day_2 = time(hour=9,minute=30)  # stock index futures
close_time_day = time(hour=15,minute=0)  # commodities/stock index (except interest rate futures)
close_time_night_1 = time(hour=23,minute=0)  # other night-session commodities
close_time_night_2 = time(hour=1,minute=0)  # industrial metals
close_time_night_3 = time(hour=2,minute=30)  # gold/silver/crude oil
break_time_start_1 = time(hour=10,minute=15)  # commodity morning break starts
break_time_start_2 = time(hour=11,minute=30)  # lunch break for all markets
break_time_end_1 = time(hour=10,minute=30)  # commodity morning break ends
break_time_end_2 = time(hour=13,minute=0)  # stock index afternoon session starts
break_time_end_3 = time(hour=13,minute=30)  # commodity afternoon session starts
ma_len = 14 # length of the moving average used for the bias
accu_len = 8 # bias accumulation window
accu_std_fliter = 2 # bias filter threshold, in multiples of the std
trailing_stop = 0.5 # trailing stop (percent)
fixed_size = 1 # fixed order size
bar_counter = 0 # per-day minute counter
signal = 0 # entry signal
stop_long = 0
stop_short = 0
hold_high = 0
hold_low = 0
parameters = ['ma_len','accu_len','accu_std_fliter','trailing_stop','fixed_size']
variables = ['bar_counter','signal','stop_long','stop_short']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(TSMyoBiasAccuStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.bg = BarGenerator(self.on_bar)
self.am = TSMArrayManager()
# Order management kept by the strategy itself
self.active_orderids = []
self.bars = []
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
# Data from previous days is not needed
self.load_bar(5)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def tick_filter(self, tick: TickData):
"""
Filter out ticks that fall outside regular trading hours
"""
tick_time = tick.datetime.time()
if tick_time < self.open_time_day_2:
return False
if tick_time > self.break_time_start_2 and tick_time < self.break_time_end_2:
return False
if tick_time > self.close_time_day:
return False
return True
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
if not self.tick_filter(tick):
return
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
"""
1. Count the minutes within the trading day
2. Place and cancel orders
"""
self.bar_counter += 1
self.cancel_all()
am = self.am
am.update_bar(bar)
if not am.inited:
return
self.bars.append(bar)
if len(self.bars) <= 2:
return
else:
self.bars.pop(0)
last_bar = self.bars[-2]
if ( last_bar.datetime.date() != bar.datetime.date() ):
self.bar_counter = 1
# Make sure the bias accumulation signal only reflects the current trading day
if self.bar_counter < max(self.accu_len,self.ma_len):
return
self.signal = am.bias_SMA_Accumulated_signal(self.ma_len, self.accu_len, self.accu_std_fliter, False)
if self.pos == 0:
if self.signal == 1:
# Enter long
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.buy(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
if self.signal == -1:
# Enter short
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.short(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
if self.pos > 0:
self.hold_high = max(self.hold_high,bar.high_price)
self.stop_long = self.hold_high*(1-self.trailing_stop/100)
if bar.datetime.time() > self.exit_time or self.signal == -1:
# Intraday close-out
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.sell(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
else:
# Close the long position with a stop order
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.sell(self.stop_long, self.fixed_size, True, True)
self.active_orderids.extend(orderids)
if self.pos < 0:
self.hold_low = min(self.hold_low,bar.low_price)
self.stop_short = self.hold_low*(1+self.trailing_stop/100)
if bar.datetime.time() > self.exit_time or self.signal == 1:
# Intraday close-out
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.cover(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
else:
# Close the short position with a stop order
if self.active_orderids:
self.write_log("撤单不干净,无法挂单")
return
orderids = self.cover(self.stop_short, self.fixed_size, True, True)
self.active_orderids.extend(orderids)
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
# Remove orders that have been filled or cancelled
if not order.is_active() and order.vt_orderid in self.active_orderids:
self.active_orderids.remove(order.vt_orderid)
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
# Email notification
self.send_email(f"{trade.vt_symbol}在{trade.time}成交,价格{trade.price},方向{trade.direction}{trade.offset},数量{trade.volume}")
if self.pos == 0:
self.stop_long = 0
self.stop_short = 0
if self.pos > 0:
self.hold_high = trade.price
if self.pos < 0:
self.hold_low = trade.price
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
# A local stop order that was just created
if stop_order.status == StopOrderStatus.WAITING:
return
# A cancelled local stop order: remove it from the active list
if stop_order.status == StopOrderStatus.CANCELLED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
# A triggered local stop order: drop the stop order and track the resulting limit orders
if stop_order.status == StopOrderStatus.TRIGGERED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
self.active_orderids.extend(stop_order.vt_orderids)
# Cancel the remaining stop orders
for other_orderids in self.active_orderids:
if other_orderids.startswith(STOPORDER_PREFIX):
self.cancel_order(other_orderids)
|
[
"vnpy.app.cta_strategy.TSMtools.TSMArrayManager",
"datetime.time",
"vnpy.app.cta_strategy.BarGenerator"
] |
[((493, 517), 'datetime.time', 'time', ([], {'hour': '(14)', 'minute': '(57)'}), '(hour=14, minute=57)\n', (497, 517), False, 'from datetime import time\n'), ((558, 581), 'datetime.time', 'time', ([], {'hour': '(21)', 'minute': '(0)'}), '(hour=21, minute=0)\n', (562, 581), False, 'from datetime import time\n'), ((609, 631), 'datetime.time', 'time', ([], {'hour': '(9)', 'minute': '(0)'}), '(hour=9, minute=0)\n', (613, 631), False, 'from datetime import time\n'), ((657, 680), 'datetime.time', 'time', ([], {'hour': '(9)', 'minute': '(30)'}), '(hour=9, minute=30)\n', (661, 680), False, 'from datetime import time\n'), ((706, 729), 'datetime.time', 'time', ([], {'hour': '(15)', 'minute': '(0)'}), '(hour=15, minute=0)\n', (710, 729), False, 'from datetime import time\n'), ((769, 792), 'datetime.time', 'time', ([], {'hour': '(23)', 'minute': '(0)'}), '(hour=23, minute=0)\n', (773, 792), False, 'from datetime import time\n'), ((825, 847), 'datetime.time', 'time', ([], {'hour': '(1)', 'minute': '(0)'}), '(hour=1, minute=0)\n', (829, 847), False, 'from datetime import time\n'), ((878, 901), 'datetime.time', 'time', ([], {'hour': '(2)', 'minute': '(30)'}), '(hour=2, minute=30)\n', (882, 901), False, 'from datetime import time\n'), ((941, 965), 'datetime.time', 'time', ([], {'hour': '(10)', 'minute': '(15)'}), '(hour=10, minute=15)\n', (945, 965), False, 'from datetime import time\n'), ((996, 1020), 'datetime.time', 'time', ([], {'hour': '(11)', 'minute': '(30)'}), '(hour=11, minute=30)\n', (1000, 1020), False, 'from datetime import time\n'), ((1049, 1073), 'datetime.time', 'time', ([], {'hour': '(10)', 'minute': '(30)'}), '(hour=10, minute=30)\n', (1053, 1073), False, 'from datetime import time\n'), ((1102, 1125), 'datetime.time', 'time', ([], {'hour': '(13)', 'minute': '(0)'}), '(hour=13, minute=0)\n', (1106, 1125), False, 'from datetime import time\n'), ((1154, 1178), 'datetime.time', 'time', ([], {'hour': '(13)', 'minute': '(30)'}), '(hour=13, minute=30)\n', (1158, 1178), False, 'from datetime import time\n'), ((1840, 1865), 'vnpy.app.cta_strategy.BarGenerator', 'BarGenerator', (['self.on_bar'], {}), '(self.on_bar)\n', (1852, 1865), False, 'from vnpy.app.cta_strategy import CtaTemplate, StopOrder, TickData, BarData, TradeData, OrderData, BarGenerator, ArrayManager\n'), ((1884, 1901), 'vnpy.app.cta_strategy.TSMtools.TSMArrayManager', 'TSMArrayManager', ([], {}), '()\n', (1899, 1901), False, 'from vnpy.app.cta_strategy.TSMtools import TSMArrayManager\n')]
|
# Slixmpp: The Slick XMPP Library
# Copyright (C) 2020 <NAME> <<EMAIL>>
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from typing import (
List,
Optional,
Set,
Tuple,
)
from slixmpp import JID, Iq
from slixmpp.exceptions import IqError, IqTimeout
from slixmpp.plugins import BasePlugin
from slixmpp.stanza.roster import RosterItem
from slixmpp.plugins.xep_0405 import stanza
from slixmpp.plugins.xep_0369 import stanza as mix_stanza
BASE_NODES = [
'urn:xmpp:mix:nodes:messages',
'urn:xmpp:mix:nodes:participants',
'urn:xmpp:mix:nodes:info',
]
class XEP_0405(BasePlugin):
'''XEP-0405: MIX-PAM'''
name = 'xep_0405'
description = 'XEP-0405: MIX-PAM'
dependencies = {'xep_0369'}
stanza = stanza
namespace = stanza.NS
def plugin_init(self) -> None:
stanza.register_plugins()
async def check_server_capability(self) -> bool:
"""Check if the server is MIX-PAM capable"""
result = await self.xmpp.plugin['xep_0030'].get_info(jid=self.xmpp.boundjid.bare)
features = result['disco_info']['features']
return stanza.NS in features
async def join_channel(self, room: JID, nick: str, subscribe: Optional[Set[str]] = None, *,
ito: Optional[JID] = None,
ifrom: Optional[JID] = None,
**iqkwargs) -> Set[str]:
"""
Join a MIX channel.
:param JID room: JID of the MIX channel
:param str nick: Desired nickname on that channel
:param Set[str] subscribe: Set of nodes to subscribe to when joining.
If empty, all nodes will be subscribed by default.
:rtype: Set[str]
:return: The nodes that failed to subscribe, if any
"""
if subscribe is None:
subscribe = set(BASE_NODES)
if ito is None:
ito = self.xmpp.boundjid.bare
iq = self.xmpp.make_iq_set(ito=ito, ifrom=ifrom)
iq['client_join']['channel'] = room
iq['client_join']['mix_join']['nick'] = nick
for node in subscribe:
sub = mix_stanza.Subscribe()
sub['node'] = node
iq['client_join']['mix_join'].append(sub)
result = await iq.send(**iqkwargs)
result_nodes = {sub['node'] for sub in result['client_join']['mix_join']}
return subscribe.difference(result_nodes)
async def leave_channel(self, room: JID, *,
ito: Optional[JID] = None,
ifrom: Optional[JID] = None,
**iqkwargs) -> Iq:
""""
Leave a MIX channel
:param JID room: JID of the channel to leave
"""
if ito is None:
ito = self.xmpp.boundjid.bare
iq = self.xmpp.make_iq_set(ito=ito, ifrom=ifrom)
iq['client_leave']['channel'] = room
iq['client_leave'].enable('mix_leave')
return await iq.send(**iqkwargs)
async def get_mix_roster(self, *,
ito: Optional[JID] = None,
ifrom: Optional[JID] = None,
**iqkwargs) -> Tuple[List[RosterItem], List[RosterItem]]:
"""
Get the annotated roster, with MIX channels.
:return: A tuple of (contacts, mix channels) as RosterItem elements
"""
iq = self.xmpp.make_iq_get(ito=ito, ifrom=ifrom)
iq['roster'].enable('annotate')
result = await iq.send(**iqkwargs)
self.xmpp.event("roster_update", result)
contacts = []
mix = []
for item in result['roster']:
channel = item.get_plugin('channel', check=True)
if channel:
mix.append(item)
else:
contacts.append(item)
return (contacts, mix)
|
[
"slixmpp.plugins.xep_0369.stanza.Subscribe",
"slixmpp.plugins.xep_0405.stanza.register_plugins"
] |
[((848, 873), 'slixmpp.plugins.xep_0405.stanza.register_plugins', 'stanza.register_plugins', ([], {}), '()\n', (871, 873), False, 'from slixmpp.plugins.xep_0405 import stanza\n'), ((2144, 2166), 'slixmpp.plugins.xep_0369.stanza.Subscribe', 'mix_stanza.Subscribe', ([], {}), '()\n', (2164, 2166), True, 'from slixmpp.plugins.xep_0369 import stanza as mix_stanza\n')]
|
from control.controller_veiculos import ControllerVeiculos
from control.controller_proprietario import ControllerProprietario
from control.controller_area import ControllerAreaEstacionamento
from model.constants import *
controller_veiculo = ControllerVeiculos()
controller_proprietario = ControllerProprietario()
controller_areas = ControllerAreaEstacionamento()
def cadastrar_area_especial():
print("\n====== CADASTRAR AREA ESPECIAL ======")
nome = input("Nome: ")
try:
capacidade = int(input("Capacidade: "))
print("[1] Carro; [2] Motocicleta; [3] Onibus")
tipo = TIPO_VEICULO[int(input("Tipo de veiculo (1, 2, ou 3): "))]
controller_areas.register_area(nome, tipo, capacidade)
except:
print("Input invalido")
def remover_area_especial():
print("\n====== REMOVER AREA ESPECIAL ======")
areas = controller_areas.find_special_areas()
areas_str = ""
if len(areas) <= 0:
print("Nao existem areas especiais cadastradas")
return
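# Present the registered special areas as a 1-indexed menu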
for i in range(len(areas)):
areas_str += "[{}] {} ".format((i + 1), areas[i].get_nome())
print(areas_str)
try:
area_nome = areas[(int(input("Area (indice): ")) - 1)].get_nome()
controller_areas.remove_area(area_nome)
except:
print("Input invalido. Voce precisa inserir um indice valido")
|
[
"control.controller_area.ControllerAreaEstacionamento",
"control.controller_proprietario.ControllerProprietario",
"control.controller_veiculos.ControllerVeiculos"
] |
[((244, 264), 'control.controller_veiculos.ControllerVeiculos', 'ControllerVeiculos', ([], {}), '()\n', (262, 264), False, 'from control.controller_veiculos import ControllerVeiculos\n'), ((291, 315), 'control.controller_proprietario.ControllerProprietario', 'ControllerProprietario', ([], {}), '()\n', (313, 315), False, 'from control.controller_proprietario import ControllerProprietario\n'), ((335, 365), 'control.controller_area.ControllerAreaEstacionamento', 'ControllerAreaEstacionamento', ([], {}), '()\n', (363, 365), False, 'from control.controller_area import ControllerAreaEstacionamento\n')]
|
import logging
import itertools
import asyncio
import random
import aiomsg
import aiorun
logging.basicConfig(level="DEBUG")
async def main():
s = aiomsg.Søcket(send_mode=aiomsg.SendMode.ROUNDROBIN)
await s.connect()
async def receiver():
while True:
msg = await s.recv_string()
print("Got back: ", msg)
loop = aiorun.asyncio.get_running_loop()
loop.create_task(receiver())
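# Send an endless stream of numbered messages, sleeping up to 5 seconds between sends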
for i in itertools.count():
await s.send_string(f"{i}")
await asyncio.sleep(random.randint(0, 30) / 6)
aiorun.run(main())
|
[
"logging.basicConfig",
"aiorun.asyncio.get_running_loop",
"aiomsg.Søcket",
"itertools.count",
"random.randint"
] |
[((91, 125), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""DEBUG"""'}), "(level='DEBUG')\n", (110, 125), False, 'import logging\n'), ((154, 205), 'aiomsg.Søcket', 'aiomsg.Søcket', ([], {'send_mode': 'aiomsg.SendMode.ROUNDROBIN'}), '(send_mode=aiomsg.SendMode.ROUNDROBIN)\n', (167, 205), False, 'import aiomsg\n'), ((364, 397), 'aiorun.asyncio.get_running_loop', 'aiorun.asyncio.get_running_loop', ([], {}), '()\n', (395, 397), False, 'import aiorun\n'), ((445, 462), 'itertools.count', 'itertools.count', ([], {}), '()\n', (460, 462), False, 'import itertools\n'), ((528, 549), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (542, 549), False, 'import random\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Author : Virink <<EMAIL>>
Date : 2019/04/18, 14:49
"""
import string
import re
L = string.ascii_lowercase
U = string.ascii_uppercase
A = string.ascii_letters
def func_atbash(*args):
"""埃特巴什码解码"""
arg = args[0]
arg = arg.lower().replace(' ', 'vvvzzzvvv')
res = [L[25 - j] for i in arg for j in range(26) if i == L[j]]
return ''.join(res).replace('eeeaaaeee', ' ')
def __caesar(offset, arg):
"""凯撒编码 : 内部调用"""
result = ""
for ch in arg:
if ch.isupper():
result += U[((U.index(ch) + offset) % 26)]
elif ch.islower():
result += L[((L.index(ch) + offset) % 26)]
elif ch.isdigit():
result += ch
else:
result += ch
return result
def func_caesar(*args):
"""凯撒编码"""
res = []
for offset in range(26):
res.append("[+] offset : %d\tresult : %s" %
(offset, __caesar(offset, args[0])))
return "\r\n".join(res)
def func_rot13(*args):
"""rot13"""
return __caesar(13, args[0])
def func_mpkc(*args):
"""手机键盘编码 Mobile Phone Keyboard Cipher"""
T = {
'A': 21, 'B': 22, 'C': 23, 'D': 31, 'E': 32, 'F': 33,
'G': 41, 'H': 42, 'I': 43, 'J': 51, 'K': 52, 'L': 53,
'M': 61, 'N': 62, 'O': 63, 'P': 71, 'Q': 72, 'R': 73, 'S': 74,
'T': 81, 'U': 82, 'V': 83, 'W': 91, 'X': 92, 'Y': 93, 'Z': 94
}
arg = args[0].upper()
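# Alphabetic input is encoded to keypad coordinates; numeric input is decoded back to letters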
if arg[0] in U:
return ','.join([str(T.get(i, i)) for i in arg])
else:
T = {str(T[k]): k for k in T}
if ',' in arg:
arg = arg.split(',')
elif ' ' in arg:
arg = arg.split(' ')
return ''.join([T.get(i, i) for i in arg])
def func_morse(*args):
"""摩斯电码"""
T = {
'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.',
',': '--..--', '.': '.-.-.-', ':': '---...', ';': '-.-.-.',
'?': '..--..', '=': '-...-', "'": '.----.', '/': '-..-.',
'!': '-.-.--', '-': '-....-', '_': '..--.-', '(': '-.--.',
')': '-.--.-', '$': '...-..-', '&': '. . . .', '@': '.--.-.',
'{': '----.--', '}': '-----.-'
}
arg = args[0]
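# Input made only of dots, dashes, slashes and spaces is decoded; anything else is encoded to Morse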
if re.match(r'^[\.\-\/ ]+$', arg):
T = {str(T[k]): k for k in T}
if len(args) > 1:
arg = ' '.join(args)
arg = arg.replace('/', ' ').split(' ')
# TODO: morse auto decode when it is not sep
# p = 0
# res = ''
# d = 5
# while p < (len(arg)+7) and d > 0:
# print("[D] len : %d p : %d" % (len(arg), p))
# for j in [6, 5, 4, 3, 2, 1, 0]:
# tmp = T.get(arg[p:p+j], None)
# print("[D] tmp = arg[%d:%s] = %s => %s" %
# (p, j, arg[p:p+j], tmp))
# if tmp:
# p = p+j
# res += tmp
# break
# # p = p+j-1
# # break
# d -= 1
# print("[D] Result : %s" % res)
return ''.join([T.get(i, '') for i in arg])
else:
return '/'.join([str(T.get(i, '?')) for i in arg.upper()])
def func_peigen(*args):
"""培根密码"""
T = {
'H': 'aabbb', 'G': 'aabba', 'R': 'baaab', 'Q': 'baaaa',
'Z': 'bbaab', 'Y': 'bbaaa', 'N': 'abbab', 'M': 'abbaa',
'U': 'babaa', 'V': 'babab', 'I': 'abaaa', 'J': 'abaab',
'F': 'aabab', 'E': 'aabaa', 'A': 'aaaaa', 'B': 'aaaab',
'T': 'baabb', 'S': 'baaba', 'C': 'aaaba', 'D': 'aaabb',
'P': 'abbbb', 'O': 'abbba', 'K': 'ababa', 'L': 'ababb',
'W': 'babba', 'X': 'babbb'
}
arg = args[0]
if re.match(r'^[ab]+$', arg):
T = {str(T[k]): k for k in T}
return ''.join([T.get(arg[i:i+5]) for i in range(0, len(arg), 5)])
else:
return ''.join([T.get(i.upper()) for i in arg])
def __vigenere(s, key='virink', de=0):
"""维吉利亚密码"""
s = str(s).replace(" ", "").upper()
key = str(key).replace(" ", "").upper()
res = ''
i = 0
while i < len(s):
j = i % len(key)
k = U.index(key[j])
m = U.index(s[i])
if de:
if m < k:
m += 26
res += U[m - k]
else:
res += U[(m + k) % 26]
i += 1
return res
def func_vigenere(*args):
"""维吉利亚密码"""
if len(args) < 2:
return '[-] Vigenere Usage : command key text [isdecode]'
return __vigenere(args[1], args[0], 1 if len(args) >= 3 else 0)
|
[
"re.match"
] |
[((2773, 2805), 're.match', 're.match', (['"""^[\\\\.\\\\-\\\\/ ]+$"""', 'arg'], {}), "('^[\\\\.\\\\-\\\\/ ]+$', arg)\n", (2781, 2805), False, 'import re\n'), ((4221, 4245), 're.match', 're.match', (['"""^[ab]+$"""', 'arg'], {}), "('^[ab]+$', arg)\n", (4229, 4245), False, 'import re\n')]
|
from .utils import (get_prescription, get_attributes, get_group)
from .models import Disease, Result, Score, Question, SurveyResponse
from .analysis import cardio_risk_group, diabetes_risk_group, stroke_risk_group
from statistics import mean
from celery import shared_task
@shared_task
def worker(session_id):
df, attributes = get_attributes(session_id)
diseases = list(Disease.objects.all())
supported_methods = {
'cardiovascular disease': cardio_risk_group,
'diabetes': diabetes_risk_group,
'stroke': stroke_risk_group
}
question_region = Question.objects.get(label='region')
session_region = (list(SurveyResponse.objects.filter(
session_id=session_id,
question_id=question_region.id))[0]).answer
results = []
for disease in diseases:
illness = disease.illness
result_kwargs = {
'session_id': session_id,
'disease': disease,
'region': session_region
}
if illness not in supported_methods:
result_kwargs['risk_factor'] = 0
result_kwargs['prescription'] = 'Method is currently not supported'
else:
method = supported_methods[illness]
score = method(df, attributes[illness])
result_kwargs['risk_factor'] = float(score)
result_kwargs['label'] = get_group(score)
result_kwargs['prescription'] = get_prescription(score)
result_obj = Result.objects.update_or_create(
session_id=session_id, disease=disease,
defaults=result_kwargs
)
results.append(result_obj[0])
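# Overall score: 100 * (1 - mean risk factor across diseases), assuming risk factors lie in [0, 1]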
score = (1 - mean([res.risk_factor for res in results])) * 100
Score.objects.create(session_id=session_id, score=score)
|
[
"statistics.mean"
] |
[((1662, 1704), 'statistics.mean', 'mean', (['[res.risk_factor for res in results]'], {}), '([res.risk_factor for res in results])\n', (1666, 1704), False, 'from statistics import mean\n')]
|
import json
import random
import subprocess
from stests.core.logging import log_event
from stests.chain.utils import execute_cli
from stests.chain.utils import DeployDispatchInfo
from stests.core.types.chain import Account
from stests.core.types.infra import Network
from stests.core.types.infra import Node
from stests.core.utils import paths
from stests.events import EventType
# Method upon client to be invoked.
_CLIENT_METHOD = "transfer"
# Maximum value of a transfer ID.
_MAX_TRANSFER_ID = (2 ** 63) - 1
@execute_cli(_CLIENT_METHOD, EventType.WFLOW_DEPLOY_DISPATCH_FAILURE)
def execute(info: DeployDispatchInfo, cp2: Account, amount: int, verbose: bool = True) -> str:
"""Executes a transfer between 2 counter-parties & returns resulting deploy hash.
:param info: Standard information required to dispatch deploy.
:param cp2: Account information of counter party 2.
:param amount: Amount (in motes) to be transferred.
:param verbose: Flag indicating whether the event will be logged.
:returns: Dispatched deploy hash.
"""
binary_path = paths.get_path_to_client(info.network)
cp1 = info.dispatcher
cli_response = subprocess.run([
binary_path, _CLIENT_METHOD,
"--target-account", cp2.account_key,
"--amount", str(amount),
"--chain-name", info.network.chain_name,
"--gas-price", str(info.gas_price),
"--node-address", info.node_address,
"--payment-amount", str(info.fee),
"--secret-key", info.dispatcher.get_private_key_pem_filepath(),
"--transfer-id", str(random.randint(1, _MAX_TRANSFER_ID)),
"--ttl", str(info.time_to_live),
],
stdout=subprocess.PIPE,
)
deploy_hash = json.loads(cli_response.stdout)['result']['deploy_hash']
if verbose:
log_event(
EventType.WFLOW_DEPLOY_DISPATCHED,
f"{info.node.address} :: {deploy_hash} :: transfer (native) :: {amount} CSPR :: from {cp1.account_key[:8]} -> {cp2.account_key[:8]} ",
info.node,
deploy_hash=deploy_hash,
)
return deploy_hash
|
[
"json.loads",
"stests.core.logging.log_event",
"stests.core.utils.paths.get_path_to_client",
"stests.chain.utils.execute_cli",
"random.randint"
] |
[((518, 586), 'stests.chain.utils.execute_cli', 'execute_cli', (['_CLIENT_METHOD', 'EventType.WFLOW_DEPLOY_DISPATCH_FAILURE'], {}), '(_CLIENT_METHOD, EventType.WFLOW_DEPLOY_DISPATCH_FAILURE)\n', (529, 586), False, 'from stests.chain.utils import execute_cli\n'), ((1079, 1117), 'stests.core.utils.paths.get_path_to_client', 'paths.get_path_to_client', (['info.network'], {}), '(info.network)\n', (1103, 1117), False, 'from stests.core.utils import paths\n'), ((1814, 2038), 'stests.core.logging.log_event', 'log_event', (['EventType.WFLOW_DEPLOY_DISPATCHED', 'f"""{info.node.address} :: {deploy_hash} :: transfer (native) :: {amount} CSPR :: from {cp1.account_key[:8]} -> {cp2.account_key[:8]} """', 'info.node'], {'deploy_hash': 'deploy_hash'}), "(EventType.WFLOW_DEPLOY_DISPATCHED,\n f'{info.node.address} :: {deploy_hash} :: transfer (native) :: {amount} CSPR :: from {cp1.account_key[:8]} -> {cp2.account_key[:8]} '\n , info.node, deploy_hash=deploy_hash)\n", (1823, 2038), False, 'from stests.core.logging import log_event\n'), ((1728, 1759), 'json.loads', 'json.loads', (['cli_response.stdout'], {}), '(cli_response.stdout)\n', (1738, 1759), False, 'import json\n'), ((1578, 1613), 'random.randint', 'random.randint', (['(1)', '_MAX_TRANSFER_ID'], {}), '(1, _MAX_TRANSFER_ID)\n', (1592, 1613), False, 'import random\n')]
|
from functools import wraps
from collections import Iterable
from django.conf import settings
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six import string_types
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import resolve_url
from waliki.utils import is_authenticated
from .models import ACLRule
from .settings import (WALIKI_ANONYMOUS_USER_PERMISSIONS,
WALIKI_LOGGED_USER_PERMISSIONS,
WALIKI_RENDER_403)
def check_perms(perms, user, slug, raise_exception=False):
"""a helper user to check if a user has the permissions
for a given slug"""
if isinstance(perms, string_types):
perms = {perms}
else:
perms = set(perms)
allowed_users = ACLRule.get_users_for(perms, slug)
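# An ACL rule matching the slug takes precedence over the default permission sets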
if allowed_users:
return user in allowed_users
if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
return True
if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
return True
# First check if the user has the permission (even anon users)
if user.has_perms(['waliki.%s' % p for p in perms]):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
this is analog to django's builtin ``permission_required`` decorator, but
improved to check per slug ACLRules and default permissions for
anonymous and logged in users
if there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, defaults permissions
apply.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
return view_func(request, *args, **kwargs)
if is_authenticated(request.user):
if WALIKI_RENDER_403:
return render(request, 'waliki/403.html', kwargs, status=403)
else:
raise PermissionDenied
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(login_url or settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
|
[
"django.shortcuts.render",
"django.utils.six.moves.urllib.parse.urlparse",
"django.shortcuts.resolve_url",
"django.contrib.auth.views.redirect_to_login",
"waliki.utils.is_authenticated",
"django.utils.decorators.available_attrs"
] |
[((1149, 1171), 'waliki.utils.is_authenticated', 'is_authenticated', (['user'], {}), '(user)\n', (1165, 1171), False, 'from waliki.utils import is_authenticated\n'), ((2374, 2404), 'waliki.utils.is_authenticated', 'is_authenticated', (['request.user'], {}), '(request.user)\n', (2390, 2404), False, 'from waliki.utils import is_authenticated\n'), ((3361, 3425), 'django.contrib.auth.views.redirect_to_login', 'redirect_to_login', (['path', 'resolved_login_url', 'redirect_field_name'], {}), '(path, resolved_login_url, redirect_field_name)\n', (3378, 3425), False, 'from django.contrib.auth.views import redirect_to_login\n'), ((2772, 2816), 'django.shortcuts.resolve_url', 'resolve_url', (['(login_url or settings.LOGIN_URL)'], {}), '(login_url or settings.LOGIN_URL)\n', (2783, 2816), False, 'from django.shortcuts import resolve_url\n'), ((2982, 3010), 'django.utils.six.moves.urllib.parse.urlparse', 'urlparse', (['resolved_login_url'], {}), '(resolved_login_url)\n', (2990, 3010), False, 'from django.utils.six.moves.urllib.parse import urlparse\n'), ((3060, 3074), 'django.utils.six.moves.urllib.parse.urlparse', 'urlparse', (['path'], {}), '(path)\n', (3068, 3074), False, 'from django.utils.six.moves.urllib.parse import urlparse\n'), ((2120, 2146), 'django.utils.decorators.available_attrs', 'available_attrs', (['view_func'], {}), '(view_func)\n', (2135, 2146), False, 'from django.utils.decorators import available_attrs\n'), ((2471, 2525), 'django.shortcuts.render', 'render', (['request', '"""waliki/403.html"""', 'kwargs'], {'status': '(403)'}), "(request, 'waliki/403.html', kwargs, status=403)\n", (2477, 2525), False, 'from django.shortcuts import render\n')]
|
"""
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
class CoulombMatrix(MolecularFeaturizer):
"""Calculate Coulomb matrices for molecules.
Coulomb matrices provide a representation of the electronic structure of
a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
`N X N` matrix where each element gives the strength of the
electrostatic interaction between two atoms. The method is described
in more detail in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
upper_tri: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
upper_tri: bool, optional (default False)
Generate only upper triangle part of Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.upper_tri = upper_tri
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate Coulomb matrices for molecules. If extra randomized
matrices are generated, they are treated as if they are features
for additional conformers.
Since Coulomb matrices are symmetric, only the (flattened) upper
triangular portion is returned.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule.
The default shape is `(num_confs, max_atoms, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
features = self.coulomb_matrix(datapoint)
if self.upper_tri:
features = [f[np.triu_indices_from(f)] for f in features]
features = np.asarray(features)
if features.shape[0] == 1:
# `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
features = np.squeeze(features, axis=0)
return features
def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
"""
Generate Coulomb matrices for each conformer of the given molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
# Check whether num_confs >=1 or not
num_confs = len(mol.GetConformers())
if num_confs == 0:
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, AllChem.ETKDG())
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol)
n_atoms = mol.GetNumAtoms()
z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
rval = []
for conf in mol.GetConformers():
d = self.get_interatomic_distances(conf)
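# Coulomb matrix: off-diagonal entries Z_i * Z_j / d_ij, diagonal entries 0.5 * Z_i ** 2.4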
m = np.outer(z, z) / d
m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
if self.randomize:
for random_m in self.randomize_coulomb_matrix(m):
random_m = pad_array(random_m, self.max_atoms)
rval.append(random_m)
else:
m = pad_array(m, self.max_atoms)
rval.append(m)
return np.asarray(rval)
def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
"""Randomize a Coulomb matrix as decribed in [1]_:
1. Compute row norms for M in a vector row_norms.
2. Sample a zero-mean unit-variance noise vector e with dimension
equal to row_norms.
3. Permute the rows and columns of M with the permutation that
sorts row_norms + e.
Parameters
----------
m: np.ndarray
Coulomb matrix.
Returns
-------
List[np.ndarray]
List of the random coulomb matrix
References
----------
.. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
"""
rval = []
row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
rng = np.random.RandomState(self.seed)
for i in range(self.n_samples):
e = rng.normal(size=row_norms.size)
p = np.argsort(row_norms + e)
new = m[p][:, p] # permute rows first, then columns
rval.append(new)
return rval
@staticmethod
def get_interatomic_distances(conf: Any) -> np.ndarray:
"""
Get interatomic distances for atoms in a molecular conformer.
Parameters
----------
conf: rdkit.Chem.rdchem.Conformer
Molecule conformer.
Returns
-------
np.ndarray
The distances matrix for all atoms in a molecule
"""
n_atoms = conf.GetNumAtoms()
coords = [
# Convert AtomPositions from Angstrom to bohr (atomic units)
conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
]
d = np.zeros((n_atoms, n_atoms), dtype=float)
for i in range(n_atoms):
for j in range(i):
d[i, j] = coords[i].Distance(coords[j])
d[j, i] = d[i, j]
return d
class CoulombMatrixEig(CoulombMatrix):
"""Calculate the eigenvalues of Coulomb matrices for molecules.
This featurizer computes the eigenvalues of the Coulomb matrices for provided
molecules. Coulomb matrices are described in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
are returned sorted by absolute value in descending order and padded
by max_atoms.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The eigenvalues of Coulomb matrix for molecules.
The default shape is `(num_confs, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms,)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
cmat = self.coulomb_matrix(datapoint)
features_list = []
for f in cmat:
w, v = np.linalg.eig(f)
w_abs = np.abs(w)
sortidx = np.argsort(w_abs)
sortidx = sortidx[::-1]
w = w[sortidx]
f = pad_array(w, self.max_atoms)
features_list.append(f)
features = np.asarray(features_list)
if features.shape[0] == 1:
# `(1, max_atoms)` -> `(max_atoms,)`
features = np.squeeze(features, axis=0)
return features
|
[
"numpy.abs",
"numpy.linalg.eig",
"rdkit.Chem.AddHs",
"numpy.asarray",
"numpy.triu_indices_from",
"numpy.squeeze",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"rdkit.Chem.AllChem.ETKDG",
"numpy.linalg.norm",
"rdkit.Chem.RemoveHs",
"numpy.random.RandomState",
"deepchem.utils.data_utils.pad_array"
] |
[((3568, 3588), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (3578, 3588), True, 'import numpy as np\n'), ((5033, 5049), 'numpy.asarray', 'np.asarray', (['rval'], {}), '(rval)\n', (5043, 5049), True, 'import numpy as np\n'), ((5793, 5825), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (5814, 5825), True, 'import numpy as np\n'), ((6596, 6637), 'numpy.zeros', 'np.zeros', (['(n_atoms, n_atoms)'], {'dtype': 'float'}), '((n_atoms, n_atoms), dtype=float)\n', (6604, 6637), True, 'import numpy as np\n'), ((9664, 9689), 'numpy.asarray', 'np.asarray', (['features_list'], {}), '(features_list)\n', (9674, 9689), True, 'import numpy as np\n'), ((3701, 3729), 'numpy.squeeze', 'np.squeeze', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (3711, 3729), True, 'import numpy as np\n'), ((4365, 4380), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (4375, 4380), False, 'from rdkit import Chem\n'), ((4474, 4492), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (4487, 4492), False, 'from rdkit import Chem\n'), ((5914, 5939), 'numpy.argsort', 'np.argsort', (['(row_norms + e)'], {}), '(row_norms + e)\n', (5924, 5939), True, 'import numpy as np\n'), ((9454, 9470), 'numpy.linalg.eig', 'np.linalg.eig', (['f'], {}), '(f)\n', (9467, 9470), True, 'import numpy as np\n'), ((9485, 9494), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (9491, 9494), True, 'import numpy as np\n'), ((9511, 9528), 'numpy.argsort', 'np.argsort', (['w_abs'], {}), '(w_abs)\n', (9521, 9528), True, 'import numpy as np\n'), ((9590, 9618), 'deepchem.utils.data_utils.pad_array', 'pad_array', (['w', 'self.max_atoms'], {}), '(w, self.max_atoms)\n', (9599, 9618), False, 'from deepchem.utils.data_utils import pad_array\n'), ((9781, 9809), 'numpy.squeeze', 'np.squeeze', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (9791, 9809), True, 'import numpy as np\n'), ((4414, 4429), 'rdkit.Chem.AllChem.ETKDG', 'AllChem.ETKDG', ([], {}), '()\n', (4427, 4429), False, 'from rdkit.Chem import AllChem\n'), ((4690, 4704), 'numpy.outer', 'np.outer', (['z', 'z'], {}), '(z, z)\n', (4698, 4704), True, 'import numpy as np\n'), ((4970, 4998), 'deepchem.utils.data_utils.pad_array', 'pad_array', (['m', 'self.max_atoms'], {}), '(m, self.max_atoms)\n', (4979, 4998), False, 'from deepchem.utils.data_utils import pad_array\n'), ((5735, 5754), 'numpy.linalg.norm', 'np.linalg.norm', (['row'], {}), '(row)\n', (5749, 5754), True, 'import numpy as np\n'), ((3509, 3532), 'numpy.triu_indices_from', 'np.triu_indices_from', (['f'], {}), '(f)\n', (3529, 3532), True, 'import numpy as np\n'), ((4757, 4768), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (4765, 4768), True, 'import numpy as np\n'), ((4878, 4913), 'deepchem.utils.data_utils.pad_array', 'pad_array', (['random_m', 'self.max_atoms'], {}), '(random_m, self.max_atoms)\n', (4887, 4913), False, 'from deepchem.utils.data_utils import pad_array\n')]
|
"""A simple wrapper around contextlib.suppress"""
import contextlib
from functools import wraps
__version__ = "0.1.1"
def suppress(*exceptions):
def wrap(func):
@wraps(func)
def inner(*args, **kwargs):
with contextlib.suppress(exceptions):
return func(*args, **kwargs)
return inner
return wrap
def async_suppress(*exceptions):
def wrap(func):
@wraps(func)
async def inner(*args, **kwargs):
with contextlib.suppress(exceptions):
return await func(*args, **kwargs)
return inner
return wrap
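# Illustrative usage (not part of the original module): the decorated function
# silently returns None when one of the listed exception types is raised.
# Passing the collected tuple straight to contextlib.suppress works because
# issubclass accepts nested tuples of exception types, although unpacking it
# with *exceptions would be the more conventional call.
if __name__ == "__main__":
    @suppress(KeyError, IndexError)
    def first_item(items):
        return items[0]

    print(first_item([]))  # prints None instead of raising IndexError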
|
[
"contextlib.suppress",
"functools.wraps"
] |
[((179, 190), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (184, 190), False, 'from functools import wraps\n'), ((423, 434), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (428, 434), False, 'from functools import wraps\n'), ((244, 275), 'contextlib.suppress', 'contextlib.suppress', (['exceptions'], {}), '(exceptions)\n', (263, 275), False, 'import contextlib\n'), ((494, 525), 'contextlib.suppress', 'contextlib.suppress', (['exceptions'], {}), '(exceptions)\n', (513, 525), False, 'import contextlib\n')]
|
import asyncio
import contextlib
import glob
import itertools
import logging
import os
import pytest
import uvloop
try:
import tracemalloc
tracemalloc.start()
except ImportError:
# Not available in pypy
pass
# clear compiled cython tests
for path in itertools.chain(
glob.glob(os.path.join('tests', '*.so')),
glob.glob(os.path.join('tests', '*.c'))):
os.unlink(path)
@pytest.fixture(params=[
asyncio,
uvloop
])
def loop_mod(request):
return request.param
@pytest.fixture(autouse=True)
def foo():
print('TEST IS', os.environ.get('PYTEST_CURRENT_TEST'))
def event_loop(loop_mod):
loop = loop_mod.new_event_loop()
asyncio.set_event_loop(loop)
if loop_mod != uvloop:
# uvloop in debug mode calls extract_stack, which results in "ValueError: call stack is not deep enough"
# for Cython code
loop.set_debug(True)
with contextlib.closing(loop):
yield loop
def pytest_configure(config):
if config.getoption('verbose') > 0:
h = logging.StreamHandler()
h.setLevel(logging.DEBUG)
logger = logging.getLogger('portaudio')
logger.addHandler(h)
logger.setLevel(logging.DEBUG)
|
[
"logging.getLogger",
"logging.StreamHandler",
"tracemalloc.start",
"os.path.join",
"os.environ.get",
"os.unlink",
"contextlib.closing",
"pytest.fixture",
"asyncio.set_event_loop"
] |
[((411, 451), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[asyncio, uvloop]'}), '(params=[asyncio, uvloop])\n', (425, 451), False, 'import pytest\n'), ((513, 541), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (527, 541), False, 'import pytest\n'), ((150, 169), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (167, 169), False, 'import tracemalloc\n'), ((392, 407), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (401, 407), False, 'import os\n'), ((682, 710), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (704, 710), False, 'import asyncio\n'), ((306, 335), 'os.path.join', 'os.path.join', (['"""tests"""', '"""*.so"""'], {}), "('tests', '*.so')\n", (318, 335), False, 'import os\n'), ((356, 384), 'os.path.join', 'os.path.join', (['"""tests"""', '"""*.c"""'], {}), "('tests', '*.c')\n", (368, 384), False, 'import os\n'), ((574, 611), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (588, 611), False, 'import os\n'), ((915, 939), 'contextlib.closing', 'contextlib.closing', (['loop'], {}), '(loop)\n', (933, 939), False, 'import contextlib\n'), ((1044, 1067), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1065, 1067), False, 'import logging\n'), ((1119, 1149), 'logging.getLogger', 'logging.getLogger', (['"""portaudio"""'], {}), "('portaudio')\n", (1136, 1149), False, 'import logging\n')]
|
#!/usr/bin/env python
# coding: utf-8
# This script generates a zone plate pattern (based on partial filling) given the material, energy, grid size and number of zones as input
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from numba import njit
from joblib import Parallel, delayed
from tqdm import tqdm, trange
import urllib,os,pickle
from os.path import dirname as up
# Importing all the required libraries. Numba is used to optimize functions.
# In[2]:
def repeat_pattern(X,Y,Z):
flag_ = np.where((X>0)&(Y>0))
flag1 = np.where((X>0)&(Y<0))
flag1 = tuple((flag1[0][::-1],flag1[1]))
Z[flag1] = Z[flag_]
flag2 = np.where((X<0)&(Y>0))
flag2 = tuple((flag2[0],flag2[1][::-1]))
Z[flag2] = Z[flag_]
flag3 = np.where((X<0)&(Y<0))
flag3 = tuple((flag3[0][::-1],flag3[1][::-1]))
Z[flag3] = Z[flag_]
return Z
# *repeat_pattern* : produces the zone plate pattern given the pattern in only one quadrant(X,Y>0) as input.
# * *Inputs* : X and Y grid denoting the coordinates and Z containing the pattern in one quadrant.
# * *Outputs* : Z itself is modified in place to reflect the repetition.
# In[3]:
def get_property(mat,energy):
url = "http://henke.lbl.gov/cgi-bin/pert_cgi.pl"
data = {'Element':str(mat), 'Energy':str(energy), 'submit':'Submit Query'}
data = urllib.parse.urlencode(data)
data = data.encode('utf-8')
req = urllib.request.Request(url, data)
resp = urllib.request.urlopen(req)
respDat = resp.read()
response = respDat.split()
d = b'g/cm^3<li>Delta'
i = response.index(d)
delta = str(response[i+2])[:str(response[i+2]).index('<li>Beta')][2:]
beta = str(response[i+4])[2:-1]
return float(delta),float(beta)
# *get_property* : gets delta and beta for a given material at the specified energy from Henke et al.
# * *Inputs* : mat - material, energy - energy in eV
# * *Outputs* : delta, beta
# In[4]:
@njit # equivalent to "jit(nopython=True)".
def partial_fill(x,y,step,r1,r2,n):
x_ = np.linspace(x-step/2,x+step/2,n)
y_ = np.linspace(y-step/2,y+step/2,n)
cnts = 0
for i in range(n):
for j in range(n):
z = (x_[i] * x_[i] + y_[j] * y_[j])
if r1*r1 < z < r2*r2:
cnts += 1
fill_factor = cnts/(n*n)
return fill_factor
# *partial_fill* : workhorse function for determining the fill pattern. This function is thus used in a loop. njit is used to optimize the function.
# * *Inputs* : x,y - coordinates of the point, step - step size, r1,r2 - inner and outer radii of ring, n - resolution
# * *Outputs* : fill_factor - value of the pixel based on amount of ring passing through it
# In[5]:
#find the radius of the nth zone
def zone_radius(n,f,wavel):
return np.sqrt(n*wavel*f + ((n*wavel)/2)**2)
# *zone_radius* : function to find the radius of a zone given the zone number and wavelength
# * *Inputs* : n - zone number, f - focal length, wavel - wavelength
# * *Outputs* : radius of the zone as specified by the inputs
# In[6]:
def make_quadrant(X,Y,flag,r1,r2,step,n,zone_number):
z = np.zeros(np.shape(X))
Z = np.sqrt(X**2+Y**2)
for l in range(len(flag[0])):
i = flag[0][l]
j = flag[1][l]
if 0.75*r1< Z[i][j] < 1.25*r2:
x1 = X[i][j]
y1 = Y[i][j]
z[i][j] = partial_fill(x1,y1,step,r1,r2,n)
z[tuple((flag[1],flag[0]))] = z[tuple((flag[0],flag[1]))]
return z
# *make_quadrant* : function used to create a quadrant of a ring given the inner and outer radius and zone number
# * *Inputs* : X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0), r1,r2 - inner and outer radii, n - parameter for the partial_fill function
# * *Outputs* : z - output pattern with one quadrant filled.
# In[7]:
#2D ZP
def make_ring(i):
print(i)
r1 = radius[i-1]
r2 = radius[i]
n = 250
ring = make_quadrant(X,Y,flag,r1,r2,step_xy,n,zone_number = i)
ring = repeat_pattern(X,Y,ring)
ring_ = np.where(ring!=0)
vals_ = ring[ring_]
np.save('ring_locs_'+str(i)+'.npy',ring_)
np.save('ring_vals_'+str(i)+'.npy',vals_)
return
# *make_ring* : function used to create a ring given the relevant parameters
# * *Inputs* : i-zone number,radius - array of radii ,X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0),n - parameter for the partial_fill function
# * *Outputs* : None. Saves the rings to memory.
# In[8]:
mat = 'Au'
energy = 10000 #Energy in EV
f = 10e-3 #focal length in meters
wavel = (1239.84/energy)*10**(-9) #Wavelength in meters
delta,beta = get_property(mat,energy)
zones = 700 #number of zones
radius = np.zeros(zones)
# Setting up the parameters and initializing the variables.
# In[9]:
for k in range(zones):
radius[k] = zone_radius(k,f,wavel)
# Filling the radius array with the radius of zones for later use in making the rings.
# In the next few code blocks, we check if the parameters of the simulation make sense. First we print out the input and output pixel sizes assuming we will be using the 1FT propagator. Then we see if the pixel sizes are small enough compared to the outermost zone width. Finally we check if the focal spot can be contained for the given amount of tilt angle.
# In[10]:
grid_size = 55296
input_xrange = 262e-6
step_xy = input_xrange/grid_size
L_out = (1239.84/energy)*10**(-9)*f/(input_xrange/grid_size)
step_xy_output = L_out/grid_size
print(' Output L : ',L_out)
print(' output pixel size(nm) : ',step_xy_output*1e9)
print(' input pixel size(nm) : ',step_xy*1e9)
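# Worked check of the printed values (illustrative, rounded): with
# lambda ~ 1.24e-10 m, f = 10 mm and an input pixel of 262e-6/55296 ~ 4.74 nm,
# L_out = lambda*f/step ~ 262 um, so the output pixel is L_out/55296 ~ 4.73 nm.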
# In[11]:
drn = radius[-1]-radius[-2]
print(' maximum radius(um) : ',radius[-1]*1e6)
print(' outermost zone width(nm) :',drn*1e9)
# In[12]:
print(' max shift of focal spot(um) : ',(L_out/2)*1e6)
# invert the following to get max tilt allowance
# after which the focal spot falls of the
# simulation plane
# np.sin(theta*(np.pi/180))*f = (L_out/2)
theta_max = np.arcsin((L_out/2)*(1/f))*(180/np.pi)
print(' max wavefield aligned tilt(deg) : ',theta_max)
# In[13]:
if step_xy > 0.25*drn :
    print(' WARNING ! input pixel size too large (coarser than a quarter of the outermost zone width)')
print(' ratio of input step size to outermost zone width', step_xy/drn)
if step_xy_output > 0.25*drn :
    print(' WARNING ! output pixel size too large (coarser than a quarter of the outermost zone width)')
print(' ratio of output step size to outermost zone width', step_xy_output/drn)
# In[14]:
zones_to_fill = []
for i in range(zones):
if i%2 == 1 :
zones_to_fill.append(i)
zones_to_fill = np.array(zones_to_fill)
# Making a list of zones to fill. (Since only alternate zones are filled in our case. This can be modified as per convenience)
# In[ ]:
try :
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
except :
os.mkdir(up(os.getcwd())+str('/hard_xray_zp'))
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
# Store the location of each ring of the zone plate separately in a sub directory. This is more efficient than storing the whole zone plate array !
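# Sketch (not part of the original notebook) of how the saved ring files could be
# reassembled into a dense array later, if ever needed -- kept as a comment because
# at grid_size = 55296 the dense float64 array would need roughly 24 GB of memory:
# zp = np.zeros((grid_size, grid_size))
# for i in zones_to_fill:
#     locs = np.load('ring_locs_' + str(i) + '.npy')
#     vals = np.load('ring_vals_' + str(i) + '.npy')
#     zp[tuple(locs)] = vals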
# In[ ]:
x1 = input_xrange/2
x = np.linspace(-x1,x1,grid_size)
step_xy = x[-1]-x[-2]
zp_coords =[-x1,x1,-x1,x1]
# In[ ]:
X,Y = np.meshgrid(x,x)
flag = np.where((X>0)&(Y>0)&(X>=Y))
# Creating the input 1D array and setting the parameters for use by the make ring function.
# Note that X,Y,flag and step_xy will be read by multiple processes which we will spawn using joblib.
# In[ ]:
get_ipython().run_cell_magic('capture', '', 'from joblib import Parallel, delayed \nresults = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)')
# Creating the rings ! (Adjust the number of jobs depending on CPU cores.)
# In[ ]:
params = {'grid_size':grid_size,'step_xy':step_xy,'energy(in eV)':energy,'wavelength in m':wavel,'focal_length':f,'zp_coords':zp_coords,'delta':delta,'beta':beta}
pickle.dump(params,open('parameters.pickle','wb'))
# Pickling and saving all the associated parameters along with the rings for use in simulation!
|
[
"numpy.sqrt",
"numpy.where",
"urllib.request.Request",
"numpy.arcsin",
"os.getcwd",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"urllib.parse.urlencode",
"numpy.meshgrid",
"numpy.shape",
"urllib.request.urlopen"
] |
[((4720, 4735), 'numpy.zeros', 'np.zeros', (['zones'], {}), '(zones)\n', (4728, 4735), True, 'import numpy as np\n'), ((6546, 6569), 'numpy.array', 'np.array', (['zones_to_fill'], {}), '(zones_to_fill)\n', (6554, 6569), True, 'import numpy as np\n'), ((7066, 7097), 'numpy.linspace', 'np.linspace', (['(-x1)', 'x1', 'grid_size'], {}), '(-x1, x1, grid_size)\n', (7077, 7097), True, 'import numpy as np\n'), ((7164, 7181), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (7175, 7181), True, 'import numpy as np\n'), ((7188, 7226), 'numpy.where', 'np.where', (['((X > 0) & (Y > 0) & (X >= Y))'], {}), '((X > 0) & (Y > 0) & (X >= Y))\n', (7196, 7226), True, 'import numpy as np\n'), ((520, 547), 'numpy.where', 'np.where', (['((X > 0) & (Y > 0))'], {}), '((X > 0) & (Y > 0))\n', (528, 547), True, 'import numpy as np\n'), ((554, 581), 'numpy.where', 'np.where', (['((X > 0) & (Y < 0))'], {}), '((X > 0) & (Y < 0))\n', (562, 581), True, 'import numpy as np\n'), ((657, 684), 'numpy.where', 'np.where', (['((X < 0) & (Y > 0))'], {}), '((X < 0) & (Y > 0))\n', (665, 684), True, 'import numpy as np\n'), ((760, 787), 'numpy.where', 'np.where', (['((X < 0) & (Y < 0))'], {}), '((X < 0) & (Y < 0))\n', (768, 787), True, 'import numpy as np\n'), ((1327, 1355), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['data'], {}), '(data)\n', (1349, 1355), False, 'import urllib, os, pickle\n'), ((1398, 1431), 'urllib.request.Request', 'urllib.request.Request', (['url', 'data'], {}), '(url, data)\n', (1420, 1431), False, 'import urllib, os, pickle\n'), ((1443, 1470), 'urllib.request.urlopen', 'urllib.request.urlopen', (['req'], {}), '(req)\n', (1465, 1470), False, 'import urllib, os, pickle\n'), ((2016, 2058), 'numpy.linspace', 'np.linspace', (['(x - step / 2)', '(x + step / 2)', 'n'], {}), '(x - step / 2, x + step / 2, n)\n', (2027, 2058), True, 'import numpy as np\n'), ((2058, 2100), 'numpy.linspace', 'np.linspace', (['(y - step / 2)', '(y + step / 2)', 'n'], {}), '(y - step / 2, y + step / 2, n)\n', (2069, 2100), True, 'import numpy as np\n'), ((2764, 2809), 'numpy.sqrt', 'np.sqrt', (['(n * wavel * f + (n * wavel / 2) ** 2)'], {}), '(n * wavel * f + (n * wavel / 2) ** 2)\n', (2771, 2809), True, 'import numpy as np\n'), ((3131, 3155), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (3138, 3155), True, 'import numpy as np\n'), ((4010, 4029), 'numpy.where', 'np.where', (['(ring != 0)'], {}), '(ring != 0)\n', (4018, 4029), True, 'import numpy as np\n'), ((5998, 6028), 'numpy.arcsin', 'np.arcsin', (['(L_out / 2 * (1 / f))'], {}), '(L_out / 2 * (1 / f))\n', (6007, 6028), True, 'import numpy as np\n'), ((3110, 3121), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3118, 3121), True, 'import numpy as np\n'), ((6733, 6744), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6742, 6744), False, 'import urllib, os, pickle\n'), ((6793, 6804), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6802, 6804), False, 'import urllib, os, pickle\n'), ((6844, 6855), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6853, 6855), False, 'import urllib, os, pickle\n')]
|
# pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.data.dataset_readers import SnliReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestSnliReader(object):
@pytest.mark.parametrize(u"lazy", (True, False))
def test_read_from_file(self, lazy):
reader = SnliReader(lazy=lazy)
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'snli.jsonl')
instances = ensure_list(instances)
instance1 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"training", u"his", u"horse", u"for", u"a",
u"competition", u"."],
u"label": u"neutral"}
instance2 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"at", u"a", u"diner", u",", u"ordering", u"an",
u"omelette", u"."],
u"label": u"contradiction"}
instance3 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"outdoors", u",", u"on", u"a", u"horse", u"."],
u"label": u"entailment"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields[u"premise"].tokens] == instance1[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance1[u"hypothesis"]
assert fields[u"label"].label == instance1[u"label"]
fields = instances[1].fields
assert [t.text for t in fields[u"premise"].tokens] == instance2[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance2[u"hypothesis"]
assert fields[u"label"].label == instance2[u"label"]
fields = instances[2].fields
assert [t.text for t in fields[u"premise"].tokens] == instance3[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance3[u"hypothesis"]
assert fields[u"label"].label == instance3[u"label"]
|
[
"pytest.mark.parametrize",
"allennlp.common.util.ensure_list",
"allennlp.data.dataset_readers.SnliReader"
] |
[((318, 365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['u"""lazy"""', '(True, False)'], {}), "(u'lazy', (True, False))\n", (341, 365), False, 'import pytest\n'), ((424, 445), 'allennlp.data.dataset_readers.SnliReader', 'SnliReader', ([], {'lazy': 'lazy'}), '(lazy=lazy)\n', (434, 445), False, 'from allennlp.data.dataset_readers import SnliReader\n'), ((556, 578), 'allennlp.common.util.ensure_list', 'ensure_list', (['instances'], {}), '(instances)\n', (567, 578), False, 'from allennlp.common.util import ensure_list\n')]
|
from django.contrib import admin
from .models import AppropriatedHistory
@admin.register(AppropriatedHistory)
class AppropriatedHistoryAdmin(admin.ModelAdmin):
list_display = [
'fiscal_year',
'source',
'dollars_received'
]
list_editable = [
'dollars_received',
]
ordering = [
'fiscal_year',
]
|
[
"django.contrib.admin.register"
] |
[((77, 112), 'django.contrib.admin.register', 'admin.register', (['AppropriatedHistory'], {}), '(AppropriatedHistory)\n', (91, 112), False, 'from django.contrib import admin\n')]
|
#!flask/bin/python
# Copyright 2021 <NAME> (@luisblazquezm)
# See LICENSE for details.
from flask_restx import Api
api = Api(version='1.0',
title='Influencer Detection Project',
description="**PORBI Influencer Detection project's Flask RESTX API**")
|
[
"flask_restx.Api"
] |
[((124, 257), 'flask_restx.Api', 'Api', ([], {'version': '"""1.0"""', 'title': '"""Influencer Detection Project"""', 'description': '"""**PORBI Influencer Detection project\'s Flask RESTX API**"""'}), '(version=\'1.0\', title=\'Influencer Detection Project\', description=\n "**PORBI Influencer Detection project\'s Flask RESTX API**")\n', (127, 257), False, 'from flask_restx import Api\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite full python api demo
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from paddlelite.lite import *
import numpy as np
import platform
# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
parser.add_argument("--model_file", default="", type=str, help="Model file")
parser.add_argument(
"--param_file", default="", type=str, help="Combined model param file")
parser.add_argument(
"--input_shape",
default=[1, 3, 224, 224],
nargs='+',
type=int,
required=False,
    help="Model input shape, e.g.: 1 3 224 224. Default: 1 3 224 224")
parser.add_argument(
"--backend",
default="",
type=str,
help="To use a particular backend for execution. Should be one of: arm|opencl|x86|x86_opencl|metal|nnadapter"
)
parser.add_argument(
"--image_path", default="", type=str, help="The path of test image file")
parser.add_argument(
"--label_path", default="", type=str, help="The path of label file")
parser.add_argument(
"--print_results",
type=bool,
default=False,
help="Print results. Default: False")
parser.add_argument(
"--nnadapter_device_names",
default="",
type=str,
help="Set nnadapter device names")
parser.add_argument(
"--nnadapter_context_properties",
default="",
type=str,
help="Set nnadapter context properties")
parser.add_argument(
"--nnadapter_model_cache_dir",
default="",
type=str,
help="Set nnadapter model cache dir")
parser.add_argument(
"--nnadapter_subgraph_partition_config_path",
default="",
type=str,
help="Set nnadapter subgraph partition config path")
parser.add_argument(
"--nnadapter_mixed_precision_quantization_config_path",
default="",
type=str,
help="Set nnadapter mixed precision quantization config path")
def RunModel(args):
# 1. Set config information
config = CxxConfig()
if args.model_file != '' and args.param_file != '':
config.set_model_file(args.model_file)
config.set_param_file(args.param_file)
else:
config.set_model_dir(args.model_dir)
if platform.machine() in ["x86_64", "x64", "AMD64"]:
platform_place = Place(TargetType.X86, PrecisionType.FP32)
else:
platform_place = Place(TargetType.ARM, PrecisionType.FP32)
if args.backend.upper() in ["ARM"]:
places = [Place(TargetType.ARM, PrecisionType.FP32)]
elif args.backend.upper() in ["X86"]:
places = [Place(TargetType.X86, PrecisionType.FP32)]
elif args.backend.upper() in ["OPENCL", "X86_OPENCL"]:
places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
platform_place, Place(TargetType.Host, PrecisionType.FP32)
]
'''
        Set the OpenCL kernel binary.
        A large additional preparation time is incurred by algorithm selection
        and by building kernels from source code.
        Preparation time can be reduced dramatically once the algorithm file
        and the OpenCL kernel binary have been built on the first run.
        The first run will take a bit longer due to compile time if you don't
        call `set_opencl_binary_path_name` explicitly, so calling
        `set_opencl_binary_path_name` explicitly is strongly recommended.
        Make sure you have write permission for the binary path.
        We strongly recommend giving each model a unique binary name.
'''
bin_path = "./"
bin_name = "lite_opencl_kernel.bin"
config.set_opencl_binary_path_name(bin_path, bin_name)
'''
opencl tune option:
CL_TUNE_NONE
CL_TUNE_RAPID
CL_TUNE_NORMAL
CL_TUNE_EXHAUSTIVE
'''
tuned_path = "./"
tuned_name = "lite_opencl_tuned.bin"
config.set_opencl_tune(CLTuneMode.CL_TUNE_NORMAL, tuned_path,
tuned_name, 4)
'''
opencl precision option:
CL_PRECISION_AUTO, first fp16 if valid, default
CL_PRECISION_FP32, force fp32
CL_PRECISION_FP16, force fp16
'''
config.set_opencl_precision(CLPrecisionType.CL_PRECISION_AUTO)
elif args.backend.upper() in ["METAL"]:
# set metallib path
import paddlelite, os
module_path = os.path.dirname(paddlelite.__file__)
config.set_metal_lib_path(module_path + "/libs/lite.metallib")
config.set_metal_use_mps(True)
# set places for Metal
places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
elif args.backend.upper() in ["NNADAPTER"]:
places = [
Place(TargetType.NNAdapter, PrecisionType.FP32), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
if args.nnadapter_device_names == "":
print(
"Please set nnadapter_device_names when backend = nnadapter!")
return
config.set_nnadapter_device_names(
args.nnadapter_device_names.split(","))
config.set_nnadapter_context_properties(
args.nnadapter_context_properties)
config.set_nnadapter_model_cache_dir(args.nnadapter_model_cache_dir)
config.set_nnadapter_subgraph_partition_config_path(
args.nnadapter_subgraph_partition_config_path)
config.set_nnadapter_mixed_precision_quantization_config_path(
args.nnadapter_mixed_precision_quantization_config_path)
else:
raise ValueError("Unsupported backend: %s." % args.backend)
config.set_valid_places(places)
# 2. Create paddle predictor
predictor = create_paddle_predictor(config)
optimized_model_dir = "opt_" + args.backend
predictor.save_optimized_model(optimized_model_dir)
# 3. Set input data
input_tensor = predictor.get_input(0)
c, h, w = args.input_shape[1], args.input_shape[2], args.input_shape[3]
read_image = len(args.image_path) != 0 and len(args.label_path) != 0
if read_image == True:
import cv2
with open(args.label_path, "r") as f:
label_list = f.readlines()
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
image_data = cv2.imread(args.image_path)
image_data = cv2.resize(image_data, (h, w))
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = image_data.transpose((2, 0, 1)) / 255.0
image_data = (image_data - np.array(image_mean).reshape(
(3, 1, 1))) / np.array(image_std).reshape((3, 1, 1))
image_data = image_data.reshape([1, c, h, w]).astype('float32')
input_tensor.from_numpy(image_data)
else:
input_tensor.from_numpy(np.ones((1, c, h, w)).astype("float32"))
# 4. Run model
predictor.run()
# 5. Get output data
output_tensor = predictor.get_output(0)
output_data = output_tensor.numpy()
if args.print_results == True:
print("result data:\n{}".format(output_data))
print("mean:{:.6e}, std:{:.6e}, min:{:.6e}, max:{:.6e}".format(
np.mean(output_data),
np.std(output_data), np.min(output_data), np.max(output_data)))
# 6. Post-process
if read_image == True:
output_data = output_data.flatten()
class_id = np.argmax(output_data)
class_name = label_list[class_id]
score = output_data[class_id]
print("class_name: {} score: {}".format(class_name, score))
if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
|
[
"numpy.mean",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.min",
"numpy.max",
"os.path.dirname",
"numpy.array",
"cv2.cvtColor",
"numpy.std",
"platform.machine",
"cv2.resize",
"cv2.imread"
] |
[((873, 898), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (896, 898), False, 'import argparse\n'), ((2870, 2888), 'platform.machine', 'platform.machine', ([], {}), '()\n', (2886, 2888), False, 'import platform\n'), ((7710, 7737), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (7720, 7737), False, 'import cv2\n'), ((7759, 7789), 'cv2.resize', 'cv2.resize', (['image_data', '(h, w)'], {}), '(image_data, (h, w))\n', (7769, 7789), False, 'import cv2\n'), ((7811, 7854), 'cv2.cvtColor', 'cv2.cvtColor', (['image_data', 'cv2.COLOR_BGR2RGB'], {}), '(image_data, cv2.COLOR_BGR2RGB)\n', (7823, 7854), False, 'import cv2\n'), ((8767, 8789), 'numpy.argmax', 'np.argmax', (['output_data'], {}), '(output_data)\n', (8776, 8789), True, 'import numpy as np\n'), ((8560, 8580), 'numpy.mean', 'np.mean', (['output_data'], {}), '(output_data)\n', (8567, 8580), True, 'import numpy as np\n'), ((8590, 8609), 'numpy.std', 'np.std', (['output_data'], {}), '(output_data)\n', (8596, 8609), True, 'import numpy as np\n'), ((8611, 8630), 'numpy.min', 'np.min', (['output_data'], {}), '(output_data)\n', (8617, 8630), True, 'import numpy as np\n'), ((8632, 8651), 'numpy.max', 'np.max', (['output_data'], {}), '(output_data)\n', (8638, 8651), True, 'import numpy as np\n'), ((5556, 5592), 'os.path.dirname', 'os.path.dirname', (['paddlelite.__file__'], {}), '(paddlelite.__file__)\n', (5571, 5592), False, 'import paddlelite, os\n'), ((8007, 8026), 'numpy.array', 'np.array', (['image_std'], {}), '(image_std)\n', (8015, 8026), True, 'import numpy as np\n'), ((8204, 8225), 'numpy.ones', 'np.ones', (['(1, c, h, w)'], {}), '((1, c, h, w))\n', (8211, 8225), True, 'import numpy as np\n'), ((7951, 7971), 'numpy.array', 'np.array', (['image_mean'], {}), '(image_mean)\n', (7959, 7971), True, 'import numpy as np\n')]
|
from selenium import webdriver
navegador = webdriver.Chrome()
navegador.get("https://webstatic-sea.mihoyo.com/ys/event/signin-sea/index.html?act_id=e202102251931481&lang=pt-pt")
|
[
"selenium.webdriver.Chrome"
] |
[((44, 62), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (60, 62), False, 'from selenium import webdriver\n')]
|
import argparse
from argparse import ArgumentError
from libvirt_vm_optimizer.util.utils import Profile
class Settings:
def __init__(self, libvirt_xml=None,
output_xml=None,
in_place=False,
profile=Profile.DEFAULT,
force_multithreaded_pinning=False,
connection_uri=None):
self.libvirt_xml = libvirt_xml
self.output_xml = output_xml
self.profile = profile
self.in_place = in_place
self.connection_uri = connection_uri
self.force_multithreaded_pinning = force_multithreaded_pinning
class ArgParser:
@staticmethod
def require_args():
parser = argparse.ArgumentParser(usage='libvirt-vm-optimizer.py [LIBVIRT_XML]\n'
'\n'
' - optimizes LIBVIRT_XML (supports kvm|qemu)')
parser.add_argument('LIBVIRT_XML', nargs='?',
help=f'VM libvirt.xml (will read from stdin if not specified)')
parser.add_argument('-o', '--output', type=str, nargs='?',
dest='output',
required=False, const=True,
help=f'output file (will be printed to stdout if not specified)')
parser.add_argument('-i', '--in-place', action='store_true',
dest='in_place',
help=f'edit files in place')
parser.add_argument('-p', '--profile', type=str, nargs='?',
dest='profile',
default='default',
required=False, const=True,
help=f'one of (default, cpu, server )')
parser.add_argument('-m', '--force-multithreaded-pinning', action='store_true',
dest='multithreaded_pinning',
help=f'setup CPU pinning in simultaneous multithreading systems (experimental and may be slower)')
parser.add_argument('-c', '--connect', type=str, nargs='?',
dest='uri',
default='qemu:///system',
required=False, const=True,
help=f'connection URI (uses default connection if not specified)')
args = parser.parse_args()
return ArgParser._as_settings(args)
@staticmethod
def _as_settings(args):
libvirt_xml = args.LIBVIRT_XML
output_xml = args.output
profile = Profile.from_str(args.profile)
in_place = args.in_place
uri = args.uri
multithreaded_pinning = args.multithreaded_pinning
if in_place and not libvirt_xml:
raise ArgumentError(None, message="no LIBVIRT_XML specified")
return Settings(libvirt_xml=libvirt_xml,
output_xml=output_xml,
in_place=in_place,
profile=profile,
force_multithreaded_pinning=multithreaded_pinning,
connection_uri=uri)
|
[
"libvirt_vm_optimizer.util.utils.Profile.from_str",
"argparse.ArgumentError",
"argparse.ArgumentParser"
] |
[((698, 828), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""libvirt-vm-optimizer.py [LIBVIRT_XML]\n\n - optimizes LIBVIRT_XML (supports kvm|qemu)"""'}), '(usage=\n """libvirt-vm-optimizer.py [LIBVIRT_XML]\n\n - optimizes LIBVIRT_XML (supports kvm|qemu)"""\n )\n', (721, 828), False, 'import argparse\n'), ((2586, 2616), 'libvirt_vm_optimizer.util.utils.Profile.from_str', 'Profile.from_str', (['args.profile'], {}), '(args.profile)\n', (2602, 2616), False, 'from libvirt_vm_optimizer.util.utils import Profile\n'), ((2792, 2847), 'argparse.ArgumentError', 'ArgumentError', (['None'], {'message': '"""no LIBVIRT_XML specified"""'}), "(None, message='no LIBVIRT_XML specified')\n", (2805, 2847), False, 'from argparse import ArgumentError\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains class MRT_File.
The MRT_File class contains the functionality to load and parse
mrt files. This is done through a series of steps, detailed in README.
"""
__authors__ = ["<NAME>", "<NAME>"]
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import os
import logging
from .tables import MRT_Announcements_Table
from ....utils import utils
from ....utils.base_classes import File
class MRT_File(File):
"""Converts MRT files to CSVs and then inserts them into a database.
In depth explanation in README.
"""
__slots__ = []
def parse_file(self, bgpscanner=True):
"""Parses a downloaded file and inserts it into the database
        if bgpscanner is set to True, bgpscanner is used to parse files
which is faster, but ignores malformed announcements. While
these malformed announcements are few and far between, bgpdump
does not ignore them and should be used for full data runs. For
testing however, bgpscanner is much faster and has almost all
data required. More in depth explanation at the top of the file
        Note that when tested for speed, logging didn't appear to slow down
        parse_files (or it did, and the test simply disabled logging incorrectly).
"""
# Sets CSV path
self.csv_name = f"{self.csv_dir}/{os.path.basename(self.path)}.csv"
# Parses the MRT file into a csv file
self._convert_dump_to_csv(bgpscanner)
# Inserts the csv file into the MRT_Announcements Table
utils.csv_to_db(MRT_Announcements_Table, self.csv_name)
# Deletes all old files
utils.delete_paths([self.path, self.csv_name])
utils.incriment_bar(logging.root.level)
########################
### Helper Functions ###
########################
def _convert_dump_to_csv(self, bgpscanner=True):
"""Parses MRT file into a CSV
This function uses bgpscanner to first be able to read
the MRT file. This is because BGPScanner is the fastest tool to
use for this task. The drawback of bgpscanner is that it ignores
malformed announcements. There aren't a lot of these, and it's
much faster, but for a full data set the slower tool bgpdump
should be used. Then the sed commands parse the file and
format the data for a CSV. Then this is stored as a tab
delimited CSV file, and the original is deleted. For a more in
depth explanation see top of file. For parsing spefics, see each
function listed below.
"""
args = self._bgpscanner_args() if bgpscanner else self._bgpdump_args()
# writes to a csv
args += '> ' + self.csv_name
utils.run_cmds(args)
logging.debug(f"Wrote {self.csv_name}\n\tFrom {self.url}")
utils.delete_paths(self.path)
def _bgpscanner_args(self):
"""Parses MRT file into a CSV using bgpscanner
For a more in depth explanation see _convert_dump_to_csv. For
explanation on specifics of the parsing, see below.
"""
        # I know this may seem unmaintainable; that's because this is a
        # fast way to do this. Please, calm down.
# Turns out not fast - idk if other regexes are faster
# bgpscanner outputs this format:
# TYPE|SUBNETS|AS_PATH|NEXT_HOP|ORIGIN|ATOMIC_AGGREGATE|
# AGGREGATOR|COMMUNITIES|SOURCE|TIMESTAMP|ASN 32 BIT
# Example: =|172.16.58.3/24|14061 6453 9498 45528 45528|
# 192.168.3.11|i|||
# 6453:50 6453:1000 6453:1100 6453:1113 14061:402 14061:2000
# 14061:2002 14061:4000 14061:4002|192.168.3.11 14061|
# 1545345848|1
# Also please note: sed needs escape characters, so if something
# is escaped once it is for sed. If it is escaped twice, it is
# to escape something in sed, and a second escape for the python
# Below are the things that need to be escaped:
# Parenthesis are escaped because they are sed capture groups
# + is escaped to get sed's special plus (at least one)
# . is escaped for sed to recognize it as a period to match
# / is escaped for sed to match the actual forward slash
# performs bgpdump on the file
bash_args = 'bgpscanner '
bash_args += self.path
# Cuts out columns we don't need
bash_args += ' | cut -d "|" -f1,2,3,10'
# Now we have TYPE|SUBNETS|AS_PATH|TIMESTAMP
# Ex: =|172.16.58.3/24|14061 6453 9498 45528 45528|1545345848
        # Makes sure we get an announcement, withdrawal, or RIB entry
# -n for no output if nothing there
bash_args += ' | sed -n "s/[=|+|-]|'
# Now we focus on SUBNETS|AS_PATH|TIMESTAMP
# Ex: 172.16.58.3/24|14061 6453 9498 45528 45528|1545345848
# Gets three capture groups.
# The first capture group is the prefix
# Captures chars normally in IPV4 or IPV6 prefixes
bash_args += '\([0|1|2|3|4|5|6|7|8|9|%|\.|\:|a|b|c|d|e|f|/]\+\)|'
# I left this old code here in case someone can figure it out
# https://unix.stackexchange.com/questions/145402/
# It appears sed doesn't support this kind of alternation
# It appears you cannot perform alternation with char classes
# So while it is slower to use ^, that is the way it will run
# until someone can figure out a crazier sed command. And even
# if you could, it would appear that it wouldn't be cross
        # platform compatible, so it probably shouldn't be done anyway
# The regex for prefix is done in this way instead of non
# greedy matching because sed doesn't have non greedy matching
# so instead the | must be excluded which is slower than this
# bash_args += '\([[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
# bash_args += '\.[[:digit:]]\+\/[[:digit:]]\+|'
# Now we match for ipv6 prefixes
# bash_args += '[0|1|2|3|4|5|6|7|8|9|%|\:|\.|a|b|c|d|e|f]*]\)|'
# Now we focus on AS_PATH|TIMESTAMP
# Ex: 14061 6453 9498 45528 45528|1545345848
# Second capture group is as path except for the last number
bash_args += '\([^{]*[[:space:]]\)*'
# Now we have all but the last number
# Ex: 45528|1545345848
# Third capture group is the origin
bash_args += '\([^{]*\)'
# Now we have just the time
# Example: |1545345848
# Fourth capture group is the time
bash_args += '|\(.*\)'
# Replacement with the capture groups
# Must double escape here or python freaks out
bash_args += '/\\1\\t{\\2\\3}\\t\\3\\t\\4/p" | '
# Replaces spaces in array to commas
# Need to pipe to new sed because you need the -n -p args
# to make sed not output the full string if it doesn't match
# And you cannot add -e args after that
bash_args += 'sed -e "s/ /, /g" '
return bash_args
def _bgpdump_args(self):
"""Parses MRT file into a CSV using bgpdump
For a more in depth explanation see _convert_dump_to_csv. For
explanation on specifics of the parsing, see below. Also note,
you must use the updated bgpdump tool, not the apt repo.
"""
# performs bgpdump on the file
bash_args = 'bgpdump -q -m -t change '
bash_args += self.path
# Cuts out columns we don't need
bash_args += ' | cut -d "|" -f2,6,7 '
# Deletes any announcements with as sets
bash_args += '|sed -e "/{.*}/d" '
# Performs regex matching with sed and adds brackets to as_path
bash_args += '-e "s/\(.*|.*|\)\(.*$\)/\\1{\\2}/g" '
# Replaces pipes and spaces with commas for csv insertion
# leaves out first one: -e "s/, / /"
bash_args += '-e "s/ /, /g" -e "s/|/\t/g" '
# Adds a column for the origin
bash_args += '-e "s/\([[:digit:]]\+\)}/\\1}\t\\1/g"'
# Rearrange columns to match for csv_to_db
bash_args += '| awk \'BEGIN {FS="\t"};{OFS="\t"};{ print '
bash_args += '$2, $3, $4, $1}\''
return bash_args
|
[
"os.path.basename",
"logging.debug"
] |
[((2857, 2917), 'logging.debug', 'logging.debug', (['f"""Wrote {self.csv_name}\n\tFrom {self.url}"""'], {}), '(f"""Wrote {self.csv_name}\n\tFrom {self.url}""")\n', (2870, 2917), False, 'import logging\n'), ((1454, 1481), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (1470, 1481), False, 'import os\n')]
|
#!/usr/bin/python3
print("content-type: text/html")
print()
import subprocess as sp
import cgi
fs = cgi.FieldStorage()
cmd = fs.getvalue("command")
output = sp.getoutput("sudo "+cmd)
print("<body style='padding: 40px;'>")
print('<h1 style="color:#df405a;" >Output</h1>')
print("<pre>{}</pre>".format(output))
print("</body>")
|
[
"subprocess.getoutput",
"cgi.FieldStorage"
] |
[((111, 129), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (127, 129), False, 'import cgi\n'), ((172, 199), 'subprocess.getoutput', 'sp.getoutput', (["('sudo ' + cmd)"], {}), "('sudo ' + cmd)\n", (184, 199), True, 'import subprocess as sp\n')]
|
from flask import Flask
from flask_restful_swagger.swagger import SwaggerRegistry
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@patch("flask_restful_swagger.swagger._get_current_registry")
@patch("flask_restful_swagger.swagger.render_homepage")
def test_get_swagger_registry(homepage, registry):
mock_registry = {
"apiVersion": "mock_version",
"swaggerVersion": "mock_swagger_version",
"basePath": "mock_path",
"spec_endpoint_path": "mock_spec_endpoint_path",
"description": "mock_description",
}
registry.return_value = mock_registry
app = Flask(__name__)
resource = SwaggerRegistry()
bases = [base.__name__ for base in SwaggerRegistry.__mro__]
assert sorted(bases) == [
"MethodView",
"Resource",
"SwaggerRegistry",
"View",
"object",
]
with app.test_request_context(path="/some_path.html"):
_ = resource.get()
assert homepage.called
homepage.assert_called_once_with(
"mock_pathmock_spec_endpoint_path/_/resource_list.json"
)
with app.test_request_context(path="/some_path"):
homepage.reset_mock()
response = resource.get()
assert not homepage.called
assert response == mock_registry
|
[
"mock.patch",
"flask_restful_swagger.swagger.SwaggerRegistry",
"flask.Flask"
] |
[((175, 235), 'mock.patch', 'patch', (['"""flask_restful_swagger.swagger._get_current_registry"""'], {}), "('flask_restful_swagger.swagger._get_current_registry')\n", (180, 235), False, 'from mock import patch\n'), ((237, 291), 'mock.patch', 'patch', (['"""flask_restful_swagger.swagger.render_homepage"""'], {}), "('flask_restful_swagger.swagger.render_homepage')\n", (242, 291), False, 'from mock import patch\n'), ((647, 662), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (652, 662), False, 'from flask import Flask\n'), ((679, 696), 'flask_restful_swagger.swagger.SwaggerRegistry', 'SwaggerRegistry', ([], {}), '()\n', (694, 696), False, 'from flask_restful_swagger.swagger import SwaggerRegistry\n')]
|
# This code is adapted from the GitHub repo (https://github.com/parulnith/Building-a-Simple-Chatbot-in-Python-using-NLTK/blob/master/chatbot.py)
# Loading the required packages
import nltk
import random
import string
import warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from textblob import TextBlob
# Setup
warnings.filterwarnings('ignore') # Ignore warning messages
f = open('corpus_linguistics.txt', 'r') # opening the corpus
text = f.read() # reading the corpus
# Convert all text from corpus to lower case
text = text.lower()
# Perform tokenization
sent_tokens = nltk.sent_tokenize(text)
word_tokens = nltk.word_tokenize(text)
# Initialize set of greetings and responses
user_greetings = ["hi", "hello", "good morning", "hey", "what's up"]
bot_greetings = ["Hello, how may I be of assistance?"]
user_gratitude = ["thank you", "thanks", "that was helpful"]
bot_gratitude = ["You're welcome! Is there anything else you need?",
"Happy to help! Are there other questions that I could help "
"with?"]
bot_exit_text = ["Thank you for using my services. Have a great day!",
"Hope I was helpful. See you later :)", "Bye!"]
languages = {"en": "English", "fr": "French", "es": "Spanish",
"la": "Latin"}
# Text Preprocessing
lemmatizer = nltk.stem.WordNetLemmatizer() # Text Lemmatization
# Function to perform lemmatization
def LemTokens(tokens):
return [lemmatizer.lemmatize(token) for token in tokens]
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
# Function to perform normalization
def LemNormalize(text):
return LemTokens(
nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
# Generating response
def respond(input_text):
bot_message = ""
sent_tokens.append(input_text)
TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english') # TF-IDF approach
tfidf = TfidfVec.fit_transform(sent_tokens)
vals = cosine_similarity(tfidf[-1], tfidf)
idx = vals.argsort()[0][-2]
flat = vals.flatten()
flat.sort()
req_tfidf = flat[-2]
if req_tfidf == 0:
bot_message += "Apologies, I cannot understand your question. Please " \
"rephrase your question and try again. "
else:
bot_message += sent_tokens[idx]
return bot_message
# Perform sentiment analysis
def extract_sentiment(text):
processed_text = TextBlob(text) # Here, we use the textblob module to implement sentiment analysis
sentiment = processed_text.sentiment
if sentiment.polarity < 0: # we manually set the rule for testing the mood of a sentence
return "negative"
elif sentiment.polarity > 0:
return "positive"
else:
return "neutral"
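# Illustrative behavior (depends on TextBlob's lexicon): a phrase such as
# "the bot was great" normally has polarity > 0 and maps to "positive",
# "this is terrible" maps to "negative", and a sentence with no sentiment
# words, e.g. "the file has ten pages", maps to "neutral".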
# Language detection
def get_language(text):
processed_text = TextBlob(text)
return processed_text.detect_language()
# Interact with chatbot framework based on input from user
def bot(choice, input_text):
exit_status = False
while exit_status is False:
input_text = input_text.lower() # lowercase the input
if input_text != 'bye':
if choice == "1":
if input_text in user_greetings: # Generate random response from the greetings set
return random.choice(bot_greetings)
else:
if input_text in user_gratitude: # Generate random response from the gratitude set
return random.choice(bot_gratitude)
else:
return respond(input_text) # Generate a response using NLTK that answers the user's question
sent_tokens.remove(input_text)
elif choice == "2":
return_string = "Detected Language: " + languages[
get_language(input_text)] + "\n" # Language detection
if get_language(input_text) == "en":
return_string += "Detected Sentiment: " + extract_sentiment(
input_text) # Sentiment analysis
else:
return_string += "Sentiment can only be detected for " \
"text in English "
return return_string
else:
exit_status = True
return "Invalid choice!\nOnly 1 and 2 are valid choices " \
"\nPlease try running the program again. "
else:
exit_status = True
return random.choice(bot_exit_text)
|
[
"textblob.TextBlob",
"random.choice",
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.word_tokenize",
"nltk.stem.WordNetLemmatizer",
"nltk.sent_tokenize",
"sklearn.feature_extraction.text.TfidfVectorizer",
"warnings.filterwarnings"
] |
[((395, 428), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (418, 428), False, 'import warnings\n'), ((660, 684), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (678, 684), False, 'import nltk\n'), ((699, 723), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (717, 723), False, 'import nltk\n'), ((1392, 1421), 'nltk.stem.WordNetLemmatizer', 'nltk.stem.WordNetLemmatizer', ([], {}), '()\n', (1419, 1421), False, 'import nltk\n'), ((1919, 1980), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'tokenizer': 'LemNormalize', 'stop_words': '"""english"""'}), "(tokenizer=LemNormalize, stop_words='english')\n", (1934, 1980), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2058, 2093), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tfidf[-1]', 'tfidf'], {}), '(tfidf[-1], tfidf)\n', (2075, 2093), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2515, 2529), 'textblob.TextBlob', 'TextBlob', (['text'], {}), '(text)\n', (2523, 2529), False, 'from textblob import TextBlob\n'), ((2919, 2933), 'textblob.TextBlob', 'TextBlob', (['text'], {}), '(text)\n', (2927, 2933), False, 'from textblob import TextBlob\n'), ((4601, 4629), 'random.choice', 'random.choice', (['bot_exit_text'], {}), '(bot_exit_text)\n', (4614, 4629), False, 'import random\n'), ((3374, 3402), 'random.choice', 'random.choice', (['bot_greetings'], {}), '(bot_greetings)\n', (3387, 3402), False, 'import random\n'), ((3559, 3587), 'random.choice', 'random.choice', (['bot_gratitude'], {}), '(bot_gratitude)\n', (3572, 3587), False, 'import random\n')]
|
import subprocess
import os.path
import json
import time
import urllib.parse
from typing import Any, Tuple
import config
from requests_html import HTMLSession
from markdownify import markdownify
class Data:
def __init__(
self,
data_file_path: str = config.DATA_FILE_PATH,
preload: bool = False,
fetch_rule: bool = True,
) -> None:
super().__init__()
self.data_file_path = data_file_path
data = None
if preload:
data = self.load() # load from existing data file
if ( # check whether the data file is valid
not data
or abs(time.time() - data["timestamp"]) / 60
> config.DATA_RENEW_THRESHOLD_IN_MIN
):
data = self.fetch()
self.data = data
def load(self, path: str = None) -> Any:
data = None
if not path:
path = self.data_file_path
if os.path.exists(path):
with open(path, "r") as fp:
data_ser = json.load(fp)
data = json.loads(data_ser)
self.data = data
return data
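    # Note (descriptive, not in the original): the data is persisted as a JSON
    # string that is itself JSON-encoded (see do_persistence below), which is
    # why load() calls json.load and then json.loads on the result.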
def fetch(self, url: str = config.DATA_API_URL) -> Any:
        # fetch data
print("\n-------------Start fetching data-------------")
r = subprocess.check_output(f"curl {url}", shell=True)
print("\n-------------Finish fetching data-------------")
print("\n-------------Start serializing data-------------")
json_data = json.loads(r.decode("utf-8"))
# indexing based on question frontend id
data = {}
for q in json_data["stat_status_pairs"]:
qid = q["stat"]["frontend_question_id"]
if qid in data:
raise RuntimeError(f"question #{qid} already exists, duplicate!")
else:
data[str(qid).zfill(config.QID_PADDING_SIZE)] = q
        print(f"Total fetched questions: {len(data)} ")
data["timestamp"] = time.time()
print("\n-------------Finish serializing data-------------")
return data
def do_persistence(
self, data_serialized: str = None, path=config.DATA_FILE_PATH
) -> None:
print("\n-------------Start data persistence-------------")
if not data_serialized:
data_serialized = json.dumps(self.data)
if not data_serialized or not path:
raise RuntimeError("invalid input data or file path.")
with open(path, "w") as fp:
json.dump(data_serialized, fp)
print("\n-------------Finish data persistence-------------")
class Problem:
def __init__(self, qid: int, blob: Any, auto_parse=False) -> None:
super().__init__()
self.qid = str(qid).zfill(config.QID_PADDING_SIZE)
self.difficulty = blob["difficulty"]["level"]
self.is_paid = blob["paid_only"]
self.stat = blob["stat"]
if auto_parse:
self.parse(self.stat)
def parse(self, stat=None):
self.total_acs, self.total_submitted, self.ac_rate = self._parse_statistics(
stat
)
self.title = self._parse_title(stat)
self.title_slug = self._parse_title_slug(stat)
self.url = self._parse_url(stat)
self.url_solution = self._parse_url_solution(stat)
def _parse_statistics(self, stat) -> Tuple[int, int]:
acs, submissions = stat["total_acs"], stat["total_submitted"]
return acs, submissions, acs / submissions if submissions > 0 else 0
def _parse_title(self, stat):
return stat["question__title"]
def _parse_title_slug(self, stat):
return stat["question__title_slug"]
def _parse_url(self, stat):
title_slug = self._parse_title_slug(stat)
return urllib.parse.urljoin(config.PROBLEM_URL_PREFIX, title_slug)
def _parse_url_solution(self, stat):
        # be careful with urljoin behavior: the relative part replaces the
        # trailing path segment of the absolute base URL when one exists
return (
urllib.parse.urljoin(
config.PROBLEM_URL_PREFIX, stat["question__article__slug"] + "/solution"
)
if stat["question__article__slug"]
else None
)
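    # Illustration of the caveat above (standard urllib.parse.urljoin behavior,
    # shown with a made-up base URL):
    #   urljoin("https://example.com/problems/", "two-sum/solution")
    #     -> "https://example.com/problems/two-sum/solution"
    #   urljoin("https://example.com/problems/old-slug", "new-slug/solution")
    #     -> "https://example.com/problems/new-slug/solution"  (trailing segment replaced)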
def _scrape_n_render(self, url=None):
if not url:
url = self.url
response = HTMLSession().get(url)
response.html.render()
return response
def scrape(self, url=None):
r = self._scrape_n_render(url=url)
# self.content, self.contetnt_md = self._scrape_problem_content(r.html)
# with open("html-content.html", "w") as f:
# f.write(r.html.html)
# with open("html-raw-content.html", "w") as f:
# f.write(r.html.raw_html.decode("utf-8"))
self.tags = self._scrape_problem_topics(r.html)
self.companies = self._scrape_problem_companies(r.html)
def _scrape_problem_topics(self, html):
t_elements = html.xpath("//a[starts-with(@class,'topic-tag')]/span")
return [t.text for t in t_elements]
def _scrape_problem_companies(self, html):
        # company tags are only available to paid users.
# TODO: add login and cookies support
t_elements = html.xpath("//a[starts-with(@href,'/company')]")
return [t.text for t in t_elements]
def _scrape_problem_content(self, html):
content = html.xpath("//div[contains(@class,'question-content')]/div")[0]
        markdown_content = markdownify(self.html2markdown_preprocess(content.html))
# with open("test.md", "w") as fp:
# fp.write(md_out)
return content, markdown_content
def html2markdown_preprocess(self, html: str) -> str:
        # replace all <code>...</code> pairs with inline markdown code: `backticks`
        # replace every \n newline with <br> in the html, otherwise it is not parsed as a newline
        # replace every <pre>...</pre> block with a fenced code block ```; the default type is json for better display
res = (
html.replace("<code>", "`")
.replace("</code>", "`")
.replace("\n", "<br>")
.replace("<pre>", "```json<br>")
.replace("</pre>", "```<br>")
)
return res
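    # Example of the preprocessing above (illustrative): the HTML fragment
    # '<pre>{"a": 1}</pre>' becomes '```json<br>{"a": 1}```<br>', which reads as
    # a fenced json code block once the <br> tags are rendered as line breaks.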
|
[
"subprocess.check_output",
"json.loads",
"json.dumps",
"requests_html.HTMLSession",
"json.load",
"time.time",
"json.dump"
] |
[((1311, 1361), 'subprocess.check_output', 'subprocess.check_output', (['f"""curl {url}"""'], {'shell': '(True)'}), "(f'curl {url}', shell=True)\n", (1334, 1361), False, 'import subprocess\n'), ((1996, 2007), 'time.time', 'time.time', ([], {}), '()\n', (2005, 2007), False, 'import time\n'), ((2338, 2359), 'json.dumps', 'json.dumps', (['self.data'], {}), '(self.data)\n', (2348, 2359), False, 'import json\n'), ((2521, 2551), 'json.dump', 'json.dump', (['data_serialized', 'fp'], {}), '(data_serialized, fp)\n', (2530, 2551), False, 'import json\n'), ((1047, 1060), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1056, 1060), False, 'import json\n'), ((1084, 1104), 'json.loads', 'json.loads', (['data_ser'], {}), '(data_ser)\n', (1094, 1104), False, 'import json\n'), ((4326, 4339), 'requests_html.HTMLSession', 'HTMLSession', ([], {}), '()\n', (4337, 4339), False, 'from requests_html import HTMLSession\n'), ((653, 664), 'time.time', 'time.time', ([], {}), '()\n', (662, 664), False, 'import time\n')]
|
import os
import sys
os.chdir(os.path.dirname(__file__))
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(sys.path[0], '/var/www/haproxy-wi/app/'))
from bottle import route, run, template, hook, response, request, post
import sql
import funct
def return_dict_from_out(id, out):
data = {}
data[id] = {}
for k in out:
if "Ncat:" not in k:
k = k.split(':')
data[id][k[0]] = k[1].strip()
else:
data[id] = {"error":"Can\'t connect to HAproxy"}
return data
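# Example of the parsing above (illustrative): a "show info" line such as
# "Version: 1.8.19" becomes data[id]["Version"] = "1.8.19"; if nc cannot reach
# the HAProxy socket, the whole entry is replaced by the error message instead.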
def check_permit_to_server(id):
servers = sql.select_servers(id_hostname=id)
login = request.headers.get('login')
for s in servers:
servers = sql.get_dick_permit(username=login, ip=s[2])
return servers
def get_server(id):
data = {}
try:
servers = check_permit_to_server(id)
for s in servers:
data = {
'id':s[0],
'hostname':s[1],
'ip':s[2],
'group':s[3],
'virt':s[4],
'enable':s[5],
'master':s[6],
'creds':s[7]
}
except:
		pass
return dict(server=data)
def get_status(id):
try:
servers = check_permit_to_server(id)
for s in servers:
cmd = 'echo "show info" |nc %s %s -w 1|grep -e "Ver\|CurrConns\|Maxco\|MB\|Uptime:"' % (s[2], sql.get_setting('haproxy_sock_port'))
out = funct.subprocess_execute(cmd)
data = return_dict_from_out(id, out[0])
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(status=data)
def get_all_statuses():
data = {}
try:
servers = sql.select_servers()
login = request.headers.get('login')
sock_port = sql.get_setting('haproxy_sock_port')
for s in servers:
servers = sql.get_dick_permit(username=login)
for s in servers:
cmd = 'echo "show info" |nc %s %s -w 1|grep -e "Ver\|CurrConns\|Maxco\|MB\|Uptime:"' % (s[2], sock_port)
data[s[2]] = {}
out = funct.subprocess_execute(cmd)
data[s[2]] = return_dict_from_out(s[1], out[0])
except:
data = {"error":"Cannot find the server"}
return dict(error=data)
return dict(status=data)
def actions(id, action):
if action == 'start' or action == 'stop' or action == 'restart':
try:
servers = check_permit_to_server(id)
for s in servers:
cmd = [ "sudo systemctl %s haproxy" % action ]
error = funct.ssh_command(s[2], cmd)
done = error if error else 'done'
data = {'id':s[0],'ip':s[2],'action':action,'hostname':s[1],'status':done}
return dict(status=data)
except:
return dict(status='error')
else:
return dict(status='wrong action')
def runtime(id):
data = {}
try:
action = request.headers.get('action')
haproxy_sock = sql.get_setting('haproxy_sock')
servers = check_permit_to_server(id)
cmd = [ 'echo "%s" |sudo socat stdio %s' % (action, haproxy_sock) ]
for s in servers:
out = funct.ssh_command(s[2], cmd)
data = {}
data[id] = {}
sep_data = out.split('\r\n')
data[id] = {'ouput':sep_data}
return dict(status=data)
except:
return dict(status='error')
def show_backends(id):
data = {}
try:
servers = check_permit_to_server(id)
for s in servers:
out = funct.show_backends(s[2], ret=1)
data = {id: out}
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(backends=data)
def get_config(id):
data = {}
try:
servers = check_permit_to_server(id)
for s in servers:
cfg = '/tmp/'+s[2]+'.cfg'
out = funct.get_config(s[2], cfg)
os.system("sed -i 's/\\n/\n/g' "+cfg)
try:
conf = open(cfg, "r")
config_read = conf.read()
conf.close
except IOError:
conf = '<br />Can\'t read import config file'
data = {id: config_read}
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(config=data)
def upload_config(id):
data = {}
body = request.body.getvalue().decode('utf-8')
save = request.headers.get('action')
	hap_configs_dir = funct.get_config_var('configs', 'haproxy_save_configs_dir')
	login = request.headers.get('login')
if save == '':
save = 'save'
elif save == 'restart':
save = ''
try:
servers = check_permit_to_server(id)
for s in servers:
ip = s[2]
cfg = '/tmp/'+ip+'.cfg'
cfg_for_save = hap_configs_dir + ip + "-" + funct.get_data('config') + ".cfg"
try:
with open(cfg, "w") as conf:
conf.write(body)
return_mess = 'config was uploaded'
os.system("/bin/cp %s %s" % (cfg, cfg_for_save))
out = funct.upload_and_restart(ip, cfg, just_save=save)
funct.logging('localhost', " config was uploaded via REST API", login=login)
if out:
					return_mess = out
except IOError:
return_mess = "cannot upload config"
data = {id: return_mess}
except:
data = {}
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(config=data)
def add_to_config(id):
data = {}
body = request.body.getvalue().decode('utf-8')
save = request.headers.get('action')
hap_configs_dir = funct.get_config_var('configs', 'haproxy_save_configs_dir')
login = request.headers.get('login')
if save == '':
save = 'save'
elif save == 'restart':
save = ''
try:
servers = check_permit_to_server(id)
for s in servers:
ip = s[2]
cfg = '/tmp/'+ip+'.cfg'
cfg_for_save = hap_configs_dir + ip + "-" + funct.get_data('config') + ".cfg"
out = funct.get_config(ip, cfg)
try:
with open(cfg, "a") as conf:
conf.write('\n'+body+'\n')
return_mess = 'section was added to the config'
os.system("/bin/cp %s %s" % (cfg, cfg_for_save))
funct.logging('localhost', " section was added via REST API", login=login)
out = funct.upload_and_restart(ip, cfg, just_save=save)
if out:
return_mess = out
except IOError:
return_mess = "cannot upload config"
data = {id: return_mess}
except:
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
return dict(config=data)
def show_log(id):
data = {}
rows = request.headers.get('rows')
waf = request.headers.get('waf')
grep = request.headers.get('grep')
hour = request.headers.get('starthour')
minut = request.headers.get('startminut')
hour1 = request.headers.get('endhour')
minut1 = request.headers.get('endminut')
if rows is None:
rows = '10'
if waf is None:
waf = '0'
if hour is None:
hour = '00'
if minut is None:
minut = '00'
if hour1 is None:
hour1 = '24'
if minut1 is None:
minut1 = '00'
try:
servers = check_permit_to_server(id)
for s in servers:
ip = s[2]
except:
data[id] = {"error":"Cannot find the server"}
return dict(error=data)
out = funct.show_haproxy_log(ip, rows=rows, waf=str(waf), grep=grep, hour=str(hour), minut=str(minut), hour1=str(hour1), minut1=str(minut1), html=0)
data = {id: out}
return dict(log=data)
|
[
"bottle.request.body.getvalue",
"funct.upload_and_restart",
"funct.ssh_command",
"sql.select_servers",
"os.path.join",
"funct.get_config_var",
"sql.get_dick_permit",
"funct.logging",
"os.path.dirname",
"sql.get_setting",
"funct.subprocess_execute",
"funct.show_backends",
"funct.get_config",
"os.system",
"funct.get_data",
"bottle.request.headers.get"
] |
[((30, 55), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (45, 55), False, 'import os\n'), ((73, 98), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (88, 98), False, 'import os\n'), ((116, 169), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""/var/www/haproxy-wi/app/"""'], {}), "(sys.path[0], '/var/www/haproxy-wi/app/')\n", (128, 169), False, 'import os\n'), ((545, 579), 'sql.select_servers', 'sql.select_servers', ([], {'id_hostname': 'id'}), '(id_hostname=id)\n', (563, 579), False, 'import sql\n'), ((589, 617), 'bottle.request.headers.get', 'request.headers.get', (['"""login"""'], {}), "('login')\n", (608, 617), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((3951, 3980), 'bottle.request.headers.get', 'request.headers.get', (['"""action"""'], {}), "('action')\n", (3970, 3980), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((3990, 4018), 'bottle.request.headers.get', 'request.headers.get', (['"""login"""'], {}), "('login')\n", (4009, 4018), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((4920, 4949), 'bottle.request.headers.get', 'request.headers.get', (['"""action"""'], {}), "('action')\n", (4939, 4949), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((4969, 5028), 'funct.get_config_var', 'funct.get_config_var', (['"""configs"""', '"""haproxy_save_configs_dir"""'], {}), "('configs', 'haproxy_save_configs_dir')\n", (4989, 5028), False, 'import funct\n'), ((5038, 5066), 'bottle.request.headers.get', 'request.headers.get', (['"""login"""'], {}), "('login')\n", (5057, 5066), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((5950, 5977), 'bottle.request.headers.get', 'request.headers.get', (['"""rows"""'], {}), "('rows')\n", (5969, 5977), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((5985, 6011), 'bottle.request.headers.get', 'request.headers.get', (['"""waf"""'], {}), "('waf')\n", (6004, 6011), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((6020, 6047), 'bottle.request.headers.get', 'request.headers.get', (['"""grep"""'], {}), "('grep')\n", (6039, 6047), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((6056, 6088), 'bottle.request.headers.get', 'request.headers.get', (['"""starthour"""'], {}), "('starthour')\n", (6075, 6088), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((6098, 6131), 'bottle.request.headers.get', 'request.headers.get', (['"""startminut"""'], {}), "('startminut')\n", (6117, 6131), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((6141, 6171), 'bottle.request.headers.get', 'request.headers.get', (['"""endhour"""'], {}), "('endhour')\n", (6160, 6171), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((6182, 6213), 'bottle.request.headers.get', 'request.headers.get', (['"""endminut"""'], {}), "('endminut')\n", (6201, 6213), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((654, 698), 'sql.get_dick_permit', 'sql.get_dick_permit', ([], {'username': 'login', 'ip': 's[2]'}), '(username=login, ip=s[2])\n', (673, 698), False, 'import sql\n'), ((1273, 1302), 'funct.subprocess_execute', 'funct.subprocess_execute', (['cmd'], {}), '(cmd)\n', (1297, 1302), False, 'import funct\n'), ((1530, 
1550), 'sql.select_servers', 'sql.select_servers', ([], {}), '()\n', (1548, 1550), False, 'import sql\n'), ((1561, 1589), 'bottle.request.headers.get', 'request.headers.get', (['"""login"""'], {}), "('login')\n", (1580, 1589), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((1604, 1640), 'sql.get_setting', 'sql.get_setting', (['"""haproxy_sock_port"""'], {}), "('haproxy_sock_port')\n", (1619, 1640), False, 'import sql\n'), ((2624, 2653), 'bottle.request.headers.get', 'request.headers.get', (['"""action"""'], {}), "('action')\n", (2643, 2653), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((2671, 2702), 'sql.get_setting', 'sql.get_setting', (['"""haproxy_sock"""'], {}), "('haproxy_sock')\n", (2686, 2702), False, 'import sql\n'), ((5336, 5361), 'funct.get_config', 'funct.get_config', (['ip', 'cfg'], {}), '(ip, cfg)\n', (5352, 5361), False, 'import funct\n'), ((1680, 1715), 'sql.get_dick_permit', 'sql.get_dick_permit', ([], {'username': 'login'}), '(username=login)\n', (1699, 1715), False, 'import sql\n'), ((1877, 1906), 'funct.subprocess_execute', 'funct.subprocess_execute', (['cmd'], {}), '(cmd)\n', (1901, 1906), False, 'import funct\n'), ((2845, 2873), 'funct.ssh_command', 'funct.ssh_command', (['s[2]', 'cmd'], {}), '(s[2], cmd)\n', (2862, 2873), False, 'import funct\n'), ((3154, 3186), 'funct.show_backends', 'funct.show_backends', (['s[2]'], {'ret': '(1)'}), '(s[2], ret=1)\n', (3173, 3186), False, 'import funct\n'), ((3483, 3510), 'funct.get_config', 'funct.get_config', (['s[2]', 'cfg'], {}), '(s[2], cfg)\n', (3499, 3510), False, 'import funct\n'), ((3514, 3556), 'os.system', 'os.system', (['("""sed -i \'s/\\\\n/\n/g\' """ + cfg)'], {}), '("""sed -i \'s/\\\\n/\n/g\' """ + cfg)\n', (3523, 3556), False, 'import os\n'), ((3903, 3926), 'bottle.request.body.getvalue', 'request.body.getvalue', ([], {}), '()\n', (3924, 3926), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((4385, 4433), 'os.system', 'os.system', (["('/bin/cp %s %s' % (cfg, cfg_for_save))"], {}), "('/bin/cp %s %s' % (cfg, cfg_for_save))\n", (4394, 4433), False, 'import os\n'), ((4443, 4492), 'funct.upload_and_restart', 'funct.upload_and_restart', (['ip', 'cfg'], {'just_save': 'save'}), '(ip, cfg, just_save=save)\n', (4467, 4492), False, 'import funct\n'), ((4496, 4572), 'funct.logging', 'funct.logging', (['"""localhost"""', '""" config was uploaded via REST API"""'], {'login': 'login'}), "('localhost', ' config was uploaded via REST API', login=login)\n", (4509, 4572), False, 'import funct\n'), ((4872, 4895), 'bottle.request.body.getvalue', 'request.body.getvalue', ([], {}), '()\n', (4893, 4895), False, 'from bottle import route, run, template, hook, response, request, post\n'), ((5486, 5534), 'os.system', 'os.system', (["('/bin/cp %s %s' % (cfg, cfg_for_save))"], {}), "('/bin/cp %s %s' % (cfg, cfg_for_save))\n", (5495, 5534), False, 'import os\n'), ((5538, 5612), 'funct.logging', 'funct.logging', (['"""localhost"""', '""" section was added via REST API"""'], {'login': 'login'}), "('localhost', ' section was added via REST API', login=login)\n", (5551, 5612), False, 'import funct\n'), ((5622, 5671), 'funct.upload_and_restart', 'funct.upload_and_restart', (['ip', 'cfg'], {'just_save': 'save'}), '(ip, cfg, just_save=save)\n', (5646, 5671), False, 'import funct\n'), ((2301, 2329), 'funct.ssh_command', 'funct.ssh_command', (['s[2]', 'cmd'], {}), '(s[2], cmd)\n', (2318, 2329), False, 'import funct\n'), ((4246, 4270), 
'funct.get_data', 'funct.get_data', (['"""config"""'], {}), "('config')\n", (4260, 4270), False, 'import funct\n'), ((5294, 5318), 'funct.get_data', 'funct.get_data', (['"""config"""'], {}), "('config')\n", (5308, 5318), False, 'import funct\n'), ((1223, 1259), 'sql.get_setting', 'sql.get_setting', (['"""haproxy_sock_port"""'], {}), "('haproxy_sock_port')\n", (1238, 1259), False, 'import sql\n')]
|
""" Waymo dataset with votes.
Author: <NAME>
Date: 2020
"""
import os
import sys
import numpy as np
import pickle
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..', 'utils'))
from box_util import get_corners_from_labels_array
import pc_util
import waymo_utils
from model_util_waymo import WaymoDatasetConfig
DC = WaymoDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 128 # maximum number of objects allowed per scene
# RAW_LABELS = {0: 'TYPE_UNKNOWN', 1: 'TYPE_VEHICLE' , 2: 'TYPE_PEDESTRIAN', 3: 'TYPE_SIGN', 4: 'TYPE_CYCLIST'}
class WaymoDetectionVotesDataset(Dataset):
def __init__(self, split_set='train', num_points=180000,
use_height=False,
augment=False,
verbose:bool = True):
# self.mapping_labels = {1:0,2:1,4:2} # map dataset labels to our labels to handle discarded classes
# self.excluded_labels = [0,3] # exclude unknowns and signs labels
self.split_set = split_set
self.type2class = {0: 'TYPE_UNKNOWN', 1: 'TYPE_VEHICLE' , 2: 'TYPE_PEDESTRIAN', 3: 'TYPE_SIGN', 4: 'TYPE_CYCLIST'}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.classes = ['TYPE_VEHICLE'] #, 'TYPE_PEDESTRIAN', 'TYPE_CYCLIST']
self.data_path = os.path.join(BASE_DIR,
'dataset') # TODO: rename to votes data path
# self.raw_data_path = os.path.join(BASE_DIR, 'dataset')
# access segments dictionary list
# load segments_dict_list dictionary
self.segments_dict_list_path = os.path.join(self.data_path, split_set, 'segments_dict_list')
if not os.path.exists(self.segments_dict_list_path):
raise ValueError('segments Dictionary list is not found, make sure to preprocess the data first')
with open(self.segments_dict_list_path, 'rb') as f:
self.segments_dict_list = pickle.load(f)
self.num_segments = len(self.segments_dict_list)
if verbose: print("No of segments in the dataset is {}".format(len(self.segments_dict_list)))
self.num_frames = 0
for segment_dict in self.segments_dict_list:
# add total number of frames in every segment
self.num_frames += segment_dict['frame_count']
# self.scan_names = sorted(list(set([os.path.basename(x).split("_")[1].split('.')[0] for x in os.listdir(os.path.join(self.data_path, 'training', 'votes'))])))
self.num_points = num_points
self.augment = augment
self.use_height = use_height
def __len__(self):
return self.num_frames
def resolve_idx_to_frame_path(self, idx):
        ''' Get Global idx and transform into segment frame idx
'''
frame_idx = idx
for segment_dict in self.segments_dict_list:
if frame_idx >= segment_dict['frame_count']:
frame_idx -= segment_dict['frame_count']
else:
frames_list = os.listdir(os.path.join(self.data_path, self.split_set, segment_dict['id']))
frame_path = os.path.join(self.data_path, self.split_set, segment_dict['id'], frames_list[frame_idx])
if not os.path.exists(frame_path):
raise ValueError("Frame path doesn't exist, error in idx_to_frame_path function")
return frame_path
def filtrate_objects(self, labels):
'''
        labels: Nx8 array containing all annotated objects
'''
type_whitelist = [self.class2type[i] for i in self.classes]
# remove unwanted classes
rows_to_be_deleted = []
for i in range(labels.shape[0]):
if not labels[i,0] in type_whitelist:
rows_to_be_deleted.append(i)
labels = np.delete(labels, rows_to_be_deleted, 0)
return labels
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
            size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
frame_data_path = self.resolve_idx_to_frame_path(idx)
segment_id = frame_data_path.split('/')[-2]
frame_idx = frame_data_path.split('/')[-1].split('_')[-1].split('.')[0]
# print('data idx is ', idx)
# print('extracted segment id is ', segment_id)
# print('extracted frame idx is ', frame_idx)
# print("path is ", frame_data_path)
point_cloud = np.load(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx)))['pc'] # Nx3
if not os.path.exists(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx))):
print('this path does not exist !!')
print(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx)))
assert point_cloud.shape[1] == 3
frame_data_path = os.path.join(self.data_path, self.split_set,'{}'.format(segment_id) ,'{}_{}.npz'.format(segment_id, frame_idx))
frame_data = np.load(frame_data_path)
labels = frame_data['labels']
assert labels.shape[1] == 8
# print('labels types before filterations ', labels[:,0])
labels = self.filtrate_objects(labels)
# print('labels types after filterations ', labels[:,0])
# create bboxes matrix
bboxes = np.zeros_like(labels)
for i in range(labels.shape[0]):
# if labels[i,0] in self.excluded_labels: # skip signs and unknown labels
# continue
bboxes[i, 0:3] = labels[i,4:7] #centers
bboxes[i, 3:6] = labels[i,1:4] #lwh
bboxes[i, 6] = labels[i,7] # heading
bboxes[i, 7] = DC.raw2used_labels[labels[i,0]] #label
point_votes = np.load(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id) ,'{}_{}_votes.npz'.format(segment_id, frame_idx)))['point_votes'] # Nx10
assert point_votes.shape[1] == 10
point_cloud = point_cloud[:,0:3]
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
raise NotImplementedError
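            # NOTE: the augmentation code below is unreachable while the
            # NotImplementedError above is raised; it is kept for reference.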
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = waymo_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is of full length of box edges,
# while in sunrgbd_data.py data dumping we dumped *half* length l,w,h.. so have to time it by 2 here
box3d_size = bbox[3:6]
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = np.transpose(get_corners_from_labels_array(bbox)) # 8 x 3
# import pdb; pdb.set_trace()
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:bboxes.shape[0]] = bboxes[:,-1] # from 0 to 4
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
# ret_dict['scan_idx'] = np.array(idx).astype(np.int64) # TODO: wrong indicator, add frame name and segment name instead
# ret_dict['max_gt_bboxes'] = max_bboxes #ABAHNASY: not used parameter
return ret_dict
def viz_votes(pc, point_votes, point_votes_mask):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_obj_voted2 = pc_obj + point_votes[inds,3:6]
pc_obj_voted3 = pc_obj + point_votes[inds,6:9]
pc_util.write_ply(pc_obj, 'pc_obj.ply')
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1.ply')
pc_util.write_ply(pc_obj_voted2, 'pc_obj_voted2.ply')
pc_util.write_ply(pc_obj_voted3, 'pc_obj_voted3.ply')
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = DC.class2angle(angle_classes[i], angle_residuals[i])
box_size = DC.class2size(size_classes[i], size_residuals[i])
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs.ply')
pc_util.write_ply(label[mask==1,:], 'gt_centroids.ply')
def get_sem_cls_statistics():
""" Compute number of objects for each semantic class """
d = WaymoDetectionVotesDataset(use_height=True, augment=False)
sem_cls_cnt = {}
for i in range(len(d)):
if i%10==0: print(i)
sample = d[i]
pc = sample['point_clouds']
sem_cls = sample['sem_cls_label']
mask = sample['box_label_mask']
for j in sem_cls:
if mask[j] == 0: continue
if sem_cls[j] not in sem_cls_cnt:
sem_cls_cnt[sem_cls[j]] = 0
sem_cls_cnt[sem_cls[j]] += 1
print(sem_cls_cnt)
if __name__=='__main__':
d = WaymoDetectionVotesDataset(use_height=True, augment=False)
# for i in range(len(d)):
sample = d[0]
print(sample['vote_label'].shape, sample['vote_label_mask'].shape)
pc_util.write_ply(sample['point_clouds'], 'pc.ply')
viz_votes(sample['point_clouds'], sample['vote_label'], sample['vote_label_mask'])
viz_obb(sample['point_clouds'], sample['center_label'], sample['box_label_mask'],
sample['heading_class_label'], sample['heading_residual_label'],
sample['size_class_label'], sample['size_residual_label'])
|
[
"numpy.array",
"sys.path.append",
"os.path.exists",
"box_util.get_corners_from_labels_array",
"numpy.random.random",
"numpy.delete",
"numpy.max",
"pc_util.random_sampling",
"numpy.min",
"numpy.tile",
"pc_util.write_oriented_bbox",
"pickle.load",
"model_util_waymo.WaymoDatasetConfig",
"numpy.transpose",
"pc_util.write_ply",
"os.path.join",
"waymo_utils.rotz",
"numpy.zeros",
"numpy.expand_dims",
"os.path.abspath",
"numpy.percentile",
"numpy.load",
"numpy.zeros_like"
] |
[((307, 332), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (322, 332), False, 'import sys\n'), ((527, 547), 'model_util_waymo.WaymoDatasetConfig', 'WaymoDatasetConfig', ([], {}), '()\n', (545, 547), False, 'from model_util_waymo import WaymoDatasetConfig\n'), ((241, 266), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n'), ((349, 386), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""', '"""utils"""'], {}), "(BASE_DIR, '..', 'utils')\n", (361, 386), False, 'import os\n'), ((12906, 12945), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj', '"""pc_obj.ply"""'], {}), "(pc_obj, 'pc_obj.ply')\n", (12923, 12945), False, 'import pc_util\n'), ((12950, 13003), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj_voted1', '"""pc_obj_voted1.ply"""'], {}), "(pc_obj_voted1, 'pc_obj_voted1.ply')\n", (12967, 13003), False, 'import pc_util\n'), ((13008, 13061), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj_voted2', '"""pc_obj_voted2.ply"""'], {}), "(pc_obj_voted2, 'pc_obj_voted2.ply')\n", (13025, 13061), False, 'import pc_util\n'), ((13066, 13119), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj_voted3', '"""pc_obj_voted3.ply"""'], {}), "(pc_obj_voted3, 'pc_obj_voted3.ply')\n", (13083, 13119), False, 'import pc_util\n'), ((13871, 13929), 'pc_util.write_oriented_bbox', 'pc_util.write_oriented_bbox', (['oriented_boxes', '"""gt_obbs.ply"""'], {}), "(oriented_boxes, 'gt_obbs.ply')\n", (13898, 13929), False, 'import pc_util\n'), ((13934, 13992), 'pc_util.write_ply', 'pc_util.write_ply', (['label[mask == 1, :]', '"""gt_centroids.ply"""'], {}), "(label[mask == 1, :], 'gt_centroids.ply')\n", (13951, 13992), False, 'import pc_util\n'), ((14807, 14858), 'pc_util.write_ply', 'pc_util.write_ply', (["sample['point_clouds']", '"""pc.ply"""'], {}), "(sample['point_clouds'], 'pc.ply')\n", (14824, 14858), False, 'import pc_util\n'), ((1456, 1489), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""dataset"""'], {}), "(BASE_DIR, 'dataset')\n", (1468, 1489), False, 'import os\n'), ((1751, 1812), 'os.path.join', 'os.path.join', (['self.data_path', 'split_set', '"""segments_dict_list"""'], {}), "(self.data_path, split_set, 'segments_dict_list')\n", (1763, 1812), False, 'import os\n'), ((3968, 4008), 'numpy.delete', 'np.delete', (['labels', 'rows_to_be_deleted', '(0)'], {}), '(labels, rows_to_be_deleted, 0)\n', (3977, 4008), True, 'import numpy as np\n'), ((6136, 6160), 'numpy.load', 'np.load', (['frame_data_path'], {}), '(frame_data_path)\n', (6143, 6160), True, 'import numpy as np\n'), ((6469, 6490), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {}), '(labels)\n', (6482, 6490), True, 'import numpy as np\n'), ((9181, 9207), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 3)'], {}), '((MAX_NUM_OBJ, 3))\n', (9189, 9207), True, 'import numpy as np\n'), ((9230, 9256), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 3)'], {}), '((MAX_NUM_OBJ, 3))\n', (9238, 9256), True, 'import numpy as np\n'), ((9281, 9305), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ,)'], {}), '((MAX_NUM_OBJ,))\n', (9289, 9305), True, 'import numpy as np\n'), ((9332, 9356), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ,)'], {}), '((MAX_NUM_OBJ,))\n', (9340, 9356), True, 'import numpy as np\n'), ((9380, 9404), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ,)'], {}), '((MAX_NUM_OBJ,))\n', (9388, 9404), True, 'import numpy as np\n'), ((9430, 9456), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 3)'], {}), '((MAX_NUM_OBJ, 3))\n', (9438, 9456), True, 'import numpy as np\n'), 
((9478, 9499), 'numpy.zeros', 'np.zeros', (['MAX_NUM_OBJ'], {}), '(MAX_NUM_OBJ)\n', (9486, 9499), True, 'import numpy as np\n'), ((9565, 9591), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 8)'], {}), '((MAX_NUM_OBJ, 8))\n', (9573, 9591), True, 'import numpy as np\n'), ((10517, 10543), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 6)'], {}), '((MAX_NUM_OBJ, 6))\n', (10525, 10543), True, 'import numpy as np\n'), ((11228, 11302), 'pc_util.random_sampling', 'pc_util.random_sampling', (['point_cloud', 'self.num_points'], {'return_choices': '(True)'}), '(point_cloud, self.num_points, return_choices=True)\n', (11251, 11302), False, 'import pc_util\n'), ((11892, 11913), 'numpy.zeros', 'np.zeros', (['MAX_NUM_OBJ'], {}), '(MAX_NUM_OBJ)\n', (11900, 11913), True, 'import numpy as np\n'), ((13559, 13570), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (13567, 13570), True, 'import numpy as np\n'), ((1828, 1872), 'os.path.exists', 'os.path.exists', (['self.segments_dict_list_path'], {}), '(self.segments_dict_list_path)\n', (1842, 1872), False, 'import os\n'), ((2082, 2096), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2093, 2096), False, 'import pickle\n'), ((7209, 7247), 'numpy.percentile', 'np.percentile', (['point_cloud[:, 2]', '(0.99)'], {}), '(point_cloud[:, 2], 0.99)\n', (7222, 7247), True, 'import numpy as np\n'), ((7695, 7722), 'waymo_utils.rotz', 'waymo_utils.rotz', (['rot_angle'], {}), '(rot_angle)\n', (7711, 7722), False, 'import waymo_utils\n'), ((7754, 7780), 'numpy.zeros_like', 'np.zeros_like', (['point_votes'], {}), '(point_votes)\n', (7767, 7780), True, 'import numpy as np\n'), ((10797, 10821), 'numpy.min', 'np.min', (['corners_3d[:, 0]'], {}), '(corners_3d[:, 0])\n', (10803, 10821), True, 'import numpy as np\n'), ((10840, 10864), 'numpy.min', 'np.min', (['corners_3d[:, 1]'], {}), '(corners_3d[:, 1])\n', (10846, 10864), True, 'import numpy as np\n'), ((10883, 10907), 'numpy.min', 'np.min', (['corners_3d[:, 2]'], {}), '(corners_3d[:, 2])\n', (10889, 10907), True, 'import numpy as np\n'), ((10926, 10950), 'numpy.max', 'np.max', (['corners_3d[:, 0]'], {}), '(corners_3d[:, 0])\n', (10932, 10950), True, 'import numpy as np\n'), ((10969, 10993), 'numpy.max', 'np.max', (['corners_3d[:, 1]'], {}), '(corners_3d[:, 1])\n', (10975, 10993), True, 'import numpy as np\n'), ((11012, 11036), 'numpy.max', 'np.max', (['corners_3d[:, 2]'], {}), '(corners_3d[:, 2])\n', (11018, 11036), True, 'import numpy as np\n'), ((11062, 11172), 'numpy.array', 'np.array', (['[(xmin + xmax) / 2, (ymin + ymax) / 2, (zmin + zmax) / 2, xmax - xmin, ymax -\n ymin, zmax - zmin]'], {}), '([(xmin + xmax) / 2, (ymin + ymax) / 2, (zmin + zmax) / 2, xmax -\n xmin, ymax - ymin, zmax - zmin])\n', (11070, 11172), True, 'import numpy as np\n'), ((3271, 3363), 'os.path.join', 'os.path.join', (['self.data_path', 'self.split_set', "segment_dict['id']", 'frames_list[frame_idx]'], {}), "(self.data_path, self.split_set, segment_dict['id'],\n frames_list[frame_idx])\n", (3283, 3363), False, 'import os\n'), ((7866, 7887), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (7878, 7887), True, 'import numpy as np\n'), ((7974, 7995), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (7986, 7995), True, 'import numpy as np\n'), ((8084, 8105), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (8096, 8105), True, 'import numpy as np\n'), ((8168, 8189), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (8180, 8189), True, 'import numpy as np\n'), ((8241, 8262), 
'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (8253, 8262), True, 'import numpy as np\n'), ((8697, 8720), 'numpy.tile', 'np.tile', (['scale_ratio', '(3)'], {}), '(scale_ratio, 3)\n', (8704, 8720), True, 'import numpy as np\n'), ((10652, 10687), 'box_util.get_corners_from_labels_array', 'get_corners_from_labels_array', (['bbox'], {}), '(bbox)\n', (10681, 10687), False, 'from box_util import get_corners_from_labels_array\n'), ((3176, 3240), 'os.path.join', 'os.path.join', (['self.data_path', 'self.split_set', "segment_dict['id']"], {}), "(self.data_path, self.split_set, segment_dict['id'])\n", (3188, 3240), False, 'import os\n'), ((3383, 3409), 'os.path.exists', 'os.path.exists', (['frame_path'], {}), '(frame_path)\n', (3397, 3409), False, 'import os\n'), ((7354, 7379), 'numpy.expand_dims', 'np.expand_dims', (['height', '(1)'], {}), '(height, 1)\n', (7368, 7379), True, 'import numpy as np\n'), ((8628, 8646), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8644, 8646), True, 'import numpy as np\n'), ((7616, 7634), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7632, 7634), True, 'import numpy as np\n')]
|
import unittest
import sin_testing as st
import pexpect
import os
import errno
class TestFail(st.SmokeTest):
@st.sinan("build")
def build(self, child, app_desc):
if not os.path.isfile(os.path.join(os.getcwd(),
"test", "test_module.erl")):
            raise Exception("No module file")
child.expect(pexpect.EOF)
if not os.path.isfile(os.path.join(os.getcwd(),
"_build", app_desc.project_name,
"lib", app_desc.project_name + "-" +
app_desc.project_version, "ebin",
"test_module.beam")):
            raise Exception("File Not Built")
def output_testdir(self):
path = os.path.join(os.getcwd(), "test")
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else: raise
Module = """
-module(test_module).
-export([test/0]).
test() ->
ok."""
module_file = os.path.join(path, "test_module.erl")
new_file = open(module_file, "w")
new_file.write(Module)
new_file.close()
@st.sinan("gen foo")
def run_custom_gen(self, child, appdesc):
child.expect("your name> ")
child.sendline(appdesc.user_name)
child.expect("your email> ")
child.sendline(appdesc.email)
child.expect('copyright holder \("%s"\)> ' % appdesc.user_name)
child.sendline()
child.expect('project version> ')
child.sendline(appdesc.project_version)
child.expect('Please specify the ERTS version \(".*"\)> ')
child.sendline()
child.expect('Is this a single application project \("n"\)> ')
child.sendline("y")
child.expect('Would you like a build config\? \("y"\)> ')
child.sendline()
child.expect("Project was created, you should be good to go!")
child.expect(pexpect.EOF)
def test_gen_name(self):
appdesc = st.AppDesc(user_name = "Smoke Test Gen",
email = "<EMAIL>",
copyright_holder = "Smoke Test Copy, LLC.",
# This needs to match the gen name since
# we are overriding it
project_name = "foo",
project_version = "0.134.0.0")
self.run_custom_gen(appdesc)
currentdir = os.getcwd()
projdir = os.path.join(currentdir, appdesc.project_name)
os.chdir(projdir)
self.output_testdir()
self.build(appdesc)
os.chdir(currentdir)
if __name__ == '__main__':
unittest.main()
|
[
"sin_testing.AppDesc",
"os.makedirs",
"sin_testing.sinan",
"os.path.join",
"os.getcwd",
"os.chdir",
"unittest.main"
] |
[((104, 121), 'sin_testing.sinan', 'st.sinan', (['"""build"""'], {}), "('build')\n", (112, 121), True, 'import sin_testing as st\n'), ((1243, 1262), 'sin_testing.sinan', 'st.sinan', (['"""gen foo"""'], {}), "('gen foo')\n", (1251, 1262), True, 'import sin_testing as st\n'), ((2764, 2779), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2777, 2779), False, 'import unittest\n'), ((1101, 1138), 'os.path.join', 'os.path.join', (['path', '"""test_module.erl"""'], {}), "(path, 'test_module.erl')\n", (1113, 1138), False, 'import os\n'), ((2085, 2236), 'sin_testing.AppDesc', 'st.AppDesc', ([], {'user_name': '"""Smoke Test Gen"""', 'email': '"""<EMAIL>"""', 'copyright_holder': '"""Smoke Test Copy, LLC."""', 'project_name': '"""foo"""', 'project_version': '"""0.134.0.0"""'}), "(user_name='Smoke Test Gen', email='<EMAIL>', copyright_holder=\n 'Smoke Test Copy, LLC.', project_name='foo', project_version='0.134.0.0')\n", (2095, 2236), True, 'import sin_testing as st\n'), ((2540, 2551), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2549, 2551), False, 'import os\n'), ((2570, 2616), 'os.path.join', 'os.path.join', (['currentdir', 'appdesc.project_name'], {}), '(currentdir, appdesc.project_name)\n', (2582, 2616), False, 'import os\n'), ((2625, 2642), 'os.chdir', 'os.chdir', (['projdir'], {}), '(projdir)\n', (2633, 2642), False, 'import os\n'), ((2711, 2731), 'os.chdir', 'os.chdir', (['currentdir'], {}), '(currentdir)\n', (2719, 2731), False, 'import os\n'), ((810, 821), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (819, 821), False, 'import os\n'), ((856, 873), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (867, 873), False, 'import os\n'), ((203, 214), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (212, 214), False, 'import os\n'), ((404, 415), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (413, 415), False, 'import os\n')]
|
from budgetportal.models import (
FinancialYear,
Sphere,
Government,
Department,
Programme,
)
from django.core.management import call_command
from django.test import TestCase
from tempfile import NamedTemporaryFile
from StringIO import StringIO
import yaml
class BasicPagesTestCase(TestCase):
def setUp(self):
year = FinancialYear.objects.create(slug="2030-31")
# spheres
national = Sphere.objects.create(financial_year=year, name='National')
provincial = Sphere.objects.create(financial_year=year, name='Provincial')
# governments
self.fake_national_government = Government.objects.create(sphere=national, name='South Africa')
self.fake_provincial_government = Government.objects.create(
sphere=provincial,
name='Free State'
)
def test_load_departments_national(self):
filename = 'budgetportal/tests/test_data/test_management_commands_national_departments.csv'
call_command('load_departments', '2030-31', 'national', filename)
presidency = Department.objects.get(government=self.fake_national_government, name='The Presidency')
self.assertEqual(presidency.vote_number, 1)
self.assertTrue(presidency.is_vote_primary)
self.assertIn("To serve the president", presidency.intro)
self.assertIn("Facilitate a common", presidency.intro)
self.assertTrue(presidency.website_url, 'www.thepresidency.gov.za')
parliament = Department.objects.get(government=self.fake_national_government, vote_number=2)
self.assertEqual(parliament.name, 'Parliament')
self.assertTrue(parliament.is_vote_primary)
self.assertIn("Provide the support services", parliament.intro)
self.assertIn("These are aligned", parliament.intro)
self.assertTrue(parliament.website_url, 'www.parliament.gov.za')
def test_load_departments_provincial(self):
filename = 'budgetportal/tests/test_data/test_management_commands_provincial_departments.csv'
call_command('load_departments', '2030-31', 'provincial', filename)
premier = Department.objects.get(
government=self.fake_provincial_government,
name='Premier'
)
self.assertEqual(premier.vote_number, 1)
self.assertTrue(premier.is_vote_primary)
self.assertIn("Implementing all national legislation within functional areas", premier.intro)
self.assertIn("Leading Free State", premier.intro)
self.assertTrue(premier.website_url, 'www.testpremier.gov.za')
legislature = Department.objects.get(
government=self.fake_provincial_government,
name='Free State Legislature'
)
self.assertEqual(legislature.vote_number, 2)
self.assertTrue(legislature.is_vote_primary)
self.assertIn("The legislative authority of a", legislature.intro)
self.assertIn("The vision of the Free State Legislature", legislature.intro)
self.assertTrue(premier.website_url, 'www.testlegislature.co.za')
class ExportImportProgrammesTestCase(TestCase):
def setUp(self):
self.year = FinancialYear.objects.create(slug="2030-31")
# spheres
national = Sphere.objects.create(financial_year=self.year, name='National')
# governments
south_africa = Government.objects.create(sphere=national, name='South Africa')
self.department = Department.objects.create(
government=south_africa,
name="Some Department",
vote_number=1,
is_vote_primary=True,
intro=""
)
Programme.objects.create(
department=self.department,
name="A programme",
programme_number=1
)
Programme.objects.create(
department=self.department,
name="Another programme",
programme_number=2
)
def test_load_programmes_from_export(self):
"""Test that exported programmes can be loaded correctly"""
with NamedTemporaryFile() as csv_file:
# Download the CSV
response = self.client.get('/2030-31/national/programmes.csv')
self.assertEqual(response.status_code, 200)
csv_file.write(response.content)
csv_file.flush()
# Delete all programmes
Programme.objects.all().delete()
# Create them again
out = StringIO()
result = call_command('load_programmes', '2030-31', 'national', csv_file.name, stdout=out)
result = yaml.load(out.getvalue())
self.assertEqual(result['number_added'], 2)
# Check that it was successful
programme_1 = Programme.objects.get(department=self.department, programme_number=1)
programme_2 = Programme.objects.get(department=self.department, programme_number=2)
self.assertEqual("A programme", programme_1.name)
self.assertEqual("Another programme", programme_2.name)
class ExportImportDepartmentsTestCase(TestCase):
def setUp(self):
self.year = FinancialYear.objects.create(slug="2030-31")
# spheres
national = Sphere.objects.create(financial_year=self.year, name='National')
Sphere.objects.create(financial_year=self.year, name='Provincial')
# governments
self.fake_national_government = Government.objects.create(sphere=national, name='South Africa')
self.department_one = Department.objects.create(
government=self.fake_national_government,
name="Some Department 1",
vote_number=1,
is_vote_primary=True,
intro="",
website_url="test.com"
)
self.department_one = Department.objects.create(
government=self.fake_national_government,
name="Some Department 2",
vote_number=2,
is_vote_primary=False,
intro="",
website_url=None
)
def test_load_departments_from_export(self):
"""Test that exported departments can be loaded correctly
        Note: the departments export currently covers both national and provincial, so this only works
because we are not creating any provincial departments prior to exporting. """
with NamedTemporaryFile() as csv_file:
# Download the CSV
response = self.client.get('/2030-31/departments.csv')
self.assertEqual(response.status_code, 200)
csv_file.write(response.content)
csv_file.flush()
# Delete all departments
Department.objects.all().delete()
# Create them again
out = StringIO()
result = call_command('load_departments', '2030-31', 'national', csv_file.name, stdout=out)
result = yaml.load(out.getvalue())
# self.assertEqual(result['number_added'], 2)
# Check that it was successful
dept_1 = Department.objects.get(government=self.fake_national_government, vote_number=1)
dept_2 = Department.objects.get(government=self.fake_national_government, vote_number=2)
self.assertEqual("Some Department 1", dept_1.name)
self.assertEqual("Some Department 2", dept_2.name)
|
[
"StringIO.StringIO",
"budgetportal.models.Programme.objects.all",
"budgetportal.models.Department.objects.get",
"budgetportal.models.Department.objects.all",
"django.core.management.call_command",
"budgetportal.models.Programme.objects.get",
"budgetportal.models.FinancialYear.objects.create",
"budgetportal.models.Programme.objects.create",
"tempfile.NamedTemporaryFile",
"budgetportal.models.Department.objects.create",
"budgetportal.models.Government.objects.create",
"budgetportal.models.Sphere.objects.create"
] |
[((350, 394), 'budgetportal.models.FinancialYear.objects.create', 'FinancialYear.objects.create', ([], {'slug': '"""2030-31"""'}), "(slug='2030-31')\n", (378, 394), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((433, 492), 'budgetportal.models.Sphere.objects.create', 'Sphere.objects.create', ([], {'financial_year': 'year', 'name': '"""National"""'}), "(financial_year=year, name='National')\n", (454, 492), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((514, 575), 'budgetportal.models.Sphere.objects.create', 'Sphere.objects.create', ([], {'financial_year': 'year', 'name': '"""Provincial"""'}), "(financial_year=year, name='Provincial')\n", (535, 575), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((639, 702), 'budgetportal.models.Government.objects.create', 'Government.objects.create', ([], {'sphere': 'national', 'name': '"""South Africa"""'}), "(sphere=national, name='South Africa')\n", (664, 702), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((745, 808), 'budgetportal.models.Government.objects.create', 'Government.objects.create', ([], {'sphere': 'provincial', 'name': '"""Free State"""'}), "(sphere=provincial, name='Free State')\n", (770, 808), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((998, 1063), 'django.core.management.call_command', 'call_command', (['"""load_departments"""', '"""2030-31"""', '"""national"""', 'filename'], {}), "('load_departments', '2030-31', 'national', filename)\n", (1010, 1063), False, 'from django.core.management import call_command\n'), ((1086, 1178), 'budgetportal.models.Department.objects.get', 'Department.objects.get', ([], {'government': 'self.fake_national_government', 'name': '"""The Presidency"""'}), "(government=self.fake_national_government, name=\n 'The Presidency')\n", (1108, 1178), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((1505, 1584), 'budgetportal.models.Department.objects.get', 'Department.objects.get', ([], {'government': 'self.fake_national_government', 'vote_number': '(2)'}), '(government=self.fake_national_government, vote_number=2)\n', (1527, 1584), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((2058, 2125), 'django.core.management.call_command', 'call_command', (['"""load_departments"""', '"""2030-31"""', '"""provincial"""', 'filename'], {}), "('load_departments', '2030-31', 'provincial', filename)\n", (2070, 2125), False, 'from django.core.management import call_command\n'), ((2145, 2232), 'budgetportal.models.Department.objects.get', 'Department.objects.get', ([], {'government': 'self.fake_provincial_government', 'name': '"""Premier"""'}), "(government=self.fake_provincial_government, name=\n 'Premier')\n", (2167, 2232), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((2615, 2717), 'budgetportal.models.Department.objects.get', 'Department.objects.get', ([], {'government': 'self.fake_provincial_government', 'name': '"""Free State Legislature"""'}), "(government=self.fake_provincial_government, name=\n 'Free State Legislature')\n", (2637, 2717), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((3178, 3222), 
'budgetportal.models.FinancialYear.objects.create', 'FinancialYear.objects.create', ([], {'slug': '"""2030-31"""'}), "(slug='2030-31')\n", (3206, 3222), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((3261, 3325), 'budgetportal.models.Sphere.objects.create', 'Sphere.objects.create', ([], {'financial_year': 'self.year', 'name': '"""National"""'}), "(financial_year=self.year, name='National')\n", (3282, 3325), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((3372, 3435), 'budgetportal.models.Government.objects.create', 'Government.objects.create', ([], {'sphere': 'national', 'name': '"""South Africa"""'}), "(sphere=national, name='South Africa')\n", (3397, 3435), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((3463, 3588), 'budgetportal.models.Department.objects.create', 'Department.objects.create', ([], {'government': 'south_africa', 'name': '"""Some Department"""', 'vote_number': '(1)', 'is_vote_primary': '(True)', 'intro': '""""""'}), "(government=south_africa, name='Some Department',\n vote_number=1, is_vote_primary=True, intro='')\n", (3488, 3588), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((3663, 3759), 'budgetportal.models.Programme.objects.create', 'Programme.objects.create', ([], {'department': 'self.department', 'name': '"""A programme"""', 'programme_number': '(1)'}), "(department=self.department, name='A programme',\n programme_number=1)\n", (3687, 3759), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((3810, 3913), 'budgetportal.models.Programme.objects.create', 'Programme.objects.create', ([], {'department': 'self.department', 'name': '"""Another programme"""', 'programme_number': '(2)'}), "(department=self.department, name=\n 'Another programme', programme_number=2)\n", (3834, 3913), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((5165, 5209), 'budgetportal.models.FinancialYear.objects.create', 'FinancialYear.objects.create', ([], {'slug': '"""2030-31"""'}), "(slug='2030-31')\n", (5193, 5209), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((5248, 5312), 'budgetportal.models.Sphere.objects.create', 'Sphere.objects.create', ([], {'financial_year': 'self.year', 'name': '"""National"""'}), "(financial_year=self.year, name='National')\n", (5269, 5312), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((5321, 5387), 'budgetportal.models.Sphere.objects.create', 'Sphere.objects.create', ([], {'financial_year': 'self.year', 'name': '"""Provincial"""'}), "(financial_year=self.year, name='Provincial')\n", (5342, 5387), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((5451, 5514), 'budgetportal.models.Government.objects.create', 'Government.objects.create', ([], {'sphere': 'national', 'name': '"""South Africa"""'}), "(sphere=national, name='South Africa')\n", (5476, 5514), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((5546, 5719), 'budgetportal.models.Department.objects.create', 'Department.objects.create', ([], {'government': 'self.fake_national_government', 'name': '"""Some Department 1"""', 'vote_number': '(1)', 'is_vote_primary': 
'(True)', 'intro': '""""""', 'website_url': '"""test.com"""'}), "(government=self.fake_national_government, name=\n 'Some Department 1', vote_number=1, is_vote_primary=True, intro='',\n website_url='test.com')\n", (5571, 5719), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((5823, 5991), 'budgetportal.models.Department.objects.create', 'Department.objects.create', ([], {'government': 'self.fake_national_government', 'name': '"""Some Department 2"""', 'vote_number': '(2)', 'is_vote_primary': '(False)', 'intro': '""""""', 'website_url': 'None'}), "(government=self.fake_national_government, name=\n 'Some Department 2', vote_number=2, is_vote_primary=False, intro='',\n website_url=None)\n", (5848, 5991), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((4086, 4106), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (4104, 4106), False, 'from tempfile import NamedTemporaryFile\n'), ((4490, 4500), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (4498, 4500), False, 'from StringIO import StringIO\n'), ((4522, 4607), 'django.core.management.call_command', 'call_command', (['"""load_programmes"""', '"""2030-31"""', '"""national"""', 'csv_file.name'], {'stdout': 'out'}), "('load_programmes', '2030-31', 'national', csv_file.name,\n stdout=out)\n", (4534, 4607), False, 'from django.core.management import call_command\n'), ((4777, 4846), 'budgetportal.models.Programme.objects.get', 'Programme.objects.get', ([], {'department': 'self.department', 'programme_number': '(1)'}), '(department=self.department, programme_number=1)\n', (4798, 4846), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((4873, 4942), 'budgetportal.models.Programme.objects.get', 'Programme.objects.get', ([], {'department': 'self.department', 'programme_number': '(2)'}), '(department=self.department, programme_number=2)\n', (4894, 4942), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((6372, 6392), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (6390, 6392), False, 'from tempfile import NamedTemporaryFile\n'), ((6770, 6780), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (6778, 6780), False, 'from StringIO import StringIO\n'), ((6802, 6888), 'django.core.management.call_command', 'call_command', (['"""load_departments"""', '"""2030-31"""', '"""national"""', 'csv_file.name'], {'stdout': 'out'}), "('load_departments', '2030-31', 'national', csv_file.name,\n stdout=out)\n", (6814, 6888), False, 'from django.core.management import call_command\n'), ((7055, 7134), 'budgetportal.models.Department.objects.get', 'Department.objects.get', ([], {'government': 'self.fake_national_government', 'vote_number': '(1)'}), '(government=self.fake_national_government, vote_number=1)\n', (7077, 7134), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((7156, 7235), 'budgetportal.models.Department.objects.get', 'Department.objects.get', ([], {'government': 'self.fake_national_government', 'vote_number': '(2)'}), '(government=self.fake_national_government, vote_number=2)\n', (7178, 7235), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n'), ((4406, 4429), 'budgetportal.models.Programme.objects.all', 'Programme.objects.all', ([], {}), '()\n', (4427, 4429), False, 'from budgetportal.models import 
FinancialYear, Sphere, Government, Department, Programme\n'), ((6685, 6709), 'budgetportal.models.Department.objects.all', 'Department.objects.all', ([], {}), '()\n', (6707, 6709), False, 'from budgetportal.models import FinancialYear, Sphere, Government, Department, Programme\n')]
|
import json
import logging
import ssl
import sys
from oidcrp.exception import ResponseError
logger = logging.getLogger(__name__)
def load_json(file_name):
with open(file_name) as fp:
js = json.load(fp)
return js
def fed_parse_response(instance, info, sformat="", state="", **kwargs):
if sformat in ['jose', 'jws', 'jwe']:
resp = instance.post_parse_response(info, state=state)
if not resp:
logger.error('Missing or faulty response')
raise ResponseError("Missing or faulty response")
return resp
else:
return instance.parse_response(info, sformat, state, **kwargs)
def compact(qsdict):
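    # Flatten parsed query-string values: single-element lists become scalars,
    # ints pass through unchanged, and multi-valued entries are kept as lists.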
res = {}
for key, val in qsdict.items():
if isinstance(val, int):
res[key] = val
elif len(val) == 1:
res[key] = val[0]
else:
res[key] = val
return res
|
[
"logging.getLogger",
"json.load",
"oidcrp.exception.ResponseError"
] |
[((103, 130), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (120, 130), False, 'import logging\n'), ((204, 217), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (213, 217), False, 'import json\n'), ((506, 549), 'oidcrp.exception.ResponseError', 'ResponseError', (['"""Missing or faulty response"""'], {}), "('Missing or faulty response')\n", (519, 549), False, 'from oidcrp.exception import ResponseError\n')]
|
"""state consumed
Revision ID: 0be658f07ac6
Revises: bd1e892d0609
Create Date: 2021-07-18 21:26:04.588007
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy import String
# revision identifiers, used by Alembic.
revision = '0be658f07ac6'
down_revision = 'bd1e892d0609'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# Create an ad-hoc table to use for the insert statement.
states_table = table('states',
column('cd', String),
column('description', String)
)
op.bulk_insert(
states_table,
[
{'cd': 'CONSUMED', 'description': 'CONSUMED by a corp'}
]
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("DELETE FROM states WHERE cd = 'CONSUMED';")
# ### end Alembic commands ###
|
[
"sqlalchemy.sql.column",
"alembic.op.bulk_insert",
"alembic.op.execute"
] |
[((693, 784), 'alembic.op.bulk_insert', 'op.bulk_insert', (['states_table', "[{'cd': 'CONSUMED', 'description': 'CONSUMED by a corp'}]"], {}), "(states_table, [{'cd': 'CONSUMED', 'description':\n 'CONSUMED by a corp'}])\n", (707, 784), False, 'from alembic import op\n'), ((950, 1005), 'alembic.op.execute', 'op.execute', (['"""DELETE FROM states WHERE cd = \'CONSUMED\';"""'], {}), '("DELETE FROM states WHERE cd = \'CONSUMED\';")\n', (960, 1005), False, 'from alembic import op\n'), ((581, 601), 'sqlalchemy.sql.column', 'column', (['"""cd"""', 'String'], {}), "('cd', String)\n", (587, 601), False, 'from sqlalchemy.sql import table, column\n'), ((630, 659), 'sqlalchemy.sql.column', 'column', (['"""description"""', 'String'], {}), "('description', String)\n", (636, 659), False, 'from sqlalchemy.sql import table, column\n')]
|
import numpy as np
class Agent:
def __init__(self):
self.q_table = np.zeros(shape=(3, ))
self.rewards = []
self.averaged_rewards = []
self.total_rewards = 0
self.action_cursor = 1
class HystereticAgentMatrix:
def __init__(self, environment, increasing_learning_rate=0.9, decreasing_learning_rate=0.1,
discount_factor=0.9, exploration_rate=0.01):
self.environment = environment
self.discount_factor = discount_factor
self.exploration_rate = exploration_rate
self.increasing_learning_rate = increasing_learning_rate
self.decreasing_learning_rate = decreasing_learning_rate
# Setup q_table
self.num_of_action = self.environment.actions.n
self.states_dim_x = self.environment.states.dim_x
self.states_dim_y = self.environment.states.dim_y
# Agents
self.num_of_agents = 2
self.agents = []
for i in range(self.num_of_agents):
self.agents.append(Agent())
self.steps = 1
def step(self):
actions = []
for agent in self.agents:
# Determine Actions
action = self.get_action(agent)
actions.append(action)
# Take action and update
for agent in self.agents:
# Previous State capture (Previous q value, previous position)
q_p = agent.q_table[agent.action_cursor]
# Take action
obs, reward, done, valid = self.environment.step(action=actions, agent_id=0)
# Update Q-table
bellman_value = reward + self.discount_factor * (np.max(agent.q_table[agent.action_cursor]) - q_p)
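            # Hysteretic update: apply the larger learning rate when the update is
            # non-negative and the smaller one when it is negative, so the agent stays
            # optimistic about low rewards caused by its teammate's exploration.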
if bellman_value >= 0:
new_q = q_p + self.increasing_learning_rate * bellman_value
else:
new_q = q_p + self.decreasing_learning_rate * bellman_value
agent.q_table[agent.action_cursor] = new_q
# self.exploration_rate = self.exploration_rate / self.steps
agent.total_rewards += reward
agent.rewards.append(reward)
if self.steps > 1:
agent.averaged_rewards.append(agent.total_rewards / (self.steps + 5))
self.steps += 1
def set_exploration_rate(self, rate):
self.exploration_rate = rate
def get_action(self, agent):
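        # Epsilon-greedy selection: explore a random action with probability
        # `exploration_rate`, otherwise exploit the argmax of the agent's Q-values.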
if np.random.randint(0, 100) / 100 < self.exploration_rate:
# Explore
action = np.random.randint(0, self.num_of_action)
else:
action = np.argmax(agent.q_table)
agent.action_cursor = action
return action
def get_averaged_rewards(self, agent_id=0):
return self.agents[agent_id].averaged_rewards, self.agents[agent_id + 1].averaged_rewards
def get_rewards(self):
return self.agents[0].rewards, self.agents[1].rewards
def reset_reward(self):
for agent in self.agents:
agent.rewards = []
agent.averaged_rewards = []
|
[
"numpy.zeros",
"numpy.argmax",
"numpy.random.randint",
"numpy.max"
] |
[((80, 100), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (88, 100), True, 'import numpy as np\n'), ((2484, 2524), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_of_action'], {}), '(0, self.num_of_action)\n', (2501, 2524), True, 'import numpy as np\n'), ((2560, 2584), 'numpy.argmax', 'np.argmax', (['agent.q_table'], {}), '(agent.q_table)\n', (2569, 2584), True, 'import numpy as np\n'), ((2384, 2409), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (2401, 2409), True, 'import numpy as np\n'), ((1646, 1688), 'numpy.max', 'np.max', (['agent.q_table[agent.action_cursor]'], {}), '(agent.q_table[agent.action_cursor])\n', (1652, 1688), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of TensorFlow kernel layers.
Classes:
GroupAttention: A simple group-specific attention layer.
Kernel: A kernel that allows the user to separately specify a
distance and similarity function.
AttentionKernel: A kernel that uses group-specific attention
weights and allows the user to separately specify a distance
and similarity function.
GroupAttentionVariational: A variational group attention layer.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
import psiz.keras.constraints as pk_constraints
import psiz.keras.initializers as pk_initializers
from psiz.keras.layers.variational import Variational
from psiz.keras.layers.distances.minkowski import WeightedMinkowski
from psiz.models.base import GroupLevel
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='GroupAttention'
)
class GroupAttention(tf.keras.layers.Layer):
"""Group-specific attention weights."""
def __init__(
self, n_group=1, n_dim=None, fit_group=None,
embeddings_initializer=None, embeddings_regularizer=None,
embeddings_constraint=None, **kwargs):
"""Initialize.
Arguments:
n_dim: An integer indicating the dimensionality of the
embeddings. Must be equal to or greater than one.
n_group (optional): An integer indicating the number of
different population groups in the embedding. A
separate set of attention weights will be inferred for
each group. Must be equal to or greater than one.
fit_group: Boolean indicating if variable is trainable.
shape=(n_group,)
Raises:
ValueError: If `n_dim` or `n_group` arguments are invalid.
"""
super(GroupAttention, self).__init__(**kwargs)
if (n_group < 1):
raise ValueError(
"The number of groups (`n_group`) must be an integer greater "
"than 0."
)
self.n_group = n_group
if (n_dim < 1):
raise ValueError(
"The dimensionality (`n_dim`) must be an integer "
"greater than 0."
)
self.n_dim = n_dim
# Handle initializer.
if embeddings_initializer is None:
if self.n_group == 1:
embeddings_initializer = tf.keras.initializers.Ones()
else:
scale = self.n_dim
alpha = np.ones((self.n_dim))
embeddings_initializer = pk_initializers.RandomAttention(
alpha, scale
)
self.embeddings_initializer = tf.keras.initializers.get(
embeddings_initializer
)
# Handle regularizer.
self.embeddings_regularizer = tf.keras.regularizers.get(
embeddings_regularizer
)
# Handle constraints.
if embeddings_constraint is None:
embeddings_constraint = pk_constraints.NonNegNorm(
scale=self.n_dim
)
self.embeddings_constraint = tf.keras.constraints.get(
embeddings_constraint
)
if fit_group is None:
if self.n_group == 1:
fit_group = False # TODO default should always be train
else:
fit_group = True
self.fit_group = fit_group
self.embeddings = self.add_weight(
shape=(self.n_group, self.n_dim),
initializer=self.embeddings_initializer,
trainable=fit_group, name='w', dtype=K.floatx(),
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint
)
self.mask_zero = False
def call(self, inputs):
"""Call.
Inflate weights by `group_id`.
Arguments:
inputs: A Tensor denoting `group_id`.
"""
output = tf.gather(self.embeddings, inputs)
# Add singleton dimension for sample_size.
output = tf.expand_dims(output, axis=0)
return output
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'n_group': int(self.n_group),
'n_dim': int(self.n_dim),
'fit_group': self.fit_group,
'embeddings_initializer':
tf.keras.initializers.serialize(self.embeddings_initializer),
'embeddings_regularizer':
tf.keras.regularizers.serialize(self.embeddings_regularizer),
'embeddings_constraint':
tf.keras.constraints.serialize(self.embeddings_constraint)
})
return config
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='Kernel'
)
class Kernel(GroupLevel):
"""A basic population-wide kernel."""
def __init__(self, distance=None, similarity=None, **kwargs):
"""Initialize."""
super(Kernel, self).__init__(**kwargs)
if distance is None:
distance = WeightedMinkowski()
self.distance = distance
if similarity is None:
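            # NOTE: ExponentialSimilarity is referenced here (and in AttentionKernel)
            # but is not imported in this module; it is assumed to be available from
            # the surrounding psiz package in the original context.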
similarity = ExponentialSimilarity()
self.similarity = similarity
# Gather all pointers to theta-associated variables.
theta = self.distance.theta
theta.update(self.similarity.theta)
self.theta = theta
self._n_sample = ()
self._kl_weight = 0
@property
def n_sample(self):
return self._n_sample
@n_sample.setter
def n_sample(self, n_sample):
self._n_sample = n_sample
self.distance.n_sample = n_sample
self.similarity.n_sample = n_sample
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, kl_weight):
self._kl_weight = kl_weight
# Set kl_weight of constituent layers. # TODO MAYBE use `_layers`?
self.distance.kl_weight = kl_weight
self.similarity.kl_weight = kl_weight
def call(self, inputs):
"""Call.
Compute k(z_0, z_1), where `k` is the similarity kernel.
Note: Broadcasting rules are used to compute similarity between
`z_0` and `z_1`.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
"""
z_0 = inputs[0]
z_1 = inputs[1]
# group = inputs[-1][:, self.group_level]
# Create identity attention weights.
attention = tf.ones_like(z_0)
# Compute distance between query and references.
dist_qr = self.distance([z_0, z_1, attention])
# Compute similarity.
sim_qr = self.similarity(dist_qr)
return sim_qr
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'distance': tf.keras.utils.serialize_keras_object(self.distance),
'similarity': tf.keras.utils.serialize_keras_object(
self.similarity
),
})
return config
@classmethod
def from_config(cls, config):
"""Create from configuration."""
config['distance'] = tf.keras.layers.deserialize(config['distance'])
config['similarity'] = tf.keras.layers.deserialize(
config['similarity']
)
return cls(**config)
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='AttentionKernel'
)
class AttentionKernel(GroupLevel):
"""Attention kernel container."""
def __init__(
self, n_dim=None, attention=None, distance=None, similarity=None,
**kwargs):
"""Initialize.
Arguments:
n_dim: The dimensionality of the attention weights. This
should match the dimensionality of the embedding.
attention: A attention layer. If this is specified, the
argument `n_dim` is ignored.
distance: A distance layer.
similarity: A similarity layer.
"""
super(AttentionKernel, self).__init__(**kwargs)
if attention is None:
attention = GroupAttention(n_dim=n_dim, n_group=1)
self.attention = attention
if distance is None:
distance = WeightedMinkowski()
self.distance = distance
if similarity is None:
similarity = ExponentialSimilarity()
self.similarity = similarity
# Gather all pointers to theta-associated variables.
theta = self.distance.theta
theta.update(self.similarity.theta)
self.theta = theta
self._n_sample = ()
self._kl_weight = 0
def call(self, inputs):
"""Call.
Compute k(z_0, z_1), where `k` is the similarity kernel.
Note: Broadcasting rules are used to compute similarity between
`z_0` and `z_1`.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
group: A tf.Tensor denoting group assignments.
shape = (batch_size, k)
"""
z_0 = inputs[0]
z_1 = inputs[1]
group = inputs[-1]
# Expand attention weights.
attention = self.attention(group[:, self.group_level])
# Add singleton inner dimensions that are not related to sample_size,
# batch_size or vector dimensionality.
attention_shape = tf.shape(attention)
sample_size = tf.expand_dims(attention_shape[0], axis=0)
batch_size = tf.expand_dims(attention_shape[1], axis=0)
dim_size = tf.expand_dims(attention_shape[-1], axis=0)
n_expand = tf.rank(z_0) - tf.rank(attention)
shape_exp = tf.ones(n_expand, dtype=attention_shape[0].dtype)
shape_exp = tf.concat(
(sample_size, batch_size, shape_exp, dim_size), axis=0
)
attention = tf.reshape(attention, shape_exp)
# Compute distance between query and references.
dist_qr = self.distance([z_0, z_1, attention])
# Compute similarity.
sim_qr = self.similarity(dist_qr)
return sim_qr
# @property
# def n_dim(self):
# """Getter method for n_dim."""
# return self.attention.n_dim
@property
def n_sample(self):
return self._n_sample
@n_sample.setter
def n_sample(self, n_sample):
self._n_sample = n_sample
self.attention.n_sample = n_sample
self.distance.n_sample = n_sample
self.similarity.n_sample = n_sample
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, kl_weight):
self._kl_weight = kl_weight
# Set kl_weight of constituent layers. # TODO MAYBE use `_layers`?
self.attention.kl_weight = kl_weight
self.distance.kl_weight = kl_weight
self.similarity.kl_weight = kl_weight
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
# 'n_dim': int(self.n_dim),
'attention': tf.keras.utils.serialize_keras_object(self.attention),
'distance': tf.keras.utils.serialize_keras_object(self.distance),
'similarity': tf.keras.utils.serialize_keras_object(
self.similarity
),
})
return config
@classmethod
def from_config(cls, config):
"""Create from configuration."""
config['attention'] = tf.keras.layers.deserialize(config['attention'])
config['distance'] = tf.keras.layers.deserialize(config['distance'])
config['similarity'] = tf.keras.layers.deserialize(
config['similarity']
)
return cls(**config)
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='GroupAttentionVariational'
)
class GroupAttentionVariational(Variational):
"""Variational analog of group-specific attention weights."""
def __init__(self, **kwargs):
"""Initialize.
Arguments:
kwargs: Additional key-word arguments.
"""
super(GroupAttentionVariational, self).__init__(**kwargs)
def call(self, inputs):
"""Call.
Grab `group_id` only.
Arguments:
inputs: A Tensor denoting a trial's group membership.
"""
# Run forward pass through variational posterior layer.
outputs = self.posterior(inputs)
# Apply KL divergence between posterior and prior.
self.add_kl_loss(self.posterior.embeddings, self.prior.embeddings)
return outputs
@property
def n_group(self):
"""Getter method for `n_group`"""
# TODO need better decoupling, not all distributions will have loc.
return self.posterior.embeddings.distribution.loc.shape[0]
@property
def n_dim(self):
"""Getter method for `n_group`"""
# TODO need better decoupling, not all distributions will have loc.
return self.posterior.embeddings.distribution.loc.shape[1]
@property
def mask_zero(self):
"""Getter method for embeddings `mask_zero`."""
return self.posterior.mask_zero
@property
def embeddings(self):
"""Getter method for embeddings posterior mode."""
return self.posterior.embeddings
|
[
"tensorflow.shape",
"psiz.keras.initializers.RandomAttention",
"tensorflow.keras.utils.serialize_keras_object",
"tensorflow.keras.initializers.Ones",
"tensorflow.ones_like",
"tensorflow.keras.initializers.serialize",
"tensorflow.rank",
"tensorflow.concat",
"tensorflow.keras.regularizers.get",
"tensorflow.keras.constraints.serialize",
"psiz.keras.layers.distances.minkowski.WeightedMinkowski",
"numpy.ones",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.gather",
"tensorflow.python.keras.backend.floatx",
"tensorflow.reshape",
"psiz.keras.constraints.NonNegNorm",
"tensorflow.expand_dims",
"tensorflow.keras.constraints.get",
"tensorflow.keras.regularizers.serialize",
"tensorflow.ones",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.deserialize"
] |
[((1520, 1618), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""GroupAttention"""'}), "(package='psiz.keras.layers',\n name='GroupAttention')\n", (1562, 1618), True, 'import tensorflow as tf\n'), ((5502, 5592), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""Kernel"""'}), "(package='psiz.keras.layers',\n name='Kernel')\n", (5544, 5592), True, 'import tensorflow as tf\n'), ((8384, 8483), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""AttentionKernel"""'}), "(package='psiz.keras.layers',\n name='AttentionKernel')\n", (8426, 8483), True, 'import tensorflow as tf\n'), ((12988, 13097), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""GroupAttentionVariational"""'}), "(package='psiz.keras.layers',\n name='GroupAttentionVariational')\n", (13030, 13097), True, 'import tensorflow as tf\n'), ((3453, 3502), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['embeddings_initializer'], {}), '(embeddings_initializer)\n', (3478, 3502), True, 'import tensorflow as tf\n'), ((3594, 3643), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['embeddings_regularizer'], {}), '(embeddings_regularizer)\n', (3619, 3643), True, 'import tensorflow as tf\n'), ((3886, 3933), 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['embeddings_constraint'], {}), '(embeddings_constraint)\n', (3910, 3933), True, 'import tensorflow as tf\n'), ((4714, 4748), 'tensorflow.gather', 'tf.gather', (['self.embeddings', 'inputs'], {}), '(self.embeddings, inputs)\n', (4723, 4748), True, 'import tensorflow as tf\n'), ((4817, 4847), 'tensorflow.expand_dims', 'tf.expand_dims', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (4831, 4847), True, 'import tensorflow as tf\n'), ((7500, 7517), 'tensorflow.ones_like', 'tf.ones_like', (['z_0'], {}), '(z_0)\n', (7512, 7517), True, 'import tensorflow as tf\n'), ((8201, 8248), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['distance']"], {}), "(config['distance'])\n", (8228, 8248), True, 'import tensorflow as tf\n'), ((8280, 8329), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['similarity']"], {}), "(config['similarity'])\n", (8307, 8329), True, 'import tensorflow as tf\n'), ((10643, 10662), 'tensorflow.shape', 'tf.shape', (['attention'], {}), '(attention)\n', (10651, 10662), True, 'import tensorflow as tf\n'), ((10685, 10727), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_shape[0]'], {'axis': '(0)'}), '(attention_shape[0], axis=0)\n', (10699, 10727), True, 'import tensorflow as tf\n'), ((10749, 10791), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_shape[1]'], {'axis': '(0)'}), '(attention_shape[1], axis=0)\n', (10763, 10791), True, 'import tensorflow as tf\n'), ((10811, 10854), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_shape[-1]'], {'axis': '(0)'}), '(attention_shape[-1], axis=0)\n', (10825, 10854), True, 'import tensorflow as tf\n'), ((10929, 10978), 'tensorflow.ones', 'tf.ones', (['n_expand'], {'dtype': 'attention_shape[0].dtype'}), '(n_expand, dtype=attention_shape[0].dtype)\n', (10936, 10978), True, 'import tensorflow 
as tf\n'), ((10999, 11064), 'tensorflow.concat', 'tf.concat', (['(sample_size, batch_size, shape_exp, dim_size)'], {'axis': '(0)'}), '((sample_size, batch_size, shape_exp, dim_size), axis=0)\n', (11008, 11064), True, 'import tensorflow as tf\n'), ((11107, 11139), 'tensorflow.reshape', 'tf.reshape', (['attention', 'shape_exp'], {}), '(attention, shape_exp)\n', (11117, 11139), True, 'import tensorflow as tf\n'), ((12727, 12775), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['attention']"], {}), "(config['attention'])\n", (12754, 12775), True, 'import tensorflow as tf\n'), ((12805, 12852), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['distance']"], {}), "(config['distance'])\n", (12832, 12852), True, 'import tensorflow as tf\n'), ((12884, 12933), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['similarity']"], {}), "(config['similarity'])\n", (12911, 12933), True, 'import tensorflow as tf\n'), ((3775, 3818), 'psiz.keras.constraints.NonNegNorm', 'pk_constraints.NonNegNorm', ([], {'scale': 'self.n_dim'}), '(scale=self.n_dim)\n', (3800, 3818), True, 'import psiz.keras.constraints as pk_constraints\n'), ((5856, 5875), 'psiz.keras.layers.distances.minkowski.WeightedMinkowski', 'WeightedMinkowski', ([], {}), '()\n', (5873, 5875), False, 'from psiz.keras.layers.distances.minkowski import WeightedMinkowski\n'), ((9305, 9324), 'psiz.keras.layers.distances.minkowski.WeightedMinkowski', 'WeightedMinkowski', ([], {}), '()\n', (9322, 9324), False, 'from psiz.keras.layers.distances.minkowski import WeightedMinkowski\n'), ((10875, 10887), 'tensorflow.rank', 'tf.rank', (['z_0'], {}), '(z_0)\n', (10882, 10887), True, 'import tensorflow as tf\n'), ((10890, 10908), 'tensorflow.rank', 'tf.rank', (['attention'], {}), '(attention)\n', (10897, 10908), True, 'import tensorflow as tf\n'), ((3162, 3190), 'tensorflow.keras.initializers.Ones', 'tf.keras.initializers.Ones', ([], {}), '()\n', (3188, 3190), True, 'import tensorflow as tf\n'), ((3268, 3287), 'numpy.ones', 'np.ones', (['self.n_dim'], {}), '(self.n_dim)\n', (3275, 3287), True, 'import numpy as np\n'), ((3331, 3376), 'psiz.keras.initializers.RandomAttention', 'pk_initializers.RandomAttention', (['alpha', 'scale'], {}), '(alpha, scale)\n', (3362, 3376), True, 'import psiz.keras.initializers as pk_initializers\n'), ((4372, 4382), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4380, 4382), True, 'from tensorflow.python.keras import backend as K\n'), ((5176, 5236), 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self.embeddings_initializer'], {}), '(self.embeddings_initializer)\n', (5207, 5236), True, 'import tensorflow as tf\n'), ((5292, 5352), 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self.embeddings_regularizer'], {}), '(self.embeddings_regularizer)\n', (5323, 5352), True, 'import tensorflow as tf\n'), ((5407, 5465), 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self.embeddings_constraint'], {}), '(self.embeddings_constraint)\n', (5437, 5465), True, 'import tensorflow as tf\n'), ((7880, 7932), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.distance'], {}), '(self.distance)\n', (7917, 7932), True, 'import tensorflow as tf\n'), ((7960, 8014), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.similarity'], {}), '(self.similarity)\n', 
(7997, 8014), True, 'import tensorflow as tf\n'), ((12326, 12379), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.attention'], {}), '(self.attention)\n', (12363, 12379), True, 'import tensorflow as tf\n'), ((12405, 12457), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.distance'], {}), '(self.distance)\n', (12442, 12457), True, 'import tensorflow as tf\n'), ((12485, 12539), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.similarity'], {}), '(self.similarity)\n', (12522, 12539), True, 'import tensorflow as tf\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Kulado Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import kulado
import kulado.runtime
from .. import utilities, tables
class GetAccountResult:
"""
A collection of values returned by getAccount.
"""
def __init__(__self__, account_endpoint=None, location=None, name=None, pool_allocation_mode=None, primary_access_key=None, resource_group_name=None, secondary_access_key=None, storage_account_id=None, tags=None, id=None):
if account_endpoint and not isinstance(account_endpoint, str):
raise TypeError("Expected argument 'account_endpoint' to be a str")
__self__.account_endpoint = account_endpoint
"""
The account endpoint used to interact with the Batch service.
"""
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
"""
The Azure Region in which this Batch account exists.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
The Batch account name.
"""
if pool_allocation_mode and not isinstance(pool_allocation_mode, str):
raise TypeError("Expected argument 'pool_allocation_mode' to be a str")
__self__.pool_allocation_mode = pool_allocation_mode
"""
The pool allocation mode configured for this Batch account.
"""
if primary_access_key and not isinstance(primary_access_key, str):
raise TypeError("Expected argument 'primary_access_key' to be a str")
__self__.primary_access_key = primary_access_key
"""
The Batch account primary access key.
"""
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if secondary_access_key and not isinstance(secondary_access_key, str):
raise TypeError("Expected argument 'secondary_access_key' to be a str")
__self__.secondary_access_key = secondary_access_key
"""
The Batch account secondary access key.
"""
if storage_account_id and not isinstance(storage_account_id, str):
raise TypeError("Expected argument 'storage_account_id' to be a str")
__self__.storage_account_id = storage_account_id
"""
The ID of the Storage Account used for this Batch account.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
A map of tags assigned to the Batch account.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_account(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Batch Account.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/d/batch_account.html.markdown.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__ret__ = await kulado.runtime.invoke('azure:batch/getAccount:getAccount', __args__, opts=opts)
return GetAccountResult(
account_endpoint=__ret__.get('accountEndpoint'),
location=__ret__.get('location'),
name=__ret__.get('name'),
pool_allocation_mode=__ret__.get('poolAllocationMode'),
primary_access_key=__ret__.get('primaryAccessKey'),
resource_group_name=__ret__.get('resourceGroupName'),
secondary_access_key=__ret__.get('secondaryAccessKey'),
storage_account_id=__ret__.get('storageAccountId'),
tags=__ret__.get('tags'),
id=__ret__.get('id'))
|
[
"kulado.runtime.invoke"
] |
[((3673, 3752), 'kulado.runtime.invoke', 'kulado.runtime.invoke', (['"""azure:batch/getAccount:getAccount"""', '__args__'], {'opts': 'opts'}), "('azure:batch/getAccount:getAccount', __args__, opts=opts)\n", (3694, 3752), False, 'import kulado\n')]
|
import os.path as osp
import numpy as np
import math
from tqdm import tqdm
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from torchvision import transforms, datasets
from ofa.utils import AverageMeter, accuracy
from ofa.model_zoo import ofa_specialized
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
import copy
import random
def evaluate_ofa_resnet_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['w']) == 6 and len(net_config['e']) == 18 and len(net_config['d']) == 5
ofa_net.set_active_subnet(w=net_config['w'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, 224, batch_size)
top1 = validate(subnet, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_resnet_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['w']) == 6 and len(net_config1['e']) == 18 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(w=net_config1['w'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, 224, batch_size)
ofa_net.set_active_subnet(w=net_config2['w'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, 224, batch_size)
# assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['ks']) == 20 and len(net_config['e']) == 20 and len(net_config['d']) == 5
ofa_net.set_active_subnet(ks=net_config['ks'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, net_config['r'][0], batch_size)
top1 = validate(subnet, path, net_config['r'][0], data_loader, batch_size, device)
return top1
def evaluate_ofa_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['ks']) == 20 and len(net_config1['e']) == 20 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(ks=net_config1['ks'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, net_config1['r'][0], batch_size)
ofa_net.set_active_subnet(ks=net_config2['ks'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, net_config2['r'][0], batch_size)
assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, net_config2['r'][0], data_loader, batch_size, device)
return top1
def calib_bn(net, path, image_size, batch_size, num_images=2000):
# print('Creating dataloader for resetting BN running statistics...')
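    # Re-estimate BatchNorm running statistics on a small random subset of the
    # training images so they match the currently sampled sub-network.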
dataset = datasets.ImageFolder(
osp.join(
path,
'train'),
transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
transforms.ToTensor(),
transforms.Normalize(
mean=[
0.485,
0.456,
0.406],
std=[
0.229,
0.224,
0.225]
),
])
)
chosen_indexes = np.random.choice(list(range(len(dataset))), num_images)
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sub_sampler,
batch_size=batch_size,
num_workers=16,
pin_memory=True,
drop_last=False,
)
# print('Resetting BN running statistics (this may take 10-20 seconds)...')
set_running_statistics(net, data_loader)
def ensemble_validate(nets, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
print('use cuda')
for net in nets:
net = torch.nn.DataParallel(net).to(device)
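            # NOTE: rebinding the loop variable discards the DataParallel wrapper;
            # the underlying modules are moved to `device` in place, but the entries
            # of `nets` are not wrapped for multi-GPU execution.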
else:
for net in nets:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
for net in nets:
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
n = len(nets)
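                # Output-averaging ensemble: sum the logits produced by every
                # subnet and divide by the number of subnets before computing
                # the loss and accuracy.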
output = 0
for i, net in enumerate(nets):
if i == 0:
output =net(images)
else:
output+=net(images)
output = output/n
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.3f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def validate(net, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
net = torch.nn.DataParallel(net).to(device)
else:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
output = net(images)
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.1f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def evaluate_ofa_specialized(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
def select_platform_name():
valid_platform_name = [
'pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops'
]
print("Please select a hardware platform from ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
while True:
platform_name = input()
platform_name = platform_name.lower()
if platform_name in valid_platform_name:
return platform_name
print("Platform name is invalid! Please select in ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
def select_netid(platform_name):
platform_efficiency_map = {
'pixel1': {
143: 'pixel1_lat@[email protected]_finetune@75',
132: 'pixel1_lat@[email protected]_finetune@75',
79: 'pixel1_lat@[email protected]_finetune@75',
58: 'pixel1_lat@[email protected]_finetune@75',
40: 'pixel1_lat@[email protected]_finetune@25',
28: 'pixel1_lat@[email protected]_finetune@25',
20: 'pixel1_lat@[email protected]_finetune@25',
},
'pixel2': {
62: 'pixel2_lat@[email protected]_finetune@25',
50: 'pixel2_lat@[email protected]_finetune@25',
35: 'pixel2_lat@[email protected]_finetune@25',
25: 'pixel2_lat@[email protected]_finetune@25',
},
'note10': {
64: 'note10_lat@[email protected]_finetune@75',
50: 'note10_lat@[email protected]_finetune@75',
41: 'note10_lat@[email protected]_finetune@75',
30: 'note10_lat@[email protected]_finetune@75',
22: 'note10_lat@[email protected]_finetune@25',
16: 'note10_lat@[email protected]_finetune@25',
11: 'note10_lat@[email protected]_finetune@25',
8: 'note10_lat@[email protected]_finetune@25',
},
'note8': {
65: 'note8_lat@[email protected]_finetune@25',
49: 'note8_lat@[email protected]_finetune@25',
31: 'note8_lat@[email protected]_finetune@25',
22: 'note8_lat@[email protected]_finetune@25',
},
's7edge': {
88: 's7edge_lat@[email protected]_finetune@25',
58: 's7edge_lat@[email protected]_finetune@25',
41: 's7edge_lat@[email protected]_finetune@25',
29: 's7edge_lat@[email protected]_finetune@25',
},
'lg-g8': {
24: 'LG-G8_lat@[email protected]_finetune@25',
16: 'LG-G8_lat@[email protected]_finetune@25',
11: 'LG-G8_lat@[email protected]_finetune@25',
8: 'LG-G8_lat@[email protected]_finetune@25',
},
'1080ti': {
27: '1080ti_gpu64@[email protected]_finetune@25',
22: '1080ti_gpu64@[email protected]_finetune@25',
15: '1080ti_gpu64@[email protected]_finetune@25',
12: '1080ti_gpu64@[email protected]_finetune@25',
},
'v100': {
11: 'v100_gpu64@[email protected]_finetune@25',
9: 'v100_gpu64@[email protected]_finetune@25',
6: 'v100_gpu64@[email protected]_finetune@25',
5: 'v100_gpu64@[email protected]_finetune@25',
},
'tx2': {
96: 'tx2_gpu16@[email protected]_finetune@25',
80: 'tx2_gpu16@[email protected]_finetune@25',
47: 'tx2_gpu16@[email protected]_finetune@25',
35: 'tx2_gpu16@[email protected]_finetune@25',
},
'cpu': {
17: 'cpu_lat@[email protected]_finetune@25',
15: 'cpu_lat@[email protected]_finetune@25',
11: 'cpu_lat@[email protected]_finetune@25',
10: 'cpu_lat@[email protected]_finetune@25',
},
'flops': {
595: 'flops@[email protected]_finetune@75',
482: 'flops@[email protected]_finetune@75',
389: 'flops@[email protected]_finetune@75',
}
}
sub_efficiency_map = platform_efficiency_map[platform_name]
if not platform_name == 'flops':
print("Now, please specify a latency constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'ms. (Please just input the number.) \n')
else:
print("Now, please specify a FLOPs constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'MFLOPs. (Please just input the number.) \n')
while True:
efficiency_constraint = input()
if not efficiency_constraint.isdigit():
print('Sorry, please input an integer! \n')
continue
efficiency_constraint = int(efficiency_constraint)
if not efficiency_constraint in sub_efficiency_map.keys():
print('Sorry, please choose a value from: ', sorted(list(sub_efficiency_map.keys())), '.\n')
continue
return sub_efficiency_map[efficiency_constraint]
if not ensemble:
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
validate(net, path, image_size, data_loader, batch_size, device)
else:
nets = []
for i in range(2):
print('{}model'.format(i))
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
nets.append(net)
ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
return net_id
net_id = ['pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'flops@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', ]
def evaluate_ofa_space(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
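    # Pull the top-1 accuracy substring (e.g. '80.4') out of each net id so the
    # ids can be sorted by accuracy below.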
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
for i in range(1, n):
for j in range(i):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return net_id[best_team[0]], net_id[best_team[1]]
def evaluate_ofa_best_acc_team(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
i = n-1
for j in range(18, n):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
def evaluate_ofa_random_sample(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
acc_list = []
space = []
best_team =[]
for k in range(20):
nets = []
team = []
i = random.randint(0, n-1)
j = (i + random.randint(1, n-1)) % n
print('i:{} j:{}'.format(i, j))
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
acc_list.append(acc)
if acc>best_acc:
best_acc=acc
best_team = team
avg_acc = np.mean(acc_list)
std_acc = np.std(acc_list, ddof=1)
var_acc = np.var(acc_list)
print("avg{} var{} std{}".format(avg_acc, std_acc, var_acc))
print('best_random_team best_acc{}'.format(best_team, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
sort_net_id=['tx2_gpu16@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
             'cpu_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
             'tx2_gpu16@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', '1080ti_gpu 64@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75']
|
[
"torch.nn.CrossEntropyLoss",
"ofa.model_zoo.ofa_specialized",
"numpy.array",
"torchvision.transforms.ColorJitter",
"copy.deepcopy",
"numpy.mean",
"ofa.utils.accuracy",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"random.randint",
"torchvision.transforms.RandomHorizontalFlip",
"ofa.imagenet_classification.elastic_nn.utils.set_running_statistics",
"torchvision.transforms.Normalize",
"numpy.std",
"torchvision.transforms.CenterCrop",
"math.ceil",
"os.path.join",
"ofa.utils.AverageMeter",
"numpy.var"
] |
[((4567, 4607), 'ofa.imagenet_classification.elastic_nn.utils.set_running_statistics', 'set_running_statistics', (['net', 'data_loader'], {}), '(net, data_loader)\n', (4589, 4607), False, 'from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics\n'), ((5384, 5398), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5396, 5398), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((5410, 5424), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5422, 5424), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((5436, 5450), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5448, 5450), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7349, 7363), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7361, 7363), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7375, 7389), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7387, 7389), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7401, 7415), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7413, 7415), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((17124, 17145), 'copy.deepcopy', 'copy.deepcopy', (['net_id'], {}), '(net_id)\n', (17137, 17145), False, 'import copy\n'), ((18434, 18455), 'copy.deepcopy', 'copy.deepcopy', (['net_id'], {}), '(net_id)\n', (18447, 18455), False, 'import copy\n'), ((19766, 19787), 'copy.deepcopy', 'copy.deepcopy', (['net_id'], {}), '(net_id)\n', (19779, 19787), False, 'import copy\n'), ((20719, 20736), 'numpy.mean', 'np.mean', (['acc_list'], {}), '(acc_list)\n', (20726, 20736), True, 'import numpy as np\n'), ((20751, 20775), 'numpy.std', 'np.std', (['acc_list'], {'ddof': '(1)'}), '(acc_list, ddof=1)\n', (20757, 20775), True, 'import numpy as np\n'), ((20790, 20806), 'numpy.var', 'np.var', (['acc_list'], {}), '(acc_list)\n', (20796, 20806), True, 'import numpy as np\n'), ((3559, 3582), 'os.path.join', 'osp.join', (['path', '"""train"""'], {}), "(path, 'train')\n", (3567, 3582), True, 'import os.path as osp\n'), ((14066, 14113), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'net_id', 'pretrained': '(True)'}), '(net_id=net_id, pretrained=True)\n', (14081, 14113), False, 'from ofa.model_zoo import ofa_specialized\n'), ((17088, 17105), 'numpy.array', 'np.array', (['net_acc'], {}), '(net_acc)\n', (17096, 17105), True, 'import numpy as np\n'), ((18398, 18415), 'numpy.array', 'np.array', (['net_acc'], {}), '(net_acc)\n', (18406, 18415), True, 'import numpy as np\n'), ((18784, 18838), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[j]', 'pretrained': '(True)'}), '(net_id=new_net_id[j], pretrained=True)\n', (18799, 18838), False, 'from ofa.model_zoo import ofa_specialized\n'), ((18890, 18944), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[i]', 'pretrained': '(True)'}), '(net_id=new_net_id[i], pretrained=True)\n', (18905, 18944), False, 'from ofa.model_zoo import ofa_specialized\n'), ((19730, 19747), 'numpy.array', 'np.array', (['net_acc'], {}), '(net_acc)\n', (19738, 19747), True, 'import numpy as np\n'), ((20059, 20083), 'random.randint', 'random.randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (20073, 20083), False, 'import random\n'), ((20239, 20293), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[j]', 'pretrained': '(True)'}), '(net_id=new_net_id[j], pretrained=True)\n', (20254, 20293), False, 'from ofa.model_zoo import ofa_specialized\n'), ((20345, 20399), 
'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[i]', 'pretrained': '(True)'}), '(net_id=new_net_id[i], pretrained=True)\n', (20360, 20399), False, 'from ofa.model_zoo import ofa_specialized\n'), ((5032, 5065), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (5053, 5065), False, 'from torchvision import transforms, datasets\n'), ((5075, 5096), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5094, 5096), False, 'from torchvision import transforms, datasets\n'), ((5106, 5181), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (5126, 5181), False, 'from torchvision import transforms, datasets\n'), ((5268, 5289), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5287, 5289), True, 'import torch.nn as nn\n'), ((7026, 7059), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (7047, 7059), False, 'from torchvision import transforms, datasets\n'), ((7069, 7090), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7088, 7090), False, 'from torchvision import transforms, datasets\n'), ((7100, 7175), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7120, 7175), False, 'from torchvision import transforms, datasets\n'), ((7262, 7283), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7281, 7283), True, 'import torch.nn as nn\n'), ((14411, 14458), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'net_id', 'pretrained': '(True)'}), '(net_id=net_id, pretrained=True)\n', (14426, 14458), False, 'from ofa.model_zoo import ofa_specialized\n'), ((17508, 17562), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[j]', 'pretrained': '(True)'}), '(net_id=new_net_id[j], pretrained=True)\n', (17523, 17562), False, 'from ofa.model_zoo import ofa_specialized\n'), ((17622, 17676), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[i]', 'pretrained': '(True)'}), '(net_id=new_net_id[i], pretrained=True)\n', (17637, 17676), False, 'from ofa.model_zoo import ofa_specialized\n'), ((3650, 3690), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['image_size'], {}), '(image_size)\n', (3678, 3690), False, 'from torchvision import transforms, datasets\n'), ((3704, 3737), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3735, 3737), False, 'from torchvision import transforms, datasets\n'), ((3751, 3814), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(32.0 / 255.0)', 'saturation': '(0.5)'}), '(brightness=32.0 / 255.0, saturation=0.5)\n', (3773, 3814), False, 'from torchvision import transforms, datasets\n'), ((3826, 3847), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3845, 3847), False, 'from torchvision import transforms, datasets\n'), ((3861, 3936), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3881, 3936), False, 'from torchvision import 
transforms, datasets\n'), ((6121, 6158), 'ofa.utils.accuracy', 'accuracy', (['output', 'labels'], {'topk': '(1, 5)'}), '(output, labels, topk=(1, 5))\n', (6129, 6158), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7840, 7877), 'ofa.utils.accuracy', 'accuracy', (['output', 'labels'], {'topk': '(1, 5)'}), '(output, labels, topk=(1, 5))\n', (7848, 7877), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((20099, 20123), 'random.randint', 'random.randint', (['(1)', '(n - 1)'], {}), '(1, n - 1)\n', (20113, 20123), False, 'import random\n'), ((4991, 5020), 'math.ceil', 'math.ceil', (['(image_size / 0.875)'], {}), '(image_size / 0.875)\n', (5000, 5020), False, 'import math\n'), ((6985, 7014), 'math.ceil', 'math.ceil', (['(image_size / 0.875)'], {}), '(image_size / 0.875)\n', (6994, 7014), False, 'import math\n')]
|
from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.cm as cm
import numpy as np
#-------------------------------------------------------------
def plot_subfigure(axis, array, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, cmin, cmax, cmap):
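    # Draw every cell on the yCell > 0 hemisphere as a polygon in the x-z plane,
    # colored by `array`, onto the supplied axis.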
xMin = 1.0e30
xMax = -1.0e30
yMin = 1.0e30
yMax = -1.0e30
cmap = plt.get_cmap(cmap)
patches = []
colors = []
for iCell in range(0,nCells):
if (yCell[iCell] > 0.0):
vertices = []
for iVertexOnCell in range(0,nEdgesOnCell[iCell]):
iVertex = verticesOnCell[iCell,iVertexOnCell]
vertices.append((xVertex[iVertex],zVertex[iVertex]))
colors.append(array[iCell])
patches.append(Polygon(vertices))
xMin = min(xMin,xVertex[iVertex])
xMax = max(xMax,xVertex[iVertex])
yMin = min(yMin,zVertex[iVertex])
yMax = max(yMax,zVertex[iVertex])
pc = PatchCollection(patches, cmap=cmap)
pc.set_array(np.array(colors))
pc.set_clim(cmin, cmax)
axis.add_collection(pc)
axis.set_xlim(xMin,xMax)
axis.set_ylim(yMin,yMax)
axis.set_aspect("equal")
axis.ticklabel_format(style='plain')
axis.tick_params(axis='x', \
which='both', \
bottom=False, \
top=False, \
labelbottom=False)
axis.tick_params(axis='y', \
which='both', \
left=False, \
right=False, \
labelleft=False)
#-------------------------------------------------------------
def plot_testcase():
nGrids = [2562,10242,40962,163842]
testTypes = ["cosine_bell","slotted_cylinder"]
methods = ["IR","IR","upwind"]
iTimes = [0,-1,-1]
for nGrid in nGrids:
print("nGrid: ", nGrid)
fig, axes = plt.subplots(3,4)
iTestType = -1
for testType in testTypes:
iTestType += 1
print(" Test type: ", testType)
iMethod = -1
for method, iTime in zip(methods,iTimes):
iMethod += 1
print(" Method: ", method, ", iTime: ", iTime)
filenamein = "./output_%s_%s_%i/output.2000.nc" %(method,testType,nGrid)
filein = Dataset(filenamein,"r")
nCells = len(filein.dimensions["nCells"])
nEdgesOnCell = filein.variables["nEdgesOnCell"][:]
verticesOnCell = filein.variables["verticesOnCell"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
zCell = filein.variables["zCell"][:]
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
zVertex = filein.variables["zVertex"][:]
verticesOnCell[:] = verticesOnCell[:] - 1
iceAreaCategory = filein.variables["iceAreaCategory"][:]
filein.close()
iceAreaCell = np.sum(iceAreaCategory,axis=(2,3))
plot_subfigure(axes[iMethod,iTestType*2], iceAreaCell[iTime], nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, 0.0, 1.0, "viridis")
iceAreaCellDiff = iceAreaCell[iTime] - iceAreaCell[0]
if (iMethod != 0):
plot_subfigure(axes[iMethod,iTestType*2+1], iceAreaCellDiff, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, -1.0, 1.0, "bwr")
else:
axes[iMethod,iTestType*2+1].axis('off')
plt.savefig("advection_%6.6i.png" %(nGrid),dpi=300)
plt.cla()
plt.close(fig)
#-------------------------------------------------------------------------------
if __name__ == "__main__":
plot_testcase()
|
[
"matplotlib.pyplot.savefig",
"netCDF4.Dataset",
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.get_cmap"
] |
[((505, 523), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (517, 523), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1167), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'cmap': 'cmap'}), '(patches, cmap=cmap)\n', (1147, 1167), False, 'from matplotlib.collections import PatchCollection\n'), ((1185, 1201), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (1193, 1201), True, 'import numpy as np\n'), ((2065, 2083), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(4)'], {}), '(3, 4)\n', (2077, 2083), True, 'import matplotlib.pyplot as plt\n'), ((3858, 3909), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('advection_%6.6i.png' % nGrid)"], {'dpi': '(300)'}), "('advection_%6.6i.png' % nGrid, dpi=300)\n", (3869, 3909), True, 'import matplotlib.pyplot as plt\n'), ((3918, 3927), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3925, 3927), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3950), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3945, 3950), True, 'import matplotlib.pyplot as plt\n'), ((917, 934), 'matplotlib.patches.Polygon', 'Polygon', (['vertices'], {}), '(vertices)\n', (924, 934), False, 'from matplotlib.patches import Polygon\n'), ((2507, 2531), 'netCDF4.Dataset', 'Dataset', (['filenamein', '"""r"""'], {}), "(filenamein, 'r')\n", (2514, 2531), False, 'from netCDF4 import Dataset\n'), ((3254, 3290), 'numpy.sum', 'np.sum', (['iceAreaCategory'], {'axis': '(2, 3)'}), '(iceAreaCategory, axis=(2, 3))\n', (3260, 3290), True, 'import numpy as np\n')]
|
import glob
import json
import os
import subprocess
import time
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
import geopandas as gpd
import rasterio
import numpy as np
from shapely.geometry import Polygon
class PipelineError(RuntimeError):
def __init__(self, message):
self.message = message
def listlike(arg):
'''Checks whether an argument is list-like, returns boolean'''
return not hasattr(arg, "strip") and (hasattr(arg, "__getitem__")
or hasattr(arg, "__iter__"))
def clean_dir(dir_to_clean, file_extensions):
'''Deletes files with specified extension(s) from a directory.
    This function is intended to help clean up outputs from command line
tools that we do not want to keep. Files to be deleted will be
identified using a wildcard with that file extension in dir_to_clean.
Parameters
----------
dir_to_clean: string, path
path to directory to delete files from
    file_extensions: string or list-like of strings
file extensions that will be used for identifying files to remove,
such as ['.tfw', '.kml'].
'''
if listlike(file_extensions):
for ext in file_extensions:
to_rem = glob.glob(os.path.join(dir_to_clean, '*{}'.format(ext)))
for file in to_rem:
os.remove(file)
print("Removed {:,d} files with extension {}.".format(
len(to_rem), ext))
    elif type(file_extensions) == str:
        to_rem = glob.glob(os.path.join(dir_to_clean, '*{}'.format(file_extensions)))
        for file in to_rem:
            os.remove(file)
        print("Removed {:,d} files with extension {}.".format(
            len(to_rem), file_extensions))
    else:
        raise TypeError(
            'file_extensions needs to be a string or list-like of strings.')
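# Illustrative usage sketch for clean_dir; the directory and extensions below
# are hypothetical, and the call is left as a comment so importing this module
# stays side-effect free:
#     >>> clean_dir('/tmp/lidar_outputs', ['.tfw', '.kml'])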
def clean_buffer_polys(poly_shp,
tile_shp,
odir,
simp_tol=None,
simp_topol=None):
"""Removes polygons within the buffer zone of a tile.
    This function removes polygons from a shapefile that fall in the buffered
    area of a point cloud tile. When building footprints or tree crowns (for
    example) are delineated from a point cloud, a buffer around the tile is
    generally used to avoid edge effects. This tool computes the centroid of
each polygon and determines whether it falls within the bounds of the
unbuffered tile. It outputs a new shapefile containing only those polygons
whose centroids fall within the unbuffered tile.
The polygons may be simplified using optional arguments simp_tol and
simp_topol to reduce the number of points that define their boundaries.
Parameters
----------
    poly_shp: string, path to shapefile (required)
A shapefile containing the polygons delineated within a buffered tile.
tile_shp: string, path to shapefile (required)
A shapefile containing the bounds of the tile WITHOUT buffers
odir: string, path to directory (required)
Path to the output directory for the new shapefile
    simp_tol: numeric (optional)
        Tolerance level for simplification. All points within a simplified
        geometry will be no more than simp_tol from the original.
    simp_topol: boolean (optional)
Whether or not to preserve topology of polygons. If False, a quicker
algorithm will be used, but may produce self-intersecting or otherwise
invalid geometries.
"""
fname = os.path.basename(poly_shp)
outfile = os.path.join(odir, fname)
os.makedirs(odir, exist_ok=True)
tile_boundary = gpd.read_file(tile_shp)
polys = gpd.read_file(poly_shp)
# boolean indicator of whether each polygon falls within tile boundary
clean_polys_ix = polys.centroid.within(tile_boundary.loc[0].geometry)
# retrieve the polygons within the boundary
clean_polys = polys[clean_polys_ix]
if simp_tol:
clean_polys = clean_polys.simplify(simp_tol, simp_topol)
if len(clean_polys) > 0:
clean_polys.to_file(outfile)
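# Illustrative sketch of clean_buffer_polys; all paths are hypothetical and the
# simplification arguments are shown only to demonstrate the optional keywords:
#     >>> clean_buffer_polys('crowns_buffered.shp', 'tile_bounds.shp',
#     ...                    'cleaned/', simp_tol=0.5, simp_topol=True)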
def clip_tile_from_shp(in_raster, in_shp, odir, buffer=0):
'''Clips a raster image to the bounding box of a shapefile.
The input raster will be clipped using a rasterio command line tool. The
output raster will have the same name and file type as the input raster, and
will be written to the output directory, odir. The process is executed using
subprocess.run().
Parameters
----------
in_raster: string, path to file
raster image to be clipped
in_shp: string, path to file
shapefile from which bounding box is calculated to clip the raster
odir: string, path
output directory where clipped raster will be stored
buffer: numeric
additional buffer to add to total bounding box of shapefile when
clipping the raster
Returns
-------
proc_clip: CompletedProcess
The result of executing subprocess.run using the rio clip command.
'''
basename = os.path.basename(in_raster)
# read the shapefile using geopandas and calculate its bounds
gdf = gpd.read_file(in_shp)
tile_bnds = ' '.join(str(x) for x in gdf.buffer(buffer).total_bounds)
# create the output directory if it doesn't already exist
os.makedirs(odir, exist_ok=True)
outfile = os.path.join(odir, basename)
# clip the raster
proc_clip = subprocess.run(
['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return proc_clip
def convert_project(infile, outfile, crs):
'''Converts a raster to another format and specifies its projection.
Uses rasterio command line tool executed using subprocess. The file
generated will have the same name and be in the same folder as the input
file.
Parameters
----------
infile: string, path to file
input raster to be converted
outfile: string, path to file
output raster to be generated
crs: string
specification of coordinate reference system to use following rasterio
command line tool (RIO) formatting (e.g., 'EPSG:3857')
Returns
-------
proc_convert: CompletedProcess
result of executing subprocess.run using rio convert
proc_project: CompletedProcess
result of executing subprocess.run using rio edit-info
'''
# convert the file to the new format
proc_convert = subprocess.run(['rio', 'convert', infile, outfile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
# add the projection info
proc_project = subprocess.run(['rio', 'edit-info', '--crs', crs, outfile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return proc_convert, proc_project
def validation_summary(xml_dir, verbose=False):
'''
Generates a summary of validation results for a directory of lidar files
Parameters
----------
xml_dir : string, path to directory
directory containing xml files produced by LASvalidate
verbose : boolean
        whether or not to include the messages describing why any files
        produced a warning or failed validation.
Returns
-------
summary_report : a printed report
'''
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
passed = 0
warnings = 0
failed = 0
parse_errors = 0
warning_messages = []
failed_messages = []
for validation_report in xmls:
try:
tile_id = os.path.basename(validation_report).split('.')[0]
tree = ET.parse(validation_report)
root = tree.getroot()
result = root.find('report').find('summary').text.strip()
if result == 'pass':
passed += 1
else:
variable = root.find('report').find('details').find(
result).find('variable').text
note = root.find('report').find('details').find(result).find(
'note').text
if result == 'fail':
failed += 1
failed_messages.append('{} -> {} | {} : {}'.format(
tile_id, result, variable, note))
elif result == 'warning':
warnings += 1
warning_messages.append('{} -> {} | {} : {}'.format(
tile_id, result, variable, note))
except ParseError:
parse_errors += 1
summary = '''LASvalidate Summary
====================
Passed: {:,d}
Failed: {:,d}
Warnings: {:,d}
ParseErrors: {:,d}
'''.format(passed, failed, warnings, parse_errors)
details = '''Details
========
{}
{}
'''.format('\n'.join(failed_messages), '\n'.join(warning_messages))
print(summary)
if verbose:
print(details)
def move_invalid_tiles(xml_dir, dest_dir):
'''Moves lidar data that fail validation checks into a new directory
Parameters
----------
xml_dir : string, path to directory
where the xml reports produced by LASvalidate can be found
dest_dir : str, path to directory
where you would like the point cloud and associated files to be moved
Returns
-------
A printed statement about how many tiles were moved.
'''
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
invalid_dir = dest_dir
num_invalid = 0
for validation_report in xmls:
tile_id = os.path.basename(validation_report).split('.')[0]
tree = ET.parse(validation_report)
root = tree.getroot()
result = root.find('report').find('summary').text.strip()
if result == 'fail':
# move the lidar file to a different folder
os.makedirs(invalid_dir, exist_ok=True)
for invalid_file in glob.glob(
os.path.join(xml_dir, tile_id + '*')):
basename = os.path.basename(invalid_file)
os.rename(invalid_file, os.path.join(invalid_dir, basename))
num_invalid += 1
print('Moved files for {} invalid tiles to {}'.format(
num_invalid, invalid_dir))
def get_bbox_as_poly(infile, epsg=None):
"""Uses PDAL's info tool to extract the bounding box of a file as a
shapely Polygon. If an EPSG code is provided, a GeoDataFrame is returned.
Parameters
----------
infile : str, path to file
path to input file that PDAL can read
epsg : int
EPSG code defining the coordinate reference system. Optional.
Returns
-------
bbox_poly : Polygon or GeoDataFrame
By default (no EPSG is provided), a shapely Polygon with the bounding
box as its coordinates is returned. If an EPSG code is specified,
bbox_poly is returned as a GeoPandas GeoDataFrame.
"""
result = subprocess.run(['pdal', 'info', infile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
json_result = json.loads(result.stdout.decode())
coords = json_result['stats']['bbox']['native']['boundary']['coordinates']
geometry = Polygon(*coords)
if epsg:
bbox_poly = gpd.GeoDataFrame(
geometry=[geometry], crs={'init': 'epsg:{}'.format(epsg)})
else:
bbox_poly = Polygon(*coords)
return bbox_poly
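# Illustrative sketch of get_bbox_as_poly; the file name and EPSG code are
# hypothetical and the call requires the PDAL command line tools on the PATH:
#     >>> poly = get_bbox_as_poly('tile.laz')              # shapely Polygon
#     >>> gdf = get_bbox_as_poly('tile.laz', epsg=26910)   # GeoDataFrame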
def fname(path):
"""returns the filename as basename split from extension.
Parameters
-----------
path : str, path to file
filepath from which filename will be sliced
Returns
--------
filename : str
name of file, split from extension
"""
filename = os.path.basename(path).split('.')[0]
return filename
def annulus(inner_radius, outer_radius, dtype=np.uint8):
"""Generates a flat, donut-shaped (annular) structuring element.
A pixel is within the neighborhood if the euclidean distance between
it and the origin falls between the inner and outer radii (inclusive).
Parameters
----------
inner_radius : int
The inner radius of the annular structuring element
outer_radius : int
The outer radius of the annular structuring element
dtype : data-type
The data type of the structuring element
Returns
-------
selem : ndarray
The structuring element where elements of the neighborhood are 1
and 0 otherwise
"""
L = np.arange(-outer_radius, outer_radius + 1)
X, Y = np.meshgrid(L, L)
selem = np.array(
((X**2 + Y**2) <= outer_radius**2) * (
(X**2 + Y**2) >= inner_radius**2),
dtype=dtype)
return selem
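# Worked example for annulus: with inner_radius=2 and outer_radius=5 the
# element spans 2 * outer_radius + 1 = 11 pixels per side, with ones only
# where 4 <= x**2 + y**2 <= 25.
#     >>> annulus(2, 5).shape
#     (11, 11)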
def inspect_failures(failed_dir):
"""Prints error messages reported for tiles that failed in the lidar
processing pipeline.
Parameters
----------
failed_dir : string, path to directory
path to directory containing text files indicating any tiles which
failed processing
"""
failed = glob.glob(os.path.join(failed_dir, '*.txt'))
for filename in failed:
with open(filename) as f:
print([line for line in f.readlines() if line.rstrip() != ''])
print('----------------------')
def processing_summary(all_tiles, already_finished, processing_tiles,
finished_dir, failed_dir, start_time):
"""Prints a summary indicating progress of a lidar processing pipeline.
Parameters
----------
all_tiles : list-like
all tiles within a lidar acquisition
already_finished : list-like
tiles which were successfully processed in a previous execution of the
processing pipeline
processing_tiles : list-like
tiles which are being processed during the currently executing pipeline
finished_dir : string, path to directory
path to directory containing text files indicating any tiles which have
finished processing
failed_dir : string, path to directory
path to directory containing text files indicating any tiles which
failed processing
start_time : float
time the pipeline execution began, produced by time.time()
"""
failed = glob.glob(os.path.join(failed_dir, '*.txt'))
finished = glob.glob(os.path.join(finished_dir, '*.txt'))
summary = '''
Processing Summary
-------------------
{:>5,d} tiles in acquisition
{:>5,d} tiles previously finished in acquisition
{:>5,d} tiles being processed in this run
{:>5,d} tiles from this run finished
{:>5,d} tiles failed
'''.format(
len(all_tiles), len(already_finished), len(processing_tiles),
len(finished) - (len(all_tiles) - len(processing_tiles)), len(failed))
total_percent_unfinished = int(70 * (1 - len(finished) / len(all_tiles)))
total_percent_finished = int(70 * len(finished) / len(all_tiles))
total_percent_failed = int(70 * len(failed) / len(all_tiles))
this_run_unfinished = int(70 - 70*(len(finished) - (len(all_tiles) - \
len(processing_tiles))) / len(processing_tiles))
this_run_finished = int(70*(len(finished) - (len(all_tiles) - \
len(processing_tiles))) / len(processing_tiles))
progress_bars = '|' + '=' * this_run_finished + ' '* this_run_unfinished +\
'!' * total_percent_failed + '| {:.1%} this run\n'.format((len(finished)\
- (len(all_tiles) - len(processing_tiles))) / len(processing_tiles)) + \
'|' + '=' * total_percent_finished + ' ' * total_percent_unfinished + '!' \
* total_percent_failed + '| {:.1%} total'.format(len(finished) / \
len(all_tiles))
print(summary)
print(progress_bars)
time_to_complete(start_time, len(processing_tiles),
len(finished) - (len(all_tiles) - len(processing_tiles)))
def print_dhms(s):
"""Prints number of days, hours, minutes, and seconds
represented by number of seconds provided as input.
Parameters
----------
s : numeric
seconds
"""
days = s // (24 * 3600)
s = s % (24 * 3600)
hours = s // 3600
s %= 3600
minutes = s // 60
s %= 60
seconds = s
if days > 0:
print(f'{days:2.0f}d {hours:2.0f}h {minutes:2.0f}m {seconds:2.0f}s')
elif hours > 0:
print(f' {hours:2.0f}h {minutes:2.0f}m {seconds:2.0f}s')
else:
print(f' {minutes:2.0f}m {seconds:2.0f}s')
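# Worked example for print_dhms: 3725 seconds is 0 days, 1 hour, 2 minutes and
# 5 seconds, so print_dhms(3725) takes the hours branch and prints a line
# reporting 1h 2m 5s (padded for alignment with the days format).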
def time_to_complete(start_time, num_jobs, jobs_completed):
"""Prints elapsed time and estimated time of completion.
Parameters
----------
start_time : float
time the pipeline execution began, produced by time.time()
num_jobs : int
total number of jobs to be completed
jobs_completed : int
number of jobs completed so far
"""
if jobs_completed == 0:
print('\nNo jobs completed yet.')
else:
time_now = time.time()
elapsed = time_now - start_time
prop_complete = jobs_completed / num_jobs
est_completion = elapsed / prop_complete
time_left = est_completion - elapsed
print('\nelapsed: ', end='\t')
print_dhms(elapsed)
print('remaining: ', end='\t')
print_dhms(time_left)
def make_buffered_fishnet(xmin, ymin, xmax, ymax, crs, spacing=1000,
buffer=50):
"""Makes a GeoDataFrame with a fishnet grid that has overlapping edges.
Converts an existing lidar tiling scheme into one that has overlapping
tiles and which is aligned with a grid based on the spacing parameter.
Parameters
----------
xmin, ymin, xmax, ymax : numeric
Values indicating the extent of the existing lidar data.
crs : Coordinate Reference System
Must be readable by GeoPandas to create a GeoDataFrame.
spacing : int
Length and width of tiles in new tiling scheme prior to buffering
buffer : int
Amount of overlap between neighboring tiles.
"""
xmin, ymin = (
np.floor(np.array([xmin, ymin]) // spacing) * spacing).astype(int)
xmax, ymax = (
np.ceil(np.array([xmax, ymax]) // spacing) * spacing).astype(int) + spacing
xx, yy = np.meshgrid(
np.arange(xmin, xmax + spacing, spacing),
np.arange(ymin, ymax + spacing, spacing))
xx_leftbuff = xx[:, :-1] - buffer
xx_rightbuff = xx[:, 1:] + buffer
yy_downbuff = yy[:-1, :] - buffer
yy_upbuff = yy[1:, :] + buffer
ll = np.stack((
xx_leftbuff[1:, :].ravel(), # skip top row
yy_downbuff[:, :-1].ravel())).T # skip right-most column
ul = np.stack((
xx_leftbuff[:-1, :].ravel(), # skip bottom row
yy_upbuff[:, :-1].ravel())).T # skip right-most column
ur = np.stack((
xx_rightbuff[:-1, :].ravel(), # skip bottom row
yy_upbuff[:, 1:].ravel())).T # skip left-most column
lr = np.stack((
xx_rightbuff[1:, :].ravel(), # skip top row
yy_downbuff[:, 1:].ravel())).T # skip left-most column
buff_fishnet = np.stack([ll, ul, ur, lr])
polys = [
Polygon(buff_fishnet[:, i, :]) for i in range(buff_fishnet.shape[1])
]
ll_names = [x for x in (ll + buffer).astype(int).astype(str)]
tile_ids = [
'_'.join(tile) + '_{}'.format(str(spacing)) for tile in ll_names
]
buff_fishnet_gdf = gpd.GeoDataFrame(geometry=polys, crs=crs)
buff_fishnet_gdf['tile_id'] = tile_ids
return buff_fishnet_gdf.set_index('tile_id')
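# Illustrative sketch of make_buffered_fishnet; the extent values and CRS are
# hypothetical. With the defaults it yields 1 km tiles aligned to a 1000 m
# grid, each expanded by 50 m on every side, indexed by tile_id values such as
# '552000_5274000_1000':
#     >>> grid = make_buffered_fishnet(552310, 5274120, 554980, 5276410,
#     ...                              crs='EPSG:26910')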
def get_intersecting_tiles(src_tiles, new_tiles):
"""Identifies tiles from src that intersect tiles in new_tiles.
This function is intended to identify the files which should be read for
retiling a lidar acquisition into the new_tiles layout.
src_tiles is expected to have a 'file_name' field.
Parameters
----------
src_tiles : GeoDataFrame
Original tiling scheme for lidar acquisition
new_tiles : GeoDataFrame
New tiling scheme for lidar acquisition, such as one created by the
make_buffered_fishnet function
Returns
-------
joined_tiles : GeoDataFrame
Each row shows a tile from new_tiles that intersected with one or more
tiles from src_tiles. The list of tiles from src_tiles that intersect
each tile in new_tiles are formatted as a space-delimited string.
"""
joined = gpd.sjoin(new_tiles, src_tiles)
joined_tiles = joined.groupby(level=0)['file_name'].apply(list).apply(
' '.join).to_frame()
joined_tiles.index.name = 'tile_id'
joined_tiles = joined_tiles.rename({
'file_name': 'intersecting_files'
},
axis=1)
return joined_tiles
def parse_coords_from_tileid(tile_id):
"""Get the coordinates of the lower left corner of the tile, assuming the
tile has been named in the pattern {XMIN}_{YMIN}_{LENGTH}.
Parameters
----------
tile_id : string
assumed tile_id follows the naming convention of {LLX}_{LLY}_{LENGTH}
where:
LLX = x-coordinate of lower-left corner of tile (in projected units)
LLY = y-coordinate of lower-left corner of tile (in projected units)
LENGTH = length of the raster (in projected units), assumed to be a
square tile shape
Returns
-------
llx, lly, length : int
x- and y- coordinates of lower-left corner and length of raster
"""
tile_parts = tile_id.split('_')
if len(tile_parts) == 2:
llx, lly = [int(coord) for coord in tile_parts]
length = 1000 # assumed tile width if not explicit in tile_id
elif len(tile_parts) == 3:
llx, lly, length = [int(coord) for coord in tile_parts]
return llx, lly, length
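# Worked example: parse_coords_from_tileid('552000_5274000_1000') returns
# (552000, 5274000, 1000); a two-part id such as '552000_5274000' falls back
# to the assumed 1000 m tile length.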
|
[
"geopandas.sjoin",
"xml.etree.ElementTree.parse",
"geopandas.read_file",
"os.makedirs",
"subprocess.run",
"os.path.join",
"numpy.array",
"shapely.geometry.Polygon",
"numpy.stack",
"os.path.basename",
"time.time",
"numpy.meshgrid",
"geopandas.GeoDataFrame",
"numpy.arange",
"os.remove"
] |
[((3566, 3592), 'os.path.basename', 'os.path.basename', (['poly_shp'], {}), '(poly_shp)\n', (3582, 3592), False, 'import os\n'), ((3607, 3632), 'os.path.join', 'os.path.join', (['odir', 'fname'], {}), '(odir, fname)\n', (3619, 3632), False, 'import os\n'), ((3637, 3669), 'os.makedirs', 'os.makedirs', (['odir'], {'exist_ok': '(True)'}), '(odir, exist_ok=True)\n', (3648, 3669), False, 'import os\n'), ((3691, 3714), 'geopandas.read_file', 'gpd.read_file', (['tile_shp'], {}), '(tile_shp)\n', (3704, 3714), True, 'import geopandas as gpd\n'), ((3727, 3750), 'geopandas.read_file', 'gpd.read_file', (['poly_shp'], {}), '(poly_shp)\n', (3740, 3750), True, 'import geopandas as gpd\n'), ((5096, 5123), 'os.path.basename', 'os.path.basename', (['in_raster'], {}), '(in_raster)\n', (5112, 5123), False, 'import os\n'), ((5200, 5221), 'geopandas.read_file', 'gpd.read_file', (['in_shp'], {}), '(in_shp)\n', (5213, 5221), True, 'import geopandas as gpd\n'), ((5363, 5395), 'os.makedirs', 'os.makedirs', (['odir'], {'exist_ok': '(True)'}), '(odir, exist_ok=True)\n', (5374, 5395), False, 'import os\n'), ((5410, 5438), 'os.path.join', 'os.path.join', (['odir', 'basename'], {}), '(odir, basename)\n', (5422, 5438), False, 'import os\n'), ((5477, 5603), 'subprocess.run', 'subprocess.run', (["['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n", (5491, 5603), False, 'import subprocess\n'), ((6542, 6645), 'subprocess.run', 'subprocess.run', (["['rio', 'convert', infile, outfile]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['rio', 'convert', infile, outfile], stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n", (6556, 6645), False, 'import subprocess\n'), ((6759, 6871), 'subprocess.run', 'subprocess.run', (["['rio', 'edit-info', '--crs', crs, outfile]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['rio', 'edit-info', '--crs', crs, outfile], stderr=\n subprocess.PIPE, stdout=subprocess.PIPE)\n", (6773, 6871), False, 'import subprocess\n'), ((11001, 11094), 'subprocess.run', 'subprocess.run', (["['pdal', 'info', infile]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['pdal', 'info', infile], stderr=subprocess.PIPE, stdout=\n subprocess.PIPE)\n", (11015, 11094), False, 'import subprocess\n'), ((11295, 11311), 'shapely.geometry.Polygon', 'Polygon', (['*coords'], {}), '(*coords)\n', (11302, 11311), False, 'from shapely.geometry import Polygon\n'), ((12570, 12612), 'numpy.arange', 'np.arange', (['(-outer_radius)', '(outer_radius + 1)'], {}), '(-outer_radius, outer_radius + 1)\n', (12579, 12612), True, 'import numpy as np\n'), ((12624, 12641), 'numpy.meshgrid', 'np.meshgrid', (['L', 'L'], {}), '(L, L)\n', (12635, 12641), True, 'import numpy as np\n'), ((12654, 12761), 'numpy.array', 'np.array', (['((X ** 2 + Y ** 2 <= outer_radius ** 2) * (X ** 2 + Y ** 2 >= inner_radius **\n 2))'], {'dtype': 'dtype'}), '((X ** 2 + Y ** 2 <= outer_radius ** 2) * (X ** 2 + Y ** 2 >= \n inner_radius ** 2), dtype=dtype)\n', (12662, 12761), True, 'import numpy as np\n'), ((19114, 19140), 'numpy.stack', 'np.stack', (['[ll, ul, ur, lr]'], {}), '([ll, ul, ur, lr])\n', (19122, 19140), True, 'import numpy as np\n'), ((19425, 19466), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': 'polys', 'crs': 'crs'}), '(geometry=polys, crs=crs)\n', (19441, 19466), True, 'import geopandas as gpd\n'), ((20428, 
20459), 'geopandas.sjoin', 'gpd.sjoin', (['new_tiles', 'src_tiles'], {}), '(new_tiles, src_tiles)\n', (20437, 20459), True, 'import geopandas as gpd\n'), ((7475, 7505), 'os.path.join', 'os.path.join', (['xml_dir', '"""*.xml"""'], {}), "(xml_dir, '*.xml')\n", (7487, 7505), False, 'import os\n'), ((9495, 9525), 'os.path.join', 'os.path.join', (['xml_dir', '"""*.xml"""'], {}), "(xml_dir, '*.xml')\n", (9507, 9525), False, 'import os\n'), ((9694, 9721), 'xml.etree.ElementTree.parse', 'ET.parse', (['validation_report'], {}), '(validation_report)\n', (9702, 9721), True, 'import xml.etree.ElementTree as ET\n'), ((11465, 11481), 'shapely.geometry.Polygon', 'Polygon', (['*coords'], {}), '(*coords)\n', (11472, 11481), False, 'from shapely.geometry import Polygon\n'), ((13138, 13171), 'os.path.join', 'os.path.join', (['failed_dir', '"""*.txt"""'], {}), "(failed_dir, '*.txt')\n", (13150, 13171), False, 'import os\n'), ((14335, 14368), 'os.path.join', 'os.path.join', (['failed_dir', '"""*.txt"""'], {}), "(failed_dir, '*.txt')\n", (14347, 14368), False, 'import os\n'), ((14395, 14430), 'os.path.join', 'os.path.join', (['finished_dir', '"""*.txt"""'], {}), "(finished_dir, '*.txt')\n", (14407, 14430), False, 'import os\n'), ((16995, 17006), 'time.time', 'time.time', ([], {}), '()\n', (17004, 17006), False, 'import time\n'), ((18294, 18334), 'numpy.arange', 'np.arange', (['xmin', '(xmax + spacing)', 'spacing'], {}), '(xmin, xmax + spacing, spacing)\n', (18303, 18334), True, 'import numpy as np\n'), ((18344, 18384), 'numpy.arange', 'np.arange', (['ymin', '(ymax + spacing)', 'spacing'], {}), '(ymin, ymax + spacing, spacing)\n', (18353, 18384), True, 'import numpy as np\n'), ((19164, 19194), 'shapely.geometry.Polygon', 'Polygon', (['buff_fishnet[:, i, :]'], {}), '(buff_fishnet[:, i, :])\n', (19171, 19194), False, 'from shapely.geometry import Polygon\n'), ((7766, 7793), 'xml.etree.ElementTree.parse', 'ET.parse', (['validation_report'], {}), '(validation_report)\n', (7774, 7793), True, 'import xml.etree.ElementTree as ET\n'), ((9916, 9955), 'os.makedirs', 'os.makedirs', (['invalid_dir'], {'exist_ok': '(True)'}), '(invalid_dir, exist_ok=True)\n', (9927, 9955), False, 'import os\n'), ((1373, 1388), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1382, 1388), False, 'import os\n'), ((1643, 1658), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1652, 1658), False, 'import os\n'), ((10019, 10055), 'os.path.join', 'os.path.join', (['xml_dir', "(tile_id + '*')"], {}), "(xml_dir, tile_id + '*')\n", (10031, 10055), False, 'import os\n'), ((10085, 10115), 'os.path.basename', 'os.path.basename', (['invalid_file'], {}), '(invalid_file)\n', (10101, 10115), False, 'import os\n'), ((11810, 11832), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (11826, 11832), False, 'import os\n'), ((9629, 9664), 'os.path.basename', 'os.path.basename', (['validation_report'], {}), '(validation_report)\n', (9645, 9664), False, 'import os\n'), ((10156, 10191), 'os.path.join', 'os.path.join', (['invalid_dir', 'basename'], {}), '(invalid_dir, basename)\n', (10168, 10191), False, 'import os\n'), ((7697, 7732), 'os.path.basename', 'os.path.basename', (['validation_report'], {}), '(validation_report)\n', (7713, 7732), False, 'import os\n'), ((18098, 18120), 'numpy.array', 'np.array', (['[xmin, ymin]'], {}), '([xmin, ymin])\n', (18106, 18120), True, 'import numpy as np\n'), ((18191, 18213), 'numpy.array', 'np.array', (['[xmax, ymax]'], {}), '([xmax, ymax])\n', (18199, 18213), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import unittest
import sys
sys.path.insert(0, '.')
from random import choice
from PIL import Image
from stego.encoder import embed
from stego.decoder import extract, _decompress, IncorrectPassword
from stego.base import make_array, as_string, extract_metadata
images = ['test/rgba.png', 'test/cmyk.tiff', 'test/greyscale.bmp']
image = choice(images)
message = b'Pixels -> smallest unit(small colored square) that constitutes an images.'
key = b'my_secret_key'
def test_embed(message, password):
imageobj = Image.open(image)
embed(imageobj, message, password)
def test_extract(password):
imageobj = Image.open(image)
img_data = make_array(imageobj.getdata())
exif = extract_metadata(img_data)
content = as_string(img_data[slice(24, exif.size)])
if password:
content = _decompress(content, key=password)
else:
content = _decompress(content)
return content
class SampleTestMessage(unittest.TestCase):
def test_message(self):
test_embed(message, None)
content = test_extract(None)
self.assertEqual(message, content)
def test_message_with_encryption(self):
test_embed(message,key)
content = test_extract(key)
self.assertEqual(message, content)
self.assertRaises(IncorrectPassword,test_extract, b'random')
if __name__ == '__main__':
unittest.main()
|
[
"sys.path.insert",
"random.choice",
"PIL.Image.open",
"stego.decoder._decompress",
"stego.encoder.embed",
"unittest.main",
"stego.base.extract_metadata"
] |
[((52, 75), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""."""'], {}), "(0, '.')\n", (67, 75), False, 'import sys\n'), ((363, 377), 'random.choice', 'choice', (['images'], {}), '(images)\n', (369, 377), False, 'from random import choice\n'), ((537, 554), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (547, 554), False, 'from PIL import Image\n'), ((556, 590), 'stego.encoder.embed', 'embed', (['imageobj', 'message', 'password'], {}), '(imageobj, message, password)\n', (561, 590), False, 'from stego.encoder import embed\n'), ((632, 649), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (642, 649), False, 'from PIL import Image\n'), ((701, 727), 'stego.base.extract_metadata', 'extract_metadata', (['img_data'], {}), '(img_data)\n', (717, 727), False, 'from stego.base import make_array, as_string, extract_metadata\n'), ((1293, 1308), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1306, 1308), False, 'import unittest\n'), ((807, 841), 'stego.decoder._decompress', '_decompress', (['content'], {'key': 'password'}), '(content, key=password)\n', (818, 841), False, 'from stego.decoder import extract, _decompress, IncorrectPassword\n'), ((861, 881), 'stego.decoder._decompress', '_decompress', (['content'], {}), '(content)\n', (872, 881), False, 'from stego.decoder import extract, _decompress, IncorrectPassword\n')]
|
import pandas as pd
from rpy2 import robjects
from epysurv.simulation.utils import add_date_time_index_to_frame, r_list_to_frame
def test_add_date_time_index_to_frame():
df = add_date_time_index_to_frame(pd.DataFrame({"a": [1, 2, 3]}))
freq = pd.infer_freq(df.index)
assert freq == "W-MON"
def test_r_list_to_frame():
example_r_list = robjects.r("simulated = list(n_cases = 1:10)")
as_frame = r_list_to_frame(example_r_list, ["n_cases"])
expected_frame = pd.DataFrame(
{"n_cases": list(range(1, 11)), "timestep": list(range(1, 11))}
)
pd.testing.assert_frame_equal(as_frame, expected_frame)
|
[
"pandas.infer_freq",
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"epysurv.simulation.utils.r_list_to_frame",
"rpy2.robjects.r"
] |
[((254, 277), 'pandas.infer_freq', 'pd.infer_freq', (['df.index'], {}), '(df.index)\n', (267, 277), True, 'import pandas as pd\n'), ((356, 402), 'rpy2.robjects.r', 'robjects.r', (['"""simulated = list(n_cases = 1:10)"""'], {}), "('simulated = list(n_cases = 1:10)')\n", (366, 402), False, 'from rpy2 import robjects\n'), ((418, 462), 'epysurv.simulation.utils.r_list_to_frame', 'r_list_to_frame', (['example_r_list', "['n_cases']"], {}), "(example_r_list, ['n_cases'])\n", (433, 462), False, 'from epysurv.simulation.utils import add_date_time_index_to_frame, r_list_to_frame\n'), ((582, 637), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['as_frame', 'expected_frame'], {}), '(as_frame, expected_frame)\n', (611, 637), True, 'import pandas as pd\n'), ((211, 241), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2, 3]}"], {}), "({'a': [1, 2, 3]})\n", (223, 241), True, 'import pandas as pd\n')]
|
import os
import pytest
from dashboard_generator import DashboardGenerator
def test_generate_widget_ensure_return_value_is_dict(env_variables):
response = DashboardGenerator()._generate_widget(y=1, period=60, pipeline='foo')
assert type(response) == dict
def test_generate_widget_ensure_values_are_used_properly_in_widget(env_variables):
y = 1
period = 60
pipeline = 'foo'
dimension = 'PipelineName'
response = DashboardGenerator()._generate_widget(y, period, pipeline)
for metric in response['properties']['metrics']:
if 'SuccessCount' in metric:
assert metric == [
'Pipeline',
'SuccessCount',
dimension,
pipeline,
{
'color': '#000000',
'label': 'Success Count',
'stat': 'Sum'
}
]
assert response['properties']['region'] == os.environ['AWS_REGION']
assert response['properties']['title'] == pipeline
assert response['properties']['period'] == period
|
[
"dashboard_generator.DashboardGenerator"
] |
[((162, 182), 'dashboard_generator.DashboardGenerator', 'DashboardGenerator', ([], {}), '()\n', (180, 182), False, 'from dashboard_generator import DashboardGenerator\n'), ((445, 465), 'dashboard_generator.DashboardGenerator', 'DashboardGenerator', ([], {}), '()\n', (463, 465), False, 'from dashboard_generator import DashboardGenerator\n')]
|
"""
Tests for opencadd.structure.superposition.engines.mda
"""
import pytest
from opencadd.structure.core import Structure
from opencadd.structure.superposition.engines.mda import MDAnalysisAligner
def test_mda_instantiation():
aligner = MDAnalysisAligner()
def test_mda_calculation():
aligner = MDAnalysisAligner()
structures = [Structure.from_pdbid(pdb_id) for pdb_id in ["4u3y", "4u40"]]
result = aligner.calculate(structures)
# Check API compliance
assert "superposed" in result
assert "scores" in result
assert "rmsd" in result["scores"]
assert "metadata" in result
# Check RMSD values
    # pytest.approx must sit on one side of a comparison; a bare
    # `assert pytest.approx(...)` is always truthy and therefore checks nothing.
    # The absolute tolerance below is an assumption, not from the original test.
    assert result["scores"]["rmsd"] == pytest.approx(1.989, abs=0.01)
|
[
"pytest.approx",
"opencadd.structure.core.Structure.from_pdbid",
"opencadd.structure.superposition.engines.mda.MDAnalysisAligner"
] |
[((245, 264), 'opencadd.structure.superposition.engines.mda.MDAnalysisAligner', 'MDAnalysisAligner', ([], {}), '()\n', (262, 264), False, 'from opencadd.structure.superposition.engines.mda import MDAnalysisAligner\n'), ((309, 328), 'opencadd.structure.superposition.engines.mda.MDAnalysisAligner', 'MDAnalysisAligner', ([], {}), '()\n', (326, 328), False, 'from opencadd.structure.superposition.engines.mda import MDAnalysisAligner\n'), ((748, 794), 'pytest.approx', 'pytest.approx', (["result['scores']['rmsd']", '(1.989)'], {}), "(result['scores']['rmsd'], 1.989)\n", (761, 794), False, 'import pytest\n'), ((347, 375), 'opencadd.structure.core.Structure.from_pdbid', 'Structure.from_pdbid', (['pdb_id'], {}), '(pdb_id)\n', (367, 375), False, 'from opencadd.structure.core import Structure\n')]
|
import engine
print("Python: Script 2")
class Rotation(metaclass=engine.MetaComponent):
def __init__(self):
self.trans = 5
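# Color is assumed to be a component class registered by the embedding engine
# (for example in an earlier script); it is not defined in this file.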
result = engine.query(Color)
print("Python: Query colors from Script 2")
for c in result:
c.string()
print("--------------------")
|
[
"engine.query"
] |
[((147, 166), 'engine.query', 'engine.query', (['Color'], {}), '(Color)\n', (159, 166), False, 'import engine\n')]
|
import gzip, os, struct, zipfile, io
class SmartFileReader(object):
def __init__(self, file, *args, **kwargs):
if file[-3:]=='.gz':
with open(file, 'rb') as f:
f.seek(-4, 2)
self._filesize = struct.unpack('I', f.read(4))[0]
self.file = gzip.open(file, *args, **kwargs)
elif file[-4:]=='.zip':
zf = zipfile.ZipFile(file, 'r')
zf_info = zf.infolist()
if len(zf_info)!=1:
raise TypeError("zip archive files must contain a single member file for SmartFileReader")
zf_info = zf_info[0]
self.file = zf.open(zf_info.filename, 'r', *args, **kwargs)
self._filesize = zf_info.file_size
else:
self.file = open(file, 'rt', *args, **kwargs)
self._filesize = os.fstat(self.file.fileno()).st_size
def __getattr__(self, name):
return getattr(self.file, name)
def __setattr__(self, name, value):
if name in ['file', 'percentread', '_filesize']:
return object.__setattr__(self, name, value)
return setattr(self.file, name, value)
def __delattr__(self, name):
return delattr(self.file, name)
def percentread(self):
try:
return (float(self.file.tell())/float(self._filesize)*100)
except io.UnsupportedOperation:
return 1.0-(float(self.file._left)/float(self._filesize)*100)
def __iter__(self):
return self.file.__iter__()
def bytesread(self):
try:
b = float(self.file.tell())
except:
return "error in bytesread"
labels = ['B','KB','MB','GB','TB']
scale = 0
while scale < 4 and b > 1024:
b /= 1024
scale += 1
return "{:.2f}{}".format(b,labels[scale])
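# Illustrative usage sketch (the file name is hypothetical): SmartFileReader
# exposes plain, .gz and single-member .zip files behind one file-like object
# while tracking how much of the underlying file has been read:
#     >>> f = SmartFileReader('trips.csv.gz')
#     >>> for line in f:
#     ...     pass
#     >>> f.bytesread(), f.percentread()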
|
[
"zipfile.ZipFile",
"gzip.open"
] |
[((258, 290), 'gzip.open', 'gzip.open', (['file', '*args'], {}), '(file, *args, **kwargs)\n', (267, 290), False, 'import gzip, os, struct, zipfile, io\n'), ((325, 351), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file', '"""r"""'], {}), "(file, 'r')\n", (340, 351), False, 'import gzip, os, struct, zipfile, io\n')]
|
from pathlib import Path
from typing import Dict
from eodatasets3 import serialise
from .common import assert_same, dump_roundtrip
def test_valid_document_works(tmp_path: Path, example_metadata: Dict):
generated_doc = dump_roundtrip(example_metadata)
# Do a serialisation roundtrip and check that it's still identical.
reserialised_doc = dump_roundtrip(
serialise.to_doc(serialise.from_doc(generated_doc))
)
assert_same(generated_doc, reserialised_doc)
assert serialise.from_doc(generated_doc) == serialise.from_doc(reserialised_doc)
|
[
"eodatasets3.serialise.from_doc"
] |
[((498, 531), 'eodatasets3.serialise.from_doc', 'serialise.from_doc', (['generated_doc'], {}), '(generated_doc)\n', (516, 531), False, 'from eodatasets3 import serialise\n'), ((535, 571), 'eodatasets3.serialise.from_doc', 'serialise.from_doc', (['reserialised_doc'], {}), '(reserialised_doc)\n', (553, 571), False, 'from eodatasets3 import serialise\n'), ((395, 428), 'eodatasets3.serialise.from_doc', 'serialise.from_doc', (['generated_doc'], {}), '(generated_doc)\n', (413, 428), False, 'from eodatasets3 import serialise\n')]
|
import numpy as np
import matplotlib.pyplot as plt
####################
def merge_dicts(list_of_dicts):
results = {}
for d in list_of_dicts:
for key in d.keys():
if key in results.keys():
results[key].append(d[key])
else:
results[key] = [d[key]]
return results
####################
comp_pJ = 22. * 1e-12 / 32. / 16.
num_layers = 6
num_comparator = 8
results = np.load('results.npy', allow_pickle=True).item()
y_mean = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_std = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_pJ = np.zeros(shape=(2, 2, 2, 2, num_layers))
cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
nmac = np.zeros(shape=(2, 2, 2, 2, num_layers))
array = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_ron = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_roff = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_adc = np.zeros(shape=(2, 2, 2, 2, num_layers, num_comparator))
y_energy = np.zeros(shape=(2, 2, 2, 2, num_layers))
array_util = np.zeros(shape=(2, 2, 2, 2, num_layers))
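# All result arrays above are indexed as [skip][cards][alloc][profile][layer]
# (y_adc carries an extra trailing comparator axis); they are filled below by
# merging the per-layer dictionaries stored in results.npy.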
for key in sorted(results.keys()):
(skip, cards, alloc, profile) = key
alloc = 1 if alloc == 'block' else 0
layer_results = results[key]
max_cycle = 0
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_mean[skip][cards][alloc][profile][layer] = np.mean(rdict['mean'])
y_std[skip][cards][alloc][profile][layer] = np.mean(rdict['std'])
############################
y_ron[skip][cards][alloc][profile][layer] = np.sum(rdict['ron'])
y_roff[skip][cards][alloc][profile][layer] = np.sum(rdict['roff'])
y_adc[skip][cards][alloc][profile][layer] = np.sum(rdict['adc'], axis=0)
y_energy[skip][cards][alloc][profile][layer] += y_ron[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += y_roff[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += np.sum(y_adc[skip][cards][alloc][profile][layer] * np.array([1,2,3,4,5,6,7,8]) * comp_pJ)
y_mac_per_cycle[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / np.sum(rdict['cycle'])
y_mac_per_pJ[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / 1e12 / np.sum(y_energy[skip][cards][alloc][profile][layer])
############################
cycle[skip][cards][alloc][profile][layer] = np.mean(rdict['cycle'])
nmac[skip][cards][alloc][profile][layer] = np.mean(rdict['nmac'])
array[skip][cards][alloc][profile][layer] = np.mean(rdict['array'])
############################
max_cycle = max(max_cycle, np.mean(rdict['cycle']))
############################
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_cycle = np.mean(rdict['cycle'])
y_stall = np.mean(rdict['stall'])
y_array = np.mean(rdict['array'])
array_util[skip][cards][alloc][profile][layer] = (y_array * y_cycle - y_stall) / (y_array * max_cycle)
############################
####################
layers = np.array(range(1, 6+1))
skip_none = int(np.max(cycle[1, 0, 0, 0]))
skip_layer = int(np.max(cycle[1, 0, 0, 1]))
skip_block = int(np.max(cycle[1, 0, 1, 1]))
cards_none = int(np.max(cycle[1, 1, 0, 0]))
cards_layer = int(np.max(cycle[1, 1, 0, 1]))
cards_block = int(np.max(cycle[1, 1, 1, 1]))
height = [skip_none, skip_layer, skip_block, cards_none, cards_layer, cards_block]
x = ['skip/none', 'skip/layer', 'skip/block', 'cards/none', 'cards/layer', 'cards/block']
####################
plt.rcParams.update({'font.size': 12})
####################
plt.cla()
plt.clf()
plt.close()
plt.ylabel('# Cycles')
# plt.xlabel('Method')
plt.xticks(range(len(x)), x, rotation=45)
width = 0.2
plt.bar(x=x, height=height, width=width)
ax = plt.gca()
for i, h in enumerate(height):
# print (i, h)
ax.text(i - width, h + np.min(height)*0.02, str(h), fontdict={'size': 12})
fig = plt.gcf()
fig.set_size_inches(9, 5)
plt.tight_layout()
fig.savefig('cycles.png', dpi=300)
####################
|
[
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.zeros",
"matplotlib.pyplot.bar",
"numpy.sum",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"numpy.load",
"matplotlib.pyplot.cla"
] |
[((502, 542), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (510, 542), True, 'import numpy as np\n'), ((551, 591), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (559, 591), True, 'import numpy as np\n'), ((611, 651), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (619, 651), True, 'import numpy as np\n'), ((667, 707), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (675, 707), True, 'import numpy as np\n'), ((717, 757), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (725, 757), True, 'import numpy as np\n'), ((765, 805), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (773, 805), True, 'import numpy as np\n'), ((814, 854), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (822, 854), True, 'import numpy as np\n'), ((864, 904), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (872, 904), True, 'import numpy as np\n'), ((914, 954), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (922, 954), True, 'import numpy as np\n'), ((963, 1019), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers, num_comparator)'}), '(shape=(2, 2, 2, 2, num_layers, num_comparator))\n', (971, 1019), True, 'import numpy as np\n'), ((1031, 1071), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (1039, 1071), True, 'import numpy as np\n'), ((1086, 1126), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (1094, 1126), True, 'import numpy as np\n'), ((3861, 3899), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (3880, 3899), True, 'import matplotlib.pyplot as plt\n'), ((3923, 3932), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3930, 3932), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3942), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3940, 3942), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3954), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3952, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3956, 3978), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# Cycles"""'], {}), "('# Cycles')\n", (3966, 3978), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4098), 'matplotlib.pyplot.bar', 'plt.bar', ([], {'x': 'x', 'height': 'height', 'width': 'width'}), '(x=x, height=height, width=width)\n', (4065, 4098), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4114), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4112, 4114), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4260), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4258, 4260), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4305), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4303, 4305), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3438), 'numpy.max', 'np.max', (['cycle[1, 0, 0, 0]'], {}), '(cycle[1, 0, 0, 0])\n', (3419, 3438), True, 'import numpy as np\n'), ((3457, 3482), 'numpy.max', 'np.max', 
(['cycle[1, 0, 0, 1]'], {}), '(cycle[1, 0, 0, 1])\n', (3463, 3482), True, 'import numpy as np\n'), ((3501, 3526), 'numpy.max', 'np.max', (['cycle[1, 0, 1, 1]'], {}), '(cycle[1, 0, 1, 1])\n', (3507, 3526), True, 'import numpy as np\n'), ((3547, 3572), 'numpy.max', 'np.max', (['cycle[1, 1, 0, 0]'], {}), '(cycle[1, 1, 0, 0])\n', (3553, 3572), True, 'import numpy as np\n'), ((3592, 3617), 'numpy.max', 'np.max', (['cycle[1, 1, 0, 1]'], {}), '(cycle[1, 1, 0, 1])\n', (3598, 3617), True, 'import numpy as np\n'), ((3637, 3662), 'numpy.max', 'np.max', (['cycle[1, 1, 1, 1]'], {}), '(cycle[1, 1, 1, 1])\n', (3643, 3662), True, 'import numpy as np\n'), ((443, 484), 'numpy.load', 'np.load', (['"""results.npy"""'], {'allow_pickle': '(True)'}), "('results.npy', allow_pickle=True)\n", (450, 484), True, 'import numpy as np\n'), ((1495, 1517), 'numpy.mean', 'np.mean', (["rdict['mean']"], {}), "(rdict['mean'])\n", (1502, 1517), True, 'import numpy as np\n'), ((1570, 1591), 'numpy.mean', 'np.mean', (["rdict['std']"], {}), "(rdict['std'])\n", (1577, 1591), True, 'import numpy as np\n'), ((1691, 1711), 'numpy.sum', 'np.sum', (["rdict['ron']"], {}), "(rdict['ron'])\n", (1697, 1711), True, 'import numpy as np\n'), ((1765, 1786), 'numpy.sum', 'np.sum', (["rdict['roff']"], {}), "(rdict['roff'])\n", (1771, 1786), True, 'import numpy as np\n'), ((1839, 1867), 'numpy.sum', 'np.sum', (["rdict['adc']"], {'axis': '(0)'}), "(rdict['adc'], axis=0)\n", (1845, 1867), True, 'import numpy as np\n'), ((2588, 2611), 'numpy.mean', 'np.mean', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (2595, 2611), True, 'import numpy as np\n'), ((2663, 2685), 'numpy.mean', 'np.mean', (["rdict['nmac']"], {}), "(rdict['nmac'])\n", (2670, 2685), True, 'import numpy as np\n'), ((2738, 2761), 'numpy.mean', 'np.mean', (["rdict['array']"], {}), "(rdict['array'])\n", (2745, 2761), True, 'import numpy as np\n'), ((3068, 3091), 'numpy.mean', 'np.mean', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (3075, 3091), True, 'import numpy as np\n'), ((3110, 3133), 'numpy.mean', 'np.mean', (["rdict['stall']"], {}), "(rdict['stall'])\n", (3117, 3133), True, 'import numpy as np\n'), ((3152, 3175), 'numpy.mean', 'np.mean', (["rdict['array']"], {}), "(rdict['array'])\n", (3159, 3175), True, 'import numpy as np\n'), ((2291, 2312), 'numpy.sum', 'np.sum', (["rdict['nmac']"], {}), "(rdict['nmac'])\n", (2297, 2312), True, 'import numpy as np\n'), ((2315, 2337), 'numpy.sum', 'np.sum', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (2321, 2337), True, 'import numpy as np\n'), ((2428, 2480), 'numpy.sum', 'np.sum', (['y_energy[skip][cards][alloc][profile][layer]'], {}), '(y_energy[skip][cards][alloc][profile][layer])\n', (2434, 2480), True, 'import numpy as np\n'), ((2844, 2867), 'numpy.mean', 'np.mean', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (2851, 2867), True, 'import numpy as np\n'), ((2397, 2418), 'numpy.sum', 'np.sum', (["rdict['nmac']"], {}), "(rdict['nmac'])\n", (2403, 2418), True, 'import numpy as np\n'), ((4192, 4206), 'numpy.min', 'np.min', (['height'], {}), '(height)\n', (4198, 4206), True, 'import numpy as np\n'), ((2188, 2222), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8])\n', (2196, 2222), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# getversion.py - Parse version numbers from C header files.
#
import os
import re
import sys
import traceback
__all__ = ['Parser', 'Result']
class Result:
pass
class Parser:
def __init__(self):
self.patterns = {}
def search(self, define_name, value_name):
'Add the name of a define to the list of search pattenrs.'
self.patterns[define_name] = value_name
def parse(self, file):
'Parse the file, extracting defines into a Result object.'
stream = open(file, 'rt')
result = Result()
regex = re.compile(r'^\s*#\s*define\s+(\w+)\s+(\d+)')
for line in stream.readlines():
match = regex.match(line)
if match:
try:
name = self.patterns[match.group(1)]
except:
continue
setattr(result, name, int(match.group(2)))
stream.close()
return result
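# Illustrative sketch of the Parser API; the header path is a typical example,
# not taken from this script:
#     p = Parser()
#     p.search('SVN_VER_MAJOR', 'major')
#     r = p.parse('subversion/include/svn_version.h')
#     r.major  # integer parsed from the matching #define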
def svn_extractor(parser, include_file):
'''Pull values from svn.version.h'''
  parser.search('SVN_VER_MAJOR', 'major')
  parser.search('SVN_VER_MINOR', 'minor')
  parser.search('SVN_VER_PATCH', 'patch')
  try:
    r = parser.parse(include_file)
except IOError:
typ, val, tb = sys.exc_info()
msg = ''.join(traceback.format_exception_only(typ, val))
usage_and_exit(msg)
sys.stdout.write("%d.%d.%d" % (r.major, r.minor, r.patch))
def sqlite_extractor(parser, include_file):
'''Pull values from sqlite3.h'''
  parser.search('SQLITE_VERSION_NUMBER', 'version')
  try:
    r = parser.parse(include_file)
except IOError:
typ, val, tb = sys.exc_info()
msg = ''.join(traceback.format_exception_only(typ, val))
usage_and_exit(msg)
  # use floor division so the arithmetic stays integral under Python 3 as well
  major = r.version // 1000000
  minor = (r.version - (major * 1000000)) // 1000
  micro = (r.version - (major * 1000000) - (minor * 1000))
sys.stdout.write("%d.%d.%d" % (major, minor, micro))
extractors = {
'SVN' : svn_extractor,
# 'SQLITE' : sqlite_extractor, # not used
}
def usage_and_exit(msg):
if msg:
sys.stderr.write("%s\n\n" % msg)
sys.stderr.write("usage: %s [SVN|SQLITE] [header_file]\n" % \
os.path.basename(sys.argv[0]))
sys.stderr.flush()
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) == 3:
extractor = extractors[sys.argv[1]]
include_file = sys.argv[2]
else:
usage_and_exit("Incorrect number of arguments")
# Extract and print the version number
p = Parser()
extractor(p, include_file)
|
[
"traceback.format_exception_only",
"re.compile",
"sys.stderr.flush",
"sys.stderr.write",
"sys.exc_info",
"os.path.basename",
"sys.exit",
"sys.stdout.write"
] |
[((2031, 2089), 'sys.stdout.write', 'sys.stdout.write', (["('%d.%d.%d' % (r.major, r.minor, r.patch))"], {}), "('%d.%d.%d' % (r.major, r.minor, r.patch))\n", (2047, 2089), False, 'import sys\n'), ((2533, 2585), 'sys.stdout.write', 'sys.stdout.write', (["('%d.%d.%d' % (major, minor, micro))"], {}), "('%d.%d.%d' % (major, minor, micro))\n", (2549, 2585), False, 'import sys\n'), ((2850, 2868), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (2866, 2868), False, 'import sys\n'), ((2871, 2882), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2879, 2882), False, 'import sys\n'), ((1349, 1399), 're.compile', 're.compile', (['"""^\\\\s*#\\\\s*define\\\\s+(\\\\w+)\\\\s+(\\\\d+)"""'], {}), "('^\\\\s*#\\\\s*define\\\\s+(\\\\w+)\\\\s+(\\\\d+)')\n", (1359, 1399), False, 'import re\n'), ((2716, 2748), 'sys.stderr.write', 'sys.stderr.write', (["('%s\\n\\n' % msg)"], {}), "('%s\\n\\n' % msg)\n", (2732, 2748), False, 'import sys\n'), ((1929, 1943), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1941, 1943), False, 'import sys\n'), ((2293, 2307), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2305, 2307), False, 'import sys\n'), ((2817, 2846), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2833, 2846), False, 'import os\n'), ((1962, 2003), 'traceback.format_exception_only', 'traceback.format_exception_only', (['typ', 'val'], {}), '(typ, val)\n', (1993, 2003), False, 'import traceback\n'), ((2326, 2367), 'traceback.format_exception_only', 'traceback.format_exception_only', (['typ', 'val'], {}), '(typ, val)\n', (2357, 2367), False, 'import traceback\n')]
|
#!/usr/bin/python
import httplib
import random
import argparse
import sys
#Get options
parser = argparse.ArgumentParser(
description='Testing vote app')
parser.add_argument(
'-port',
type=int,
help='port of server',
default=8000)
parser.add_argument(
'-host',
type=str,
help='server name/ip',
default="localhost")
args = parser.parse_args()
#Color table
colorList = ["blue", "orange", "red", "green", "yellow" ]
colorSize = len(colorList) - 1
#Connect with server
conn = httplib.HTTPConnection(args.host, args.port)
#initial request
conn.request("GET", "/")
r1 = conn.getresponse()
#print(r1.status, r1.reason)
print(r1.read())
#vote loop
count = 0
while count < 100 :
count = count + 1
nColor = random.randint(0, colorSize)
conn.request("GET", "/v1/vote?color="+colorList[nColor])
r1 = conn.getresponse()
#print(r1.read())
print
# view current results
conn.request("GET", "/v1/listVotes")
r1 = conn.getresponse()
print(r1.read())
conn.request("GET", "/v1/listWorkers")
r1 = conn.getresponse()
print(r1.read())
conn.close()
|
[
"httplib.HTTPConnection",
"random.randint",
"argparse.ArgumentParser"
] |
[((97, 152), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Testing vote app"""'}), "(description='Testing vote app')\n", (120, 152), False, 'import argparse\n'), ((555, 599), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (577, 599), False, 'import httplib\n'), ((791, 819), 'random.randint', 'random.randint', (['(0)', 'colorSize'], {}), '(0, colorSize)\n', (805, 819), False, 'import random\n')]
|
from uuid import UUID
import django_rq
import logging
from datetime import datetime, timezone, timedelta
from django.core.mail import mail_managers
from django.db.models import Count
from django.db.models.functions import TruncDay
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render
from operator import itemgetter
from researcher_workspace.utils import offset_month_and_year
from vm_manager.models import Instance, Resize, Volume
from vm_manager.utils.utils import get_nectar
from vm_manager.vm_functions.resize_vm import downsize_expired_supersized_vms
from vm_manager.utils.Check_ResearchDesktop_Availability import \
check_availability
logger = logging.getLogger(__name__)
def test_function(request):
if not request.user.is_superuser:
raise Http404()
return HttpResponse(_generate_weekly_availability_report(),
content_type='text/plain')
def admin_worker(request):
if not request.user.is_superuser:
raise Http404()
return HttpResponse("do something", content_type='text/plain')
def db_check(request):
if not request.user.is_superuser:
raise Http404()
n = get_nectar()
nova_servers = n.nova.servers.list()
cinder_volumes = n.cinder.volumes.list()
db_deleted_instances = Instance.objects.exclude(deleted=None) \
.values_list('id', flat=True)
deleted_instances = [
(server.id, server.name, server.metadata.get('environment', ''))
for server in nova_servers
if UUID(server.id) in db_deleted_instances]
db_deleted_volumes = Volume.objects.exclude(deleted=None) \
.values_list('id', flat=True)
deleted_volumes = [
(volume.id, volume.name, volume.metadata.get('environment',
volume.name[-1]))
for volume in cinder_volumes
if UUID(volume.id) in db_deleted_volumes]
db_instances = Instance.objects.filter(deleted=None) \
.values_list('id', flat=True)
missing_instances = [
(server.id, server.name, server.metadata.get('environment', ''))
for server in nova_servers if UUID(server.id) not in db_instances]
db_volumes = Volume.objects.filter(deleted=None) \
.values_list('id', flat=True)
missing_volumes = [
(volume.id, volume.name, volume.metadata.get('environment',
volume.name[-1]))
for volume in cinder_volumes if UUID(volume.id) not in db_volumes]
return render(request, 'vm_manager/db_check.html',
{'missing_instances': missing_instances,
'missing_volumes': missing_volumes,
'deleted_instances': deleted_instances,
'deleted_volumes': deleted_volumes, })
def start_downsizing_cron_job(requesting_feature):
scheduler = django_rq.get_scheduler('default')
# Uncomment this line temporarily to activate this function
"""scheduler.cron("59 13 * * *", downsize_expired_supersized_vms, requesting_feature)"""
# Set cron job to 13:59 UTC as the server runs on UTC
# and that translates to 23:59 AEST or 0:59 AEDT (during daylight savings)
return scheduler.get_jobs(with_times=True)
def _generate_weekly_availability_report():
try:
availability = check_availability()
mail_managers("Weekly Availability Report", availability)
except Exception as e:
logger.error(
f"The Check_ResearchDesktop_Availability script returned: {e}.")
def vm_report_for_csv(reporting_months, operating_systems):
now = datetime.now(timezone.utc)
# A dict of zero values for the last year and this month so far
date_list = [
(offset_month_and_year(month_offset, now.month, now.year), 0)
for month_offset in range(reporting_months, 0, -1)]
start_date = datetime(day=1, month=date_list[0][0][0],
year=date_list[0][0][1], tzinfo=timezone.utc)
empty_date_dict = dict(date_list)
results = []
# table of peak number of simultaneous vms of each OS
data_lists = [
[operating_system, empty_date_dict.copy()]
for operating_system in operating_systems]
for operating_system, instance_count in data_lists:
date_counts = _get_vm_info(operating_system)['vm_count']
for date_count in date_counts:
date_count["simple_date"] = (
date_count["date"].month, date_count["date"].year)
for date in instance_count:
date_counts_from_this_month = [
date_count["count"] for date_count in date_counts
if date_count["simple_date"] == date]
if date_counts_from_this_month:
instance_count[date] = max(date_counts_from_this_month)
results.append({"name": "Peak VMs per month", "values": data_lists})
# table of number of resizes per month
data_lists = [
[operating_system, empty_date_dict.copy()]
for operating_system in operating_systems]
for operating_system, resize_count in data_lists:
resizes = Resize.objects.filter(
instance__boot_volume__operating_system=operating_system,
requested__gte=start_date)
for resize in resizes:
resize.start = (resize.requested.month
+ 12 * resize.requested.year)
if resize.expired():
resize.end = resize.expired()
else:
resize.end = datetime.now(timezone.utc)
resize.end = resize.end.month + 12 * resize.end.year
for (month, year) in resize_count.keys():
resize_count_month = month + 12 * year
for resize in resizes:
if resize.start <= resize_count_month <= resize.end:
resize_count[(month, year)] += 1
results.append({"name": "Boosts", "values": data_lists})
return results
def vm_report_for_page(operating_system):
vm_count = Instance.objects.filter(deleted=None,
boot_volume__operating_system=
operating_system).count()
vm_info = _get_vm_info(operating_system)
return {'vm_count': {operating_system: vm_count},
'vm_info': {operating_system: vm_info}}
def _get_vm_info(operating_system):
vms = Instance.objects.filter(boot_volume__operating_system=
operating_system).order_by('created')
error_dates = vms.filter(error_flag__isnull=False) \
.order_by('error_flag') \
.annotate(date=TruncDay('error_flag')) \
.values('date') \
.annotate(errored_count=Count('id')) \
.order_by('date')
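    # build +1/-1 events for created and deleted VMs; the running sum below gives the number of live VMs over time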
deleted = [
{'date': vm.deleted, 'count': -1}
for vm in vms.order_by('deleted') if vm.deleted]
created = [{'date': vm.created, 'count': 1} for vm in vms]
# `sorted` uses timsort, which means that for sorting two concatenated
# sorted lists, it actually just merges the two lists in O(n)
vm_count = sorted(created + deleted, key=itemgetter('date'))
count = 0
for date_obj in vm_count:
count += date_obj['count']
date_obj['count'] = count
resizes = Resize.objects.filter(instance__boot_volume__operating_system=
operating_system)
resize_list = [
resize.expired() for resize in resizes if resize.expired()]
downsized = [
{'date': expiry, 'count': -1} for expiry in sorted(resize_list)]
supersized = [
{'date': resize.requested, 'count': 1} for resize in resizes]
resize_count = sorted(downsized + supersized, key=itemgetter('date'))
count = 0
for date_obj in resize_count:
count += date_obj['count']
date_obj['count'] = count
return {'vm_count': vm_count,
'error_dates': error_dates,
'resizes': resize_count}
|
[
"logging.getLogger",
"django.db.models.Count",
"vm_manager.utils.utils.get_nectar",
"django.core.mail.mail_managers",
"operator.itemgetter",
"django_rq.get_scheduler",
"django.shortcuts.render",
"datetime.datetime",
"django.http.HttpResponse",
"vm_manager.models.Volume.objects.exclude",
"vm_manager.models.Instance.objects.filter",
"django.db.models.functions.TruncDay",
"vm_manager.models.Resize.objects.filter",
"vm_manager.models.Volume.objects.filter",
"uuid.UUID",
"researcher_workspace.utils.offset_month_and_year",
"datetime.datetime.now",
"vm_manager.utils.Check_ResearchDesktop_Availability.check_availability",
"vm_manager.models.Instance.objects.exclude",
"django.http.Http404"
] |
[((709, 736), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (726, 736), False, 'import logging\n'), ((1046, 1101), 'django.http.HttpResponse', 'HttpResponse', (['"""do something"""'], {'content_type': '"""text/plain"""'}), "('do something', content_type='text/plain')\n", (1058, 1101), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((1197, 1209), 'vm_manager.utils.utils.get_nectar', 'get_nectar', ([], {}), '()\n', (1207, 1209), False, 'from vm_manager.utils.utils import get_nectar\n'), ((2682, 2892), 'django.shortcuts.render', 'render', (['request', '"""vm_manager/db_check.html"""', "{'missing_instances': missing_instances, 'missing_volumes': missing_volumes,\n 'deleted_instances': deleted_instances, 'deleted_volumes': deleted_volumes}"], {}), "(request, 'vm_manager/db_check.html', {'missing_instances':\n missing_instances, 'missing_volumes': missing_volumes,\n 'deleted_instances': deleted_instances, 'deleted_volumes': deleted_volumes}\n )\n", (2688, 2892), False, 'from django.shortcuts import render\n'), ((3026, 3060), 'django_rq.get_scheduler', 'django_rq.get_scheduler', (['"""default"""'], {}), "('default')\n", (3049, 3060), False, 'import django_rq\n'), ((3765, 3791), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (3777, 3791), False, 'from datetime import datetime, timezone, timedelta\n'), ((4025, 4117), 'datetime.datetime', 'datetime', ([], {'day': '(1)', 'month': 'date_list[0][0][0]', 'year': 'date_list[0][0][1]', 'tzinfo': 'timezone.utc'}), '(day=1, month=date_list[0][0][0], year=date_list[0][0][1], tzinfo=\n timezone.utc)\n', (4033, 4117), False, 'from datetime import datetime, timezone, timedelta\n'), ((7473, 7552), 'vm_manager.models.Resize.objects.filter', 'Resize.objects.filter', ([], {'instance__boot_volume__operating_system': 'operating_system'}), '(instance__boot_volume__operating_system=operating_system)\n', (7494, 7552), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((819, 828), 'django.http.Http404', 'Http404', ([], {}), '()\n', (826, 828), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((1025, 1034), 'django.http.Http404', 'Http404', ([], {}), '()\n', (1032, 1034), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((1179, 1188), 'django.http.Http404', 'Http404', ([], {}), '()\n', (1186, 1188), False, 'from django.http import HttpResponse, HttpResponseRedirect, Http404\n'), ((3480, 3500), 'vm_manager.utils.Check_ResearchDesktop_Availability.check_availability', 'check_availability', ([], {}), '()\n', (3498, 3500), False, 'from vm_manager.utils.Check_ResearchDesktop_Availability import check_availability\n'), ((3509, 3566), 'django.core.mail.mail_managers', 'mail_managers', (['"""Weekly Availability Report"""', 'availability'], {}), "('Weekly Availability Report', availability)\n", (3522, 3566), False, 'from django.core.mail import mail_managers\n'), ((5269, 5380), 'vm_manager.models.Resize.objects.filter', 'Resize.objects.filter', ([], {'instance__boot_volume__operating_system': 'operating_system', 'requested__gte': 'start_date'}), '(instance__boot_volume__operating_system=\n operating_system, requested__gte=start_date)\n', (5290, 5380), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((1324, 1362), 'vm_manager.models.Instance.objects.exclude', 'Instance.objects.exclude', ([], {'deleted': 'None'}), '(deleted=None)\n', (1348, 1362), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((1654, 1690), 'vm_manager.models.Volume.objects.exclude', 'Volume.objects.exclude', ([], {'deleted': 'None'}), '(deleted=None)\n', (1676, 1690), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((2036, 2073), 'vm_manager.models.Instance.objects.filter', 'Instance.objects.filter', ([], {'deleted': 'None'}), '(deleted=None)\n', (2059, 2073), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((2333, 2368), 'vm_manager.models.Volume.objects.filter', 'Volume.objects.filter', ([], {'deleted': 'None'}), '(deleted=None)\n', (2354, 2368), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((3887, 3943), 'researcher_workspace.utils.offset_month_and_year', 'offset_month_and_year', (['month_offset', 'now.month', 'now.year'], {}), '(month_offset, now.month, now.year)\n', (3908, 3943), False, 'from researcher_workspace.utils import offset_month_and_year\n'), ((6156, 6246), 'vm_manager.models.Instance.objects.filter', 'Instance.objects.filter', ([], {'deleted': 'None', 'boot_volume__operating_system': 'operating_system'}), '(deleted=None, boot_volume__operating_system=\n operating_system)\n', (6179, 6246), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((6528, 6599), 'vm_manager.models.Instance.objects.filter', 'Instance.objects.filter', ([], {'boot_volume__operating_system': 'operating_system'}), '(boot_volume__operating_system=operating_system)\n', (6551, 6599), False, 'from vm_manager.models import Instance, Resize, Volume\n'), ((7325, 7343), 'operator.itemgetter', 'itemgetter', (['"""date"""'], {}), "('date')\n", (7335, 7343), False, 'from operator import itemgetter\n'), ((7912, 7930), 'operator.itemgetter', 'itemgetter', (['"""date"""'], {}), "('date')\n", (7922, 7930), False, 'from operator import itemgetter\n'), ((1587, 1602), 'uuid.UUID', 'UUID', (['server.id'], {}), '(server.id)\n', (1591, 1602), False, 'from uuid import UUID\n'), ((1977, 1992), 'uuid.UUID', 'UUID', (['volume.id'], {}), '(volume.id)\n', (1981, 1992), False, 'from uuid import UUID\n'), ((2278, 2293), 'uuid.UUID', 'UUID', (['server.id'], {}), '(server.id)\n', (2282, 2293), False, 'from uuid import UUID\n'), ((2635, 2650), 'uuid.UUID', 'UUID', (['volume.id'], {}), '(volume.id)\n', (2639, 2650), False, 'from uuid import UUID\n'), ((5667, 5693), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (5679, 5693), False, 'from datetime import datetime, timezone, timedelta\n'), ((6906, 6917), 'django.db.models.Count', 'Count', (['"""id"""'], {}), "('id')\n", (6911, 6917), False, 'from django.db.models import Count\n'), ((6796, 6818), 'django.db.models.functions.TruncDay', 'TruncDay', (['"""error_flag"""'], {}), "('error_flag')\n", (6804, 6818), False, 'from django.db.models.functions import TruncDay\n')]
|
import pytest
from common.common import NETTING_ACCOUNT
from fixture.application import Application
@pytest.fixture(scope="session")
def app(request):
base_url = request.config.getoption("--base_url")
fixture = Application(base_url)
fixture.wd.maximize_window()
fixture.wd.implicitly_wait(10)
yield fixture
fixture.destroy()
def pytest_addoption(parser):
parser.addoption(
"--base_url",
action="store",
default="https://trade.mql5.com/trade",
help="base_url",
)
@pytest.fixture()
def auth_netting(app):
if not app.auth.is_auth(NETTING_ACCOUNT.login):
app.auth.open()
app.auth.auth_terminal(NETTING_ACCOUNT.login, NETTING_ACCOUNT.password)
|
[
"pytest.fixture",
"fixture.application.Application"
] |
[((103, 134), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (117, 134), False, 'import pytest\n'), ((533, 549), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (547, 549), False, 'import pytest\n'), ((221, 242), 'fixture.application.Application', 'Application', (['base_url'], {}), '(base_url)\n', (232, 242), False, 'from fixture.application import Application\n')]
|
import os
import re
#hack for python2 support
try:
from .blkdiscoveryutil import *
except:
from blkdiscoveryutil import *
class Blkid(BlkDiscoveryUtil):
def parse_line(self,line):
details = {}
diskline = line.split(':',1)
if len(diskline) < 2:
return
path = diskline[0]
        for match in re.finditer(r'(\S+)="([^"]+)"', diskline[1]):
details[match.group(1)] = match.group(2)
return path, details
def find_disks(self,output):
disklist = []
blockdevices = []
for disk in os.listdir("/sys/block"):
blockdevices.append('/dev/' + disk)
for path, details in output.items():
if path in blockdevices:
disklist.append(path)
continue
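            # not a bare block device: strip the trailing partition suffix (e.g. "p1" or "1") to recover the parent disk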
            m1 = re.search(r'(p\d+$)', path)
            m2 = re.search(r'(\d+$)', path)
if not m2:
disklist.append(path)
continue
if m1:
match = m1
else:
match = m2
disk = path.rsplit(match.group(1))[0]
if disk in disklist:
continue
if not disk in blockdevices:
continue
disklist.append(disk)
return disklist
def details(self):
retval = {}
rawdata = self.call_blkid()
disklist = self.find_disks(rawdata)
#we need to call blkid with a disk to get the partition info, weird
for path in disklist:
output = self.call_blkid(path)
if not output.get(path):
continue
retval[path] = output[path]
return retval
def call_blkid(self,device=None):
retval = {}
self.subprocess_check_output(["blkid", '-g'])
cmdarray = ["blkid", '-o', 'full']
if device:
cmdarray.append(device)
rawoutput = self.subprocess_check_output(cmdarray)
        for line in rawoutput.splitlines():
            parsed = self.parse_line(line)
            if not parsed:
                continue
            path, details = parsed
            retval[path] = details
return self.stringify(retval)
if __name__ == '__main__':
import pprint
pp = pprint.PrettyPrinter(indent=4)
l = Blkid()
devdata = l.call_blkid()
pp.pprint(devdata)
disks = l.find_disks(devdata)
pp.pprint(disks)
details = l.details()
pp.pprint(details)
|
[
"os.listdir",
"re.finditer",
"pprint.PrettyPrinter",
"re.search"
] |
[((2183, 2213), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (2203, 2213), False, 'import pprint\n'), ((349, 395), 're.finditer', 're.finditer', (['"""(\\\\S+)\\\\="([^"]+)\\""""', 'diskline[1]'], {}), '(\'(\\\\S+)\\\\="([^"]+)"\', diskline[1])\n', (360, 395), False, 'import re\n'), ((581, 605), 'os.listdir', 'os.listdir', (['"""/sys/block"""'], {}), "('/sys/block')\n", (591, 605), False, 'import os\n'), ((817, 844), 're.search', 're.search', (['"""(p\\\\d+$)"""', 'path'], {}), "('(p\\\\d+$)', path)\n", (826, 844), False, 'import re\n'), ((860, 886), 're.search', 're.search', (['"""(\\\\d+$)"""', 'path'], {}), "('(\\\\d+$)', path)\n", (869, 886), False, 'import re\n')]
|
#!/usr/bin/python3
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import Any, Iterable, Optional, Type, Union
import paleomix.common.versions as versions
from paleomix.common.command import (
AtomicCmd,
InputFile,
OptionsType,
OutputFile,
ParallelCmds,
TempOutputFile,
)
from paleomix.node import CommandNode, Node, NodeError
from paleomix.nodes.bwa import (
_get_max_threads,
_get_node_description,
_new_cleanup_command,
)
BOWTIE2_VERSION = versions.Requirement(
call=("bowtie2", "--version"),
regexp=r"version (\d+\.\d+\.\d+)",
specifiers=">=2.3.0",
)
class Bowtie2IndexNode(CommandNode):
def __init__(self, input_file: str, dependencies: Iterable[Node] = ()):
command = _bowtie2_template(
(
"bowtie2-build",
InputFile(input_file),
TempOutputFile(input_file),
),
reference=input_file,
iotype=OutputFile,
)
CommandNode.__init__(
self,
command=command,
description="creating Bowtie2 index for %s" % (input_file,),
dependencies=dependencies,
)
class Bowtie2Node(CommandNode):
def __init__(
self,
input_file_1: str,
input_file_2: Optional[str],
output_file: str,
reference: str,
threads: int = 2,
mapping_options: OptionsType = {},
cleanup_options: OptionsType = {},
dependencies: Iterable[Node] = (),
):
aln = _bowtie2_template(
["bowtie2"],
reference=reference,
stdout=AtomicCmd.PIPE,
)
fixed_options: OptionsType = {
"--threads": _get_max_threads(reference, threads),
"-x": reference,
}
if input_file_1 and not input_file_2:
fixed_options["-U"] = input_file_1
elif input_file_1 and input_file_2:
fixed_options["-1"] = input_file_1
fixed_options["-2"] = input_file_2
else:
raise NodeError(
"Input 1, OR both input 1 and input 2 must "
"be specified for Bowtie2 node"
)
aln.merge_options(
user_options=mapping_options,
fixed_options=fixed_options,
)
cleanup = _new_cleanup_command(
stdin=aln,
in_reference=reference,
out_bam=output_file,
max_threads=fixed_options["--threads"],
paired_end=input_file_1 and input_file_2,
options=cleanup_options,
)
description = _get_node_description(
name="Bowtie2",
input_files_1=input_file_1,
input_files_2=input_file_2,
reference=reference,
)
CommandNode.__init__(
self,
command=ParallelCmds([aln, cleanup]),
description=description,
threads=threads,
dependencies=dependencies,
)
def _bowtie2_template(
call: Any,
reference: str,
iotype: Union[Type[InputFile], Type[OutputFile]] = InputFile,
**kwargs: Any
):
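    # the six Bowtie2 index files are registered as extra files of the command;
    # iotype decides whether they are treated as inputs (mapping) or outputs (index building)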
return AtomicCmd(
call,
extra_files=[
iotype(reference + postfix)
for postfix in (
".1.bt2",
".2.bt2",
".3.bt2",
".4.bt2",
".rev.1.bt2",
".rev.2.bt2",
)
],
requirements=[BOWTIE2_VERSION],
**kwargs
)
|
[
"paleomix.common.command.TempOutputFile",
"paleomix.nodes.bwa._get_max_threads",
"paleomix.common.command.InputFile",
"paleomix.nodes.bwa._new_cleanup_command",
"paleomix.node.NodeError",
"paleomix.node.CommandNode.__init__",
"paleomix.nodes.bwa._get_node_description",
"paleomix.common.command.ParallelCmds",
"paleomix.common.versions.Requirement"
] |
[((1544, 1661), 'paleomix.common.versions.Requirement', 'versions.Requirement', ([], {'call': "('bowtie2', '--version')", 'regexp': '"""version (\\\\d+\\\\.\\\\d+\\\\.\\\\d+)"""', 'specifiers': '""">=2.3.0"""'}), "(call=('bowtie2', '--version'), regexp=\n 'version (\\\\d+\\\\.\\\\d+\\\\.\\\\d+)', specifiers='>=2.3.0')\n", (1564, 1661), True, 'import paleomix.common.versions as versions\n'), ((2049, 2185), 'paleomix.node.CommandNode.__init__', 'CommandNode.__init__', (['self'], {'command': 'command', 'description': "('creating Bowtie2 index for %s' % (input_file,))", 'dependencies': 'dependencies'}), "(self, command=command, description=\n 'creating Bowtie2 index for %s' % (input_file,), dependencies=dependencies)\n", (2069, 2185), False, 'from paleomix.node import CommandNode, Node, NodeError\n'), ((3398, 3589), 'paleomix.nodes.bwa._new_cleanup_command', '_new_cleanup_command', ([], {'stdin': 'aln', 'in_reference': 'reference', 'out_bam': 'output_file', 'max_threads': "fixed_options['--threads']", 'paired_end': '(input_file_1 and input_file_2)', 'options': 'cleanup_options'}), "(stdin=aln, in_reference=reference, out_bam=output_file,\n max_threads=fixed_options['--threads'], paired_end=input_file_1 and\n input_file_2, options=cleanup_options)\n", (3418, 3589), False, 'from paleomix.nodes.bwa import _get_max_threads, _get_node_description, _new_cleanup_command\n'), ((3688, 3806), 'paleomix.nodes.bwa._get_node_description', '_get_node_description', ([], {'name': '"""Bowtie2"""', 'input_files_1': 'input_file_1', 'input_files_2': 'input_file_2', 'reference': 'reference'}), "(name='Bowtie2', input_files_1=input_file_1,\n input_files_2=input_file_2, reference=reference)\n", (3709, 3806), False, 'from paleomix.nodes.bwa import _get_max_threads, _get_node_description, _new_cleanup_command\n'), ((2783, 2819), 'paleomix.nodes.bwa._get_max_threads', '_get_max_threads', (['reference', 'threads'], {}), '(reference, threads)\n', (2799, 2819), False, 'from paleomix.nodes.bwa import _get_max_threads, _get_node_description, _new_cleanup_command\n'), ((1883, 1904), 'paleomix.common.command.InputFile', 'InputFile', (['input_file'], {}), '(input_file)\n', (1892, 1904), False, 'from paleomix.common.command import AtomicCmd, InputFile, OptionsType, OutputFile, ParallelCmds, TempOutputFile\n'), ((1922, 1948), 'paleomix.common.command.TempOutputFile', 'TempOutputFile', (['input_file'], {}), '(input_file)\n', (1936, 1948), False, 'from paleomix.common.command import AtomicCmd, InputFile, OptionsType, OutputFile, ParallelCmds, TempOutputFile\n'), ((3124, 3213), 'paleomix.node.NodeError', 'NodeError', (['"""Input 1, OR both input 1 and input 2 must be specified for Bowtie2 node"""'], {}), "(\n 'Input 1, OR both input 1 and input 2 must be specified for Bowtie2 node')\n", (3133, 3213), False, 'from paleomix.node import CommandNode, Node, NodeError\n'), ((3931, 3959), 'paleomix.common.command.ParallelCmds', 'ParallelCmds', (['[aln, cleanup]'], {}), '([aln, cleanup])\n', (3943, 3959), False, 'from paleomix.common.command import AtomicCmd, InputFile, OptionsType, OutputFile, ParallelCmds, TempOutputFile\n')]
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # code to silence the errors that appear on my MacBook
import tensorflow as tf
#using matrix
x_data = [[73., 80., 75.], [93., 88., 93.,], [89., 91., 90.], [96., 98., 100.], [73., 66., 70.]]
y_data = [[152.], [185.], [180.], [196.], [142.]]
X = tf.placeholder(tf.float32, shape=[None, 3])  # n samples will come in; TensorFlow expresses this as None
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.matmul(X, W)+b
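# mean squared error cost, minimised with plain gradient descent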
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
if step % 10 == 0:
print(step, "Cost:", cost_val, "\nPrediction:\n", hy_val)
|
[
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.matmul",
"tensorflow.square"
] |
[((270, 313), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 3]'}), '(tf.float32, shape=[None, 3])\n', (284, 313), True, 'import tensorflow as tf\n'), ((364, 407), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (378, 407), True, 'import tensorflow as tf\n'), ((613, 667), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (646, 667), True, 'import tensorflow as tf\n'), ((708, 720), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (718, 720), True, 'import tensorflow as tf\n'), ((425, 449), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 1]'], {}), '([3, 1])\n', (441, 449), True, 'import tensorflow as tf\n'), ((482, 503), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (498, 503), True, 'import tensorflow as tf\n'), ((532, 547), 'tensorflow.matmul', 'tf.matmul', (['X', 'W'], {}), '(X, W)\n', (541, 547), True, 'import tensorflow as tf\n'), ((573, 598), 'tensorflow.square', 'tf.square', (['(hypothesis - Y)'], {}), '(hypothesis - Y)\n', (582, 598), True, 'import tensorflow as tf\n'), ((730, 763), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (761, 763), True, 'import tensorflow as tf\n')]
|
from rubicon.repository.asynchronous import AsynchronousBaseRepository
from rubicon.repository.utils import json
class S3Repository(AsynchronousBaseRepository):
"""The asynchronous S3 repository uses `asyncio` to
persist Rubicon data to a remote S3 bucket.
S3 credentials can be specified via environment variables
or the credentials file in '~/.aws'.
Parameters
----------
root_dir : str
The full S3 path (including 's3://') to persist
Rubicon data to.
loop : asyncio.unix_events._UnixSelectorEventLoop, optional
The event loop the asynchronous calling program is running on.
It should not be necessary to provide this parameter in
standard asynchronous operating cases.
"""
PROTOCOL = "s3"
async def _connect(self):
"""Asynchronously connect to the underlying S3 persistence layer.
Note
----
This function must be run before any other that reaches
out to S3. It is implicitly called by such functions.
"""
await self.filesystem._connect()
async def _persist_bytes(self, bytes_data, path):
"""Asynchronously persists the raw bytes `bytes_data`
to the S3 bucket defined by `path`.
"""
await self.filesystem._pipe_file(path, bytes_data)
async def _persist_domain(self, domain, path):
"""Asynchronously persists the Rubicon object `domain`
to the S3 bucket defined by `path`.
"""
await self.filesystem._pipe_file(path, json.dumps(domain))
|
[
"rubicon.repository.utils.json.dumps"
] |
[((1538, 1556), 'rubicon.repository.utils.json.dumps', 'json.dumps', (['domain'], {}), '(domain)\n', (1548, 1556), False, 'from rubicon.repository.utils import json\n')]
|
####
# This script demonstrates how to use the Tableau Server Client
# to create new projects, both at the root level and how to nest them using
# parent_id.
#
#
# To run the script, you must have installed Python 3.6 or later.
####
import argparse
import logging
import sys
import tableauserverclient as TSC
def create_project(server, project_item):
try:
project_item = server.projects.create(project_item)
print('Created a new project called: %s' % project_item.name)
return project_item
except TSC.ServerResponseError:
print('We have already created this project: %s' % project_item.name)
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='Create new projects.')
# Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
parser.add_argument('--site', '-S', help='site name')
parser.add_argument('--token-name', '-p', required=True,
help='name of the personal access token used to sign into the server')
parser.add_argument('--token-value', '-v', required=True,
help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
# Options specific to this sample
# This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Use highest Server REST API version available
server.use_server_version()
# Without parent_id specified, projects are created at the top level.
top_level_project = TSC.ProjectItem(name='Top Level Project')
top_level_project = create_project(server, top_level_project)
# Specifying parent_id creates a nested projects.
child_project = TSC.ProjectItem(name='Child Project', parent_id=top_level_project.id)
child_project = create_project(server, child_project)
# Projects can be nested at any level.
grand_child_project = TSC.ProjectItem(name='Grand Child Project', parent_id=child_project.id)
grand_child_project = create_project(server, grand_child_project)
if __name__ == '__main__':
main()
|
[
"logging.basicConfig",
"argparse.ArgumentParser",
"tableauserverclient.ProjectItem",
"sys.exit",
"tableauserverclient.PersonalAccessTokenAuth",
"tableauserverclient.Server"
] |
[((683, 742), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create new projects."""'}), "(description='Create new projects.')\n", (706, 742), False, 'import argparse\n'), ((1737, 1777), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging_level'}), '(level=logging_level)\n', (1756, 1777), False, 'import logging\n'), ((1798, 1884), 'tableauserverclient.PersonalAccessTokenAuth', 'TSC.PersonalAccessTokenAuth', (['args.token_name', 'args.token_value'], {'site_id': 'args.site'}), '(args.token_name, args.token_value, site_id=args\n .site)\n', (1825, 1884), True, 'import tableauserverclient as TSC\n'), ((1893, 1941), 'tableauserverclient.Server', 'TSC.Server', (['args.server'], {'use_server_version': '(True)'}), '(args.server, use_server_version=True)\n', (1903, 1941), True, 'import tableauserverclient as TSC\n'), ((2185, 2226), 'tableauserverclient.ProjectItem', 'TSC.ProjectItem', ([], {'name': '"""Top Level Project"""'}), "(name='Top Level Project')\n", (2200, 2226), True, 'import tableauserverclient as TSC\n'), ((2380, 2449), 'tableauserverclient.ProjectItem', 'TSC.ProjectItem', ([], {'name': '"""Child Project"""', 'parent_id': 'top_level_project.id'}), "(name='Child Project', parent_id=top_level_project.id)\n", (2395, 2449), True, 'import tableauserverclient as TSC\n'), ((2590, 2661), 'tableauserverclient.ProjectItem', 'TSC.ProjectItem', ([], {'name': '"""Grand Child Project"""', 'parent_id': 'child_project.id'}), "(name='Grand Child Project', parent_id=child_project.id)\n", (2605, 2661), True, 'import tableauserverclient as TSC\n'), ((644, 655), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (652, 655), False, 'import sys\n')]
|
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests the open source construction environments."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import dm_construction
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string("backend", "docker", "")
def _make_random_action(action_spec, observation):
"""Makes a random action given an action spec and observation."""
# Sample the random action.
action = {}
for name, spec in action_spec.items():
if name == "Index":
value = np.random.randint(observation["n_edge"])
elif spec.dtype in (np.int32, np.int64, int):
value = np.random.randint(spec.minimum, spec.maximum + 1)
else:
value = np.random.uniform(spec.minimum, spec.maximum)
action[name] = value
return action
def _random_unroll(env, seed=1234, num_steps=10, difficulty=5,
random_choice_before_reset=False):
"""Take random actions in the given environment."""
np.random.seed(seed)
action_spec = env.action_spec()
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
trajectory = [timestep]
actions = [None]
for _ in range(num_steps):
if timestep.last():
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
action = _make_random_action(action_spec, timestep.observation)
timestep = env.step(action)
trajectory.append(timestep)
actions.append(action)
return trajectory, actions
class TestEnvironments(parameterized.TestCase):
def _make_environment(
self, problem_type, curriculum_sample, wrapper_type, backend_type=None):
"""Make the new version of the construction task."""
if backend_type is None:
backend_type = FLAGS.backend
return dm_construction.get_environment(
problem_type,
unity_environment=self._unity_envs[backend_type],
wrapper_type=wrapper_type,
curriculum_sample=curriculum_sample)
@classmethod
def setUpClass(cls):
super(TestEnvironments, cls).setUpClass()
# Construct the unity environment.
cls._unity_envs = {
"docker": dm_construction.get_unity_environment("docker"),
}
@classmethod
def tearDownClass(cls):
super(TestEnvironments, cls).tearDownClass()
for env in cls._unity_envs.values():
env.close()
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_discrete_relative_environments_curriculum_sample(self, name):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, True, "discrete_relative")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_continuous_absolute_environments_curriculum_sample(self, name):
"""Smoke test for continuous absolute wrapper w/ curriculum_sample=True."""
env = self._make_environment(name, True, "continuous_absolute")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("connecting_additional_layer", "connecting", "additional_layer"),
("connecting_mixed_height_targets", "connecting", "mixed_height_targets"),
("silhouette_double_the_targets", "silhouette", "double_the_targets"),)
def test_generalization_modes(self, name, generalization_mode):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, False, "discrete_relative")
_random_unroll(env, difficulty=generalization_mode)
if __name__ == "__main__":
absltest.main()
|
[
"dm_construction.get_unity_environment",
"numpy.random.choice",
"absl.testing.parameterized.named_parameters",
"absl.testing.absltest.main",
"dm_construction.get_environment",
"numpy.random.randint",
"numpy.random.seed",
"numpy.random.uniform",
"absl.flags.DEFINE_string"
] |
[((850, 894), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""backend"""', '"""docker"""', '""""""'], {}), "('backend', 'docker', '')\n", (869, 894), False, 'from absl import flags\n'), ((1580, 1600), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1594, 1600), True, 'import numpy as np\n'), ((3014, 3204), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('covering', 'covering')", "('covering_hard', 'covering_hard')", "('connecting', 'connecting')", "('silhouette', 'silhouette')", "('marble_run', 'marble_run')"], {}), "(('covering', 'covering'), ('covering_hard',\n 'covering_hard'), ('connecting', 'connecting'), ('silhouette',\n 'silhouette'), ('marble_run', 'marble_run'))\n", (3044, 3204), False, 'from absl.testing import parameterized\n'), ((3515, 3705), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('covering', 'covering')", "('covering_hard', 'covering_hard')", "('connecting', 'connecting')", "('silhouette', 'silhouette')", "('marble_run', 'marble_run')"], {}), "(('covering', 'covering'), ('covering_hard',\n 'covering_hard'), ('connecting', 'connecting'), ('silhouette',\n 'silhouette'), ('marble_run', 'marble_run'))\n", (3545, 3705), False, 'from absl.testing import parameterized\n'), ((4020, 4275), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('connecting_additional_layer', 'connecting', 'additional_layer')", "('connecting_mixed_height_targets', 'connecting', 'mixed_height_targets')", "('silhouette_double_the_targets', 'silhouette', 'double_the_targets')"], {}), "(('connecting_additional_layer', 'connecting',\n 'additional_layer'), ('connecting_mixed_height_targets', 'connecting',\n 'mixed_height_targets'), ('silhouette_double_the_targets', 'silhouette',\n 'double_the_targets'))\n", (4050, 4275), False, 'from absl.testing import parameterized\n'), ((4584, 4599), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4597, 4599), False, 'from absl.testing import absltest\n'), ((1672, 1702), 'numpy.random.choice', 'np.random.choice', (['[8]'], {'p': '[1.0]'}), '([8], p=[1.0])\n', (1688, 1702), True, 'import numpy as np\n'), ((2446, 2615), 'dm_construction.get_environment', 'dm_construction.get_environment', (['problem_type'], {'unity_environment': 'self._unity_envs[backend_type]', 'wrapper_type': 'wrapper_type', 'curriculum_sample': 'curriculum_sample'}), '(problem_type, unity_environment=self.\n _unity_envs[backend_type], wrapper_type=wrapper_type, curriculum_sample\n =curriculum_sample)\n', (2477, 2615), False, 'import dm_construction\n'), ((1139, 1179), 'numpy.random.randint', 'np.random.randint', (["observation['n_edge']"], {}), "(observation['n_edge'])\n", (1156, 1179), True, 'import numpy as np\n'), ((2805, 2852), 'dm_construction.get_unity_environment', 'dm_construction.get_unity_environment', (['"""docker"""'], {}), "('docker')\n", (2842, 2852), False, 'import dm_construction\n'), ((1244, 1293), 'numpy.random.randint', 'np.random.randint', (['spec.minimum', '(spec.maximum + 1)'], {}), '(spec.minimum, spec.maximum + 1)\n', (1261, 1293), True, 'import numpy as np\n'), ((1318, 1363), 'numpy.random.uniform', 'np.random.uniform', (['spec.minimum', 'spec.maximum'], {}), '(spec.minimum, spec.maximum)\n', (1335, 1363), True, 'import numpy as np\n'), ((1891, 1921), 'numpy.random.choice', 'np.random.choice', (['[8]'], {'p': '[1.0]'}), '([8], p=[1.0])\n', (1907, 1921), True, 'import numpy as np\n')]
|
from dataclasses import asdict
from hanzi_font_deconstructor.common.generate_training_data import (
STROKE_VIEW_BOX,
get_training_input_svg_and_masks,
)
from os import path, makedirs
from pathlib import Path
import shutil
import argparse
PROJECT_ROOT = Path(__file__).parents[2]
DEST_FOLDER = PROJECT_ROOT / "data"
parser = argparse.ArgumentParser(
description="Generate training data for a model to deconstruct hanzi into strokes"
)
parser.add_argument("--max-strokes-per-img", default=5, type=int)
parser.add_argument("--total-images", default=50, type=int)
args = parser.parse_args()
if __name__ == "__main__":
# create and empty the dest folder
if path.exists(DEST_FOLDER):
shutil.rmtree(DEST_FOLDER)
makedirs(DEST_FOLDER)
makedirs(DEST_FOLDER / "sample_svgs")
# create the data
data = {
"viewbox": STROKE_VIEW_BOX,
"imgs": [],
}
for i in range(args.total_images):
(img_svg, stroke_masks) = get_training_input_svg_and_masks(256)
label = f"{i}-{len(stroke_masks)}"
with open(DEST_FOLDER / "sample_svgs" / f"{label}.svg", "w") as img_file:
img_file.write(img_svg)
print(".")
print("Done!")
|
[
"os.path.exists",
"os.makedirs",
"argparse.ArgumentParser",
"pathlib.Path",
"shutil.rmtree",
"hanzi_font_deconstructor.common.generate_training_data.get_training_input_svg_and_masks"
] |
[((335, 447), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate training data for a model to deconstruct hanzi into strokes"""'}), "(description=\n 'Generate training data for a model to deconstruct hanzi into strokes')\n", (358, 447), False, 'import argparse\n'), ((676, 700), 'os.path.exists', 'path.exists', (['DEST_FOLDER'], {}), '(DEST_FOLDER)\n', (687, 700), False, 'from os import path, makedirs\n'), ((741, 762), 'os.makedirs', 'makedirs', (['DEST_FOLDER'], {}), '(DEST_FOLDER)\n', (749, 762), False, 'from os import path, makedirs\n'), ((767, 804), 'os.makedirs', 'makedirs', (["(DEST_FOLDER / 'sample_svgs')"], {}), "(DEST_FOLDER / 'sample_svgs')\n", (775, 804), False, 'from os import path, makedirs\n'), ((262, 276), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (266, 276), False, 'from pathlib import Path\n'), ((710, 736), 'shutil.rmtree', 'shutil.rmtree', (['DEST_FOLDER'], {}), '(DEST_FOLDER)\n', (723, 736), False, 'import shutil\n'), ((976, 1013), 'hanzi_font_deconstructor.common.generate_training_data.get_training_input_svg_and_masks', 'get_training_input_svg_and_masks', (['(256)'], {}), '(256)\n', (1008, 1013), False, 'from hanzi_font_deconstructor.common.generate_training_data import STROKE_VIEW_BOX, get_training_input_svg_and_masks\n')]
|
from generic_dataset.data_pipeline import DataPipeline
from generic_dataset.generic_sample import synchronize_on_fields
from generic_dataset.sample_generator import SampleGenerator
import numpy as np
import generic_dataset.utilities.save_load_methods as slm
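# pipeline operation that reorders the colour channels, converting an RGB image to BGR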
pipeline_rgb_to_gbr = DataPipeline().add_operation(lambda data, engine: (data[:, :, [2, 1, 0]], engine))
@synchronize_on_fields(field_names={'field_3'}, check_pipeline=False)
def field_3_is_positive(sample) -> bool:
return sample.get_field_3() > 0
# To model a regression problem, label_set parameter must be empty
GeneratedSampleRegression = SampleGenerator(name='GeneratedSampleRegression', label_set=set()).add_dataset_field(field_name='rgb_image', field_type=np.ndarray, save_function=slm.save_compressed_numpy_array, load_function=slm.load_compressed_numpy_array) \
.add_dataset_field(field_name='bgr_image', field_type=np.ndarray, save_function=slm.save_cv2_image_bgr, load_function=slm.load_cv2_image_bgr) \
.add_field(field_name='field_3', field_type=int) \
.add_custom_pipeline(method_name='create_pipeline_convert_rgb_to_bgr', elaborated_field='rgb_image', final_field='bgr_image', pipeline=pipeline_rgb_to_gbr) \
.add_custom_method(method_name='field_3_is_positive', function=field_3_is_positive) \
.generate_sample_class()
|
[
"generic_dataset.data_pipeline.DataPipeline",
"generic_dataset.generic_sample.synchronize_on_fields"
] |
[((367, 435), 'generic_dataset.generic_sample.synchronize_on_fields', 'synchronize_on_fields', ([], {'field_names': "{'field_3'}", 'check_pipeline': '(False)'}), "(field_names={'field_3'}, check_pipeline=False)\n", (388, 435), False, 'from generic_dataset.generic_sample import synchronize_on_fields\n'), ((281, 295), 'generic_dataset.data_pipeline.DataPipeline', 'DataPipeline', ([], {}), '()\n', (293, 295), False, 'from generic_dataset.data_pipeline import DataPipeline\n')]
|
from setuptools import setup
setup(name='neural_networks_tfw1',
version='0.1',
description='Implementing Neural Networks with Tensorflow',
packages=['neural_networks_tfw1'],
author='<NAME>',
author_email='<EMAIL>',
zip_safe=False)
|
[
"setuptools.setup"
] |
[((30, 245), 'setuptools.setup', 'setup', ([], {'name': '"""neural_networks_tfw1"""', 'version': '"""0.1"""', 'description': '"""Implementing Neural Networks with Tensorflow"""', 'packages': "['neural_networks_tfw1']", 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'zip_safe': '(False)'}), "(name='neural_networks_tfw1', version='0.1', description=\n 'Implementing Neural Networks with Tensorflow', packages=[\n 'neural_networks_tfw1'], author='<NAME>', author_email='<EMAIL>',\n zip_safe=False)\n", (35, 245), False, 'from setuptools import setup\n')]
|
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from wagtail.wagtailcore.models import PageRevision, GroupPagePermission
from wagtail.wagtailusers.models import UserProfile
# The following will check to see if we can import task from celery -
# if not then we definitely haven't installed it
try:
from celery.decorators import task
NO_CELERY = False
except ImportError:
NO_CELERY = True
# However, we could have installed celery for other projects. So we will also
# check if we have defined the BROKER_URL setting. If not then definitely we
# haven't configured it.
if NO_CELERY or not hasattr(settings, 'BROKER_URL'):
# So if we enter here we will define a different "task" decorator that
# just returns the original function and sets its delay attribute to
# point to the original function: This way, the send_notification
# function will be actually called instead of the the
# send_notification.delay()
def task(f):
f.delay=f
return f
def users_with_page_permission(page, permission_type, include_superusers=True):
# Get user model
User = get_user_model()
# Find GroupPagePermission records of the given type that apply to this page or an ancestor
ancestors_and_self = list(page.get_ancestors()) + [page]
perm = GroupPagePermission.objects.filter(permission_type=permission_type, page__in=ancestors_and_self)
q = Q(groups__page_permissions=perm)
# Include superusers
if include_superusers:
q |= Q(is_superuser=True)
return User.objects.filter(is_active=True).filter(q).distinct()
@task
def send_notification(page_revision_id, notification, excluded_user_id):
# Get revision
revision = PageRevision.objects.get(id=page_revision_id)
# Get list of recipients
if notification == 'submitted':
# Get list of publishers
recipients = users_with_page_permission(revision.page, 'publish')
elif notification in ['rejected', 'approved']:
# Get submitter
recipients = [revision.user]
else:
return
# Get list of email addresses
email_addresses = [
recipient.email for recipient in recipients
if recipient.email and recipient.id != excluded_user_id and getattr(UserProfile.get_for_user(recipient), notification + '_notifications')
]
# Return if there are no email addresses
if not email_addresses:
return
# Get email subject and content
template = 'wagtailadmin/notifications/' + notification + '.html'
rendered_template = render_to_string(template, dict(revision=revision, settings=settings)).split('\n')
email_subject = rendered_template[0]
email_content = '\n'.join(rendered_template[1:])
# Get from email
if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):
from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL
elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):
from_email = settings.DEFAULT_FROM_EMAIL
else:
from_email = '<EMAIL>'
# Send email
send_mail(email_subject, email_content, from_email, email_addresses)
@task
def send_email_task(email_subject, email_content, email_addresses, from_email=None):
if not from_email:
if hasattr(settings, 'WAGTAILADMIN_NOTIFICATION_FROM_EMAIL'):
from_email = settings.WAGTAILADMIN_NOTIFICATION_FROM_EMAIL
elif hasattr(settings, 'DEFAULT_FROM_EMAIL'):
from_email = settings.DEFAULT_FROM_EMAIL
else:
from_email = '<EMAIL>'
send_mail(email_subject, email_content, from_email, email_addresses)
|
[
"django.contrib.auth.get_user_model",
"django.core.mail.send_mail",
"wagtail.wagtailcore.models.GroupPagePermission.objects.filter",
"wagtail.wagtailusers.models.UserProfile.get_for_user",
"wagtail.wagtailcore.models.PageRevision.objects.get",
"django.db.models.Q"
] |
[((1258, 1274), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1272, 1274), False, 'from django.contrib.auth import get_user_model\n'), ((1444, 1544), 'wagtail.wagtailcore.models.GroupPagePermission.objects.filter', 'GroupPagePermission.objects.filter', ([], {'permission_type': 'permission_type', 'page__in': 'ancestors_and_self'}), '(permission_type=permission_type,\n page__in=ancestors_and_self)\n', (1478, 1544), False, 'from wagtail.wagtailcore.models import PageRevision, GroupPagePermission\n'), ((1549, 1581), 'django.db.models.Q', 'Q', ([], {'groups__page_permissions': 'perm'}), '(groups__page_permissions=perm)\n', (1550, 1581), False, 'from django.db.models import Q\n'), ((1853, 1898), 'wagtail.wagtailcore.models.PageRevision.objects.get', 'PageRevision.objects.get', ([], {'id': 'page_revision_id'}), '(id=page_revision_id)\n', (1877, 1898), False, 'from wagtail.wagtailcore.models import PageRevision, GroupPagePermission\n'), ((3190, 3258), 'django.core.mail.send_mail', 'send_mail', (['email_subject', 'email_content', 'from_email', 'email_addresses'], {}), '(email_subject, email_content, from_email, email_addresses)\n', (3199, 3258), False, 'from django.core.mail import send_mail\n'), ((3677, 3745), 'django.core.mail.send_mail', 'send_mail', (['email_subject', 'email_content', 'from_email', 'email_addresses'], {}), '(email_subject, email_content, from_email, email_addresses)\n', (3686, 3745), False, 'from django.core.mail import send_mail\n'), ((1648, 1668), 'django.db.models.Q', 'Q', ([], {'is_superuser': '(True)'}), '(is_superuser=True)\n', (1649, 1668), False, 'from django.db.models import Q\n'), ((2400, 2435), 'wagtail.wagtailusers.models.UserProfile.get_for_user', 'UserProfile.get_for_user', (['recipient'], {}), '(recipient)\n', (2424, 2435), False, 'from wagtail.wagtailusers.models import UserProfile\n')]
|
import pandas as pd
import dateutil
from lusidtools.lpt import lpt
from lusidtools.lpt import lse
from lusidtools.lpt import stdargs
from .either import Either
import re
import urllib.parse
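# captures the opaque continuation token from the "page=" query parameter of a NextPage link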
rexp = re.compile(r".*page=([^=']{10,}).*")
TOOLNAME = "scopes"
TOOLTIP = "List scopes"
def parse(extend=None, args=None):
return (
stdargs.Parser("Get Scopes", ["filename", "limit"])
.add("--portfolios", action="store_true")
.extend(extend)
.parse(args)
)
def process_args(api, args):
results = []
def fetch_page(page_token):
return api.call.list_portfolios(page=page_token)
def got_page(result):
if args.portfolios:
df = lpt.to_df(
result,
["id.scope", "id.code", "is_derived", "type", "parent_portfolio_id"],
)
df.columns = ["Scope", "Portfolio", "Derived", "Type", "Parent"]
else:
df = (
pd.DataFrame({"Scopes": [v.id.scope for v in result.content.values]})
.groupby("Scopes")
.size()
.reset_index()
)
results.append(df)
links = [l for l in result.content.links if l.relation == "NextPage"]
if len(links) > 0:
match = rexp.match(links[0].href)
if match:
return urllib.parse.unquote(match.group(1))
return None
page = Either(None)
while True:
page = fetch_page(page.right).bind(got_page)
if page.is_left():
return page
if page.right == None:
break
return lpt.trim_df(
pd.concat(results, ignore_index=True, sort=False),
args.limit,
sort=["Scope", "Portfolio"] if args.portfolios else "Scopes",
)
# Standalone tool
def main(parse=parse, display_df=lpt.display_df):
return lpt.standard_flow(parse, lse.connect, process_args, display_df)
|
[
"re.compile",
"lusidtools.lpt.stdargs.Parser",
"pandas.DataFrame",
"lusidtools.lpt.lpt.to_df",
"pandas.concat",
"lusidtools.lpt.lpt.standard_flow"
] |
[((198, 233), 're.compile', 're.compile', (['""".*page=([^=\']{10,}).*"""'], {}), '(".*page=([^=\']{10,}).*")\n', (208, 233), False, 'import re\n'), ((1873, 1936), 'lusidtools.lpt.lpt.standard_flow', 'lpt.standard_flow', (['parse', 'lse.connect', 'process_args', 'display_df'], {}), '(parse, lse.connect, process_args, display_df)\n', (1890, 1936), False, 'from lusidtools.lpt import lpt\n'), ((1645, 1694), 'pandas.concat', 'pd.concat', (['results'], {'ignore_index': '(True)', 'sort': '(False)'}), '(results, ignore_index=True, sort=False)\n', (1654, 1694), True, 'import pandas as pd\n'), ((701, 792), 'lusidtools.lpt.lpt.to_df', 'lpt.to_df', (['result', "['id.scope', 'id.code', 'is_derived', 'type', 'parent_portfolio_id']"], {}), "(result, ['id.scope', 'id.code', 'is_derived', 'type',\n 'parent_portfolio_id'])\n", (710, 792), False, 'from lusidtools.lpt import lpt\n'), ((338, 389), 'lusidtools.lpt.stdargs.Parser', 'stdargs.Parser', (['"""Get Scopes"""', "['filename', 'limit']"], {}), "('Get Scopes', ['filename', 'limit'])\n", (352, 389), False, 'from lusidtools.lpt import stdargs\n'), ((962, 1031), 'pandas.DataFrame', 'pd.DataFrame', (["{'Scopes': [v.id.scope for v in result.content.values]}"], {}), "({'Scopes': [v.id.scope for v in result.content.values]})\n", (974, 1031), True, 'import pandas as pd\n')]
|
from redgrease import GearsBuilder
gb = GearsBuilder()
gb.run()
|
[
"redgrease.GearsBuilder"
] |
[((41, 55), 'redgrease.GearsBuilder', 'GearsBuilder', ([], {}), '()\n', (53, 55), False, 'from redgrease import GearsBuilder\n')]
|
from copy import copy
from itertools import groupby
import unittest
from datetime import datetime, timedelta
from typing import List
from activitywatch.base import Watcher, Activity, Logger
from activitywatch.settings import Settings
from activitywatch.utils import floor_datetime, ceil_datetime
from activitywatch.filters.split import split_by_interval, overlaps
from activitywatch.filters.chunk import chunk_by_tags
class MockWatcher(Watcher):
def run(self):
pass
def wait(self):
pass
identifier = "mock"
def __init__(self):
settings = Settings()
settings["watchers"][self.identifier] = {}
Watcher.__init__(self)
class MockLogger(Logger):
def log(self, activities: List[Activity]):
pass
def wait(self):
pass
identifier = "mock"
def __init__(self):
settings = Settings()
settings["loggers"][self.identifier] = {}
Logger.__init__(self)
class LoggerWatcherTest(unittest.TestCase):
def test_activity_flow(self):
watcher = MockWatcher()
logger = MockLogger()
logger.add_watcher(watcher)
watcher.dispatch_activity(Activity("test", datetime.now()-timedelta(days=1), datetime.now()))
activities = logger.flush_activities()
self.assertTrue(len(activities) == 1)
activities = logger.flush_activities()
self.assertTrue(len(activities) == 0)
class ActivityTest(unittest.TestCase):
def test_to_zenobase(self):
TAG = "something"
activity = Activity(TAG, started_at=datetime.now(), ended_at=datetime.now())
event = activity.to_zenobase_event()
self.assertTrue(event["tag"] == TAG)
class SettingsTest(unittest.TestCase):
def test_instance(self):
self.assertIs(Settings(), Settings())
HOUR = timedelta(hours=1)
class SplitActivityTest(unittest.TestCase):
def test_by_hour(self):
dt = datetime(2015, 1, 1, 8, 30)
td = timedelta(hours=3, minutes=23)
activity = Activity([], dt, dt+td)
split = split_by_interval([copy(activity), copy(activity)], interval=HOUR)
self.assertEqual(len(split), 8)
activity.end += -td + timedelta(minutes=2)
split = split_by_interval([copy(activity)], interval=HOUR)
self.assertEqual(len(split), 1)
def test_ceil_hour(self):
def ceil_hour(td):
return ceil_datetime(td, td=timedelta(hours=1))
self.assertEqual(ceil_hour(datetime(2015, 1, 1, 6, 2)), datetime(2015, 1, 1, 7))
self.assertEqual(ceil_hour(datetime(2015, 1, 1, 6, 2)), ceil_hour(datetime(2015, 1, 1, 6, 58)))
self.assertNotEqual(ceil_hour(datetime(2015, 1, 1, 5, 2)), ceil_hour(datetime(2015, 1, 1, 6, 4)))
def test_floor_hour(self):
def floor_hour(td):
return floor_datetime(td, td=timedelta(hours=1))
self.assertEqual(floor_hour(datetime(2015, 1, 1, 6, 2)), datetime(2015, 1, 1, 6))
self.assertEqual(floor_hour(datetime(2015, 1, 1, 6, 2)), floor_hour(datetime(2015, 1, 1, 6, 5)))
def test_overlaps_hour(self):
def overlaps_hours(td):
return overlaps(td, interval=timedelta(hours=1))
activity = Activity([], datetime(2015, 1, 1, 5, 23), datetime(2015, 1, 1, 6, 6))
self.assertTrue(overlaps_hours(activity))
activity = Activity([], datetime(2015, 1, 1, 5, 23), datetime(2015, 1, 1, 6, 0, 0, 1))
self.assertTrue(overlaps_hours(activity))
activity = Activity([], datetime(2015, 1, 1, 6, 30), datetime(2015, 1, 1, 6, 59))
self.assertFalse(overlaps_hours(activity))
class ChunkTest(unittest.TestCase):
def test_chunk_by_tags(self):
interval = timedelta(minutes=5)
start = floor_datetime(datetime.now(), interval)
activities = [Activity(["test"], start, start+interval*0.5),
Activity(["test2"], start+interval, start+interval*1.5),
Activity(["test"], start+interval*2, start+interval*2.5)]
self.assertEqual(3, len(activities))
activities.append(Activity(["test"], start+interval, start+interval*1.5))
self.assertEqual(4, len(activities))
self.assertEqual(2, len(chunk_by_tags(activities)))
|
[
"datetime.datetime",
"activitywatch.filters.chunk.chunk_by_tags",
"activitywatch.base.Activity",
"copy.copy",
"datetime.datetime.now",
"activitywatch.settings.Settings",
"activitywatch.base.Watcher.__init__",
"datetime.timedelta",
"activitywatch.base.Logger.__init__"
] |
[((1826, 1844), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (1835, 1844), False, 'from datetime import datetime, timedelta\n'), ((584, 594), 'activitywatch.settings.Settings', 'Settings', ([], {}), '()\n', (592, 594), False, 'from activitywatch.settings import Settings\n'), ((654, 676), 'activitywatch.base.Watcher.__init__', 'Watcher.__init__', (['self'], {}), '(self)\n', (670, 676), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((868, 878), 'activitywatch.settings.Settings', 'Settings', ([], {}), '()\n', (876, 878), False, 'from activitywatch.settings import Settings\n'), ((937, 958), 'activitywatch.base.Logger.__init__', 'Logger.__init__', (['self'], {}), '(self)\n', (952, 958), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((1932, 1959), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(8)', '(30)'], {}), '(2015, 1, 1, 8, 30)\n', (1940, 1959), False, 'from datetime import datetime, timedelta\n'), ((1973, 2003), 'datetime.timedelta', 'timedelta', ([], {'hours': '(3)', 'minutes': '(23)'}), '(hours=3, minutes=23)\n', (1982, 2003), False, 'from datetime import datetime, timedelta\n'), ((2023, 2048), 'activitywatch.base.Activity', 'Activity', (['[]', 'dt', '(dt + td)'], {}), '([], dt, dt + td)\n', (2031, 2048), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((3712, 3732), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (3721, 3732), False, 'from datetime import datetime, timedelta\n'), ((1794, 1804), 'activitywatch.settings.Settings', 'Settings', ([], {}), '()\n', (1802, 1804), False, 'from activitywatch.settings import Settings\n'), ((1806, 1816), 'activitywatch.settings.Settings', 'Settings', ([], {}), '()\n', (1814, 1816), False, 'from activitywatch.settings import Settings\n'), ((2202, 2222), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (2211, 2222), False, 'from datetime import datetime, timedelta\n'), ((2513, 2536), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(7)'], {}), '(2015, 1, 1, 7)\n', (2521, 2536), False, 'from datetime import datetime, timedelta\n'), ((2935, 2958), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)'], {}), '(2015, 1, 1, 6)\n', (2943, 2958), False, 'from datetime import datetime, timedelta\n'), ((3226, 3253), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(5)', '(23)'], {}), '(2015, 1, 1, 5, 23)\n', (3234, 3253), False, 'from datetime import datetime, timedelta\n'), ((3255, 3281), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(6)'], {}), '(2015, 1, 1, 6, 6)\n', (3263, 3281), False, 'from datetime import datetime, timedelta\n'), ((3366, 3393), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(5)', '(23)'], {}), '(2015, 1, 1, 5, 23)\n', (3374, 3393), False, 'from datetime import datetime, timedelta\n'), ((3395, 3427), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(0)', '(0)', '(1)'], {}), '(2015, 1, 1, 6, 0, 0, 1)\n', (3403, 3427), False, 'from datetime import datetime, timedelta\n'), ((3512, 3539), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(30)'], {}), '(2015, 1, 1, 6, 30)\n', (3520, 3539), False, 'from datetime import datetime, timedelta\n'), ((3541, 3568), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(59)'], {}), '(2015, 1, 1, 6, 59)\n', (3549, 3568), False, 'from datetime import datetime, timedelta\n'), ((3764, 3778), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3776, 3778), False, 'from datetime import datetime, timedelta\n'), ((3813, 3862), 'activitywatch.base.Activity', 'Activity', (["['test']", 'start', '(start + interval * 0.5)'], {}), "(['test'], start, start + interval * 0.5)\n", (3821, 3862), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((3882, 3943), 'activitywatch.base.Activity', 'Activity', (["['test2']", '(start + interval)', '(start + interval * 1.5)'], {}), "(['test2'], start + interval, start + interval * 1.5)\n", (3890, 3943), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((3961, 4025), 'activitywatch.base.Activity', 'Activity', (["['test']", '(start + interval * 2)', '(start + interval * 2.5)'], {}), "(['test'], start + interval * 2, start + interval * 2.5)\n", (3969, 4025), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((4091, 4151), 'activitywatch.base.Activity', 'Activity', (["['test']", '(start + interval)', '(start + interval * 1.5)'], {}), "(['test'], start + interval, start + interval * 1.5)\n", (4099, 4151), False, 'from activitywatch.base import Watcher, Activity, Logger\n'), ((1223, 1237), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1235, 1237), False, 'from datetime import datetime, timedelta\n'), ((1571, 1585), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1583, 1585), False, 'from datetime import datetime, timedelta\n'), ((1596, 1610), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1608, 1610), False, 'from datetime import datetime, timedelta\n'), ((2083, 2097), 'copy.copy', 'copy', (['activity'], {}), '(activity)\n', (2087, 2097), False, 'from copy import copy\n'), ((2099, 2113), 'copy.copy', 'copy', (['activity'], {}), '(activity)\n', (2103, 2113), False, 'from copy import copy\n'), ((2258, 2272), 'copy.copy', 'copy', (['activity'], {}), '(activity)\n', (2262, 2272), False, 'from copy import copy\n'), ((2484, 2510), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(2)'], {}), '(2015, 1, 1, 6, 2)\n', (2492, 2510), False, 'from datetime import datetime, timedelta\n'), ((2573, 2599), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(2)'], {}), '(2015, 1, 1, 6, 2)\n', (2581, 2599), False, 'from datetime import datetime, timedelta\n'), ((2612, 2639), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(58)'], {}), '(2015, 1, 1, 6, 58)\n', (2620, 2639), False, 'from datetime import datetime, timedelta\n'), ((2680, 2706), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(5)', '(2)'], {}), '(2015, 1, 1, 5, 2)\n', (2688, 2706), False, 'from datetime import datetime, timedelta\n'), ((2719, 2745), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(4)'], {}), '(2015, 1, 1, 6, 4)\n', (2727, 2745), False, 'from datetime import datetime, timedelta\n'), ((2906, 2932), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(2)'], {}), '(2015, 1, 1, 6, 2)\n', (2914, 2932), False, 'from datetime import datetime, timedelta\n'), ((2996, 3022), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(2)'], {}), '(2015, 1, 1, 6, 2)\n', (3004, 3022), False, 'from datetime import datetime, timedelta\n'), ((3036, 3062), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(6)', '(5)'], {}), '(2015, 1, 1, 6, 5)\n', (3044, 3062), False, 'from datetime import datetime, timedelta\n'), ((4225, 4250), 'activitywatch.filters.chunk.chunk_by_tags', 'chunk_by_tags', (['activities'], {}), '(activities)\n', (4238, 4250), False, 'from activitywatch.filters.chunk import chunk_by_tags\n'), ((1189, 1203), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1201, 1203), False, 'from datetime import datetime, timedelta\n'), ((1204, 1221), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1213, 1221), False, 'from datetime import datetime, timedelta\n'), ((2428, 2446), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2437, 2446), False, 'from datetime import datetime, timedelta\n'), ((2849, 2867), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2858, 2867), False, 'from datetime import datetime, timedelta\n'), ((3173, 3191), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3182, 3191), False, 'from datetime import datetime, timedelta\n')]
|
import json
from collections import OrderedDict
from typing import Union, List
import clip
import torch
import torch.nn as nn
import torch.nn.functional as F
from libs.definitions import ROOT
label_file = ROOT / 'imagenet_class_index.json'
with open(label_file, 'r') as f:
labels = json.load(f)
_DEFAULT_CLASSNAMES = [value[1] for value in labels.values()]
# templates are copied from https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
_DEFAULT_TEMPLATES = [
'a bad photo of a {}.',
'a photo of many {}.',
'a sculpture of a {}.',
'a photo of the hard to see {}.',
'a low resolution photo of the {}.',
'a rendering of a {}.',
'graffiti of a {}.',
'a bad photo of the {}.',
'a cropped photo of the {}.',
'a tattoo of a {}.',
'the embroidered {}.',
'a photo of a hard to see {}.',
'a bright photo of a {}.',
'a photo of a clean {}.',
'a photo of a dirty {}.',
'a dark photo of the {}.',
'a drawing of a {}.',
'a photo of my {}.',
'the plastic {}.',
'a photo of the cool {}.',
'a close-up photo of a {}.',
'a black and white photo of the {}.',
'a painting of the {}.',
'a painting of a {}.',
'a pixelated photo of the {}.',
'a sculpture of the {}.',
'a bright photo of the {}.',
'a cropped photo of a {}.',
'a plastic {}.',
'a photo of the dirty {}.',
'a jpeg corrupted photo of a {}.',
'a blurry photo of the {}.',
'a photo of the {}.',
'a good photo of the {}.',
'a rendering of the {}.',
'a {} in a video game.',
'a photo of one {}.',
'a doodle of a {}.',
'a close-up photo of the {}.',
'a photo of a {}.',
'the origami {}.',
'the {} in a video game.',
'a sketch of a {}.',
'a doodle of the {}.',
'a origami {}.',
'a low resolution photo of a {}.',
'the toy {}.',
'a rendition of the {}.',
'a photo of the clean {}.',
'a photo of a large {}.',
'a rendition of a {}.',
'a photo of a nice {}.',
'a photo of a weird {}.',
'a blurry photo of a {}.',
'a cartoon {}.',
'art of a {}.',
'a sketch of the {}.',
'a embroidered {}.',
'a pixelated photo of a {}.',
'itap of the {}.',
'a jpeg corrupted photo of the {}.',
'a good photo of a {}.',
'a plushie {}.',
'a photo of the nice {}.',
'a photo of the small {}.',
'a photo of the weird {}.',
'the cartoon {}.',
'art of the {}.',
'a drawing of the {}.',
'a photo of the large {}.',
'a black and white photo of a {}.',
'the plushie {}.',
'a dark photo of a {}.',
'itap of a {}.',
'graffiti of the {}.',
'a toy {}.',
'itap of my {}.',
'a photo of a cool {}.',
'a photo of a small {}.',
'a tattoo of the {}.',
]
class DenseClip(nn.Module):
_AVAILABLE_MODELS = ['RN50', 'RN50x16'] # refer to Table 3. in the paper
def __init__(self,
name: str,
classnames: List[str] = None,
templates: List[str] = None,
device: Union[str, torch.device] = 'cuda' if torch.cuda.is_available() else 'cpu',
jit: bool = False, download_root: str = None):
super(DenseClip, self).__init__()
self.clip_model, self.preprocess = clip.load(name, device, jit, download_root)
if classnames is None:
classnames = _DEFAULT_CLASSNAMES
if templates is None:
templates = _DEFAULT_TEMPLATES
self._init_visual(device)
self._init_zeroshot_classifier(classnames, templates, device)
def _init_visual(self, device):
self.visual = self.clip_model.visual
self.conv1 = nn.Conv2d(self.visual.attnpool.v_proj.in_features,
self.visual.attnpool.v_proj.out_features,
kernel_size=(1, 1)).to(device).to(self.dtype)
self.conv2 = nn.Conv2d(self.visual.attnpool.c_proj.in_features,
self.visual.attnpool.c_proj.out_features,
kernel_size=(1, 1)).to(device).to(self.dtype)
conv1_weight_shape = (*self.visual.attnpool.v_proj.weight.shape, 1, 1)
conv2_weight_shape = (*self.visual.attnpool.c_proj.weight.shape, 1, 1)
self.conv1.load_state_dict(
OrderedDict(weight=self.visual.attnpool.v_proj.weight.reshape(conv1_weight_shape),
bias=self.visual.attnpool.v_proj.bias))
self.conv2.load_state_dict(
OrderedDict(weight=self.visual.attnpool.c_proj.weight.reshape(conv2_weight_shape),
bias=self.visual.attnpool.c_proj.bias))
@torch.no_grad()
def _init_zeroshot_classifier(self, classnames, templates, device):
# refer to: https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
zeroshot_weights = []
for classname in classnames:
texts = [template.format(classname) for template in templates] # format with class
texts = clip.tokenize(texts).to(device) # tokenize
class_embeddings = self.clip_model.encode_text(texts) # embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
# shape: [E, C]
# where E is the dimension of an embedding and C is the number of classes.
self.zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def _stem(self, x):
for conv, bn in [(self.visual.conv1, self.visual.bn1),
(self.visual.conv2, self.visual.bn2),
(self.visual.conv3, self.visual.bn3)]:
x = self.visual.relu(bn(conv(x)))
x = self.visual.avgpool(x)
return x
def encode_image(self, image):
image = image.type(self.dtype)
feature = self._stem(image)
feature = self.visual.layer1(feature)
feature = self.visual.layer2(feature)
feature = self.visual.layer3(feature)
feature = self.visual.layer4(feature)
# removed attnpool
feature = self.conv1(feature)
feature = self.conv2(feature)
return feature
def forward(self, images):
# [B, E, h, w]
features = self.encode_image(images)
# [B, w, h, E]
features_t = features.transpose(1, 3)
# [B, w, h, C]
output_t = features_t @ self.zeroshot_weights
# [B, C, h, w]
output = output_t.transpose(1, 3)
output = F.interpolate(output, size=images.shape[-2:], mode='bilinear')
return output
@staticmethod
def available_models():
return DenseClip._AVAILABLE_MODELS
class Clip(nn.Module):
_AVAILABLE_MODELS = ['RN50', 'RN50x16'] # refer to Table 3. in the paper
def __init__(self,
name: str,
classnames: List[str] = None,
templates: List[str] = None,
device: Union[str, torch.device] = 'cuda' if torch.cuda.is_available() else 'cpu',
jit: bool = False, download_root: str = None):
super(Clip, self).__init__()
self.clip_model, self.preprocess = clip.load(name, device, jit, download_root)
if classnames is None:
classnames = _DEFAULT_CLASSNAMES
if templates is None:
templates = _DEFAULT_TEMPLATES
self._init_zeroshot_classifier(classnames, templates, device)
@torch.no_grad()
def _init_zeroshot_classifier(self, classnames, templates, device):
# refer to: https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
zeroshot_weights = []
for classname in classnames:
texts = [template.format(classname) for template in templates] # format with class
texts = clip.tokenize(texts).to(device) # tokenize
class_embeddings = self.clip_model.encode_text(texts) # embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
# shape: [E, C]
# where E is the dimension of an embedding and C is the number of classes.
self.zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
def encode_image(self, image):
feature = self.clip_model.encode_image(image)
feature /= feature.norm(dim=-1, keepdim=True)
return feature
def forward(self, images):
features = self.encode_image(images)
output = features @ self.zeroshot_weights
return F.softmax(output, dim=-1)
@staticmethod
def available_models():
return Clip._AVAILABLE_MODELS
|
[
"torch.stack",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.functional.interpolate",
"clip.load",
"json.load",
"torch.no_grad",
"clip.tokenize",
"torch.nn.functional.softmax"
] |
[((289, 301), 'json.load', 'json.load', (['f'], {}), '(f)\n', (298, 301), False, 'import json\n'), ((4697, 4712), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4710, 4712), False, 'import torch\n'), ((7731, 7746), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7744, 7746), False, 'import torch\n'), ((3317, 3360), 'clip.load', 'clip.load', (['name', 'device', 'jit', 'download_root'], {}), '(name, device, jit, download_root)\n', (3326, 3360), False, 'import clip\n'), ((6792, 6854), 'torch.nn.functional.interpolate', 'F.interpolate', (['output'], {'size': 'images.shape[-2:]', 'mode': '"""bilinear"""'}), "(output, size=images.shape[-2:], mode='bilinear')\n", (6805, 6854), True, 'import torch.nn.functional as F\n'), ((7459, 7502), 'clip.load', 'clip.load', (['name', 'device', 'jit', 'download_root'], {}), '(name, device, jit, download_root)\n', (7468, 7502), False, 'import clip\n'), ((8988, 9013), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (8997, 9013), True, 'import torch.nn.functional as F\n'), ((3130, 3155), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3153, 3155), False, 'import torch\n'), ((7277, 7302), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7300, 7302), False, 'import torch\n'), ((5597, 5633), 'torch.stack', 'torch.stack', (['zeroshot_weights'], {'dim': '(1)'}), '(zeroshot_weights, dim=1)\n', (5608, 5633), False, 'import torch\n'), ((8631, 8667), 'torch.stack', 'torch.stack', (['zeroshot_weights'], {'dim': '(1)'}), '(zeroshot_weights, dim=1)\n', (8642, 8667), False, 'import torch\n'), ((5077, 5097), 'clip.tokenize', 'clip.tokenize', (['texts'], {}), '(texts)\n', (5090, 5097), False, 'import clip\n'), ((8111, 8131), 'clip.tokenize', 'clip.tokenize', (['texts'], {}), '(texts)\n', (8124, 8131), False, 'import clip\n'), ((3720, 3837), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.visual.attnpool.v_proj.in_features', 'self.visual.attnpool.v_proj.out_features'], {'kernel_size': '(1, 1)'}), '(self.visual.attnpool.v_proj.in_features, self.visual.attnpool.\n v_proj.out_features, kernel_size=(1, 1))\n', (3729, 3837), True, 'import torch.nn as nn\n'), ((3942, 4059), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.visual.attnpool.c_proj.in_features', 'self.visual.attnpool.c_proj.out_features'], {'kernel_size': '(1, 1)'}), '(self.visual.attnpool.c_proj.in_features, self.visual.attnpool.\n c_proj.out_features, kernel_size=(1, 1))\n', (3951, 4059), True, 'import torch.nn as nn\n')]
|
import time
import pytest
from flask import g
from flask import session
import paho.mqtt.client as paho
from SmartSleep.db import get_db
from flask import json
import runpy
msg_nr = 0
messages = [""]
broker = 'broker.emqx.io'
port = 1883
def update_contor():
global msg_nr
msg_nr += 1
def on_message(client, userdata, message):
received = json.loads(message.payload)
if "status" in received:
assert received['status'] == messages[msg_nr]
update_contor()
elif "db" in received:
assert received["db"] == messages[msg_nr]
update_contor()
def test_cooling_system(client, auth):
global msg_nr
msg_nr = 0
global messages
messages = ['16',
"Setting the temperature system level to 1.0", "New temperature system level set to 1.0",
'16',
"Setting the temperature system level to 2.0", "New temperature system level set to 2.0",
'16',
"Setting the temperature system level to 3.0", "New temperature system level set to 3.0",
'16',
"Setting the temperature system level to 4.0", "New temperature system level set to 4.0",
'19',
"Setting the temperature system level to 3.0", "New temperature system level set to 3.0",
'16',
"Setting the temperature system level to 4.0", "New temperature system level set to 4.0",
"18"
]
time.sleep(2)
client_mqtt = paho.Client("client-test-snoring")
client_mqtt.on_message = on_message
client_mqtt.connect(broker)
client_mqtt.loop_start()
client_mqtt.subscribe("SmartSleep/SoundSensor")
auth.login()
response = client.post(f"/config/start_to_sleep?sleep_now={True}")
assert response.status_code == 200
response = client.post("/config/temp?temperature=18")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=19")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=18")
assert response.status_code == 200
time.sleep(1.5)
|
[
"paho.mqtt.client.Client",
"flask.json.loads",
"time.sleep"
] |
[((357, 384), 'flask.json.loads', 'json.loads', (['message.payload'], {}), '(message.payload)\n', (367, 384), False, 'from flask import json\n'), ((1499, 1512), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1509, 1512), False, 'import time\n'), ((1532, 1566), 'paho.mqtt.client.Client', 'paho.Client', (['"""client-test-snoring"""'], {}), "('client-test-snoring')\n", (1543, 1566), True, 'import paho.mqtt.client as paho\n'), ((1950, 1965), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (1960, 1965), False, 'import time\n'), ((2071, 2086), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2081, 2086), False, 'import time\n'), ((2192, 2207), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2202, 2207), False, 'import time\n'), ((2313, 2328), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2323, 2328), False, 'import time\n'), ((2434, 2449), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2444, 2449), False, 'import time\n'), ((2555, 2570), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2565, 2570), False, 'import time\n'), ((2676, 2691), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2686, 2691), False, 'import time\n'), ((2797, 2812), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2807, 2812), False, 'import time\n')]
|
import theano.tensor as tt
def explin(x):
return tt.where(x >= 0, 1 + x, tt.exp(x))
def log_exp1p(x):
return tt.log1p(tt.exp(x))
|
[
"theano.tensor.exp"
] |
[((76, 85), 'theano.tensor.exp', 'tt.exp', (['x'], {}), '(x)\n', (82, 85), True, 'import theano.tensor as tt\n'), ((124, 133), 'theano.tensor.exp', 'tt.exp', (['x'], {}), '(x)\n', (130, 133), True, 'import theano.tensor as tt\n')]
|
from pyEtherCAT import MasterEtherCAT  # import the EtherCAT master library
nic = "eth0"  # name of the network interface card to use
cat = MasterEtherCAT.MasterEtherCAT(nic)
ADP = 0x0000  # first slave device
ADDR = 0x0E00  # address of the core register
cat.APRD(IDX=0x00, ADP=ADP, ADO=ADDR, DATA=[0,0,0,0,0,0,0,0])  # DATA passes eight zeros (a 64-bit frame)
(DATA, WKC) = cat.socket_read()  # read back the result
print("[0x{:04X}]= 0x{:02x}{:02x},0x{:02x}{:02x},0x{:02x}{:02x},0x{:02x}{:02x}".format(ADDR, DATA[7],DATA[6],DATA[5],DATA[4],DATA[3],DATA[2],DATA[1],DATA[0]))
# display the data that was read
|
[
"pyEtherCAT.MasterEtherCAT.MasterEtherCAT"
] |
[((88, 122), 'pyEtherCAT.MasterEtherCAT.MasterEtherCAT', 'MasterEtherCAT.MasterEtherCAT', (['nic'], {}), '(nic)\n', (117, 122), False, 'from pyEtherCAT import MasterEtherCAT\n')]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pprint
from base import BaseObject
from base import FileIO
class PythonLOCParser(BaseObject):
""" Parse T/LOC from a Python File
"""
def __init__(self,
file_path: str,
is_debug: bool = False):
"""
Created:
24-Dec-2019
<EMAIL>
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1637#issuecomment-16802191
:param file_path:
link to a python file
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._file_path = file_path
def _lines(self) -> list:
lines = FileIO.file_to_lines(self._file_path, use_sort=False)
return lines
def process(self) -> dict:
lines = self._lines()
loc = len(lines)
tloc = len([line for line in lines if line and len(line.strip())])
d_result = {
"Provenance": str(self.__class__.__name__),
"FilePath": self._file_path,
"LOC": str(loc),
"TLOC": str(tloc)}
if self._is_debug:
self.logger.debug('\n'.join([
"LOC Parsing Complete",
pprint.pformat(d_result, indent=4)]))
return d_result
|
[
"base.BaseObject.__init__",
"base.FileIO.file_to_lines",
"pprint.pformat"
] |
[((581, 616), 'base.BaseObject.__init__', 'BaseObject.__init__', (['self', '__name__'], {}), '(self, __name__)\n', (600, 616), False, 'from base import BaseObject\n'), ((735, 788), 'base.FileIO.file_to_lines', 'FileIO.file_to_lines', (['self._file_path'], {'use_sort': '(False)'}), '(self._file_path, use_sort=False)\n', (755, 788), False, 'from base import FileIO\n'), ((1279, 1313), 'pprint.pformat', 'pprint.pformat', (['d_result'], {'indent': '(4)'}), '(d_result, indent=4)\n', (1293, 1313), False, 'import pprint\n')]
|
import os
import angr
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_vtable_extraction_x86_64():
p = angr.Project(os.path.join(test_location, "x86_64", "cpp_classes"), auto_load_libs=False)
vtables_sizes = {0x403cb0: 24, 0x403cd8: 16, 0x403cf8: 16, 0x403d18: 16}
vtable_analysis = p.analyses.VtableFinder()
vtables = vtable_analysis.vtables_list
assert len(vtables) == 4
for vtable in vtables:
assert vtable.vaddr in [0x403cb0, 0x403cd8, 0x403cf8, 0x403d18]
assert vtables_sizes[vtable.vaddr] == vtable.size
if __name__ == "__main__":
test_vtable_extraction_x86_64()
|
[
"os.path.realpath",
"os.path.join"
] |
[((71, 97), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n'), ((197, 249), 'os.path.join', 'os.path.join', (['test_location', '"""x86_64"""', '"""cpp_classes"""'], {}), "(test_location, 'x86_64', 'cpp_classes')\n", (209, 249), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from django_countries.serializer_fields import CountryField
from .models import Event, CountryEvent
class CountryEventSerializer(serializers.ModelSerializer):
code = serializers.ReadOnlyField(source='country.code')
name = serializers.SerializerMethodField()
class Meta:
model = CountryEvent
fields = ('code', 'name')
def get_name(self, obj):
return obj.country.name
class EventsSerializer(serializers.ModelSerializer):
events_country = CountryEventSerializer(many=True, read_only=True)
class Meta:
model = Event
fields = (
'name', 'description', 'start', 'end', 'events_country'
)
|
[
"rest_framework.serializers.SerializerMethodField",
"rest_framework.serializers.ReadOnlyField"
] |
[((237, 285), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""country.code"""'}), "(source='country.code')\n", (262, 285), False, 'from rest_framework import serializers\n'), ((297, 332), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (330, 332), False, 'from rest_framework import serializers\n')]
|
from collections import Counter
import numpy as np
def keep_word(word):
return word.is_alpha
def unique_words(problems):
return set([word.lemma_ for problem in problems for word in problem.tokens() if keep_word(word)])
def create_word2idx(vocab):
return {word: idx for idx, word in enumerate(vocab)}
class BagOfWordsFeature():
def __init__(self, corpus):
self.vocab = list(unique_words(corpus))
# Mapping from words to their index in the feature vector.
self.word2idx = create_word2idx(self.vocab)
def process(self, problem):
features = np.zeros(len(self.vocab))
words = [word.lemma_ for word in problem.tokens() if keep_word(word)]
freqs = Counter(words)
for word in freqs:
# Skip unknown words.
if word in self.word2idx:
features[self.word2idx[word]] = freqs[word]
return features
|
[
"collections.Counter"
] |
[((718, 732), 'collections.Counter', 'Counter', (['words'], {}), '(words)\n', (725, 732), False, 'from collections import Counter\n')]
|
from threading import Lock
from time import time
from ui import Menu
from ui.utils import clamp, check_value_lock, to_be_foreground
class NumberedMenu(Menu):
"""
This Menu allows the user to jump to entries using the numpad. If the menu is 10 entries or less
the navigation is instant. Otherwise, it lets the user type multiple digits to navigate to entries beyond 10th.
The `input_delay` parameter controls how long, and if, the menu waits before considering an input as definitive.
If `input_delay` is 0, then only the 10 first entries can be navigated to using the keypad.
The `prepend_numbers` parameters controls whether the entries should be prefixed by their number.
(default: `True`)
"""
def __init__(self, *args, **kwargs):
self.prepend_numbers = kwargs.pop('prepend_numbers', True)
self.input_delay = kwargs.pop('input_delay', 1)
Menu.__init__(self, *args, **kwargs)
self.__locked_name__ = None
self.value_lock = Lock()
self.numeric_keymap = {"KEY_{}".format(i): i for i in range(10)}
self.last_input_time = 0
self.current_input = None
@property
def entry_count(self):
return len(self.contents)
def before_activate(self):
Menu.before_activate(self)
self.last_input_time = -self.input_delay
def idle_loop(self):
Menu.idle_loop(self)
self.check_character_state()
def set_keymap(self):
Menu.set_keymap(self)
self.i.set_streaming(self.on_key_pressed)
def deactivate(self):
Menu.deactivate(self)
self.i.remove_streaming()
@to_be_foreground
def on_key_pressed(self, key):
if key == "KEY_RIGHT" and self.is_multi_digit():
self.confirm_current_input()
if key not in self.numeric_keymap:
return
if self.is_multi_digit():
self.process_multi_digit_input(key)
else:
self.process_single_digit_input(key)
self.view.refresh()
def process_single_digit_input(self, key):
self.move_to_entry(self.numeric_keymap[key])
def process_multi_digit_input(self, key):
self.last_input_time = time()
if not self.current_input:
self.current_input = str(self.numeric_keymap[key])
else:
self.current_input += str(self.numeric_keymap[key])
def move_to_entry(self, index):
if self.pointer == index:
# Moving to the same item that's already selected
# let's interpret this as KEY_ENTER
self.current_input = None
self.select_entry()
return
self.pointer = clamp(index, 0, len(self.contents) - 1)
self.current_input = None
self.view.refresh()
def process_contents(self):
Menu.process_contents(self)
if self.prepend_numbers:
self.prepend_entry_text()
def prepend_entry_text(self):
# prepend numbers to each entry name
if self.is_multi_digit():
self.contents = [["{} {}".format(i, entry[0]), entry[1]]
for i, entry in enumerate(self.contents)]
else:
for i, entry in enumerate(self.contents[:10]):
entry[0] = "{} {}".format(i, entry[0])
@check_value_lock
def check_character_state(self):
if self.is_current_input_finished():
self.move_to_entry(int(self.current_input))
def is_multi_digit(self):
return self.input_delay > 0
def is_current_input_finished(self):
# nothing in the buffer
if not self.current_input:
return False
# no need to let the user input '100' if we have 20 entries
if len(str(self.current_input)) == len(str(self.entry_count)):
return True
# user typed 2 and we have 19 entries, going to the most likely option
if int(self.current_input) * 10 > self.entry_count:
return True
# user typed 17 and we have 12 entries
if int(self.current_input) >= self.entry_count:
return True
now = time()
elapsed = now - self.last_input_time
if self.is_multi_digit() and elapsed >= self.input_delay: # delay wait is over
return True
return False
def confirm_current_input(self):
if self.current_input is None:
return
self.move_to_entry(int(self.current_input))
|
[
"ui.Menu.set_keymap",
"threading.Lock",
"ui.Menu.__init__",
"ui.Menu.process_contents",
"ui.Menu.idle_loop",
"ui.Menu.before_activate",
"ui.Menu.deactivate",
"time.time"
] |
[((906, 942), 'ui.Menu.__init__', 'Menu.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (919, 942), False, 'from ui import Menu\n'), ((1005, 1011), 'threading.Lock', 'Lock', ([], {}), '()\n', (1009, 1011), False, 'from threading import Lock\n'), ((1268, 1294), 'ui.Menu.before_activate', 'Menu.before_activate', (['self'], {}), '(self)\n', (1288, 1294), False, 'from ui import Menu\n'), ((1378, 1398), 'ui.Menu.idle_loop', 'Menu.idle_loop', (['self'], {}), '(self)\n', (1392, 1398), False, 'from ui import Menu\n'), ((1471, 1492), 'ui.Menu.set_keymap', 'Menu.set_keymap', (['self'], {}), '(self)\n', (1486, 1492), False, 'from ui import Menu\n'), ((1578, 1599), 'ui.Menu.deactivate', 'Menu.deactivate', (['self'], {}), '(self)\n', (1593, 1599), False, 'from ui import Menu\n'), ((2205, 2211), 'time.time', 'time', ([], {}), '()\n', (2209, 2211), False, 'from time import time\n'), ((2824, 2851), 'ui.Menu.process_contents', 'Menu.process_contents', (['self'], {}), '(self)\n', (2845, 2851), False, 'from ui import Menu\n'), ((4138, 4144), 'time.time', 'time', ([], {}), '()\n', (4142, 4144), False, 'from time import time\n')]
|
import math
import torch
from bisect import bisect_right
class _LRScheduler:
def __init__(self, optimizer, last_epoch=-1):
self.optimizer = optimizer
self.base_lr = optimizer.lr
self.last_epoch = last_epoch
def step(self):
self.last_epoch += 1
self.optimizer.lr = self.get_lr()
class StepLR(_LRScheduler):
def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):
super().__init__(optimizer, last_epoch)
self.step_size = step_size
self.gamma = gamma
def get_lr(self):
return self.base_lr * self.gamma ** (self.last_epoch // self.step_size)
class MultiStepLR(_LRScheduler):
def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
super().__init__(optimizer, last_epoch)
self.milestones = milestones
self.gamma = gamma
def get_lr(self):
return self.base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch)
class ExponentialLR(_LRScheduler):
def __init__(self, optimizer, gamma, last_epoch=-1):
super().__init__(optimizer, last_epoch)
self.gamma = gamma
def get_lr(self):
return self.base_lr * self.gamma ** self.last_epoch
class CosineAnnealingLR(_LRScheduler):
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
super().__init__(optimizer, last_epoch)
self.T_max = T_max
self.eta_min = eta_min
def get_lr(self):
return self.eta_min + (self.base_lr - self.eta_min) * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
|
[
"math.cos",
"bisect.bisect_right"
] |
[((931, 977), 'bisect.bisect_right', 'bisect_right', (['self.milestones', 'self.last_epoch'], {}), '(self.milestones, self.last_epoch)\n', (943, 977), False, 'from bisect import bisect_right\n'), ((1535, 1583), 'math.cos', 'math.cos', (['(math.pi * self.last_epoch / self.T_max)'], {}), '(math.pi * self.last_epoch / self.T_max)\n', (1543, 1583), False, 'import math\n')]
|
import os
import platform
import unittest
import nose
from conans import tools
from conans.errors import ConanException
from conans.model.version import Version
from conans import __version__ as client_version
from conans.model import settings
from conans.test.utils.tools import TestClient
from conans.test.assets.visual_project_files import get_vs_project_files
class vswhereTest(unittest.TestCase):
    # Environment supposed:
# - BuildTools 14 (2015)
# - VS Community 14 (2015)
#
# - BuildTools 15 (2017) OR VS Community 15 (2017)
modern_products = 1 # 2017 or higher versions without BuildTools -> vswhere()
all_modern_products = 2 # 2017 or higher versions with BuildTools -> vswhere(products=["*"])
modern_and_legacy_products = 2 # 2017 and lower versions (without BuildTools) -> vswhere(legacy=True)
only_legacy_products = 1
all_products = 3
def setUp(self):
if platform.system() != "Windows":
raise nose.SkipTest("Only Windows test")
if Version(client_version) < Version("1.1.0-dev"):
raise nose.SkipTest("Only >= 1.1.0-dev version")
def vs_comntools_test(self):
# Fake path
with tools.environment_append({"VS150COMNTOOLS": "fake/path/here"}):
path = tools.vs_comntools("15")
self.assertEqual(path, "fake/path/here")
# VS 14 path
path = tools.vs_comntools("14")
self.assertEqual(path, "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\Common7\\Tools\\")
# VS 15 path (shouldn't be found as VS150COMNTOOLS is not set by default)
path = tools.vs_comntools("15")
self.assertEqual(path, None)
def vswhere_test(self):
# products and legacy not allowed
self.assertRaises(ConanException, tools.vswhere, products=["*"], legacy=True)
# Detect only one product (VS Community 15) as vswhere default detection
nproducts = len(tools.vswhere())
self.assertEqual(nproducts, self.modern_products)
# Detect only modern products (VS Community 15 & BuildTools 15)
products = tools.vswhere(products=["*"])
nproducts = len(products)
self.assertEqual(nproducts, self.all_modern_products)
installation_paths = [product["installationPath"] for product in products]
self.assertTrue(any("Community" in install_path for install_path in installation_paths))
self.assertTrue(any("BuildTools" in install_path for install_path in installation_paths))
# Detect also legacy products but no modern BuildTools
products = tools.vswhere(legacy=True)
nproducts = len(products)
self.assertEqual(nproducts, self.modern_and_legacy_products)
installation_paths = [product["installationPath"] for product in products]
self.assertTrue(any("Community" in install_path for install_path in installation_paths))
self.assertTrue(any("Microsoft Visual Studio 14.0" in install_path for install_path in installation_paths))
# Detect all installed products
products = tools.vswhere(products=["*"])
products += tools.vswhere(legacy=["*"])
seen_products = []
for product in products:
if product not in seen_products:
seen_products.append(product)
products = seen_products
nproducts = len(products)
self.assertEqual(nproducts, self.all_products)
installation_paths = [product["installationPath"] for product in products]
self.assertTrue(any("Community" in install_path for install_path in installation_paths))
self.assertTrue(any("BuildTools" in install_path for install_path in installation_paths))
self.assertTrue(any("Microsoft Visual Studio 14.0" in install_path for install_path in installation_paths))
def vs_installation_path_test(self):
# Default behaviour
install_path = tools.vs_installation_path("15")
self.assertIn("Community", install_path)
install_path = tools.vs_installation_path("14")
self.assertIn("Microsoft Visual Studio 14.0", install_path)
# only BuildTools detection
install_path = tools.vs_installation_path("15", preference=["BuildTools"])
self.assertIn("BuildTools", install_path)
install_path = tools.vs_installation_path("14", preference=["BuildTools"])
self.assertIn("Microsoft Visual Studio 14.0", install_path)
# Ask for not installed versions
install_path = tools.vs_installation_path("15", preference=["Enterprise"])
self.assertIsNone(install_path)
install_path = tools.vs_installation_path("15", preference=["Professional"])
self.assertIsNone(install_path)
# Change preference order
install_path = tools.vs_installation_path("15", preference=["BuildTools", "Community", "Professional", "Enterprise"])
self.assertIn("BuildTools", install_path)
install_path = tools.vs_installation_path("15", preference=["Professional", "Enterprise", "Community"])
self.assertIn("Community", install_path)
# Preference order by env var
with(tools.environment_append({"CONAN_VS_INSTALLATION_PREFERENCE":"BuildTools, Community,Professional, Enterprise"})):
install_path = tools.vs_installation_path("15")
self.assertIn("BuildTools", install_path)
with(tools.environment_append({"CONAN_VS_INSTALLATION_PREFERENCE":"Professional, Enterprise,Community"})):
install_path = tools.vs_installation_path("15")
self.assertIn("Community", install_path)
def vvcars_command_test(self):
fake_settings = settings.Settings({"os":"Windows", "arch": "x86_64"})
# preference order with VS 15
with(tools.environment_append({"CONAN_VS_INSTALLATION_PREFERENCE":"BuildTools, Community,Professional, Enterprise"})):
command = tools.vcvars_command(settings=fake_settings, compiler_version="15")
self.assertNotIn("Community", command)
self.assertIn("VC/Auxiliary/Build/vcvarsall.bat", command)
self.assertIn("Microsoft Visual Studio\\2017\\BuildTools", command)
self.assertIn("VSCMD_START_DIR", command)
with(tools.environment_append({"CONAN_VS_INSTALLATION_PREFERENCE":"Professional, Enterprise,Community"})):
command = tools.vcvars_command(settings=fake_settings, compiler_version="15")
self.assertNotIn("BuildTools", command)
self.assertIn("VC/Auxiliary/Build/vcvarsall.bat", command)
self.assertIn("Microsoft Visual Studio\\2017\\Community", command)
self.assertIn("VSCMD_START_DIR", command)
# With VS 14 order of preference does not apply
command = tools.vcvars_command(settings=fake_settings, compiler_version="14")
self.assertNotIn("VSCMD_START_DIR", command)
self.assertIn("VC/vcvarsall.bat", command)
self.assertIn("Microsoft Visual Studio 14.0\\", command)
def build_test(self):
conan_build_vs = """
from conans import ConanFile, MSBuild, tools
class HelloConan(ConanFile):
name = "Hello"
version = "1.2.1"
settings = "os", "build_type", "arch", "compiler"
export_source = "*"
def build(self):
msbuild = MSBuild(self)
msbuild.build("MyProject.sln", upgrade_project=False)
"""
client = TestClient()
files = get_vs_project_files()
files["conanfile.py"] = conan_build_vs
client.save(files)
with(tools.environment_append({"CONAN_PRINT_RUN_COMMANDS": "1"})):
with(tools.environment_append({"CONAN_VS_INSTALLATION_PREFERENCE": "BuildTools"})):
client.run("install .")
client.run("build .")
self.assertIn("BuildTools", client.out)
conan_build_vs = conan_build_vs.replace("upgrade_project=False", "upgrade_project=True")
files["conanfile.py"] = conan_build_vs
client.save(files)
with(tools.environment_append({"CONAN_VS_INSTALLATION_PREFERENCE":"BuildTools",
"CONAN_SKIP_VS_PROJECTS_UPGRADE":"True"})):
client.run("install .")
client.run("build .")
self.assertIn("BuildTools", client.out)
|
[
"conans.tools.vs_comntools",
"conans.model.settings.Settings",
"conans.tools.vcvars_command",
"nose.SkipTest",
"conans.tools.vs_installation_path",
"platform.system",
"conans.tools.environment_append",
"conans.test.assets.visual_project_files.get_vs_project_files",
"conans.tools.vswhere",
"conans.test.utils.tools.TestClient",
"conans.model.version.Version"
] |
[((1400, 1424), 'conans.tools.vs_comntools', 'tools.vs_comntools', (['"""14"""'], {}), "('14')\n", (1418, 1424), False, 'from conans import tools\n'), ((1629, 1653), 'conans.tools.vs_comntools', 'tools.vs_comntools', (['"""15"""'], {}), "('15')\n", (1647, 1653), False, 'from conans import tools\n'), ((2121, 2150), 'conans.tools.vswhere', 'tools.vswhere', ([], {'products': "['*']"}), "(products=['*'])\n", (2134, 2150), False, 'from conans import tools\n'), ((2617, 2643), 'conans.tools.vswhere', 'tools.vswhere', ([], {'legacy': '(True)'}), '(legacy=True)\n', (2630, 2643), False, 'from conans import tools\n'), ((3104, 3133), 'conans.tools.vswhere', 'tools.vswhere', ([], {'products': "['*']"}), "(products=['*'])\n", (3117, 3133), False, 'from conans import tools\n'), ((3154, 3181), 'conans.tools.vswhere', 'tools.vswhere', ([], {'legacy': "['*']"}), "(legacy=['*'])\n", (3167, 3181), False, 'from conans import tools\n'), ((3943, 3975), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {}), "('15')\n", (3969, 3975), False, 'from conans import tools\n'), ((4048, 4080), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""14"""'], {}), "('14')\n", (4074, 4080), False, 'from conans import tools\n'), ((4209, 4268), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {'preference': "['BuildTools']"}), "('15', preference=['BuildTools'])\n", (4235, 4268), False, 'from conans import tools\n'), ((4342, 4401), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""14"""'], {'preference': "['BuildTools']"}), "('14', preference=['BuildTools'])\n", (4368, 4401), False, 'from conans import tools\n'), ((4535, 4594), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {'preference': "['Enterprise']"}), "('15', preference=['Enterprise'])\n", (4561, 4594), False, 'from conans import tools\n'), ((4658, 4719), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {'preference': "['Professional']"}), "('15', preference=['Professional'])\n", (4684, 4719), False, 'from conans import tools\n'), ((4818, 4924), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {'preference': "['BuildTools', 'Community', 'Professional', 'Enterprise']"}), "('15', preference=['BuildTools', 'Community',\n 'Professional', 'Enterprise'])\n", (4844, 4924), False, 'from conans import tools\n'), ((4995, 5087), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {'preference': "['Professional', 'Enterprise', 'Community']"}), "('15', preference=['Professional', 'Enterprise',\n 'Community'])\n", (5021, 5087), False, 'from conans import tools\n'), ((5710, 5764), 'conans.model.settings.Settings', 'settings.Settings', (["{'os': 'Windows', 'arch': 'x86_64'}"], {}), "({'os': 'Windows', 'arch': 'x86_64'})\n", (5727, 5764), False, 'from conans.model import settings\n'), ((6813, 6880), 'conans.tools.vcvars_command', 'tools.vcvars_command', ([], {'settings': 'fake_settings', 'compiler_version': '"""14"""'}), "(settings=fake_settings, compiler_version='14')\n", (6833, 6880), False, 'from conans import tools\n'), ((7437, 7449), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (7447, 7449), False, 'from conans.test.utils.tools import TestClient\n'), ((7466, 7488), 'conans.test.assets.visual_project_files.get_vs_project_files', 'get_vs_project_files', ([], {}), '()\n', (7486, 7488), False, 'from conans.test.assets.visual_project_files import get_vs_project_files\n'), ((930, 947), 'platform.system', 'platform.system', ([], {}), '()\n', (945, 947), False, 'import platform\n'), ((980, 1014), 'nose.SkipTest', 'nose.SkipTest', (['"""Only Windows test"""'], {}), "('Only Windows test')\n", (993, 1014), False, 'import nose\n'), ((1026, 1049), 'conans.model.version.Version', 'Version', (['client_version'], {}), '(client_version)\n', (1033, 1049), False, 'from conans.model.version import Version\n'), ((1052, 1072), 'conans.model.version.Version', 'Version', (['"""1.1.0-dev"""'], {}), "('1.1.0-dev')\n", (1059, 1072), False, 'from conans.model.version import Version\n'), ((1092, 1134), 'nose.SkipTest', 'nose.SkipTest', (['"""Only >= 1.1.0-dev version"""'], {}), "('Only >= 1.1.0-dev version')\n", (1105, 1134), False, 'import nose\n'), ((1202, 1264), 'conans.tools.environment_append', 'tools.environment_append', (["{'VS150COMNTOOLS': 'fake/path/here'}"], {}), "({'VS150COMNTOOLS': 'fake/path/here'})\n", (1226, 1264), False, 'from conans import tools\n'), ((1285, 1309), 'conans.tools.vs_comntools', 'tools.vs_comntools', (['"""15"""'], {}), "('15')\n", (1303, 1309), False, 'from conans import tools\n'), ((1954, 1969), 'conans.tools.vswhere', 'tools.vswhere', ([], {}), '()\n', (1967, 1969), False, 'from conans import tools\n'), ((5185, 5301), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_VS_INSTALLATION_PREFERENCE':\n 'BuildTools, Community,Professional, Enterprise'}"], {}), "({'CONAN_VS_INSTALLATION_PREFERENCE':\n 'BuildTools, Community,Professional, Enterprise'})\n", (5209, 5301), False, 'from conans import tools\n'), ((5326, 5358), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {}), "('15')\n", (5352, 5358), False, 'from conans import tools\n'), ((5435, 5539), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_VS_INSTALLATION_PREFERENCE': 'Professional, Enterprise,Community'}"], {}), "({'CONAN_VS_INSTALLATION_PREFERENCE':\n 'Professional, Enterprise,Community'})\n", (5459, 5539), False, 'from conans import tools\n'), ((5564, 5596), 'conans.tools.vs_installation_path', 'tools.vs_installation_path', (['"""15"""'], {}), "('15')\n", (5590, 5596), False, 'from conans import tools\n'), ((5816, 5932), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_VS_INSTALLATION_PREFERENCE':\n 'BuildTools, Community,Professional, Enterprise'}"], {}), "({'CONAN_VS_INSTALLATION_PREFERENCE':\n 'BuildTools, Community,Professional, Enterprise'})\n", (5840, 5932), False, 'from conans import tools\n'), ((5952, 6019), 'conans.tools.vcvars_command', 'tools.vcvars_command', ([], {'settings': 'fake_settings', 'compiler_version': '"""15"""'}), "(settings=fake_settings, compiler_version='15')\n", (5972, 6019), False, 'from conans import tools\n'), ((6290, 6394), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_VS_INSTALLATION_PREFERENCE': 'Professional, Enterprise,Community'}"], {}), "({'CONAN_VS_INSTALLATION_PREFERENCE':\n 'Professional, Enterprise,Community'})\n", (6314, 6394), False, 'from conans import tools\n'), ((6414, 6481), 'conans.tools.vcvars_command', 'tools.vcvars_command', ([], {'settings': 'fake_settings', 'compiler_version': '"""15"""'}), "(settings=fake_settings, compiler_version='15')\n", (6434, 6481), False, 'from conans import tools\n'), ((7577, 7636), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_PRINT_RUN_COMMANDS': '1'}"], {}), "({'CONAN_PRINT_RUN_COMMANDS': '1'})\n", (7601, 7636), False, 'from conans import tools\n'), ((7656, 7732), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_VS_INSTALLATION_PREFERENCE': 'BuildTools'}"], {}), "({'CONAN_VS_INSTALLATION_PREFERENCE': 'BuildTools'})\n", (7680, 7732), False, 'from conans import tools\n'), ((8071, 8193), 'conans.tools.environment_append', 'tools.environment_append', (["{'CONAN_VS_INSTALLATION_PREFERENCE': 'BuildTools',\n 'CONAN_SKIP_VS_PROJECTS_UPGRADE': 'True'}"], {}), "({'CONAN_VS_INSTALLATION_PREFERENCE': 'BuildTools',\n 'CONAN_SKIP_VS_PROJECTS_UPGRADE': 'True'})\n", (8095, 8193), False, 'from conans import tools\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import argparse
import json
import os
from pinliner import __version__
import sys
TEMPLATE_FILE = 'importer.template'
TEMPLATE_PATTERN = '${CONTENTS}'
def output(cfg, what, newline=True):
# We need indentation for PEP8
cfg.outfile.write(what)
if newline:
cfg.outfile.write(os.linesep)
def process_file(cfg, base_dir, package_path):
if cfg.tagging:
output(cfg, '<tag:' + package_path + '>')
path = os.path.splitext(package_path)[0].replace(os.path.sep, '.')
package_start = cfg.outfile.tell()
full_path = os.path.join(base_dir, package_path)
with open(full_path, 'r') as f:
# Read the whole file
code = f.read()
# Insert escape character before ''' since we'll be using ''' to insert
# the code as a string
output(cfg, code.replace("'''", r"\'''"), newline=cfg.tagging)
package_end = cfg.outfile.tell()
is_package = 1 if path.endswith('__init__') else 0
if is_package:
path = path[:-9]
# Get file timestamp
timestamp = int(os.path.getmtime(full_path))
return path, is_package, package_start, package_end, timestamp
def template(cfg):
template_path = os.path.join(os.path.dirname(__file__), TEMPLATE_FILE)
with open(template_path) as f:
template = f.read()
prefix_end = template.index(TEMPLATE_PATTERN)
prefix_data = template[:prefix_end].replace('%{FORCE_EXC_HOOK}',
str(cfg.set_hook))
prefix_data = prefix_data.replace('%{DEFAULT_PACKAGE}',
cfg.default_package)
cfg.outfile.write(prefix_data)
postfix_begin = prefix_end + len(TEMPLATE_PATTERN)
return template[postfix_begin:]
def process_directory(cfg, base_dir, package_path):
files = []
contents = os.listdir(os.path.join(base_dir, package_path))
for content in contents:
next_path = os.path.join(package_path, content)
path = os.path.join(base_dir, next_path)
if is_module(path):
files.append(process_file(cfg, base_dir, next_path))
elif is_package(path):
files.extend(process_directory(cfg, base_dir, next_path))
return files
def process_files(cfg):
# template would look better as a context manager
postfix = template(cfg)
files = []
output(cfg, "'''")
for package_path in cfg.packages:
base_dir, module_name = os.path.split(package_path)
files.extend(process_directory(cfg, base_dir, module_name))
output(cfg, "'''")
# Transform the list into a dictionary
inliner_packages = {data[0]: data[1:] for data in files}
# Generate the references to the positions of the different packages and
# modules inside the main file.
# We don't use indent to decrease the number of bytes in the file
data = json.dumps(inliner_packages)
output(cfg, 2 * os.linesep + 'inliner_packages = ', newline=False)
data = data.replace('],', '],' + os.linesep + ' ')
data = data.replace('[', '[' + os.linesep + 8 * ' ')
data = '%s%s %s%s%s' % (data[0], os.linesep, data[1:-1], os.linesep,
data[-1])
output(cfg, data)
# No newline on last line, as we want output file to be PEP8 compliant.
output(cfg, postfix, newline=False)
cfg.outfile.close()
def parse_args():
class MyParser(argparse.ArgumentParser):
"""Class to print verbose help on error."""
def error(self, message):
self.print_help()
sys.stderr.write('\nERROR: %s\n' % message)
sys.exit(2)
general_description = """Pinliner - Python Inliner (Version %s)
This tool allows you to merge all files that comprise a Python package into
a single file and be able to use this single file as if it were a package.
Imports will work as usual so if you have a package structure like:
.
└── [my_package]
├── file_a.py
├── [sub_package]
│ ├── file_b.py
│ └── __init__.py
├── __init__.py
And you execute:
$ mkdir test
$ pinliner my_package test/my_package.py
$ cd test
$ python
You'll be able to use this file as if it were the real package:
>>> import my_package
>>> from my_package import file_a as a_file
>>> from my_package.sub_package import file_b
And __init__.py contents will be executed as expected when importing
my_package and you'll be able to access its contents like you would with your
normal package. Modules will also behave as usual.
By default there is no visible separation between the different modules'
source code, but one can be enabled for clarity with option --tag, which will
include a newline and a <tag:file_path> tag before each of the source files.
""" % __version__
general_epilog = None
parser = MyParser(description=general_description,
epilog=general_epilog, argument_default='',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('packages', nargs='+', help='Packages to inline.')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('-o', '--outfile', nargs='?',
type=argparse.FileType('w'),
default=sys.stdout, help='Output file.')
parser.add_argument('--set-except', default=None, dest='set_hook',
action='store_true',
help='Force setting handler for uncaught exceptions.')
parser.add_argument('--no-except', default=None, dest='set_hook',
action='store_false',
help="Don't set handler for uncaught exceptions.")
parser.add_argument('--tag', default=False, dest='tagging',
action='store_true',
help="Mark with <tag:file_path> each added file.")
parser.add_argument('-d', '--default-pkg', default=None,
dest='default_package',
help='Define the default package when multiple '
'packages are inlined.')
cfg = parser.parse_args()
# If user didn't pass a default package determine one ourselves.
if cfg.default_package is None:
        # For a single package file the default is that package; for multiple
        # package files the default is none (act as a bundle).
def_file = cfg.packages[0] if len(cfg.packages) == 1 else ''
cfg.default_package = def_file
return cfg
def is_module(module):
# This validation is poor, but good enough for now
return os.path.isfile(module) and module.endswith('.py')
def is_package(package):
init_file = os.path.join(package, '__init__.py')
return os.path.isdir(package) and os.path.isfile(init_file)
def validate_args(cfg):
missing = False
    # This is weird now, but in the future we'll allow inlining multiple
    # packages
for package in cfg.packages:
if not is_package(package):
sys.stderr.write('ERROR: %s is not a python package' % package)
missing = True
if missing:
sys.exit(1)
if cfg.default_package:
if cfg.default_package not in cfg.packages:
            sys.stderr.write('ERROR: %s is not a valid default package' %
                             cfg.default_package)
sys.exit(2)
# Convert the default package from path to package
cfg.default_package = os.path.split(cfg.default_package)[1]
def main():
cfg = parse_args()
validate_args(cfg)
process_files(cfg)
if __name__ == '__main__':
main()
|
[
"argparse.FileType",
"json.dumps",
"os.path.join",
"os.path.splitext",
"os.path.split",
"os.path.isfile",
"os.path.dirname",
"sys.stderr.write",
"os.path.isdir",
"sys.exit",
"os.path.getmtime"
] |
[((640, 676), 'os.path.join', 'os.path.join', (['base_dir', 'package_path'], {}), '(base_dir, package_path)\n', (652, 676), False, 'import os\n'), ((2931, 2959), 'json.dumps', 'json.dumps', (['inliner_packages'], {}), '(inliner_packages)\n', (2941, 2959), False, 'import json\n'), ((6878, 6914), 'os.path.join', 'os.path.join', (['package', '"""__init__.py"""'], {}), "(package, '__init__.py')\n", (6890, 6914), False, 'import os\n'), ((1132, 1159), 'os.path.getmtime', 'os.path.getmtime', (['full_path'], {}), '(full_path)\n', (1148, 1159), False, 'import os\n'), ((1282, 1307), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1297, 1307), False, 'import os\n'), ((1913, 1949), 'os.path.join', 'os.path.join', (['base_dir', 'package_path'], {}), '(base_dir, package_path)\n', (1925, 1949), False, 'import os\n'), ((2000, 2035), 'os.path.join', 'os.path.join', (['package_path', 'content'], {}), '(package_path, content)\n', (2012, 2035), False, 'import os\n'), ((2051, 2084), 'os.path.join', 'os.path.join', (['base_dir', 'next_path'], {}), '(base_dir, next_path)\n', (2063, 2084), False, 'import os\n'), ((2512, 2539), 'os.path.split', 'os.path.split', (['package_path'], {}), '(package_path)\n', (2525, 2539), False, 'import os\n'), ((6785, 6807), 'os.path.isfile', 'os.path.isfile', (['module'], {}), '(module)\n', (6799, 6807), False, 'import os\n'), ((6926, 6948), 'os.path.isdir', 'os.path.isdir', (['package'], {}), '(package)\n', (6939, 6948), False, 'import os\n'), ((6953, 6978), 'os.path.isfile', 'os.path.isfile', (['init_file'], {}), '(init_file)\n', (6967, 6978), False, 'import os\n'), ((7310, 7321), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7318, 7321), False, 'import sys\n'), ((3618, 3661), 'sys.stderr.write', 'sys.stderr.write', (["('\\nERROR: %s\\n' % message)"], {}), "('\\nERROR: %s\\n' % message)\n", (3634, 3661), False, 'import sys\n'), ((3674, 3685), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3682, 3685), False, 'import sys\n'), ((5410, 5432), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (5427, 5432), False, 'import argparse\n'), ((7195, 7258), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR: %s is not a python package' % package)"], {}), "('ERROR: %s is not a python package' % package)\n", (7211, 7258), False, 'import sys\n'), ((7415, 7493), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR: %s is not a valid default package' % cfg.default_pkg)"], {}), "('ERROR: %s is not a valid default package' % cfg.default_pkg)\n", (7431, 7493), False, 'import sys\n'), ((7535, 7546), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7543, 7546), False, 'import sys\n'), ((7636, 7670), 'os.path.split', 'os.path.split', (['cfg.default_package'], {}), '(cfg.default_package)\n', (7649, 7670), False, 'import os\n'), ((525, 555), 'os.path.splitext', 'os.path.splitext', (['package_path'], {}), '(package_path)\n', (541, 555), False, 'import os\n')]
|
from dotenv import load_dotenv
load_dotenv()
import os
import boto3
#s3 = boto3.resource('s3')
s3 = boto3.resource('s3', aws_access_key_id=os.environ.get("AWS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_KEY"))
for bucket in s3.buckets.all():
print(bucket.name)
|
[
"os.environ.get",
"dotenv.load_dotenv"
] |
[((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv\n'), ((141, 169), 'os.environ.get', 'os.environ.get', (['"""AWS_KEY_ID"""'], {}), "('AWS_KEY_ID')\n", (155, 169), False, 'import os\n'), ((215, 247), 'os.environ.get', 'os.environ.get', (['"""AWS_SECRET_KEY"""'], {}), "('AWS_SECRET_KEY')\n", (229, 247), False, 'import os\n')]
|
#!/usr/bin/env python
# filename: pair.py
#
# Copyright (c) 2015 <NAME>
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import copy
import sys
import traceback
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from abtools import germlines
from abtools.alignment import global_alignment
from abtools.sequence import Sequence
class Pair(object):
'''
Holds a pair of sequences, corresponding to HC and LC of a single mAb.
Input is a list of dicts, with each dict containing sequence information from a single
chain, formatted as would be returned from a query on a MongoDB database containing
AbStar output.
'''
def __init__(self, seqs, name=None, h_selection_func=None, l_selection_func=None):
self._seqs = seqs
self._heavy = None
self._light = None
self._heavies = [s for s in seqs if s['chain'] == 'heavy']
self._lights = [s for s in seqs if s['chain'] in ['kappa', 'lambda']]
self._name = name
self._fasta = None
self._sample = None
self._subject = None
self._group = None
self._experiment = None
self._timepoint = None
self._is_pair = None
self._vrc01_like = None
self._lineage = None
self._select_heavy = h_selection_func
self._select_light = l_selection_func
def __eq__(self, other):
return (self.heavy, self.light) == (other.heavy, other.light)
def __ne__(self, other):
return not self == other
def __hash(self):
return hash((self.heavy, self.light))
@property
def heavy(self):
if self._heavy is None:
# self._heavies = [s for s in self._seqs if s['chain'] == 'heavy']
if len(self._heavies) > 0:
if self._select_heavy is not None:
self._heavy = Sequence(self._select_heavy(self._heavies))
else:
self._heavy = Sequence(self._heavies[0])
else:
self._heavy = None
return self._heavy
@heavy.setter
def heavy(self, heavy):
self._heavy = heavy
@property
def light(self):
if self._light is None:
# self._lights = [s for s in self._seqs if s['chain'] in ['kappa', 'lambda']]
if len(self._lights) > 0:
if self._select_light is not None:
self._light = Sequence(self._select_light(self._lights))
else:
self._light = Sequence(self._lights[0])
else:
self._light = None
return self._light
@light.setter
def light(self, light):
self._light = light
@property
def is_pair(self):
if all([self.heavy is not None, self.light is not None]):
return True
return False
@property
def lineage(self):
if self._lineage is None:
self._lineage = self.heavy['clonify']['id']
return self._lineage
@property
def vrc01_like(self):
if self._vrc01_like is None:
if any([self.heavy is None, self.light is None]):
self._vrc01_like = False
else:
self._vrc01_like = all([self.heavy['v_gene']['gene'] == 'IGHV1-2', self.light['cdr3_len'] == 5])
return self._vrc01_like
@property
def name(self):
if self._name is None:
if self.heavy is not None:
self._name = self.heavy['seq_id']
elif self.light is not None:
self._name = self.light['seq_id']
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def sample(self):
if self._sample is None:
slist = []
if self.experiment is not None:
slist.append(str(self.experiment))
if self.group is not None:
slist.append(str(self.group))
if self.subject is not None:
slist.append(str(self.subject))
if self.timepoint is not None:
slist.append(str(self.timepoint))
if slist:
self._sample = '|'.join(slist)
return self._sample
@property
def subject(self):
if self._subject is None:
if self.heavy is not None and 'subject' in list(self.heavy.keys()):
self._subject = self.heavy['subject']
elif self.light is not None and 'subject' in list(self.light.keys()):
self._subject = self.light['subject']
return self._subject
@subject.setter
def subject(self, subject):
self._subject = subject
@property
def group(self):
if self._group is None:
if self.heavy is not None and 'group' in list(self.heavy.keys()):
self._group = self.heavy['group']
elif self.light is not None and 'group' in list(self.light.keys()):
self._group = self.light['group']
return self._group
@group.setter
def group(self, group):
self._group = group
@property
def experiment(self):
if self._experiment is None:
if self.heavy is not None and 'experiment' in list(self.heavy.keys()):
self._experiment = self.heavy['experiment']
elif self.light is not None and 'experiment' in list(self.light.keys()):
self._experiment = self.light['experiment']
return self._experiment
@experiment.setter
def experiment(self, experiment):
self._experiment = experiment
@property
def timepoint(self):
if self._timepoint is None:
if self.heavy is not None and 'timepoint' in list(self.heavy.keys()):
self._timepoint = self.heavy['timepoint']
elif self.light is not None and 'timepoint' in list(self.light.keys()):
self._timepoint = self.light['timepoint']
return self._timepoint
@timepoint.setter
def timepoint(self, timepoint):
self._timepoint = timepoint
def refine(self, heavy=True, light=True, species='human'):
for seq in [s for s in [self.heavy, self.light] if s is not None]:
try:
self.remove_ambigs(seq)
self._refine_v(seq, species)
self._refine_j(seq, species)
self._retranslate(seq)
except:
                print('REFINEMENT FAILED: {}, {} chain'.format(seq['seq_id'], seq['chain']))
print(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1]))
@staticmethod
def remove_ambigs(seq):
# fix Ns in the nucleotide sequence
vdj = ''
for s, g in zip(seq['vdj_nt'], seq['vdj_germ_nt']):
if s.upper() == 'N':
vdj += g
else:
vdj += s
seq['vdj_nt'] = vdj
# fix Xs in the amino acid sequence
vdj = ''
for s, g in zip(seq['vdj_aa'], seq['vdj_germ_aa']):
if s.upper() == 'X':
vdj += g
else:
vdj += s
seq['vdj_aa'] = vdj
@staticmethod
def _refine_v(seq, species):
'''
        Completes the 5' end of a truncated sequence with germline nucleotides.
Input is a MongoDB dict (seq) and the species.
'''
vgerm = germlines.get_germline(seq['v_gene']['full'], species)
aln = global_alignment(seq['vdj_nt'], vgerm)
prepend = ''
for s, g in zip(aln.aligned_query, aln.aligned_target):
if s != '-':
break
else:
prepend += g
seq['vdj_nt'] = prepend + seq['vdj_nt']
@staticmethod
def _refine_j(seq, species):
'''
        Completes the 3' end of a truncated sequence with germline nucleotides.
Input is a MongoDB dict (seq) and the species.
'''
jgerm = germlines.get_germline(seq['j_gene']['full'], species)
aln = global_alignment(seq['vdj_nt'], jgerm)
append = ''
for s, g in zip(aln.aligned_query[::-1], aln.aligned_target[::-1]):
if s != '-':
break
else:
append += g
seq['vdj_nt'] = seq['vdj_nt'] + append[::-1]
@staticmethod
def _retranslate(seq):
'''
Retranslates a nucleotide sequence following refinement.
Input is a Pair sequence (basically a dict of MongoDB output).
'''
if len(seq['vdj_nt']) % 3 != 0:
trunc = len(seq['vdj_nt']) % 3
seq['vdj_nt'] = seq['vdj_nt'][:-trunc]
seq['vdj_aa'] = Seq(seq['vdj_nt'], generic_dna).translate()
def fasta(self, key='vdj_nt', append_chain=True):
'''
Returns the sequence pair as a fasta string. If the Pair object contains
both heavy and light chain sequences, both will be returned as a single string.
By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change,
use the <key> option to select an alternate sequence.
By default, the chain (heavy or light) will be appended to the sequence name:
>MySequence_heavy
To just use the pair name (which will result in duplicate sequence names for Pair objects
with both heavy and light chains), set <append_chain> to False.
'''
fastas = []
for s, chain in [(self.heavy, 'heavy'), (self.light, 'light')]:
if s is not None:
c = '_{}'.format(chain) if append_chain else ''
fastas.append('>{}{}\n{}'.format(s['seq_id'], c, s[key]))
return '\n'.join(fastas)
def get_pairs(db, collection, experiment=None, subject=None, group=None, name='seq_id',
delim=None, delim_occurance=1, pairs_only=False):
'''
Gets sequences and assigns them to the appropriate mAb pair, based on the sequence name.
Inputs:
::db:: is a pymongo database connection object
::collection:: is the collection name, as a string
If ::subject:: is provided, only sequences with a 'subject' field matching ::subject:: will
be included. ::subject:: can be either a single subject (as a string) or an iterable
(list or tuple) of subject strings.
If ::group:: is provided, only sequences with a 'group' field matching ::group:: will
be included. ::group:: can be either a single group (as a string) or an iterable
(list or tuple) of group strings.
::name:: is the dict key of the field to be used to group the sequences into pairs.
Default is 'seq_id'
::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
Default is None, which results in no name truncation.
    ::delim_occurance:: is the occurrence of the delimiter at which to trim. Trimming is performed
        as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
        truncate at the last occurrence of delim. Default is 1.
    ::pairs_only:: setting this to True results in only truly paired sequences (pair.is_pair == True)
        being returned. Default is False.
Returns a list of Pair objects, one for each mAb pair.
'''
match = {}
if subject is not None:
if type(subject) in (list, tuple):
match['subject'] = {'$in': subject}
elif type(subject) in (str, str):
match['subject'] = subject
if group is not None:
if type(group) in (list, tuple):
match['group'] = {'$in': group}
elif type(group) in (str, str):
match['group'] = group
if experiment is not None:
if type(experiment) in (list, tuple):
match['experiment'] = {'$in': experiment}
elif type(experiment) in (str, str):
match['experiment'] = experiment
seqs = list(db[collection].find(match))
return assign_pairs(seqs, name=name, delim=delim,
delim_occurance=delim_occurance, pairs_only=pairs_only)
def assign_pairs(seqs, name='seq_id', delim=None, delim_occurance=1, pairs_only=False):
'''
Assigns sequences to the appropriate mAb pair, based on the sequence name.
Inputs:
::seqs:: is a list of dicts, of the format returned by querying a MongoDB containing
Abstar output.
::name:: is the dict key of the field to be used to group the sequences into pairs.
Default is 'seq_id'
::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
Default is None, which results in no name truncation.
    ::delim_occurance:: is the occurrence of the delimiter at which to trim. Trimming is performed
        as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
        truncate at the last occurrence of delim. Default is 1 (a short example follows this function).
    ::pairs_only:: setting this to True results in only truly paired sequences (pair.is_pair == True)
        being returned. Default is False.
Returns a list of Pair objects, one for each mAb pair.
'''
pdict = {}
for s in seqs:
if delim is not None:
pname = delim.join(s[name].split(delim)[:delim_occurance])
else:
pname = s[name]
if pname not in pdict:
pdict[pname] = [s, ]
else:
pdict[pname].append(s)
pairs = [Pair(pdict[n], name=n) for n in list(pdict.keys())]
if pairs_only:
pairs = [p for p in pairs if p.is_pair]
return pairs
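# A minimal illustrative sketch (not from the original module) of the ::delim::/
# ::delim_occurance:: trimming rule described in the docstring above; the sequence
# name below is made up.
_example_name = 'ABC123_heavy_1'
assert '_'.join(_example_name.split('_')[:1]) == 'ABC123'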
def deduplicate(pairs, aa=False, ignore_primer_regions=False):
'''
Removes duplicate sequences from a list of Pair objects.
If a Pair has heavy and light chains, both chains must identically match heavy and light chains
from another Pair to be considered a duplicate. If a Pair has only a single chain,
identical matches to that chain will cause the single chain Pair to be considered a duplicate,
even if the comparison Pair has both chains.
Note that identical sequences are identified by simple string comparison, so sequences of
different length that are identical over the entirety of the shorter sequence are not
considered duplicates.
By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead,
set aa=True.
'''
nr_pairs = []
just_pairs = [p for p in pairs if p.is_pair]
single_chains = [p for p in pairs if not p.is_pair]
_pairs = just_pairs + single_chains
for p in _pairs:
duplicates = []
for nr in nr_pairs:
identical = True
vdj = 'vdj_aa' if aa else 'vdj_nt'
offset = 4 if aa else 12
if p.heavy is not None:
if nr.heavy is None:
identical = False
else:
heavy = p.heavy[vdj][offset:-offset] if ignore_primer_regions else p.heavy[vdj]
nr_heavy = nr.heavy[vdj][offset:-offset] if ignore_primer_regions else nr.heavy[vdj]
if heavy != nr_heavy:
identical = False
if p.light is not None:
if nr.light is None:
identical = False
else:
light = p.light[vdj][offset:-offset] if ignore_primer_regions else p.light[vdj]
nr_light = nr.light[vdj][offset:-offset] if ignore_primer_regions else nr.light[vdj]
if light != nr_light:
identical = False
duplicates.append(identical)
if any(duplicates):
continue
else:
nr_pairs.append(p)
return nr_pairs
def refine(pairs, heavy=True, light=True, species='human'):
refined_pairs = copy.deepcopy(pairs)
for p in refined_pairs:
p.refine(heavy, light, species)
return refined_pairs
|
[
"abtools.germlines.get_germline",
"Bio.Seq.Seq",
"abtools.sequence.Sequence",
"abtools.alignment.global_alignment",
"sys.exc_info",
"copy.deepcopy"
] |
[((16837, 16857), 'copy.deepcopy', 'copy.deepcopy', (['pairs'], {}), '(pairs)\n', (16850, 16857), False, 'import copy\n'), ((8464, 8518), 'abtools.germlines.get_germline', 'germlines.get_germline', (["seq['v_gene']['full']", 'species'], {}), "(seq['v_gene']['full'], species)\n", (8486, 8518), False, 'from abtools import germlines\n'), ((8533, 8571), 'abtools.alignment.global_alignment', 'global_alignment', (["seq['vdj_nt']", 'vgerm'], {}), "(seq['vdj_nt'], vgerm)\n", (8549, 8571), False, 'from abtools.alignment import global_alignment\n'), ((9028, 9082), 'abtools.germlines.get_germline', 'germlines.get_germline', (["seq['j_gene']['full']", 'species'], {}), "(seq['j_gene']['full'], species)\n", (9050, 9082), False, 'from abtools import germlines\n'), ((9097, 9135), 'abtools.alignment.global_alignment', 'global_alignment', (["seq['vdj_nt']", 'jgerm'], {}), "(seq['vdj_nt'], jgerm)\n", (9113, 9135), False, 'from abtools.alignment import global_alignment\n'), ((9742, 9773), 'Bio.Seq.Seq', 'Seq', (["seq['vdj_nt']", 'generic_dna'], {}), "(seq['vdj_nt'], generic_dna)\n", (9745, 9773), False, 'from Bio.Seq import Seq\n'), ((3021, 3047), 'abtools.sequence.Sequence', 'Sequence', (['self._heavies[0]'], {}), '(self._heavies[0])\n', (3029, 3047), False, 'from abtools.sequence import Sequence\n'), ((3583, 3608), 'abtools.sequence.Sequence', 'Sequence', (['self._lights[0]'], {}), '(self._lights[0])\n', (3591, 3608), False, 'from abtools.sequence import Sequence\n'), ((7648, 7662), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7660, 7662), False, 'import sys\n'), ((7667, 7681), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7679, 7681), False, 'import sys\n')]
|
""" Main program to launch proc/hdfs.py
"""
import argparse
import logging
from pars import addargs
import sys
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
from proc.hdfs import DIRHDFS
def gettestargs(parser) :
i = "/home/sbartkowski/work/webhdfsdirectory/testdata/inputhdfs.txt"
return parser.parse_args([i,"inimical1","14000","sb","/user/sb","dir1","/tmp/download","--dryrun"])
def getargs(parser) :
return parser.parse_args(sys.argv[1:])
def readargs():
parser = argparse.ArgumentParser(
description='Download HDFS using WEB REST/API')
addargs(parser)
# return gettestargs(parser)
return getargs(parser)
def main():
args = readargs()
T = DIRHDFS(args.host[0], args.port[0], args.user[0],args.regexp,args.dryrun)
T.downloadhdfsdir(args.userdir[0], args.usersubdir[0], args.localdir[0])
if __name__ == "__main__":
# execute only if run as a script
main()
|
[
"logging.basicConfig",
"pars.addargs",
"proc.hdfs.DIRHDFS",
"argparse.ArgumentParser"
] |
[((127, 202), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s:%(message)s', level=logging.INFO)\n", (146, 202), False, 'import logging\n'), ((536, 607), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Download HDFS using WEB REST/API"""'}), "(description='Download HDFS using WEB REST/API')\n", (559, 607), False, 'import argparse\n'), ((621, 636), 'pars.addargs', 'addargs', (['parser'], {}), '(parser)\n', (628, 636), False, 'from pars import addargs\n'), ((741, 816), 'proc.hdfs.DIRHDFS', 'DIRHDFS', (['args.host[0]', 'args.port[0]', 'args.user[0]', 'args.regexp', 'args.dryrun'], {}), '(args.host[0], args.port[0], args.user[0], args.regexp, args.dryrun)\n', (748, 816), False, 'from proc.hdfs import DIRHDFS\n')]
|
"""JSON (de)serialization framework.
The framework presented here is somewhat based on `Go's "json" package`_
(especially the ``omitempty`` functionality).
.. _`Go's "json" package`: http://golang.org/pkg/encoding/json/
"""
import abc
import binascii
import logging
import OpenSSL
import six
from josepy import b64, errors, interfaces, util
logger = logging.getLogger(__name__)
class Field(object):
"""JSON object field.
:class:`Field` is meant to be used together with
:class:`JSONObjectWithFields`.
``encoder`` (``decoder``) is a callable that accepts a single
parameter, i.e. a value to be encoded (decoded), and returns the
serialized (deserialized) value. In case of errors it should raise
:class:`~josepy.errors.SerializationError`
(:class:`~josepy.errors.DeserializationError`).
    Note that ``decoder`` should perform partial serialization only.
:ivar str json_name: Name of the field when encoded to JSON.
:ivar default: Default value (used when not present in JSON object).
:ivar bool omitempty: If ``True`` and the field value is empty, then
it will not be included in the serialized JSON object, and
``default`` will be used for deserialization. Otherwise, if ``False``,
field is considered as required, value will always be included in the
serialized JSON objected, and it must also be present when
deserializing.
"""
__slots__ = ('json_name', 'default', 'omitempty', 'fdec', 'fenc')
def __init__(self, json_name, default=None, omitempty=False,
decoder=None, encoder=None):
# pylint: disable=too-many-arguments
self.json_name = json_name
self.default = default
self.omitempty = omitempty
self.fdec = self.default_decoder if decoder is None else decoder
self.fenc = self.default_encoder if encoder is None else encoder
@classmethod
def _empty(cls, value):
"""Is the provided value considered "empty" for this field?
This is useful for subclasses that might want to override the
definition of being empty, e.g. for some more exotic data types.
"""
return not isinstance(value, bool) and not value
def omit(self, value):
"""Omit the value in output?"""
return self._empty(value) and self.omitempty
def _update_params(self, **kwargs):
current = dict(json_name=self.json_name, default=self.default,
omitempty=self.omitempty,
decoder=self.fdec, encoder=self.fenc)
current.update(kwargs)
return type(self)(**current) # pylint: disable=star-args
def decoder(self, fdec):
"""Descriptor to change the decoder on JSON object field."""
return self._update_params(decoder=fdec)
def encoder(self, fenc):
"""Descriptor to change the encoder on JSON object field."""
return self._update_params(encoder=fenc)
def decode(self, value):
"""Decode a value, optionally with context JSON object."""
return self.fdec(value)
def encode(self, value):
"""Encode a value, optionally with context JSON object."""
return self.fenc(value)
@classmethod
def default_decoder(cls, value):
"""Default decoder.
Recursively deserialize into immutable types (
:class:`josepy.util.frozendict` instead of
:func:`dict`, :func:`tuple` instead of :func:`list`).
"""
# bases cases for different types returned by json.loads
if isinstance(value, list):
return tuple(cls.default_decoder(subvalue) for subvalue in value)
elif isinstance(value, dict):
return util.frozendict(
dict((cls.default_decoder(key), cls.default_decoder(value))
for key, value in six.iteritems(value)))
else: # integer or string
return value
@classmethod
def default_encoder(cls, value):
"""Default (passthrough) encoder."""
# field.to_partial_json() is no good as encoder has to do partial
# serialization only
return value
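# A small hand-written check (not part of the original module) of the ``omitempty``
# behaviour described in the class docstring: empty values are omitted only for
# fields declared with ``omitempty=True``.
assert Field('foo', omitempty=True).omit('')
assert not Field('foo').omit('')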
class JSONObjectWithFieldsMeta(abc.ABCMeta):
"""Metaclass for :class:`JSONObjectWithFields` and its subclasses.
It makes sure that, for any class ``cls`` with ``__metaclass__``
set to ``JSONObjectWithFieldsMeta``:
1. All fields (attributes of type :class:`Field`) in the class
definition are moved to the ``cls._fields`` dictionary, where
keys are field attribute names and values are fields themselves.
2. ``cls.__slots__`` is extended by all field attribute names
(i.e. not :attr:`Field.json_name`). Original ``cls.__slots__``
are stored in ``cls._orig_slots``.
    As a consequence, for a field attribute name ``some_field``,
``cls.some_field`` will be a slot descriptor and not an instance
of :class:`Field`. For example::
some_field = Field('someField', default=())
class Foo(object):
__metaclass__ = JSONObjectWithFieldsMeta
__slots__ = ('baz',)
some_field = some_field
assert Foo.__slots__ == ('some_field', 'baz')
assert Foo._orig_slots == ()
assert Foo.some_field is not Field
assert Foo._fields.keys() == ['some_field']
assert Foo._fields['some_field'] is some_field
As an implementation note, this metaclass inherits from
:class:`abc.ABCMeta` (and not the usual :class:`type`) to mitigate
the metaclass conflict (:class:`ImmutableMap` and
:class:`JSONDeSerializable`, parents of :class:`JSONObjectWithFields`,
use :class:`abc.ABCMeta` as its metaclass).
"""
def __new__(mcs, name, bases, dikt):
fields = {}
for base in bases:
fields.update(getattr(base, '_fields', {}))
# Do not reorder, this class might override fields from base classes!
for key, value in tuple(six.iteritems(dikt)):
# not six.iterkeys() (in-place edit!)
if isinstance(value, Field):
fields[key] = dikt.pop(key)
dikt['_orig_slots'] = dikt.get('__slots__', ())
dikt['__slots__'] = tuple(
list(dikt['_orig_slots']) + list(six.iterkeys(fields)))
dikt['_fields'] = fields
return abc.ABCMeta.__new__(mcs, name, bases, dikt)
@six.add_metaclass(JSONObjectWithFieldsMeta)
class JSONObjectWithFields(util.ImmutableMap, interfaces.JSONDeSerializable):
# pylint: disable=too-few-public-methods
"""JSON object with fields.
Example::
class Foo(JSONObjectWithFields):
bar = Field('Bar')
empty = Field('Empty', omitempty=True)
@bar.encoder
def bar(value):
return value + 'bar'
@bar.decoder
def bar(value):
if not value.endswith('bar'):
raise errors.DeserializationError('No bar suffix!')
return value[:-3]
assert Foo(bar='baz').to_partial_json() == {'Bar': 'bazbar'}
assert Foo.from_json({'Bar': 'bazbar'}) == Foo(bar='baz')
assert (Foo.from_json({'Bar': 'bazbar', 'Empty': '!'})
== Foo(bar='baz', empty='!'))
assert Foo(bar='baz').bar == 'baz'
"""
@classmethod
def _defaults(cls):
"""Get default fields values."""
return dict([(slot, field.default) for slot, field
in six.iteritems(cls._fields)])
def __init__(self, **kwargs):
# pylint: disable=star-args
super(JSONObjectWithFields, self).__init__(
**(dict(self._defaults(), **kwargs)))
def encode(self, name):
"""Encode a single field.
:param str name: Name of the field to be encoded.
:raises errors.SerializationError: if field cannot be serialized
:raises errors.Error: if field could not be found
"""
try:
field = self._fields[name]
except KeyError:
raise errors.Error("Field not found: {0}".format(name))
return field.encode(getattr(self, name))
def fields_to_partial_json(self):
"""Serialize fields to JSON."""
jobj = {}
omitted = set()
for slot, field in six.iteritems(self._fields):
value = getattr(self, slot)
if field.omit(value):
omitted.add((slot, value))
else:
try:
jobj[field.json_name] = field.encode(value)
except errors.SerializationError as error:
raise errors.SerializationError(
'Could not encode {0} ({1}): {2}'.format(
slot, value, error))
return jobj
def to_partial_json(self):
return self.fields_to_partial_json()
@classmethod
def _check_required(cls, jobj):
missing = set()
for _, field in six.iteritems(cls._fields):
if not field.omitempty and field.json_name not in jobj:
missing.add(field.json_name)
if missing:
raise errors.DeserializationError(
'The following fields are required: {0}'.format(
','.join(missing)))
@classmethod
def fields_from_json(cls, jobj):
"""Deserialize fields from JSON."""
cls._check_required(jobj)
fields = {}
for slot, field in six.iteritems(cls._fields):
if field.json_name not in jobj and field.omitempty:
fields[slot] = field.default
else:
value = jobj[field.json_name]
try:
fields[slot] = field.decode(value)
except errors.DeserializationError as error:
raise errors.DeserializationError(
'Could not decode {0!r} ({1!r}): {2}'.format(
slot, value, error))
return fields
@classmethod
def from_json(cls, jobj):
return cls(**cls.fields_from_json(jobj))
def encode_b64jose(data):
"""Encode JOSE Base-64 field.
:param bytes data:
:rtype: `unicode`
"""
# b64encode produces ASCII characters only
return b64.b64encode(data).decode('ascii')
def decode_b64jose(data, size=None, minimum=False):
"""Decode JOSE Base-64 field.
:param unicode data:
:param int size: Required length (after decoding).
:param bool minimum: If ``True``, then `size` will be treated as
minimum required length, as opposed to exact equality.
:rtype: bytes
"""
error_cls = TypeError if six.PY2 else binascii.Error
try:
decoded = b64.b64decode(data.encode())
except error_cls as error:
raise errors.DeserializationError(error)
if size is not None and ((not minimum and len(decoded) != size) or
(minimum and len(decoded) < size)):
raise errors.DeserializationError(
"Expected at least or exactly {0} bytes".format(size))
return decoded
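# Hand-written sanity check (not part of the original module): for well-formed input
# the two JOSE Base-64 helpers above are inverses; b'foo' is an arbitrary payload.
assert decode_b64jose(encode_b64jose(b'foo')) == b'foo'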
def encode_hex16(value):
"""Hexlify.
:param bytes value:
:rtype: unicode
"""
return binascii.hexlify(value).decode()
def decode_hex16(value, size=None, minimum=False):
"""Decode hexlified field.
:param unicode value:
:param int size: Required length (after decoding).
:param bool minimum: If ``True``, then `size` will be treated as
minimum required length, as opposed to exact equality.
:rtype: bytes
"""
value = value.encode()
if size is not None and ((not minimum and len(value) != size * 2) or
(minimum and len(value) < size * 2)):
raise errors.DeserializationError()
error_cls = TypeError if six.PY2 else binascii.Error
try:
return binascii.unhexlify(value)
except error_cls as error:
raise errors.DeserializationError(error)
def encode_cert(cert):
"""Encode certificate as JOSE Base-64 DER.
:type cert: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:rtype: unicode
"""
return encode_b64jose(OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1, cert.wrapped))
def decode_cert(b64der):
"""Decode JOSE Base-64 DER-encoded certificate.
:param unicode b64der:
:rtype: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
"""
try:
return util.ComparableX509(OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
except OpenSSL.crypto.Error as error:
raise errors.DeserializationError(error)
def encode_csr(csr):
"""Encode CSR as JOSE Base-64 DER.
:type csr: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
:rtype: unicode
"""
return encode_b64jose(OpenSSL.crypto.dump_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.wrapped))
def decode_csr(b64der):
"""Decode JOSE Base-64 DER-encoded CSR.
:param unicode b64der:
:rtype: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
try:
return util.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
except OpenSSL.crypto.Error as error:
raise errors.DeserializationError(error)
class TypedJSONObjectWithFields(JSONObjectWithFields):
"""JSON object with type."""
typ = NotImplemented
"""Type of the object. Subclasses must override."""
type_field_name = "type"
"""Field name used to distinguish different object types.
Subclasses will probably have to override this.
"""
TYPES = NotImplemented
"""Types registered for JSON deserialization"""
@classmethod
def register(cls, type_cls, typ=None):
"""Register class for JSON deserialization."""
typ = type_cls.typ if typ is None else typ
cls.TYPES[typ] = type_cls
return type_cls
@classmethod
def get_type_cls(cls, jobj):
"""Get the registered class for ``jobj``."""
if cls in six.itervalues(cls.TYPES):
if cls.type_field_name not in jobj:
raise errors.DeserializationError(
"Missing type field ({0})".format(cls.type_field_name))
# cls is already registered type_cls, force to use it
# so that, e.g Revocation.from_json(jobj) fails if
# jobj["type"] != "revocation".
return cls
if not isinstance(jobj, dict):
raise errors.DeserializationError(
"{0} is not a dictionary object".format(jobj))
try:
typ = jobj[cls.type_field_name]
except KeyError:
raise errors.DeserializationError("missing type field")
try:
return cls.TYPES[typ]
except KeyError:
raise errors.UnrecognizedTypeError(typ, jobj)
def to_partial_json(self):
"""Get JSON serializable object.
:returns: Serializable JSON object representing ACME typed object.
:meth:`validate` will almost certainly not work, due to reasons
explained in :class:`josepy.interfaces.IJSONSerializable`.
:rtype: dict
"""
jobj = self.fields_to_partial_json()
jobj[self.type_field_name] = self.typ
return jobj
@classmethod
def from_json(cls, jobj):
"""Deserialize ACME object from valid JSON object.
:raises josepy.errors.UnrecognizedTypeError: if type
of the ACME object has not been registered.
"""
# make sure subclasses don't cause infinite recursive from_json calls
type_cls = cls.get_type_cls(jobj)
return type_cls(**type_cls.fields_from_json(jobj))
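# A small, hypothetical usage sketch (not part of the original module): ``register``
# plus ``from_json`` dispatch deserialization to the subclass whose ``typ`` matches the
# JSON ``type`` field; the ``_Token``/``_Opaque`` names below are invented for illustration.
class _Token(TypedJSONObjectWithFields):
    TYPES = {}
@_Token.register
class _Opaque(_Token):
    typ = 'opaque'
    value = Field('value')
assert isinstance(_Token.from_json({'type': 'opaque', 'value': 'x'}), _Opaque)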
|
[
"logging.getLogger",
"OpenSSL.crypto.dump_certificate",
"six.itervalues",
"six.add_metaclass",
"binascii.hexlify",
"josepy.errors.UnrecognizedTypeError",
"abc.ABCMeta.__new__",
"OpenSSL.crypto.dump_certificate_request",
"josepy.b64.b64encode",
"josepy.errors.DeserializationError",
"six.iteritems",
"six.iterkeys",
"binascii.unhexlify"
] |
[((356, 383), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (373, 383), False, 'import logging\n'), ((6373, 6416), 'six.add_metaclass', 'six.add_metaclass', (['JSONObjectWithFieldsMeta'], {}), '(JSONObjectWithFieldsMeta)\n', (6390, 6416), False, 'import six\n'), ((6326, 6369), 'abc.ABCMeta.__new__', 'abc.ABCMeta.__new__', (['mcs', 'name', 'bases', 'dikt'], {}), '(mcs, name, bases, dikt)\n', (6345, 6369), False, 'import abc\n'), ((8251, 8278), 'six.iteritems', 'six.iteritems', (['self._fields'], {}), '(self._fields)\n', (8264, 8278), False, 'import six\n'), ((8927, 8953), 'six.iteritems', 'six.iteritems', (['cls._fields'], {}), '(cls._fields)\n', (8940, 8953), False, 'import six\n'), ((9421, 9447), 'six.iteritems', 'six.iteritems', (['cls._fields'], {}), '(cls._fields)\n', (9434, 9447), False, 'import six\n'), ((11700, 11729), 'josepy.errors.DeserializationError', 'errors.DeserializationError', ([], {}), '()\n', (11727, 11729), False, 'from josepy import b64, errors, interfaces, util\n'), ((11811, 11836), 'binascii.unhexlify', 'binascii.unhexlify', (['value'], {}), '(value)\n', (11829, 11836), False, 'import binascii\n'), ((12112, 12187), 'OpenSSL.crypto.dump_certificate', 'OpenSSL.crypto.dump_certificate', (['OpenSSL.crypto.FILETYPE_ASN1', 'cert.wrapped'], {}), '(OpenSSL.crypto.FILETYPE_ASN1, cert.wrapped)\n', (12143, 12187), False, 'import OpenSSL\n'), ((12799, 12886), 'OpenSSL.crypto.dump_certificate_request', 'OpenSSL.crypto.dump_certificate_request', (['OpenSSL.crypto.FILETYPE_ASN1', 'csr.wrapped'], {}), '(OpenSSL.crypto.FILETYPE_ASN1, csr.\n wrapped)\n', (12838, 12886), False, 'import OpenSSL\n'), ((5960, 5979), 'six.iteritems', 'six.iteritems', (['dikt'], {}), '(dikt)\n', (5973, 5979), False, 'import six\n'), ((10227, 10246), 'josepy.b64.b64encode', 'b64.b64encode', (['data'], {}), '(data)\n', (10240, 10246), False, 'from josepy import b64, errors, interfaces, util\n'), ((10750, 10784), 'josepy.errors.DeserializationError', 'errors.DeserializationError', (['error'], {}), '(error)\n', (10777, 10784), False, 'from josepy import b64, errors, interfaces, util\n'), ((11160, 11183), 'binascii.hexlify', 'binascii.hexlify', (['value'], {}), '(value)\n', (11176, 11183), False, 'import binascii\n'), ((11882, 11916), 'josepy.errors.DeserializationError', 'errors.DeserializationError', (['error'], {}), '(error)\n', (11909, 11916), False, 'from josepy import b64, errors, interfaces, util\n'), ((12577, 12611), 'josepy.errors.DeserializationError', 'errors.DeserializationError', (['error'], {}), '(error)\n', (12604, 12611), False, 'from josepy import b64, errors, interfaces, util\n'), ((13273, 13307), 'josepy.errors.DeserializationError', 'errors.DeserializationError', (['error'], {}), '(error)\n', (13300, 13307), False, 'from josepy import b64, errors, interfaces, util\n'), ((14061, 14086), 'six.itervalues', 'six.itervalues', (['cls.TYPES'], {}), '(cls.TYPES)\n', (14075, 14086), False, 'import six\n'), ((14709, 14758), 'josepy.errors.DeserializationError', 'errors.DeserializationError', (['"""missing type field"""'], {}), "('missing type field')\n", (14736, 14758), False, 'from josepy import b64, errors, interfaces, util\n'), ((14850, 14889), 'josepy.errors.UnrecognizedTypeError', 'errors.UnrecognizedTypeError', (['typ', 'jobj'], {}), '(typ, jobj)\n', (14878, 14889), False, 'from josepy import b64, errors, interfaces, util\n'), ((6254, 6274), 'six.iterkeys', 'six.iterkeys', (['fields'], {}), '(fields)\n', (6266, 6274), False, 'import six\n'), ((7439, 7465), 
'six.iteritems', 'six.iteritems', (['cls._fields'], {}), '(cls._fields)\n', (7452, 7465), False, 'import six\n'), ((3864, 3884), 'six.iteritems', 'six.iteritems', (['value'], {}), '(value)\n', (3877, 3884), False, 'import six\n')]
|
#
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
import jpy
_JCallbackAdapter = jpy.get_type('io.deephaven.server.plugin.python.CallbackAdapter')
def initialize_all_and_register_into(callback: _JCallbackAdapter):
try:
from . import register
except ModuleNotFoundError as e:
# deephaven.plugin is an optional dependency, so if it can't be found, there are no Deephaven python plugins
# to register
if e.name == 'deephaven.plugin':
return
raise e
register.initialize_all_and_register_into(callback)
|
[
"jpy.get_type"
] |
[((102, 167), 'jpy.get_type', 'jpy.get_type', (['"""io.deephaven.server.plugin.python.CallbackAdapter"""'], {}), "('io.deephaven.server.plugin.python.CallbackAdapter')\n", (114, 167), False, 'import jpy\n')]
|
"""Tests for parse tree pretty printing that preserves formatting
Test case descriptions are in file test/data/output.test.
"""
import os.path
import re
from typing import Undefined, Any
from mypy import build
from mypy.myunit import Suite, run_test
from mypy.test.helpers import assert_string_arrays_equal
from mypy.test.data import parse_test_cases
from mypy.test.config import test_data_prefix, test_temp_dir
from mypy.parse import parse
from mypy.output import OutputVisitor
from mypy.errors import CompileError
# Files which contain test case descriptions.
output_files = ['output.test']
class OutputSuite(Suite):
def cases(self):
c = []
for f in output_files:
c += parse_test_cases(os.path.join(test_data_prefix, f),
test_output, test_temp_dir, True)
return c
def test_output(testcase):
"""Perform an identity source code transformation test case."""
expected = testcase.output
if expected == []:
expected = testcase.input
try:
src = '\n'.join(testcase.input)
# Parse and semantically analyze the source program.
# Test case names with a special suffix get semantically analyzed. This
# lets us test that semantic analysis does not break source code pretty
# printing.
if testcase.name.endswith('_SemanticAnalyzer'):
result = build.build('main',
target=build.SEMANTIC_ANALYSIS,
program_text=src,
flags=[build.TEST_BUILTINS],
alt_lib_path=test_temp_dir)
files = result.files
else:
files = {'main': parse(src, 'main')}
a = []
first = True
# Produce an output containing the pretty-printed forms (with original
# formatting) of all the relevant source files.
for fnam in sorted(files.keys()):
f = files[fnam]
# Omit the builtins and files marked for omission.
if (not f.path.endswith(os.sep + 'builtins.py') and
'-skip.' not in f.path):
# Add file name + colon for files other than the first.
if not first:
a.append('{}:'.format(fix_path(remove_prefix(
f.path, test_temp_dir))))
v = OutputVisitor()
f.accept(v)
s = v.output()
if s != '':
a += s.split('\n')
first = False
except CompileError as e:
a = e.messages
assert_string_arrays_equal(
expected, a, 'Invalid source code output ({}, line {})'.format(
testcase.file, testcase.line))
def remove_prefix(path, prefix):
regexp = '^' + prefix.replace('\\', '\\\\')
np = re.sub(regexp, '', path)
if np.startswith(os.sep):
np = np[1:]
return np
def fix_path(path):
return path.replace('\\', '/')
if __name__ == '__main__':
import sys
run_test(OutputSuite(), sys.argv[1:])
|
[
"re.sub",
"mypy.build.build",
"mypy.output.OutputVisitor",
"mypy.parse.parse"
] |
[((2901, 2925), 're.sub', 're.sub', (['regexp', '""""""', 'path'], {}), "(regexp, '', path)\n", (2907, 2925), False, 'import re\n'), ((1409, 1540), 'mypy.build.build', 'build.build', (['"""main"""'], {'target': 'build.SEMANTIC_ANALYSIS', 'program_text': 'src', 'flags': '[build.TEST_BUILTINS]', 'alt_lib_path': 'test_temp_dir'}), "('main', target=build.SEMANTIC_ANALYSIS, program_text=src, flags\n =[build.TEST_BUILTINS], alt_lib_path=test_temp_dir)\n", (1420, 1540), False, 'from mypy import build\n'), ((1744, 1762), 'mypy.parse.parse', 'parse', (['src', '"""main"""'], {}), "(src, 'main')\n", (1749, 1762), False, 'from mypy.parse import parse\n'), ((2441, 2456), 'mypy.output.OutputVisitor', 'OutputVisitor', ([], {}), '()\n', (2454, 2456), False, 'from mypy.output import OutputVisitor\n')]
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from skl2onnx.common.data_types import onnx_built_with_ml
from test_utils import (
dump_one_class_classification,
dump_binary_classification,
dump_multiple_classification,
)
from test_utils import dump_multiple_regression, dump_single_regression
class TestSklearnDecisionTreeModels(unittest.TestCase):
@unittest.skipIf(not onnx_built_with_ml(),
reason="Requires ONNX-ML extension.")
def test_decision_tree_classifier(self):
model = DecisionTreeClassifier()
dump_one_class_classification(
model,
# Operator cast-1 is not implemented in onnxruntime
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.3') or "
"StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
dump_binary_classification(
model,
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.3') or "
"StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
dump_multiple_classification(
model,
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.3') or "
"StrictVersion(onnxruntime.__version__)"
" <= StrictVersion('0.2.1')",
)
def test_decision_tree_regressor(self):
model = DecisionTreeRegressor()
dump_single_regression(
model,
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
dump_multiple_regression(
model,
allow_failure="StrictVersion(onnx.__version__)"
" < StrictVersion('1.2')",
)
if __name__ == "__main__":
unittest.main()
|
[
"sklearn.tree.DecisionTreeRegressor",
"test_utils.dump_binary_classification",
"sklearn.tree.DecisionTreeClassifier",
"test_utils.dump_one_class_classification",
"test_utils.dump_multiple_regression",
"test_utils.dump_multiple_classification",
"unittest.main",
"test_utils.dump_single_regression",
"skl2onnx.common.data_types.onnx_built_with_ml"
] |
[((2379, 2394), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2392, 2394), False, 'import unittest\n'), ((905, 929), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (927, 929), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((938, 1124), 'test_utils.dump_one_class_classification', 'dump_one_class_classification', (['model'], {'allow_failure': '"""StrictVersion(onnx.__version__) < StrictVersion(\'1.3\') or StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.2.1\')"""'}), '(model, allow_failure=\n "StrictVersion(onnx.__version__) < StrictVersion(\'1.3\') or StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.2.1\')"\n )\n', (967, 1124), False, 'from test_utils import dump_one_class_classification, dump_binary_classification, dump_multiple_classification\n'), ((1309, 1492), 'test_utils.dump_binary_classification', 'dump_binary_classification', (['model'], {'allow_failure': '"""StrictVersion(onnx.__version__) < StrictVersion(\'1.3\') or StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.2.1\')"""'}), '(model, allow_failure=\n "StrictVersion(onnx.__version__) < StrictVersion(\'1.3\') or StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.2.1\')"\n )\n', (1335, 1492), False, 'from test_utils import dump_one_class_classification, dump_binary_classification, dump_multiple_classification\n'), ((1613, 1798), 'test_utils.dump_multiple_classification', 'dump_multiple_classification', (['model'], {'allow_failure': '"""StrictVersion(onnx.__version__) < StrictVersion(\'1.3\') or StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.2.1\')"""'}), '(model, allow_failure=\n "StrictVersion(onnx.__version__) < StrictVersion(\'1.3\') or StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.2.1\')"\n )\n', (1641, 1798), False, 'from test_utils import dump_one_class_classification, dump_binary_classification, dump_multiple_classification\n'), ((1972, 1995), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1993, 1995), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((2004, 2110), 'test_utils.dump_single_regression', 'dump_single_regression', (['model'], {'allow_failure': '"""StrictVersion(onnx.__version__) < StrictVersion(\'1.2\')"""'}), '(model, allow_failure=\n "StrictVersion(onnx.__version__) < StrictVersion(\'1.2\')")\n', (2026, 2110), False, 'from test_utils import dump_multiple_regression, dump_single_regression\n'), ((2178, 2286), 'test_utils.dump_multiple_regression', 'dump_multiple_regression', (['model'], {'allow_failure': '"""StrictVersion(onnx.__version__) < StrictVersion(\'1.2\')"""'}), '(model, allow_failure=\n "StrictVersion(onnx.__version__) < StrictVersion(\'1.2\')")\n', (2202, 2286), False, 'from test_utils import dump_multiple_regression, dump_single_regression\n'), ((763, 783), 'skl2onnx.common.data_types.onnx_built_with_ml', 'onnx_built_with_ml', ([], {}), '()\n', (781, 783), False, 'from skl2onnx.common.data_types import onnx_built_with_ml\n')]
|
"""Asset definitions for the simple_lakehouse example."""
import pandas as pd
from lakehouse import Column, computed_table, source_table
from pyarrow import date32, float64, string
sfo_q2_weather_sample_table = source_table(
path="data", columns=[Column("tmpf", float64()), Column("valid_date", string())],
)
@computed_table(
input_assets=[sfo_q2_weather_sample_table],
columns=[Column("valid_date", date32()), Column("max_tmpf", float64())],
)
def daily_temperature_highs_table(sfo_q2_weather_sample: pd.DataFrame) -> pd.DataFrame:
"""Computes the temperature high for each day"""
sfo_q2_weather_sample["valid_date"] = pd.to_datetime(sfo_q2_weather_sample["valid"])
return sfo_q2_weather_sample.groupby("valid_date").max().rename(columns={"tmpf": "max_tmpf"})
|
[
"pyarrow.date32",
"pyarrow.float64",
"pandas.to_datetime",
"pyarrow.string"
] |
[((643, 689), 'pandas.to_datetime', 'pd.to_datetime', (["sfo_q2_weather_sample['valid']"], {}), "(sfo_q2_weather_sample['valid'])\n", (657, 689), True, 'import pandas as pd\n'), ((267, 276), 'pyarrow.float64', 'float64', ([], {}), '()\n', (274, 276), False, 'from pyarrow import date32, float64, string\n'), ((300, 308), 'pyarrow.string', 'string', ([], {}), '()\n', (306, 308), False, 'from pyarrow import date32, float64, string\n'), ((415, 423), 'pyarrow.date32', 'date32', ([], {}), '()\n', (421, 423), False, 'from pyarrow import date32, float64, string\n'), ((445, 454), 'pyarrow.float64', 'float64', ([], {}), '()\n', (452, 454), False, 'from pyarrow import date32, float64, string\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 13:30:53 2017
@author: laoj
"""
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape
from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky
from pymc3.math import tround
#%% n scalar, p 1D
#n = 183
n = np.array([[106],
[143],
[102],
[116],
[183],
[150]])
p = np.array([[ 0.21245365, 0.41223126, 0.37531509],
[ 0.13221011, 0.50537169, 0.3624182 ],
[ 0.08813779, 0.54447146, 0.36739075],
[ 0.18932804, 0.4630365, 0.34763546],
[ 0.11006472, 0.49227755, 0.39765773],
[ 0.17886852, 0.41098834, 0.41014314]])
# p = np.array([ 0.21245365, 0.41223126, 0.37531509])
n = tt.as_tensor_variable(n)
p = tt.as_tensor_variable(p)
n = np.squeeze(n)
n = tt.shape_padright(n) if n.ndim == 1 else tt.as_tensor_variable(n)
n.ndim
n * p
#%%
n = np.array([[106],
[143],
[102],
[116],
[183],
[150]])
#n = 183
p = np.array([[ 0.21245365, 0.41223126, 0.37531509],
[ 0.13221011, 0.50537169, 0.3624182 ],
[ 0.08813779, 0.54447146, 0.36739075],
[ 0.18932804, 0.4630365, 0.34763546],
[ 0.11006472, 0.49227755, 0.39765773],
[ 0.17886852, 0.41098834, 0.41014314]])
#p = np.array([[ 0.21245365, 0.41223126, 0.37531509]])
#n = tt.as_tensor_variable(n)
p = tt.as_tensor_variable(p)
#%%
class Multinomial(Discrete):
def __init__(self, n, p, *args, **kwargs):
super(Multinomial, self).__init__(*args, **kwargs)
p = p / tt.sum(p, axis=-1, keepdims=True)
n = np.squeeze(n) # works also if n is a tensor
if len(self.shape) > 1:
m = self.shape[-2]
try:
assert n.shape == (m,)
except (AttributeError, AssertionError):
n = n * tt.ones(m)
self.n = tt.shape_padright(n)
self.p = p if p.ndim > 1 else tt.shape_padleft(p)
elif n.ndim == 1:
self.n = tt.shape_padright(n)
self.p = p if p.ndim > 1 else tt.shape_padleft(p)
else:
# n is a scalar, p is a 1d array
self.n = tt.as_tensor_variable(n)
self.p = tt.as_tensor_variable(p)
self.mean = self.n * self.p
mode = tt.cast(tt.round(self.mean), 'int32')
diff = self.n - tt.sum(mode, axis=-1, keepdims=True)
inc_bool_arr = tt.abs_(diff) > 0
mode = tt.inc_subtensor(mode[inc_bool_arr.nonzero()],
diff[inc_bool_arr.nonzero()])
self.mode = mode
def _random(self, n, p, size=None):
original_dtype = p.dtype
# Set float type to float64 for numpy. This change is related to numpy issue #8317 (https://github.com/numpy/numpy/issues/8317)
p = p.astype('float64')
# Now, re-normalize all of the values in float64 precision. This is done inside the conditionals
if size == p.shape:
size = None
if (p.ndim == 1) and (n.ndim == 0):
p = p / p.sum()
randnum = np.random.multinomial(n, p.squeeze(), size=size)
else:
p = p / p.sum(axis=1, keepdims=True)
if n.shape[0] > p.shape[0]:
randnum = np.asarray([
np.random.multinomial(nn, p.squeeze(), size=size)
for nn in n
])
elif n.shape[0] < p.shape[0]:
randnum = np.asarray([
np.random.multinomial(n.squeeze(), pp, size=size)
for pp in p
])
else:
randnum = np.asarray([
np.random.multinomial(nn, pp, size=size)
for (nn, pp) in zip(n, p)
])
return randnum.astype(original_dtype)
def random(self, point=None, size=None):
n, p = draw_values([self.n, self.p], point=point)
samples = generate_samples(self._random, n, p,
dist_shape=self.shape,
size=size)
return samples
def logp(self, x):
n = self.n
p = self.p
return bound(
tt.sum(factln(n)) - tt.sum(factln(x)) + tt.sum(x * tt.log(p)),
tt.all(x >= 0),
tt.all(tt.eq(tt.sum(x, axis=-1, keepdims=True), n)),
tt.all(p <= 1),
tt.all(tt.eq(tt.sum(p, axis=-1), 1)),
tt.all(tt.ge(n, 0)),
broadcast_conditions=False
)
Multinomial.dist(1,np.ones(3)/3,shape=(6, 3)).mode.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).p.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).n.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).mean.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).random()
#%%
counts =np.asarray([[19, 50, 37],
[21, 67, 55],
[11, 53, 38],
[17, 54, 45],
[24, 93, 66],
[27, 53, 70]])
Multinomial.dist(n,p,shape=(6, 3)).logp(x=counts).eval()
#%%
with pm.Model() as model:
like = Multinomial('obs_ABC', n, p, observed=counts, shape=counts.shape)
#%%
paramall = (
[[.25, .25, .25, .25], 4, 2],
[[.25, .25, .25, .25], (1, 4), 3],
# 3: expect to fail
# [[.25, .25, .25, .25], (10, 4)],
[[.25, .25, .25, .25], (10, 1, 4), 5],
# 5: expect to fail
# [[[.25, .25, .25, .25]], (2, 4), [7, 11]],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (2, 4), 13],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (2, 4), [17, 19]],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (1, 2, 4), [23, 29]],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (10, 2, 4), [31, 37]],
)
for p, shape, n in paramall:
with pm.Model() as model:
m = Multinomial('m', n=n, p=np.asarray(p), shape=shape)
print(m.random().shape)
#%%
counts =np.asarray([[19, 50, 37],
[21, 67, 55],
[11, 53, 38],
[17, 54, 45],
[24, 93, 66],
[27, 53, 70]])
n = np.array([[106],
[143],
[102],
[116],
[183],
[150]])
sparsity=1 #not zero
beta=np.ones(counts.shape) #input for dirichlet
with pm.Model() as model:
theta=pm.Dirichlet('theta',beta/sparsity, shape = counts.shape)
transition=pm.Multinomial('transition',n,theta,observed=counts)
trace=pm.sample(1000)
#%%
import numpy as np
import pymc3 as pm
import theano.tensor as tt
def norm_simplex(p):
"""Sum-to-zero transformation."""
return (p.T / p.sum(axis=-1)).T
def ccmodel(beta, x):
"""Community composition model."""
return norm_simplex(tt.exp(tt.dot(x, tt.log(beta))))
class DirichletMultinomial(pm.Discrete):
"""Dirichlet Multinomial Model
"""
def __init__(self, alpha, *args, **kwargs):
super(DirichletMultinomial, self).__init__(*args, **kwargs)
self.alpha = alpha
def logp(self, x):
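        # Dirichlet-multinomial log-pmf, as computed below:
        #   log n! + log Gamma(sum(alpha)) - log Gamma(n + sum(alpha))
        #   + sum_k [log Gamma(x_k + alpha_k) - log Gamma(x_k + 1) - log Gamma(alpha_k)]
        # where n = sum_k x_k.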
alpha = self.alpha
n = tt.sum(x, axis=-1)
sum_alpha = tt.sum(alpha, axis=-1)
const = (tt.gammaln(n + 1) + tt.gammaln(sum_alpha)) - tt.gammaln(n + sum_alpha)
series = tt.gammaln(x + alpha) - (tt.gammaln(x + 1) + tt.gammaln(alpha))
result = const + tt.sum(series, axis=-1)
return result
def as_col(x):
if isinstance(x, tt.TensorVariable):
return x.dimshuffle(0, 'x')
else:
return np.asarray(x).reshape(-1, 1)
def as_row(x):
if isinstance(x, tt.TensorVariable):
return x.dimshuffle('x', 0)
else:
return np.asarray(x).reshape(1, -1)
n, k, r = 25, 10, 2
x = np.random.randint(0, 1000, size=(n, k))
y = np.random.randint(0, 1000, size=n)
design = np.vstack((np.ones(25), np.random.randint(2, size=n))).T
with pm.Model() as model:
# Community composition
pi = pm.Dirichlet('pi', np.ones(k), shape=(r, k))
comp = pm.Deterministic('comp', ccmodel(pi, design))
# Inferred population density of observed taxa (hierarchical model)
rho = pm.Normal('rho', shape=r)
tau = pm.Lognormal('tau')
dens = pm.Lognormal('dens', tt.dot(design, rho), tau=tau, shape=n)
# Community composition *with* the spike
expected_recovery = as_col(1 / dens)
_comp = norm_simplex(tt.concatenate((comp, expected_recovery), axis=1))
# Variability
mu = pm.Lognormal('mu')
# Data
obs = DirichletMultinomial('obs', _comp * mu,
observed=tt.concatenate((x, as_col(y)), axis=1))
pm.sample(1000)
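#%%
# --- Hedged illustration (not part of the original script) ---------------------------------
# Standalone numpy/scipy sketch of the Dirichlet-multinomial log-pmf that
# DirichletMultinomial.logp implements above, term by term, so the theano expression can be
# sanity-checked on small inputs. Assumes scipy is available (it is a pymc3 dependency);
# the function name is ours, not part of pymc3.
from scipy.special import gammaln as np_gammaln

def dm_logpmf(x_counts, alpha):
    # log P(x | alpha) for one vector of counts x and one concentration vector alpha
    x_counts = np.asarray(x_counts, dtype=float)
    alpha = np.asarray(alpha, dtype=float)
    n_total = x_counts.sum()
    const = np_gammaln(n_total + 1) + np_gammaln(alpha.sum()) - np_gammaln(n_total + alpha.sum())
    series = np_gammaln(x_counts + alpha) - (np_gammaln(x_counts + 1) + np_gammaln(alpha))
    return const + series.sum()

# e.g. dm_logpmf(counts[0], np.ones(3)) evaluates the same expression as
# DirichletMultinomial.logp for a single row of counts with alpha = ones(3)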
|
[
"pymc3.distributions.dist_math.factln",
"theano.tensor.ones",
"theano.tensor.all",
"theano.tensor.abs_",
"numpy.array",
"pymc3.sample",
"pymc3.distributions.distribution.generate_samples",
"theano.tensor.dot",
"theano.tensor.shape_padleft",
"theano.tensor.log",
"numpy.asarray",
"numpy.random.multinomial",
"theano.tensor.round",
"pymc3.distributions.distribution.draw_values",
"theano.tensor.as_tensor_variable",
"theano.tensor.concatenate",
"numpy.ones",
"theano.tensor.sum",
"numpy.squeeze",
"theano.tensor.shape_padright",
"pymc3.Model",
"pymc3.Normal",
"pymc3.Lognormal",
"pymc3.Dirichlet",
"pymc3.Multinomial",
"numpy.random.randint",
"theano.tensor.ge",
"theano.tensor.gammaln"
] |
[((406, 458), 'numpy.array', 'np.array', (['[[106], [143], [102], [116], [183], [150]]'], {}), '([[106], [143], [102], [116], [183], [150]])\n', (414, 458), True, 'import numpy as np\n'), ((468, 719), 'numpy.array', 'np.array', (['[[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, 0.3624182],\n [0.08813779, 0.54447146, 0.36739075], [0.18932804, 0.4630365, \n 0.34763546], [0.11006472, 0.49227755, 0.39765773], [0.17886852, \n 0.41098834, 0.41014314]]'], {}), '([[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, \n 0.3624182], [0.08813779, 0.54447146, 0.36739075], [0.18932804, \n 0.4630365, 0.34763546], [0.11006472, 0.49227755, 0.39765773], [\n 0.17886852, 0.41098834, 0.41014314]])\n', (476, 719), True, 'import numpy as np\n'), ((790, 814), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['n'], {}), '(n)\n', (811, 814), True, 'import theano.tensor as tt\n'), ((819, 843), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['p'], {}), '(p)\n', (840, 843), True, 'import theano.tensor as tt\n'), ((848, 861), 'numpy.squeeze', 'np.squeeze', (['n'], {}), '(n)\n', (858, 861), True, 'import numpy as np\n'), ((953, 1005), 'numpy.array', 'np.array', (['[[106], [143], [102], [116], [183], [150]]'], {}), '([[106], [143], [102], [116], [183], [150]])\n', (961, 1005), True, 'import numpy as np\n'), ((1025, 1276), 'numpy.array', 'np.array', (['[[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, 0.3624182],\n [0.08813779, 0.54447146, 0.36739075], [0.18932804, 0.4630365, \n 0.34763546], [0.11006472, 0.49227755, 0.39765773], [0.17886852, \n 0.41098834, 0.41014314]]'], {}), '([[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, \n 0.3624182], [0.08813779, 0.54447146, 0.36739075], [0.18932804, \n 0.4630365, 0.34763546], [0.11006472, 0.49227755, 0.39765773], [\n 0.17886852, 0.41098834, 0.41014314]])\n', (1033, 1276), True, 'import numpy as np\n'), ((1378, 1402), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['p'], {}), '(p)\n', (1399, 1402), True, 'import theano.tensor as tt\n'), ((4786, 4886), 'numpy.asarray', 'np.asarray', (['[[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93, 66], [27,\n 53, 70]]'], {}), '([[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93,\n 66], [27, 53, 70]])\n', (4796, 4886), True, 'import numpy as np\n'), ((5894, 5994), 'numpy.asarray', 'np.asarray', (['[[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93, 66], [27,\n 53, 70]]'], {}), '([[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93,\n 66], [27, 53, 70]])\n', (5904, 5994), True, 'import numpy as np\n'), ((6040, 6092), 'numpy.array', 'np.array', (['[[106], [143], [102], [116], [183], [150]]'], {}), '([[106], [143], [102], [116], [183], [150]])\n', (6048, 6092), True, 'import numpy as np\n'), ((6124, 6145), 'numpy.ones', 'np.ones', (['counts.shape'], {}), '(counts.shape)\n', (6131, 6145), True, 'import numpy as np\n'), ((7556, 7595), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': '(n, k)'}), '(0, 1000, size=(n, k))\n', (7573, 7595), True, 'import numpy as np\n'), ((7600, 7634), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': 'n'}), '(0, 1000, size=n)\n', (7617, 7634), True, 'import numpy as np\n'), ((866, 886), 'theano.tensor.shape_padright', 'tt.shape_padright', (['n'], {}), '(n)\n', (883, 886), True, 'import theano.tensor as tt\n'), ((907, 931), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['n'], {}), '(n)\n', (928, 931), 
True, 'import theano.tensor as tt\n'), ((4994, 5004), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5002, 5004), True, 'import pymc3 as pm\n'), ((6173, 6183), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (6181, 6183), True, 'import pymc3 as pm\n'), ((6204, 6262), 'pymc3.Dirichlet', 'pm.Dirichlet', (['"""theta"""', '(beta / sparsity)'], {'shape': 'counts.shape'}), "('theta', beta / sparsity, shape=counts.shape)\n", (6216, 6262), True, 'import pymc3 as pm\n'), ((6277, 6332), 'pymc3.Multinomial', 'pm.Multinomial', (['"""transition"""', 'n', 'theta'], {'observed': 'counts'}), "('transition', n, theta, observed=counts)\n", (6291, 6332), True, 'import pymc3 as pm\n'), ((6340, 6355), 'pymc3.sample', 'pm.sample', (['(1000)'], {}), '(1000)\n', (6349, 6355), True, 'import pymc3 as pm\n'), ((7707, 7717), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (7715, 7717), True, 'import pymc3 as pm\n'), ((7950, 7975), 'pymc3.Normal', 'pm.Normal', (['"""rho"""'], {'shape': 'r'}), "('rho', shape=r)\n", (7959, 7975), True, 'import pymc3 as pm\n'), ((7986, 8005), 'pymc3.Lognormal', 'pm.Lognormal', (['"""tau"""'], {}), "('tau')\n", (7998, 8005), True, 'import pymc3 as pm\n'), ((8270, 8288), 'pymc3.Lognormal', 'pm.Lognormal', (['"""mu"""'], {}), "('mu')\n", (8282, 8288), True, 'import pymc3 as pm\n'), ((8440, 8455), 'pymc3.sample', 'pm.sample', (['(1000)'], {}), '(1000)\n', (8449, 8455), True, 'import pymc3 as pm\n'), ((1607, 1620), 'numpy.squeeze', 'np.squeeze', (['n'], {}), '(n)\n', (1617, 1620), True, 'import numpy as np\n'), ((3882, 3924), 'pymc3.distributions.distribution.draw_values', 'draw_values', (['[self.n, self.p]'], {'point': 'point'}), '([self.n, self.p], point=point)\n', (3893, 3924), False, 'from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape\n'), ((3943, 4013), 'pymc3.distributions.distribution.generate_samples', 'generate_samples', (['self._random', 'n', 'p'], {'dist_shape': 'self.shape', 'size': 'size'}), '(self._random, n, p, dist_shape=self.shape, size=size)\n', (3959, 4013), False, 'from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape\n'), ((5769, 5779), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5777, 5779), True, 'import pymc3 as pm\n'), ((6932, 6950), 'theano.tensor.sum', 'tt.sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (6938, 6950), True, 'import theano.tensor as tt\n'), ((6971, 6993), 'theano.tensor.sum', 'tt.sum', (['alpha'], {'axis': '(-1)'}), '(alpha, axis=-1)\n', (6977, 6993), True, 'import theano.tensor as tt\n'), ((7784, 7794), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (7791, 7794), True, 'import numpy as np\n'), ((8038, 8057), 'theano.tensor.dot', 'tt.dot', (['design', 'rho'], {}), '(design, rho)\n', (8044, 8057), True, 'import theano.tensor as tt\n'), ((8189, 8238), 'theano.tensor.concatenate', 'tt.concatenate', (['(comp, expected_recovery)'], {'axis': '(1)'}), '((comp, expected_recovery), axis=1)\n', (8203, 8238), True, 'import theano.tensor as tt\n'), ((1561, 1594), 'theano.tensor.sum', 'tt.sum', (['p'], {'axis': '(-1)', 'keepdims': '(True)'}), '(p, axis=-1, keepdims=True)\n', (1567, 1594), True, 'import theano.tensor as tt\n'), ((1880, 1900), 'theano.tensor.shape_padright', 'tt.shape_padright', (['n'], {}), '(n)\n', (1897, 1900), True, 'import theano.tensor as tt\n'), ((2304, 2323), 'theano.tensor.round', 'tt.round', (['self.mean'], {}), '(self.mean)\n', (2312, 2323), True, 'import theano.tensor as tt\n'), ((2358, 2394), 'theano.tensor.sum', 'tt.sum', (['mode'], {'axis': 
'(-1)', 'keepdims': '(True)'}), '(mode, axis=-1, keepdims=True)\n', (2364, 2394), True, 'import theano.tensor as tt\n'), ((2418, 2431), 'theano.tensor.abs_', 'tt.abs_', (['diff'], {}), '(diff)\n', (2425, 2431), True, 'import theano.tensor as tt\n'), ((4279, 4293), 'theano.tensor.all', 'tt.all', (['(x >= 0)'], {}), '(x >= 0)\n', (4285, 4293), True, 'import theano.tensor as tt\n'), ((4372, 4386), 'theano.tensor.all', 'tt.all', (['(p <= 1)'], {}), '(p <= 1)\n', (4378, 4386), True, 'import theano.tensor as tt\n'), ((7057, 7082), 'theano.tensor.gammaln', 'tt.gammaln', (['(n + sum_alpha)'], {}), '(n + sum_alpha)\n', (7067, 7082), True, 'import theano.tensor as tt\n'), ((7100, 7121), 'theano.tensor.gammaln', 'tt.gammaln', (['(x + alpha)'], {}), '(x + alpha)\n', (7110, 7121), True, 'import theano.tensor as tt\n'), ((7189, 7212), 'theano.tensor.sum', 'tt.sum', (['series'], {'axis': '(-1)'}), '(series, axis=-1)\n', (7195, 7212), True, 'import theano.tensor as tt\n'), ((7655, 7666), 'numpy.ones', 'np.ones', (['(25)'], {}), '(25)\n', (7662, 7666), True, 'import numpy as np\n'), ((7668, 7696), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'n'}), '(2, size=n)\n', (7685, 7696), True, 'import numpy as np\n'), ((1943, 1962), 'theano.tensor.shape_padleft', 'tt.shape_padleft', (['p'], {}), '(p)\n', (1959, 1962), True, 'import theano.tensor as tt\n'), ((2010, 2030), 'theano.tensor.shape_padright', 'tt.shape_padright', (['n'], {}), '(n)\n', (2027, 2030), True, 'import theano.tensor as tt\n'), ((2173, 2197), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['n'], {}), '(n)\n', (2194, 2197), True, 'import theano.tensor as tt\n'), ((2219, 2243), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['p'], {}), '(p)\n', (2240, 2243), True, 'import theano.tensor as tt\n'), ((4457, 4468), 'theano.tensor.ge', 'tt.ge', (['n', '(0)'], {}), '(n, 0)\n', (4462, 4468), True, 'import theano.tensor as tt\n'), ((5826, 5839), 'numpy.asarray', 'np.asarray', (['p'], {}), '(p)\n', (5836, 5839), True, 'import numpy as np\n'), ((6624, 6636), 'theano.tensor.log', 'tt.log', (['beta'], {}), '(beta)\n', (6630, 6636), True, 'import theano.tensor as tt\n'), ((7012, 7029), 'theano.tensor.gammaln', 'tt.gammaln', (['(n + 1)'], {}), '(n + 1)\n', (7022, 7029), True, 'import theano.tensor as tt\n'), ((7032, 7053), 'theano.tensor.gammaln', 'tt.gammaln', (['sum_alpha'], {}), '(sum_alpha)\n', (7042, 7053), True, 'import theano.tensor as tt\n'), ((7125, 7142), 'theano.tensor.gammaln', 'tt.gammaln', (['(x + 1)'], {}), '(x + 1)\n', (7135, 7142), True, 'import theano.tensor as tt\n'), ((7145, 7162), 'theano.tensor.gammaln', 'tt.gammaln', (['alpha'], {}), '(alpha)\n', (7155, 7162), True, 'import theano.tensor as tt\n'), ((7353, 7366), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7363, 7366), True, 'import numpy as np\n'), ((7500, 7513), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7510, 7513), True, 'import numpy as np\n'), ((2073, 2092), 'theano.tensor.shape_padleft', 'tt.shape_padleft', (['p'], {}), '(p)\n', (2089, 2092), True, 'import theano.tensor as tt\n'), ((4320, 4353), 'theano.tensor.sum', 'tt.sum', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (4326, 4353), True, 'import theano.tensor as tt\n'), ((4413, 4431), 'theano.tensor.sum', 'tt.sum', (['p'], {'axis': '(-1)'}), '(p, axis=-1)\n', (4419, 4431), True, 'import theano.tensor as tt\n'), ((4540, 4550), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (4547, 4550), True, 'import numpy as np\n'), ((1848, 
1858), 'theano.tensor.ones', 'tt.ones', (['m'], {}), '(m)\n', (1855, 1858), True, 'import theano.tensor as tt\n'), ((4211, 4220), 'pymc3.distributions.dist_math.factln', 'factln', (['n'], {}), '(n)\n', (4217, 4220), False, 'from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky\n'), ((4231, 4240), 'pymc3.distributions.dist_math.factln', 'factln', (['x'], {}), '(x)\n', (4237, 4240), False, 'from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky\n'), ((4255, 4264), 'theano.tensor.log', 'tt.log', (['p'], {}), '(p)\n', (4261, 4264), True, 'import theano.tensor as tt\n'), ((3669, 3709), 'numpy.random.multinomial', 'np.random.multinomial', (['nn', 'pp'], {'size': 'size'}), '(nn, pp, size=size)\n', (3690, 3709), True, 'import numpy as np\n')]
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Core features
"""
import asyncio
import concurrent
import logging
import os
import time
from implant import core
log = logging.getLogger(__name__)
class Echo(core.Command):
"""Demonstrate the basic command API."""
data = core.Parameter(default='ping', description='Meaningful data.')
async def local(self, context):
# custom protocol
# first: send
await context.channel.send_iteration("send to remote")
# second: receive
from_remote = []
async for x in context.channel:
from_remote.append(x)
log.debug("************ receiving from remote: %s", from_remote)
# third: wait for remote to finish and return result
remote_result = await context.remote_future
result = {
'local_data': self.data,
'from_remote': ''.join(from_remote),
}
result.update(remote_result)
return result
remote = core.CommandRemote('implant.commands.remotes.Echo')
class SystemLoad(core.Command):
async def local(self, context):
t, load = await context.remote_future
return t, load
async def remote(self, context):
t, load = time.time(), os.getloadavg()
return t, load
class Copy(core.Command):
src = core.Parameter(description='Source file at local side.')
dest = core.Parameter(description='Desatination file at remote side.')
def __init__(self, *args, **kwargs):
super(Copy, self).__init__(*args, **kwargs)
self.executor = concurrent.futures.ThreadPoolExecutor()
self.loop = asyncio.get_event_loop()
def __del__(self):
self.executor.shutdown(wait=True)
async def local(self, context):
with open(self.src, "rb") as f:
while True:
data = await self.loop.run_in_executor(self.executor, f.read, 0x8000)
if not data:
                    await context.channel.send(StopAsyncIteration())  # signal end-of-stream to the remote side
break
await context.channel.send(data)
result = await context.remote_future
return result
async def remote(self, context):
with open(self.dest, "wb") as f:
async for data in context.channel:
await self.loop.run_in_executor(self.executor, f.write, data)
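# --- Hedged illustration (not part of the implant package) ----------------------------------
# The same chunked, executor-backed read/write pattern that Copy uses above, but copying a
# local file without the implant channel machinery, so the flow can be tried in isolation.
# File names in the commented call are placeholders.
async def _local_chunked_copy(src, dest, chunk_size=0x8000):
    loop = asyncio.get_event_loop()
    executor = concurrent.futures.ThreadPoolExecutor()
    try:
        with open(src, 'rb') as fin, open(dest, 'wb') as fout:
            while True:
                data = await loop.run_in_executor(executor, fin.read, chunk_size)
                if not data:
                    break
                await loop.run_in_executor(executor, fout.write, data)
    finally:
        executor.shutdown(wait=True)
# asyncio.get_event_loop().run_until_complete(_local_chunked_copy('src.bin', 'dest.bin'))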
|
[
"logging.getLogger",
"implant.core.CommandRemote",
"implant.core.Parameter",
"concurrent.futures.ThreadPoolExecutor",
"os.getloadavg",
"asyncio.get_event_loop",
"time.time"
] |
[((697, 724), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (714, 724), False, 'import logging\n'), ((811, 873), 'implant.core.Parameter', 'core.Parameter', ([], {'default': '"""ping"""', 'description': '"""Meaningful data."""'}), "(default='ping', description='Meaningful data.')\n", (825, 873), False, 'from implant import core\n'), ((1524, 1575), 'implant.core.CommandRemote', 'core.CommandRemote', (['"""implant.commands.remotes.Echo"""'], {}), "('implant.commands.remotes.Echo')\n", (1542, 1575), False, 'from implant import core\n'), ((1862, 1918), 'implant.core.Parameter', 'core.Parameter', ([], {'description': '"""Source file at local side."""'}), "(description='Source file at local side.')\n", (1876, 1918), False, 'from implant import core\n'), ((1930, 1993), 'implant.core.Parameter', 'core.Parameter', ([], {'description': '"""Desatination file at remote side."""'}), "(description='Desatination file at remote side.')\n", (1944, 1993), False, 'from implant import core\n'), ((2113, 2152), 'concurrent.futures.ThreadPoolExecutor', 'concurrent.futures.ThreadPoolExecutor', ([], {}), '()\n', (2150, 2152), False, 'import concurrent\n'), ((2173, 2197), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2195, 2197), False, 'import asyncio\n'), ((1771, 1782), 'time.time', 'time.time', ([], {}), '()\n', (1780, 1782), False, 'import time\n'), ((1784, 1799), 'os.getloadavg', 'os.getloadavg', ([], {}), '()\n', (1797, 1799), False, 'import os\n')]
|
import sqlite3
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS Produtos (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
price REAL,
compra_id INTEGER,
FOREIGN KEY (compra_id) REFERENCES Compras(id)
);
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS Compras (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
date TEXT NOT NULL
);
"""
)
class Produto(object):
def getAll(self):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute("SELECT * FROM Produtos;")
return [
{
"id": items[0],
"name": items[1],
"price": items[2],
"compra_id": items[3]
} for items in cursor.fetchall()
]
def getByCompra(self, compraId):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
print(f"SELECT * FROM Produtos WHERE compra_id = {compraId}")
cursor.execute(f"SELECT * FROM Produtos WHERE compra_id = {compraId}")
return [
{
"id": items[0],
"name": items[1],
"price": items[2],
} for items in cursor.fetchall()
]
def insert(self, *args):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
print(f"INSERT INTO Produtos (name, price, compra_id) VALUES ('{args[0]}', {args[1]}, {args[2]})")
cursor.execute(f"INSERT INTO Produtos (name, price, compra_id) VALUES ('{args[0]}', {args[1]}, {args[2]})")
def getById(self, id):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM Produtos WHERE id = {id} ;")
return [
{
"id": items[0],
"name": items[1],
"price": items[2]
} for items in cursor.fetchall()
][0]
def update(self, id, *args):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(f"UPDATE Produtos SET name = {args[0]}, price = {args[1]}, compra_id = {args[2]} WHERE id = {id};")
def delete(self, id):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(f"DELETE FROM Produtos WHERE id = {id}")
def deleteByCompra(self, compraId):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
print(f"DELETE FROM Produtos WHERE compra_id = {compraId}")
cursor.execute(f"DELETE FROM Produtos WHERE compra_id = {compraId}")
class Compra(object):
def __init__(self):
self.produto = Produto()
def getAll(self):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
print("SELECT * FROM Compras;")
cursor.execute("SELECT * FROM Compras;")
return [
{
"id": items[0],
"date": items[1],
"produtos": self.produto.getByCompra(items[0])
} for items in cursor.fetchall()
]
def insert(self, *args):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
print(f"INSERT INTO Compras (date) VALUES ('{args[0]}')")
            cursor.execute(f"INSERT INTO Compras (date) VALUES ('{args[0]}')")
            # Use the rowid of the row just inserted on this cursor; a separate connection
            # (as getAll() opens) cannot see it before this transaction commits.
            compra_id = cursor.lastrowid
            ps = list(args[1])
            for p in ps:
                self.produto.insert(str(p["name"]), p["price"], compra_id)
            # return self.getById(compra_id)
def getById(self, id):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM Compras WHERE id = {id} ;")
return [
{
"id": items[0],
"date": items[1],
"produtos": self.produto.getByCompra(id)
} for items in cursor.fetchall()
][0]
def getByDate(self, date):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM Compras WHERE date = '{date}' ;")
return [
{
"id": items[0],
"date": items[1],
"produtos": self.produto.getByCompra(items[0])
} for items in cursor.fetchall()
]
def update(self, id, *args):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
cursor.execute(
"""
UPDATE Compras
SET date = ?, produto_id = ?
WHERE id = ?;
""", (*args, id)
)
def delete(self, id):
with sqlite3.connect('storage.db') as conn:
cursor = conn.cursor()
self.produto.deleteByCompra(self.getById(id)["id"])
print(f"DELETE FROM Compras WHERE id = {id}")
cursor.execute(f"DELETE FROM Compras WHERE id = {id}")
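# --- Hedged usage sketch (not part of the original module) ----------------------------------
# Minimal smoke test of the classes above, run against the same storage.db created at import
# time. The sample values are placeholders.
if __name__ == "__main__":
    compras = Compra()
    compras.insert("2021-01-01", [{"name": "cafe", "price": 9.5}])
    for compra in compras.getAll():
        print(compra)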
|
[
"sqlite3.connect"
] |
[((21, 50), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (36, 50), False, 'import sqlite3\n'), ((655, 684), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (670, 684), False, 'import sqlite3\n'), ((1091, 1120), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (1106, 1120), False, 'import sqlite3\n'), ((1580, 1609), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (1595, 1609), False, 'import sqlite3\n'), ((1926, 1955), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (1941, 1955), False, 'import sqlite3\n'), ((2336, 2365), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (2351, 2365), False, 'import sqlite3\n'), ((2577, 2606), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (2592, 2606), False, 'import sqlite3\n'), ((2773, 2802), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (2788, 2802), False, 'import sqlite3\n'), ((3116, 3145), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (3131, 3145), False, 'import sqlite3\n'), ((3573, 3602), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (3588, 3602), False, 'import sqlite3\n'), ((4020, 4049), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (4035, 4049), False, 'import sqlite3\n'), ((4450, 4479), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (4465, 4479), False, 'import sqlite3\n'), ((4891, 4920), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (4906, 4920), False, 'import sqlite3\n'), ((5206, 5235), 'sqlite3.connect', 'sqlite3.connect', (['"""storage.db"""'], {}), "('storage.db')\n", (5221, 5235), False, 'import sqlite3\n')]
|
# changelog.py -- Python module for Debian changelogs
# Copyright (C) 2006-7 <NAME> <<EMAIL>>
# Copyright (C) 2008 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# The parsing code is based on that from dpkg which is:
# Copyright 1996 <NAME>
# Copyright 2005 <NAME> <<EMAIL>>
# and licensed under the same license as above.
"""This module implements facilities to deal with Debian changelogs."""
from __future__ import absolute_import
import os
import pwd
import re
import socket
import warnings
import sys
import six
from debian import debian_support
# Python 3 doesn't have StandardError, but let's avoid changing our
# exception inheritance hierarchy for Python 2.
try:
_base_exception_class = StandardError
except NameError:
_base_exception_class = Exception
class ChangelogParseError(_base_exception_class):
"""Indicates that the changelog could not be parsed"""
is_user_error = True
def __init__(self, line):
self._line=line
def __str__(self):
return "Could not parse changelog: "+self._line
class ChangelogCreateError(_base_exception_class):
    """Indicates that the changelog could not be created, as all the information
required was not given"""
class VersionError(_base_exception_class):
"""Indicates that the version does not conform to the required format"""
is_user_error = True
def __init__(self, version):
self._version=version
def __str__(self):
return "Could not parse version: "+self._version
# TODO(jsw): Remove this in favor of using debian_support.Version directly. I
# don't think we gain anything by using this empty subclass.
class Version(debian_support.Version):
"""Represents a version of a Debian package."""
# debian_support.Version now has all the functionality we need
class ChangeBlock(object):
"""Holds all the information about one block from the changelog."""
def __init__(self, package=None, version=None, distributions=None,
urgency=None, urgency_comment=None, changes=None,
author=None, date=None, other_pairs=None, encoding='utf-8'):
self._raw_version = None
self._set_version(version)
self.package = package
self.distributions = distributions
self.urgency = urgency or "unknown"
self.urgency_comment = urgency_comment or ''
self._changes = changes
self.author = author
self.date = date
self._trailing = []
self.other_pairs = other_pairs or {}
self._encoding = encoding
self._no_trailer = False
self._trailer_separator = " "
def _set_version(self, version):
if version is not None:
self._raw_version = str(version)
def _get_version(self):
return Version(self._raw_version)
version = property(_get_version, _set_version)
def other_keys_normalised(self):
norm_dict = {}
        for (key, value) in self.other_pairs.items():
key = key[0].upper() + key[1:].lower()
m = xbcs_re.match(key)
if m is None:
key = "XS-%s" % key
norm_dict[key] = value
return norm_dict
def changes(self):
return self._changes
def add_trailing_line(self, line):
self._trailing.append(line)
def add_change(self, change):
if self._changes is None:
self._changes = [change]
else:
#Bit of trickery to keep the formatting nicer with a blank
#line at the end if there is one
changes = self._changes
changes.reverse()
added = False
for i in range(len(changes)):
m = blankline.match(changes[i])
if m is None:
changes.insert(i, change)
added = True
break
changes.reverse()
if not added:
changes.append(change)
self._changes = changes
def _get_bugs_closed_generic(self, type_re):
changes = six.u(' ').join(self._changes)
bugs = []
for match in type_re.finditer(changes):
closes_list = match.group(0)
for match in re.finditer(r"\d+", closes_list):
bugs.append(int(match.group(0)))
return bugs
@property
def bugs_closed(self):
return self._get_bugs_closed_generic(closes)
@property
def lp_bugs_closed(self):
return self._get_bugs_closed_generic(closeslp)
def _format(self):
# TODO(jsw): Switch to StringIO or a list to join at the end.
block = ""
if self.package is None:
raise ChangelogCreateError("Package not specified")
block += self.package + " "
if self._raw_version is None:
raise ChangelogCreateError("Version not specified")
block += "(" + self._raw_version + ") "
if self.distributions is None:
raise ChangelogCreateError("Distribution not specified")
block += self.distributions + "; "
if self.urgency is None:
raise ChangelogCreateError("Urgency not specified")
block += "urgency=" + self.urgency + self.urgency_comment
for (key, value) in self.other_pairs.items():
block += ", %s=%s" % (key, value)
block += '\n'
if self.changes() is None:
raise ChangelogCreateError("Changes not specified")
for change in self.changes():
block += change + "\n"
if not self._no_trailer:
if self.author is None:
raise ChangelogCreateError("Author not specified")
if self.date is None:
raise ChangelogCreateError("Date not specified")
block += " -- " + self.author + self._trailer_separator \
+ self.date + "\n"
for line in self._trailing:
block += line + "\n"
return block
if sys.version >= '3':
__str__ = _format
def __bytes__(self):
return str(self).encode(self._encoding)
else:
__unicode__ = _format
def __str__(self):
return unicode(self).encode(self._encoding)
topline = re.compile(r'^(\w%(name_chars)s*) \(([^\(\) \t]+)\)'
r'((\s+%(name_chars)s+)+)\;'
% {'name_chars': '[-+0-9a-z.]'},
re.IGNORECASE)
blankline = re.compile(r'^\s*$')
change = re.compile(r'^\s\s+.*$')
endline = re.compile(r'^ -- (.*) <(.*)>( ?)((\w+\,\s*)?\d{1,2}\s+\w+\s+'
r'\d{4}\s+\d{1,2}:\d\d:\d\d\s+[-+]\d{4}\s*)$')
endline_nodetails = re.compile(r'^ --(?: (.*) <(.*)>( ?)((\w+\,\s*)?\d{1,2}'
r'\s+\w+\s+\d{4}\s+\d{1,2}:\d\d:\d\d\s+[-+]\d{4}'
r'))?\s*$')
keyvalue= re.compile(r'^([-0-9a-z]+)=\s*(.*\S)$', re.IGNORECASE)
value_re = re.compile(r'^([-0-9a-z]+)((\s+.*)?)$', re.IGNORECASE)
xbcs_re = re.compile('^X[BCS]+-', re.IGNORECASE)
emacs_variables = re.compile(r'^(;;\s*)?Local variables:', re.IGNORECASE)
vim_variables = re.compile('^vim:', re.IGNORECASE)
cvs_keyword = re.compile(r'^\$\w+:.*\$')
comments = re.compile(r'^\# ')
more_comments = re.compile(r'^/\*.*\*/')
closes = re.compile(r'closes:\s*(?:bug)?\#?\s?\d+(?:,\s*(?:bug)?\#?\s?\d+)*',
re.IGNORECASE)
closeslp = re.compile(r'lp:\s+\#\d+(?:,\s*\#\d+)*', re.IGNORECASE)
old_format_re1 = re.compile(r'^(\w+\s+\w+\s+\d{1,2} \d{1,2}:\d{1,2}:\d{1,2}'
r'\s+[\w\s]*\d{4})\s+(.*)\s+(<|\()(.*)(\)|>)')
old_format_re2 = re.compile(r'^(\w+\s+\w+\s+\d{1,2},?\s*\d{4})\s+(.*)'
r'\s+(<|\()(.*)(\)|>)')
old_format_re3 = re.compile(r'^(\w[-+0-9a-z.]*) \(([^\(\) \t]+)\)\;?',
re.IGNORECASE)
old_format_re4 = re.compile(r'^([\w.+-]+)(-| )(\S+) Debian (\S+)',
re.IGNORECASE)
old_format_re5 = re.compile('^Changes from version (.*) to (.*):',
re.IGNORECASE)
old_format_re6 = re.compile(r'^Changes for [\w.+-]+-[\w.+-]+:?\s*$',
re.IGNORECASE)
old_format_re7 = re.compile(r'^Old Changelog:\s*$', re.IGNORECASE)
old_format_re8 = re.compile(r'^(?:\d+:)?\w[\w.+~-]*:?\s*$')
class Changelog(object):
"""Represents a debian/changelog file."""
# TODO(jsw): Avoid masking the 'file' built-in.
def __init__(self, file=None, max_blocks=None,
allow_empty_author=False, strict=False, encoding='utf-8'):
"""Initializer.
Args:
file: The contents of the changelog, either as a str, unicode object,
or an iterator of lines (each of which is either a str or unicode)
max_blocks: The maximum number of blocks to parse from the input.
(Default: no limit)
allow_empty_author: Whether to allow an empty author in the trailer
line of a change block. (Default: False)
strict: Whether to raise an exception if there are errors. (Default:
use a warning)
encoding: If the input is a str or iterator of str, the encoding to
use when interpreting the input.
"""
self._encoding = encoding
self._blocks = []
self.initial_blank_lines = []
if file is not None:
self.parse_changelog(file, max_blocks=max_blocks,
allow_empty_author=allow_empty_author,
strict=strict)
def _parse_error(self, message, strict):
if strict:
raise ChangelogParseError(message)
else:
warnings.warn(message)
def parse_changelog(self, file, max_blocks=None,
allow_empty_author=False, strict=True, encoding=None):
first_heading = "first heading"
        next_heading_or_eof = "next heading or EOF"
start_of_change_data = "start of change data"
more_changes_or_trailer = "more change data or trailer"
slurp_to_end = "slurp to end"
encoding = encoding or self._encoding
if file is None:
self._parse_error('Empty changelog file.', strict)
return
self._blocks = []
self.initial_blank_lines = []
current_block = ChangeBlock(encoding=encoding)
changes = []
state = first_heading
old_state = None
if isinstance(file, bytes):
file = file.decode(encoding)
if isinstance(file, six.string_types):
# Make sure the changelog file is not empty.
if len(file.strip()) == 0:
self._parse_error('Empty changelog file.', strict)
return
file = file.splitlines()
for line in file:
if not isinstance(line, six.text_type):
line = line.decode(encoding)
# Support both lists of lines without the trailing newline and
# those with trailing newlines (e.g. when given a file object
# directly)
line = line.rstrip('\n')
if state == first_heading or state == next_heading_or_eof:
top_match = topline.match(line)
blank_match = blankline.match(line)
if top_match is not None:
if (max_blocks is not None
and len(self._blocks) >= max_blocks):
return
current_block.package = top_match.group(1)
current_block._raw_version = top_match.group(2)
current_block.distributions = top_match.group(3).lstrip()
pairs = line.split(";", 1)[1]
all_keys = {}
other_pairs = {}
for pair in pairs.split(','):
pair = pair.strip()
kv_match = keyvalue.match(pair)
if kv_match is None:
self._parse_error("Invalid key-value "
"pair after ';': %s" % pair, strict)
continue
key = kv_match.group(1)
value = kv_match.group(2)
if key.lower() in all_keys:
self._parse_error("Repeated key-value: "
"%s" % key.lower(), strict)
all_keys[key.lower()] = value
if key.lower() == "urgency":
val_match = value_re.match(value)
if val_match is None:
self._parse_error("Badly formatted "
"urgency value: %s" % value, strict)
else:
current_block.urgency = val_match.group(1)
comment = val_match.group(2)
if comment is not None:
current_block.urgency_comment = comment
else:
other_pairs[key] = value
current_block.other_pairs = other_pairs
state = start_of_change_data
elif blank_match is not None:
if state == first_heading:
self.initial_blank_lines.append(line)
else:
self._blocks[-1].add_trailing_line(line)
else:
emacs_match = emacs_variables.match(line)
vim_match = vim_variables.match(line)
cvs_match = cvs_keyword.match(line)
comments_match = comments.match(line)
more_comments_match = more_comments.match(line)
if ((emacs_match is not None or vim_match is not None)
and state != first_heading):
self._blocks[-1].add_trailing_line(line)
old_state = state
state = slurp_to_end
continue
if (cvs_match is not None or comments_match is not None
or more_comments_match is not None):
if state == first_heading:
self.initial_blank_lines.append(line)
else:
self._blocks[-1].add_trailing_line(line)
continue
if ((old_format_re1.match(line) is not None
or old_format_re2.match(line) is not None
or old_format_re3.match(line) is not None
or old_format_re4.match(line) is not None
or old_format_re5.match(line) is not None
or old_format_re6.match(line) is not None
or old_format_re7.match(line) is not None
or old_format_re8.match(line) is not None)
and state != first_heading):
self._blocks[-1].add_trailing_line(line)
old_state = state
state = slurp_to_end
continue
self._parse_error("Unexpected line while looking "
"for %s: %s" % (state, line), strict)
if state == first_heading:
self.initial_blank_lines.append(line)
else:
self._blocks[-1].add_trailing_line(line)
elif (state == start_of_change_data
or state == more_changes_or_trailer):
change_match = change.match(line)
end_match = endline.match(line)
end_no_details_match = endline_nodetails.match(line)
blank_match = blankline.match(line)
if change_match is not None:
changes.append(line)
state = more_changes_or_trailer
elif end_match is not None:
if end_match.group(3) != ' ':
self._parse_error("Badly formatted trailer "
"line: %s" % line, strict)
current_block._trailer_separator = end_match.group(3)
current_block.author = "%s <%s>" \
% (end_match.group(1), end_match.group(2))
current_block.date = end_match.group(4)
current_block._changes = changes
self._blocks.append(current_block)
changes = []
current_block = ChangeBlock(encoding=encoding)
state = next_heading_or_eof
elif end_no_details_match is not None:
if not allow_empty_author:
self._parse_error("Badly formatted trailer "
"line: %s" % line, strict)
continue
current_block._changes = changes
self._blocks.append(current_block)
changes = []
current_block = ChangeBlock(encoding=encoding)
state = next_heading_or_eof
elif blank_match is not None:
changes.append(line)
else:
cvs_match = cvs_keyword.match(line)
comments_match = comments.match(line)
more_comments_match = more_comments.match(line)
if (cvs_match is not None or comments_match is not None
or more_comments_match is not None):
changes.append(line)
continue
self._parse_error("Unexpected line while looking "
"for %s: %s" % (state, line), strict)
changes.append(line)
elif state == slurp_to_end:
if old_state == next_heading_or_eof:
self._blocks[-1].add_trailing_line(line)
else:
changes.append(line)
else:
assert False, "Unknown state: %s" % state
if ((state != next_heading_or_eof and state != slurp_to_end)
or (state == slurp_to_end and old_state != next_heading_or_eof)):
self._parse_error("Found eof where expected %s" % state,
strict)
current_block._changes = changes
current_block._no_trailer = True
self._blocks.append(current_block)
def get_version(self):
"""Return a Version object for the last version"""
return self._blocks[0].version
def set_version(self, version):
"""Set the version of the last changelog block
version can be a full version string, or a Version object
"""
self._blocks[0].version = Version(version)
version = property(get_version, set_version,
doc="Version object for last changelog block""")
### For convenience, let's expose some of the version properties
full_version = property(lambda self: self.version.full_version)
epoch = property(lambda self: self.version.epoch)
debian_version = property(lambda self: self.version.debian_revision)
debian_revision = property(lambda self: self.version.debian_revision)
upstream_version = property(lambda self: self.version.upstream_version)
def get_package(self):
"""Returns the name of the package in the last version."""
return self._blocks[0].package
def set_package(self, package):
self._blocks[0].package = package
package = property(get_package, set_package,
doc="Name of the package in the last version")
def get_versions(self):
"""Returns a list of version objects that the package went through."""
return [block.version for block in self._blocks]
versions = property(get_versions,
doc="List of version objects the package went through")
def _raw_versions(self):
return [block._raw_version for block in self._blocks]
def _format(self):
pieces = []
pieces.append(six.u('\n').join(self.initial_blank_lines))
for block in self._blocks:
pieces.append(six.text_type(block))
return six.u('').join(pieces)
if sys.version >= '3':
__str__ = _format
def __bytes__(self):
return str(self).encode(self._encoding)
else:
__unicode__ = _format
def __str__(self):
return unicode(self).encode(self._encoding)
def __iter__(self):
return iter(self._blocks)
def __getitem__(self, n):
""" select a changelog entry by number, version string, or Version
:param n: integer or str representing a version or Version object
"""
if type(n) is int:
return self._blocks[n]
elif type(n) is str:
return self[Version(n)]
return self._blocks[self.versions.index(n)]
def __len__(self):
return len(self._blocks)
def set_distributions(self, distributions):
self._blocks[0].distributions = distributions
distributions = property(lambda self: self._blocks[0].distributions,
set_distributions)
def set_urgency(self, urgency):
self._blocks[0].urgency = urgency
urgency = property(lambda self: self._blocks[0].urgency, set_urgency)
def add_change(self, change):
self._blocks[0].add_change(change)
def set_author(self, author):
self._blocks[0].author = author
author = property(lambda self: self._blocks[0].author, set_author)
def set_date(self, date):
self._blocks[0].date = date
date = property(lambda self: self._blocks[0].date, set_date)
def new_block(self, **kwargs):
kwargs.setdefault('encoding', self._encoding)
block = ChangeBlock(**kwargs)
block.add_trailing_line('')
self._blocks.insert(0, block)
def write_to_open_file(self, file):
file.write(self.__str__())
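# --- Hedged usage sketch (not part of the upstream module) ----------------------------------
# Typical use of the Changelog class above: parse the contents of a debian/changelog and
# inspect the most recent entry (the path below is a placeholder).
#
#   with open('debian/changelog') as f:
#       changelog = Changelog(f.read())
#   changelog.package, str(changelog.version), changelog.distributions, changelog.author
#   for block in changelog:
#       print(block.version, block.changes())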
def get_maintainer():
"""Get the maintainer information in the same manner as dch.
This function gets the information about the current user for
the maintainer field using environment variables of gecos
informations as approriate.
It uses the same methods as dch to get the information, namely
DEBEMAIL, DEBFULLNAME, EMAIL, NAME, /etc/mailname and gecos.
:returns: a tuple of the full name, email pair as strings.
Either of the pair may be None if that value couldn't
be determined.
"""
env = os.environ
regex = re.compile(r"^(.*)\s+<(.*)>$")
# Split email and name
if 'DEBEMAIL' in env:
match_obj = regex.match(env['DEBEMAIL'])
if match_obj:
if not 'DEBFULLNAME' in env:
env['DEBFULLNAME'] = match_obj.group(1)
env['DEBEMAIL'] = match_obj.group(2)
if 'DEBEMAIL' not in env or 'DEBFULLNAME' not in env:
if 'EMAIL' in env:
match_obj = regex.match(env['EMAIL'])
if match_obj:
if not 'DEBFULLNAME' in env:
env['DEBFULLNAME'] = match_obj.group(1)
env['EMAIL'] = match_obj.group(2)
# Get maintainer's name
if 'DEBFULLNAME' in env:
maintainer = env['DEBFULLNAME']
elif 'NAME' in env:
maintainer = env['NAME']
else:
# Use password database if no data in environment variables
try:
maintainer = re.sub(r',.*', '', pwd.getpwuid(os.getuid()).pw_gecos)
except (KeyError, AttributeError):
maintainer = None
# Get maintainer's mail address
if 'DEBEMAIL' in env:
email = env['DEBEMAIL']
elif 'EMAIL' in env:
email = env['EMAIL']
else:
addr = None
if os.path.exists('/etc/mailname'):
f = open('/etc/mailname')
try:
addr = f.readline().strip()
finally:
f.close()
if not addr:
addr = socket.getfqdn()
if addr:
user = pwd.getpwuid(os.getuid()).pw_name
if not user:
addr = None
else:
addr = "%s@%s" % (user, addr)
if addr:
email = addr
else:
email = None
return (maintainer, email)
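# --- Hedged usage sketch (not part of the upstream module) ----------------------------------
# get_maintainer() prefers DEBFULLNAME/DEBEMAIL, then EMAIL/NAME, then /etc/mailname and the
# gecos field. The identity below is a placeholder.
#
#   os.environ['DEBFULLNAME'] = 'Jane Doe'
#   os.environ['DEBEMAIL'] = 'jane@example.com'
#   get_maintainer()   # -> ('Jane Doe', 'jane@example.com')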
|
[
"os.path.exists",
"socket.getfqdn",
"re.compile",
"os.getuid",
"re.finditer",
"warnings.warn",
"six.text_type",
"six.u"
] |
[((6899, 7040), 're.compile', 're.compile', (["('^(\\\\w%(name_chars)s*) \\\\(([^\\\\(\\\\) \\\\t]+)\\\\)((\\\\s+%(name_chars)s+)+)\\\\;' %\n {'name_chars': '[-+0-9a-z.]'})", 're.IGNORECASE'], {}), "(\n '^(\\\\w%(name_chars)s*) \\\\(([^\\\\(\\\\) \\\\t]+)\\\\)((\\\\s+%(name_chars)s+)+)\\\\;' %\n {'name_chars': '[-+0-9a-z.]'}, re.IGNORECASE)\n", (6909, 7040), False, 'import re\n'), ((7104, 7124), 're.compile', 're.compile', (['"""^\\\\s*$"""'], {}), "('^\\\\s*$')\n", (7114, 7124), False, 'import re\n'), ((7134, 7159), 're.compile', 're.compile', (['"""^\\\\s\\\\s+.*$"""'], {}), "('^\\\\s\\\\s+.*$')\n", (7144, 7159), False, 'import re\n'), ((7169, 7301), 're.compile', 're.compile', (['"""^ -- (.*) <(.*)>( ?)((\\\\w+\\\\,\\\\s*)?\\\\d{1,2}\\\\s+\\\\w+\\\\s+\\\\d{4}\\\\s+\\\\d{1,2}:\\\\d\\\\d:\\\\d\\\\d\\\\s+[-+]\\\\d{4}\\\\s*)$"""'], {}), "(\n '^ -- (.*) <(.*)>( ?)((\\\\w+\\\\,\\\\s*)?\\\\d{1,2}\\\\s+\\\\w+\\\\s+\\\\d{4}\\\\s+\\\\d{1,2}:\\\\d\\\\d:\\\\d\\\\d\\\\s+[-+]\\\\d{4}\\\\s*)$'\n )\n", (7179, 7301), False, 'import re\n'), ((7312, 7449), 're.compile', 're.compile', (['"""^ --(?: (.*) <(.*)>( ?)((\\\\w+\\\\,\\\\s*)?\\\\d{1,2}\\\\s+\\\\w+\\\\s+\\\\d{4}\\\\s+\\\\d{1,2}:\\\\d\\\\d:\\\\d\\\\d\\\\s+[-+]\\\\d{4}))?\\\\s*$"""'], {}), "(\n '^ --(?: (.*) <(.*)>( ?)((\\\\w+\\\\,\\\\s*)?\\\\d{1,2}\\\\s+\\\\w+\\\\s+\\\\d{4}\\\\s+\\\\d{1,2}:\\\\d\\\\d:\\\\d\\\\d\\\\s+[-+]\\\\d{4}))?\\\\s*$'\n )\n", (7322, 7449), False, 'import re\n'), ((7474, 7529), 're.compile', 're.compile', (['"""^([-0-9a-z]+)=\\\\s*(.*\\\\S)$"""', 're.IGNORECASE'], {}), "('^([-0-9a-z]+)=\\\\s*(.*\\\\S)$', re.IGNORECASE)\n", (7484, 7529), False, 'import re\n'), ((7540, 7594), 're.compile', 're.compile', (['"""^([-0-9a-z]+)((\\\\s+.*)?)$"""', 're.IGNORECASE'], {}), "('^([-0-9a-z]+)((\\\\s+.*)?)$', re.IGNORECASE)\n", (7550, 7594), False, 'import re\n'), ((7605, 7643), 're.compile', 're.compile', (['"""^X[BCS]+-"""', 're.IGNORECASE'], {}), "('^X[BCS]+-', re.IGNORECASE)\n", (7615, 7643), False, 'import re\n'), ((7662, 7717), 're.compile', 're.compile', (['"""^(;;\\\\s*)?Local variables:"""', 're.IGNORECASE'], {}), "('^(;;\\\\s*)?Local variables:', re.IGNORECASE)\n", (7672, 7717), False, 'import re\n'), ((7734, 7768), 're.compile', 're.compile', (['"""^vim:"""', 're.IGNORECASE'], {}), "('^vim:', re.IGNORECASE)\n", (7744, 7768), False, 'import re\n'), ((7783, 7811), 're.compile', 're.compile', (['"""^\\\\$\\\\w+:.*\\\\$"""'], {}), "('^\\\\$\\\\w+:.*\\\\$')\n", (7793, 7811), False, 'import re\n'), ((7821, 7840), 're.compile', 're.compile', (['"""^\\\\# """'], {}), "('^\\\\# ')\n", (7831, 7840), False, 'import re\n'), ((7857, 7882), 're.compile', 're.compile', (['"""^/\\\\*.*\\\\*/"""'], {}), "('^/\\\\*.*\\\\*/')\n", (7867, 7882), False, 'import re\n'), ((7891, 7985), 're.compile', 're.compile', (['"""closes:\\\\s*(?:bug)?\\\\#?\\\\s?\\\\d+(?:,\\\\s*(?:bug)?\\\\#?\\\\s?\\\\d+)*"""', 're.IGNORECASE'], {}), "('closes:\\\\s*(?:bug)?\\\\#?\\\\s?\\\\d+(?:,\\\\s*(?:bug)?\\\\#?\\\\s?\\\\d+)*',\n re.IGNORECASE)\n", (7901, 7985), False, 'import re\n'), ((8006, 8066), 're.compile', 're.compile', (['"""lp:\\\\s+\\\\#\\\\d+(?:,\\\\s*\\\\#\\\\d+)*"""', 're.IGNORECASE'], {}), "('lp:\\\\s+\\\\#\\\\d+(?:,\\\\s*\\\\#\\\\d+)*', re.IGNORECASE)\n", (8016, 8066), False, 'import re\n'), ((8080, 8207), 're.compile', 're.compile', (['"""^(\\\\w+\\\\s+\\\\w+\\\\s+\\\\d{1,2} \\\\d{1,2}:\\\\d{1,2}:\\\\d{1,2}\\\\s+[\\\\w\\\\s]*\\\\d{4})\\\\s+(.*)\\\\s+(<|\\\\()(.*)(\\\\)|>)"""'], {}), "(\n '^(\\\\w+\\\\s+\\\\w+\\\\s+\\\\d{1,2} 
\\\\d{1,2}:\\\\d{1,2}:\\\\d{1,2}\\\\s+[\\\\w\\\\s]*\\\\d{4})\\\\s+(.*)\\\\s+(<|\\\\()(.*)(\\\\)|>)'\n )\n", (8090, 8207), False, 'import re\n'), ((8212, 8300), 're.compile', 're.compile', (['"""^(\\\\w+\\\\s+\\\\w+\\\\s+\\\\d{1,2},?\\\\s*\\\\d{4})\\\\s+(.*)\\\\s+(<|\\\\()(.*)(\\\\)|>)"""'], {}), "(\n '^(\\\\w+\\\\s+\\\\w+\\\\s+\\\\d{1,2},?\\\\s*\\\\d{4})\\\\s+(.*)\\\\s+(<|\\\\()(.*)(\\\\)|>)')\n", (8222, 8300), False, 'import re\n'), ((8315, 8389), 're.compile', 're.compile', (['"""^(\\\\w[-+0-9a-z.]*) \\\\(([^\\\\(\\\\) \\\\t]+)\\\\)\\\\;?"""', 're.IGNORECASE'], {}), "('^(\\\\w[-+0-9a-z.]*) \\\\(([^\\\\(\\\\) \\\\t]+)\\\\)\\\\;?', re.IGNORECASE)\n", (8325, 8389), False, 'import re\n'), ((8409, 8475), 're.compile', 're.compile', (['"""^([\\\\w.+-]+)(-| )(\\\\S+) Debian (\\\\S+)"""', 're.IGNORECASE'], {}), "('^([\\\\w.+-]+)(-| )(\\\\S+) Debian (\\\\S+)', re.IGNORECASE)\n", (8419, 8475), False, 'import re\n'), ((8499, 8563), 're.compile', 're.compile', (['"""^Changes from version (.*) to (.*):"""', 're.IGNORECASE'], {}), "('^Changes from version (.*) to (.*):', re.IGNORECASE)\n", (8509, 8563), False, 'import re\n'), ((8589, 8657), 're.compile', 're.compile', (['"""^Changes for [\\\\w.+-]+-[\\\\w.+-]+:?\\\\s*$"""', 're.IGNORECASE'], {}), "('^Changes for [\\\\w.+-]+-[\\\\w.+-]+:?\\\\s*$', re.IGNORECASE)\n", (8599, 8657), False, 'import re\n'), ((8681, 8730), 're.compile', 're.compile', (['"""^Old Changelog:\\\\s*$"""', 're.IGNORECASE'], {}), "('^Old Changelog:\\\\s*$', re.IGNORECASE)\n", (8691, 8730), False, 'import re\n'), ((8748, 8793), 're.compile', 're.compile', (['"""^(?:\\\\d+:)?\\\\w[\\\\w.+~-]*:?\\\\s*$"""'], {}), "('^(?:\\\\d+:)?\\\\w[\\\\w.+~-]*:?\\\\s*$')\n", (8758, 8793), False, 'import re\n'), ((23438, 23468), 're.compile', 're.compile', (['"""^(.*)\\\\s+<(.*)>$"""'], {}), "('^(.*)\\\\s+<(.*)>$')\n", (23448, 23468), False, 'import re\n'), ((4890, 4922), 're.finditer', 're.finditer', (['"""\\\\d+"""', 'closes_list'], {}), "('\\\\d+', closes_list)\n", (4901, 4922), False, 'import re\n'), ((10155, 10177), 'warnings.warn', 'warnings.warn', (['message'], {}), '(message)\n', (10168, 10177), False, 'import warnings\n'), ((24645, 24676), 'os.path.exists', 'os.path.exists', (['"""/etc/mailname"""'], {}), "('/etc/mailname')\n", (24659, 24676), False, 'import os\n'), ((4727, 4737), 'six.u', 'six.u', (['""" """'], {}), "(' ')\n", (4732, 4737), False, 'import six\n'), ((21048, 21068), 'six.text_type', 'six.text_type', (['block'], {}), '(block)\n', (21061, 21068), False, 'import six\n'), ((21085, 21094), 'six.u', 'six.u', (['""""""'], {}), "('')\n", (21090, 21094), False, 'import six\n'), ((24864, 24880), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (24878, 24880), False, 'import socket\n'), ((20943, 20954), 'six.u', 'six.u', (['"""\n"""'], {}), "('\\n')\n", (20948, 20954), False, 'import six\n'), ((24930, 24941), 'os.getuid', 'os.getuid', ([], {}), '()\n', (24939, 24941), False, 'import os\n'), ((24359, 24370), 'os.getuid', 'os.getuid', ([], {}), '()\n', (24368, 24370), False, 'import os\n')]
|
import requests
import os
import subprocess
import gidgethub
from gidgethub import sansio
AUTOMERGE_LABEL = ":robot: automerge"
def comment_on_pr(issue_number, message):
"""
Leave a comment on a PR/Issue
"""
request_headers = sansio.create_headers(
"miss-islington", oauth_token=os.getenv("GH_AUTH")
)
issue_comment_url = (
f"https://api.github.com/repos/python/cpython/issues/{issue_number}/comments"
)
data = {"body": message}
response = requests.post(issue_comment_url, headers=request_headers, json=data)
if response.status_code == requests.codes.created:
print(f"Commented at {response.json()['html_url']}, message: {message}")
else:
print(response.status_code)
print(response.text)
return response
def assign_pr_to_core_dev(issue_number, coredev_login):
"""
Assign the PR to a core dev. Should be done when miss-islington failed
to backport.
"""
request_headers = sansio.create_headers(
"miss-islington", oauth_token=os.getenv("GH_AUTH")
)
edit_issue_url = (
f"https://api.github.com/repos/python/cpython/issues/{issue_number}"
)
data = {"assignees": [coredev_login]}
response = requests.patch(edit_issue_url, headers=request_headers, json=data)
if response.status_code == requests.codes.created:
print(f"Assigned PR {issue_number} to {coredev_login}")
else:
print(response.status_code)
print(response.text)
return response
async def leave_comment(gh, pr_number, message):
"""
Leave a comment on a PR/Issue
"""
issue_comment_url = f"/repos/python/cpython/issues/{pr_number}/comments"
data = {"body": message}
await gh.post(issue_comment_url, data=data)
def is_cpython_repo():
cmd = "git log -r 7f777ed95a19224294949e1b4ce56bbffcb1fe9f"
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.SubprocessError:
return False
return True
async def get_gh_participants(gh, pr_number):
pr_url = f"/repos/python/cpython/pulls/{pr_number}"
pr_result = await gh.getitem(pr_url)
created_by = pr_result["user"]["login"]
merged_by = None
if pr_result["merged_by"] and pr_result["merged_by"]["login"] != "miss-islington":
merged_by = pr_result["merged_by"]["login"]
participants = ""
if created_by == merged_by or merged_by is None:
participants = f"@{created_by}"
else:
participants = f"@{created_by} and @{merged_by}"
return participants
def get_participants(created_by, merged_by):
participants = ""
if created_by == merged_by or merged_by == "miss-islington":
participants = f"@{created_by}"
else:
participants = f"@{created_by} and @{merged_by}"
return participants
def normalize_title(title, body):
"""Normalize the title if it spills over into the PR's body."""
if not (title.endswith("…") and body.startswith("…")):
return title
else:
# Being paranoid in case \r\n is used.
return title[:-1] + body[1:].partition("\r\n")[0]
def normalize_message(body):
"""Normalize the message body to make it commit-worthy.
Mostly this just means removing HTML comments, but also removes unwanted
leading or trailing whitespace.
Returns the normalized body.
"""
while "<!--" in body:
body = body[: body.index("<!--")] + body[body.index("-->") + 3 :]
return "\n\n" + body.strip()
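# --- Hedged illustration (not part of the original module) ----------------------------------
# How the two normalisers above behave on GitHub-style input (sample strings are made up):
#
#   normalize_title("Fix crash in the …", "…tokenizer\r\nLonger description.")
#       -> "Fix crash in the tokenizer"
#   normalize_message("Some body text.\n<!-- bot instructions -->")
#       -> "\n\nSome body text."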
# Copied over from https://github.com/python/bedevere
async def is_core_dev(gh, username):
"""Check if the user is a CPython core developer."""
org_teams = "/orgs/python/teams"
team_name = "python core"
async for team in gh.getiter(org_teams):
if team["name"].lower() == team_name:
break
else:
raise ValueError(f"{team_name!r} not found at {org_teams!r}")
# The 'teams' object only provides a URL to a deprecated endpoint,
# so manually construct the URL to the non-deprecated team membership
# endpoint.
membership_url = f"/teams/{team['id']}/memberships/{username}"
try:
await gh.getitem(membership_url)
except gidgethub.BadRequest as exc:
if exc.status_code == 404:
return False
raise
else:
return True
def pr_is_awaiting_merge(pr_labels):
label_names = [label["name"] for label in pr_labels]
if (
"DO-NOT-MERGE" not in label_names
and "awaiting merge" in label_names
and "CLA signed" in label_names
):
return True
return False
def pr_is_automerge(pr_labels):
for label in pr_labels:
if label["name"] == AUTOMERGE_LABEL:
return True
return False
async def get_pr_for_commit(gh, sha):
prs_for_commit = await gh.getitem(
f"/search/issues?q=type:pr+repo:python/cpython+sha:{sha}"
)
if prs_for_commit["total_count"] > 0: # there should only be one
pr_for_commit = prs_for_commit["items"][0]
return pr_for_commit
return None
|
[
"requests.patch",
"requests.post",
"os.getenv"
] |
[((498, 566), 'requests.post', 'requests.post', (['issue_comment_url'], {'headers': 'request_headers', 'json': 'data'}), '(issue_comment_url, headers=request_headers, json=data)\n', (511, 566), False, 'import requests\n'), ((1238, 1304), 'requests.patch', 'requests.patch', (['edit_issue_url'], {'headers': 'request_headers', 'json': 'data'}), '(edit_issue_url, headers=request_headers, json=data)\n', (1252, 1304), False, 'import requests\n'), ((309, 329), 'os.getenv', 'os.getenv', (['"""GH_AUTH"""'], {}), "('GH_AUTH')\n", (318, 329), False, 'import os\n'), ((1048, 1068), 'os.getenv', 'os.getenv', (['"""GH_AUTH"""'], {}), "('GH_AUTH')\n", (1057, 1068), False, 'import os\n')]
|
import aiosqlite
import sqlite3
import asyncio
import nonebot
from nonebot.log import logger
driver: nonebot.Driver = nonebot.get_driver()
config: nonebot.config.Config = driver.config
@driver.on_startup
async def init_db():
config.db = await aiosqlite.connect("src/static/Kiba.db")
logger.info("Kiba Kernel -> Starting to Create \"Kiba Database\"")
try:
await config.db.executescript(
"create table group_poke_table (group_id bigint primary key not null, last_trigger_time int, triggered int, disabled bit, strategy text);"
"create table user_poke_table (user_id bigint, group_id bigint, triggered int);"
"create table guess_table (group_id bigint, enabled bit);"
"create table waiting_table (shop text, location text, wait int, updated text);"
"create table plp_table (id bigint, user_id bigint, nickname text, message text, is_picture bit, view bigint, reply bigint);"
"create table plp_reply_table (id bigint, plpid bigint, userid bigint, nickname text, message text);"
"create table group_plp_table (group_id bigint, disableinsert int, disabletake int, disablereply int, slowmode int, limit int, time bigint);"
"create table plp_blacklist_table (id bigint, lastbanner bigint, disableinsert int, disabletake int, disablereply int);"
"create table gld_table (qq bigint, uid bigint);"
"create table sign_table (no bigint, id bigint, day int);"
"create table acard_table (id bigint, times int, six int, five int, four int, three int, two int, one int);"
)
logger.info("Kiba Kernel -> Create \"Kiba Database\" successfully")
except Exception as e:
logger.info(f"Kiba Kernel --Skip-> Database Created....Skipped Creating Databases. \n[SKIP ERR]{e}")
pass
@driver.on_shutdown
async def free_db():
await config.db.close()
|
[
"nonebot.log.logger.info",
"aiosqlite.connect",
"nonebot.get_driver"
] |
[((119, 139), 'nonebot.get_driver', 'nonebot.get_driver', ([], {}), '()\n', (137, 139), False, 'import nonebot\n'), ((294, 358), 'nonebot.log.logger.info', 'logger.info', (['"""Kiba Kernel -> Starting to Create "Kiba Database\\""""'], {}), '(\'Kiba Kernel -> Starting to Create "Kiba Database"\')\n', (305, 358), False, 'from nonebot.log import logger\n'), ((250, 289), 'aiosqlite.connect', 'aiosqlite.connect', (['"""src/static/Kiba.db"""'], {}), "('src/static/Kiba.db')\n", (267, 289), False, 'import aiosqlite\n'), ((1632, 1697), 'nonebot.log.logger.info', 'logger.info', (['"""Kiba Kernel -> Create "Kiba Database" successfully"""'], {}), '(\'Kiba Kernel -> Create "Kiba Database" successfully\')\n', (1643, 1697), False, 'from nonebot.log import logger\n'), ((1735, 1848), 'nonebot.log.logger.info', 'logger.info', (['f"""Kiba Kernel --Skip-> Database Created....Skipped Creating Databases. \n[SKIP ERR]{e}"""'], {}), '(\n f"""Kiba Kernel --Skip-> Database Created....Skipped Creating Databases. \n[SKIP ERR]{e}"""\n )\n', (1746, 1848), False, 'from nonebot.log import logger\n')]
|
from typing import Callable, Optional, Sequence, Tuple, Union
import numpy
from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice
from dexp.processing.utils.normalise import Normalise
from dexp.utils import xpArray
from dexp.utils.backends import Backend
def scatter_gather_i2i(
function: Callable,
image: xpArray,
tiles: Union[int, Tuple[int, ...]],
margins: Optional[Union[int, Tuple[int, ...]]] = None,
normalise: bool = False,
clip: bool = False,
to_numpy: bool = True,
internal_dtype: Optional[numpy.dtype] = None,
) -> xpArray:
"""
Image-2-image scatter-gather.
    'Scatters' computation of a given unary function by splitting the input array into tiles,
    computing each tile using a given backend, and reassembling the tiles into a single array of
    the same shape as the input. The result is either backed by the same backend as the input
    image, or backed by numpy -- useful when the compute backend cannot hold the whole input and
    output images in memory.
Parameters
----------
function : unary function
image : input image (can be any backend, numpy )
tiles : tile sizes to cut input image into, can be a single integer or a tuple of integers.
margins : margins to add to each tile, can be a single integer or a tuple of integers.
if None, no margins are added.
normalise : normalises the input image.
clip : clip after normalisation/denormalisation
    to_numpy : should the result be a numpy array? Very useful when the compute backend
cannot hold the whole input and output images in memory.
internal_dtype : internal dtype for computation
Returns
-------
    Result of applying the unary function to the input image; if to_numpy==True the result is returned as a numpy array.
"""
if internal_dtype is None:
internal_dtype = image.dtype
if type(tiles) == int:
tiles = (tiles,) * image.ndim
    # If None is passed for a tile that means that we don't tile along that axis; we also clip the tile size:
tiles = tuple((length if tile is None else min(length, tile)) for tile, length in zip(tiles, image.shape))
if margins is None:
margins = (0,) * image.ndim
if type(margins) == int:
margins = (margins,) * image.ndim
if to_numpy:
result = numpy.empty(shape=image.shape, dtype=internal_dtype)
else:
result = Backend.get_xp_module(image).empty_like(image, dtype=internal_dtype)
# Normalise:
norm = Normalise(Backend.to_backend(image), do_normalise=normalise, clip=clip, quantile=0.005)
# image shape:
shape = image.shape
# We compute the slices objects to cut the input and target images into batches:
tile_slices = list(nd_split_slices(shape, chunks=tiles, margins=margins))
tile_slices_no_margins = list(nd_split_slices(shape, chunks=tiles))
# Zipping together slices with and without margins:
slices = zip(tile_slices, tile_slices_no_margins)
# Number of tiles:
number_of_tiles = len(tile_slices)
if number_of_tiles == 1:
# If there is only one tile, let's not be complicated about it:
result = norm.backward(function(norm.forward(image)))
if to_numpy:
result = Backend.to_numpy(result, dtype=internal_dtype)
else:
result = Backend.to_backend(result, dtype=internal_dtype)
else:
_scatter_gather_loop(
norm.backward, function, image, internal_dtype, norm.forward, result, shape, slices, to_numpy
)
return result
def _scatter_gather_loop(
denorm_fun: Callable,
function: Callable,
image: xpArray,
internal_dtype: numpy.dtype,
norm_fun: Callable,
result: Callable,
shape: Tuple[int, ...],
slices: Sequence[Tuple[slice, ...]],
to_numpy: bool,
) -> None:
for tile_slice, tile_slice_no_margins in slices:
image_tile = image[tile_slice]
image_tile = Backend.to_backend(image_tile, dtype=internal_dtype)
image_tile = denorm_fun(function(norm_fun(image_tile)))
if to_numpy:
image_tile = Backend.to_numpy(image_tile, dtype=internal_dtype)
else:
image_tile = Backend.to_backend(image_tile, dtype=internal_dtype)
remove_margin_slice_tuple = remove_margin_slice(shape, tile_slice, tile_slice_no_margins)
image_tile = image_tile[remove_margin_slice_tuple]
result[tile_slice_no_margins] = image_tile
# Dask turned out not to work great here, HUGE overhead compared to the light approach above.
# def scatter_gather_dask(backend: Backend,
# function,
# image,
# chunks,
# margins=None):
# boundary=None
# trim=True
# align_arrays=True
#
# image_d = from_array(image, chunks=chunks, asarray=False)
#
# def function_numpy(_image):
# print(_image.shape)
# return backend.to_numpy(function(_image))
#
# #func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs
# computation= map_overlap(function_numpy,
# image_d,
# depth=margins,
# boundary=boundary,
# trim=trim,
# align_arrays=align_arrays,
# dtype=image.dtype
# )
#
# #computation.visualize(filename='transpose.png')
# result = computation.compute()
#
# return result
|
[
"dexp.processing.utils.nd_slice.nd_split_slices",
"dexp.utils.backends.Backend.get_xp_module",
"dexp.utils.backends.Backend.to_backend",
"dexp.processing.utils.nd_slice.remove_margin_slice",
"dexp.utils.backends.Backend.to_numpy",
"numpy.empty"
] |
[((2346, 2398), 'numpy.empty', 'numpy.empty', ([], {'shape': 'image.shape', 'dtype': 'internal_dtype'}), '(shape=image.shape, dtype=internal_dtype)\n', (2357, 2398), False, 'import numpy\n'), ((2534, 2559), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['image'], {}), '(image)\n', (2552, 2559), False, 'from dexp.utils.backends import Backend\n'), ((2765, 2818), 'dexp.processing.utils.nd_slice.nd_split_slices', 'nd_split_slices', (['shape'], {'chunks': 'tiles', 'margins': 'margins'}), '(shape, chunks=tiles, margins=margins)\n', (2780, 2818), False, 'from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice\n'), ((2854, 2890), 'dexp.processing.utils.nd_slice.nd_split_slices', 'nd_split_slices', (['shape'], {'chunks': 'tiles'}), '(shape, chunks=tiles)\n', (2869, 2890), False, 'from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice\n'), ((3969, 4021), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['image_tile'], {'dtype': 'internal_dtype'}), '(image_tile, dtype=internal_dtype)\n', (3987, 4021), False, 'from dexp.utils.backends import Backend\n'), ((4312, 4373), 'dexp.processing.utils.nd_slice.remove_margin_slice', 'remove_margin_slice', (['shape', 'tile_slice', 'tile_slice_no_margins'], {}), '(shape, tile_slice, tile_slice_no_margins)\n', (4331, 4373), False, 'from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice\n'), ((3272, 3318), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['result'], {'dtype': 'internal_dtype'}), '(result, dtype=internal_dtype)\n', (3288, 3318), False, 'from dexp.utils.backends import Backend\n'), ((3354, 3402), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['result'], {'dtype': 'internal_dtype'}), '(result, dtype=internal_dtype)\n', (3372, 3402), False, 'from dexp.utils.backends import Backend\n'), ((4132, 4182), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['image_tile'], {'dtype': 'internal_dtype'}), '(image_tile, dtype=internal_dtype)\n', (4148, 4182), False, 'from dexp.utils.backends import Backend\n'), ((4222, 4274), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['image_tile'], {'dtype': 'internal_dtype'}), '(image_tile, dtype=internal_dtype)\n', (4240, 4274), False, 'from dexp.utils.backends import Backend\n'), ((2426, 2454), 'dexp.utils.backends.Backend.get_xp_module', 'Backend.get_xp_module', (['image'], {}), '(image)\n', (2447, 2454), False, 'from dexp.utils.backends import Backend\n')]
|
import AppKit
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSPDFInfo(TestCase):
@min_os_level("10.9")
def testMethods(self):
self.assertResultIsBOOL(AppKit.NSPDFInfo.isFileExtensionHidden)
self.assertArgIsBOOL(AppKit.NSPDFInfo.setFileExtensionHidden_, 0)
|
[
"PyObjCTools.TestSupport.min_os_level"
] |
[((111, 131), 'PyObjCTools.TestSupport.min_os_level', 'min_os_level', (['"""10.9"""'], {}), "('10.9')\n", (123, 131), False, 'from PyObjCTools.TestSupport import TestCase, min_os_level\n')]
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
__author__ = 'ma_keling'
# Version : 1.0.0
# Start Time : 2018-11-29
# Update Time :
# Change Log :
## 1.
## 2.
## 3.
import time
import arcpy
import math
def express_arcpy_error():
severity = arcpy.GetMaxSeverity()
if severity == 2:
# If the tool returned an error
arcpy.AddError("Error occurred \n{0}".format(arcpy.GetMessages(2)))
elif severity == 1:
# If the tool returned no errors, but returned a warning
arcpy.AddWarning("Warning raised \n{0}".format(arcpy.GetMessages(1)))
else:
# If the tool did not return an error or a warning
arcpy.AddMessage(arcpy.GetMessages())
# Description: Loop layers and calculate lod for every feature in the layer.
def calculate_lods_for_feature(in_layers, fieldName):
try:
startTime = time.time()
timeStampName = time.strftime('%Y_%m_%d %H:%M:%S', time.localtime(time.time()))
arcpy.AddMessage("Start compute lods at: {0}".format(timeStampName))
for layer in in_layers:
arcpy.AddMessage("Calculating lod for layer : {0}.".format(layer))
add_field(layer, fieldName, 9)
cursor = arcpy.da.UpdateCursor(layer, ['SHAPE@', 'OID@', fieldName])
lyr_path = layer.dataSource
desc = arcpy.Describe(lyr_path)
extent = desc.extent
arcpy.AddMessage("Original dataset extent:" + str(desc.extent))
ext_wm = extent.projectAs(arcpy.SpatialReference(102100))
arcpy.AddMessage("New WebMercator extent:" + str(ext_wm))
start_level, start_compute_resolution = confirm_level(ext_wm)
if desc.shapeType == "Polygon":
baselength, basearea = get_length_area_from_pixel(96, 295828764)
lod_area = basearea / math.pow(4, start_level - 1)
arcpy.AddMessage("start lod area: " + str(lod_area))
for row in cursor:
lod = calculate_lod_for_polygon(row[0], baselength, lod_area,start_level)
row[2] = lod
cursor.updateRow(row)
elif desc.shapeType == "Point":
count = get_count(layer)
arcpy.AddMessage("Total Points:" + str(count))
if count < 200000:
arcpy.AddMessage("Input point sets too small for optimized, skip!")
continue
else:
n = math.ceil(math.log(count / (512 * 512 / 16), 4))
arcpy.AddMessage("n:" + str(n))
for row in cursor:
oid = row[1]
lod = calculate_lod_for_point(oid,start_level,n)
row[2] = lod
cursor.updateRow(row)
elif desc.shapeType == 'Polyline':
baselength = get_length_from_pixel(96, 295828764)
lod_length = baselength / math.pow(2, start_level - 1)
for row in cursor:
lod = calculate_lod_for_polyline(row[0],lod_length,start_level)
row[2] = lod
cursor.updateRow(row)
endTime = time.time()
print("Compute finished, elapsed: {0} Seconds.eter..".format(str(endTime - startTime)))
arcpy.AddMessage("Compute finished, elapsed: {0} Seconds.eter..".format(str(endTime - startTime)))
print("\n")
arcpy.AddMessage("\n")
except arcpy.ExecuteError:
express_arcpy_error()
# Description: Compute the total records for a featureclass
def get_count(layer):
fields = ['SHAPE@']
count = 0
with arcpy.da.SearchCursor(layer, fields) as cursor:
for row in cursor:
count += 1
return count
# Description: get the start level based on layer extent
def confirm_level(extent):
width = extent.width
height = extent.height
arcpy.AddMessage("width:"+str(width) +" height:"+ str(height))
length = max(width, height)
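    # 78271.516964... m/px is the Web Mercator resolution at which a single 512-px tile spans
    # the full world width (78271.516964 * 512 ~= 40,075,017 m); each halving of
    # base_tile_resolution in the loop below corresponds to one additional zoom level.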
base_resolution = 78271.516964011724
base_tile_resolution = base_resolution * 512
for level in range(21):
start_compute_resolution = base_tile_resolution
if length >= base_tile_resolution:
arcpy.AddMessage("level:" + str(level))
arcpy.AddMessage("base tile resolution:" + str(base_tile_resolution))
return level, start_compute_resolution
else:
base_tile_resolution /= 2
# Description: Add a new field with name lod to a table
def add_field(inFeatures,fieldName,fieldPrecision):
try:
startTime = time.time()
timeStampName = time.strftime('%Y_%m_%d %H:%M:%S', time.localtime(time.time()))
print("start add new field for: ", inFeatures, " at: ", timeStampName)
arcpy.AddMessage("start add new field for: {0} at: {1}".format(str(inFeatures), str(timeStampName)))
# Execute AddField for new field
arcpy.AddField_management(inFeatures, fieldName, "Text", fieldPrecision,
field_alias=fieldName, field_is_nullable="NULLABLE")
endTime = time.time()
print(inFeatures, "Add field:", fieldName, "finished, elapsed: ", str(endTime - startTime) + ' Seconds.eter..')
arcpy.AddMessage(
"Add field: {0} finished, elapsed: {1} Seconds.eter..".format(fieldName, str(endTime - startTime)))
print("\n")
arcpy.AddMessage("\n")
except arcpy.ExecuteError:
express_arcpy_error()
# Description: Compute get area and length per pixel based on dpi and scale
def get_length_area_from_pixel(dpi,scale):
pixel = 1 / dpi * 0.025
length = scale * pixel
area = length * length
return length,area
# Description: Compute get length per pixel based on dpi and scale
def get_length_from_pixel(dpi,scale):
pixel = 1 / dpi * 0.025
length = scale * pixel
return length
# Description: Calculate lod for every polygon
def calculate_lod_for_polygon(feature,baselength, basearea, start_level):
try:
if feature:
area = feature.getArea("GEODESIC", "SQUAREMETERS")
# length = feature.getLength("GEODESIC", "METERS")
lod = start_level
for i in range(20):
# arcpy.AddMessage(str(i) + ":" + str(basearea) + "___"+str(area))
# arcpy.AddMessage(str(i) + ":" + str(baselength) + "___" + str(length))
if area >= basearea :
return str(lod)
else:
lod += 1
basearea /= 4
baselength /= 2
return str(lod)
else:
print(type(feature))
return "19"
except arcpy.ExecuteError:
express_arcpy_error()
# Description: Calculate lod for every point
def calculate_lod_for_point(id, start_level, n):
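    # Points whose OID is divisible by 4**(n - i) become visible at start_level + i, so only
    # every 4th/16th/... point is drawn at the coarser zoom levels; points not divisible by 4
    # are assigned start_level + n.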
try:
for i in range(n):
if id % math.pow(4, n - i) == 0:
return start_level
else:
start_level += 1
return start_level
except arcpy.ExecuteError:
express_arcpy_error()
# Description: Calculate lod for every polyline
def calculate_lod_for_polyline(feature,baselength, start_level):
try:
if feature:
length = feature.getLength("GEODESIC", "METERS")
lod = start_level
for i in range(20):
# arcpy.AddMessage(str(i) + ":" + str(basearea) + "___"+str(area))
# arcpy.AddMessage(str(i) + ":" + str(baselength) + "___" + str(length))
if length >= baselength:
return lod
else:
lod += 1
baselength /= 2
return lod
else:
print(type(feature))
except arcpy.ExecuteError:
express_arcpy_error()
def execute():
in_map = arcpy.GetParameter(0)
arcpy.AddMessage("Input map : {0}.".format(in_map))
in_layers = arcpy.GetParameter(1)
field_name = "lod"
calculate_lods_for_feature(in_layers, field_name)
# execute()
|
[
"arcpy.GetMessages",
"arcpy.AddField_management",
"arcpy.GetMaxSeverity",
"arcpy.AddMessage",
"math.pow",
"arcpy.Describe",
"arcpy.da.SearchCursor",
"math.log",
"arcpy.GetParameter",
"time.time",
"arcpy.SpatialReference",
"arcpy.da.UpdateCursor"
] |
[((279, 301), 'arcpy.GetMaxSeverity', 'arcpy.GetMaxSeverity', ([], {}), '()\n', (299, 301), False, 'import arcpy\n'), ((8135, 8156), 'arcpy.GetParameter', 'arcpy.GetParameter', (['(0)'], {}), '(0)\n', (8153, 8156), False, 'import arcpy\n'), ((8231, 8252), 'arcpy.GetParameter', 'arcpy.GetParameter', (['(1)'], {}), '(1)\n', (8249, 8252), False, 'import arcpy\n'), ((897, 908), 'time.time', 'time.time', ([], {}), '()\n', (906, 908), False, 'import time\n'), ((3309, 3320), 'time.time', 'time.time', ([], {}), '()\n', (3318, 3320), False, 'import time\n'), ((3556, 3578), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""\n"""'], {}), "('\\n')\n", (3572, 3578), False, 'import arcpy\n'), ((3778, 3814), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['layer', 'fields'], {}), '(layer, fields)\n', (3799, 3814), False, 'import arcpy\n'), ((4758, 4769), 'time.time', 'time.time', ([], {}), '()\n', (4767, 4769), False, 'import time\n'), ((5102, 5231), 'arcpy.AddField_management', 'arcpy.AddField_management', (['inFeatures', 'fieldName', '"""Text"""', 'fieldPrecision'], {'field_alias': 'fieldName', 'field_is_nullable': '"""NULLABLE"""'}), "(inFeatures, fieldName, 'Text', fieldPrecision,\n field_alias=fieldName, field_is_nullable='NULLABLE')\n", (5127, 5231), False, 'import arcpy\n'), ((5284, 5295), 'time.time', 'time.time', ([], {}), '()\n', (5293, 5295), False, 'import time\n'), ((5589, 5611), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""\n"""'], {}), "('\\n')\n", (5605, 5611), False, 'import arcpy\n'), ((1257, 1316), 'arcpy.da.UpdateCursor', 'arcpy.da.UpdateCursor', (['layer', "['SHAPE@', 'OID@', fieldName]"], {}), "(layer, ['SHAPE@', 'OID@', fieldName])\n", (1278, 1316), False, 'import arcpy\n'), ((1380, 1404), 'arcpy.Describe', 'arcpy.Describe', (['lyr_path'], {}), '(lyr_path)\n', (1394, 1404), False, 'import arcpy\n'), ((420, 440), 'arcpy.GetMessages', 'arcpy.GetMessages', (['(2)'], {}), '(2)\n', (437, 440), False, 'import arcpy\n'), ((710, 729), 'arcpy.GetMessages', 'arcpy.GetMessages', ([], {}), '()\n', (727, 729), False, 'import arcpy\n'), ((984, 995), 'time.time', 'time.time', ([], {}), '()\n', (993, 995), False, 'import time\n'), ((1555, 1585), 'arcpy.SpatialReference', 'arcpy.SpatialReference', (['(102100)'], {}), '(102100)\n', (1577, 1585), False, 'import arcpy\n'), ((4845, 4856), 'time.time', 'time.time', ([], {}), '()\n', (4854, 4856), False, 'import time\n'), ((590, 610), 'arcpy.GetMessages', 'arcpy.GetMessages', (['(1)'], {}), '(1)\n', (607, 610), False, 'import arcpy\n'), ((1903, 1931), 'math.pow', 'math.pow', (['(4)', '(start_level - 1)'], {}), '(4, start_level - 1)\n', (1911, 1931), False, 'import math\n'), ((7144, 7162), 'math.pow', 'math.pow', (['(4)', '(n - i)'], {}), '(4, n - i)\n', (7152, 7162), False, 'import math\n'), ((2418, 2485), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Input point sets too small for optimized, skip!"""'], {}), "('Input point sets too small for optimized, skip!')\n", (2434, 2485), False, 'import arcpy\n'), ((2574, 2611), 'math.log', 'math.log', (['(count / (512 * 512 / 16))', '(4)'], {}), '(count / (512 * 512 / 16), 4)\n', (2582, 2611), False, 'import math\n'), ((3061, 3089), 'math.pow', 'math.pow', (['(2)', '(start_level - 1)'], {}), '(2, start_level - 1)\n', (3069, 3089), False, 'import math\n')]
|
from multiprocessing import Queue, Value
from time import sleep
from access_face_vision.source.camera import Camera
from access_face_vision.utils import create_parser
from access_face_vision import access_logger
LOG_LEVEL = 'debug'
logger, log_que, que_listener = access_logger.set_main_process_logger(LOG_LEVEL)
def test_camera():
logger.info('Starting Camera test')
cmd_args = create_parser()
camera = Camera(cmd_args, Queue(), log_que, LOG_LEVEL, Value('i',0), draw_frames=True)
camera.start()
sleep(60)
camera.stop()
logger.info('Camera test completed')
que_listener.stop()
if __name__ == '__main__':
test_camera()
|
[
"multiprocessing.Value",
"time.sleep",
"access_face_vision.access_logger.set_main_process_logger",
"access_face_vision.utils.create_parser",
"multiprocessing.Queue"
] |
[((266, 314), 'access_face_vision.access_logger.set_main_process_logger', 'access_logger.set_main_process_logger', (['LOG_LEVEL'], {}), '(LOG_LEVEL)\n', (303, 314), False, 'from access_face_vision import access_logger\n'), ((391, 406), 'access_face_vision.utils.create_parser', 'create_parser', ([], {}), '()\n', (404, 406), False, 'from access_face_vision.utils import create_parser\n'), ((521, 530), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (526, 530), False, 'from time import sleep\n'), ((437, 444), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (442, 444), False, 'from multiprocessing import Queue, Value\n'), ((466, 479), 'multiprocessing.Value', 'Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (471, 479), False, 'from multiprocessing import Queue, Value\n')]
|
from utils.deserializer.protobuf_deserializer import ProtoLoader
from pathlib import Path
import pandas as pd
import pytest
PROTOFILES_DIR_PATH = Path(__file__).parent.joinpath("protofilesdir").absolute().__str__()
INVALID_PATH = "some/wrong/path"
@pytest.mark.parametrize('filepath', ["test_file.pb", "test_file_1.txt", "test_file_2.xml"])
def test_should_return_single_df_sequence_regardless_file_extension(filepath):
loader = ProtoLoader(PROTOFILES_DIR_PATH)
sequence = loader.get_single_sequence(filepath)
assert isinstance(sequence, pd.DataFrame)
def test_should_return_not_none_when_directory_not_empty():
loader = ProtoLoader(PROTOFILES_DIR_PATH)
seq_list = loader.get_list_of_sequences()
assert seq_list is not None
def test_should_return_correct_length_of_seq_list():
loader = ProtoLoader(PROTOFILES_DIR_PATH)
seq_list = loader.get_list_of_sequences()
assert len(seq_list) == 3
def test_should_return_empty_list_when_directory_empty():
loader = ProtoLoader(PROTOFILES_DIR_PATH + INVALID_PATH)
seq_list = loader.get_list_of_sequences()
assert len(seq_list) == 0
def test_should_check_for_list_when_directory_empty():
loader = ProtoLoader(PROTOFILES_DIR_PATH + INVALID_PATH)
seq_list = loader.get_list_of_sequences()
assert isinstance(seq_list, list)
def test_should_return_list_of_sequences():
loader = ProtoLoader(PROTOFILES_DIR_PATH)
seq_list = loader.get_list_of_sequences()
for seq in seq_list:
assert isinstance(seq, pd.DataFrame)
|
[
"pytest.mark.parametrize",
"utils.deserializer.protobuf_deserializer.ProtoLoader",
"pathlib.Path"
] |
[((252, 347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filepath"""', "['test_file.pb', 'test_file_1.txt', 'test_file_2.xml']"], {}), "('filepath', ['test_file.pb', 'test_file_1.txt',\n 'test_file_2.xml'])\n", (275, 347), False, 'import pytest\n'), ((436, 468), 'utils.deserializer.protobuf_deserializer.ProtoLoader', 'ProtoLoader', (['PROTOFILES_DIR_PATH'], {}), '(PROTOFILES_DIR_PATH)\n', (447, 468), False, 'from utils.deserializer.protobuf_deserializer import ProtoLoader\n'), ((642, 674), 'utils.deserializer.protobuf_deserializer.ProtoLoader', 'ProtoLoader', (['PROTOFILES_DIR_PATH'], {}), '(PROTOFILES_DIR_PATH)\n', (653, 674), False, 'from utils.deserializer.protobuf_deserializer import ProtoLoader\n'), ((821, 853), 'utils.deserializer.protobuf_deserializer.ProtoLoader', 'ProtoLoader', (['PROTOFILES_DIR_PATH'], {}), '(PROTOFILES_DIR_PATH)\n', (832, 853), False, 'from utils.deserializer.protobuf_deserializer import ProtoLoader\n'), ((1003, 1050), 'utils.deserializer.protobuf_deserializer.ProtoLoader', 'ProtoLoader', (['(PROTOFILES_DIR_PATH + INVALID_PATH)'], {}), '(PROTOFILES_DIR_PATH + INVALID_PATH)\n', (1014, 1050), False, 'from utils.deserializer.protobuf_deserializer import ProtoLoader\n'), ((1197, 1244), 'utils.deserializer.protobuf_deserializer.ProtoLoader', 'ProtoLoader', (['(PROTOFILES_DIR_PATH + INVALID_PATH)'], {}), '(PROTOFILES_DIR_PATH + INVALID_PATH)\n', (1208, 1244), False, 'from utils.deserializer.protobuf_deserializer import ProtoLoader\n'), ((1388, 1420), 'utils.deserializer.protobuf_deserializer.ProtoLoader', 'ProtoLoader', (['PROTOFILES_DIR_PATH'], {}), '(PROTOFILES_DIR_PATH)\n', (1399, 1420), False, 'from utils.deserializer.protobuf_deserializer import ProtoLoader\n'), ((147, 161), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'from pathlib import Path\n')]
|
"""add run_type
Revision ID: 5dd2ba8222b1
Revises: 079a74c15e8b
Create Date: 2021-07-22 23:53:04.043651
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5dd2ba8222b1'
down_revision = '079a74c15e8b'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('experiment_runs', sa.Column('run_type', sa.Text(), nullable=True), schema='triage_metadata')
op.execute("UPDATE triage_metadata.experiment_runs SET run_type='experiment' WHERE run_type IS NULL")
op.alter_column('experiment_runs', 'experiment_hash', nullable=True, new_column_name='run_hash', schema='triage_metadata')
op.drop_constraint('experiment_runs_experiment_hash_fkey', 'experiment_runs', type_='foreignkey', schema='triage_metadata')
op.execute("ALTER TABLE triage_metadata.experiment_runs RENAME TO triage_runs")
op.create_table('retrain',
sa.Column('retrain_hash', sa.Text(), nullable=False),
sa.Column('config', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column('prediction_date', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('retrain_hash'),
schema='triage_metadata',
)
op.alter_column('models', 'built_in_experiment_run', nullable=False, new_column_name='built_in_triage_run', schema='triage_metadata')
op.execute("CREATE TABLE triage_metadata.deprecated_models_built_by_experiment AS SELECT model_id, model_hash, built_by_experiment FROM triage_metadata.models")
op.drop_column('models', 'built_by_experiment', schema='triage_metadata')
op.create_table('retrain_models',
sa.Column('retrain_hash', sa.String(), nullable=False),
sa.Column('model_hash', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['retrain_hash'], ['triage_metadata.retrain.retrain_hash'], ),
sa.PrimaryKeyConstraint('retrain_hash', 'model_hash'),
schema='triage_metadata'
)
def downgrade():
op.execute("ALTER TABLE triage_metadata.triage_runs RENAME TO experiment_runs")
op.drop_column('experiment_runs', 'run_type', schema='triage_metadata')
op.alter_column('experiment_runs', 'run_hash', nullable=True, new_column_name='experiment_hash', schema='triage_metadata')
op.create_foreign_key('experiment_runs_experiment_hash_fkey', 'experiment_runs', 'experiments', ['experiment_hash'], ['experiment_hash'], source_schema='triage_metadata', referent_schema='triage_metadata')
op.drop_table('retrain_models', schema='triage_metadata')
op.drop_table('retrain', schema='triage_metadata')
op.add_column('models', sa.Column('built_by_experiment', sa.Text(), nullable=True), schema='triage_metadata')
op.alter_column('models', 'built_in_triage_run', nullable=False, new_column_name='built_in_experiment_run', schema='triage_metadata')
|
[
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.create_foreign_key",
"sqlalchemy.DateTime",
"alembic.op.drop_constraint",
"alembic.op.alter_column",
"alembic.op.drop_column",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Text",
"alembic.op.execute",
"sqlalchemy.String"
] |
[((471, 582), 'alembic.op.execute', 'op.execute', (['"""UPDATE triage_metadata.experiment_runs SET run_type=\'experiment\' WHERE run_type IS NULL"""'], {}), '(\n "UPDATE triage_metadata.experiment_runs SET run_type=\'experiment\' WHERE run_type IS NULL"\n )\n', (481, 582), False, 'from alembic import op\n'), ((578, 704), 'alembic.op.alter_column', 'op.alter_column', (['"""experiment_runs"""', '"""experiment_hash"""'], {'nullable': '(True)', 'new_column_name': '"""run_hash"""', 'schema': '"""triage_metadata"""'}), "('experiment_runs', 'experiment_hash', nullable=True,\n new_column_name='run_hash', schema='triage_metadata')\n", (593, 704), False, 'from alembic import op\n'), ((705, 832), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""experiment_runs_experiment_hash_fkey"""', '"""experiment_runs"""'], {'type_': '"""foreignkey"""', 'schema': '"""triage_metadata"""'}), "('experiment_runs_experiment_hash_fkey',\n 'experiment_runs', type_='foreignkey', schema='triage_metadata')\n", (723, 832), False, 'from alembic import op\n'), ((834, 913), 'alembic.op.execute', 'op.execute', (['"""ALTER TABLE triage_metadata.experiment_runs RENAME TO triage_runs"""'], {}), "('ALTER TABLE triage_metadata.experiment_runs RENAME TO triage_runs')\n", (844, 913), False, 'from alembic import op\n'), ((1255, 1392), 'alembic.op.alter_column', 'op.alter_column', (['"""models"""', '"""built_in_experiment_run"""'], {'nullable': '(False)', 'new_column_name': '"""built_in_triage_run"""', 'schema': '"""triage_metadata"""'}), "('models', 'built_in_experiment_run', nullable=False,\n new_column_name='built_in_triage_run', schema='triage_metadata')\n", (1270, 1392), False, 'from alembic import op\n'), ((1393, 1563), 'alembic.op.execute', 'op.execute', (['"""CREATE TABLE triage_metadata.deprecated_models_built_by_experiment AS SELECT model_id, model_hash, built_by_experiment FROM triage_metadata.models"""'], {}), "(\n 'CREATE TABLE triage_metadata.deprecated_models_built_by_experiment AS SELECT model_id, model_hash, built_by_experiment FROM triage_metadata.models'\n )\n", (1403, 1563), False, 'from alembic import op\n'), ((1558, 1631), 'alembic.op.drop_column', 'op.drop_column', (['"""models"""', '"""built_by_experiment"""'], {'schema': '"""triage_metadata"""'}), "('models', 'built_by_experiment', schema='triage_metadata')\n", (1572, 1631), False, 'from alembic import op\n'), ((2017, 2096), 'alembic.op.execute', 'op.execute', (['"""ALTER TABLE triage_metadata.triage_runs RENAME TO experiment_runs"""'], {}), "('ALTER TABLE triage_metadata.triage_runs RENAME TO experiment_runs')\n", (2027, 2096), False, 'from alembic import op\n'), ((2101, 2172), 'alembic.op.drop_column', 'op.drop_column', (['"""experiment_runs"""', '"""run_type"""'], {'schema': '"""triage_metadata"""'}), "('experiment_runs', 'run_type', schema='triage_metadata')\n", (2115, 2172), False, 'from alembic import op\n'), ((2177, 2303), 'alembic.op.alter_column', 'op.alter_column', (['"""experiment_runs"""', '"""run_hash"""'], {'nullable': '(True)', 'new_column_name': '"""experiment_hash"""', 'schema': '"""triage_metadata"""'}), "('experiment_runs', 'run_hash', nullable=True,\n new_column_name='experiment_hash', schema='triage_metadata')\n", (2192, 2303), False, 'from alembic import op\n'), ((2304, 2523), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['"""experiment_runs_experiment_hash_fkey"""', '"""experiment_runs"""', '"""experiments"""', "['experiment_hash']", "['experiment_hash']"], {'source_schema': '"""triage_metadata"""', 
'referent_schema': '"""triage_metadata"""'}), "('experiment_runs_experiment_hash_fkey',\n 'experiment_runs', 'experiments', ['experiment_hash'], [\n 'experiment_hash'], source_schema='triage_metadata', referent_schema=\n 'triage_metadata')\n", (2325, 2523), False, 'from alembic import op\n'), ((2514, 2571), 'alembic.op.drop_table', 'op.drop_table', (['"""retrain_models"""'], {'schema': '"""triage_metadata"""'}), "('retrain_models', schema='triage_metadata')\n", (2527, 2571), False, 'from alembic import op\n'), ((2576, 2626), 'alembic.op.drop_table', 'op.drop_table', (['"""retrain"""'], {'schema': '"""triage_metadata"""'}), "('retrain', schema='triage_metadata')\n", (2589, 2626), False, 'from alembic import op\n'), ((2745, 2882), 'alembic.op.alter_column', 'op.alter_column', (['"""models"""', '"""built_in_triage_run"""'], {'nullable': '(False)', 'new_column_name': '"""built_in_experiment_run"""', 'schema': '"""triage_metadata"""'}), "('models', 'built_in_triage_run', nullable=False,\n new_column_name='built_in_experiment_run', schema='triage_metadata')\n", (2760, 2882), False, 'from alembic import op\n'), ((1169, 1208), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""retrain_hash"""'], {}), "('retrain_hash')\n", (1192, 1208), True, 'import sqlalchemy as sa\n'), ((1805, 1893), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['retrain_hash']", "['triage_metadata.retrain.retrain_hash']"], {}), "(['retrain_hash'], [\n 'triage_metadata.retrain.retrain_hash'])\n", (1828, 1893), True, 'import sqlalchemy as sa\n'), ((1900, 1953), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""retrain_hash"""', '"""model_hash"""'], {}), "('retrain_hash', 'model_hash')\n", (1923, 1953), True, 'import sqlalchemy as sa\n'), ((414, 423), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (421, 423), True, 'import sqlalchemy as sa\n'), ((980, 989), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (987, 989), True, 'import sqlalchemy as sa\n'), ((1130, 1143), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1141, 1143), True, 'import sqlalchemy as sa\n'), ((1705, 1716), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1714, 1716), True, 'import sqlalchemy as sa\n'), ((1767, 1778), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (1776, 1778), True, 'import sqlalchemy as sa\n'), ((2688, 2697), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (2695, 2697), True, 'import sqlalchemy as sa\n'), ((1065, 1074), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (1072, 1074), True, 'import sqlalchemy as sa\n')]
|
# Generated by Django 3.0.7 on 2020-07-09 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20200709_1430'),
]
operations = [
migrations.AlterField(
model_name='location',
name='lat',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='location',
name='lng',
field=models.IntegerField(blank=True, null=True),
),
]
|
[
"django.db.models.IntegerField"
] |
[((331, 373), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (350, 373), False, 'from django.db import migrations, models\n'), ((494, 536), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (513, 536), False, 'from django.db import migrations, models\n')]
|
import unittest
import csv
import numpy as np
from viroconcom.fitting import Fit
def read_benchmark_dataset(path='tests/testfiles/1year_dataset_A.txt'):
"""
    Reads a dataset provided for the environmental contour benchmark.
Parameters
----------
path : string
        Path to dataset including the file name, defaults to 'tests/testfiles/1year_dataset_A.txt'.
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
x_label : str
        Label of the environmental variable 1.
y_label : str
Label of the environmental variable 2.
"""
x = list()
y = list()
x_label = None
y_label = None
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx == 0:
x_label = row[1][
1:] # Ignore first char (is a white space).
y_label = row[2][
1:] # Ignore first char (is a white space).
if idx > 0: # Ignore the header
x.append(float(row[1]))
y.append(float(row[2]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y, x_label, y_label)
class FittingTest(unittest.TestCase):
def test_2d_fit(self):
"""
2-d Fit with Weibull and Lognormal distribution.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_1 = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents spectral peak period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.4165147571863412, places=5)
self.assertAlmostEqual(dist0.scale(0), 2.833833521811032, places=5)
self.assertAlmostEqual(dist0.loc(0), 0.07055663251419833, places=5)
self.assertAlmostEqual(dist1.shape(0), 0.17742685807554776 , places=5)
#self.assertAlmostEqual(dist1.scale, 7.1536437634240135+2.075539206642004e^{0.1515051024957754x}, places=5)
self.assertAlmostEqual(dist1.loc, None, places=5)
# Now use a 2-parameter Weibull distribution instead of 3-p distr.
dist_description_0 = {'name': 'Weibull_2p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
self.assertEqual(str(my_fit)[0:5], 'Fit()')
def test_2d_benchmark_case(self):
"""
Reproduces the baseline results presented in doi: 10.1115/OMAE2019-96523 .
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset(
path='tests/testfiles/allyears_dataset_A.txt')
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 0.5}
dist_description_1 = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
'functions': ('exp3', None, 'power3')} # Shape, location, scale.
# Compute the fit.
my_fit = Fit((sample_hs, sample_tz),
(dist_description_0, dist_description_1))
# Evaluate the fitted parameters.
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.48, delta=0.02)
self.assertAlmostEqual(dist0.scale(0), 0.944, delta=0.01)
self.assertAlmostEqual(dist0.loc(0), 0.0981, delta=0.001)
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.001)
self.assertAlmostEqual(dist1.shape.b, 0.308, delta=0.002)
self.assertAlmostEqual(dist1.shape.c, -0.250, delta=0.002)
self.assertAlmostEqual(dist1.scale.a, 1.47 , delta=0.02)
self.assertAlmostEqual(dist1.scale.b, 0.214, delta=0.002)
self.assertAlmostEqual(dist1.scale.c, 0.641, delta=0.002)
self.assertAlmostEqual(dist1.scale(0), 4.3 , delta=0.1)
self.assertAlmostEqual(dist1.scale(2), 6, delta=0.1)
self.assertAlmostEqual(dist1.scale(5), 8, delta=0.1)
def test_2d_exponentiated_wbl_fit(self):
"""
Tests if a 2D fit that includes an exp. Weibull distribution works.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_hs = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents zero-upcrossing period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_tz = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_hs]
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'power3')
# Shape, Location, Scale
}
# Fit the model to the data, first test a 1D fit.
fit = Fit(sample_hs, dist_description_hs)
# Now perform the 2D fit.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
dist0 = fit.mul_var_dist.distributions[0]
self.assertGreater(dist0.shape(0), 1) # Should be about 1.5.
self.assertLess(dist0.shape(0), 2)
self.assertIsNone(dist0.loc(0)) # Has no location parameter, should be None.
self.assertGreater(dist0.scale(0), 2) # Should be about 3.
self.assertLess(dist0.scale(0), 4)
self.assertGreater(dist0.shape2(0), 0.5) # Should be about 1.
self.assertLess(dist0.shape2(0), 2)
def test_fit_lnsquare2(self):
"""
Tests a 2D fit that includes an logarithm square dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.scale.a, 1) # Should be about 1-5
self.assertLess(dist1.scale.a, 5) # Should be about 1-5
self.assertGreater(dist1.scale.b, 2) # Should be about 2-10
self.assertLess(dist1.scale.b, 10) # Should be about 2-10
self.assertGreater(dist1.scale(0), 0.1)
self.assertLess(dist1.scale(0), 10)
self.assertEqual(dist1.scale.func_name, 'lnsquare2')
def test_fit_powerdecrease3(self):
"""
Tests a 2D fit that includes an powerdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('powerdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.shape.a, -0.1) # Should be about 0
self.assertLess(dist1.shape.a, 0.1) # Should be about 0
self.assertGreater(dist1.shape.b, 1.5) # Should be about 2-5
self.assertLess(dist1.shape.b, 6) # Should be about 2-10
self.assertGreater(dist1.shape.c, 0.8) # Should be about 1.1
self.assertLess(dist1.shape.c, 2) # Should be about 1.1
self.assertGreater(dist1.shape(0), 0.25) # Should be about 0.35
self.assertLess(dist1.shape(0), 0.4) # Should be about 0.35
self.assertEqual(dist1.shape.func_name, 'powerdecrease3')
def test_fit_asymdecrease3(self):
"""
Tests a 2D fit that includes an asymdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('asymdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.1) # Should be about 0
self.assertAlmostEqual(dist1.shape.b, 0.35, delta=0.4) # Should be about 0.35
self.assertAlmostEqual(np.abs(dist1.shape.c), 0.45, delta=0.2) # Should be about 0.45
self.assertAlmostEquals(dist1.shape(0), 0.35, delta=0.2) # Should be about 0.35
def test_min_number_datapoints_for_fit(self):
"""
Tests if the minimum number of datapoints required for a fit works.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 10
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_10 = dist1.scale.a
# Now require more datapoints for a fit.
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 500
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_500 = dist1.scale.a
# Because in case 2 fewer bins have been used we should get different
# coefficients for the dependence function.
self.assertNotEqual(a_min_10, a_min_500)
    def test_multi_processing(self):
"""
2-d Fit with multiprocessing (specified by setting a value for timeout)
"""
# Define a sample and a fit.
prng = np.random.RandomState(42)
sample_1 = prng.weibull(1.5, 1000)*3
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
dist_description_0 = {'name': 'Weibull',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1),
timeout=10)
def test_wbl_fit_with_negative_location(self):
"""
Tests fitting a translated Weibull distribution which would result
in a negative location parameter.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_3p',
'dependency': (None, None, None)}
# Fit the model to the data.
fit = Fit((sample_hs, ),
(dist_description_hs, ))
# Correct values for 10 years of data can be found in
# 10.1115/OMAE2019-96523 . Here we used 1 year of data.
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
self.assertGreater(dist0.loc(0), 0.0) # Should be 0.0981
self.assertLess(dist0.loc(0), 0.3) # Should be 0.0981
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
# Shift the wave data with -1 m and fit again.
sample_hs = sample_hs - 2
# Negative location values will be set to zero instead and a
# warning will be raised.
with self.assertWarns(RuntimeWarning):
fit = Fit((sample_hs, ),
(dist_description_hs, ))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
# Should be estimated to be 0.0981 - 2 and corrected to be 0.
self.assertEqual(dist0.loc(0), 0)
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
def test_omae2020_wind_wave_model(self):
"""
Tests fitting the wind-wave model that was used in the publication
'Global hierarchical models for wind and wave contours' on dataset D.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0), 2.42, delta=1)
self.assertAlmostEqual(dist0.scale(0), 10.0, delta=2)
self.assertAlmostEqual(dist0.shape2(0), 0.761, delta=0.5)
dist1 = fit.mul_var_dist.distributions[1]
self.assertEqual(dist1.shape2(0), 5)
inspection_data1 = fit.multiple_fit_inspection_data[1]
self.assertEqual(inspection_data1.shape2_value[0], 5)
self.assertAlmostEqual(inspection_data1.shape_value[0], 0.8, delta=0.5) # interval centered at 1
self.assertAlmostEqual(inspection_data1.shape_value[4], 1.5, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.shape_value[9], 2.5, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.shape(0), 0.8, delta=0.3)
self.assertAlmostEqual(dist1.shape(10), 1.6, delta=0.5)
self.assertAlmostEqual(dist1.shape(20), 2.3, delta=0.7)
self.assertAlmostEqual(dist1.shape.a, 0.582, delta=0.5)
self.assertAlmostEqual(dist1.shape.b, 1.90, delta=1)
self.assertAlmostEqual(dist1.shape.c, 0.248, delta=0.5)
self.assertAlmostEqual(dist1.shape.d, 8.49, delta=5)
self.assertAlmostEqual(inspection_data1.scale_value[0], 0.15, delta=0.2) # interval centered at 1
self.assertAlmostEqual(inspection_data1.scale_value[4], 1, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.scale_value[9], 4, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.scale(0), 0.15, delta=0.5)
self.assertAlmostEqual(dist1.scale(10), 1, delta=0.5)
self.assertAlmostEqual(dist1.scale(20), 4, delta=1)
self.assertAlmostEqual(dist1.scale.a, 0.394, delta=0.5)
self.assertAlmostEqual(dist1.scale.b, 0.0178, delta=0.1)
self.assertAlmostEqual(dist1.scale.c, 1.88, delta=0.8)
def test_wrong_model(self):
"""
        Tests whether errors are raised when incorrect fitting models are
specified.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
        # This structure is incorrect as there is no distribution called 'something'.
dist_description_v = {'name': 'something',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, None, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
        # This structure is incorrect as there is no dependence function called 'something'.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('something', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as there will be only 1 or 2 intervals
# that fit 2000 datapoints.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 2000}
with self.assertRaises(RuntimeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as alpha3 is only compatible with
# logistics4 .
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('power3', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(TypeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Lognormal',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, 5, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, 5, None), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
def test_weighting_of_dependence_function(self):
"""
Tests if using weights when the dependence function is fitted works
correctly.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': False}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_no_weights = fit.mul_var_dist.distributions[1]
# Now perform a fit with weights.
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': True}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_with_weights = fit.mul_var_dist.distributions[1]
        # Make sure the two fitted dependence functions are different.
d = np.abs(dist1_with_weights.scale(0) - dist1_no_weights.scale(0)) / \
np.abs(dist1_no_weights.scale(0))
self.assertGreater(d, 0.01)
# Make sure they are not too different.
d = np.abs(dist1_with_weights.scale(20) - dist1_no_weights.scale(20)) / \
np.abs(dist1_no_weights.scale(20))
self.assertLess(d, 0.5)
|
[
"numpy.abs",
"numpy.asarray",
"numpy.exp",
"viroconcom.fitting.Fit",
"csv.reader",
"numpy.random.RandomState"
] |
[((1299, 1312), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1309, 1312), True, 'import numpy as np\n'), ((1321, 1334), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1331, 1334), True, 'import numpy as np\n'), ((825, 860), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""";"""'}), "(csv_file, delimiter=';')\n", (835, 860), False, 'import csv\n'), ((1535, 1560), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (1556, 1560), True, 'import numpy as np\n'), ((2523, 2590), 'viroconcom.fitting.Fit', 'Fit', (['(sample_1, sample_2)', '(dist_description_0, dist_description_1)'], {}), '((sample_1, sample_2), (dist_description_0, dist_description_1))\n', (2526, 2590), False, 'from viroconcom.fitting import Fit\n'), ((3641, 3708), 'viroconcom.fitting.Fit', 'Fit', (['(sample_1, sample_2)', '(dist_description_0, dist_description_1)'], {}), '((sample_1, sample_2), (dist_description_0, dist_description_1))\n', (3644, 3708), False, 'from viroconcom.fitting import Fit\n'), ((4569, 4638), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_0, dist_description_1)'], {}), '((sample_hs, sample_tz), (dist_description_0, dist_description_1))\n', (4572, 4638), False, 'from viroconcom.fitting import Fit\n'), ((5746, 5771), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (5767, 5771), True, 'import numpy as np\n'), ((7030, 7065), 'viroconcom.fitting.Fit', 'Fit', (['sample_hs', 'dist_description_hs'], {}), '(sample_hs, dist_description_hs)\n', (7033, 7065), False, 'from viroconcom.fitting import Fit\n'), ((7114, 7185), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (7117, 7185), False, 'from viroconcom.fitting import Fit\n'), ((8633, 8704), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (8636, 8704), False, 'from viroconcom.fitting import Fit\n'), ((10232, 10303), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (10235, 10303), False, 'from viroconcom.fitting import Fit\n'), ((12016, 12087), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (12019, 12087), False, 'from viroconcom.fitting import Fit\n'), ((13604, 13675), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (13607, 13675), False, 'from viroconcom.fitting import Fit\n'), ((14347, 14418), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (14350, 14418), False, 'from viroconcom.fitting import Fit\n'), ((14967, 14992), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (14988, 14992), True, 'import numpy as np\n'), ((15548, 15627), 'viroconcom.fitting.Fit', 'Fit', (['(sample_1, sample_2)', '(dist_description_0, dist_description_1)'], {'timeout': '(10)'}), '((sample_1, sample_2), (dist_description_0, 
dist_description_1), timeout=10)\n', (15551, 15627), False, 'from viroconcom.fitting import Fit\n'), ((16216, 16257), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs,)', '(dist_description_hs,)'], {}), '((sample_hs,), (dist_description_hs,))\n', (16219, 16257), False, 'from viroconcom.fitting import Fit\n'), ((18521, 18590), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (18524, 18590), False, 'from viroconcom.fitting import Fit\n'), ((26684, 26753), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (26687, 26753), False, 'from viroconcom.fitting import Fit\n'), ((27483, 27552), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (27486, 27552), False, 'from viroconcom.fitting import Fit\n'), ((12424, 12445), 'numpy.abs', 'np.abs', (['dist1.shape.c'], {}), '(dist1.shape.c)\n', (12430, 12445), True, 'import numpy as np\n'), ((16979, 17020), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs,)', '(dist_description_hs,)'], {}), '((sample_hs,), (dist_description_hs,))\n', (16982, 17020), False, 'from viroconcom.fitting import Fit\n'), ((21264, 21303), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v,)', '(dist_description_v,)'], {}), '((sample_v,), (dist_description_v,))\n', (21267, 21303), False, 'from viroconcom.fitting import Fit\n'), ((22045, 22114), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (22048, 22114), False, 'from viroconcom.fitting import Fit\n'), ((22878, 22947), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (22881, 22947), False, 'from viroconcom.fitting import Fit\n'), ((23822, 23891), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (23825, 23891), False, 'from viroconcom.fitting import Fit\n'), ((24442, 24481), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v,)', '(dist_description_v,)'], {}), '((sample_v,), (dist_description_v,))\n', (24445, 24481), False, 'from viroconcom.fitting import Fit\n'), ((25409, 25478), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (25412, 25478), False, 'from viroconcom.fitting import Fit\n'), ((1967, 1986), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (1973, 1986), True, 'import numpy as np\n'), ((6182, 6201), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (6188, 6201), True, 'import numpy as np\n'), ((15070, 15089), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (15076, 15089), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created at 2020.09.04 by <NAME>
"""
import warnings
warnings.filterwarnings("ignore")
import click
from cli.climb import climb
from cli.diff import diff
@click.group()
def main():
pass
main.add_command(climb)
main.add_command(diff)
if __name__ == '__main__':
main()
|
[
"click.group",
"warnings.filterwarnings"
] |
[((103, 136), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (126, 136), False, 'import warnings\n'), ((208, 221), 'click.group', 'click.group', ([], {}), '()\n', (219, 221), False, 'import click\n')]
|
import os
from data.raw_data_loader.base.base_raw_data_loader import Seq2SeqRawDataLoader
class RawDataLoader(Seq2SeqRawDataLoader):
def __init__(self, data_path):
super().__init__(data_path)
self.cnn_path = "cnn/stories"
self.dailymail_path = "dailymail/stories"
def load_data(self):
if len(self.X) == 0 or len(self.Y) == 0:
total_size = 0
for root, dirs, files in os.walk(
os.path.join(self.data_path, self.cnn_path)
):
for file_name in files:
file_path = os.path.join(root, file_name)
processed_size = self.process_data_file(file_path)
total_size += processed_size
for root, dirs, files in os.walk(
os.path.join(self.data_path, self.dailymail_path)
):
for file_name in files:
file_path = os.path.join(root, file_name)
processed_size = self.process_data_file(file_path)
total_size += processed_size
index_list = [i for i in range(total_size)]
self.attributes["index_list"] = index_list
def process_data_file(self, file_path):
cnt = 0
article_lines = []
abstract_lines = []
next_is_highlight = False
with open(file_path, "r") as f:
for line in f:
line = line.strip()
if line:
if line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
abstract_lines.append(line)
else:
article_lines.append(line)
assert len(self.X) == len(self.Y)
idx = len(self.X)
self.X[idx] = " ".join(article_lines)
self.Y[idx] = " ".join(
["%s %s %s" % ("<s>", sent, "</s>") for sent in abstract_lines]
)
cnt += 1
return cnt
|
[
"os.path.join"
] |
[((460, 503), 'os.path.join', 'os.path.join', (['self.data_path', 'self.cnn_path'], {}), '(self.data_path, self.cnn_path)\n', (472, 503), False, 'import os\n'), ((803, 852), 'os.path.join', 'os.path.join', (['self.data_path', 'self.dailymail_path'], {}), '(self.data_path, self.dailymail_path)\n', (815, 852), False, 'import os\n'), ((591, 620), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (603, 620), False, 'import os\n'), ((940, 969), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (952, 969), False, 'import os\n')]
|