code | apis | extract_api
---|---|---|
# Program to digest make depend files
import os
from typing import List, Set
DirectoryName = str
FileName = str
class AmbiguousFinder:
""" Find files that exist in more than one directory from a given set of directories"""
def __init__(self, project_root_directory: DirectoryName):
""" Initialize a DigestDepends object. """
self.project_root_directory = project_root_directory
pass
def find_ambiguities(self):
""" Main routine."""
dir_files = [
"uniqued-projdirs-CPU_A.txt",
"uniqued-sysdirs-CPU_A.txt"
]
dirs = []
for dir_filename in dir_files:
with open(dir_filename, 'r') as f:
for line in f:
line = line.strip("\n")
dirs.append(line)
pass
pass
pass
containing_dirs = dict()
for dir in dirs:
pass
pass
if __name__ == u'__main__':
print('Finding ambiguous files\n')
finder = AmbiguousFinder(os.getcwd())
finder.find_ambiguities()
print('\ndone.\n')
| [
"os.getcwd"
] | [((1062, 1073), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1071, 1073), False, 'import os\n')] |
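# Illustrative sketch (assumption, not part of the original file): one way the unfinished
# find_ambiguities() loop above could finish the job the class docstring describes -- map
# every file name to the set of directories that contain it, then report names that appear
# in more than one directory. os.listdir() and the helper name are hypothetical choices.
import os
from typing import Dict, Set

def report_ambiguous_files(dirs):
    containing_dirs: Dict[str, Set[str]] = {}
    for directory in dirs:
        if not os.path.isdir(directory):
            continue
        for file_name in os.listdir(directory):
            containing_dirs.setdefault(file_name, set()).add(directory)
    # Keep only file names that appear in two or more directories.
    return {name: where for name, where in containing_dirs.items() if len(where) > 1}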
'''
Inverse depth prediction net.
Code based on https://github.com/ClementPinard/dispNetPytorch/
'''
import torch
import torch.nn as nn
import math
from . import net_blocks as nb
def predict_disp(in_planes):
return nn.Conv2d(in_planes,1,kernel_size=3,stride=1,padding=1,bias=False)
class DispNet(nn.Module):
expansion = 1
def __init__(self, batch_norm=True):
super(DispNet, self).__init__()
self.batch_norm = batch_norm
self.conv1 = nb.conv2d(self.batch_norm, 3, 64, kernel_size=7, stride=2)
self.conv2 = nb.conv2d(self.batch_norm, 64, 128, kernel_size=5, stride=2)
self.conv3 = nb.conv2d(self.batch_norm, 128, 256, kernel_size=5, stride=2)
self.conv3_1 = nb.conv2d(self.batch_norm, 256, 256)
self.conv4 = nb.conv2d(self.batch_norm, 256, 512, stride=2)
self.conv4_1 = nb.conv2d(self.batch_norm, 512, 512)
self.conv5 = nb.conv2d(self.batch_norm, 512, 512, stride=2)
self.conv5_1 = nb.conv2d(self.batch_norm, 512, 512)
self.conv6 = nb.conv2d(self.batch_norm, 512, 1024, stride=2)
self.conv6_1 = nb.conv2d(self.batch_norm,1024, 1024)
self.deconv5 = nb.deconv2d(1024,512)
self.deconv4 = nb.deconv2d(1025,256)
self.deconv3 = nb.deconv2d(769,128)
self.deconv2 = nb.deconv2d(385,64)
self.deconv1 = nb.deconv2d(193,64)
self.predict_disp6 = predict_disp(1024)
self.predict_disp5 = predict_disp(1025)
self.predict_disp4 = predict_disp(769)
self.predict_disp3 = predict_disp(385)
self.predict_disp2 = predict_disp(193)
self.predict_disp1 = predict_disp(129)
self.upsampled_disp6_to_5 = nn.ConvTranspose2d(1, 1, 4, 2, 1, bias=False)
self.upsampled_disp5_to_4 = nn.ConvTranspose2d(1, 1, 4, 2, 1, bias=False)
self.upsampled_disp4_to_3 = nn.ConvTranspose2d(1, 1, 4, 2, 1, bias=False)
self.upsampled_disp3_to_2 = nn.ConvTranspose2d(1, 1, 4, 2, 1, bias=False)
self.upsampled_disp2_to_1 = nn.ConvTranspose2d(1, 1, 4, 2, 1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.02 / n) #this modified initialization seems to work better, but it's very hacky
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
out_conv1 = self.conv1(x)
out_conv2 = self.conv2(out_conv1)
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
disp6 = self.predict_disp6(out_conv6)
disp6_up = self.upsampled_disp6_to_5(disp6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,disp6_up),1)
disp5 = self.predict_disp5(concat5)
disp5_up = self.upsampled_disp5_to_4(disp5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,disp5_up),1)
disp4 = self.predict_disp4(concat4)
disp4_up = self.upsampled_disp4_to_3(disp4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,disp4_up),1)
disp3 = self.predict_disp3(concat3)
disp3_up = self.upsampled_disp3_to_2(disp3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,disp3_up),1)
disp2 = self.predict_disp2(concat2)
disp2_up = self.upsampled_disp2_to_1(disp2)
out_deconv1 = self.deconv1(concat2)
concat1 = torch.cat((out_conv1,out_deconv1,disp2_up),1)
disp1 = self.predict_disp1(concat1)
if self.training:
#return disp1,disp2,disp3,disp4,disp5,disp6
return disp1
else:
return disp1
def dispnet(path=None, batch_norm=True):
"""dispNet model architecture.
Args:
path : path to a pretrained network; a new model is created if not set
"""
model = DispNet(batch_norm=batch_norm)
if path is not None:
data = torch.load(path)
if 'state_dict' in data.keys():
model.load_state_dict(data['state_dict'])
else:
model.load_state_dict(data)
return model | [
"torch.load",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d"
] | [((220, 291), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, 1, kernel_size=3, stride=1, padding=1, bias=False)\n', (229, 291), True, 'import torch.nn as nn\n'), ((1710, 1755), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(1, 1, 4, 2, 1, bias=False)\n', (1728, 1755), True, 'import torch.nn as nn\n'), ((1792, 1837), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(1, 1, 4, 2, 1, bias=False)\n', (1810, 1837), True, 'import torch.nn as nn\n'), ((1874, 1919), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(1, 1, 4, 2, 1, bias=False)\n', (1892, 1919), True, 'import torch.nn as nn\n'), ((1956, 2001), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(1, 1, 4, 2, 1, bias=False)\n', (1974, 2001), True, 'import torch.nn as nn\n'), ((2038, 2083), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(1, 1, 4, 2, 1, bias=False)\n', (2056, 2083), True, 'import torch.nn as nn\n'), ((3094, 3142), 'torch.cat', 'torch.cat', (['(out_conv5, out_deconv5, disp6_up)', '(1)'], {}), '((out_conv5, out_deconv5, disp6_up), 1)\n', (3103, 3142), False, 'import torch\n'), ((3308, 3356), 'torch.cat', 'torch.cat', (['(out_conv4, out_deconv4, disp5_up)', '(1)'], {}), '((out_conv4, out_deconv4, disp5_up), 1)\n', (3317, 3356), False, 'import torch\n'), ((3522, 3570), 'torch.cat', 'torch.cat', (['(out_conv3, out_deconv3, disp4_up)', '(1)'], {}), '((out_conv3, out_deconv3, disp4_up), 1)\n', (3531, 3570), False, 'import torch\n'), ((3736, 3784), 'torch.cat', 'torch.cat', (['(out_conv2, out_deconv2, disp3_up)', '(1)'], {}), '((out_conv2, out_deconv2, disp3_up), 1)\n', (3745, 3784), False, 'import torch\n'), ((3944, 3992), 'torch.cat', 'torch.cat', (['(out_conv1, out_deconv1, disp2_up)', '(1)'], {}), '((out_conv1, out_deconv1, disp2_up), 1)\n', (3953, 3992), False, 'import torch\n'), ((4449, 4465), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (4459, 4465), False, 'import torch\n')] |
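# Illustrative usage sketch (assumption, not part of the original module): pushing a dummy
# batch through DispNet to inspect the predicted disparity. The dispnet() factory is defined
# above; the package's net_blocks module must be importable. Input sizes divisible by 64
# keep the six stride-2 encoder stages aligned with the decoder skip connections.
import torch

model = dispnet(path=None, batch_norm=True)
model.eval()
with torch.no_grad():
    dummy = torch.randn(1, 3, 256, 512)   # (batch, RGB, height, width)
    disp1 = model(dummy)
    print(disp1.shape)                    # expected: half the input resolution, (1, 1, 128, 256)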
import abc
import numpy as np
from tnpy.operators import MPO
class ModelBase(abc.ABC):
def __init__(self, N: int):
"""
Args:
N: System size.
"""
self.N = N
@abc.abstractmethod
def _elem(self, site: int) -> np.ndarray:
return NotImplemented
@property
def mpo(self) -> MPO:
"""
Return matrix product operator (mpo) as a property of the model.
Returns:
mpo:
"""
return MPO(self.N, self._elem)
| [
"tnpy.operators.MPO"
] | [((497, 520), 'tnpy.operators.MPO', 'MPO', (['self.N', 'self._elem'], {}), '(self.N, self._elem)\n', (500, 520), False, 'from tnpy.operators import MPO\n')] |
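# Illustrative sketch (assumption, not from the tnpy package): a minimal concrete subclass
# of ModelBase. The only contract taken from the code above is that _elem(site) returns an
# np.ndarray and that MPO(N, elem_func) consumes it; the tensor below is a placeholder, not
# a physically meaningful MPO element.
import numpy as np

class DummyModel(ModelBase):
    def __init__(self, N: int, coupling: float = 1.0):
        super().__init__(N)
        self.coupling = coupling

    def _elem(self, site: int) -> np.ndarray:
        # A real model would construct its local MPO matrix here.
        return self.coupling * np.eye(2)

# model = DummyModel(N=10)
# mpo = model.mpo   # builds the MPO from the per-site elements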
##########################################################################################
# Program Name : Discord Bot
# Author : DMCTruong
# Last Updated : August 31, 2017
# License : MIT
# Description : A general purpose bot written for Discord
##########################################################################################
import discord
from discord.ext import commands
import asyncio
import configurations
import pyrebase
bot = commands.Bot(configurations.PREFIX)
firebase = pyrebase.initialize_app(configurations.FIREBASE_INFO)
db = firebase.database()
storage = firebase.storage()
class Database:
def __init__(self, bot):
self.bot = bot
@bot.command(aliases=["db, DB, allDB, , showdb, showDB"])
async def alldb(self):
"""Give list of all databases saved"""
getAlldb = db.child("Discord").shallow().get()
allDatabases = "The databases that are available are:\n - {}".format("\n - ".join(getAlldb.val()))
print(allDatabases)
return await self.bot.say(allDatabases)
@bot.command(aliases=["newdb, newDB, entry, insert"])
async def newEntry(self, dbname, name, entry):
"""Add a database entry or create new database"""
db.child("Discord").child(dbname).update({name: "{}".format(entry)})
updateSuccess = "The database, {}, has been updated sucessfully with entry, {}: {}.".format(dbname, name, entry)
print(updateSuccess)
return await self.bot.say(updateSuccess) | [
"discord.ext.commands.Bot",
"pyrebase.initialize_app"
] | [((495, 530), 'discord.ext.commands.Bot', 'commands.Bot', (['configurations.PREFIX'], {}), '(configurations.PREFIX)\n', (507, 530), False, 'from discord.ext import commands\n'), ((542, 595), 'pyrebase.initialize_app', 'pyrebase.initialize_app', (['configurations.FIREBASE_INFO'], {}), '(configurations.FIREBASE_INFO)\n', (565, 595), False, 'import pyrebase\n')] |
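# Illustrative sketch (assumption, not in the original cog): a read-back command that would
# sit inside the Database class next to alldb/newEntry, fetching one entry with pyrebase's
# child().get().val() chain. The command name, aliases, and reply text are hypothetical,
# and bot.say follows the old discord.py API already used above.
@bot.command(aliases=["getdb", "getDB"])
async def getEntry(self, dbname, name):
    """Look up a single entry in a database."""
    value = db.child("Discord").child(dbname).child(name).get().val()
    if value is None:
        return await self.bot.say("No entry named {} in {}.".format(name, dbname))
    return await self.bot.say("{}: {}".format(name, value))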
import pandas as pd
from sklearn.manifold import TSNE
from numpy import array, dot, diag, nan_to_num
from numpy.random import randn
import sys
features = 'CADD1,CADD2,RecA,EssA,CADD3,CADD4,RecB,EssB,Path'.split(',')
df_data = pd.read_csv("dida_posey_to_predict.csv")
combination = list(map(int, sys.argv[1]))
n_comb = sum(combination)
X = array(df_data[features])
X = dot(X, diag(combination))
for i in (0,1,4,5):
if not combination[i]:
continue
X[:,i] += 1.701666
X[:,i] /= 15.746334
if n_comb > 2:
X = TSNE(n_components=2, init="pca").fit_transform(X)
X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X = nan_to_num(X)
else:
X = X[:, [i for i, j in enumerate(combination) if j] ]
df_data_vs = df_data.copy(False)
df_data_vs['x'] = X[:,0]
df_data_vs['y'] = X[:,1] if n_comb > 1 else 0
df_data_vs = df_data_vs.drop('Pair', axis=1)
with open("exports/p_file_" + ''.join(map(str, combination)) + ".csv", "w") as out:
out.write('id,x,y\n')
for line in array(df_data_vs):
out.write(','.join(map(str, line[[0,-2,-1]])) + '\n')
| [
"pandas.read_csv",
"sklearn.manifold.TSNE",
"numpy.diag",
"numpy.array",
"numpy.nan_to_num"
] | [((240, 280), 'pandas.read_csv', 'pd.read_csv', (['"""dida_posey_to_predict.csv"""'], {}), "('dida_posey_to_predict.csv')\n", (251, 280), True, 'import pandas as pd\n'), ((358, 382), 'numpy.array', 'array', (['df_data[features]'], {}), '(df_data[features])\n', (363, 382), False, 'from numpy import array, dot, diag, nan_to_num\n'), ((395, 412), 'numpy.diag', 'diag', (['combination'], {}), '(combination)\n', (399, 412), False, 'from numpy import array, dot, diag, nan_to_num\n'), ((681, 694), 'numpy.nan_to_num', 'nan_to_num', (['X'], {}), '(X)\n', (691, 694), False, 'from numpy import array, dot, diag, nan_to_num\n'), ((1043, 1060), 'numpy.array', 'array', (['df_data_vs'], {}), '(df_data_vs)\n', (1048, 1060), False, 'from numpy import array, dot, diag, nan_to_num\n'), ((559, 591), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'init': '"""pca"""'}), "(n_components=2, init='pca')\n", (563, 591), False, 'from sklearn.manifold import TSNE\n')] |
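# Illustrative sketch (not part of the script): the dot(X, diag(combination)) step above
# zeroes out the de-selected feature columns while keeping the array shape, so the same
# scaling / t-SNE code can run for any feature subset. Tiny demo with made-up numbers:
from numpy import array, dot, diag

X_demo = array([[1.0, 2.0, 3.0],
               [4.0, 5.0, 6.0]])
mask = [1, 0, 1]                   # keep columns 0 and 2, drop column 1
print(dot(X_demo, diag(mask)))      # [[1. 0. 3.], [4. 0. 6.]]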
#http://kidscancode.org/blog/2016/08/pygame_1-1_getting-started/
import pygame
# Initialize pygame and create window
pygame.init()
# For sound
pygame.mixer.init()
# Width of our game window
WINDOW_WIDTH = 360
# Height of our game window
WINDOW_HEIGHT = 480
WINDOW_TITLE = 'My Game'
FRAMES_PER_SECOND = 30
SCREEN = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption(WINDOW_TITLE)
CLOCK = pygame.time.Clock()
class Colors:
'''Colors (R, G, B)'''
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
# Game Loop
IS_RUNNING = True
while IS_RUNNING:
# Process input (events)
for event in pygame.event.get():
# Check for closing window
if event.type == pygame.QUIT:
IS_RUNNING = False
# Update
# Render (draw)
SCREEN.fill(Colors.BLACK)
# *After* drawing everything, flip the display
pygame.display.flip()
# Keep loop running at the right speed
CLOCK.tick(FRAMES_PER_SECOND)
pygame.quit()
| [
"pygame.display.set_caption",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.time.Clock",
"pygame.mixer.init"
] | [((120, 133), 'pygame.init', 'pygame.init', ([], {}), '()\n', (131, 133), False, 'import pygame\n'), ((147, 166), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (164, 166), False, 'import pygame\n'), ((320, 374), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WINDOW_WIDTH, WINDOW_HEIGHT)'], {}), '((WINDOW_WIDTH, WINDOW_HEIGHT))\n', (343, 374), False, 'import pygame\n'), ((375, 415), 'pygame.display.set_caption', 'pygame.display.set_caption', (['WINDOW_TITLE'], {}), '(WINDOW_TITLE)\n', (401, 415), False, 'import pygame\n'), ((424, 443), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (441, 443), False, 'import pygame\n'), ((1003, 1016), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1014, 1016), False, 'import pygame\n'), ((685, 703), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (701, 703), False, 'import pygame\n'), ((906, 927), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (925, 927), False, 'import pygame\n')] |
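# Illustrative sketch (assumption, not part of the tutorial file): the empty "Update" and
# "Render (draw)" phases above are where game logic and drawing calls go. A self-contained
# variant with a square that drifts right and wraps around the window:
import pygame

pygame.init()
screen = pygame.display.set_mode((360, 480))
clock = pygame.time.Clock()
x, running = 0, True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    x = (x + 4) % 360                                         # Update
    screen.fill((0, 0, 0))                                    # Render (draw)
    pygame.draw.rect(screen, (0, 255, 0), (x, 240, 20, 20))
    pygame.display.flip()
    clock.tick(30)
pygame.quit()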
# encoding=utf-8
"""
Created on 15:30 2019/07/29
@author: <NAME>
It solves MADA, including train and test.
"""
import torch
import torch.nn as nn
import numpy as np
def test(model, data_loader):
model.eval()
total_loss = 0
corrects = 0
processed_num = 0
for inputs, labels in data_loader:
# inputs = inputs.to(device)
# labels = labels.to(device)
class_outputs = model(inputs, test_mode=True)
_, preds = torch.max(class_outputs, 1)
# loss = nn.CrossEntropyLoss()(class_outputs, labels)
# total_loss += loss.item() * labels.size()[0]
corrects += (preds == labels.data).sum().item()
processed_num += labels.size()[0]
acc = corrects / processed_num
average_loss = total_loss / processed_num
print('Data size = {} , corrects = {}'.format(processed_num, corrects))
return average_loss, acc
def train_one_epoch(model, data_loader, optimizer, n_classes,
optimizer_type, num_epochs, epoch, iter_num, max_iter_num, lr, gamma, loss_weight):
model.train()
total_loss = 0
source_corrects = 0
processed_target_num = 0
total_source_num = 0
class_criterion = nn.CrossEntropyLoss()
alpha = 0
for target_inputs, target_labels in data_loader['target']['train']:
cur_lr = update_optimizer(optimizer, optimizer_type, num_epochs, epoch, iter_num, max_iter_num, lr, gamma)
optimizer.zero_grad()
alpha = get_alpha(num_epochs, epoch, iter_num, max_iter_num)
# Target Train
# target_inputs = target_inputs.to(device)
target_domain_outputs, target_class_outputs = model(target_inputs, alpha=alpha)
# target_domain_labels = torch.ones((target_labels.size()[0] * n_classes, 1), device=device)
target_domain_labels = torch.ones((target_labels.size()[0] * n_classes, 1))
target_domain_loss = nn.BCELoss()(target_domain_outputs.view(-1), target_domain_labels.view(-1))
# Source Train
source_iter = iter(data_loader['source']['train'])
source_inputs, source_labels = next(source_iter)
# source_inputs = source_inputs.to(device)
source_domain_outputs, source_class_outputs = model(source_inputs, alpha=alpha)
# source_labels = source_labels.to(device)
source_class_loss = class_criterion(source_class_outputs, source_labels)
# source_domain_labels = torch.zeros((source_labels.size()[0] * n_classes, 1), device=device)
source_domain_labels = torch.zeros((source_labels.size()[0] * n_classes, 1))
source_domain_loss = nn.BCELoss()(source_domain_outputs.view(-1), source_domain_labels.view(-1))
# LOSS
# loss = target_domain_loss + source_domain_loss + source_class_loss
loss = loss_weight * 0.5 * n_classes * (target_domain_loss + source_domain_loss) + source_class_loss
loss.backward()
optimizer.step()
# Other parameters
total_loss += loss.item() * source_labels.size()[0]
_, source_class_preds = torch.max(source_class_outputs, 1)
source_corrects += (source_class_preds == source_labels.data).sum().item()
total_source_num += source_labels.size()[0]
processed_target_num += target_labels.size()[0]
iter_num += 1
acc = source_corrects / total_source_num
average_loss = total_loss / total_source_num
print('Data size = {} , corrects = {}'.format(total_source_num, source_corrects))
print('Alpha = ', alpha)
print()
return average_loss, acc, iter_num, cur_lr
def train(model, data_loader, optimizer, optimizer_type, test_interval,
max_iter_num, num_epochs, n_classes, lr, gamma, loss_weight, log):
iter_num = 0 # Iteration is the number of batches that an epoch needs.
log_iter = 0
best_val_loss, best_val_acc = test(model, data_loader['source']['test'])
print('Initial Train Loss: {:.4f} Acc: {:.4f}'.format(best_val_loss, best_val_acc))
best_test_loss, best_test_acc = test(model, data_loader['target']['test'])
print('Initial Test Loss: {:.4f} Acc: {:.4f}'.format(best_test_loss, best_test_acc))
for epoch in range(num_epochs):
print('\nEpoch {}/{}'.format(epoch, num_epochs - 1))
print('iteration : {}'.format(iter_num))
# train
train_loss, train_acc, iter_num, cur_lr = train_one_epoch(model, data_loader, optimizer, n_classes,
optimizer_type, num_epochs, epoch, iter_num, max_iter_num,
lr, gamma, loss_weight)
print('Train Loss: {:.4f} Acc: {:.4f}'.format(train_loss, train_acc))
if train_acc >= best_val_acc:
best_val_acc = train_acc
val_acc = val_loss = 0
# Test
# if iter_num - log_iter >= test_interval:
# log_iter = iter_num
test_loss, test_acc = test(model, data_loader['target']['test'])
print('Test Loss: {:.4f} Acc: {:.4f}'.format(test_loss, test_acc))
log.add_log(epoch, optimizer_type, train_acc, test_acc)
if test_acc >= best_test_acc:
best_test_acc = test_acc
best_test_loss = test_loss
print('Current Best Test Acc : {:.4f} Current Best Test Loss : {:.4f} Cur lr : {:.4f}'.format(best_test_acc,
best_test_loss,
cur_lr))
if iter_num >= max_iter_num:
break
print('Best Val Acc : {:4f}, Test Acc : {:4f}'.format(best_val_acc, best_test_acc))
def update_optimizer(optimizer, optimizer_type, num_epochs, epoch, iter_num, max_iter_num, lr, gamma,
power=0.75, weight_decay=0.0005):
"""
Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.
"""
if optimizer_type == 'SGD':
if num_epochs != 999999:
p = epoch / num_epochs
else:
p = iter_num / max_iter_num
lr = lr * (1.0 + gamma * p) ** (-power)
else:
lr = lr
cur_lr = lr
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_group['lr_mult']
param_group['weight_decay'] = weight_decay * param_group['decay_mult']
return cur_lr
def get_alpha(num_epochs, epoch, iter_num, max_iter_num, delta=10.0):
if num_epochs != 999999:
p = epoch / num_epochs
else:
p = iter_num / max_iter_num
return float(2.0 / (1.0 + np.exp(-delta * p)) - 1.0)
| [
"numpy.exp",
"torch.nn.BCELoss",
"torch.max",
"torch.nn.CrossEntropyLoss"
] | [((1212, 1233), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1231, 1233), True, 'import torch.nn as nn\n'), ((473, 500), 'torch.max', 'torch.max', (['class_outputs', '(1)'], {}), '(class_outputs, 1)\n', (482, 500), False, 'import torch\n'), ((3064, 3098), 'torch.max', 'torch.max', (['source_class_outputs', '(1)'], {}), '(source_class_outputs, 1)\n', (3073, 3098), False, 'import torch\n'), ((1914, 1926), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1924, 1926), True, 'import torch.nn as nn\n'), ((2617, 2629), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (2627, 2629), True, 'import torch.nn as nn\n'), ((6618, 6636), 'numpy.exp', 'np.exp', (['(-delta * p)'], {}), '(-delta * p)\n', (6624, 6636), True, 'import numpy as np\n')] |
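# Illustrative sketch (not part of the original file): the gradient-reversal weight from
# get_alpha() follows 2/(1+exp(-10*p)) - 1, climbing smoothly from 0 toward 1 as training
# progress p goes from 0 to 1, while update_optimizer() decays the SGD learning rate as
# lr*(1+gamma*p)**(-0.75). Quick numeric check with made-up settings lr=0.01, gamma=10:
import numpy as np

for p in (0.0, 0.25, 0.5, 1.0):
    alpha = 2.0 / (1.0 + np.exp(-10.0 * p)) - 1.0
    lr = 0.01 * (1.0 + 10.0 * p) ** (-0.75)
    print("p=%.2f  alpha=%.3f  lr=%.5f" % (p, alpha, lr))
# p=0.00  alpha=0.000  lr=0.01000
# p=0.25  alpha=0.848  lr=0.00391
# p=0.50  alpha=0.987  lr=0.00261
# p=1.00  alpha=1.000  lr=0.00166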
from engine.global_config import *
import psycopg2
from config import config
import json
class Admin_Commands():
def __init__(self, name):
self.name = name
def fill_db():
insert_item = """INSERT INTO items(uuid_id, name, item_desc, base_type, size, weight, capacity, can_attributes, room_target, combines_with, is_open, location, location_body, owner)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING uuid_id, name, item_desc, base_type, size, weight, capacity, can_attributes, combines_with, room_target, is_open, location, location_body, owner;"""
insert_room = """INSERT INTO rooms(uuid_id, room_type, name, description, exits, region, zone, effects, owner)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING uuid_id, room_type, name, description, exits, region, zone, effects, owner;"""
insert_player = """INSERT INTO players(uuid_id, name, gender, hp, core_attributes, player_state, conditions, credit, stow_loc, current_room)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING uuid_id, name, gender, hp, core_attributes, player_state, conditions, credit, stow_loc, current_room;"""
insert_npc = """INSERT INTO npcs(uuid_id, base_type, name, race, gender, npc_desc, core_attributes, npc_state, conditions, credit, supply, demand, home_loc, demeanor, current_room)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING uuid_id, base_type, name, race, gender, npc_desc, core_attributes, npc_state, conditions, credit, supply, demand, home_loc, demeanor, current_room;"""
insert_org = """INSERT INTO orgs(uuid_id, name, org_desc, supply, demand, home)
VALUES(%s, %s, %s, %s, %s, %s)
RETURNING uuid_id, name, org_desc, supply, demand, home;"""
conn = None
try:
dbparams = config()
conn = psycopg2.connect(**dbparams)
cur = conn.cursor()
# insert a new part
# uuid, name, item_desc, base_type, size, weight, capacity, can_attributes, combines_with, is_open, location, location_body, owner
cur.execute(insert_item, ("70306652-fbda-479a-a06d-48b411911ed7", "9mm magazine", "It's a 9mm magazine.", "ammo_9mm", 1, 1, 10, None, "", "pistol_9mm", False, "65d56cbe-f276-4055-899c-3244c0c92003", None, "da",))
cur.execute(insert_item, ("035d0c23-fbda-479a-a06d-48b411911ed7", "9mm pistol", "Mmm, shiny.", "pistol_9mm", 2, 2, 10, "is_gun", "", "ammo_9mm", False, "65d56cbe-f276-4055-899c-3244c0c92003", None, "da",))
cur.execute(insert_item, ("6123e586-93c7-4fff-8787-3ca5706ad2a8", "rabbit toy", "Huh, looks real.", "toy", 1, 1, 0, None, "", None, False, "70306652-08cf-4c99-ac7d-c8bd1082220c", None, "da",))
cur.execute(insert_item, ("70306652-08cf-4c99-ac7d-c8bd1082220c", "backpack", "It's a backpack.", "storage_backpack", 2, 2, 0, "is_container", "", None, False, "c93e5db1-fabe-496f-a6a6-6769a1bf1404", "r_hand", "da",))
cur.execute(insert_item, ("e87c6768-0e3d-4f52-92b8-56ad69f63bea", "shuttle", "This shuttle belongs to the S.S. Hope.", "ship", 0, 5000, 0, "is_door", "ba0d6g25-ae3r-43n8-b25c-1f4342chyfd0", None, True, "65d56cbe-f276-4055-899c-3244c0c92003", "", "da",))
cur.execute(insert_room, ("65d56cbe-f276-4055-899c-3244c0c92003", None, "ship_capital_dock", "Shuttle Bay", "The room is simple.", json.dumps({"north": "aa0dd325-ae9e-43b0-b25c-1f4803ceefd0"}), "S.S. Hope", "space", None, "da",))
cur.execute(insert_room, ("aa0dd325-ae9e-43b0-b25c-1f4803ceefd0", None, "ship_capital_dock", "Shuttle Bay", "The room is simple.", json.dumps({"south": "65d56cbe-f276-4055-899c-3244c0c92003"}), "<NAME>", "space", None, "aa",))
cur.execute(insert_room, ("ba0d6g25-ae3r-43n8-b25c-1f4342chyfd0", "e87c6768-0e3d-4f52-92b8-56ad69f63bea", "ship_private_main", "Shuttle", "You see the inside of the shuttle.", json.dumps({"out": "65d56cbe-f276-4055-899c-3244c0c92003"}), "shuttle", "shuttle", None, "da",))
cur.execute(insert_room, ("ny0d6j56-ae3r-43n8-m28s-1f4342chyfd0", None, "planet_forest", "Forest", "There are lots of trees.", json.dumps({"south": "aa0dd234-ab72-32b6-c93c-1f4803ceefd0"}), "shuttle", "shuttle", None, "da",))
cur.execute(insert_room, ("34d66jru-f276-2144-384v-3244c0c92003", None, "space", "Space", "Like a back-lit canopy, the stars and galaxies shine across the black.", json.dumps({}), "shuttle", "shuttle", None, "da",))
cur.execute(insert_room, ("aa0dd234-ab72-32b6-c93c-1f4803ceefd0", None, "e87c6768-0e3d-4f52-92b8-56ad69f63bea", "planet_landing", "Open Field", "Tall, golden wheat grows wild here. You can see the edge of a dense forest to the north.", json.dumps({"north": "ny0d6j56-ae3r-43n8-m28s-1f4342chyfd0"}), "shuttle", "shuttle", None, "da",))
cur.execute(insert_room, ("pp2aa543-ab72-93n1-c93c-1f4803ceefd0", None, "e87c6768-0e3d-4f52-92b8-56ad69f63bea", "space_orbit", "Orbit around Oxine", "Green and blue hues decorate the planet of Oxine.", json.dumps({"entry": "aa0dd234-ab72-32b6-c93c-1f4803ceefd0"}), "shuttle", "shuttle", None, "da",))
cur.execute(insert_player, ("c93e5db1-fabe-496f-a6a6-6769a1bf1404", "da", "male", 100, json.dumps({"str": 12, "dex": 8, "con": 15, "ins": 6, "edu": 5, "soc": 6}), "standing", None, 100, None, "65d56cbe-f276-4055-899c-3244c0c92003",))
cur.execute(insert_player, ("3563874d-8646-487f-8beb-3c0278d2f292", "ry", "female", 100, json.dumps({"str": 8, "dex": 12, "con": 8, "ins": 12, "edu": 10, "soc": 10}), "standing", None, 100, None, "65d56cbe-f276-4055-899c-3244c0c92003",))
cur.execute(insert_player, ("06ce6e88-f666-4cac-9901-698f7464e1c5", "fa", "female", 100, json.dumps({"str": 8, "dex": 12, "con": 8, "ins": 12, "edu": 10, "soc": 10}), "standing", None, 100, None, "65d56cbe-f276-4055-899c-3244c0c92003",))
cur.execute(insert_npc, ("c93e5db1-08cf-4cac-a06d-c8bd1082220c", "npc_human", "Lt. Dan", "human", "male", "He looks like he's busy.", json.dumps({"str": 5, "dex": 5, "con": 5, "ins": 5, "edu": 5, "soc": 5}), "standing", None, 100, json.dumps({}), json.dumps({}), "S.S. Hope", "friendly", "65d56cbe-f276-4055-899c-3244c0c92003"))
cur.execute(insert_npc, ("c93e5db1-08cf-4cac-a06d-c8bd1082220c", "npc_predator", "Predator", "onxine", "male", "He looks mean.", json.dumps({"str": 5, "dex": 5, "con": 5, "ins": 5, "edu": 5, "soc": 5}), "standing", None, 100, json.dumps({}), json.dumps({}), "Oxine", "Hostile", "ny0d6j56-ae3r-43n8-m28s-1f4342chyfd0"))
cur.execute(insert_org, ("6123e586-f276-4c99-a06d-48b411911ed7", "Heiss", "A humanoid race focused heavily on cybernetics and augments.", json.dumps({}), json.dumps({}), "Eroli"))
# commit changes
conn.commit()
print("Done adding objects to DB.")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def create_instance(self, user, user_input, input_kwargs):
print("ADMIN | Creating Instance:", user_input)
Room_Procgen(user_input[1]) | [
"config.config",
"json.dumps",
"psycopg2.connect"
] | [((2089, 2097), 'config.config', 'config', ([], {}), '()\n', (2095, 2097), False, 'from config import config\n'), ((2117, 2145), 'psycopg2.connect', 'psycopg2.connect', ([], {}), '(**dbparams)\n', (2133, 2145), False, 'import psycopg2\n'), ((3655, 3716), 'json.dumps', 'json.dumps', (["{'north': 'aa0dd325-ae9e-43b0-b25c-1f4803ceefd0'}"], {}), "({'north': 'aa0dd325-ae9e-43b0-b25c-1f4803ceefd0'})\n", (3665, 3716), False, 'import json\n'), ((3897, 3958), 'json.dumps', 'json.dumps', (["{'south': '65d56cbe-f276-4055-899c-3244c0c92003'}"], {}), "({'south': '65d56cbe-f276-4055-899c-3244c0c92003'})\n", (3907, 3958), False, 'import json\n'), ((4181, 4240), 'json.dumps', 'json.dumps', (["{'out': '65d56cbe-f276-4055-899c-3244c0c92003'}"], {}), "({'out': '65d56cbe-f276-4055-899c-3244c0c92003'})\n", (4191, 4240), False, 'import json\n'), ((4417, 4478), 'json.dumps', 'json.dumps', (["{'south': 'aa0dd234-ab72-32b6-c93c-1f4803ceefd0'}"], {}), "({'south': 'aa0dd234-ab72-32b6-c93c-1f4803ceefd0'})\n", (4427, 4478), False, 'import json\n'), ((4692, 4706), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (4702, 4706), False, 'import json\n'), ((4992, 5053), 'json.dumps', 'json.dumps', (["{'north': 'ny0d6j56-ae3r-43n8-m28s-1f4342chyfd0'}"], {}), "({'north': 'ny0d6j56-ae3r-43n8-m28s-1f4342chyfd0'})\n", (5002, 5053), False, 'import json\n'), ((5305, 5366), 'json.dumps', 'json.dumps', (["{'entry': 'aa0dd234-ab72-32b6-c93c-1f4803ceefd0'}"], {}), "({'entry': 'aa0dd234-ab72-32b6-c93c-1f4803ceefd0'})\n", (5315, 5366), False, 'import json\n'), ((5516, 5590), 'json.dumps', 'json.dumps', (["{'str': 12, 'dex': 8, 'con': 15, 'ins': 6, 'edu': 5, 'soc': 6}"], {}), "({'str': 12, 'dex': 8, 'con': 15, 'ins': 6, 'edu': 5, 'soc': 6})\n", (5526, 5590), False, 'import json\n'), ((5764, 5840), 'json.dumps', 'json.dumps', (["{'str': 8, 'dex': 12, 'con': 8, 'ins': 12, 'edu': 10, 'soc': 10}"], {}), "({'str': 8, 'dex': 12, 'con': 8, 'ins': 12, 'edu': 10, 'soc': 10})\n", (5774, 5840), False, 'import json\n'), ((6014, 6090), 'json.dumps', 'json.dumps', (["{'str': 8, 'dex': 12, 'con': 8, 'ins': 12, 'edu': 10, 'soc': 10}"], {}), "({'str': 8, 'dex': 12, 'con': 8, 'ins': 12, 'edu': 10, 'soc': 10})\n", (6024, 6090), False, 'import json\n'), ((6310, 6382), 'json.dumps', 'json.dumps', (["{'str': 5, 'dex': 5, 'con': 5, 'ins': 5, 'edu': 5, 'soc': 5}"], {}), "({'str': 5, 'dex': 5, 'con': 5, 'ins': 5, 'edu': 5, 'soc': 5})\n", (6320, 6382), False, 'import json\n'), ((6407, 6421), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (6417, 6421), False, 'import json\n'), ((6423, 6437), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (6433, 6437), False, 'import json\n'), ((6646, 6718), 'json.dumps', 'json.dumps', (["{'str': 5, 'dex': 5, 'con': 5, 'ins': 5, 'edu': 5, 'soc': 5}"], {}), "({'str': 5, 'dex': 5, 'con': 5, 'ins': 5, 'edu': 5, 'soc': 5})\n", (6656, 6718), False, 'import json\n'), ((6743, 6757), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (6753, 6757), False, 'import json\n'), ((6759, 6773), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (6769, 6773), False, 'import json\n'), ((6999, 7013), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (7009, 7013), False, 'import json\n'), ((7015, 7029), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (7025, 7029), False, 'import json\n')] |
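# Illustrative sketch (assumption, not from the original module): the same parameterized
# INSERT pattern as fill_db() above, but wrapped in psycopg2's context managers so the
# transaction commits on success, rolls back on error, and the cursor closes itself.
# Table and column names are reused from the orgs insert above; config() is from config.py.
import psycopg2
from config import config

def insert_one_org(uuid_id, name, org_desc="", supply="{}", demand="{}", home=""):
    sql = """INSERT INTO orgs(uuid_id, name, org_desc, supply, demand, home)
             VALUES(%s, %s, %s, %s, %s, %s) RETURNING uuid_id;"""
    conn = psycopg2.connect(**config())
    try:
        with conn:                        # commit / rollback handled here
            with conn.cursor() as cur:
                cur.execute(sql, (uuid_id, name, org_desc, supply, demand, home))
                return cur.fetchone()[0]
    finally:
        conn.close()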
# coding: utf-8
from __future__ import print_function, division, absolute_import
from xbmcplugin import getSetting
QUALITY = ["sd", "hd"]
FORMATS = ["mp4", "webm"]
def get_setting_int(plugin, name):
val = getSetting(plugin.handle, name)
if not val:
val = '0'
return int(val)
def get_quality(plugin):
return QUALITY[get_setting_int(plugin, 'quality')]
def get_format(plugin):
return FORMATS[get_setting_int(plugin, 'format')]
def prefer_dash(plugin):
val = getSetting(plugin.handle, 'dash')
return val == 'true'
| [
"xbmcplugin.getSetting"
] | [((214, 245), 'xbmcplugin.getSetting', 'getSetting', (['plugin.handle', 'name'], {}), '(plugin.handle, name)\n', (224, 245), False, 'from xbmcplugin import getSetting\n'), ((499, 532), 'xbmcplugin.getSetting', 'getSetting', (['plugin.handle', '"""dash"""'], {}), "(plugin.handle, 'dash')\n", (509, 532), False, 'from xbmcplugin import getSetting\n')] |
from collections import namedtuple
'''
# CC_STRINGS.PY
# Holds the multiline columns used in create character
# Placed here since keeping them in cc is messy
'''
'''Constants used in main and other modules -- equivalent to globals'''
FONT_PATH = "./spaceship/assets/fonts/"
IMG_PATH = "./spaceship/assets/"
MENU_SCREEN_WIDTH, MENU_SCREEN_HEIGHT = 80, 25
MENU_FONT_WIDTH, MENU_FONT_HEIGHT = 8, 16
GAME_SCREEN_WIDTH, GAME_SCREEN_HEIGHT = 80, 50
GAME_FONT_WIDTH, GAME_FONT_HEIGHT = 8, 8
FOV_RADIUS = 25
# CHARACTER MENU GLOBALS
CM_TITLE = 1
CM_SUBTITLE = 2
CM_BORDER_WIDTH = 80
CM_BORDER_HEIGHT = ()
CM_COLUMN_WIDTH = 12
CM_COLUMN_START = 1, 15, 27
CM_FOOTER_HEIGHT = 22
# OPTION MENU GLOBALS
OPT_TITLE = 1
OPT_BORDER_WIDTH = 80
OPT_BORDER_HEIGHT = (3, 24)
# ITEM CONSTANTS
ITEM_DROP_RATE=0 # drop rate from monsters in dungeons
ITEM_FIND_RATE=0 # chances item spawns in dungeon
ITEM_PREREVEAL=0 # basically pre identification rate
# ROOM CONSTANTS
ROOM_MIN_PLACE=0
ROOM_MAX_PLACE=0
ROOM_HALL_SIZE=2
ROOM_DOOR_RATE=0
GAME_TITLE_VERSION="v 0.0.4"
GAME_TITLE_WIDTH=46
GAME_TITLE_HEIGHT=6
GAME_TITLE=''' \
___ _ _
/ _\_ __ __ _ ___ ___ ___| |__ (_)_ __
\ \| '_ \ / _` |/ __/ _ \/ __| '_ \| | '_ \
_\ \ |_) | (_| | (_| __/\__ \ | | | | |_) |
\___/ .__/ \__,_|\___\___||___/_| |_|_| .__/
|_| |_|
'''[1:]
GAME_TITLE_SHORT ='''
██████╗ █████╗ ██████╗ █████╗ ██╗
██╔════╝██╔══██╗██╔══██╗██╔══██╗██║
██║ ███████║██████╔╝███████║██║
██║ ██╔══██║██╔══██╗██╔══██║██║
╚██████╗██║ ██║██████╔╝██║ ██║██████╗
╚═════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚═════╝
Act I: Bones of the Ancestors
'''[1:]
#------------------------------------------------------------------------------
# SCENE :- MAKE
#------------------------------------------------------------------------------
_col1 = """
Gender : {:>10}{delim}
Race : {:>10}{delim}
Capital : {:>10}{delim}
Class : {:>10}{delim}\n
Gold : {:>10}{delim}
Level : {:>10}{delim}
Adv Exp : {:>10}{delim}\n
HP : [c=#00ffff]{:>10}[/c]{delim}
MP : [c=#00ffff]{:>10}[/c]{delim}
SP : [c=#00ffff]{:>10}[/c]{delim}
"""[1:]
_col2 = """
SKILLS : {delim}\n{}{delim}\n{}{delim}{delim}\n
TOT GB RB CB IB{delim}
STR : [c=#00ffff]{:>2}[/c]{delim}
CON : [c=#00ffff]{:>2}[/c]{delim}
DEX : [c=#00ffff]{:>2}[/c]{delim}
INT : [c=#00ffff]{:>2}[/c]{delim}
WIS : [c=#00ffff]{:>2}[/c]{delim}
CHA : [c=#00ffff]{:>2}[/c]{delim}
"""[1:]
_bon = """
{:>2}{delim}
{:>2}{delim}
{:>2}{delim}
{:>2}{delim}
{:>2}{delim}
{:>2}"""[1:]
_col3 = """
Head : {:<5}{delim}\nNeck : {:<5}{delim}\nBody : {:<5}{delim}
Arms : {:<5}{delim}\nHands : {:<5}{delim}\nLhand : {:<5}
Rhand : {:<5}{delim}\nRing1 : {:<5}\nRing2 : {:<5}{delim}
Waist : {:<5}{delim}\nLegs : {:<5}{delim}\nFeet : {:<5}\n
"""[1:]
bonuses = {
"STR": "+{} to Strength",
"CON": "+{} to Constitution",
"WIS": "+{} to Wisdom",
"DEX": "+{} to Dexterity",
"CHA": "+{} to Charisma",
"WIL": "+{} to Willpower",
"PER": "+{} to Perception",
"LUC": "+{} to Luck"
}
# Some formulas to use when developing a character
stats = namedtuple("stats", "str con dex int wis cha")
MALE = stats(1, 0, 0, 0, 0, 0)
FEMALE = stats(0, 0, 0, 0, 1, 0)
HUMAN = stats(3, 3, 3, 3, 3, 3)
HUMAN_BONUS = stats(0, 0, 0, 0, 0, 0)
ELVEN = stats(2, 2, 4, 4, 4, 2)
ELVEN_BONUS = stats(-1, -1, 1, 1, 1, -1)
ORCEN = stats(5, 5, 2, 1, 3, 2)
ORCEN_BONUS = stats(2, 2, -1, -2, 0, -1)
DWARF = stats(4, 4, 2, 3, 2, 3)
DWARF_BONUS = stats(1, 1, -1, 0, -1, 0)
BEAST = stats(4, 3, 4, 3, 2, 2)
BEAST_BONUS = stats(0, 1, -1, 0, 1, -1)
SQUIRE = stats(2, 2, 1, 0, 0, 0)
ARCHER = stats(0, 1, 3, 0, 0, 1)
WIZARD = stats(0, 0, 0, 2, 3, 0)
DRUIDS = stats(1, 0, 0, 2, 2, 0)
CLERIC = stats(0, 1, 0, 2, 2, 0)
EXTRA = 3
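# Illustrative sketch (assumption, not in the original file): one way the race, gender, and
# class tuples above could be folded into a starting character by adding the namedtuple
# fields element-wise. How the game actually combines them is not shown in this module.
def combine_stats(*blocks):
    return stats(*(sum(values) for values in zip(*blocks)))

# e.g. a female human squire: base + racial bonus + gender + class
# combine_stats(HUMAN, HUMAN_BONUS, FEMALE, SQUIRE)
# -> stats(str=5, con=5, dex=4, int=3, wis=4, cha=3)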
start="""
Welcome to the realm of Calabaston. Your spirit has drifted to this place filled with intelligent races, a multitude
of creatures and monsters, and history colored with tragedy, betrayal, and conquest. If you are willing, come and
choose the vessel in which you will enter this world. Take careful consideration as your starting point depends on the
choices you make here. Good luck adventurer.
"""[1:]
race_human="""
The youngest race on the continent of Calabaston, humans were the last to arrive from across the Endless Water, and yet,
they have thrived and proved to be a dominant force throughout the land. Within less than a hundred years, they have
built Renmar, the current capital city of the human empire of Rane. Humans can be skilled with a wide array of
different weaponry and magic and are useful in any class.
"""[1:]
race_dwarf="""
A hardy race, dwarves are most famous for their impenetrable fortresses and mass earthly wealth. Their vast fortune
allows their people to pursue trades and crafts such as blacksmithing, jewelry, and weaponforging, which would not
be possible otherwise. Dwarves seen outside of Yugahdah, the dwarven territory, are most often skilled traders,
merchants, or mercenaries. Dwarves prefer melee weapons but can also use magic and ranged weapons.
"""[1:]
race_beast="""
Beasts have physical differences that set them apart from other races as they are born with a combination of fur,
scales, horns, and tails. After being driven out of the plains of Tempest, they settled near the north west of the
wetlands and now call Tiphmore, a gigantic trade city, their new home. Beasts have unusually high health and mana pools
and are suited for either magic or melee classes.
"""[1:]
race_elven="""
The long-lived, mysterious race of elves reside deep within the forests of Aurendelim. Though they may look frail,
their appearances betray them as they are more dextereous than other races. They live in accordance to the law of the
forest and dissaprove of any attempt by other races that try to exploit the forest for its resources. Elven warriors
are most often seen using magic or ranged weaponry but can be skilled in melee as well.
"""[1:]
race_orcen="""
Brutish and violent, Orcs are feared by other races, including even beast-folk, due to their warring nature and need
to constantly engage in battle. They are split into many different tribal clans throughout the Burning Lands territory
that struggle to take power among all other factions and claim the Blood-Bone crown, which signifies the strongest orc
clan. The bone crown is currently located in Lok Gurrah, the largest city in the Burning Lands territory.
"""[1:]
race_ishtahari="""
Among all the races that live across Calabaston, the Ishtahari are the oldest. Yet having lived on the continent
hundreds of years before any of the other races, they are now as rare to meet as their magic is to learn. They are the
only race to master two elements of the seven, LIGHT and VOID.
"""[1:]
race_ork="""
"""[1:]
race_goblin="""
"""[1:]
race_troll="""
"""[1:]
knight="""
Warriors of noble heritage, knights make up the elite troops within a military. Their heavy armor and equipment allow
them to take significant damage without injury and deal high damage in return. They are often seen on the front lines of
armies, serving as the vanguard of the forces they fight for. Knights have a high sense of duty and honor to their land
and people
"""[1:]
barbarian="""
Barbarians are fearless warriors on the battlefield. Their tendency to fight with rage and reckless abandon makes even
experienced soldiers hesitant to fight them.
"""[1:]
class_cleric="""
Clerics are holy men who use their magic abilities to heal their wounded and injured allies. Their spells are particularly
effective in eradicating the undead and ghoulish creatures of the night. They prefer using ranged magic but are no
strangers to melee combat.
"""[1:]
class_druid="""
Druids are sages who worship the ancient forces of nature, which gives them a mystical connection to the earth and natural
abilities. They have a deep relationship with creatures of land and water. Druids are proficient in both physical and
magical combat as they can use magic to strengthen their physical prowess.
"""[1:]
fighter="""
Fighter is blah blah blah
"""[1:]
paladin="""
Paladin is blah blah blah
"""[1:]
ranger="""
Ranger is blah blah blah
"""[1:]
sorcerer="""
Sorcerer is blah blah blah
"""[1:]
rogue="""
Rogue is blah blah blah
"""[1:]
class_archer="""
Archers are skilled in ranged combat, being able to use an assortment of different ranged weaponry that include bows,
throwing daggers, and javalins. If needed they can use their weapons for melee combat as well. They carry very little, as
their equipment is light and can use sneak and steal abilities on their enemies to stealthily replenish their supplies.
"""[1:]
class_wizard="""
Wizards are students of elemental and arcane magic. Their educational background allows them to read ancient scrolls
as well as use spellbooks. Throught study and memorization they can learn new spells through reading and learning. They
have the largest number of upgradable classes available to them including elementalist, sorcerer, and summoner.
"""[1:]
class_squire="""
Squires are the most basic melee class offered to newly created adventurers. They are the most proficient in melee
weapons and combat but can be skilled in ranged combat as well. With enough experience and money, squires have to choice
of upgrading their class statuses to the Knight and Paladin class.
"""[1:]
"""
Lancer,
Archer, Squire, Mystic, Bard, Summoner, Chemist, Dragoon, Geomancer, Monk, Ninja, Samurai, Theif
Scout, Berserker, Pathfinder, Runemaster, Sentinal, Lord, Dragonguard, Explorer, Thunderguard, Guardsman, Shieldmaster
Marksman, sharpshooter, captain, champion, marshal, rider, shaman, enchantress, rider, duelist, fencer, mauler,
pikeman, halberdier, spearman, assassin, trapper, warden, arbiter, enforcer, blademaster,
"""
subrace_descriptions=[
[
"Citizen servents residing in the Rodash Empire",
"Travelers who wander the continent of Auriel",
"Humans living outside of the borders of the Rodash Empire",
"Sadukar are those who live in the Icy Gaze north of the Empire",
],
[
"Family name for mining dwarves from the clan in the Iron Hills",
"Family name for the royal dwarves from the Triple Shining Mountain",
"Family name for the military dwarf clan from Stone Keep",
],
[
"Family name for the elite elven family residing in the Emerald Forest",
"Local elven family name for the elves residing in the woods of Arundelim",
"Drow are banished elves residing in the forest hills of the Dark Forest",
],
[
"Ishma are the titles for light-element users of magic",
"Ishta are the titles for void-element users of magic",
],
[
"Mountain orcs reside in the Shadows of Mount Huron",
"Greenskins reside in swamplands East of Ravenflow",
"Grayskins are found everywhere on the continent of Auriel",
],
[
"Goblins live in the caves and hills along the Storm-wrought hills and caves",
"Hobgoblins are a special type of goblin born among goblins but with more strength",
],
[
"Cave trolls live among the many shelters provided by the Storm-wrought Ridge",
"Forest trolls reside in the northern and colder area of the Dark Forest",
"Ice trolls prefer to live in the coldest areas of the Icy Gaze",
]
]
#------------------------------------------------------------------------------
# SCENE :- GAME
#------------------------------------------------------------------------------
status = '''
{:13}
{:1}{:1}{:1}
{}
LVL: {:>6}
EXP: {:>6}
HP : {:>6}
MP : {:>6}
DMG: {:>6}
A/R: {:>17}/{:>14}
STR: [c={}]{:>6}[/c]
CON: [c={}]{:>6}[/c]
DEX: [c={}]{:>6}[/c]
INT: [c={}]{:>6}[/c]
WIS: [c={}]{:>6}[/c]
CHA: [c={}]{:>6}[/c]
GOLD: {:>5}
TURNS: {:>4}
'''[1:]
profile = [
'''
Name : {name:>6}
Gender : {sex:>6}
Race : {race:>6}
Class : {job:>6}
STR : {:>6}
CON : {:>6}
DEX : {:>6}
WIS : {:>6}
INT : {:>6}
CHA : {:>6}
'''[1:],
'''
Damage : {dmg:>6}
Accuracy : {acc:>5}
'''[1:],
]
dump_template="""
[Character Sheet -- Spaceship]
======== Player Stats ========
Name : {}
Sex : {}
Race : {}
Class : {}
Level : {}
Exp : {}
======== Equipment ========
He :
Neck :
Torso : Peasant garb
Ring(L) :
Hand(L) : Sword
Ring(R) :
Hand(R) :
Waist : Thin rope
Legs : Common pants
Feet : Sandals
======== Player Items ========
======== Alignments ========
======== Relations ========
"""[1:]
enter_map_city = "You enter the city of {}"
enter_map_cave = "You enter the {}."
enter_map_wild = "You enter the area."
go_up_stairs = "You go up the stairs."
go_up_error = "You cannot go upstairs without stairs."
go_up_travel = "You begin travelling."
cmd_invalid = "'{}' is not a valid command"
cmd_switch_eq = "Press 'v' to switch to inventory."
cmd_switch_iv = "Press 'q' to switch to equipment."
cmd_unequip_confirm = "Are you sure you want to unequip the {}?"
cmd_unequip = "You unequip the {}."
cmd_equip_none = "No items to equip for that slot."
cmd_equip_two_hand = "You cannot equip a weapon to your {}. \
You are already wielding the {} on your {}."
# equip, drop, use, eat
query = "Which item to {}?"
item = "You {} the {}."
invalid = "Invalid selection."
error = "You cannot {} this item."
cmds = {
'equip': {
'query': "Which item to equip?", # ?
'item': "You equip the {}.", # ok
'invalid': "Invalid selection", # no
},
'drop': {
'query': "Which item to drop?",
'item': "You drop the {}.",
},
'use': {
'query': "Which item to use?",
'item': "You use the {}.",
'none': "You have no items in your inventory that are useable.",
'invalid': "You cannot use this item.",
},
'eat': {
'query': "Which item to eat?",
'item': "You eat the {}",
'none': "You have no items in your inventory that are usable.",
'invalid': "You cannot eat this item.",
},
}
save = {
'query': "Save and exit game? (Y/N)",
'folder': "saved folder does not exists -creating folder: './saves'",
}
inventory = {
'funcs' : "What to do with the {}?",
'none': "You have nothing in your inventory.",
}
cmd_equip_query = "Which item to equip?"
cmd_equip = "You equip the {}."
cmd_equip_invalid = "Invalid selection."
cmd_drop_query = "Which item to drop?"
cmd_drop_item = "You drop the {}."
cmd_use_none = "You have no items in your inventory that are usable."
cmd_use_query = "Which item to use?"
cmd_use_item = "You use the {}."
cmd_cannot_use_item = "You cannot use this item."
cmd_eat_query = "Which item to eat?"
cmd_eat_none = "You do not have anything to eat."
cmd_eat_item = "You eat the {}."
cmd_cannot_eat_item = "You cannot eat this item."
cmd_save = "Save and exit game? (Y/N)"
cmd_save_folder = 'saved folder does not exist - creating folder: "./saves"'
cmd_inv_funcs = "What to do with the {}?"
cmd_inv_none = "You have nothing in your inventory."
movement = {
"wait": {
"local": "You rest for a while.",
"world": "You wait in the area.",
},
"blocked": {
"oob": "You reached the edge of the map.",
"swim": "You cannot swim.",
"wall": "You walk into {}.",
},
"unit": {
"displace": "{} switches places with {}."
}
}
movement_wait_local = "You rest for a while."
movement_wait_world = "You wait in the area."
movement_move_error = "You cannot travel there."
movement_move_oob = "You reached the edge of the map."
movement_move_swim = "You cannot swim."
movement_move_block = "You walk into {}."
movement_unit_displace = "The {} switches places with the {}."
movement_move_chars = {
"=": "furniture",
"+": "a door",
"/": "a door",
"o": "a lamp",
"#": "a wall",
"x": "a post",
"~": "a river",
"T": "a tree",
"f": "a tree",
"Y": "a tree",
"%": "a wall",
}
door = {
"c": {
"char": "/",
"act": "Closing door.",
"none": "No open doors next to you.",
"many": "There is more than one open door near you. Which door?",
"invalid": "Invalid direction. Canceled closing door.",
"error": "Direction has no door. Canceled closing door.",
},
"o": {
"char": "+",
"act": "Opening door.",
"none": "No closed doors next to you.",
"many": "There is more than one closed door near you. Which door?",
"invalid": "Invalid direction. Canceled opening door.",
"error": "Direction has no door. Canceled closing door.",
}
}
close_door_act = "Closing door."
close_door_none = "No open doors next to you."
close_door_many = "There is more than one open door near you. Which door?"
close_door_invalid = "Invalid direction. Canceled closing door."
close_door_error = "Direction has no door. Canceled closing door."
open_door_act = "Opening door."
open_door_none = "No closed doors next to you."
open_door_many = "There is more than one closed door near you. Which door?"
open_door_invalid = "Invalid direction. Canceled opening door."
open_door_error = "Direction has no door. Canceled closing door."
converse_none = "No one to talk to."
converse_many = "Who do you want to talk to?"
converse_invalid = "Invalid direction. Stopped looking for someone to talk to."
converse_error = "There is no one there."
pass_by_item = [
"You pass by an item.",
"There is something here."
"Your feet touches an object."
]
if __name__ == "__main__":
try:
print(movement_move_chars[input()])
except KeyError:
raise | [
"collections.namedtuple"
] | [((3157, 3203), 'collections.namedtuple', 'namedtuple', (['"""stats"""', '"""str con dex int wis cha"""'], {}), "('stats', 'str con dex int wis cha')\n", (3167, 3203), False, 'from collections import namedtuple\n')] |
# coding: utf-8
"""
Selling Partner API for Pricing
The Selling Partner API for Pricing helps you programmatically retrieve product pricing and offer information for Amazon Marketplace products. # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DetailedShippingTimeType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'minimum_hours': 'int',
'maximum_hours': 'int',
'available_date': 'float',
'availability_type': 'str'
}
attribute_map = {
'minimum_hours': 'minimumHours',
'maximum_hours': 'maximumHours',
'available_date': 'availableDate',
'availability_type': 'availabilityType'
}
def __init__(self, minimum_hours=None, maximum_hours=None, available_date=None, availability_type=None): # noqa: E501
"""DetailedShippingTimeType - a model defined in Swagger""" # noqa: E501
self._minimum_hours = None
self._maximum_hours = None
self._available_date = None
self._availability_type = None
self.discriminator = None
if minimum_hours is not None:
self.minimum_hours = minimum_hours
if maximum_hours is not None:
self.maximum_hours = maximum_hours
if available_date is not None:
self.available_date = available_date
if availability_type is not None:
self.availability_type = availability_type
@property
def minimum_hours(self):
"""Gets the minimum_hours of this DetailedShippingTimeType. # noqa: E501
The minimum time, in hours, that the item will likely be shipped after the order has been placed. # noqa: E501
:return: The minimum_hours of this DetailedShippingTimeType. # noqa: E501
:rtype: int
"""
return self._minimum_hours
@minimum_hours.setter
def minimum_hours(self, minimum_hours):
"""Sets the minimum_hours of this DetailedShippingTimeType.
The minimum time, in hours, that the item will likely be shipped after the order has been placed. # noqa: E501
:param minimum_hours: The minimum_hours of this DetailedShippingTimeType. # noqa: E501
:type: int
"""
self._minimum_hours = minimum_hours
@property
def maximum_hours(self):
"""Gets the maximum_hours of this DetailedShippingTimeType. # noqa: E501
The maximum time, in hours, that the item will likely be shipped after the order has been placed. # noqa: E501
:return: The maximum_hours of this DetailedShippingTimeType. # noqa: E501
:rtype: int
"""
return self._maximum_hours
@maximum_hours.setter
def maximum_hours(self, maximum_hours):
"""Sets the maximum_hours of this DetailedShippingTimeType.
The maximum time, in hours, that the item will likely be shipped after the order has been placed. # noqa: E501
:param maximum_hours: The maximum_hours of this DetailedShippingTimeType. # noqa: E501
:type: int
"""
self._maximum_hours = maximum_hours
@property
def available_date(self):
"""Gets the available_date of this DetailedShippingTimeType. # noqa: E501
The date when the item will be available for shipping. Only displayed for items that are not currently available for shipping. # noqa: E501
:return: The available_date of this DetailedShippingTimeType. # noqa: E501
:rtype: float
"""
return self._available_date
@available_date.setter
def available_date(self, available_date):
"""Sets the available_date of this DetailedShippingTimeType.
The date when the item will be available for shipping. Only displayed for items that are not currently available for shipping. # noqa: E501
:param available_date: The available_date of this DetailedShippingTimeType. # noqa: E501
:type: float
"""
self._available_date = available_date
@property
def availability_type(self):
"""Gets the availability_type of this DetailedShippingTimeType. # noqa: E501
Indicates whether the item is available for shipping now, or on a known or an unknown date in the future. If known, the availableDate property indicates the date that the item will be available for shipping. Possible values: NOW, FUTURE_WITHOUT_DATE, FUTURE_WITH_DATE. # noqa: E501
:return: The availability_type of this DetailedShippingTimeType. # noqa: E501
:rtype: str
"""
return self._availability_type
@availability_type.setter
def availability_type(self, availability_type):
"""Sets the availability_type of this DetailedShippingTimeType.
Indicates whether the item is available for shipping now, or on a known or an unknown date in the future. If known, the availableDate property indicates the date that the item will be available for shipping. Possible values: NOW, FUTURE_WITHOUT_DATE, FUTURE_WITH_DATE. # noqa: E501
:param availability_type: The availability_type of this DetailedShippingTimeType. # noqa: E501
:type: str
"""
allowed_values = ["NOW", "FUTURE_WITHOUT_DATE", "FUTURE_WITH_DATE"] # noqa: E501
if availability_type not in allowed_values:
raise ValueError(
"Invalid value for `availability_type` ({0}), must be one of {1}" # noqa: E501
.format(availability_type, allowed_values)
)
self._availability_type = availability_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DetailedShippingTimeType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DetailedShippingTimeType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"six.iteritems"
] | [((6170, 6203), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (6183, 6203), False, 'import six\n')] |
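# Illustrative usage sketch (assumption, not part of the generated module): constructing a
# DetailedShippingTimeType and serializing it. availability_type passes through the setter
# defined above, so it is validated against NOW / FUTURE_WITHOUT_DATE / FUTURE_WITH_DATE.
if __name__ == "__main__":
    shipping = DetailedShippingTimeType(
        minimum_hours=24,
        maximum_hours=48,
        availability_type="NOW",
    )
    print(shipping.to_dict())
    # {'minimum_hours': 24, 'maximum_hours': 48, 'available_date': None, 'availability_type': 'NOW'}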
from sqlalchemy_teradata.compiler import TeradataTypeCompiler as tdtc
from sqlalchemy_teradata.dialect import TeradataDialect as tdd
from sqlalchemy.types import (Integer, SmallInteger, BigInteger, Numeric,
Float, DateTime, Date, String, Text, Unicode, UnicodeText,
Time, LargeBinary, Boolean, Interval,
DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL,
TEXT, NVARCHAR, NCHAR)
from sqlalchemy_teradata.types import (CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC,
VARCHAR, TIMESTAMP, TIME)
from sqlalchemy.testing import fixtures
from itertools import product
import datetime as dt
class TestCompileGeneric(fixtures.TestBase):
def _comp(self, inst):
return self.comp.process(inst)
def setup(self):
# Teradata Type Compiler using Teradata Dialect to compile types
self.comp = tdtc(tdd)
self.charset= ['latin, unicode, graphic, kanjisjis']
self.len_limits = [-1, 32000, 64000]
self.multips = ['K', 'M', 'G']
def test_defaults(self):
assert self._comp(Integer()) == 'INTEGER'
assert self._comp(SmallInteger()) == 'SMALLINT'
assert self._comp(BigInteger()) == 'BIGINT'
assert self._comp(Numeric()) == 'NUMERIC'
assert self._comp(Float()) == 'FLOAT'
assert self._comp(DateTime()) == 'TIMESTAMP(6)'
assert self._comp(Date()) == 'DATE'
assert self._comp(Time()) == 'TIME(6)'
assert self._comp(String()) == 'LONG VARCHAR'
assert self._comp(Text()) == 'CLOB'
assert self._comp(Unicode()) == 'LONG VARCHAR CHAR SET UNICODE'
assert self._comp(UnicodeText()) == 'CLOB CHAR SET UNICODE'
assert self._comp(Boolean()) == 'BYTEINT'
#assert self._comp(LargeBinary()) == 'BLOB'
class TestCompileSQLStandard(fixtures.TestBase):
def _comp(self, inst):
return self.comp.process(inst)
def setup(self):
self.comp = tdtc(tdd)
def test_defaults(self):
assert self._comp(DATE()) == 'DATE'
assert self._comp(DATETIME()) == 'TIMESTAMP(6)'
assert self._comp(TIMESTAMP()) == 'TIMESTAMP(6)'
assert self._comp(TIME()) == 'TIME(6)'
assert self._comp(CHAR()) == 'CHAR(1)'
assert self._comp(VARCHAR()) == 'LONG VARCHAR'
assert self._comp(NCHAR()) == 'CHAR CHAR SET UNICODE'
assert self._comp(NVARCHAR()) == 'LONG VARCHAR CHAR SET UNICODE'
assert self._comp(CLOB()) == 'CLOB'
assert self._comp(TEXT()) == 'CLOB'
assert self._comp(DECIMAL()) == 'DECIMAL(5, 0)'
assert self._comp(NUMERIC()) == 'NUMERIC(5, 0)'
assert self._comp(INTEGER()) == 'INTEGER'
assert self._comp(FLOAT()) == 'FLOAT'
assert self._comp(REAL()) == 'REAL'
assert self._comp(SMALLINT()) == 'SMALLINT'
assert self._comp(BIGINT()) == 'BIGINT'
assert self._comp(BOOLEAN()) == 'BYTEINT'
class TestCompileTypes(fixtures.TestBase):
"""
    The tests are based on the info in SQL Data Types and Literals (Release 15.10, Dec '15)
"""
def setup(self):
self.comp = tdtc(tdd)
        self.charset = ['latin', 'unicode', 'graphic', 'kanjisjis']
self.len_limits = [-1, 32000, 64000]
self.multips = ['K', 'M', 'G']
def test_strings(self):
for m in self.multips:
c = CLOB(length = 1, multiplier = m)
assert self.comp.process(c) == 'CLOB(1{})'.format(m)
assert c.length == 1
for len_ in self.len_limits:
assert 'VARCHAR({})'.format(len_) == self.comp.process(VARCHAR(len_))
assert 'CHAR({})'.format(len_) == self.comp.process(CHAR(len_))
assert 'CLOB({})'.format(len_) == self.comp.process(CLOB(len_))
for c in self.charset:
assert 'VARCHAR({}) CHAR SET {}'.format(len_, c) == \
self.comp.process(VARCHAR(len_, c))
assert 'CHAR({}) CHAR SET {}'.format(len_, c) == \
self.comp.process(CHAR(len_, c))
assert 'CLOB({}) CHAR SET {}'.format(len_, c) == \
self.comp.process(CLOB(len_, c))
def test_timezones(self):
assert self.comp.process(TIME(1, True)) == 'TIME(1) WITH TIME ZONE'
assert self.comp.process(TIMESTAMP(0, True)) == 'TIMESTAMP(0) WITH TIME ZONE'
| [
"sqlalchemy_teradata.types.DECIMAL",
"sqlalchemy_teradata.types.CLOB",
"sqlalchemy.types.INTEGER",
"sqlalchemy_teradata.types.TIMESTAMP",
"sqlalchemy.types.SMALLINT",
"sqlalchemy.types.SmallInteger",
"sqlalchemy.types.Unicode",
"sqlalchemy.types.Integer",
"sqlalchemy.types.DATETIME",
"sqlalchemy.types.REAL",
"sqlalchemy.types.TEXT",
"sqlalchemy.types.BigInteger",
"sqlalchemy.types.FLOAT",
"sqlalchemy.types.BOOLEAN",
"sqlalchemy.types.Text",
"sqlalchemy_teradata.types.TIME",
"sqlalchemy_teradata.compiler.TeradataTypeCompiler",
"sqlalchemy.types.BIGINT",
"sqlalchemy_teradata.types.VARCHAR",
"sqlalchemy.types.NVARCHAR",
"sqlalchemy.types.Time",
"sqlalchemy.types.Date",
"sqlalchemy.types.DATE",
"sqlalchemy_teradata.types.CHAR",
"sqlalchemy.types.Numeric",
"sqlalchemy.types.Boolean",
"sqlalchemy.types.UnicodeText",
"sqlalchemy.types.String",
"sqlalchemy.types.DateTime",
"sqlalchemy_teradata.types.NUMERIC",
"sqlalchemy.types.NCHAR",
"sqlalchemy.types.Float"
] | [((915, 924), 'sqlalchemy_teradata.compiler.TeradataTypeCompiler', 'tdtc', (['tdd'], {}), '(tdd)\n', (919, 924), True, 'from sqlalchemy_teradata.compiler import TeradataTypeCompiler as tdtc\n'), ((1986, 1995), 'sqlalchemy_teradata.compiler.TeradataTypeCompiler', 'tdtc', (['tdd'], {}), '(tdd)\n', (1990, 1995), True, 'from sqlalchemy_teradata.compiler import TeradataTypeCompiler as tdtc\n'), ((3161, 3170), 'sqlalchemy_teradata.compiler.TeradataTypeCompiler', 'tdtc', (['tdd'], {}), '(tdd)\n', (3165, 3170), True, 'from sqlalchemy_teradata.compiler import TeradataTypeCompiler as tdtc\n'), ((3393, 3421), 'sqlalchemy_teradata.types.CLOB', 'CLOB', ([], {'length': '(1)', 'multiplier': 'm'}), '(length=1, multiplier=m)\n', (3397, 3421), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((1121, 1130), 'sqlalchemy.types.Integer', 'Integer', ([], {}), '()\n', (1128, 1130), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1170, 1184), 'sqlalchemy.types.SmallInteger', 'SmallInteger', ([], {}), '()\n', (1182, 1184), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1225, 1237), 'sqlalchemy.types.BigInteger', 'BigInteger', ([], {}), '()\n', (1235, 1237), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1276, 1285), 'sqlalchemy.types.Numeric', 'Numeric', ([], {}), '()\n', (1283, 1285), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1325, 1332), 'sqlalchemy.types.Float', 'Float', ([], {}), '()\n', (1330, 1332), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1371, 1381), 'sqlalchemy.types.DateTime', 'DateTime', ([], {}), '()\n', (1379, 1381), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1426, 1432), 'sqlalchemy.types.Date', 'Date', ([], {}), '()\n', (1430, 1432), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1469, 1475), 'sqlalchemy.types.Time', 'Time', ([], {}), '()\n', (1473, 1475), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, 
Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1516, 1524), 'sqlalchemy.types.String', 'String', ([], {}), '()\n', (1522, 1524), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1569, 1575), 'sqlalchemy.types.Text', 'Text', ([], {}), '()\n', (1573, 1575), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1612, 1621), 'sqlalchemy.types.Unicode', 'Unicode', ([], {}), '()\n', (1619, 1621), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1683, 1696), 'sqlalchemy.types.UnicodeText', 'UnicodeText', ([], {}), '()\n', (1694, 1696), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((1751, 1760), 'sqlalchemy.types.Boolean', 'Boolean', ([], {}), '()\n', (1758, 1760), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2052, 2058), 'sqlalchemy.types.DATE', 'DATE', ([], {}), '()\n', (2056, 2058), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2096, 2106), 'sqlalchemy.types.DATETIME', 'DATETIME', ([], {}), '()\n', (2104, 2106), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2152, 2163), 'sqlalchemy_teradata.types.TIMESTAMP', 'TIMESTAMP', ([], {}), '()\n', (2161, 2163), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2209, 2215), 'sqlalchemy_teradata.types.TIME', 'TIME', ([], {}), '()\n', (2213, 2215), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2257, 2263), 'sqlalchemy_teradata.types.CHAR', 'CHAR', ([], {}), '()\n', (2261, 2263), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2304, 2313), 'sqlalchemy_teradata.types.VARCHAR', 'VARCHAR', ([], {}), '()\n', (2311, 2313), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2359, 2366), 'sqlalchemy.types.NCHAR', 'NCHAR', ([], {}), '()\n', (2364, 2366), 
False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2421, 2431), 'sqlalchemy.types.NVARCHAR', 'NVARCHAR', ([], {}), '()\n', (2429, 2431), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2494, 2500), 'sqlalchemy_teradata.types.CLOB', 'CLOB', ([], {}), '()\n', (2498, 2500), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2538, 2544), 'sqlalchemy.types.TEXT', 'TEXT', ([], {}), '()\n', (2542, 2544), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2584, 2593), 'sqlalchemy_teradata.types.DECIMAL', 'DECIMAL', ([], {}), '()\n', (2591, 2593), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2640, 2649), 'sqlalchemy_teradata.types.NUMERIC', 'NUMERIC', ([], {}), '()\n', (2647, 2649), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((2696, 2705), 'sqlalchemy.types.INTEGER', 'INTEGER', ([], {}), '()\n', (2703, 2705), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2747, 2754), 'sqlalchemy.types.FLOAT', 'FLOAT', ([], {}), '()\n', (2752, 2754), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2794, 2800), 'sqlalchemy.types.REAL', 'REAL', ([], {}), '()\n', (2798, 2800), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2839, 2849), 'sqlalchemy.types.SMALLINT', 'SMALLINT', ([], {}), '()\n', (2847, 2849), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2892, 2900), 'sqlalchemy.types.BIGINT', 'BIGINT', ([], {}), '()\n', (2898, 2900), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((2942, 2951), 'sqlalchemy.types.BOOLEAN', 'BOOLEAN', ([], {}), '()\n', (2949, 2951), False, 'from sqlalchemy.types import Integer, SmallInteger, BigInteger, Numeric, Float, 
DateTime, Date, String, Text, Unicode, UnicodeText, Time, LargeBinary, Boolean, Interval, DATE, BOOLEAN, DATETIME, BIGINT, SMALLINT, INTEGER, FLOAT, REAL, TEXT, NVARCHAR, NCHAR\n'), ((4294, 4307), 'sqlalchemy_teradata.types.TIME', 'TIME', (['(1)', '(True)'], {}), '(1, True)\n', (4298, 4307), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((4370, 4388), 'sqlalchemy_teradata.types.TIMESTAMP', 'TIMESTAMP', (['(0)', '(True)'], {}), '(0, True)\n', (4379, 4388), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((3629, 3642), 'sqlalchemy_teradata.types.VARCHAR', 'VARCHAR', (['len_'], {}), '(len_)\n', (3636, 3642), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((3708, 3718), 'sqlalchemy_teradata.types.CHAR', 'CHAR', (['len_'], {}), '(len_)\n', (3712, 3718), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((3784, 3794), 'sqlalchemy_teradata.types.CLOB', 'CLOB', (['len_'], {}), '(len_)\n', (3788, 3794), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((3950, 3966), 'sqlalchemy_teradata.types.VARCHAR', 'VARCHAR', (['len_', 'c'], {}), '(len_, c)\n', (3957, 3966), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((4084, 4097), 'sqlalchemy_teradata.types.CHAR', 'CHAR', (['len_', 'c'], {}), '(len_, c)\n', (4088, 4097), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n'), ((4215, 4228), 'sqlalchemy_teradata.types.CLOB', 'CLOB', (['len_', 'c'], {}), '(len_, c)\n', (4219, 4228), False, 'from sqlalchemy_teradata.types import CHAR, VARCHAR, CLOB, DECIMAL, NUMERIC, VARCHAR, TIMESTAMP, TIME\n')] |
import logging
import logging.config
import os
import sys
import yaml
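# Load the logging configuration (dictConfig schema) from logger/logging.yaml,
# resolved relative to the entry script's directory (sys.path[0])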
config_path = os.path.join(sys.path[0], 'logger/logging.yaml')
with open(config_path, 'r') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
f.close()
| [
"logging.config.dictConfig",
"os.path.join"
] | [((86, 134), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""logger/logging.yaml"""'], {}), "(sys.path[0], 'logger/logging.yaml')\n", (98, 134), False, 'import os\n'), ((212, 245), 'logging.config.dictConfig', 'logging.config.dictConfig', (['config'], {}), '(config)\n', (237, 245), False, 'import logging\n')] |
#!/usr/bin/env python3
#-*-coding:utf-8-*-
from bs4 import BeautifulSoup
from urllib import request
import re
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# Passing the HTML above to BeautifulSoup yields a BeautifulSoup object that can be printed with standard indentation
def test1():
    # html.parser -- Python's built-in parser: moderate speed and good tolerance for bad markup, but poorly supported before Python 2.7.3 / 3.2.2, where error tolerance is weak
soup = BeautifulSoup(html_doc,'html.parser')
print(soup.prettify())
    # lxml HTML parser: fast and fault-tolerant, but requires the C library to be installed
soup = BeautifulSoup(html_doc,'lxml')
print(soup.prettify())
    # lxml XML parser: fast and the only parser that supports XML; requires the C library
soup = BeautifulSoup(html_doc,'lxml-xml')
print(soup.prettify())
    # html5lib: best fault tolerance, parses the document the way a browser does and produces HTML5; slow, but has no external dependencies
soup = BeautifulSoup(html_doc,'html5lib')
print(soup.prettify())
# Commonly used methods of a BeautifulSoup object
def test2():
soup = BeautifulSoup(html_doc,'html.parser')
    print(soup.title)  # the title tag
    print(soup.title.name)  # name of the title tag
    print(soup.title.string)  # text inside the title tag
    print(soup.title.parent.name)  # name of the title tag's parent tag
    print(soup.p)  # the first p tag
    print(soup.p['class'])  # value of the first p tag's class attribute
    print(soup.a)  # the first a tag
    print(soup.find_all('a'))  # all a tags
    print(soup.find('p',{'class':'story'}).get_text())  # text of the p tag whose class is 'story'
    print(soup.find_all('a',href=re.compile(r'^http://example.com/')))  # regex: a tags whose href starts with the given prefix
# Fetch a Douban diary page
def test3():
req = request.urlopen("https://www.douban.com/note/665344082/").read().decode('utf-8')
soup = BeautifulSoup(req,'html.parser')
print(soup.title.string)
print(soup.find_all('a'))
pass
#test1()
#test2()
test3()
| [
"bs4.BeautifulSoup",
"urllib.request.urlopen",
"re.compile"
] | [((793, 831), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""html.parser"""'], {}), "(html_doc, 'html.parser')\n", (806, 831), False, 'from bs4 import BeautifulSoup\n'), ((910, 941), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""lxml"""'], {}), "(html_doc, 'lxml')\n", (923, 941), False, 'from bs4 import BeautifulSoup\n'), ((1025, 1060), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""lxml-xml"""'], {}), "(html_doc, 'lxml-xml')\n", (1038, 1060), False, 'from bs4 import BeautifulSoup\n'), ((1148, 1183), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""html5lib"""'], {}), "(html_doc, 'html5lib')\n", (1161, 1183), False, 'from bs4 import BeautifulSoup\n'), ((1258, 1296), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""html.parser"""'], {}), "(html_doc, 'html.parser')\n", (1271, 1296), False, 'from bs4 import BeautifulSoup\n'), ((1894, 1927), 'bs4.BeautifulSoup', 'BeautifulSoup', (['req', '"""html.parser"""'], {}), "(req, 'html.parser')\n", (1907, 1927), False, 'from bs4 import BeautifulSoup\n'), ((1700, 1734), 're.compile', 're.compile', (['"""^http://example.com/"""'], {}), "('^http://example.com/')\n", (1710, 1734), False, 'import re\n'), ((1802, 1859), 'urllib.request.urlopen', 'request.urlopen', (['"""https://www.douban.com/note/665344082/"""'], {}), "('https://www.douban.com/note/665344082/')\n", (1817, 1859), False, 'from urllib import request\n')] |
# coding: utf-8
from django.test import TestCase
from django.template import Context
from django.contrib.auth.models import User
from division_perm import models
from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func
from unittest.mock import Mock
class BaseTemplatetagsTest(TestCase):
fixtures = ['perm_func']
def setUp(self):
self.user = User.objects.create_user(username='tester', email='<EMAIL>', password='<PASSWORD>')
self.employee = models.Employee.objects.create(user=self.user, last_name=self.user.username)
self.division = models.Division.objects.create(name='testing')
self.division2 = models.Division.objects.create(name='testing2')
self.func = models.Func.objects.all()[0]
self.role = models.Role.objects.create(name='manager', code=self.func.code, level=2, division=self.division)
class CanModifyFilterTest(BaseTemplatetagsTest):
def test_can_modify_have_access(self):
self.employee.divisions.add(self.division)
self.employee.full_access.add(self.division)
have_access = can_modify(self.employee, self.user)
self.assertTrue(have_access)
def test_can_modify_have_not_access(self):
self.employee.divisions.clear()
self.employee.full_access.clear()
have_access = can_modify(self.employee, self.user)
self.assertFalse(have_access)
class CanFuncTagTest(BaseTemplatetagsTest):
def test_can_func_with_obj_and_is_modify_and_can_modify_is_false(self):
self.func.is_modify = 1
self.func.save()
context = {'user': self.user}
have_access = can_func(context, self.func.code, self.employee)
self.assertFalse(can_modify(self.employee, self.user))
self.assertFalse(have_access)
def test_can_func_with_obj_and_is_modify_and_can_modify_is_true_and_user_level_gt_func_level(self):
self.func.is_modify = 1
self.func.save()
context = {'user': self.user}
self.employee.divisions.add(self.division)
self.employee.full_access.add(self.division)
self.employee.roles.add(self.role)
self.assertFalse(self.func.level <= self.role.level)
self.role.level = self.func.level + 2
self.role.save()
self.assertTrue(self.func.level <= self.role.level)
have_access = can_func(context, self.func.code, self.employee)
self.assertTrue(can_modify(self.employee, self.user))
self.assertTrue(have_access)
def test_can_func_with_obj_and_is_modify_is_false_and_user_level_lt_func_level(self):
context = {'user': self.user}
self.func.is_modify = 0
self.func.save()
self.assertFalse(can_modify(self.employee, self.user))
self.employee.divisions.add(self.division)
self.employee.read_access.add(self.division)
self.employee.roles.add(self.role)
self.assertFalse(self.func.level <= self.role.level)
self.role.level = self.func.level - 2
self.role.save()
self.assertFalse(self.func.level <= self.role.level)
have_access = can_func(context, self.func.code, self.employee)
self.assertFalse(have_access)
def test_can_func_with_obj_is_none(self):
context = {'user': self.user}
self.employee.divisions.add(self.division)
have_access = can_func(context, self.func.code, self.employee)
self.assertFalse(have_access)
class BlockSuperIfCanFuncTagTest(BaseTemplatetagsTest):
def test_block_super_if_can_func(self):
mock_block = Mock()
mock_block.super = 'super'
self.func.is_modify = 0
self.func.save()
self.employee.divisions.add(self.division)
self.employee.read_access.add(self.division)
self.employee.roles.add(self.role)
self.assertFalse(self.func.level <= self.role.level)
self.role.level = self.func.level + 2
self.role.save()
context = Context({'user': self.user, 'block': mock_block})
result = block_super_if_can_func(context, self.func.code, self.employee)
self.assertEqual(result['content'], context['block'].super)
self.assertEqual(
result['user_can_func'],
can_func(Context({'user': self.user}), self.func.code, self.employee)
) | [
"division_perm.models.Division.objects.create",
"division_perm.templatetags.perm.block_super_if_can_func",
"unittest.mock.Mock",
"division_perm.templatetags.perm.can_func",
"division_perm.models.Employee.objects.create",
"division_perm.templatetags.perm.can_modify",
"django.template.Context",
"division_perm.models.Func.objects.all",
"division_perm.models.Role.objects.create",
"django.contrib.auth.models.User.objects.create_user"
] | [((397, 485), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""tester"""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(username='tester', email='<EMAIL>', password=\n '<PASSWORD>')\n", (421, 485), False, 'from django.contrib.auth.models import User\n'), ((505, 581), 'division_perm.models.Employee.objects.create', 'models.Employee.objects.create', ([], {'user': 'self.user', 'last_name': 'self.user.username'}), '(user=self.user, last_name=self.user.username)\n', (535, 581), False, 'from division_perm import models\n'), ((606, 652), 'division_perm.models.Division.objects.create', 'models.Division.objects.create', ([], {'name': '"""testing"""'}), "(name='testing')\n", (636, 652), False, 'from division_perm import models\n'), ((678, 725), 'division_perm.models.Division.objects.create', 'models.Division.objects.create', ([], {'name': '"""testing2"""'}), "(name='testing2')\n", (708, 725), False, 'from division_perm import models\n'), ((795, 895), 'division_perm.models.Role.objects.create', 'models.Role.objects.create', ([], {'name': '"""manager"""', 'code': 'self.func.code', 'level': '(2)', 'division': 'self.division'}), "(name='manager', code=self.func.code, level=2,\n division=self.division)\n", (821, 895), False, 'from division_perm import models\n'), ((1113, 1149), 'division_perm.templatetags.perm.can_modify', 'can_modify', (['self.employee', 'self.user'], {}), '(self.employee, self.user)\n', (1123, 1149), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((1339, 1375), 'division_perm.templatetags.perm.can_modify', 'can_modify', (['self.employee', 'self.user'], {}), '(self.employee, self.user)\n', (1349, 1375), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((1654, 1702), 'division_perm.templatetags.perm.can_func', 'can_func', (['context', 'self.func.code', 'self.employee'], {}), '(context, self.func.code, self.employee)\n', (1662, 1702), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((2365, 2413), 'division_perm.templatetags.perm.can_func', 'can_func', (['context', 'self.func.code', 'self.employee'], {}), '(context, self.func.code, self.employee)\n', (2373, 2413), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((3124, 3172), 'division_perm.templatetags.perm.can_func', 'can_func', (['context', 'self.func.code', 'self.employee'], {}), '(context, self.func.code, self.employee)\n', (3132, 3172), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((3369, 3417), 'division_perm.templatetags.perm.can_func', 'can_func', (['context', 'self.func.code', 'self.employee'], {}), '(context, self.func.code, self.employee)\n', (3377, 3417), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((3580, 3586), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3584, 3586), False, 'from unittest.mock import Mock\n'), ((3977, 4026), 'django.template.Context', 'Context', (["{'user': self.user, 'block': mock_block}"], {}), "({'user': self.user, 'block': mock_block})\n", (3984, 4026), False, 'from django.template import Context\n'), ((4044, 4107), 'division_perm.templatetags.perm.block_super_if_can_func', 'block_super_if_can_func', (['context', 'self.func.code', 'self.employee'], {}), '(context, self.func.code, 
self.employee)\n', (4067, 4107), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((746, 771), 'division_perm.models.Func.objects.all', 'models.Func.objects.all', ([], {}), '()\n', (769, 771), False, 'from division_perm import models\n'), ((1728, 1764), 'division_perm.templatetags.perm.can_modify', 'can_modify', (['self.employee', 'self.user'], {}), '(self.employee, self.user)\n', (1738, 1764), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((2438, 2474), 'division_perm.templatetags.perm.can_modify', 'can_modify', (['self.employee', 'self.user'], {}), '(self.employee, self.user)\n', (2448, 2474), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((2724, 2760), 'division_perm.templatetags.perm.can_modify', 'can_modify', (['self.employee', 'self.user'], {}), '(self.employee, self.user)\n', (2734, 2760), False, 'from division_perm.templatetags.perm import can_modify, can_func, block_super_if_can_func\n'), ((4260, 4288), 'django.template.Context', 'Context', (["{'user': self.user}"], {}), "({'user': self.user})\n", (4267, 4288), False, 'from django.template import Context\n')] |
# TODO: Copy all of your 04-Drawing.py program and put it below this comment.
import pygame
import sys
pygame.init()
screen = pygame.display.set_mode((640, 480))
nose_y = 240
eyes_y = 180
clock = pygame.time.Clock()
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
    screen.fill((0, 255, 255))  # teal
# face
pygame.draw.circle(screen, (255, 200, 100), (320, 240), 150, 0)
#eyes
#eyes_y = eyes_y -1
if eyes_y < 10:
eyes_y = 180
pygame.draw.circle(screen, (0, 0, 0), (245, eyes_y), 25, 0)
pygame.draw.circle(screen, (0, 0, 0), (395, eyes_y), 25, 0)
#nose
nose_y = nose_y + 1
if nose_y > 480:
nose_y = 240
pygame.draw.circle(screen, (255, 0, 0), (320, nose_y), 15, 0)
    # square mouth: rect(screen, color, (start), (width, height))
    #pygame.draw.rect(screen, (255, 250, 250), ((245, 290), (150, 30)), 0)
    # triangle mouth
pygame.draw.polygon(screen,(255,100,20),((350,280),(250,280),(300,300)))
pygame.display.update()
# TODO: In this module we'll make the nose move down until a certain y then reset to the top again. | [
"pygame.draw.circle",
"pygame.draw.polygon",
"pygame.init",
"sys.exit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.time.Clock",
"pygame.display.update"
] | [((108, 121), 'pygame.init', 'pygame.init', ([], {}), '()\n', (119, 121), False, 'import pygame\n'), ((132, 167), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(640, 480)'], {}), '((640, 480))\n', (155, 167), False, 'import pygame\n'), ((204, 223), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (221, 223), False, 'import pygame\n'), ((273, 291), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (289, 291), False, 'import pygame\n'), ((406, 469), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(255, 200, 100)', '(320, 240)', '(150)', '(0)'], {}), '(screen, (255, 200, 100), (320, 240), 150, 0)\n', (424, 469), False, 'import pygame\n'), ((549, 608), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(245, eyes_y)', '(25)', '(0)'], {}), '(screen, (0, 0, 0), (245, eyes_y), 25, 0)\n', (567, 608), False, 'import pygame\n'), ((613, 672), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(0, 0, 0)', '(395, eyes_y)', '(25)', '(0)'], {}), '(screen, (0, 0, 0), (395, eyes_y), 25, 0)\n', (631, 672), False, 'import pygame\n'), ((753, 814), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(255, 0, 0)', '(320, nose_y)', '(15)', '(0)'], {}), '(screen, (255, 0, 0), (320, nose_y), 15, 0)\n', (771, 814), False, 'import pygame\n'), ((978, 1064), 'pygame.draw.polygon', 'pygame.draw.polygon', (['screen', '(255, 100, 20)', '((350, 280), (250, 280), (300, 300))'], {}), '(screen, (255, 100, 20), ((350, 280), (250, 280), (300, \n 300)))\n', (997, 1064), False, 'import pygame\n'), ((1056, 1079), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1077, 1079), False, 'import pygame\n'), ((343, 353), 'sys.exit', 'sys.exit', ([], {}), '()\n', (351, 353), False, 'import sys\n')] |
import os
import re
import tweepy
import json
import pandas as pandas
import argparse
import time
import csv
import datetime
from collections import Counter
from emoji import UNICODE_EMOJI
with open('config.json') as f:
config = json.load(f)
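# Authenticate with the Twitter API using the credentials stored in config.json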
auth = tweepy.OAuthHandler(config['consumer_key'], config['consumer_secret'])
auth.set_access_token(config['access_token'], config['access_token_secret'])
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def get_tweets(user, n=100):
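    # Return up to n of the user's most recent tweets (retweets excluded), fetched in extended mode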
return list(tweepy.Cursor(api.user_timeline, count=200, screen_name=user, tweet_mode="extended",
include_rts=False).items(n))
def tweet_to_dict(tweet):
return {
'name': tweet.author.name,
'login': tweet.author.screen_name,
'likes': tweet.favorite_count,
'retweets': tweet.retweet_count,
'text': tweet.full_text,
'date': tweet.created_at,
'in_reply': tweet.in_reply_to_screen_name,
'source': tweet.source,
'coordinates': tweet.coordinates,
'geo': tweet.geo,
'id': tweet.id,
}
def get_follower_ids(user, n=0):
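    # Accept either a screen name (str) or a numeric user id and page through the account's follower ids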
if isinstance(user, str):
kwargs = {'screen_name': user}
else:
assert isinstance(user, int)
kwargs = {'id': user}
return list(tweepy.Cursor(api.followers_ids, count=5000, **kwargs).items(n))
def get_friend_ids(user, n=0):
if isinstance(user, str):
kwargs = {'screen_name': user}
else:
assert isinstance(user, int)
kwargs = {'id': user}
return list(tweepy.Cursor(api.friends_ids, count=5000, **kwargs).items(n))
def get_retweeter_ids(tweet_id):
return list(tweepy.Cursor(api.retweeters, count=100, id=tweet_id).items())
def ids_to_users(userids):
'''
Taken from https://stackoverflow.com/a/58234314/4110059
'''
users = []
u_count = len(userids)
for i in range(int(u_count/100) + 1):
end_loc = min((i + 1) * 100, u_count)
users.extend(api.lookup_users(user_ids=userids[i * 100:end_loc]))
return users
def get_followers(user, n=0):
ids = get_follower_ids(user, n=n)
return ids_to_users(ids)
def get_friends(user, n=0):
ids = get_friend_ids(user, n=n)
return ids_to_users(ids)
def user_to_dict(user):
return {
'name': user.name,
'screen_name': user.screen_name,
'date': user.created_at,
'description': user.description,
'followers_count': user.followers_count,
'following_count': user.friends_count,
'statuses_count': user.statuses_count,
'likes_count': user.favourites_count,
'default_background': user.default_profile,
'default_avatar': user.default_profile_image,
'verified': user.verified,
'listed_count': user.listed_count,
'protected': user.protected,
'id': user.id,
'location': user.location,
}
def build_dataframe(obj_list, dict_func):
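    # Convert a list of tweepy objects into a pandas DataFrame, one row per object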
obj_list = [dict_func(obj) for obj in obj_list]
return pandas.DataFrame(obj_list)
def get_pattern(df, pattern):
df = df.reset_index(drop=True)
matches = df['text'].str.extractall(r'(?P<mention>%s)' % pattern, re.IGNORECASE)
matches.index = matches.index.rename(['tweet', 'match'])
df.index = df.index.rename('tweet')
return matches.join(df)
def get_mentions(df):
return get_pattern(df, '@[a-zA-Z0-9_]+')
def get_hashtags(df):
return get_pattern(df, '#[a-zA-Z0-9_]+')
def count_patterns(df, patterns):
counts = []
for pat in patterns:
tmp = get_pattern(df, pat).reset_index()
tmp = tmp[['name', 'login', 'tweet']].drop_duplicates().groupby(['name', 'login']).count().reset_index()
tmp['pattern'] = pat
counts.append(tmp)
return pandas.concat(counts).sort_values(by=['tweet', 'name'], ascending=False)
def tweet_to_words(tweet, min_length=5):
words = tweet.split()
words = [w.lower() for w in words if len(w) >= min_length]
dumb_words = {'dans', 'cette', 'leur', 'merci', 'très', 'nous', 'pour', 'grenoble', 'notre',
'avec'}
words = [w for w in words if w not in dumb_words]
words = [w for w in words if not w.startswith('#') and not w.startswith('@')]
return words
def tweet_to_words_nltk(tweet, language='french', min_length=5):
import nltk
is_noun = lambda pos: pos[:2] == 'NN'
tokenized = nltk.word_tokenize(tweet, language=language)
words = [w for w in tokenized if len(w) >= min_length]
words = [w.lower() for w in words]
#words = [word for (word, pos) in nltk.pos_tag(words) if is_noun(pos)]
return words
def is_emoji(s):
'''
From https://stackoverflow.com/a/36217640/4110059
'''
return s in UNICODE_EMOJI
def tweet_to_emojis(tweet, min_length=None):
return [char for char in tweet if is_emoji(char)]
def count_words(df, split_func=tweet_to_words_nltk, min_length=5):
counters = {login: Counter() for login in df['login'].unique()}
for _, row in df.iterrows():
login = row['login']
words = split_func(row['text'], min_length=min_length)
for w in words:
counters[login][w] += 1
rows = []
for login, c in counters.items():
rows.extend([{'login': login, 'word': word, 'count': count} for word, count in c.items()])
df = pandas.DataFrame(rows)
return df
def tweets_of_user(args):
if args.full:
os.makedirs(args.output)
tweets = build_dataframe(get_tweets(args.obj, n=args.max_number), tweet_to_dict)
filename = os.path.join(args.output, 'tweets.csv') if args.full else args.output
write_csv(tweets, filename)
if not args.full:
return
if len(tweets) == 0:
return
ids = list(tweets['id'])
retweeters = []
for tweet_id in ids:
for usr_id in get_retweeter_ids(tweet_id):
retweeters.append({'tweet_id': tweet_id, 'user_id': usr_id})
retweeters = pandas.DataFrame(retweeters)
filename = os.path.join(args.output, 'retweets.csv')
write_csv(retweeters, filename)
ids = list(retweeters['user_id'].unique())
users = build_dataframe(ids_to_users(ids), user_to_dict)
filename = os.path.join(args.output, 'retweeters.csv')
write_csv(users, filename)
def followers_of_user(args):
df = build_dataframe(get_followers(args.obj, n=args.max_number), user_to_dict)
write_csv(df, args.output)
def friends_of_user(args):
df = build_dataframe(get_friends(args.obj, n=args.max_number), user_to_dict)
write_csv(df, args.output)
def write_csv(df, filename):
df.to_csv(filename, index=False, quoting=csv.QUOTE_ALL)
print(f'File {filename} created with a {len(df)}×{len(df.columns)} dataframe')
def main():
functions = [tweets_of_user, followers_of_user, friends_of_user]
choices = {func.__name__: func for func in functions}
parser = argparse.ArgumentParser(description='Download twitter data and dump it in a CSV')
parser.add_argument('--max_number', type=int, default=100,
help='Maximal number of items to download')
parser.add_argument('--output', type=str, default='/tmp/data.csv',
help='Output CSV file')
parser.add_argument('--full', action='store_true',
help='Download more data (e.g. users that retweeted the tweets)')
parser.add_argument('mode', choices=choices)
parser.add_argument('obj', type=str,
help='User login')
args = parser.parse_args()
t = time.time()
choices[args.mode](args)
t = time.time() - t
print(f'Total time: {t:.2f} seconds')
if __name__ == '__main__':
main()
| [
"nltk.word_tokenize",
"pandas.DataFrame",
"argparse.ArgumentParser",
"os.makedirs",
"tweepy.Cursor",
"os.path.join",
"collections.Counter",
"tweepy.API",
"pandas.concat",
"json.load",
"time.time",
"tweepy.OAuthHandler"
] | [((257, 327), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (["config['consumer_key']", "config['consumer_secret']"], {}), "(config['consumer_key'], config['consumer_secret'])\n", (276, 327), False, 'import tweepy\n'), ((411, 484), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n', (421, 484), False, 'import tweepy\n'), ((235, 247), 'json.load', 'json.load', (['f'], {}), '(f)\n', (244, 247), False, 'import json\n'), ((3019, 3045), 'pandas.DataFrame', 'pandas.DataFrame', (['obj_list'], {}), '(obj_list)\n', (3035, 3045), True, 'import pandas as pandas\n'), ((4387, 4431), 'nltk.word_tokenize', 'nltk.word_tokenize', (['tweet'], {'language': 'language'}), '(tweet, language=language)\n', (4405, 4431), False, 'import nltk\n'), ((5324, 5346), 'pandas.DataFrame', 'pandas.DataFrame', (['rows'], {}), '(rows)\n', (5340, 5346), True, 'import pandas as pandas\n'), ((5934, 5962), 'pandas.DataFrame', 'pandas.DataFrame', (['retweeters'], {}), '(retweeters)\n', (5950, 5962), True, 'import pandas as pandas\n'), ((5978, 6019), 'os.path.join', 'os.path.join', (['args.output', '"""retweets.csv"""'], {}), "(args.output, 'retweets.csv')\n", (5990, 6019), False, 'import os\n'), ((6179, 6222), 'os.path.join', 'os.path.join', (['args.output', '"""retweeters.csv"""'], {}), "(args.output, 'retweeters.csv')\n", (6191, 6222), False, 'import os\n'), ((6868, 6954), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Download twitter data and dump it in a CSV"""'}), "(description=\n 'Download twitter data and dump it in a CSV')\n", (6891, 6954), False, 'import argparse\n'), ((7517, 7528), 'time.time', 'time.time', ([], {}), '()\n', (7526, 7528), False, 'import time\n'), ((4934, 4943), 'collections.Counter', 'Counter', ([], {}), '()\n', (4941, 4943), False, 'from collections import Counter\n'), ((5415, 5439), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (5426, 5439), False, 'import os\n'), ((5540, 5579), 'os.path.join', 'os.path.join', (['args.output', '"""tweets.csv"""'], {}), "(args.output, 'tweets.csv')\n", (5552, 5579), False, 'import os\n'), ((7566, 7577), 'time.time', 'time.time', ([], {}), '()\n', (7575, 7577), False, 'import time\n'), ((3771, 3792), 'pandas.concat', 'pandas.concat', (['counts'], {}), '(counts)\n', (3784, 3792), True, 'import pandas as pandas\n'), ((532, 640), 'tweepy.Cursor', 'tweepy.Cursor', (['api.user_timeline'], {'count': '(200)', 'screen_name': 'user', 'tweet_mode': '"""extended"""', 'include_rts': '(False)'}), "(api.user_timeline, count=200, screen_name=user, tweet_mode=\n 'extended', include_rts=False)\n", (545, 640), False, 'import tweepy\n'), ((1298, 1352), 'tweepy.Cursor', 'tweepy.Cursor', (['api.followers_ids'], {'count': '(5000)'}), '(api.followers_ids, count=5000, **kwargs)\n', (1311, 1352), False, 'import tweepy\n'), ((1559, 1611), 'tweepy.Cursor', 'tweepy.Cursor', (['api.friends_ids'], {'count': '(5000)'}), '(api.friends_ids, count=5000, **kwargs)\n', (1572, 1611), False, 'import tweepy\n'), ((1673, 1726), 'tweepy.Cursor', 'tweepy.Cursor', (['api.retweeters'], {'count': '(100)', 'id': 'tweet_id'}), '(api.retweeters, count=100, id=tweet_id)\n', (1686, 1726), False, 'import tweepy\n')] |
from bs4 import BeautifulSoup
import urllib
import json
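# Fetch the petition page, extract the petition-detail element and dump its markup to ptext.json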
f = urllib.urlopen('https://petitions.whitehouse.gov/petition/legally-recognize-westboro-baptist-church-hate-group/DYf3pH2d')
soup = BeautifulSoup(f)
ss = str(soup.find(class_ = "petition-detail petition-detail-margin-right"))
json.dump(ss, open('ptext.json', 'wb')) | [
"bs4.BeautifulSoup",
"urllib.urlopen"
] | [((100, 231), 'urllib.urlopen', 'urllib.urlopen', (['"""https://petitions.whitehouse.gov/petition/legally-recognize-westboro-baptist-church-hate-group/DYf3pH2d"""'], {}), "(\n 'https://petitions.whitehouse.gov/petition/legally-recognize-westboro-baptist-church-hate-group/DYf3pH2d'\n )\n", (114, 231), False, 'import urllib\n'), ((229, 245), 'bs4.BeautifulSoup', 'BeautifulSoup', (['f'], {}), '(f)\n', (242, 245), False, 'from bs4 import BeautifulSoup\n')] |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from . import option_list
option_list['url_prefix'] = os.environ.get('DIGITS_URL_PREFIX', '')
| [
"os.environ.get"
] | [((132, 171), 'os.environ.get', 'os.environ.get', (['"""DIGITS_URL_PREFIX"""', '""""""'], {}), "('DIGITS_URL_PREFIX', '')\n", (146, 171), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Station characterization module is used to create an object based on
user input information (settings). The basic settings
from the user is in turn used to generate additional information/data
and the final result is an object that has all the attributes necessary to
run the station characterization function in stc_functions.
Example usage:
import stc_functions
myStation=StationChar('HTM')
# to generate a map with sensitivity binned in accordance with interval and degree user settings
map_representation_polar_graph_upd(myStation, 'sensitivity')
"""
__author__ = ["<NAME>"]
__credits__ = "ICOS Carbon Portal"
__license__ = "GPL-3.0"
__version__ = "0.1.2"
__maintainer__ = "ICOS Carbon Portal, elaborated products team"
__email__ = ['<EMAIL>', '<EMAIL>']
__status__ = "rc1"
__date__ = "2021-06-22"
import requests
import datetime as dt
import pandas as pd
import stc_functions
stcDataPath='/data/project/stc/'
#need settings in an object.
#second class? BinLabels.. object available within station
class StationChar():
"""
Create a station characterization object. This class intends to create
an instance of a station characterization, providing information
needed to run station characterization functions. The object will look
different depending on what information is provided in settings
(station, date range, binning specifications for the output maps etc)
Attributes include latitude and longitude of the station. Given that
the object myStationChar is created, this is an example of its use:
myStationChar.lat -> returns latitude as float.
all object attributes are listed in def __init__
"""
def __init__(self, settings):
"""
Initialize your Station Characterization object
"""
self.settings = settings # dictionary from the GUI with
self.stationId = None # string with the station id
self.stationName = None # full name of station
self.lat = None # float with latitude of station
self.lon = None # float with longitude of station
self.country = None # string with the station's country name
self.stationClass = None # string with station class (1 or 2 or none)
self.siteType = None # string with station type (mountain etc. or none)
self.dateRange = None # date range for average footprint specified by users
self.fp = None # numpy array 400 columns 480 rows (192000 cells) STILT footprint grid given daterange
self.fpLat = None # numpy array with STILT grid lat-values (480)
self.fpLon = None # # numpy array with STILT grid lon-values (400)
self.distances = None # distances from station to all cells in the STILT grid
self.degrees = None # degree angle between station and all cells in the STILT grid
        # possible refactor: a separate class/script that takes lat, lon and grid size and computes distances and angles ("version 2")
self.intervalBins = None # numpy array with the bin intervals for the maps
self.intervalLabels = None # list with the bin labels for the maps
self.dirBins = None # numpy array with the direction bins for the maps
self.dirLabels = None # list with the direction labels for the maps
self.figures = {} # dictionary to store figures and captions
# use figures['1'] = [fig, caption] to get figure 1....
# to add, see function add_figure()
        # functions to generate the object attributes
self._setStationData()
self._setDateRange()
self._setFootprint()
self._setDistancesAndDegrees()
self._setBinsAndLabels()
def _setStationData(self):
self.stationId = self.settings['stationCode']
self.country = self.settings['stilt']['geoinfo']['name']['common']
if 'icos' in self.settings.keys():
# ICOS station
self.stationName=self.settings['icos']['name']
self.lat=self.settings['icos']['lat']
self.lon=self.settings['icos']['lon']
#only going to be set for icos stations, not when only a STILT station
self.stationClass=self.settings['icos']['icosclass']
self.siteType=self.settings['icos']['siteType']
else:
# STILT station:
self.stationName=self.settings['stilt']['name']
self.lat=self.settings['stilt']['lat']
self.lon=self.settings['stilt']['lon']
def _setDateRange(self):
"""
Pandas dataframe with the dates/times for all footprints (every three
hours: 0, 3, 6, 9 etc.) from the start- and end date specified in
settings by the user.
"""
start_date=dt.datetime(self.settings['startYear'],self.settings['startMonth'],self.settings['startDay'],0)
end_date=dt.datetime(self.settings['endYear'],self.settings['endMonth'],self.settings['endDay'],0)
self.dateRange = stc_functions.date_range_hour_filtered(start_date, end_date, self.settings['timeOfDay'])
def _setFootprint(self):
"""
Generate an average footprint (fp) given the user date range.
Only footprints that the users are interested in is used (timeselect).
fpLat and fpLon corresponding to the average footprint are also set here.
"""
nfp, self.fp, self.fpLon, self.fpLat, title= stc_functions.read_aggreg_footprints(self.stationId, self.dateRange)
#station_lat, station_lon, grid_lat, grid_lon
def _setDistancesAndDegrees(self):
"""
Distances and degrees from the station to all cells in the STILT grid.
The information is used to bin the footprints based on angle and distance
        to the station. For the labelled atmospheric stations (as of 2020-07-15)
this is pre-calculated and accessed from csv-files. If it is any other
station, these values are calculated and set as attributes. The
calculated values are also appended to the csv-files.
"""
#saved distances and degrees for ICOS stations:
df_w_distances=pd.read_csv(stcDataPath+ "approved_stations_distances.csv")
df_w_degrees=pd.read_csv(stcDataPath + "approved_stations_degrees.csv")
#if not saved distances to all 192000 cells, calculate it.
if self.stationId in df_w_degrees.keys().tolist():
self.distances=df_w_distances[self.stationId]
self.degrees=df_w_degrees[self.stationId]
else:
self.distances=stc_functions.distances_from_point_to_grid_cells(self.lat, self.lon, self.fpLat, self.fpLon)
self.degrees=stc_functions.degrees_from_point_to_grid_cells(self.lat, self.lon, self.fpLat, self.fpLon)
#or other class... but belongs to object station characterization as much as distances and degrees to footprint?
#can now remove "define_bins_landcover_polar_graph"
def _setBinsAndLabels(self):
"""
Given the user specified intervals and degree-binning for the output
maps, bin arrays and labels are generated for both distance from station
and degree angle in relation to station. The labels are only used
within the function (heading for columns in pandas dataframe).
"""
km_intervals = self.settings['binInterval']
bin_size = self.settings['binSize']
self.intervalBins, self.intervalLabels, self.dirBins, self.dirLabels = stc_functions.define_bins_maprose(km_intervals, bin_size)
def add_figure(self, key, figure, caption):
"""
        Add a figure to the dictionary self.figures. To retrieve the figures
        you can use object.figures
Parameters
----------
key : INT|STR : figure number (1 = sensitivity, 2 = pointsource
                        3 = population, 4 = landcover_bar, 5 = seasonal,
                        6 = landcover_polar, 7 = multivar)
        figure : OBJECT : Matplotlib figure or similar, needs to have a function
Object.show() and object.savefig('filename')
caption : STR : String to be used as a caption text for example when
creating a pdf output
Returns: None.
"""
        # human-readable key to value assignment
short = {'1': 'sensitivity',
'2': 'pointsource',
'3': 'population',
'4': 'landcover_bar',
'5': 'seasonal',
'6': 'landcover_polar',
'7': 'multivar'}
self.figures[str(key)]=[figure, caption, short[str(key)]]
if __name__ == "__main__":
"""
execute only if run as a script
get a full list from the CarbonPortal SPARQL endpoint
and create a list of station objects
"""
msg = """
You have chosen to run this as a standalone script.
    Usually you would use it to create a station characterization
    class to be used when running station characterization functions
    in stc_functions. It is also used when pushing the update button in the GUI at
ICOS exploredata (exploredata.icos-cp.eu).
"""
print(msg)
| [
"datetime.datetime",
"stc_functions.read_aggreg_footprints",
"stc_functions.date_range_hour_filtered",
"pandas.read_csv",
"stc_functions.degrees_from_point_to_grid_cells",
"stc_functions.define_bins_maprose",
"stc_functions.distances_from_point_to_grid_cells"
] | [((5329, 5432), 'datetime.datetime', 'dt.datetime', (["self.settings['startYear']", "self.settings['startMonth']", "self.settings['startDay']", '(0)'], {}), "(self.settings['startYear'], self.settings['startMonth'], self.\n settings['startDay'], 0)\n", (5340, 5432), True, 'import datetime as dt\n'), ((5442, 5539), 'datetime.datetime', 'dt.datetime', (["self.settings['endYear']", "self.settings['endMonth']", "self.settings['endDay']", '(0)'], {}), "(self.settings['endYear'], self.settings['endMonth'], self.\n settings['endDay'], 0)\n", (5453, 5539), True, 'import datetime as dt\n'), ((5558, 5651), 'stc_functions.date_range_hour_filtered', 'stc_functions.date_range_hour_filtered', (['start_date', 'end_date', "self.settings['timeOfDay']"], {}), "(start_date, end_date, self.settings[\n 'timeOfDay'])\n", (5596, 5651), False, 'import stc_functions\n'), ((5986, 6054), 'stc_functions.read_aggreg_footprints', 'stc_functions.read_aggreg_footprints', (['self.stationId', 'self.dateRange'], {}), '(self.stationId, self.dateRange)\n', (6022, 6054), False, 'import stc_functions\n'), ((6709, 6769), 'pandas.read_csv', 'pd.read_csv', (["(stcDataPath + 'approved_stations_distances.csv')"], {}), "(stcDataPath + 'approved_stations_distances.csv')\n", (6720, 6769), True, 'import pandas as pd\n'), ((6790, 6848), 'pandas.read_csv', 'pd.read_csv', (["(stcDataPath + 'approved_stations_degrees.csv')"], {}), "(stcDataPath + 'approved_stations_degrees.csv')\n", (6801, 6848), True, 'import pandas as pd\n'), ((8066, 8123), 'stc_functions.define_bins_maprose', 'stc_functions.define_bins_maprose', (['km_intervals', 'bin_size'], {}), '(km_intervals, bin_size)\n', (8099, 8123), False, 'import stc_functions\n'), ((7144, 7241), 'stc_functions.distances_from_point_to_grid_cells', 'stc_functions.distances_from_point_to_grid_cells', (['self.lat', 'self.lon', 'self.fpLat', 'self.fpLon'], {}), '(self.lat, self.lon, self.\n fpLat, self.fpLon)\n', (7192, 7241), False, 'import stc_functions\n'), ((7263, 7358), 'stc_functions.degrees_from_point_to_grid_cells', 'stc_functions.degrees_from_point_to_grid_cells', (['self.lat', 'self.lon', 'self.fpLat', 'self.fpLon'], {}), '(self.lat, self.lon, self.\n fpLat, self.fpLon)\n', (7309, 7358), False, 'import stc_functions\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for the BigQuery side input example."""
import logging
import unittest
import google.cloud.dataflow as df
from google.cloud.dataflow.examples.cookbook import bigquery_side_input
class BigQuerySideInputTest(unittest.TestCase):
def test_create_groups(self):
p = df.Pipeline('DirectPipelineRunner')
group_ids_pcoll = p | df.Create('create_group_ids', ['A', 'B', 'C'])
corpus_pcoll = p | df.Create('create_corpus',
[{'f': 'corpus1'},
{'f': 'corpus2'},
{'f': 'corpus3'}])
words_pcoll = p | df.Create('create_words', [{'f': 'word1'},
{'f': 'word2'},
{'f': 'word3'}])
ignore_corpus_pcoll = p | df.Create('create_ignore_corpus', ['corpus1'])
ignore_word_pcoll = p | df.Create('create_ignore_word', ['word1'])
groups = bigquery_side_input.create_groups(group_ids_pcoll, corpus_pcoll,
words_pcoll, ignore_corpus_pcoll,
ignore_word_pcoll)
def group_matcher(actual):
self.assertEqual(len(actual), 3)
for group in actual:
self.assertEqual(len(group), 3)
self.assertTrue(group[1].startswith('corpus'))
self.assertNotEqual(group[1], 'corpus1')
self.assertTrue(group[2].startswith('word'))
self.assertNotEqual(group[2], 'word1')
df.assert_that(groups, group_matcher)
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| [
"google.cloud.dataflow.Create",
"logging.getLogger",
"google.cloud.dataflow.assert_that",
"google.cloud.dataflow.examples.cookbook.bigquery_side_input.create_groups",
"unittest.main",
"google.cloud.dataflow.Pipeline"
] | [((2237, 2252), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2250, 2252), False, 'import unittest\n'), ((877, 912), 'google.cloud.dataflow.Pipeline', 'df.Pipeline', (['"""DirectPipelineRunner"""'], {}), "('DirectPipelineRunner')\n", (888, 912), True, 'import google.cloud.dataflow as df\n'), ((1552, 1673), 'google.cloud.dataflow.examples.cookbook.bigquery_side_input.create_groups', 'bigquery_side_input.create_groups', (['group_ids_pcoll', 'corpus_pcoll', 'words_pcoll', 'ignore_corpus_pcoll', 'ignore_word_pcoll'], {}), '(group_ids_pcoll, corpus_pcoll,\n words_pcoll, ignore_corpus_pcoll, ignore_word_pcoll)\n', (1585, 1673), False, 'from google.cloud.dataflow.examples.cookbook import bigquery_side_input\n'), ((2111, 2148), 'google.cloud.dataflow.assert_that', 'df.assert_that', (['groups', 'group_matcher'], {}), '(groups, group_matcher)\n', (2125, 2148), True, 'import google.cloud.dataflow as df\n'), ((940, 986), 'google.cloud.dataflow.Create', 'df.Create', (['"""create_group_ids"""', "['A', 'B', 'C']"], {}), "('create_group_ids', ['A', 'B', 'C'])\n", (949, 986), True, 'import google.cloud.dataflow as df\n'), ((1010, 1096), 'google.cloud.dataflow.Create', 'df.Create', (['"""create_corpus"""', "[{'f': 'corpus1'}, {'f': 'corpus2'}, {'f': 'corpus3'}]"], {}), "('create_corpus', [{'f': 'corpus1'}, {'f': 'corpus2'}, {'f':\n 'corpus3'}])\n", (1019, 1096), True, 'import google.cloud.dataflow as df\n'), ((1216, 1291), 'google.cloud.dataflow.Create', 'df.Create', (['"""create_words"""', "[{'f': 'word1'}, {'f': 'word2'}, {'f': 'word3'}]"], {}), "('create_words', [{'f': 'word1'}, {'f': 'word2'}, {'f': 'word3'}])\n", (1225, 1291), True, 'import google.cloud.dataflow as df\n'), ((1420, 1466), 'google.cloud.dataflow.Create', 'df.Create', (['"""create_ignore_corpus"""', "['corpus1']"], {}), "('create_ignore_corpus', ['corpus1'])\n", (1429, 1466), True, 'import google.cloud.dataflow as df\n'), ((1495, 1537), 'google.cloud.dataflow.Create', 'df.Create', (['"""create_ignore_word"""', "['word1']"], {}), "('create_ignore_word', ['word1'])\n", (1504, 1537), True, 'import google.cloud.dataflow as df\n'), ((2192, 2211), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2209, 2211), False, 'import logging\n')] |
from yolov5.detect import run
import argparse
from pathlib import Path
import sys
import os
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str,default='yolov5/runs/train/exp6/weights/best.pt', help='model path(s)')
parser.add_argument('--source', type=str, default='scene_image_data/image', help='file/dir/URL/glob, 0 for webcam')
parser.add_argument('--data', type=str, default='yolov5/data/mydata.yaml', help='(optional) dataset.yaml path')
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
parser.add_argument('--conf-thres', type=float, default=0.75, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='show results')
parser.add_argument('--save-txt', action='store_true',default=True, help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--visualize', action='store_true',help='visualize features')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='yolov5/runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
return opt
def main(opt):
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
| [
"pathlib.Path.cwd",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((344, 369), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (367, 369), False, 'import argparse\n'), ((100, 114), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'from pathlib import Path\n'), ((288, 298), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (296, 298), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# This file is a part of cmd-gen.
#
# Copyright (c) 2019 <NAME>
# This file is licensed under The MIT License (MIT).
# You can find the full license text in LICENSE.md in the root of this project.
import argparse
import os
from hashlib import sha256
from pyctr.crypto import CryptoEngine, Keyslot
from pyctr.types.tmd import TitleMetadataReader
parser = argparse.ArgumentParser(description='Generate Nintendo 3DS CMD files.')
parser.add_argument('-t', '--tmd', help='tmd file', required=True)
parser.add_argument('-m', '--movable', help='movable.sed file', required=True)
parser.add_argument('-o', '--otp', help='otp.bin file, for TWLNAND contents')
parser.add_argument('-b', '--boot9', help='boot9 file')
parser.add_argument('--output-id', help='CMD content ID, default 00000001', default='00000001')
a = parser.parse_args()
MISSING = b'\xff\xff\xff\xff'
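# placeholder content ID used for indices that have no installed content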
crypto = CryptoEngine()
crypto.setup_sd_key_from_file(a.movable)
try:
crypto.setup_keys_from_otp_file(a.otp)
except FileNotFoundError:
pass
tmd = TitleMetadataReader.from_file(a.tmd)
dirname = os.path.dirname(a.tmd)
if tmd.title_id.startswith('0004008c'):
content_dir = os.path.join(dirname, '00000000')
else:
content_dir = dirname
# TODO: check Download Play
if tmd.title_id.startswith('00048'):
keyslot = Keyslot.CMACNANDDB
else:
keyslot = Keyslot.CMACSDNAND
highest_index = 0
content_ids = {}
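# maps content index -> (content ID bytes, CMAC digest)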
for chunk in tmd.chunk_records:
try:
with open(os.path.join(content_dir, chunk.id + '.app'), 'rb') as f:
highest_index = chunk.cindex
f.seek(0x100)
header = f.read(0x100)
id_bytes = bytes.fromhex(chunk.id)[::-1]
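            # CMAC covers the SHA-256 of: 0x100 header bytes read at offset 0x100 + content index (LE) + content ID (LE)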
data = header + chunk.cindex.to_bytes(4, 'little') + id_bytes
data_hash = sha256(data)
c = crypto.create_cmac_object(keyslot)
c.update(data_hash.digest())
content_ids[chunk.cindex] = (id_bytes, c.digest())
except FileNotFoundError:
        # currently unknown if there's actually a process for generating the cmac for missing contents
pass
# add content IDs up to the last one
ids_by_index = [MISSING] * (highest_index + 1)
installed_ids = []
cmacs = []
for x in range(len(ids_by_index)):
try:
info = content_ids[x]
except KeyError:
# the 3DS actually puts either random data, or generates it using an unknown process.
# probably doesn't matter though, since these contents aren't installed
cmacs.append(b'\xdd' * 16)
else:
ids_by_index[x] = info[0]
cmacs.append(info[1])
installed_ids.append(info[0])
installed_ids.sort(key=lambda x: int.from_bytes(x, 'little'))
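# .cmd body: [content ID | index count | installed count | 1 | CMAC of this header] + index->ID table + installed ID list + per-content CMACs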
final = bytes.fromhex(a.output_id)[::-1] \
+ len(ids_by_index).to_bytes(4, 'little') \
+ len(installed_ids).to_bytes(4, 'little') \
+ (1).to_bytes(4, 'little')
c = crypto.create_cmac_object(keyslot)
c.update(final)
final += c.digest()
final += b''.join(ids_by_index)
final += b''.join(installed_ids)
final += b''.join(cmacs)
os.makedirs(os.path.join(dirname, 'cmd'), exist_ok=True)
with open(os.path.join(dirname, 'cmd', a.output_id + '.cmd'), 'wb') as o:
o.write(final)
| [
"hashlib.sha256",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"pyctr.crypto.CryptoEngine",
"pyctr.types.tmd.TitleMetadataReader.from_file"
] | [((381, 452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Nintendo 3DS CMD files."""'}), "(description='Generate Nintendo 3DS CMD files.')\n", (404, 452), False, 'import argparse\n'), ((895, 909), 'pyctr.crypto.CryptoEngine', 'CryptoEngine', ([], {}), '()\n', (907, 909), False, 'from pyctr.crypto import CryptoEngine, Keyslot\n'), ((1040, 1076), 'pyctr.types.tmd.TitleMetadataReader.from_file', 'TitleMetadataReader.from_file', (['a.tmd'], {}), '(a.tmd)\n', (1069, 1076), False, 'from pyctr.types.tmd import TitleMetadataReader\n'), ((1087, 1109), 'os.path.dirname', 'os.path.dirname', (['a.tmd'], {}), '(a.tmd)\n', (1102, 1109), False, 'import os\n'), ((1168, 1201), 'os.path.join', 'os.path.join', (['dirname', '"""00000000"""'], {}), "(dirname, '00000000')\n", (1180, 1201), False, 'import os\n'), ((3051, 3079), 'os.path.join', 'os.path.join', (['dirname', '"""cmd"""'], {}), "(dirname, 'cmd')\n", (3063, 3079), False, 'import os\n'), ((3106, 3156), 'os.path.join', 'os.path.join', (['dirname', '"""cmd"""', "(a.output_id + '.cmd')"], {}), "(dirname, 'cmd', a.output_id + '.cmd')\n", (3118, 3156), False, 'import os\n'), ((1780, 1792), 'hashlib.sha256', 'sha256', (['data'], {}), '(data)\n', (1786, 1792), False, 'from hashlib import sha256\n'), ((1468, 1512), 'os.path.join', 'os.path.join', (['content_dir', "(chunk.id + '.app')"], {}), "(content_dir, chunk.id + '.app')\n", (1480, 1512), False, 'import os\n')] |
import re
import requests
import werkzeug
from flask import Flask
from flask_cors import CORS
from flask_restful import abort, reqparse
# import function for finding related news headlines and similarity checking
from Preprocess.tf_idf_all_headline_news_similarity import cosine_similarity_T
# import function for OCR
from EasyOCR.EasyOCR_model import OCR_with_user_image
# import function for fetching the user's post content from Facebook
from News_fetcher.facebook import facebook
# import function for checking that a link's domain is www.facebook.com
from urllib.parse import urlparse
app = Flask(__name__)
CORS(app)
@app.route('/extension', methods=['GET', 'POST'])
def extension():
input_add_args = reqparse.RequestParser()
input_add_args.add_argument(
"message", type=str, help="กรุณาระบุข้อความ input เป็นตัวอักษรความยาวไม่เกิน 1000 ตัวอักษร")
input_add_args.add_argument(
"image", type=werkzeug.datastructures.FileStorage, location='files')
input_add_args.add_argument(
"message_type", type=str, help="กรุณาระบุประเภท Input เป็นตัวอักษร")
args = input_add_args.parse_args()
message_type = ['link', 'content', 'image', 'image_url']
    # case where the input type is not specified
if args['message_type'] not in message_type:
abort(400, message="กรุณาระบุประเภทของ input เป็น link, content, image หรือ image_url")
else:
        if args['message'] and args['message'].isspace():
abort(422, message="กรุณาใส่ข้อความ , ลิงค์ หรือ URL ของรูปภาพ")
        # case where no message is attached
if args['message_type'] == 'link':
            # case where no link is attached
if not args['message']:
abort(422, message="กรุณาใส่ link ของ facebook")
else:
hostname = urlparse(args['message']).hostname
                # case where the link's domain is not a facebook post
                if not hostname or not re.search('facebook', hostname):
abort(400, message="กรุณาระบุลิงค์ของโพสต์ Facebook")
else:
try:
facebook_fetch = facebook(args["message"])
post_facebook = facebook_fetch.fetch_page()
all_result_with_url = cosine_similarity_T(
10, post_facebook["content"])
queryObject = {
"message": post_facebook["content"],
"message_type": args["message_type"],
"result": all_result_with_url
}
except:
abort(400, message="กรุณาระบุลิงค์ของโพสต์ให้ถูกต้อง")
elif args['message_type'] == 'content':
            # case where no content (text) is attached
if not args['message']:
abort(422, message="กรุณาใส่ข้อความ")
else:
all_result_with_url = cosine_similarity_T(10, args["message"])
queryObject = {
"message": args["message"],
"message_type": args["message_type"],
"result": all_result_with_url
}
elif args['message_type'] == 'image':
            # case where no image is attached
if not args['message']:
abort(422, message="กรุณาอัพโหลดรูปภาพ")
else:
image_file = args['image']
image_file.save("EasyOCR/OCR_User_Pic/tmp.jpg")
text_from_image = OCR_with_user_image(
"EasyOCR/OCR_User_Pic/tmp.jpg")
all_result_with_url = cosine_similarity_T(10, text_from_image)
queryObject = {
"message": text_from_image,
"message_type": args["message_type"],
"result": all_result_with_url
}
else:
            # case where no image URL is attached
if not args['message']:
abort(422, message="กรุณาใส่ URL ของรูปภาพ")
else:
response = requests.get(args["message"])
with open('EasyOCR/OCR_User_Pic/tmp.jpg', 'wb') as file:
file.write(response.content)
text_from_image = OCR_with_user_image(
"EasyOCR/OCR_User_Pic/tmp.jpg")
all_result_with_url = cosine_similarity_T(10, text_from_image)
queryObject = {
"message": text_from_image,
"message_type": args["message_type"],
"result": all_result_with_url
}
return queryObject
if __name__ == '__main__':
app.run()
| [
"urllib.parse.urlparse",
"flask_restful.reqparse.RequestParser",
"flask_cors.CORS",
"flask.Flask",
"EasyOCR.EasyOCR_model.OCR_with_user_image",
"requests.get",
"Preprocess.tf_idf_all_headline_news_similarity.cosine_similarity_T",
"News_fetcher.facebook.facebook",
"flask_restful.abort",
"re.search"
] | [((593, 608), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'from flask import Flask\n'), ((609, 618), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (613, 618), False, 'from flask_cors import CORS\n'), ((709, 733), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (731, 733), False, 'from flask_restful import abort, reqparse\n'), ((1283, 1375), 'flask_restful.abort', 'abort', (['(400)'], {'message': '"""กรุณาระบุประเภทของ input เป็น link, content, image หรือ image_url"""'}), "(400, message=\n 'กรุณาระบุประเภทของ input เป็น link, content, image หรือ image_url')\n", (1288, 1375), False, 'from flask_restful import abort, reqparse\n'), ((1431, 1495), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""กรุณาใส่ข้อความ , ลิงค์ หรือ URL ของรูปภาพ"""'}), "(422, message='กรุณาใส่ข้อความ , ลิงค์ หรือ URL ของรูปภาพ')\n", (1436, 1495), False, 'from flask_restful import abort, reqparse\n'), ((1684, 1732), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""กรุณาใส่ link ของ facebook"""'}), "(422, message='กรุณาใส่ link ของ facebook')\n", (1689, 1732), False, 'from flask_restful import abort, reqparse\n'), ((1778, 1803), 'urllib.parse.urlparse', 'urlparse', (["args['message']"], {}), "(args['message'])\n", (1786, 1803), False, 'from urllib.parse import urlparse\n'), ((1903, 1934), 're.search', 're.search', (['hostname', '"""facebook"""'], {}), "(hostname, 'facebook')\n", (1912, 1934), False, 'import re\n'), ((1956, 2009), 'flask_restful.abort', 'abort', (['(400)'], {'message': '"""กรุณาระบุลิงค์ของโพสต์ Facebook"""'}), "(400, message='กรุณาระบุลิงค์ของโพสต์ Facebook')\n", (1961, 2009), False, 'from flask_restful import abort, reqparse\n'), ((2831, 2868), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""กรุณาใส่ข้อความ"""'}), "(422, message='กรุณาใส่ข้อความ')\n", (2836, 2868), False, 'from flask_restful import abort, reqparse\n'), ((2925, 2965), 'Preprocess.tf_idf_all_headline_news_similarity.cosine_similarity_T', 'cosine_similarity_T', (['(10)', "args['message']"], {}), "(10, args['message'])\n", (2944, 2965), False, 'from Preprocess.tf_idf_all_headline_news_similarity import cosine_similarity_T\n'), ((2098, 2123), 'News_fetcher.facebook.facebook', 'facebook', (["args['message']"], {}), "(args['message'])\n", (2106, 2123), False, 'from News_fetcher.facebook import facebook\n'), ((2238, 2287), 'Preprocess.tf_idf_all_headline_news_similarity.cosine_similarity_T', 'cosine_similarity_T', (['(10)', "post_facebook['content']"], {}), "(10, post_facebook['content'])\n", (2257, 2287), False, 'from Preprocess.tf_idf_all_headline_news_similarity import cosine_similarity_T\n'), ((3319, 3359), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""กรุณาอัพโหลดรูปภาพ"""'}), "(422, message='กรุณาอัพโหลดรูปภาพ')\n", (3324, 3359), False, 'from flask_restful import abort, reqparse\n'), ((3520, 3571), 'EasyOCR.EasyOCR_model.OCR_with_user_image', 'OCR_with_user_image', (['"""EasyOCR/OCR_User_Pic/tmp.jpg"""'], {}), "('EasyOCR/OCR_User_Pic/tmp.jpg')\n", (3539, 3571), False, 'from EasyOCR.EasyOCR_model import OCR_with_user_image\n'), ((3631, 3671), 'Preprocess.tf_idf_all_headline_news_similarity.cosine_similarity_T', 'cosine_similarity_T', (['(10)', 'text_from_image'], {}), '(10, text_from_image)\n', (3650, 3671), False, 'from Preprocess.tf_idf_all_headline_news_similarity import cosine_similarity_T\n'), ((4002, 4046), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""กรุณาใส่ URL ของรูปภาพ"""'}), "(422, 
message='กรุณาใส่ URL ของรูปภาพ')\n", (4007, 4046), False, 'from flask_restful import abort, reqparse\n'), ((4092, 4121), 'requests.get', 'requests.get', (["args['message']"], {}), "(args['message'])\n", (4104, 4121), False, 'import requests\n'), ((4279, 4330), 'EasyOCR.EasyOCR_model.OCR_with_user_image', 'OCR_with_user_image', (['"""EasyOCR/OCR_User_Pic/tmp.jpg"""'], {}), "('EasyOCR/OCR_User_Pic/tmp.jpg')\n", (4298, 4330), False, 'from EasyOCR.EasyOCR_model import OCR_with_user_image\n'), ((4390, 4430), 'Preprocess.tf_idf_all_headline_news_similarity.cosine_similarity_T', 'cosine_similarity_T', (['(10)', 'text_from_image'], {}), '(10, text_from_image)\n', (4409, 4430), False, 'from Preprocess.tf_idf_all_headline_news_similarity import cosine_similarity_T\n'), ((2625, 2679), 'flask_restful.abort', 'abort', (['(400)'], {'message': '"""กรุณาระบุลิงค์ของโพสต์ให้ถูกต้อง"""'}), "(400, message='กรุณาระบุลิงค์ของโพสต์ให้ถูกต้อง')\n", (2630, 2679), False, 'from flask_restful import abort, reqparse\n')] |
from constraint_handler import *
from rated_statistic_storage import *
import rospy
from arni_msgs.msg import RatedStatistics
from arni_core.host_lookup import *
from std_srvs.srv import Empty
import helper
import time
class CountermeasureNode(object):
"""A ROS node.
Evaluates incoming rated statistics with a list of constraints.
    If those constraints turn out to be true, appropriate action is taken.
"""
    def __init__(self):
        """Set up the node; constraints are evaluated and old statistics
        are cleaned up periodically via ROS timers."""
super(CountermeasureNode, self).__init__()
rospy.init_node("countermeasure", log_level=rospy.DEBUG)
self.__enabled = False
self.__init_params()
#: The storage of all incoming rated statistic.
self.__rated_statistic_storage = RatedStatisticStorage()
#: The handler for all constraints.
self.__constraint_handler = ConstraintHandler(
self.__rated_statistic_storage)
#: The time to wait between two evaluations.
self.__evaluation_period = helper.get_param_duration(
helper.ARNI_CTM_CFG_NS + "evaluation_period")
self.__register_subscriber()
self.__register_services()
def __register_subscriber(self):
"""Register to the rated statistics."""
rospy.Subscriber(
"/statistics_rated", RatedStatistics,
self.__rated_statistic_storage.callback_rated_statistic)
rospy.Subscriber(
"/statistics_rated", RatedStatistics,
HostLookup().callback_rated)
def __register_services(self):
"""Register all services"""
rospy.Service(
"~reload_constraints", Empty, self.__handle_reload_constraints)
def __handle_reload_constraints(self, req):
"""Reload all constraints from param server."""
self.__constraint_handler = ConstraintHandler(
self.__rated_statistic_storage)
return []
def __callback_evaluate_and_react(self, event):
""" Evaluate every constraint and execute reactions
if seemed necessary by the evaluation.
"""
try:
if self.__enabled:
self.__constraint_handler.evaluate_constraints()
self.__constraint_handler.execute_reactions()
except rospy.ROSInterruptException:
pass
def loop(self):
# simulation? wait for begin
while rospy.Time.now() == rospy.Time(0):
time.sleep(0.01)
#check periodically for enabled_statistic
rospy.Timer(
rospy.Duration(
rospy.get_param("arni/check_enabled_interval", 10)),
self.__callback_enable)
# evaluate periodically
rospy.Timer(
self.__evaluation_period,
self.__callback_evaluate_and_react)
rospy.spin()
def __callback_enable(self, event):
"""Simple callback to check if statistics are enabled."""
self.__enabled = rospy.get_param("/enable_statistics", False)
def __init_params(self):
"""Initializes params on the parameter server,
if they are not already set.
"""
default = {
"reaction_autonomy_level": 100,
"storage_timeout": 10,
"evaluation_period": 1,
"default/min_reaction_interval": 10,
"default/reaction_timeout": 30
}
for param in default:
if not rospy.has_param(helper.ARNI_CTM_CFG_NS + param):
rospy.set_param(helper.ARNI_CTM_CFG_NS + param, default[param])
def main():
try:
cn = CountermeasureNode()
# rospy.loginfo(rospy.get_caller_id() + ": im on ")
cn.loop()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
| [
"rospy.Timer",
"rospy.init_node",
"helper.get_param_duration",
"rospy.Service",
"rospy.get_param",
"rospy.has_param",
"time.sleep",
"rospy.set_param",
"rospy.Time.now",
"rospy.Time",
"rospy.spin",
"rospy.Subscriber"
] | [((608, 664), 'rospy.init_node', 'rospy.init_node', (['"""countermeasure"""'], {'log_level': 'rospy.DEBUG'}), "('countermeasure', log_level=rospy.DEBUG)\n", (623, 664), False, 'import rospy\n'), ((1082, 1153), 'helper.get_param_duration', 'helper.get_param_duration', (["(helper.ARNI_CTM_CFG_NS + 'evaluation_period')"], {}), "(helper.ARNI_CTM_CFG_NS + 'evaluation_period')\n", (1107, 1153), False, 'import helper\n'), ((1334, 1450), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/statistics_rated"""', 'RatedStatistics', 'self.__rated_statistic_storage.callback_rated_statistic'], {}), "('/statistics_rated', RatedStatistics, self.\n __rated_statistic_storage.callback_rated_statistic)\n", (1350, 1450), False, 'import rospy\n'), ((1668, 1745), 'rospy.Service', 'rospy.Service', (['"""~reload_constraints"""', 'Empty', 'self.__handle_reload_constraints'], {}), "('~reload_constraints', Empty, self.__handle_reload_constraints)\n", (1681, 1745), False, 'import rospy\n'), ((2767, 2840), 'rospy.Timer', 'rospy.Timer', (['self.__evaluation_period', 'self.__callback_evaluate_and_react'], {}), '(self.__evaluation_period, self.__callback_evaluate_and_react)\n', (2778, 2840), False, 'import rospy\n'), ((2874, 2886), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2884, 2886), False, 'import rospy\n'), ((3019, 3063), 'rospy.get_param', 'rospy.get_param', (['"""/enable_statistics"""', '(False)'], {}), "('/enable_statistics', False)\n", (3034, 3063), False, 'import rospy\n'), ((2457, 2473), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2471, 2473), False, 'import rospy\n'), ((2477, 2490), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (2487, 2490), False, 'import rospy\n'), ((2504, 2520), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2514, 2520), False, 'import time\n'), ((2637, 2687), 'rospy.get_param', 'rospy.get_param', (['"""arni/check_enabled_interval"""', '(10)'], {}), "('arni/check_enabled_interval', 10)\n", (2652, 2687), False, 'import rospy\n'), ((3485, 3532), 'rospy.has_param', 'rospy.has_param', (['(helper.ARNI_CTM_CFG_NS + param)'], {}), '(helper.ARNI_CTM_CFG_NS + param)\n', (3500, 3532), False, 'import rospy\n'), ((3550, 3613), 'rospy.set_param', 'rospy.set_param', (['(helper.ARNI_CTM_CFG_NS + param)', 'default[param]'], {}), '(helper.ARNI_CTM_CFG_NS + param, default[param])\n', (3565, 3613), False, 'import rospy\n')] |
import pendulum
import pytest
from uuid import uuid4
from unittest.mock import Mock
from threading import Thread
from atst.domain.csp.cloud import MockCloudProvider
from atst.jobs import (
RecordEnvironmentFailure,
RecordEnvironmentRoleFailure,
do_create_environment,
do_create_atat_admin_user,
dispatch_create_environment,
dispatch_create_atat_admin_user,
create_environment,
dispatch_provision_user,
do_provision_user,
)
from atst.models.utils import claim_for_update
from atst.domain.exceptions import ClaimFailedException
from tests.factories import (
EnvironmentFactory,
EnvironmentRoleFactory,
PortfolioFactory,
ApplicationRoleFactory,
)
from atst.models import EnvironmentRole, ApplicationRoleStatus
@pytest.fixture(autouse=True, scope="function")
def csp():
return Mock(wraps=MockCloudProvider({}, with_delay=False, with_failure=False))
def test_environment_job_failure(celery_app, celery_worker):
@celery_app.task(bind=True, base=RecordEnvironmentFailure)
def _fail_hard(self, environment_id=None):
raise ValueError("something bad happened")
environment = EnvironmentFactory.create()
celery_worker.reload()
# Use apply instead of delay since we are testing the on_failure hook only
task = _fail_hard.apply(kwargs={"environment_id": environment.id})
with pytest.raises(ValueError):
task.get()
assert environment.job_failures
job_failure = environment.job_failures[0]
assert job_failure.task == task
def test_environment_role_job_failure(celery_app, celery_worker):
@celery_app.task(bind=True, base=RecordEnvironmentRoleFailure)
def _fail_hard(self, environment_role_id=None):
raise ValueError("something bad happened")
role = EnvironmentRoleFactory.create()
celery_worker.reload()
# Use apply instead of delay since we are testing the on_failure hook only
task = _fail_hard.apply(kwargs={"environment_role_id": role.id})
with pytest.raises(ValueError):
task.get()
assert role.job_failures
job_failure = role.job_failures[0]
assert job_failure.task == task
now = pendulum.now()
yesterday = now.subtract(days=1)
tomorrow = now.add(days=1)
def test_create_environment_job(session, csp):
environment = EnvironmentFactory.create()
do_create_environment(csp, environment.id)
session.refresh(environment)
assert environment.cloud_id
def test_create_environment_job_is_idempotent(csp, session):
environment = EnvironmentFactory.create(cloud_id=uuid4().hex)
do_create_environment(csp, environment.id)
csp.create_environment.assert_not_called()
def test_create_atat_admin_user(csp, session):
environment = EnvironmentFactory.create(cloud_id="something")
do_create_atat_admin_user(csp, environment.id)
session.refresh(environment)
assert environment.root_user_info
def test_dispatch_create_environment(session, monkeypatch):
# Given that I have a portfolio with an active CLIN and two environments,
# one of which is deleted
portfolio = PortfolioFactory.create(
applications=[{"environments": [{}, {}]}],
task_orders=[
{
"create_clins": [
{
"start_date": pendulum.now().subtract(days=1),
"end_date": pendulum.now().add(days=1),
}
]
}
],
)
[e1, e2] = portfolio.applications[0].environments
e2.deleted = True
session.add(e2)
session.commit()
mock = Mock()
monkeypatch.setattr("atst.jobs.create_environment", mock)
# When dispatch_create_environment is called
dispatch_create_environment.run()
# It should cause the create_environment task to be called once with the
# non-deleted environment
mock.delay.assert_called_once_with(environment_id=e1.id)
def test_dispatch_create_atat_admin_user(session, monkeypatch):
portfolio = PortfolioFactory.create(
applications=[
{"environments": [{"cloud_id": uuid4().hex, "root_user_info": None}]}
],
task_orders=[
{
"create_clins": [
{
"start_date": pendulum.now().subtract(days=1),
"end_date": pendulum.now().add(days=1),
}
]
}
],
)
mock = Mock()
monkeypatch.setattr("atst.jobs.create_atat_admin_user", mock)
environment = portfolio.applications[0].environments[0]
dispatch_create_atat_admin_user.run()
mock.delay.assert_called_once_with(environment_id=environment.id)
def test_create_environment_no_dupes(session, celery_app, celery_worker):
portfolio = PortfolioFactory.create(
applications=[
{"environments": [{"cloud_id": uuid4().hex, "root_user_info": {}}]}
],
task_orders=[
{
"create_clins": [
{
"start_date": pendulum.now().subtract(days=1),
"end_date": pendulum.now().add(days=1),
}
]
}
],
)
environment = portfolio.applications[0].environments[0]
# create_environment is run twice on the same environment
create_environment.run(environment_id=environment.id)
session.refresh(environment)
first_cloud_id = environment.cloud_id
create_environment.run(environment_id=environment.id)
session.refresh(environment)
# The environment's cloud_id was not overwritten in the second run
assert environment.cloud_id == first_cloud_id
# The environment's claim was released
assert environment.claimed_until == None
def test_claim_for_update(session):
portfolio = PortfolioFactory.create(
applications=[
{"environments": [{"cloud_id": uuid4().hex, "root_user_info": {}}]}
],
task_orders=[
{
"create_clins": [
{
"start_date": pendulum.now().subtract(days=1),
"end_date": pendulum.now().add(days=1),
}
]
}
],
)
environment = portfolio.applications[0].environments[0]
satisfied_claims = []
exceptions = []
# Two threads race to do work on environment and check out the lock
class FirstThread(Thread):
def run(self):
try:
with claim_for_update(environment):
satisfied_claims.append("FirstThread")
except ClaimFailedException:
exceptions.append("FirstThread")
class SecondThread(Thread):
def run(self):
try:
with claim_for_update(environment):
satisfied_claims.append("SecondThread")
except ClaimFailedException:
exceptions.append("SecondThread")
t1 = FirstThread()
t2 = SecondThread()
t1.start()
t2.start()
t1.join()
t2.join()
session.refresh(environment)
assert len(satisfied_claims) == 1
assert len(exceptions) == 1
if satisfied_claims == ["FirstThread"]:
assert exceptions == ["SecondThread"]
else:
assert satisfied_claims == ["SecondThread"]
assert exceptions == ["FirstThread"]
# The claim is released
assert environment.claimed_until is None
def test_dispatch_provision_user(csp, session, celery_app, celery_worker, monkeypatch):
# Given that I have four environment roles:
# (A) one of which has a completed status
# (B) one of which has an environment that has not been provisioned
# (C) one of which is pending, has a provisioned environment but an inactive application role
# (D) one of which is pending, has a provisioned environment and has an active application role
provisioned_environment = EnvironmentFactory.create(
cloud_id="cloud_id", root_user_info={}
)
unprovisioned_environment = EnvironmentFactory.create()
_er_a = EnvironmentRoleFactory.create(
environment=provisioned_environment, status=EnvironmentRole.Status.COMPLETED
)
_er_b = EnvironmentRoleFactory.create(
environment=unprovisioned_environment, status=EnvironmentRole.Status.PENDING
)
_er_c = EnvironmentRoleFactory.create(
environment=unprovisioned_environment,
status=EnvironmentRole.Status.PENDING,
application_role=ApplicationRoleFactory(status=ApplicationRoleStatus.PENDING),
)
er_d = EnvironmentRoleFactory.create(
environment=provisioned_environment,
status=EnvironmentRole.Status.PENDING,
application_role=ApplicationRoleFactory(status=ApplicationRoleStatus.ACTIVE),
)
mock = Mock()
monkeypatch.setattr("atst.jobs.provision_user", mock)
# When I dispatch the user provisioning task
dispatch_provision_user.run()
# I expect it to dispatch only one call, to EnvironmentRole D
mock.delay.assert_called_once_with(environment_role_id=er_d.id)
def test_do_provision_user(csp, session):
# Given that I have an EnvironmentRole with a provisioned environment
credentials = MockCloudProvider(())._auth_credentials
provisioned_environment = EnvironmentFactory.create(
cloud_id="cloud_id", root_user_info={"credentials": credentials}
)
environment_role = EnvironmentRoleFactory.create(
environment=provisioned_environment,
status=EnvironmentRole.Status.PENDING,
role="my_role",
)
# When I call the user provisoning task
do_provision_user(csp=csp, environment_role_id=environment_role.id)
session.refresh(environment_role)
# I expect that the CSP create_or_update_user method will be called
csp.create_or_update_user.assert_called_once_with(
credentials, environment_role, "my_role"
)
# I expect that the EnvironmentRole now has a csp_user_id
assert environment_role.csp_user_id
| [
"atst.jobs.do_create_atat_admin_user",
"atst.domain.csp.cloud.MockCloudProvider",
"unittest.mock.Mock",
"atst.jobs.dispatch_create_atat_admin_user.run",
"tests.factories.EnvironmentFactory.create",
"atst.models.utils.claim_for_update",
"uuid.uuid4",
"atst.jobs.create_environment.run",
"atst.jobs.dispatch_provision_user.run",
"pytest.raises",
"atst.jobs.do_provision_user",
"atst.jobs.dispatch_create_environment.run",
"pytest.fixture",
"atst.jobs.do_create_environment",
"pendulum.now",
"tests.factories.ApplicationRoleFactory",
"tests.factories.EnvironmentRoleFactory.create"
] | [((763, 809), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""function"""'}), "(autouse=True, scope='function')\n", (777, 809), False, 'import pytest\n'), ((2153, 2167), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (2165, 2167), False, 'import pendulum\n'), ((1147, 1174), 'tests.factories.EnvironmentFactory.create', 'EnvironmentFactory.create', ([], {}), '()\n', (1172, 1174), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((1777, 1808), 'tests.factories.EnvironmentRoleFactory.create', 'EnvironmentRoleFactory.create', ([], {}), '()\n', (1806, 1808), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((2295, 2322), 'tests.factories.EnvironmentFactory.create', 'EnvironmentFactory.create', ([], {}), '()\n', (2320, 2322), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((2327, 2369), 'atst.jobs.do_create_environment', 'do_create_environment', (['csp', 'environment.id'], {}), '(csp, environment.id)\n', (2348, 2369), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((2569, 2611), 'atst.jobs.do_create_environment', 'do_create_environment', (['csp', 'environment.id'], {}), '(csp, environment.id)\n', (2590, 2611), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((2727, 2774), 'tests.factories.EnvironmentFactory.create', 'EnvironmentFactory.create', ([], {'cloud_id': '"""something"""'}), "(cloud_id='something')\n", (2752, 2774), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((2779, 2825), 'atst.jobs.do_create_atat_admin_user', 'do_create_atat_admin_user', (['csp', 'environment.id'], {}), '(csp, environment.id)\n', (2804, 2825), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((3587, 3593), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3591, 3593), False, 'from unittest.mock import Mock\n'), ((3710, 3743), 'atst.jobs.dispatch_create_environment.run', 'dispatch_create_environment.run', ([], {}), '()\n', (3741, 3743), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((4445, 4451), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (4449, 4451), False, 'from unittest.mock import Mock\n'), ((4583, 4620), 'atst.jobs.dispatch_create_atat_admin_user.run', 'dispatch_create_atat_admin_user.run', ([], {}), '()\n', (4618, 4620), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, 
dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((5348, 5401), 'atst.jobs.create_environment.run', 'create_environment.run', ([], {'environment_id': 'environment.id'}), '(environment_id=environment.id)\n', (5370, 5401), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((5483, 5536), 'atst.jobs.create_environment.run', 'create_environment.run', ([], {'environment_id': 'environment.id'}), '(environment_id=environment.id)\n', (5505, 5536), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((7975, 8040), 'tests.factories.EnvironmentFactory.create', 'EnvironmentFactory.create', ([], {'cloud_id': '"""cloud_id"""', 'root_user_info': '{}'}), "(cloud_id='cloud_id', root_user_info={})\n", (8000, 8040), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((8087, 8114), 'tests.factories.EnvironmentFactory.create', 'EnvironmentFactory.create', ([], {}), '()\n', (8112, 8114), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((8127, 8239), 'tests.factories.EnvironmentRoleFactory.create', 'EnvironmentRoleFactory.create', ([], {'environment': 'provisioned_environment', 'status': 'EnvironmentRole.Status.COMPLETED'}), '(environment=provisioned_environment, status=\n EnvironmentRole.Status.COMPLETED)\n', (8156, 8239), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((8261, 8373), 'tests.factories.EnvironmentRoleFactory.create', 'EnvironmentRoleFactory.create', ([], {'environment': 'unprovisioned_environment', 'status': 'EnvironmentRole.Status.PENDING'}), '(environment=unprovisioned_environment, status\n =EnvironmentRole.Status.PENDING)\n', (8290, 8373), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((8851, 8857), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (8855, 8857), False, 'from unittest.mock import Mock\n'), ((8970, 8999), 'atst.jobs.dispatch_provision_user.run', 'dispatch_provision_user.run', ([], {}), '()\n', (8997, 8999), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((9341, 9437), 'tests.factories.EnvironmentFactory.create', 'EnvironmentFactory.create', ([], {'cloud_id': '"""cloud_id"""', 'root_user_info': "{'credentials': credentials}"}), "(cloud_id='cloud_id', root_user_info={\n 'credentials': credentials})\n", (9366, 9437), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((9470, 9596), 'tests.factories.EnvironmentRoleFactory.create', 'EnvironmentRoleFactory.create', ([], {'environment': 'provisioned_environment', 'status': 'EnvironmentRole.Status.PENDING', 'role': '"""my_role"""'}), "(environment=provisioned_environment, 
status=\n EnvironmentRole.Status.PENDING, role='my_role')\n", (9499, 9596), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((9672, 9739), 'atst.jobs.do_provision_user', 'do_provision_user', ([], {'csp': 'csp', 'environment_role_id': 'environment_role.id'}), '(csp=csp, environment_role_id=environment_role.id)\n', (9689, 9739), False, 'from atst.jobs import RecordEnvironmentFailure, RecordEnvironmentRoleFailure, do_create_environment, do_create_atat_admin_user, dispatch_create_environment, dispatch_create_atat_admin_user, create_environment, dispatch_provision_user, do_provision_user\n'), ((1362, 1387), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1375, 1387), False, 'import pytest\n'), ((1994, 2019), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2007, 2019), False, 'import pytest\n'), ((9271, 9292), 'atst.domain.csp.cloud.MockCloudProvider', 'MockCloudProvider', (['()'], {}), '(())\n', (9288, 9292), False, 'from atst.domain.csp.cloud import MockCloudProvider\n'), ((843, 902), 'atst.domain.csp.cloud.MockCloudProvider', 'MockCloudProvider', (['{}'], {'with_delay': '(False)', 'with_failure': '(False)'}), '({}, with_delay=False, with_failure=False)\n', (860, 902), False, 'from atst.domain.csp.cloud import MockCloudProvider\n'), ((8545, 8605), 'tests.factories.ApplicationRoleFactory', 'ApplicationRoleFactory', ([], {'status': 'ApplicationRoleStatus.PENDING'}), '(status=ApplicationRoleStatus.PENDING)\n', (8567, 8605), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((8772, 8831), 'tests.factories.ApplicationRoleFactory', 'ApplicationRoleFactory', ([], {'status': 'ApplicationRoleStatus.ACTIVE'}), '(status=ApplicationRoleStatus.ACTIVE)\n', (8794, 8831), False, 'from tests.factories import EnvironmentFactory, EnvironmentRoleFactory, PortfolioFactory, ApplicationRoleFactory\n'), ((2552, 2559), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2557, 2559), False, 'from uuid import uuid4\n'), ((6544, 6573), 'atst.models.utils.claim_for_update', 'claim_for_update', (['environment'], {}), '(environment)\n', (6560, 6573), False, 'from atst.models.utils import claim_for_update\n'), ((6818, 6847), 'atst.models.utils.claim_for_update', 'claim_for_update', (['environment'], {}), '(environment)\n', (6834, 6847), False, 'from atst.models.utils import claim_for_update\n'), ((4086, 4093), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4091, 4093), False, 'from uuid import uuid4\n'), ((4875, 4882), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4880, 4882), False, 'from uuid import uuid4\n'), ((5926, 5933), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (5931, 5933), False, 'from uuid import uuid4\n'), ((3290, 3304), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (3302, 3304), False, 'import pendulum\n'), ((3359, 3373), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (3371, 3373), False, 'import pendulum\n'), ((4266, 4280), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (4278, 4280), False, 'import pendulum\n'), ((4335, 4349), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (4347, 4349), False, 'import pendulum\n'), ((5053, 5067), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (5065, 5067), False, 'import pendulum\n'), ((5122, 5136), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (5134, 5136), False, 'import pendulum\n'), ((6104, 6118), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (6116, 6118), False, 
'import pendulum\n'), ((6173, 6187), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (6185, 6187), False, 'import pendulum\n')] |
from __future__ import absolute_import, unicode_literals
from datetime import timedelta
import environ
ROOT_DIR = environ.Path(__file__) - 4 # (/root/cpdb/config/settings/myfile.py - 4 = /)
APPS_DIR = ROOT_DIR.path('cpdb')
env = environ.Env()
environ.Env.read_env(f'{ROOT_DIR}/.env') # reading .env file
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('DJANGO_SECRET_KEY', default='django')
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'django.contrib.sitemaps',
'django.contrib.sites',
)
THIRD_PARTY_APPS = (
'rest_framework',
'rest_framework.authtoken',
'django_nose',
'taggit',
'taggit_serializer',
'anymail',
'corsheaders',
'adminsortable',
'bandit',
'sortedm2m'
)
LOCAL_APPS = (
'data',
'search',
'vftg',
'cms',
'es_index',
'analytics',
'officers',
'cr',
'units',
'alias',
'twitterbot',
'activity_grid',
'document_cloud',
'search_terms',
'heatmap',
'trr',
'popup',
'airtable_integration',
'data_importer',
'status',
'email_service',
'social_graph',
'xlsx',
'tracker',
'sitemap',
'activity_log',
'pinboard',
'toast',
'app_config',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'twitterbot.middleware.LogTwitterbotLinkVisitMiddleware',
'config.cache.set_user_with_rest_framework_authenticator_middleware',
'config.cache.FetchFromCacheForAnonymousUserMiddleware',
]
# CACHES CONFIGURATION
# ------------------------------------------------------------------------------
CACHE_MIDDLEWARE_SECONDS = 300
CACHE_MIDDLEWARE_KEY_PREFIX = 'django'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
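# DummyCache performs no caching, so the cache middleware above is effectively a no-op in this base configuration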
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', False)
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('CPDB', '<EMAIL>'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
DATABASES = {
'default': {
'ATOMIC_REQUESTS': True,
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'HOST': env.str('DB_HOST', 'postgres'),
'NAME': env.str('DB_NAME', 'cpdb'),
'PASSWORD': env.str('DB_PASSWORD', 'password'),
'PORT': 5432,
'USER': env.str('DB_USER', 'cpdb')
}
}
ROOT_URLCONF = 'config.urls'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates'))
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(APPS_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# REST FRAMEWORK SETTINGS
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 20,
'ORDERING_PARAM': 'sort'
}
MAILCHIMP_API_KEY = env.str('MAILCHIMP_API_KEY', default='')
MAILCHIMP_USER = env.str('MAILCHIMP_USER', default='')
VFTG_LIST_ID = 'e38095f8d7'
AZURE_STORAGE_ACCOUNT_NAME = env.str('AZURE_STORAGE_ACCOUNT_NAME', default='')
AZURE_STORAGE_ACCOUNT_KEY = env.str('AZURE_STORAGE_ACCOUNT_KEY', default='')
TWITTERBOT_STORAGE_ACCOUNT_NAME = env.str('TWITTERBOT_STORAGE_ACCOUNT_NAME', default='')
TWITTERBOT_STORAGE_ACCOUNT_KEY = env.str('TWITTERBOT_STORAGE_ACCOUNT_KEY', default='')
AZURE_QUEUE_NAME = 'cpdpbot'
DATA_PIPELINE_STORAGE_ACCOUNT_NAME = env.str('DATA_PIPELINE_STORAGE_ACCOUNT_NAME', default='')
DATA_PIPELINE_STORAGE_ACCOUNT_KEY = env.str('DATA_PIPELINE_STORAGE_ACCOUNT_KEY', default='')
TWITTER_CONSUMER_KEY = env.str('TWITTER_CONSUMER_KEY', default='')
TWITTER_CONSUMER_SECRET = env.str('TWITTER_CONSUMER_SECRET', default='')
TWITTER_APP_TOKEN_KEY = env.str('TWITTER_APP_TOKEN_KEY', default='')
TWITTER_APP_TOKEN_SECRET = env.str('TWITTER_APP_TOKEN_SECRET', default='')
V1_URL = 'https://data.cpdp.co'
ELASTICSEARCH_HOSTS = ['elasticsearch:9200']
TEST = False
RUNNING_PORT = '80'
DOCUMENTCLOUD_USER = env.str('DOCUMENTCLOUD_USER', '')
DOCUMENTCLOUD_PASSWORD = env.str('DOCUMENTCLOUD_PASSWORD', '')
GOOGLE_GEOCODE_APIKEY = env.str('GOOGLE_GEOCODE_APIKEY', '')
ALLEGATION_MIN = env.str('ALLEGATION_MIN', '1988-01-01')
ALLEGATION_MAX = env.str('ALLEGATION_MAX', '2016-07-01')
INTERNAL_CIVILIAN_ALLEGATION_MIN = env.str('INTERNAL_CIVILIAN_ALLEGATION_MIN', '2000-01-01')
INTERNAL_CIVILIAN_ALLEGATION_MAX = env.str('INTERNAL_CIVILIAN_ALLEGATION_MAX', '2016-07-01')
TRR_MIN = env.str('TRR_MIN', '2004-01-08')
TRR_MAX = env.str('TRR_MAX', '2016-04-12')
AZURE_STATICFILES_CONTAINER = 'static'
AZURE_STATICFILES_SSL = False
AIRTABLE_PROJECT_KEY = env.str('AIRTABLE_PROJECT_KEY', '')
AIRTABLE_TABLE_NAME = env.str('AIRTABLE_TABLE_NAME', 'Request a FOIA')
AIRTABLE_COPA_AGENCY_ID = ''
AIRTABLE_CPD_AGENCY_ID = ''
TEMPLATE_TIME_TO_LIVE = timedelta(minutes=5)
AZURE_TEMPLATE_CONTAINER = 'templates'
S3_BUCKET_XLSX_DIRECTORY = 'xlsx'
S3_BUCKET_PDF_DIRECTORY = 'pdf'
S3_BUCKET_ZIP_DIRECTORY = 'zip'
GOOGLE_APPLICATION_CREDENTIALS = env.str('GOOGLE_APPLICATION_CREDENTIALS', default='')
GOOGLE_ANALYTICS_VIEW_ID = '129538462'
ENABLE_SITEMAP = False
CORS_ALLOW_CREDENTIALS = True
SESSION_COOKIE_SAMESITE = None
CPDP_ALERTS_WEBHOOK = env.str('CPDP_ALERTS_WEBHOOK', '')
ENABLE_MAKE_CLOUD_DOCUMENTS_PUBLIC = False
IMPORT_NOT_PUBLIC_CLOUD_DOCUMENTS = False
DOCUMENT_REQUEST_CC_EMAIL = '<EMAIL>'
# If you want django-taggit to be CASE-INSENSITIVE when looking up existing tags,
# you’ll have to set the TAGGIT_CASE_INSENSITIVE setting to True (False by default)
# https://django-taggit.readthedocs.io/en/latest/getting_started.html
TAGGIT_CASE_INSENSITIVE = True
GA_TRACKING_ID = env.str('GA_TRACKING_ID', '')
CLICKY_TRACKING_ID = env.str('CLICKY_TRACKING_ID', '')
CLICKY_SITEKEY_ADMIN = env.str('CLICKY_SITEKEY_ADMIN', '')
| [
"datetime.timedelta",
"environ.Path",
"environ.Env",
"environ.Env.read_env"
] | [((235, 248), 'environ.Env', 'environ.Env', ([], {}), '()\n', (246, 248), False, 'import environ\n'), ((249, 289), 'environ.Env.read_env', 'environ.Env.read_env', (['f"""{ROOT_DIR}/.env"""'], {}), "(f'{ROOT_DIR}/.env')\n", (269, 289), False, 'import environ\n'), ((9993, 10013), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (10002, 10013), False, 'from datetime import timedelta\n'), ((118, 140), 'environ.Path', 'environ.Path', (['__file__'], {}), '(__file__)\n', (130, 140), False, 'import environ\n')] |
from pick import pick
from beem import Hive
from beem.account import Account
from beem.amount import Amount
client = Hive()
# capture username
account = input('Username: ')
account = Account(account)
balance = account['balance']
symbol = balance.symbol
# we need high precision because VESTS
denom = 1e6
dgpo = client.get_dynamic_global_properties()
total_vesting_fund_hive = Amount(dgpo['total_vesting_fund_hive']).amount
total_vesting_shares_mvest = Amount(dgpo['total_vesting_shares']).amount / denom
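# HIVE per million VESTS, used to convert delegated VESTS into HIVE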
base_per_mvest = total_vesting_fund_hive / total_vesting_shares_mvest
# capture list limit
limit = input('Max number of vesting delegations to display: ') or '10'
# list type
title = 'Please choose the type of list: '
options = ['Active Vesting Delegations', 'Expiring Vesting Delegations']
# get index and selected list name
option, index = pick(options, title)
print('\n' + 'List of ' + option + ': ' + '\n')
if option=='Active Vesting Delegations' :
delegations = account.get_vesting_delegations(limit=limit)
else:
delegations = account.get_expiring_vesting_delegations("2018-01-01T00:00:00", limit=limit)
if len(delegations) == 0:
print('No ' + option)
    exit()
for delegation in delegations:
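    # raw amount carries 6 decimal places; convert to VESTS, then through MVESTS to HIVE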
delegated_vests = float(delegation['vesting_shares']['amount']) / denom
delegated_base = (delegated_vests / denom) * base_per_mvest
print('\t' + delegation['delegatee'] + ': ' + format(delegated_base, '.3f') + ' ' + symbol)
| [
"beem.amount.Amount",
"beem.account.Account",
"beem.Hive",
"pick.pick"
] | [((118, 124), 'beem.Hive', 'Hive', ([], {}), '()\n', (122, 124), False, 'from beem import Hive\n'), ((185, 201), 'beem.account.Account', 'Account', (['account'], {}), '(account)\n', (192, 201), False, 'from beem.account import Account\n'), ((853, 873), 'pick.pick', 'pick', (['options', 'title'], {}), '(options, title)\n', (857, 873), False, 'from pick import pick\n'), ((380, 419), 'beem.amount.Amount', 'Amount', (["dgpo['total_vesting_fund_hive']"], {}), "(dgpo['total_vesting_fund_hive'])\n", (386, 419), False, 'from beem.amount import Amount\n'), ((456, 492), 'beem.amount.Amount', 'Amount', (["dgpo['total_vesting_shares']"], {}), "(dgpo['total_vesting_shares'])\n", (462, 492), False, 'from beem.amount import Amount\n')] |
#!/usr/bin/env python
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import fci
mol = gto.M(atom='Be 0 0 0; H .23 -1.1 0; H .23 1.1 0',
symmetry=1, verbose=0)
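# restricted Hartree-Fock on the bent BeH2 molecule defined above (point-group symmetry enabled)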
m = scf.RHF(mol)
m.kernel()
norb = m.mo_energy.size
nelec = mol.nelectron
class KnowValues(unittest.TestCase):
def test_symm_spin0(self):
fs = fci.FCI(mol, m.mo_coeff)
fs.wfnsym = 'B1'
fs.nroots = 3
e, c = fs.kernel()
self.assertAlmostEqual(e[0], -19.286003160337, 9)
self.assertAlmostEqual(e[1], -18.812177419921, 9)
self.assertAlmostEqual(e[2], -18.786684534678, 9)
self.assertAlmostEqual(fci.spin_op.spin_square0(c[0], norb, nelec)[0], 0, 9)
self.assertAlmostEqual(fci.spin_op.spin_square0(c[1], norb, nelec)[0], 6, 9)
self.assertAlmostEqual(fci.spin_op.spin_square0(c[2], norb, nelec)[0], 0, 9)
def test_symm_spin1(self):
fs = fci.FCI(mol, m.mo_coeff, singlet=False)
fs.wfnsym = 'B1'
fs.nroots = 2
e, c = fs.kernel()
self.assertAlmostEqual(e[0], -19.303845373762, 9)
self.assertAlmostEqual(e[1], -19.286003160337, 9)
self.assertAlmostEqual(fci.spin_op.spin_square0(c[0], norb, nelec)[0], 2, 9)
self.assertAlmostEqual(fci.spin_op.spin_square0(c[1], norb, nelec)[0], 0, 9)
if __name__ == "__main__":
    print("Full Tests for symmetric FCI")
unittest.main()
| [
"pyscf.fci.spin_op.spin_square0",
"pyscf.gto.M",
"unittest.main",
"pyscf.scf.RHF",
"pyscf.fci.FCI"
] | [((154, 226), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""Be 0 0 0; H .23 -1.1 0; H .23 1.1 0"""', 'symmetry': '(1)', 'verbose': '(0)'}), "(atom='Be 0 0 0; H .23 -1.1 0; H .23 1.1 0', symmetry=1, verbose=0)\n", (159, 226), False, 'from pyscf import gto\n'), ((243, 255), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (250, 255), False, 'from pyscf import scf\n'), ((1440, 1455), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1453, 1455), False, 'import unittest\n'), ((395, 419), 'pyscf.fci.FCI', 'fci.FCI', (['mol', 'm.mo_coeff'], {}), '(mol, m.mo_coeff)\n', (402, 419), False, 'from pyscf import fci\n'), ((968, 1007), 'pyscf.fci.FCI', 'fci.FCI', (['mol', 'm.mo_coeff'], {'singlet': '(False)'}), '(mol, m.mo_coeff, singlet=False)\n', (975, 1007), False, 'from pyscf import fci\n'), ((699, 742), 'pyscf.fci.spin_op.spin_square0', 'fci.spin_op.spin_square0', (['c[0]', 'norb', 'nelec'], {}), '(c[0], norb, nelec)\n', (723, 742), False, 'from pyscf import fci\n'), ((784, 827), 'pyscf.fci.spin_op.spin_square0', 'fci.spin_op.spin_square0', (['c[1]', 'norb', 'nelec'], {}), '(c[1], norb, nelec)\n', (808, 827), False, 'from pyscf import fci\n'), ((869, 912), 'pyscf.fci.spin_op.spin_square0', 'fci.spin_op.spin_square0', (['c[2]', 'norb', 'nelec'], {}), '(c[2], norb, nelec)\n', (893, 912), False, 'from pyscf import fci\n'), ((1229, 1272), 'pyscf.fci.spin_op.spin_square0', 'fci.spin_op.spin_square0', (['c[0]', 'norb', 'nelec'], {}), '(c[0], norb, nelec)\n', (1253, 1272), False, 'from pyscf import fci\n'), ((1314, 1357), 'pyscf.fci.spin_op.spin_square0', 'fci.spin_op.spin_square0', (['c[1]', 'norb', 'nelec'], {}), '(c[1], norb, nelec)\n', (1338, 1357), False, 'from pyscf import fci\n')] |
"""
This file is for setting up the models used for the feature vis
process; the objects for the global model and the layer values are set here.
"""
import tensorflow as tf
from tensorflow import keras
from tf_slim import nets as slim_nets
from luna.pretrained_models import googlenet
def model_resnet50v2():
"""
Instantiates ResNet50V2 architecture using keras
Returns:
keras.application: ResNet50V2 Architecture
"""
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
return keras.applications.ResNet50V2(weights="imagenet", include_top=False)
def model_inceptionv3():
"""
Instantiates InceptionV3 architecture using keras
Returns:
keras.application: InceptionV3 Architecture
"""
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
return keras.applications.InceptionV3(
weights="imagenet", include_top=False)
def model_inceptionv1():
"""
    Instantiates InceptionV1 architecture using googlenet
Returns:
googlenet: InceptionV1 Architecture
"""
tf.compat.v1.keras.backend.set_image_data_format('channels_first')
return googlenet.create_googlenet()
def model_inceptionv1_slim():
"""
Instantiates InceptionV1 architecture using tensorflow slim
Returns:
slim_net: InceptionV1 Architecture
"""
inputs = tf.random.uniform((1, 224, 224, 3), dtype=tf.dtypes.float32)
return slim_nets.inception.inception_v1(inputs)
def model_vgg16():
"""
Instantiates vgg16 architecture using keras
Returns:
keras.application: vgg16 Architecture
"""
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
return keras.applications.VGG16(weights="imagenet", include_top=False)
def model_vgg19():
"""
Instantiates vgg19 architecture using keras
Returns:
keras.applications: vgg19 Architecture
"""
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
return keras.applications.VGG19(weights="imagenet", include_top=False)
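# Minimal usage sketch (hypothetical call site, not part of this module):
#   model = model_vgg16()                           # downloads ImageNet weights on first use
#   feats = model(tf.zeros([1, 224, 224, 3]))      # feature maps only, since include_top=False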
| [
"tensorflow.random.uniform",
"tensorflow.keras.applications.InceptionV3",
"tensorflow.keras.applications.VGG16",
"tensorflow.compat.v1.keras.backend.set_image_data_format",
"tensorflow.keras.applications.ResNet50V2",
"luna.pretrained_models.googlenet.create_googlenet",
"tf_slim.nets.inception.inception_v1",
"tensorflow.keras.applications.VGG19"
] | [((445, 510), 'tensorflow.compat.v1.keras.backend.set_image_data_format', 'tf.compat.v1.keras.backend.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (493, 510), True, 'import tensorflow as tf\n'), ((522, 590), 'tensorflow.keras.applications.ResNet50V2', 'keras.applications.ResNet50V2', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (551, 590), False, 'from tensorflow import keras\n'), ((758, 823), 'tensorflow.compat.v1.keras.backend.set_image_data_format', 'tf.compat.v1.keras.backend.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (806, 823), True, 'import tensorflow as tf\n'), ((835, 904), 'tensorflow.keras.applications.InceptionV3', 'keras.applications.InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (865, 904), False, 'from tensorflow import keras\n'), ((1076, 1142), 'tensorflow.compat.v1.keras.backend.set_image_data_format', 'tf.compat.v1.keras.backend.set_image_data_format', (['"""channels_first"""'], {}), "('channels_first')\n", (1124, 1142), True, 'import tensorflow as tf\n'), ((1154, 1182), 'luna.pretrained_models.googlenet.create_googlenet', 'googlenet.create_googlenet', ([], {}), '()\n', (1180, 1182), False, 'from luna.pretrained_models import googlenet\n'), ((1365, 1425), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1, 224, 224, 3)'], {'dtype': 'tf.dtypes.float32'}), '((1, 224, 224, 3), dtype=tf.dtypes.float32)\n', (1382, 1425), True, 'import tensorflow as tf\n'), ((1437, 1477), 'tf_slim.nets.inception.inception_v1', 'slim_nets.inception.inception_v1', (['inputs'], {}), '(inputs)\n', (1469, 1477), True, 'from tf_slim import nets as slim_nets\n'), ((1627, 1692), 'tensorflow.compat.v1.keras.backend.set_image_data_format', 'tf.compat.v1.keras.backend.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (1675, 1692), True, 'import tensorflow as tf\n'), ((1704, 1767), 'tensorflow.keras.applications.VGG16', 'keras.applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (1728, 1767), False, 'from tensorflow import keras\n'), ((1918, 1983), 'tensorflow.compat.v1.keras.backend.set_image_data_format', 'tf.compat.v1.keras.backend.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (1966, 1983), True, 'import tensorflow as tf\n'), ((1995, 2058), 'tensorflow.keras.applications.VGG19', 'keras.applications.VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2019, 2058), False, 'from tensorflow import keras\n')] |
import json
import sys
import threading
import CIPC
with open('config.json', "r") as jsonfile:
configs = json.loads(jsonfile.read())
def viewhelp():
print("""
Usage: python3 CIPC_cli.py mode device_names
Example: python3 CIPC_cli.py r S1 S2 R2 S8 S3 R4
Modes:
r read configurations - reads configurations from devices and saves them to corresponding configuration txt files
note that encrypted passwords will be read and saved in encrypted form
u update configurations - reads configurations from txt files and applies them to devices
without clearing previous configurations
e erase configurations - erases configurations from devices
w write configurations - reads configurations from files and applies them to devices
erasing previous configurations
    c alternative usage, for one device with custom passwords (if there is no password, type "n")
Usage: python3 CIPC_cli.py c mode device_name user_EXEC_password priv_EXEC_password
Example: python3 CIPC_cli.py c e S1 n cllass
You can use "all" as a device name to order handle all devices in device list
Example: python3 CIPC_cli.py r all
""")
if len(sys.argv) == 1:
viewhelp()
else:
mode = sys.argv[1]
with open('device_list_and_configs/device_list.json', "r") as jsonfile:
devise_list = json.loads(jsonfile.read())
if mode == "c":
mode = sys.argv[2]
devise_list[sys.argv[3]]["user_password"] = sys.argv[4]
devise_list[sys.argv[3]]["priv_password"] = sys.argv[5]
devise_name_list = [sys.argv[3]]
elif sys.argv[2] == "all":
        devise_name_list = devise_list.keys() # keys() returns the names of all devices
else:
devise_name_list = sys.argv[2:]
if mode == "r":
for devise_name in devise_name_list:
threading.Thread(name=devise_name + "[read_conf]",
target=CIPC.read_configuration,
args=(devise_name,
configs["devise_address"],
devise_list[devise_name]["devise_port"],
devise_list[devise_name]["user_password"],
devise_list[devise_name]["priv_password"])).start()
elif mode == "u":
for devise_name in devise_name_list:
threading.Thread(name=devise_name + "[upd_conf]",
target=CIPC.update_configuration,
args=(devise_name,
configs["devise_address"],
devise_list[devise_name]["devise_port"],
devise_list[devise_name]["user_password"],
devise_list[devise_name]["priv_password"])).start()
elif mode == "w":
for devise_name in devise_name_list:
threading.Thread(name=devise_name + "[write_conf]",
target=CIPC.write_configuration,
args=(devise_name,
configs["devise_address"],
devise_list[devise_name]["devise_port"],
devise_list[devise_name]["user_password"],
devise_list[devise_name]["priv_password"])).start()
elif mode == "e":
for devise_name in devise_name_list:
threading.Thread(name=devise_name + "[erase_conf]",
target=CIPC.erase_configuration,
args=(devise_name,
configs["devise_address"],
devise_list[devise_name]["devise_port"],
devise_list[devise_name]["user_password"],
devise_list[devise_name]["priv_password"])).start()
elif mode == "/?":
viewhelp()
| [
"threading.Thread"
] | [((2059, 2329), 'threading.Thread', 'threading.Thread', ([], {'name': "(devise_name + '[read_conf]')", 'target': 'CIPC.read_configuration', 'args': "(devise_name, configs['devise_address'], devise_list[devise_name][\n 'devise_port'], devise_list[devise_name]['user_password'], devise_list[\n devise_name]['priv_password'])"}), "(name=devise_name + '[read_conf]', target=CIPC.\n read_configuration, args=(devise_name, configs['devise_address'],\n devise_list[devise_name]['devise_port'], devise_list[devise_name][\n 'user_password'], devise_list[devise_name]['priv_password']))\n", (2075, 2329), False, 'import threading\n'), ((2612, 2883), 'threading.Thread', 'threading.Thread', ([], {'name': "(devise_name + '[upd_conf]')", 'target': 'CIPC.update_configuration', 'args': "(devise_name, configs['devise_address'], devise_list[devise_name][\n 'devise_port'], devise_list[devise_name]['user_password'], devise_list[\n devise_name]['priv_password'])"}), "(name=devise_name + '[upd_conf]', target=CIPC.\n update_configuration, args=(devise_name, configs['devise_address'],\n devise_list[devise_name]['devise_port'], devise_list[devise_name][\n 'user_password'], devise_list[devise_name]['priv_password']))\n", (2628, 2883), False, 'import threading\n'), ((3166, 3438), 'threading.Thread', 'threading.Thread', ([], {'name': "(devise_name + '[write_conf]')", 'target': 'CIPC.write_configuration', 'args': "(devise_name, configs['devise_address'], devise_list[devise_name][\n 'devise_port'], devise_list[devise_name]['user_password'], devise_list[\n devise_name]['priv_password'])"}), "(name=devise_name + '[write_conf]', target=CIPC.\n write_configuration, args=(devise_name, configs['devise_address'],\n devise_list[devise_name]['devise_port'], devise_list[devise_name][\n 'user_password'], devise_list[devise_name]['priv_password']))\n", (3182, 3438), False, 'import threading\n'), ((3721, 3993), 'threading.Thread', 'threading.Thread', ([], {'name': "(devise_name + '[erase_conf]')", 'target': 'CIPC.erase_configuration', 'args': "(devise_name, configs['devise_address'], devise_list[devise_name][\n 'devise_port'], devise_list[devise_name]['user_password'], devise_list[\n devise_name]['priv_password'])"}), "(name=devise_name + '[erase_conf]', target=CIPC.\n erase_configuration, args=(devise_name, configs['devise_address'],\n devise_list[devise_name]['devise_port'], devise_list[devise_name][\n 'user_password'], devise_list[devise_name]['priv_password']))\n", (3737, 3993), False, 'import threading\n')] |
#-*- coding:utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=import-error, too-many-locals, too-many-statements
# pylint: disable=pointless-string-statement, no-member
"""
average_ckpt.py: Tensorflow 2.1 averaging model weights.
I referred to https://stackoverflow.com/questions/48212110/average-weights-in-keras-models
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tfsr.helper.common_helper import ParseOption, Logger
from tfsr.helper.misc_helper import Util
import tfsr.helper.train_helper as th
from tfsr.trainer_tf import ConvEncoder
def main():
# pylint: disable=too-many-branches
# Initializing
Util.prepare_device()
logger = Logger(name="speech_transformer", level=Logger.DEBUG).logger
config = ParseOption(sys.argv, logger).args
# Loading a vocabulary
_, _, dec_in_dim, dec_out_dim = \
Util.load_vocab(Util.get_file_path(config.path_base, config.path_vocab),
logger)
dec_out_dim = dec_in_dim + 1
logger.info("The modified output Dimension %d", dec_out_dim)
# Model selection
# pylint: disable=invalid-name
model = ConvEncoder(config.model_encoder_num, config.model_dimension,
config.model_att_head_num, config.model_inner_dim,
config.feat_dim,
config.train_inp_dropout, config.train_inn_dropout,
config.train_att_dropout, config.train_res_dropout,
config.model_conv_filter_num,
config.model_conv_layer_num,
config.model_initializer, dec_out_dim)
# Setting optimizer and checkpoint manager
optimizer = th.get_optimizer(config)
# Creating or loading a check point
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt_manager = \
tf.train.CheckpointManager(ckpt, config.path_ckpt, max_to_keep=None)
ckpts = []
for ckpt in ckpt_manager.checkpoints:
if config.train_max_epoch == 0 or \
int(ckpt.split("-")[-1]) <= config.train_max_epoch:
ckpts.append(ckpt)
optimizer = th.get_optimizer(config)
models = []
for ckpt_path in ckpts[-config.model_average_num:]:
logger.info(ckpt_path)
model = ConvEncoder(config.model_encoder_num, config.model_dimension,
config.model_att_head_num, config.model_inner_dim,
config.feat_dim,
config.train_inp_dropout, config.train_inn_dropout,
config.train_att_dropout, config.train_res_dropout,
config.model_conv_filter_num,
config.model_conv_layer_num,
config.model_initializer, dec_out_dim)
# Creating or loading a check point
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt.restore(ckpt_path).expect_partial()
dummy_feats = tf.random.uniform([1, 20, config.feat_dim])
dummy_in_len = tf.ones(1) * 20
model(dummy_feats, input_lengths=dummy_in_len, is_training=False,
mask=None, attention_penalty_mask=None,
in_len_div=config.model_conv_layer_num ** config.model_conv_stride)
models.append(model)
logger.info("Total %d models were loaded.", len(models))
# Computing averaged weights
weights = list()
for model in models:
weights.append(model.get_weights())
new_weights = list()
for weights_list_tuple in zip(*weights):
new_weights.append(
np.array([np.array(w).mean(axis=0) for w in zip(*weights_list_tuple)])
)
# Saving
model = ConvEncoder(config.model_encoder_num, config.model_dimension,
config.model_att_head_num, config.model_inner_dim,
config.feat_dim,
config.train_inp_dropout, config.train_inn_dropout,
config.train_att_dropout, config.train_res_dropout,
config.model_conv_filter_num,
config.model_conv_layer_num,
config.model_initializer, dec_out_dim)
dummy_feats = tf.random.uniform([10, 20, config.feat_dim])
dummy_in_len = tf.ones(10) * 20
model(dummy_feats, input_lengths=dummy_in_len, is_training=False,
mask=None, attention_penalty_mask=None,
in_len_div=config.model_conv_layer_num ** config.model_conv_stride)
  model.set_weights(new_weights)  # apply the averaged weights computed above
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
if os.path.exists(config.path_ckpt + "/avg"):
shutil.rmtree(config.path_ckpt + "/avg")
ckpt_manager = \
tf.train.CheckpointManager(ckpt, config.path_ckpt + "/avg", max_to_keep=1)
logger.info("Saved to %s", ckpt_manager.save())
if __name__ == "__main__":
main()
| [
"tensorflow.random.uniform",
"tensorflow.train.Checkpoint",
"os.path.exists",
"tensorflow.ones",
"tfsr.helper.train_helper.get_optimizer",
"tfsr.helper.common_helper.Logger",
"shutil.rmtree",
"tfsr.helper.misc_helper.Util.get_file_path",
"numpy.array",
"tfsr.trainer_tf.ConvEncoder",
"tfsr.helper.common_helper.ParseOption",
"tensorflow.train.CheckpointManager",
"tfsr.helper.misc_helper.Util.prepare_device"
] | [((1306, 1327), 'tfsr.helper.misc_helper.Util.prepare_device', 'Util.prepare_device', ([], {}), '()\n', (1325, 1327), False, 'from tfsr.helper.misc_helper import Util\n'), ((1771, 2121), 'tfsr.trainer_tf.ConvEncoder', 'ConvEncoder', (['config.model_encoder_num', 'config.model_dimension', 'config.model_att_head_num', 'config.model_inner_dim', 'config.feat_dim', 'config.train_inp_dropout', 'config.train_inn_dropout', 'config.train_att_dropout', 'config.train_res_dropout', 'config.model_conv_filter_num', 'config.model_conv_layer_num', 'config.model_initializer', 'dec_out_dim'], {}), '(config.model_encoder_num, config.model_dimension, config.\n model_att_head_num, config.model_inner_dim, config.feat_dim, config.\n train_inp_dropout, config.train_inn_dropout, config.train_att_dropout,\n config.train_res_dropout, config.model_conv_filter_num, config.\n model_conv_layer_num, config.model_initializer, dec_out_dim)\n', (1782, 2121), False, 'from tfsr.trainer_tf import ConvEncoder\n'), ((2317, 2341), 'tfsr.helper.train_helper.get_optimizer', 'th.get_optimizer', (['config'], {}), '(config)\n', (2333, 2341), True, 'import tfsr.helper.train_helper as th\n'), ((2390, 2443), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'optimizer', 'model': 'model'}), '(optimizer=optimizer, model=model)\n', (2409, 2443), True, 'import tensorflow as tf\n'), ((2467, 2535), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'config.path_ckpt'], {'max_to_keep': 'None'}), '(ckpt, config.path_ckpt, max_to_keep=None)\n', (2493, 2535), True, 'import tensorflow as tf\n'), ((2728, 2752), 'tfsr.helper.train_helper.get_optimizer', 'th.get_optimizer', (['config'], {}), '(config)\n', (2744, 2752), True, 'import tfsr.helper.train_helper as th\n'), ((4206, 4556), 'tfsr.trainer_tf.ConvEncoder', 'ConvEncoder', (['config.model_encoder_num', 'config.model_dimension', 'config.model_att_head_num', 'config.model_inner_dim', 'config.feat_dim', 'config.train_inp_dropout', 'config.train_inn_dropout', 'config.train_att_dropout', 'config.train_res_dropout', 'config.model_conv_filter_num', 'config.model_conv_layer_num', 'config.model_initializer', 'dec_out_dim'], {}), '(config.model_encoder_num, config.model_dimension, config.\n model_att_head_num, config.model_inner_dim, config.feat_dim, config.\n train_inp_dropout, config.train_inn_dropout, config.train_att_dropout,\n config.train_res_dropout, config.model_conv_filter_num, config.\n model_conv_layer_num, config.model_initializer, dec_out_dim)\n', (4217, 4556), False, 'from tfsr.trainer_tf import ConvEncoder\n'), ((4709, 4753), 'tensorflow.random.uniform', 'tf.random.uniform', (['[10, 20, config.feat_dim]'], {}), '([10, 20, config.feat_dim])\n', (4726, 4753), True, 'import tensorflow as tf\n'), ((5023, 5076), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'optimizer', 'model': 'model'}), '(optimizer=optimizer, model=model)\n', (5042, 5076), True, 'import tensorflow as tf\n'), ((5082, 5123), 'os.path.exists', 'os.path.exists', (["(config.path_ckpt + '/avg')"], {}), "(config.path_ckpt + '/avg')\n", (5096, 5123), False, 'import os\n'), ((5194, 5268), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', "(config.path_ckpt + '/avg')"], {'max_to_keep': '(1)'}), "(ckpt, config.path_ckpt + '/avg', max_to_keep=1)\n", (5220, 5268), True, 'import tensorflow as tf\n'), ((1339, 1392), 'tfsr.helper.common_helper.Logger', 'Logger', ([], {'name': '"""speech_transformer"""', 'level': 'Logger.DEBUG'}), 
"(name='speech_transformer', level=Logger.DEBUG)\n", (1345, 1392), False, 'from tfsr.helper.common_helper import ParseOption, Logger\n'), ((1411, 1440), 'tfsr.helper.common_helper.ParseOption', 'ParseOption', (['sys.argv', 'logger'], {}), '(sys.argv, logger)\n', (1422, 1440), False, 'from tfsr.helper.common_helper import ParseOption, Logger\n'), ((1528, 1583), 'tfsr.helper.misc_helper.Util.get_file_path', 'Util.get_file_path', (['config.path_base', 'config.path_vocab'], {}), '(config.path_base, config.path_vocab)\n', (1546, 1583), False, 'from tfsr.helper.misc_helper import Util\n'), ((2861, 3211), 'tfsr.trainer_tf.ConvEncoder', 'ConvEncoder', (['config.model_encoder_num', 'config.model_dimension', 'config.model_att_head_num', 'config.model_inner_dim', 'config.feat_dim', 'config.train_inp_dropout', 'config.train_inn_dropout', 'config.train_att_dropout', 'config.train_res_dropout', 'config.model_conv_filter_num', 'config.model_conv_layer_num', 'config.model_initializer', 'dec_out_dim'], {}), '(config.model_encoder_num, config.model_dimension, config.\n model_att_head_num, config.model_inner_dim, config.feat_dim, config.\n train_inp_dropout, config.train_inn_dropout, config.train_att_dropout,\n config.train_res_dropout, config.model_conv_filter_num, config.\n model_conv_layer_num, config.model_initializer, dec_out_dim)\n', (2872, 3211), False, 'from tfsr.trainer_tf import ConvEncoder\n'), ((3413, 3466), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'optimizer', 'model': 'model'}), '(optimizer=optimizer, model=model)\n', (3432, 3466), True, 'import tensorflow as tf\n'), ((3531, 3574), 'tensorflow.random.uniform', 'tf.random.uniform', (['[1, 20, config.feat_dim]'], {}), '([1, 20, config.feat_dim])\n', (3548, 3574), True, 'import tensorflow as tf\n'), ((4771, 4782), 'tensorflow.ones', 'tf.ones', (['(10)'], {}), '(10)\n', (4778, 4782), True, 'import tensorflow as tf\n'), ((5129, 5169), 'shutil.rmtree', 'shutil.rmtree', (["(config.path_ckpt + '/avg')"], {}), "(config.path_ckpt + '/avg')\n", (5142, 5169), False, 'import shutil\n'), ((3594, 3604), 'tensorflow.ones', 'tf.ones', (['(1)'], {}), '(1)\n', (3601, 3604), True, 'import tensorflow as tf\n'), ((4117, 4128), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (4125, 4128), True, 'import numpy as np\n')] |
# coding: UTF-8
import torch
from torch import nn
import math
# import neural_renderer as nr
from .utils import *
from pytorch3d.structures import Pointclouds
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
OpenGLOrthographicCameras,
PointLights,
PointsRasterizationSettings,
PointsRenderer,
PointsRasterizer,
AlphaCompositor,
NormWeightedCompositor
)
"""
@date: 2020.06.22
@author: daiheng.gao
@target: replace neural_renderer with pytorch3d soft rasterizer.
@date: 2020.06.23
@author: daiheng.gao
@target: use point cloud renderer.
@readme: revise get_face_idx() & get_textures_from_im() to the resolution of the image itself to be compatible with pytorch3d.
"""
EPS = 1e-7
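# Worked example of the pinhole intrinsics built in Renderer.__init__ below,
# using the default cfgs values (image_size=64, fov=10 degrees):
#   fx = fy = (64 - 1) / 2 / tan(5 * pi / 180) ~= 360.0
#   cx = cy = (64 - 1) / 2 = 31.5
# so K maps a camera-space point (x, y, z) to pixel coordinates via u = fx*x/z + cx.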
class Renderer(nn.Module):
def __init__(self, cfgs):
super(Renderer, self).__init__()
self.device = cfgs.get('device', 'cuda:0')
self.image_size = cfgs.get('image_size', 64)
self.min_depth = cfgs.get('min_depth', 0.9)
self.max_depth = cfgs.get('max_depth', 1.1)
self.rot_center_depth = cfgs.get('rot_center_depth', (self.min_depth+self.max_depth)/2)
        # todo: FoV (Field of View) was set to a fixed value of 10 degrees (according to the paper).
self.fov = cfgs.get('fov', 10)
self.tex_cube_size = cfgs.get('tex_cube_size', 2)
self.renderer_min_depth = cfgs.get('renderer_min_depth', 0.1)
self.renderer_max_depth = cfgs.get('renderer_max_depth', 10.)
#### camera intrinsics
# (u) (x)
# d * K^-1 (v) = (y)
# (1) (z)
## renderer for visualization
R = [[[1.,0.,0.],
[0.,1.,0.],
[0.,0.,1.]]]
R = torch.FloatTensor(R).to(self.device)
t = torch.zeros(1,3, dtype=torch.float32).to(self.device)
## todo: K is the camera intrinsic matrix.
fx = (self.image_size-1)/2/(math.tan(self.fov/2 *math.pi/180))
fy = (self.image_size-1)/2/(math.tan(self.fov/2 *math.pi/180))
cx = (self.image_size-1)/2
cy = (self.image_size-1)/2
K = [[fx, 0., cx],
[0., fy, cy],
[0., 0., 1.]]
K = torch.FloatTensor(K).to(self.device)
self.inv_K = torch.inverse(K).unsqueeze(0)
self.K = K.unsqueeze(0)
## todo: define Renderer.
## use renderer from pytorch3d.
        # fixme: znear and zfar are equivalent to the neural renderer default settings.
cameras = OpenGLOrthographicCameras(device=self.device, R=R, T=t, znear=0.01, zfar=100)
# cameras = OpenGLPerspectiveCameras(device=self.device, R=R, T=t,
# znear=self.renderer_min_depth,
# zfar=self.renderer_max_depth,
# fov=self.fov)
raster_settings = PointsRasterizationSettings(
image_size=self.image_size,
radius=0.003,
points_per_pixel=10,
bin_size=None,
max_points_per_bin=None
)
self.renderer = PointsRenderer(
rasterizer=PointsRasterizer(
cameras=cameras,
raster_settings=raster_settings
),
compositor=AlphaCompositor(
composite_params=None
)
)
def set_transform_matrices(self, view):
self.rot_mat, self.trans_xyz = get_transform_matrices(view)
def rotate_pts(self, pts, rot_mat):
centroid = torch.FloatTensor([0.,0.,self.rot_center_depth]).to(pts.device).view(1,1,3)
pts = pts - centroid # move to centroid
pts = pts.matmul(rot_mat.transpose(2,1)) # rotate
pts = pts + centroid # move back
return pts
def translate_pts(self, pts, trans_xyz):
return pts + trans_xyz
def depth_to_3d_grid(self, depth):
b, h, w = depth.shape
grid_2d = get_grid(b, h, w, normalize=False).to(depth.device) # N x H x W x 2
depth = depth.unsqueeze(-1)
grid_3d = torch.cat((grid_2d, torch.ones_like(depth)), dim=3)
grid_3d = grid_3d.matmul(self.inv_K.to(depth.device).transpose(2,1)) * depth
return grid_3d
def grid_3d_to_2d(self, grid_3d):
b, h, w, _ = grid_3d.shape
grid_2d = grid_3d / grid_3d[...,2:]
grid_2d = grid_2d.matmul(self.K.to(grid_3d.device).transpose(2,1))[:,:,:,:2]
WH = torch.FloatTensor([w-1, h-1]).to(grid_3d.device).view(1,1,1,2)
grid_2d = grid_2d / WH *2.-1. # normalize to -1~1
return grid_2d
def get_warped_3d_grid(self, depth):
"""
        unsup3d's warping method is to warp the object itself rather than move the perspective camera as in pytorch3d.
:param depth:
:return:
"""
b, h, w = depth.shape
grid_3d = self.depth_to_3d_grid(depth).reshape(b,-1,3)
grid_3d = self.rotate_pts(grid_3d, self.rot_mat)
grid_3d = self.translate_pts(grid_3d, self.trans_xyz)
return grid_3d.reshape(b,h,w,3) # return 3d vertices
def get_inv_warped_3d_grid(self, depth):
b, h, w = depth.shape
grid_3d = self.depth_to_3d_grid(depth).reshape(b,-1,3)
grid_3d = self.translate_pts(grid_3d, -self.trans_xyz)
grid_3d = self.rotate_pts(grid_3d, self.rot_mat.transpose(2,1))
return grid_3d.reshape(b,h,w,3) # return 3d vertices
def get_warped_2d_grid(self, depth):
grid_3d = self.get_warped_3d_grid(depth)
grid_2d = self.grid_3d_to_2d(grid_3d)
return grid_2d
def get_inv_warped_2d_grid(self, depth):
grid_3d = self.get_inv_warped_3d_grid(depth)
grid_2d = self.grid_3d_to_2d(grid_3d)
return grid_2d
def warp_canon_depth(self, canon_depth):
b, h, w = canon_depth.shape
# grid_3d supposed to be the 3D mesh.
grid_3d = self.get_warped_3d_grid(canon_depth).reshape(b,-1,3)
faces = get_face_idx(b, h, w).to(canon_depth.device)
# fixme: https://github.com/facebookresearch/pytorch3d/issues/202
grid_3d = Pointclouds(points=grid_3d, features=faces)
warped_depth = self.renderer(grid_3d).squeeze()
# allow some margin out of valid range
margin = (self.max_depth - self.min_depth) / 2
warped_depth = warped_depth.clamp(min=self.min_depth-margin, max=self.max_depth+margin)
return warped_depth
def get_normal_from_depth(self, depth):
b, h, w = depth.shape
grid_3d = self.depth_to_3d_grid(depth)
tu = grid_3d[:,1:-1,2:] - grid_3d[:,1:-1,:-2]
tv = grid_3d[:,2:,1:-1] - grid_3d[:,:-2,1:-1]
normal = tu.cross(tv, dim=3)
zero = torch.FloatTensor([0,0,1]).to(depth.device)
normal = torch.cat([zero.repeat(b,h-2,1,1), normal, zero.repeat(b,h-2,1,1)], 2)
normal = torch.cat([zero.repeat(b,1,w,1), normal, zero.repeat(b,1,w,1)], 1)
normal = normal / (((normal**2).sum(3, keepdim=True))**0.5 + EPS)
return normal
def render_yaw(self, im, depth, v_before=None, v_after=None, rotations=None, maxr=90, nsample=9, crop_mesh=None):
b, c, h, w = im.shape
grid_3d = self.depth_to_3d_grid(depth)
if crop_mesh is not None:
top, bottom, left, right = crop_mesh # pixels from border to be cropped
if top > 0:
grid_3d[:,:top,:,1] = grid_3d[:,top:top+1,:,1].repeat(1,top,1)
grid_3d[:,:top,:,2] = grid_3d[:,top:top+1,:,2].repeat(1,top,1)
if bottom > 0:
grid_3d[:,-bottom:,:,1] = grid_3d[:,-bottom-1:-bottom,:,1].repeat(1,bottom,1)
grid_3d[:,-bottom:,:,2] = grid_3d[:,-bottom-1:-bottom,:,2].repeat(1,bottom,1)
if left > 0:
grid_3d[:,:,:left,0] = grid_3d[:,:,left:left+1,0].repeat(1,1,left)
grid_3d[:,:,:left,2] = grid_3d[:,:,left:left+1,2].repeat(1,1,left)
if right > 0:
grid_3d[:,:,-right:,0] = grid_3d[:,:,-right-1:-right,0].repeat(1,1,right)
grid_3d[:,:,-right:,2] = grid_3d[:,:,-right-1:-right,2].repeat(1,1,right)
grid_3d = grid_3d.reshape(b,-1,3)
im_trans = []
# inverse warp
if v_before is not None:
rot_mat, trans_xyz = get_transform_matrices(v_before)
grid_3d = self.translate_pts(grid_3d, -trans_xyz)
grid_3d = self.rotate_pts(grid_3d, rot_mat.transpose(2,1))
if rotations is None:
rotations = torch.linspace(-math.pi/180*maxr, math.pi/180*maxr, nsample)
for i, ri in enumerate(rotations):
ri = torch.FloatTensor([0, ri, 0]).to(im.device).view(1,3)
rot_mat_i, _ = get_transform_matrices(ri)
grid_3d_i = self.rotate_pts(grid_3d, rot_mat_i.repeat(b,1,1))
if v_after is not None:
if len(v_after.shape) == 3:
v_after_i = v_after[i]
else:
v_after_i = v_after
rot_mat, trans_xyz = get_transform_matrices(v_after_i)
grid_3d_i = self.rotate_pts(grid_3d_i, rot_mat)
grid_3d_i = self.translate_pts(grid_3d_i, trans_xyz)
textures = get_textures_from_im(im, tx_size=self.tex_cube_size)
grid_3d_i = Pointclouds(points=grid_3d_i, features=textures)
warped_images = self.renderer(grid_3d_i).clamp(min=-1., max=1.)
# print("扭过来的图像大小", warped_images.shape) torch.Size([8, 64, 64, 3])
im_trans += [warped_images]
return torch.stack(im_trans, 1) # b x t x c x h x w
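    # With the default arguments (maxr=90, nsample=9), render_yaw sweeps the yaw
    # angle over torch.linspace(-pi/2, pi/2, 9), i.e. -90, -67.5, ..., +67.5, +90
    # degrees in 22.5-degree steps, and returns one rendered frame per angle.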
| [
"pytorch3d.renderer.PointsRasterizer",
"torch.ones_like",
"pytorch3d.structures.Pointclouds",
"math.tan",
"pytorch3d.renderer.AlphaCompositor",
"torch.stack",
"torch.FloatTensor",
"pytorch3d.renderer.OpenGLOrthographicCameras",
"torch.inverse",
"torch.zeros",
"pytorch3d.renderer.PointsRasterizationSettings",
"torch.linspace"
] | [((2555, 2632), 'pytorch3d.renderer.OpenGLOrthographicCameras', 'OpenGLOrthographicCameras', ([], {'device': 'self.device', 'R': 'R', 'T': 't', 'znear': '(0.01)', 'zfar': '(100)'}), '(device=self.device, R=R, T=t, znear=0.01, zfar=100)\n', (2580, 2632), False, 'from pytorch3d.renderer import look_at_view_transform, OpenGLPerspectiveCameras, OpenGLOrthographicCameras, PointLights, PointsRasterizationSettings, PointsRenderer, PointsRasterizer, AlphaCompositor, NormWeightedCompositor\n'), ((2942, 3076), 'pytorch3d.renderer.PointsRasterizationSettings', 'PointsRasterizationSettings', ([], {'image_size': 'self.image_size', 'radius': '(0.003)', 'points_per_pixel': '(10)', 'bin_size': 'None', 'max_points_per_bin': 'None'}), '(image_size=self.image_size, radius=0.003,\n points_per_pixel=10, bin_size=None, max_points_per_bin=None)\n', (2969, 3076), False, 'from pytorch3d.renderer import look_at_view_transform, OpenGLPerspectiveCameras, OpenGLOrthographicCameras, PointLights, PointsRasterizationSettings, PointsRenderer, PointsRasterizer, AlphaCompositor, NormWeightedCompositor\n'), ((6170, 6213), 'pytorch3d.structures.Pointclouds', 'Pointclouds', ([], {'points': 'grid_3d', 'features': 'faces'}), '(points=grid_3d, features=faces)\n', (6181, 6213), False, 'from pytorch3d.structures import Pointclouds\n'), ((9633, 9657), 'torch.stack', 'torch.stack', (['im_trans', '(1)'], {}), '(im_trans, 1)\n', (9644, 9657), False, 'import torch\n'), ((1986, 2024), 'math.tan', 'math.tan', (['(self.fov / 2 * math.pi / 180)'], {}), '(self.fov / 2 * math.pi / 180)\n', (1994, 2024), False, 'import math\n'), ((2057, 2095), 'math.tan', 'math.tan', (['(self.fov / 2 * math.pi / 180)'], {}), '(self.fov / 2 * math.pi / 180)\n', (2065, 2095), False, 'import math\n'), ((8578, 8646), 'torch.linspace', 'torch.linspace', (['(-math.pi / 180 * maxr)', '(math.pi / 180 * maxr)', 'nsample'], {}), '(-math.pi / 180 * maxr, math.pi / 180 * maxr, nsample)\n', (8592, 8646), False, 'import torch\n'), ((9372, 9420), 'pytorch3d.structures.Pointclouds', 'Pointclouds', ([], {'points': 'grid_3d_i', 'features': 'textures'}), '(points=grid_3d_i, features=textures)\n', (9383, 9420), False, 'from pytorch3d.structures import Pointclouds\n'), ((1795, 1815), 'torch.FloatTensor', 'torch.FloatTensor', (['R'], {}), '(R)\n', (1812, 1815), False, 'import torch\n'), ((1844, 1882), 'torch.zeros', 'torch.zeros', (['(1)', '(3)'], {'dtype': 'torch.float32'}), '(1, 3, dtype=torch.float32)\n', (1855, 1882), False, 'import torch\n'), ((2255, 2275), 'torch.FloatTensor', 'torch.FloatTensor', (['K'], {}), '(K)\n', (2272, 2275), False, 'import torch\n'), ((2313, 2329), 'torch.inverse', 'torch.inverse', (['K'], {}), '(K)\n', (2326, 2329), False, 'import torch\n'), ((3207, 3273), 'pytorch3d.renderer.PointsRasterizer', 'PointsRasterizer', ([], {'cameras': 'cameras', 'raster_settings': 'raster_settings'}), '(cameras=cameras, raster_settings=raster_settings)\n', (3223, 3273), False, 'from pytorch3d.renderer import look_at_view_transform, OpenGLPerspectiveCameras, OpenGLOrthographicCameras, PointLights, PointsRasterizationSettings, PointsRenderer, PointsRasterizer, AlphaCompositor, NormWeightedCompositor\n'), ((3344, 3382), 'pytorch3d.renderer.AlphaCompositor', 'AlphaCompositor', ([], {'composite_params': 'None'}), '(composite_params=None)\n', (3359, 3382), False, 'from pytorch3d.renderer import look_at_view_transform, OpenGLPerspectiveCameras, OpenGLOrthographicCameras, PointLights, PointsRasterizationSettings, PointsRenderer, PointsRasterizer, AlphaCompositor, 
NormWeightedCompositor\n'), ((4149, 4171), 'torch.ones_like', 'torch.ones_like', (['depth'], {}), '(depth)\n', (4164, 4171), False, 'import torch\n'), ((6780, 6808), 'torch.FloatTensor', 'torch.FloatTensor', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6797, 6808), False, 'import torch\n'), ((3596, 3648), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0, 0.0, self.rot_center_depth]'], {}), '([0.0, 0.0, self.rot_center_depth])\n', (3613, 3648), False, 'import torch\n'), ((4505, 4538), 'torch.FloatTensor', 'torch.FloatTensor', (['[w - 1, h - 1]'], {}), '([w - 1, h - 1])\n', (4522, 4538), False, 'import torch\n'), ((8699, 8728), 'torch.FloatTensor', 'torch.FloatTensor', (['[0, ri, 0]'], {}), '([0, ri, 0])\n', (8716, 8728), False, 'import torch\n')] |
"""
This file implements the Numpy framework of the
simulator. Its main use is in conjunction with the :py:mod:`optimizer`
module, and example programs are listed in the :py:mod:`simulator` module.
"""
import numpy as np
import copy
import qtree.operators as ops
import qtree.optimizer as opt
import qtree.utils as utils
def get_np_buckets(buckets, data_dict):
"""
Takes buckets and returns their Numpy counterparts.
Parameters
----------
buckets : list of list
buckets as returned by :py:meth:`circ2buckets`
and :py:meth:`reorder_buckets`.
data_dict : dict
dictionary containing values for the placeholder Tensors
Returns
-------
np_buckets : list of lists
Buckets having Numpy tensors in place of gate labels
"""
# import pdb
# pdb.set_trace()
# Create numpy buckets
np_buckets = []
for bucket in buckets:
np_bucket = []
for tensor in bucket:
# sort tensor dimensions
transpose_order = np.argsort(list(map(int, tensor.indices)))
data = data_dict[tensor.data_key]
new_tensor = tensor.copy(
indices=(tensor.indices[pp] for pp in transpose_order),
data=np.transpose(data.copy(), transpose_order))
np_bucket.append(new_tensor)
np_buckets.append(np_bucket)
return np_buckets
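# Illustration of the index sorting done in get_np_buckets (toy values, not qtree
# Var objects): if a tensor's indices map to ints (3, 1, 2), np.argsort returns
# the transpose order [1, 2, 0], and np.transpose(data, [1, 2, 0]) reorders the
# axes so they correspond to the sorted indices (1, 2, 3).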
def slice_np_buckets(np_buckets, slice_dict):
"""
Takes slices of the tensors in Numpy buckets
over the variables in idx_parallel.
Parameters
----------
np_buckets : list of lists
Buckets containing Numpy tensors
slice_dict : dict
Current subtensor along the sliced variables
in the form {variable: slice}
Returns
-------
sliced_buckets : list of lists
buckets with sliced tensors
"""
# import pdb
# pdb.set_trace()
# Create tf buckets from unordered buckets
sliced_buckets = []
for bucket in np_buckets:
sliced_bucket = []
for tensor in bucket:
slice_bounds = []
for idx in tensor.indices:
try:
slice_bounds.append(slice_dict[idx])
except KeyError:
slice_bounds.append(slice(None))
sliced_bucket.append(
tensor.copy(data=tensor.data[tuple(slice_bounds)])
)
sliced_buckets.append(sliced_bucket)
return sliced_buckets
def get_sliced_np_buckets(buckets, data_dict, slice_dict):
"""
Takes placeholder buckets and populates them with
actual sliced values. This function is a sum of
:func:`get_np_buckets` and :func:`slice_np_buckets`
Parameters
----------
buckets : list of list
buckets as returned by :py:meth:`circ2buckets`
and :py:meth:`reorder_buckets`.
data_dict : dict
dictionary containing values for the placeholder Tensors
slice_dict : dict
Current subtensor along the sliced variables
in the form {variable: slice}
Returns
-------
sliced_buckets : list of lists
buckets with sliced Numpy tensors
"""
# import pdb
# pdb.set_trace()
# Create np buckets from buckets
sliced_buckets = []
for bucket in buckets:
sliced_bucket = []
for tensor in bucket:
# get data
# sort tensor dimensions
transpose_order = np.argsort(list(map(int, tensor.indices)))
data = np.transpose(data_dict[tensor.data_key],
transpose_order)
# transpose indices
indices_sorted = [tensor.indices[pp]
for pp in transpose_order]
# slice data
slice_bounds = []
for idx in indices_sorted:
try:
slice_bounds.append(slice_dict[idx])
except KeyError:
slice_bounds.append(slice(None))
data = data[tuple(slice_bounds)]
# update indices
indices_sliced = [idx.copy(size=size) for idx, size in
zip(indices_sorted, data.shape)]
sliced_bucket.append(
tensor.copy(indices=indices_sliced, data=data))
sliced_buckets.append(sliced_bucket)
return sliced_buckets
def process_bucket_np(bucket, no_sum=False):
"""
Process bucket in the bucket elimination algorithm.
We multiply all tensors in the bucket and sum over the
variable which the bucket corresponds to. This way the
variable of the bucket is removed from the expression.
Parameters
----------
bucket : list
List containing tuples of tensors (gates) with their indices.
no_sum : bool
If no summation should be done over the buckets's variable
Returns
-------
tensor : optimizer.Tensor
wrapper tensor object holding the result
"""
result_indices = bucket[0].indices
result_data = bucket[0].data
for tensor in bucket[1:]:
expr = utils.get_einsum_expr(
list(map(int, result_indices)), list(map(int, tensor.indices))
)
result_data = np.einsum(expr, result_data, tensor.data)
# Merge and sort indices and shapes
result_indices = tuple(sorted(
set(result_indices + tensor.indices),
key=int)
)
if len(result_indices) > 0:
if not no_sum: # trim first index
first_index, *result_indices = result_indices
else:
first_index, *_ = result_indices
tag = first_index.identity
else:
tag = 'f'
result_indices = []
# reduce
if no_sum:
result = opt.Tensor(f'E{tag}', result_indices,
data=result_data)
else:
result = opt.Tensor(f'E{tag}', result_indices,
data=np.sum(result_data, axis=0))
return result
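# Toy illustration of the contraction in process_bucket_np (hypothetical shapes,
# plain ndarrays instead of optimizer.Tensor): for A with indices (0, 1) and
# B with indices (0, 2),
#   merged = np.einsum('ij,ik->ijk', A, B)   # multiply over the shared index 0
#   result = merged.sum(axis=0)              # sum out the bucket variable
# which leaves a tensor over the remaining indices (1, 2).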
| [
"qtree.optimizer.Tensor",
"numpy.transpose",
"numpy.einsum",
"numpy.sum"
] | [((5308, 5349), 'numpy.einsum', 'np.einsum', (['expr', 'result_data', 'tensor.data'], {}), '(expr, result_data, tensor.data)\n', (5317, 5349), True, 'import numpy as np\n'), ((5845, 5900), 'qtree.optimizer.Tensor', 'opt.Tensor', (['f"""E{tag}"""', 'result_indices'], {'data': 'result_data'}), "(f'E{tag}', result_indices, data=result_data)\n", (5855, 5900), True, 'import qtree.optimizer as opt\n'), ((3581, 3638), 'numpy.transpose', 'np.transpose', (['data_dict[tensor.data_key]', 'transpose_order'], {}), '(data_dict[tensor.data_key], transpose_order)\n', (3593, 3638), True, 'import numpy as np\n'), ((6027, 6054), 'numpy.sum', 'np.sum', (['result_data'], {'axis': '(0)'}), '(result_data, axis=0)\n', (6033, 6054), True, 'import numpy as np\n')] |
from typing import Dict, List, Tuple
from .reference_network import ReferenceNetwork
from networkx.algorithms.dag import topological_sort
import numpy as np
import deep500 as d5
import networkx as nx
class ReferenceGraphExecutor(d5.GraphExecutor):
def __init__(self, model: d5.ops.OnnxModel, device: d5.DeviceType, events: List[d5.ExecutorEvent] = [],
use_python_ops=False):
"""
Creates a reference operator graph executor. Can use Python reference implementations (very slow)
or compiled C++ reference implementations.
@param model The model to build the network from
@param device The device to use
@param events Event objects to invoke
@param use_python_ops If True, uses Python reference implementations, otherwise compiles and runs
C++ implementations
"""
super(ReferenceGraphExecutor, self).__init__(ReferenceNetwork(device), events)
if isinstance(device, d5.GPUDevice):
print('Warning: GPU reference operators are currently not implemented. Falling back to CPU')
self.model = model
self.use_python_ops = use_python_ops
def inference(self, input_dict: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
self.network.train_mode = False
for event in self.events:
event.before_executor(input_dict)
if not self.network.built_graph:
# Choose visitor according to settings
if self.use_python_ops:
from .reference_build_graph_visitor_impl import ReferenceBuildGraphVisitor
else:
from .reference_build_graph_visitor_impl_cpp import ReferenceBuildGraphVisitor
print('Compiling operators, this may take a while')
# Build graph
self.network.variables.update(input_dict)
self.model.accept(ReferenceBuildGraphVisitor(), self.network)
self.network.nodes_sorted_fwd = list(topological_sort(self.network.graph))
self.network.nodes_sorted_bwd = list(reversed(list(topological_sort(self.network.graph))))
self.network.built_graph = True
out_dict = self.network.inference(input_dict)
for event in self.events:
event.after_inference(out_dict)
self.network.output_dict.update(out_dict)
return out_dict
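    # Toy illustration of the node ordering used above (hypothetical 3-node graph):
    # for a DAG with edges A->B and B->C, list(topological_sort(G)) yields
    # [A, B, C] for the forward pass, and the reversed list [C, B, A] is used
    # for backpropagation.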
def inference_and_backprop(self, input_dict: Dict[str, np.ndarray], y: str= 'loss') -> Dict[str, np.ndarray]:
self.network.train_mode = True
for event in self.events:
event.before_executor(input_dict)
if not self.network.built_graph:
# Choose visitor according to settings
if self.use_python_ops:
from .reference_build_graph_visitor_impl import ReferenceBuildGraphVisitor
else:
from .reference_build_graph_visitor_impl_cpp import ReferenceBuildGraphVisitor
print('Compiling operators, this may take a while')
# Build graph
self.network.variables.update(input_dict)
self.model.accept(ReferenceBuildGraphVisitor(), self.network)
self.network.nodes_sorted_fwd = list(topological_sort(self.network.graph))
self.network.nodes_sorted_bwd = list(reversed(list(topological_sort(self.network.graph))))
self.network.built_graph = True
out_dict = self.network.inference_and_backprop(input_dict, y)
for event in self.events:
event.after_backprop(out_dict)
return out_dict | [
"networkx.algorithms.dag.topological_sort"
] | [((2024, 2060), 'networkx.algorithms.dag.topological_sort', 'topological_sort', (['self.network.graph'], {}), '(self.network.graph)\n', (2040, 2060), False, 'from networkx.algorithms.dag import topological_sort\n'), ((3299, 3335), 'networkx.algorithms.dag.topological_sort', 'topological_sort', (['self.network.graph'], {}), '(self.network.graph)\n', (3315, 3335), False, 'from networkx.algorithms.dag import topological_sort\n'), ((2125, 2161), 'networkx.algorithms.dag.topological_sort', 'topological_sort', (['self.network.graph'], {}), '(self.network.graph)\n', (2141, 2161), False, 'from networkx.algorithms.dag import topological_sort\n'), ((3400, 3436), 'networkx.algorithms.dag.topological_sort', 'topological_sort', (['self.network.graph'], {}), '(self.network.graph)\n', (3416, 3436), False, 'from networkx.algorithms.dag import topological_sort\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.14
# In conjunction with Tcl version 8.6
# Aug 03, 2018 09:52:30 AM
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import Main_support
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = Tk()
top = Main (root)
Main_support.init(root, top)
root.mainloop()
w = None
def create_Main(root, *args, **kwargs):
'''Starting point when module is imported by another program.'''
global w, w_win, rt
rt = root
w = Toplevel (root)
top = Main (w)
Main_support.init(w, top, *args, **kwargs)
return (w, top)
def destroy_Main():
global w
w.destroy()
w = None
class Main:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#d9d9d9' # X11 color: 'gray85'
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.',background=_bgcolor)
self.style.configure('.',foreground=_fgcolor)
self.style.configure('.',font="TkDefaultFont")
self.style.map('.',background=
[('selected', _compcolor), ('active',_ana2color)])
top.geometry("1024x700+403+207")
top.title("Main")
top.configure(background="#d9d9d9")
top.configure(highlightbackground="#d9d9d9")
top.configure(highlightcolor="black")
self.frmMenuBar = Frame(top)
self.frmMenuBar.place(relx=0.0, rely=0.0, relheight=0.06, relwidth=1.0)
self.frmMenuBar.configure(relief=GROOVE)
self.frmMenuBar.configure(borderwidth="2")
self.frmMenuBar.configure(relief=GROOVE)
self.frmMenuBar.configure(background="#d9d9d9")
self.frmMenuBar.configure(highlightbackground="#d9d9d9")
self.frmMenuBar.configure(highlightcolor="black")
self.frmMenuBar.configure(width=125)
self.tbtnExit = ttk.Button(self.frmMenuBar)
self.tbtnExit.place(relx=0.96, rely=0.1, height=32, width=32)
self.tbtnExit.configure(command=Main_support.mbExit)
self.tbtnExit.configure(takefocus="")
self.tbtnExit.configure(text='''Tbutton''')
self._img1 = PhotoImage(file="./graphics/application-exit.png")
self.tbtnExit.configure(image=self._img1)
self.tbtnFileOpen = ttk.Button(self.frmMenuBar)
self.tbtnFileOpen.place(relx=0.04, rely=0.1, height=32, width=32)
self.tbtnFileOpen.configure(command=Main_support.mbFileOpen)
self.tbtnFileOpen.configure(takefocus="")
self.tbtnFileOpen.configure(text='''Tbutton''')
self._img2 = PhotoImage(file="./graphics/document-open.png")
self.tbtnFileOpen.configure(image=self._img2)
self.tbtnFileNew = ttk.Button(self.frmMenuBar)
self.tbtnFileNew.place(relx=0.0, rely=0.1, height=32, width=32)
self.tbtnFileNew.configure(command=Main_support.mbFileNew)
self.tbtnFileNew.configure(takefocus="")
self.tbtnFileNew.configure(text='''Tbutton''')
self._img3 = PhotoImage(file="./graphics/document-new.png")
self.tbtnFileNew.configure(image=self._img3)
self.tbtnCopy = ttk.Button(self.frmMenuBar)
self.tbtnCopy.place(relx=0.14, rely=0.1, height=32, width=32)
self.tbtnCopy.configure(command=Main_support.mbCopy)
self.tbtnCopy.configure(takefocus="")
self.tbtnCopy.configure(text='''Tbutton''')
self._img4 = PhotoImage(file="./graphics/edit-copy.png")
self.tbtnCopy.configure(image=self._img4)
self.tbtnPaste = ttk.Button(self.frmMenuBar)
self.tbtnPaste.place(relx=0.2, rely=0.1, height=32, width=32)
self.tbtnPaste.configure(command=Main_support.mbPaste)
self.tbtnPaste.configure(takefocus="")
self.tbtnPaste.configure(text='''Tbutton''')
self._img5 = PhotoImage(file="./graphics/edit-paste.png")
self.tbtnPaste.configure(image=self._img5)
self.tbtnAbout = ttk.Button(self.frmMenuBar)
self.tbtnAbout.place(relx=0.91, rely=0.1, height=32, width=32)
self.tbtnAbout.configure(command=Main_support.mbAbout)
self.tbtnAbout.configure(takefocus="")
self.tbtnAbout.configure(text='''Tbutton''')
self._img6 = PhotoImage(file="./graphics/help-about.png")
self.tbtnAbout.configure(image=self._img6)
self.tbtnCut = ttk.Button(self.frmMenuBar)
self.tbtnCut.place(relx=0.17, rely=0.1, height=32, width=32)
self.tbtnCut.configure(command=Main_support.mbCut)
self.tbtnCut.configure(takefocus="")
self.tbtnCut.configure(text='''Tbutton''')
self._img7 = PhotoImage(file="./graphics/edit-cut.png")
self.tbtnCut.configure(image=self._img7)
self.Frame2 = Frame(top)
self.Frame2.place(relx=0.01, rely=0.07, relheight=0.92, relwidth=0.37)
self.Frame2.configure(relief=GROOVE)
self.Frame2.configure(borderwidth="2")
self.Frame2.configure(relief=GROOVE)
self.Frame2.configure(background="#d9d9d9")
self.Frame2.configure(highlightbackground="#d9d9d9")
self.Frame2.configure(highlightcolor="black")
self.Frame2.configure(width=375)
self.Frame3 = Frame(top)
self.Frame3.place(relx=0.38, rely=0.07, relheight=0.52, relwidth=0.61)
self.Frame3.configure(relief=GROOVE)
self.Frame3.configure(borderwidth="2")
self.Frame3.configure(relief=GROOVE)
self.Frame3.configure(background="#d9d9d9")
self.Frame3.configure(highlightbackground="#d9d9d9")
self.Frame3.configure(highlightcolor="black")
self.Frame3.configure(width=625)
self.Frame4 = Frame(top)
self.Frame4.place(relx=0.38, rely=0.6, relheight=0.39, relwidth=0.61)
self.Frame4.configure(relief=GROOVE)
self.Frame4.configure(borderwidth="2")
self.Frame4.configure(relief=GROOVE)
self.Frame4.configure(background="#d9d9d9")
self.Frame4.configure(highlightbackground="#d9d9d9")
self.Frame4.configure(highlightcolor="black")
self.Frame4.configure(width=625)
if __name__ == '__main__':
vp_start_gui()
| [
"tkinter.ttk.Style",
"tkinter.ttk.Button",
"Main_support.init"
] | [((548, 576), 'Main_support.init', 'Main_support.init', (['root', 'top'], {}), '(root, top)\n', (565, 576), False, 'import Main_support\n'), ((811, 853), 'Main_support.init', 'Main_support.init', (['w', 'top', '*args'], {}), '(w, top, *args, **kwargs)\n', (828, 853), False, 'import Main_support\n'), ((1409, 1420), 'tkinter.ttk.Style', 'ttk.Style', ([], {}), '()\n', (1418, 1420), True, 'import tkinter.ttk as ttk\n'), ((2524, 2551), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (2534, 2551), True, 'import tkinter.ttk as ttk\n'), ((2940, 2967), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (2950, 2967), True, 'import tkinter.ttk as ttk\n'), ((3376, 3403), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (3386, 3403), True, 'import tkinter.ttk as ttk\n'), ((3801, 3828), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (3811, 3828), True, 'import tkinter.ttk as ttk\n'), ((4207, 4234), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (4217, 4234), True, 'import tkinter.ttk as ttk\n'), ((4619, 4646), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (4629, 4646), True, 'import tkinter.ttk as ttk\n'), ((5030, 5057), 'tkinter.ttk.Button', 'ttk.Button', (['self.frmMenuBar'], {}), '(self.frmMenuBar)\n', (5040, 5057), True, 'import tkinter.ttk as ttk\n')] |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class OpenFlow(Base):
"""OpenFlow is a Layer 2 communications protocol that gives access to the forwarding plane of a network switch or router over the network.
The OpenFlow class encapsulates a required openFlow resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'openFlow'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'PortRole': 'portRole',
'RunningState': 'runningState',
}
_SDM_ENUM_MAP = {
'portRole': ['control', 'traffic', 'controlAndTraffic'],
'runningState': ['unknown', 'stopped', 'stopping', 'starting', 'started'],
}
def __init__(self, parent, list_op=False):
super(OpenFlow, self).__init__(parent, list_op)
@property
def Device(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.device_4586a4936a98f79a4eb811335e8b0199.Device): An instance of the Device class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.device_4586a4936a98f79a4eb811335e8b0199 import Device
if self._properties.get('Device', None) is not None:
return self._properties.get('Device')
else:
return Device(self)
@property
def EthernetTrafficEndPoint(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernettrafficendpoint_399c5a8996b8d783c5205ec4f1afc1a9.EthernetTrafficEndPoint): An instance of the EthernetTrafficEndPoint class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernettrafficendpoint_399c5a8996b8d783c5205ec4f1afc1a9 import EthernetTrafficEndPoint
if self._properties.get('EthernetTrafficEndPoint', None) is not None:
return self._properties.get('EthernetTrafficEndPoint')
else:
return EthernetTrafficEndPoint(self)
@property
def HostTopologyLearnedInformation(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.hosttopologylearnedinformation_4110abe6bc708b97f65a28c397fa159a.HostTopologyLearnedInformation): An instance of the HostTopologyLearnedInformation class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.hosttopologylearnedinformation_4110abe6bc708b97f65a28c397fa159a import HostTopologyLearnedInformation
if self._properties.get('HostTopologyLearnedInformation', None) is not None:
return self._properties.get('HostTopologyLearnedInformation')
else:
return HostTopologyLearnedInformation(self)._select()
@property
def Ipv4TrafficEndPoint(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d.Ipv4TrafficEndPoint): An instance of the Ipv4TrafficEndPoint class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d import Ipv4TrafficEndPoint
if self._properties.get('Ipv4TrafficEndPoint', None) is not None:
return self._properties.get('Ipv4TrafficEndPoint')
else:
return Ipv4TrafficEndPoint(self)
@property
def Ipv6TrafficEndPoint(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv6trafficendpoint_5ba047ad864d88c7c789f996fb9125d8.Ipv6TrafficEndPoint): An instance of the Ipv6TrafficEndPoint class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv6trafficendpoint_5ba047ad864d88c7c789f996fb9125d8 import Ipv6TrafficEndPoint
if self._properties.get('Ipv6TrafficEndPoint', None) is not None:
return self._properties.get('Ipv6TrafficEndPoint')
else:
return Ipv6TrafficEndPoint(self)
@property
def LearnedInformation(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_5ae2c6e302466a4ce0ccf9b15b6452d6.LearnedInformation): An instance of the LearnedInformation class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_5ae2c6e302466a4ce0ccf9b15b6452d6 import LearnedInformation
if self._properties.get('LearnedInformation', None) is not None:
return self._properties.get('LearnedInformation')
else:
return LearnedInformation(self)._select()
@property
def MplsTrafficEndPoint(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstrafficendpoint_9c9576354d6d254197b269b117417591.MplsTrafficEndPoint): An instance of the MplsTrafficEndPoint class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstrafficendpoint_9c9576354d6d254197b269b117417591 import MplsTrafficEndPoint
if self._properties.get('MplsTrafficEndPoint', None) is not None:
return self._properties.get('MplsTrafficEndPoint')
else:
return MplsTrafficEndPoint(self)
@property
def OfTopologyLearnedInformation(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.oftopologylearnedinformation_87be98fe03804b7931f394eeeb6ce91e.OfTopologyLearnedInformation): An instance of the OfTopologyLearnedInformation class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.oftopologylearnedinformation_87be98fe03804b7931f394eeeb6ce91e import OfTopologyLearnedInformation
if self._properties.get('OfTopologyLearnedInformation', None) is not None:
return self._properties.get('OfTopologyLearnedInformation')
else:
return OfTopologyLearnedInformation(self)._select()
@property
def SwitchLearnedInformation(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.switchlearnedinformation_229c33dcb7a8e23f875f8a6acf5d4f8a.SwitchLearnedInformation): An instance of the SwitchLearnedInformation class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.switchlearnedinformation_229c33dcb7a8e23f875f8a6acf5d4f8a import SwitchLearnedInformation
if self._properties.get('SwitchLearnedInformation', None) is not None:
return self._properties.get('SwitchLearnedInformation')
else:
return SwitchLearnedInformation(self)._select()
@property
def TrafficEndPoint(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.trafficendpoint_3120038095a42c08bd61e91959198aa0.TrafficEndPoint): An instance of the TrafficEndPoint class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.trafficendpoint_3120038095a42c08bd61e91959198aa0 import TrafficEndPoint
if self._properties.get('TrafficEndPoint', None) is not None:
return self._properties.get('TrafficEndPoint')
else:
return TrafficEndPoint(self)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, the openFlow object is enabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def PortRole(self):
# type: () -> str
"""
Returns
-------
- str(control | traffic | controlAndTraffic): Indicates the role of the port in the protocol configuration.
"""
return self._get_attribute(self._SDM_ATT_MAP['PortRole'])
@PortRole.setter
def PortRole(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PortRole'], value)
@property
def RunningState(self):
# type: () -> str
"""
Returns
-------
- str(unknown | stopped | stopping | starting | started): Indicates the state of the OpenFlow protocol on the port.
"""
return self._get_attribute(self._SDM_ATT_MAP['RunningState'])
def update(self, Enabled=None, PortRole=None):
# type: (bool, str) -> OpenFlow
"""Updates openFlow resource on the server.
Args
----
- Enabled (bool): If true, the openFlow object is enabled.
- PortRole (str(control | traffic | controlAndTraffic)): Indicates the role of the port in the protocol configuration.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
This describes the start value of the trigger settings.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
This describes the stop value of the trigger settings.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
| [
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.device_4586a4936a98f79a4eb811335e8b0199.Device",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.switchlearnedinformation_229c33dcb7a8e23f875f8a6acf5d4f8a.SwitchLearnedInformation",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstrafficendpoint_9c9576354d6d254197b269b117417591.MplsTrafficEndPoint",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernettrafficendpoint_399c5a8996b8d783c5205ec4f1afc1a9.EthernetTrafficEndPoint",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.trafficendpoint_3120038095a42c08bd61e91959198aa0.TrafficEndPoint",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.oftopologylearnedinformation_87be98fe03804b7931f394eeeb6ce91e.OfTopologyLearnedInformation",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.hosttopologylearnedinformation_4110abe6bc708b97f65a28c397fa159a.HostTopologyLearnedInformation",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_5ae2c6e302466a4ce0ccf9b15b6452d6.LearnedInformation",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv6trafficendpoint_5ba047ad864d88c7c789f996fb9125d8.Ipv6TrafficEndPoint",
"ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d.Ipv4TrafficEndPoint"
] | [((2647, 2659), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.device_4586a4936a98f79a4eb811335e8b0199.Device', 'Device', (['self'], {}), '(self)\n', (2653, 2659), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.device_4586a4936a98f79a4eb811335e8b0199 import Device\n'), ((3439, 3468), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernettrafficendpoint_399c5a8996b8d783c5205ec4f1afc1a9.EthernetTrafficEndPoint', 'EthernetTrafficEndPoint', (['self'], {}), '(self)\n', (3462, 3468), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ethernettrafficendpoint_399c5a8996b8d783c5205ec4f1afc1a9 import EthernetTrafficEndPoint\n'), ((5098, 5123), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d.Ipv4TrafficEndPoint', 'Ipv4TrafficEndPoint', (['self'], {}), '(self)\n', (5117, 5123), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d import Ipv4TrafficEndPoint\n'), ((5871, 5896), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv6trafficendpoint_5ba047ad864d88c7c789f996fb9125d8.Ipv6TrafficEndPoint', 'Ipv6TrafficEndPoint', (['self'], {}), '(self)\n', (5890, 5896), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ipv6trafficendpoint_5ba047ad864d88c7c789f996fb9125d8 import Ipv6TrafficEndPoint\n'), ((7418, 7443), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstrafficendpoint_9c9576354d6d254197b269b117417591.MplsTrafficEndPoint', 'MplsTrafficEndPoint', (['self'], {}), '(self)\n', (7437, 7443), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstrafficendpoint_9c9576354d6d254197b269b117417591 import MplsTrafficEndPoint\n'), ((9851, 9872), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.trafficendpoint_3120038095a42c08bd61e91959198aa0.TrafficEndPoint', 'TrafficEndPoint', (['self'], {}), '(self)\n', (9866, 9872), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.trafficendpoint_3120038095a42c08bd61e91959198aa0 import TrafficEndPoint\n'), ((4304, 4340), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.hosttopologylearnedinformation_4110abe6bc708b97f65a28c397fa159a.HostTopologyLearnedInformation', 'HostTopologyLearnedInformation', (['self'], {}), '(self)\n', (4334, 4340), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.hosttopologylearnedinformation_4110abe6bc708b97f65a28c397fa159a import HostTopologyLearnedInformation\n'), ((6636, 6660), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_5ae2c6e302466a4ce0ccf9b15b6452d6.LearnedInformation', 'LearnedInformation', (['self'], {}), '(self)\n', (6654, 6660), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.learnedinformation_5ae2c6e302466a4ce0ccf9b15b6452d6 import LearnedInformation\n'), ((8263, 8297), 'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.oftopologylearnedinformation_87be98fe03804b7931f394eeeb6ce91e.OfTopologyLearnedInformation', 'OfTopologyLearnedInformation', (['self'], {}), '(self)\n', (8291, 8297), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.oftopologylearnedinformation_87be98fe03804b7931f394eeeb6ce91e import OfTopologyLearnedInformation\n'), ((9095, 9125), 
'ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.switchlearnedinformation_229c33dcb7a8e23f875f8a6acf5d4f8a.SwitchLearnedInformation', 'SwitchLearnedInformation', (['self'], {}), '(self)\n', (9119, 9125), False, 'from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.switchlearnedinformation_229c33dcb7a8e23f875f8a6acf5d4f8a import SwitchLearnedInformation\n')] |
import torch
import torch.nn as nn
import numpy as np
from Layers import layers
from torch.nn import functional as F
def fc(input_shape, num_classes, dense_classifier=False, pretrained=False, L=6, N=100, nonlinearity=nn.ReLU()):
size = np.prod(input_shape)
# Linear feature extractor
modules = [nn.Flatten()]
modules.append(layers.Linear(size, N))
modules.append(nonlinearity)
for i in range(L-2):
modules.append(layers.Linear(N,N))
modules.append(nonlinearity)
# Linear classifier
if dense_classifier:
modules.append(nn.Linear(N, num_classes))
else:
modules.append(layers.Linear(N, num_classes))
model = nn.Sequential(*modules)
# Pretrained model
if pretrained:
print("WARNING: this model does not have pretrained weights.")
return model
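# Usage sketch (illustrative comment only, not part of the original file; the
# input shape, class count and batch size below are assumptions):
#   model = fc(input_shape=(3, 32, 32), num_classes=10)
#   logits = model(torch.randn(8, 3, 32, 32))  # -> tensor of shape (8, 10)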
def conv(input_shape, num_classes, dense_classifier=False, pretrained=False, L=3, N=32, nonlinearity=nn.ReLU()):
channels, width, height = input_shape
# Convolutional feature extractor
modules = []
modules.append(layers.Conv2d(channels, N, kernel_size=3, padding=3//2))
modules.append(nonlinearity)
for i in range(L-2):
modules.append(layers.Conv2d(N, N, kernel_size=3, padding=3//2))
modules.append(nonlinearity)
# Linear classifier
modules.append(nn.Flatten())
if dense_classifier:
modules.append(nn.Linear(N * width * height, num_classes))
else:
modules.append(layers.Linear(N * width * height, num_classes))
model = nn.Sequential(*modules)
# Pretrained model
if pretrained:
print("WARNING: this model does not have pretrained weights.")
    return model
| [
"numpy.prod",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.Flatten",
"Layers.layers.Linear",
"Layers.layers.Conv2d",
"torch.nn.Linear"
] | [((219, 228), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (226, 228), True, 'import torch.nn as nn\n'), ((240, 260), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (247, 260), True, 'import numpy as np\n'), ((647, 670), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (660, 670), True, 'import torch.nn as nn\n'), ((898, 907), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (905, 907), True, 'import torch.nn as nn\n'), ((1466, 1489), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (1479, 1489), True, 'import torch.nn as nn\n'), ((306, 318), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (316, 318), True, 'import torch.nn as nn\n'), ((337, 359), 'Layers.layers.Linear', 'layers.Linear', (['size', 'N'], {}), '(size, N)\n', (350, 359), False, 'from Layers import layers\n'), ((1022, 1079), 'Layers.layers.Conv2d', 'layers.Conv2d', (['channels', 'N'], {'kernel_size': '(3)', 'padding': '(3 // 2)'}), '(channels, N, kernel_size=3, padding=3 // 2)\n', (1035, 1079), False, 'from Layers import layers\n'), ((1281, 1293), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (1291, 1293), True, 'import torch.nn as nn\n'), ((434, 453), 'Layers.layers.Linear', 'layers.Linear', (['N', 'N'], {}), '(N, N)\n', (447, 453), False, 'from Layers import layers\n'), ((552, 577), 'torch.nn.Linear', 'nn.Linear', (['N', 'num_classes'], {}), '(N, num_classes)\n', (561, 577), True, 'import torch.nn as nn\n'), ((606, 635), 'Layers.layers.Linear', 'layers.Linear', (['N', 'num_classes'], {}), '(N, num_classes)\n', (619, 635), False, 'from Layers import layers\n'), ((1152, 1202), 'Layers.layers.Conv2d', 'layers.Conv2d', (['N', 'N'], {'kernel_size': '(3)', 'padding': '(3 // 2)'}), '(N, N, kernel_size=3, padding=3 // 2)\n', (1165, 1202), False, 'from Layers import layers\n'), ((1337, 1379), 'torch.nn.Linear', 'nn.Linear', (['(N * width * height)', 'num_classes'], {}), '(N * width * height, num_classes)\n', (1346, 1379), True, 'import torch.nn as nn\n'), ((1408, 1454), 'Layers.layers.Linear', 'layers.Linear', (['(N * width * height)', 'num_classes'], {}), '(N * width * height, num_classes)\n', (1421, 1454), False, 'from Layers import layers\n')] |
'''
07 - The digits recognition dataset
Up until now, you have been performing binary classification, since the target variable
had two possible outcomes. Hugo, however, got to perform multi-class classification in the
videos, where the target variable could take on three possible outcomes. Why does he get
to have all the fun?! In the following exercises, you'll be working with the MNIST digits
recognition dataset, which has 10 classes, the digits 0 through 9! A reduced version of the
MNIST dataset is one of scikit-learn's included datasets, and that is the one we will use in
this exercise.
Each sample in this scikit-learn dataset is an 8x8 image representing a handwritten digit.
Each pixel is represented by an integer in the range 0 to 16, indicating varying levels of
black. Recall that scikit-learn's built-in datasets are of type Bunch, which are dictionary-like
objects. Helpfully for the MNIST dataset, scikit-learn provides an 'images' key in addition to
the 'data' and 'target' keys that you have seen with the Iris data. Because it is a 2D array of
the images corresponding to each sample, this 'images' key is useful for visualizing the images,
as you'll see in this exercise (for more on plotting 2D arrays, see Chapter 2 of DataCamp's course
on Data Visualization with Python). On the other hand, the 'data' key contains the feature array -
that is, the images as a flattened array of 64 pixels.
Notice that you can access the keys of these Bunch objects in two different ways: By using the .
notation, as in digits.images, or the [] notation, as in digits['images'].
For more on the MNIST data, check out this exercise in Part 1 of DataCamp's Importing Data in
Python course. There, the full version of the MNIST dataset is used, in which the images are 28x28.
It is a famous dataset in machine learning and computer vision, and frequently used as a benchmark
to evaluate the performance of a new model.
INSTRUCTIONS
- Import datasets from sklearn and matplotlib.pyplot as plt.
- Load the digits dataset using the .load_digits() method on datasets.
- Print the keys and DESCR of digits.
- Print the shape of images and data keys using the . notation.
- Display the 1010th image using plt.imshow(). This has been done for you, so hit 'Submit Answer' to see which handwritten digit this happens to be!
'''
# Import necessary modules
from sklearn import datasets
import matplotlib.pyplot as plt
# Load the digits dataset: digits
digits = datasets.load_digits()
# Print the keys and DESCR of the dataset
print(digits.keys())
print(digits.DESCR)
# Print the shape of the images and data keys
print(digits.images.shape)
print(digits.data.shape)
# Display digit 1010
plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
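# Added illustration (not part of the original exercise): as described above, the
# 'images' and 'data' keys hold the same pixels, so each 8x8 image is just the
# corresponding 64-value row of 'data' flattened.
import numpy as np
assert np.array_equal(digits.images[1010].ravel(), digits.data[1010])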
'''
<script.py> output:
dict_keys(['data', 'target', 'target_names', 'images', 'DESCR'])
Optical Recognition of Handwritten Digits Data Set
===================================================
Notes
-----
Data Set Characteristics:
:Number of Instances: 5620
:Number of Attributes: 64
:Attribute Information: 8x8 image of integer pixels in the range 0..16.
:Missing Attribute Values: None
:Creator: <NAME> (alpaydin '@' boun.edu.tr)
:Date: July; 1998
This is a copy of the test set of the UCI ML hand-written digits datasets
http://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits
The data set contains images of hand-written digits: 10 classes where
each class refers to a digit.
Preprocessing programs made available by NIST were used to extract
normalized bitmaps of handwritten digits from a preprinted form. From a
total of 43 people, 30 contributed to the training set and different 13
to the test set. 32x32 bitmaps are divided into nonoverlapping blocks of
4x4 and the number of on pixels are counted in each block. This generates
an input matrix of 8x8 where each element is an integer in the range
0..16. This reduces dimensionality and gives invariance to small
distortions.
For info on NIST preprocessing routines, see <NAME>, <NAME>, G.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, NIST Form-Based Handprint Recognition System, NISTIR 5469,
1994.
References
----------
- <NAME> (1995) Methods of Combining Multiple Classifiers and Their
Applications to Handwritten Digit Recognition, MSc Thesis, Institute of
Graduate Studies in Science and Engineering, Bogazici University.
- <NAME>, <NAME> (1998) Cascading Classifiers, Kybernetika.
- <NAME> and <NAME> and <NAME> and <NAME>.
Linear dimensionalityreduction using relevance weighted LDA. School of
Electrical and Electronic Engineering Nanyang Technological University.
2005.
- <NAME>. A New Approximate Maximal Margin Classification
Algorithm. NIPS. 2000.
(1797, 8, 8)
(1797, 64)
'''
| [
"matplotlib.pyplot.imshow",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.show"
] | [((2522, 2544), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (2542, 2544), False, 'from sklearn import datasets\n'), ((2750, 2826), 'matplotlib.pyplot.imshow', 'plt.imshow', (['digits.images[1010]'], {'cmap': 'plt.cm.gray_r', 'interpolation': '"""nearest"""'}), "(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')\n", (2760, 2826), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2835, 2837), True, 'import matplotlib.pyplot as plt\n')] |
from flask import Blueprint, request
import logging
from flask_socketio import emit
from .. import socketio
api = Blueprint(
'api_blueprint',
__name__,
url_prefix='/api',
)
logger = logging.getLogger(__name__)
@api.route('/test')
def test_route():
print('test')
return 'test'
@socketio.on('connect')
def on_connect():
emit('my_response', {'data': 'Connected', 'count': 0})
logger.debug('Client connected {}'.format(request))
@socketio.on('disconnect')
def on_disconnect():
logger.debug('Client disconnected {}'.format(request))
| [
"logging.getLogger",
"flask.Blueprint",
"flask_socketio.emit"
] | [((116, 171), 'flask.Blueprint', 'Blueprint', (['"""api_blueprint"""', '__name__'], {'url_prefix': '"""/api"""'}), "('api_blueprint', __name__, url_prefix='/api')\n", (125, 171), False, 'from flask import Blueprint, request\n'), ((197, 224), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (214, 224), False, 'import logging\n'), ((349, 403), 'flask_socketio.emit', 'emit', (['"""my_response"""', "{'data': 'Connected', 'count': 0}"], {}), "('my_response', {'data': 'Connected', 'count': 0})\n", (353, 403), False, 'from flask_socketio import emit\n')] |
def handle(controller):
from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network
from collections import defaultdict
# relations for all sites
ctrls_by_site = defaultdict(list)
ctrl_sites = ControllerSite.objects.all()
for ctrl_site in ctrl_sites:
ctrls_by_site[ctrl_site.site].append(ctrl_site.controller)
sites = Site.objects.all()
for site in sites:
if site not in ctrls_by_site or \
controller not in ctrls_by_site[site]:
controller_site = ControllerSite(controller=controller, site=site)
controller_site.save()
# relations for all slices
ctrls_by_slice = defaultdict(list)
ctrl_slices = ControllerSlice.objects.all()
for ctrl_slice in ctrl_slices:
ctrls_by_slice[ctrl_slice.slice].append(ctrl_slice.controller)
slices = Slice.objects.all()
for slice in slices:
if slice not in ctrls_by_slice or \
controller not in ctrls_by_slice[slice]:
controller_slice = ControllerSlice(controller=controller, slice=slice)
controller_slice.save()
# relations for all users
ctrls_by_user = defaultdict(list)
ctrl_users = ControllerUser.objects.all()
for ctrl_user in ctrl_users:
ctrls_by_user[ctrl_user.user].append(ctrl_user.controller)
users = User.objects.all()
for user in users:
if user not in ctrls_by_user or \
controller not in ctrls_by_user[user]:
controller_user = ControllerUser(controller=controller, user=user)
controller_user.save()
# relations for all networks
ctrls_by_network = defaultdict(list)
ctrl_networks = ControllerNetwork.objects.all()
for ctrl_network in ctrl_networks:
ctrls_by_network[ctrl_network.network].append(ctrl_network.controller)
networks = Network.objects.all()
for network in networks:
if network not in ctrls_by_network or \
controller not in ctrls_by_network[network]:
controller_network = ControllerNetwork(controller=controller, network=network)
if network.subnet and network.subnet.strip():
controller_network.subnet = network.subnet.strip()
controller_network.save()
# relations for all images
ctrls_by_image = defaultdict(list)
ctrl_images = ControllerImages.objects.all()
for ctrl_image in ctrl_images:
ctrls_by_image[ctrl_image.image].append(ctrl_image.controller)
images = Image.objects.all()
for image in images:
if image not in ctrls_by_image or \
controller not in ctrls_by_image[image]:
controller_image = ControllerImages(controller=controller, image=image)
controller_image.save()
| [
"core.models.ControllerNetwork.objects.all",
"core.models.Image.objects.all",
"core.models.ControllerSlice",
"core.models.ControllerSite.objects.all",
"core.models.Slice.objects.all",
"core.models.User.objects.all",
"core.models.ControllerSite",
"core.models.ControllerUser",
"core.models.ControllerImages",
"core.models.ControllerUser.objects.all",
"core.models.Network.objects.all",
"collections.defaultdict",
"core.models.ControllerNetwork",
"core.models.Site.objects.all",
"core.models.ControllerImages.objects.all",
"core.models.ControllerSlice.objects.all"
] | [((276, 293), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (287, 293), False, 'from collections import defaultdict\n'), ((311, 339), 'core.models.ControllerSite.objects.all', 'ControllerSite.objects.all', ([], {}), '()\n', (337, 339), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((452, 470), 'core.models.Site.objects.all', 'Site.objects.all', ([], {}), '()\n', (468, 470), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((753, 770), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (764, 770), False, 'from collections import defaultdict\n'), ((789, 818), 'core.models.ControllerSlice.objects.all', 'ControllerSlice.objects.all', ([], {}), '()\n', (816, 818), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((938, 957), 'core.models.Slice.objects.all', 'Slice.objects.all', ([], {}), '()\n', (955, 957), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((1249, 1266), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1260, 1266), False, 'from collections import defaultdict\n'), ((1284, 1312), 'core.models.ControllerUser.objects.all', 'ControllerUser.objects.all', ([], {}), '()\n', (1310, 1312), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((1425, 1443), 'core.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (1441, 1443), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((1730, 1747), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1741, 1747), False, 'from collections import defaultdict\n'), ((1768, 1799), 'core.models.ControllerNetwork.objects.all', 'ControllerNetwork.objects.all', ([], {}), '()\n', (1797, 1799), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((1933, 1954), 'core.models.Network.objects.all', 'Network.objects.all', ([], {}), '()\n', (1952, 1954), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((2395, 2412), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2406, 2412), False, 'from collections import defaultdict\n'), ((2431, 2461), 'core.models.ControllerImages.objects.all', 'ControllerImages.objects.all', ([], {}), '()\n', (2459, 2461), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((2581, 2600), 'core.models.Image.objects.all', 'Image.objects.all', ([], {}), '()\n', (2598, 2600), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((617, 665), 
'core.models.ControllerSite', 'ControllerSite', ([], {'controller': 'controller', 'site': 'site'}), '(controller=controller, site=site)\n', (631, 665), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((1111, 1162), 'core.models.ControllerSlice', 'ControllerSlice', ([], {'controller': 'controller', 'slice': 'slice'}), '(controller=controller, slice=slice)\n', (1126, 1162), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((1590, 1638), 'core.models.ControllerUser', 'ControllerUser', ([], {'controller': 'controller', 'user': 'user'}), '(controller=controller, user=user)\n', (1604, 1638), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((2122, 2179), 'core.models.ControllerNetwork', 'ControllerNetwork', ([], {'controller': 'controller', 'network': 'network'}), '(controller=controller, network=network)\n', (2139, 2179), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n'), ((2754, 2806), 'core.models.ControllerImages', 'ControllerImages', ([], {'controller': 'controller', 'image': 'image'}), '(controller=controller, image=image)\n', (2770, 2806), False, 'from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network\n')] |
import argparse
from multiprocessing import Pool
from typing import List
import gin
import numpy as np
import pipelines.cleaner as dc
import pipelines.data_source as ds
import pipelines.utils as sc
from pipelines.parser import Parser
def argument_parser():
parser = argparse.ArgumentParser(description='UN Data Krunch')
parser.add_argument('-f', '--gin_config_path', type=str, help='Gin configuration file path', required=True)
    parser.add_argument('-a', '--addition', type=str, help='Additional procedure', required=False)
return parser
@gin.configurable("base_pipeline")
def base(files: List[str], encoder: "pipelines.encoder.BaseEncoder"=None):
"""
    Base pipeline method to be used by parallel_process
:param files: file paths to be processed
:param encoder: encoder class defined @config.gin
"""
try:
if not encoder:
raise ValueError("Encoder cannot be None. PLz Specificy a encoder @gin.config!")
# Initialization
generator = ds.build_advertise_generator(files)
parser = Parser()
enc_client = encoder()
# Source generator, clean and encode advertise
for line in generator:
try:
ad = dc.clean_raw_advertise(line, parser)
if ad:
enc_client.encode_advertise(ad)
except Exception as err:
sc.message(err)
enc_client.save_maps()
except Exception as erro:
sc.message(erro)
@gin.configurable("clean_pipeline")
def clean(files: List[str], encoder: "pipelines.encoder.BaseEncoder"=None):
"""
    Data pipeline method to be used by parallel_process when the data is already clean,
    so no parser is needed.
:param files: file paths to be processed
:param encoder: encoder class defined @config.gin
"""
try:
if not encoder:
raise ValueError("Encoder cannot be None. PLz Specificy a encoder @gin.config!")
generator = ds.build_advertise_generator(files)
enc_client = encoder()
for ad in generator:
try:
enc_client.encode_advertise(ad)
except Exception as err:
sc.message(err)
enc_client.save_maps()
except Exception as erro:
sc.message(erro)
@gin.configurable
def parallel_process(pipeline, dataset_path, workers: int, reducer=None):
"""
Main method that will spawn #workers processes to process data from @dataset_path
    through the @pipeline method defined.
:param pipeline: Pipeline method to be applied
:param dataset_path: Dataset path
    :param workers: number of processes to spawn (limited to the number of cores you have available)
:param reducer: Reducer method to aggregate workers' processed data
"""
workers_files = [arr.tolist() for arr in np.array_split(ds.get_file_paths(folder_path=dataset_path), workers)]
print(workers_files)
with Pool(processes=workers) as pool:
pool.map(pipeline, workers_files)
# Apply reducer if available and if there was more than one worker processing data
if reducer and workers > 1:
red = reducer()
red.reduce_process()
if __name__ == '__main__':
# Parsing command line args
sys_vars = sc.pre_loading(argument_parser)
# Chooses the configuration file to run
execution_file: str = sys_vars['gin_config_path']
gin.parse_config_file(execution_file)
    # Start parallel processing based on the configuration file setup
parallel_process()
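# For reference, a hypothetical config.gin this script could consume (the binding
# names follow the gin.configurable decorators above; the encoder class, dataset
# path and worker count are made-up examples, not values from the original repo):
#
#   parallel_process.pipeline = @base_pipeline
#   parallel_process.dataset_path = '/path/to/dataset'
#   parallel_process.workers = 4
#   base_pipeline.encoder = @MyEncoder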
| [
"pipelines.data_source.build_advertise_generator",
"pipelines.utils.message",
"pipelines.cleaner.clean_raw_advertise",
"argparse.ArgumentParser",
"pipelines.parser.Parser",
"gin.configurable",
"pipelines.data_source.get_file_paths",
"multiprocessing.Pool",
"gin.parse_config_file",
"pipelines.utils.pre_loading"
] | [((559, 592), 'gin.configurable', 'gin.configurable', (['"""base_pipeline"""'], {}), "('base_pipeline')\n", (575, 592), False, 'import gin\n'), ((1502, 1536), 'gin.configurable', 'gin.configurable', (['"""clean_pipeline"""'], {}), "('clean_pipeline')\n", (1518, 1536), False, 'import gin\n'), ((274, 327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""UN Data Krunch"""'}), "(description='UN Data Krunch')\n", (297, 327), False, 'import argparse\n'), ((3294, 3325), 'pipelines.utils.pre_loading', 'sc.pre_loading', (['argument_parser'], {}), '(argument_parser)\n', (3308, 3325), True, 'import pipelines.utils as sc\n'), ((3429, 3466), 'gin.parse_config_file', 'gin.parse_config_file', (['execution_file'], {}), '(execution_file)\n', (3450, 3466), False, 'import gin\n'), ((1012, 1047), 'pipelines.data_source.build_advertise_generator', 'ds.build_advertise_generator', (['files'], {}), '(files)\n', (1040, 1047), True, 'import pipelines.data_source as ds\n'), ((1065, 1073), 'pipelines.parser.Parser', 'Parser', ([], {}), '()\n', (1071, 1073), False, 'from pipelines.parser import Parser\n'), ((1988, 2023), 'pipelines.data_source.build_advertise_generator', 'ds.build_advertise_generator', (['files'], {}), '(files)\n', (2016, 2023), True, 'import pipelines.data_source as ds\n'), ((2969, 2992), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'workers'}), '(processes=workers)\n', (2973, 2992), False, 'from multiprocessing import Pool\n'), ((1482, 1498), 'pipelines.utils.message', 'sc.message', (['erro'], {}), '(erro)\n', (1492, 1498), True, 'import pipelines.utils as sc\n'), ((2289, 2305), 'pipelines.utils.message', 'sc.message', (['erro'], {}), '(erro)\n', (2299, 2305), True, 'import pipelines.utils as sc\n'), ((1230, 1266), 'pipelines.cleaner.clean_raw_advertise', 'dc.clean_raw_advertise', (['line', 'parser'], {}), '(line, parser)\n', (1252, 1266), True, 'import pipelines.cleaner as dc\n'), ((2878, 2921), 'pipelines.data_source.get_file_paths', 'ds.get_file_paths', ([], {'folder_path': 'dataset_path'}), '(folder_path=dataset_path)\n', (2895, 2921), True, 'import pipelines.data_source as ds\n'), ((1395, 1410), 'pipelines.utils.message', 'sc.message', (['err'], {}), '(err)\n', (1405, 1410), True, 'import pipelines.utils as sc\n'), ((2203, 2218), 'pipelines.utils.message', 'sc.message', (['err'], {}), '(err)\n', (2213, 2218), True, 'import pipelines.utils as sc\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import assets.asset_helpers
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('assets', '0015_auto_20161105_1800'),
]
operations = [
migrations.AlterField(
model_name='asset',
name='name',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='secureasset',
name='file',
field=models.FileField(upload_to=assets.asset_helpers.generate_asset_file_name, storage=django.core.files.storage.FileSystemStorage(base_url='/media-secure', location='/data/django/django-cedar/media-secure')),
),
migrations.AlterField(
model_name='secureasset',
name='name',
field=models.CharField(max_length=250),
),
]
| [
"django.db.models.CharField"
] | [((409, 441), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (425, 441), False, 'from django.db import migrations, models\n'), ((894, 926), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (910, 926), False, 'from django.db import migrations, models\n')] |
## this version of get_freq collects %AT-richness, gene expression data and SumFreq statistic on top of the data collated by get_freq.py
import pandas as pd
import numpy as np
## NOTE: All filenames are placeholders
raw = pd.read_csv("REDItools_processed_dedup-filt.genann.txt", header = 0, sep = "\t")
exp = pd.read_csv("Expression_Data/quant.sf", header=0, sep="\t")
at_richness = pd.read_csv("at_richness.txt", header=0, sep="\t")
gene_ann = pd.read_csv("Gene_Length_Data.txt", header = 0, sep = "\t")
counting = raw[["GeneID", "Frequency"]]
#print(test.head(5))
counting["NumLoci"] = 1
counting = counting.groupby("GeneID", as_index = False).sum()
counting = counting[counting["GeneID"] != "-"]
merged = pd.merge(counting, gene_ann, on = "GeneID")
merged = merged[["GeneID", "Frequency", "NumLoci", "Length", "TranscriptID"]]
merged["AvgFreq"] = merged["Frequency"]/merged["NumLoci"]
exp_merged = pd.merge(merged, exp, left_on="TranscriptID", right_on="Name")
exp_mergedClean = exp_merged[["GeneID", "Frequency", "NumLoci", "Length_x", "TranscriptID", "AvgFreq", "Name", "TPM", "NumReads"]]
exp_mergedClean.rename(columns = {"Frequency" : "SumFreq"}, inplace=True)
final_merged = pd.merge(exp_mergedClean, at_richness, on="TranscriptID")
final_merged = final_merged[["GeneID", "SumFreq", "NumLoci", "Length_x", "TranscriptID", "AvgFreq", "TPM", "NumReads", "%AT_Richness"]]
final_merged["SumFreq"] = final_merged["SumFreq"].round(decimals = 3)
final_merged["AvgFreq"] = final_merged["AvgFreq"].round(decimals = 3)
final_merged["%AT_Richness"] = final_merged["%AT_Richness"].round(decimals = 3)
final_merged["TPM"] = final_merged["TPM"].round(decimals = 3)
final_merged.rename(columns = {"Length_x" : "Length"}, inplace=True)
#print(final_merged.head(5))
final_merged.to_csv("Sample_getFreq.txt", sep = "\t", header = True, index = False)
| [
"pandas.merge",
"pandas.read_csv"
] | [((223, 299), 'pandas.read_csv', 'pd.read_csv', (['"""REDItools_processed_dedup-filt.genann.txt"""'], {'header': '(0)', 'sep': '"""\t"""'}), "('REDItools_processed_dedup-filt.genann.txt', header=0, sep='\\t')\n", (234, 299), True, 'import pandas as pd\n'), ((310, 369), 'pandas.read_csv', 'pd.read_csv', (['"""Expression_Data/quant.sf"""'], {'header': '(0)', 'sep': '"""\t"""'}), "('Expression_Data/quant.sf', header=0, sep='\\t')\n", (321, 369), True, 'import pandas as pd\n'), ((385, 435), 'pandas.read_csv', 'pd.read_csv', (['"""at_richness.txt"""'], {'header': '(0)', 'sep': '"""\t"""'}), "('at_richness.txt', header=0, sep='\\t')\n", (396, 435), True, 'import pandas as pd\n'), ((447, 502), 'pandas.read_csv', 'pd.read_csv', (['"""Gene_Length_Data.txt"""'], {'header': '(0)', 'sep': '"""\t"""'}), "('Gene_Length_Data.txt', header=0, sep='\\t')\n", (458, 502), True, 'import pandas as pd\n'), ((712, 753), 'pandas.merge', 'pd.merge', (['counting', 'gene_ann'], {'on': '"""GeneID"""'}), "(counting, gene_ann, on='GeneID')\n", (720, 753), True, 'import pandas as pd\n'), ((906, 968), 'pandas.merge', 'pd.merge', (['merged', 'exp'], {'left_on': '"""TranscriptID"""', 'right_on': '"""Name"""'}), "(merged, exp, left_on='TranscriptID', right_on='Name')\n", (914, 968), True, 'import pandas as pd\n'), ((1191, 1248), 'pandas.merge', 'pd.merge', (['exp_mergedClean', 'at_richness'], {'on': '"""TranscriptID"""'}), "(exp_mergedClean, at_richness, on='TranscriptID')\n", (1199, 1248), True, 'import pandas as pd\n')] |
import numpy as np
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import mnist_parser as mnist
from lib.neural_network import NeuralNetwork
def test_high_acc():
output_layer = 10
training_images = mnist.train_images()
training_labels = mnist.train_labels()
testing_images = mnist.test_images()
testing_labels = mnist.test_labels()
training_inputs = training_images.reshape(
training_images.shape[0], training_images.shape[1] * training_images.shape[2]).astype('float32')
normalized_inputs = training_inputs/255
normalized_outputs = np.eye(output_layer)[training_labels]
testing_inputs = testing_images.reshape(
testing_images.shape[0], testing_images.shape[1]*testing_images.shape[2]).astype('float32')
norm_test_inputs = testing_inputs/255
norm_test_outputs = testing_labels
layers = [784, 30, 10]
learning_rate = 0.001
batch_size = 1
epochs = 5
nn = NeuralNetwork(layers, batch_size, epochs, learning_rate)
nn.fit(normalized_inputs, normalized_outputs, False)
acc = nn.accuracy_test(norm_test_inputs, norm_test_outputs)
assert(acc > 90)
def test_low_acc():
output_layer = 10
training_images = mnist.train_images()
training_labels = mnist.train_labels()
testing_images = mnist.test_images()
testing_labels = mnist.test_labels()
training_inputs = training_images.reshape(
training_images.shape[0], training_images.shape[1] * training_images.shape[2]).astype('float32')
normalized_inputs = training_inputs/255
normalized_outputs = np.eye(output_layer)[training_labels]
testing_inputs = testing_images.reshape(
testing_images.shape[0], testing_images.shape[1]*testing_images.shape[2]).astype('float32')
norm_test_inputs = testing_inputs/255
norm_test_outputs = testing_labels
layers = [784, 30, 10]
learning_rate = 0.01
batch_size = 1
epochs = 5
nn = NeuralNetwork(layers, batch_size, epochs, learning_rate)
nn.fit(normalized_inputs, normalized_outputs, False)
acc = nn.accuracy_test(norm_test_inputs, norm_test_outputs)
    assert(acc > 60)
| [
"numpy.eye",
"lib.neural_network.NeuralNetwork",
"mnist_parser.test_images",
"mnist_parser.test_labels",
"mnist_parser.train_images",
"os.path.abspath",
"mnist_parser.train_labels"
] | [((260, 280), 'mnist_parser.train_images', 'mnist.train_images', ([], {}), '()\n', (278, 280), True, 'import mnist_parser as mnist\n'), ((303, 323), 'mnist_parser.train_labels', 'mnist.train_labels', ([], {}), '()\n', (321, 323), True, 'import mnist_parser as mnist\n'), ((345, 364), 'mnist_parser.test_images', 'mnist.test_images', ([], {}), '()\n', (362, 364), True, 'import mnist_parser as mnist\n'), ((386, 405), 'mnist_parser.test_labels', 'mnist.test_labels', ([], {}), '()\n', (403, 405), True, 'import mnist_parser as mnist\n'), ((992, 1048), 'lib.neural_network.NeuralNetwork', 'NeuralNetwork', (['layers', 'batch_size', 'epochs', 'learning_rate'], {}), '(layers, batch_size, epochs, learning_rate)\n', (1005, 1048), False, 'from lib.neural_network import NeuralNetwork\n'), ((1258, 1278), 'mnist_parser.train_images', 'mnist.train_images', ([], {}), '()\n', (1276, 1278), True, 'import mnist_parser as mnist\n'), ((1301, 1321), 'mnist_parser.train_labels', 'mnist.train_labels', ([], {}), '()\n', (1319, 1321), True, 'import mnist_parser as mnist\n'), ((1343, 1362), 'mnist_parser.test_images', 'mnist.test_images', ([], {}), '()\n', (1360, 1362), True, 'import mnist_parser as mnist\n'), ((1384, 1403), 'mnist_parser.test_labels', 'mnist.test_labels', ([], {}), '()\n', (1401, 1403), True, 'import mnist_parser as mnist\n'), ((1989, 2045), 'lib.neural_network.NeuralNetwork', 'NeuralNetwork', (['layers', 'batch_size', 'epochs', 'learning_rate'], {}), '(layers, batch_size, epochs, learning_rate)\n', (2002, 2045), False, 'from lib.neural_network import NeuralNetwork\n'), ((628, 648), 'numpy.eye', 'np.eye', (['output_layer'], {}), '(output_layer)\n', (634, 648), True, 'import numpy as np\n'), ((1626, 1646), 'numpy.eye', 'np.eye', (['output_layer'], {}), '(output_layer)\n', (1632, 1646), True, 'import numpy as np\n'), ((92, 114), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'from os import path\n')] |
from corc.providers.oci.config import generate_oci_config, valid_oci_config
def prepare_config(provider, provider_kwargs, cluster={}, vcn={}):
config = {"oci": {"cluster": cluster}}
config["oci"].update(provider_kwargs)
# Expects that the default corc config is present
config = generate_oci_config(**config)
if not valid_oci_config(config, verbose=True):
return False
return config
| [
"corc.providers.oci.config.generate_oci_config",
"corc.providers.oci.config.valid_oci_config"
] | [((298, 327), 'corc.providers.oci.config.generate_oci_config', 'generate_oci_config', ([], {}), '(**config)\n', (317, 327), False, 'from corc.providers.oci.config import generate_oci_config, valid_oci_config\n'), ((339, 377), 'corc.providers.oci.config.valid_oci_config', 'valid_oci_config', (['config'], {'verbose': '(True)'}), '(config, verbose=True)\n', (355, 377), False, 'from corc.providers.oci.config import generate_oci_config, valid_oci_config\n')] |
# -*- coding: utf-8 -*-
from nodular import Node, NodePublisher, TRAVERSE_STATUS
from .test_db import db, TestDatabaseFixture
from .test_nodetree import TestType
# This test suite covers traversal.
# Publisher tests are bundled with the view tests.
class TestNodeTraversal(TestDatabaseFixture):
"""Dictionary access to node hierarchy."""
def setUp(self):
super(TestNodeTraversal, self).setUp()
# Make some nodes
self.root = Node(name=u'root', title=u'Root Node')
if not hasattr(self, 'nodetype'):
self.nodetype = Node
self.node1 = self.nodetype(name=u'node1', title=u'Node 1', parent=self.root)
self.node2 = self.nodetype(name=u'node2', title=u'Node 2', parent=self.root)
self.node3 = self.nodetype(name=u'node3', title=u'Node 3', parent=self.node2)
self.node4 = self.nodetype(name=u'node4', title=u'Node 4', parent=self.node3)
self.node5 = self.nodetype(name=u'node5', title=u'Node 5', parent=self.root)
db.session.add_all([self.root, self.node1, self.node2, self.node3, self.node4, self.node5])
db.session.commit()
self.rootpub = NodePublisher(self.root, None, u'/')
self.nodepub = NodePublisher(self.root, None, u'/node2', u'/')
def test_invalid_publisher(self):
"""Publisher paths must be absolute."""
self.assertRaises(ValueError, NodePublisher, self.root, None, u'node2')
self.assertRaises(ValueError, NodePublisher, self.root, None, u'/node2', u'node2')
def test_traverse_basepaths(self):
"""Publisher basepaths must be stored accurately."""
self.assertEqual(self.rootpub.basepath, u'/')
self.assertEqual(self.nodepub.basepath, u'/node2')
newpub = NodePublisher(self.root, None, u'/node2/')
self.assertEqual(newpub.basepath, '/node2')
def test_traverse_noroot_root(self):
"""If there's no root node, status is NOROOT (root publisher)."""
db.session.delete(self.root)
db.session.commit()
status, node, path = self.rootpub.traverse(u'/node2')
self.assertEqual(status, TRAVERSE_STATUS.NOROOT)
def test_traverse_noroot_node(self):
"""If there's no root node, status is NOROOT (node publisher)."""
db.session.delete(self.node2)
db.session.commit()
status, node, path = self.nodepub.traverse(u'/')
self.assertEqual(status, TRAVERSE_STATUS.NOROOT)
def test_traverse_match_root(self):
"""Traversal direct match for root publisher."""
status, node, path = self.rootpub.traverse(u'/node2')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node2)
self.assertEqual(path, None)
status, node, path = self.rootpub.traverse(u'/node2/node3')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node3)
self.assertEqual(path, None)
status, node, path = self.rootpub.traverse(u'/node2/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node4)
self.assertEqual(path, None)
def test_traverse_match_root_slashless(self):
"""Traversal direct match for root publisher (without leading slashes)."""
status, node, path = self.rootpub.traverse(u'node2')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node2)
self.assertEqual(path, None)
status, node, path = self.rootpub.traverse(u'node2/node3')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node3)
self.assertEqual(path, None)
status, node, path = self.rootpub.traverse(u'node2/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node4)
self.assertEqual(path, None)
def test_traverse_match_node(self):
"""Traversal direct match for node publisher."""
status, node, path = self.nodepub.traverse(u'/')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node2)
self.assertEqual(path, None)
status, node, path = self.nodepub.traverse(u'/node3')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node3)
self.assertEqual(path, None)
status, node, path = self.nodepub.traverse(u'/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node4)
self.assertEqual(path, None)
def test_traverse_match_node_slashless(self):
"""Traversal direct match for node publisher (without leading slashes)."""
status, node, path = self.nodepub.traverse(u'')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node2)
self.assertEqual(path, None)
status, node, path = self.nodepub.traverse(u'node3')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node3)
self.assertEqual(path, None)
status, node, path = self.nodepub.traverse(u'node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node4)
self.assertEqual(path, None)
def test_traverse_partial_match(self):
"""Test for partial path matching."""
status, node, path = self.rootpub.traverse(u'/nodeX')
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.root)
self.assertEqual(path, '/nodeX')
status, node, path = self.rootpub.traverse(u'/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.root)
self.assertEqual(path, '/node3/node4')
status, node, path = self.rootpub.traverse(u'/node2/node4')
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.node2)
self.assertEqual(path, '/node4')
def test_traverse_redirect_root(self):
"""Renamed nodes result in REDIRECT status (root publisher)."""
self.node2.name = u'nodeX'
db.session.commit()
status, node, path = self.rootpub.traverse(u'/nodeX')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node2)
self.assertEqual(path, None)
status, node, path = self.rootpub.traverse(u'/node2')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.root)
self.assertEqual(path, '/nodeX')
status, node, path = self.rootpub.traverse(u'/node2/node3')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.root)
self.assertEqual(path, '/nodeX/node3')
status, node, path = self.rootpub.traverse(u'/node2/node4')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.root)
self.assertEqual(path, '/nodeX/node4')
def test_traverse_redirect_node(self):
"""Renamed nodes result in REDIRECT status (node publisher)."""
self.node3.name = u'nodeX'
db.session.commit()
status, node, path = self.nodepub.traverse(u'/nodeX')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node3)
self.assertEqual(path, None)
status, node, path = self.nodepub.traverse(u'/node3')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.node2)
self.assertEqual(path, '/nodeX')
status, node, path = self.nodepub.traverse(u'/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.node2)
self.assertEqual(path, '/nodeX/node4')
def test_traverse_redirect_subnode(self):
"""Renamed nodes result in REDIRECT status (node publisher)."""
self.node4.name = u'nodeX'
db.session.commit()
status, node, path = self.nodepub.traverse(u'/node3/nodeX')
self.assertEqual(status, TRAVERSE_STATUS.MATCH)
self.assertEqual(node, self.node4)
self.assertEqual(path, None)
status, node, path = self.nodepub.traverse(u'/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.node3)
self.assertEqual(path, '/node3/nodeX')
self.nodepub.urlpath = self.nodepub.basepath
status, node, path = self.nodepub.traverse(u'/node2/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.REDIRECT)
self.assertEqual(node, self.node3)
self.assertEqual(path, '/node2/node3/nodeX')
def test_traverse_gone_root(self):
"""Deleted nodes cause a GONE response status (root publisher)."""
db.session.delete(self.node3)
db.session.commit()
status, node, path = self.rootpub.traverse(u'/node2/node3')
self.assertEqual(status, TRAVERSE_STATUS.GONE)
self.assertEqual(node, self.node2)
status, node, path = self.rootpub.traverse(u'/node2/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.GONE)
self.assertEqual(node, self.node2)
def test_traverse_gone_node(self):
"""Deleted nodes cause a GONE response status (node publisher)."""
db.session.delete(self.node3)
db.session.commit()
status, node, path = self.nodepub.traverse(u'/node3')
self.assertEqual(status, TRAVERSE_STATUS.GONE)
self.assertEqual(node, self.node2)
status, node, path = self.nodepub.traverse(u'/node3/node4')
self.assertEqual(status, TRAVERSE_STATUS.GONE)
self.assertEqual(node, self.node2)
def test_traverse_gone_root_noredirect(self):
"""Deleted nodes return PARTIAL when redirects are disabled (root publisher)."""
db.session.delete(self.node3)
db.session.commit()
status, node, path = self.rootpub.traverse(u'/node2/node3', redirect=False)
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.node2)
self.assertEqual(path, u'/node3')
status, node, path = self.rootpub.traverse(u'/node2/node3/node4', redirect=False)
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.node2)
self.assertEqual(path, u'/node3/node4')
def test_traverse_gone_node_noredirect(self):
"""Deleted nodes return PARTIAL when redirects are disabled (node publisher)."""
db.session.delete(self.node3)
db.session.commit()
status, node, path = self.nodepub.traverse(u'/node3', redirect=False)
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.node2)
self.assertEqual(path, u'/node3')
status, node, path = self.nodepub.traverse(u'/node3/node4', redirect=False)
self.assertEqual(status, TRAVERSE_STATUS.PARTIAL)
self.assertEqual(node, self.node2)
self.assertEqual(path, u'/node3/node4')
class TestTypeTraversal(TestNodeTraversal):
def setUp(self):
self.nodetype = TestType
super(TestTypeTraversal, self).setUp()
| [
"nodular.NodePublisher",
"nodular.Node"
] | [((460, 498), 'nodular.Node', 'Node', ([], {'name': 'u"""root"""', 'title': 'u"""Root Node"""'}), "(name=u'root', title=u'Root Node')\n", (464, 498), False, 'from nodular import Node, NodePublisher, TRAVERSE_STATUS\n'), ((1153, 1189), 'nodular.NodePublisher', 'NodePublisher', (['self.root', 'None', 'u"""/"""'], {}), "(self.root, None, u'/')\n", (1166, 1189), False, 'from nodular import Node, NodePublisher, TRAVERSE_STATUS\n'), ((1213, 1260), 'nodular.NodePublisher', 'NodePublisher', (['self.root', 'None', 'u"""/node2"""', 'u"""/"""'], {}), "(self.root, None, u'/node2', u'/')\n", (1226, 1260), False, 'from nodular import Node, NodePublisher, TRAVERSE_STATUS\n'), ((1751, 1793), 'nodular.NodePublisher', 'NodePublisher', (['self.root', 'None', 'u"""/node2/"""'], {}), "(self.root, None, u'/node2/')\n", (1764, 1793), False, 'from nodular import Node, NodePublisher, TRAVERSE_STATUS\n')] |
# Generated by Django 3.1.4 on 2020-12-22 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tweets', '0003_auto_20201222_1535'),
]
operations = [
migrations.AlterField(
model_name='tweet',
name='profile_pic',
field=models.URLField(default='pic'),
),
]
| [
"django.db.models.URLField"
] | [((339, 369), 'django.db.models.URLField', 'models.URLField', ([], {'default': '"""pic"""'}), "(default='pic')\n", (354, 369), False, 'from django.db import migrations, models\n')] |
# Generated by Django 2.2.14 on 2020-08-11 13:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('promoted', '0005_auto_20200803_1214'),
('discovery', '0006_auto_20200807_2051'),
]
operations = [
migrations.CreateModel(
name='PromotedAddon',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('promoted.promotedaddon',),
),
]
| [
"django.db.migrations.CreateModel"
] | [((279, 433), 'django.db.migrations.CreateModel', 'migrations.CreateModel', ([], {'name': '"""PromotedAddon"""', 'fields': '[]', 'options': "{'proxy': True, 'indexes': [], 'constraints': []}", 'bases': "('promoted.promotedaddon',)"}), "(name='PromotedAddon', fields=[], options={'proxy': \n True, 'indexes': [], 'constraints': []}, bases=('promoted.promotedaddon',))\n", (301, 433), False, 'from django.db import migrations\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# once we assume theta_0 and theta_1 params, the predict function below returns y_hat
def predict(theta_0: float, theta_1: float, x_i: float) -> float:
return theta_1 * x_i + theta_0
# since we know the actual y_i output we can compute the error for each pair
def error(theta_0: float, theta_1: float, x_i: float, y_i: float) -> float:
"""
The error from predicting theta_1 * x_i + theta_0, when actual value is y_i
"""
return predict(theta_0, theta_1, x_i) - y_i
# we would like to know the total error over the entire dataset
# sum of squared errors covers both negative and positive errors, preventing errors from cancelling out.
from linear_algebra import Vector
def sum_of_sqerrors(theta_0: float, theta_1: float, x: Vector, y: Vector) -> float:
return sum(error(theta_0, theta_1, x_i, y_i) ** 2
for x_i, y_i in zip(x, y) )
# +
from typing import Tuple
from statistics import correlation, standard_deviation, mean
# based on the OLS, the error-minimizing theta_0 and theta_1
def least_squares_fit (x: Vector, y: Vector) -> Tuple[float, float]:
"""
Given two vectors x and y, find the least-squares values of theta_0 and theta_1
"""
theta_1 = correlation(x, y) * standard_deviation(y) / standard_deviation(x)
theta_0 = mean(y) - theta_1 * mean(x)
return theta_0, theta_1
# -
# Some good intuition notes by the book:
#
# The choice of theta_0 simply states we are trying to predict the average of the response variable via the average of the input variable
# The choice of theta_1 means when the input value increases by standard_deviation(x) the prediction then increases by correlation(x, y) * standard_deviation(y)
x = [i for i in range(-100, 110, 10)]
y = [3 * i - 5 for i in x]
assert least_squares_fit(x, y) == (-5, 3)
# +
from statistics import num_friends_good, daily_minutes_good
theta_0, theta_1 = least_squares_fit(num_friends_good, daily_minutes_good)
assert 22.9 < theta_0 < 23.0
assert 0.9 < theta_1 < 0.905
# -
# A common measure of model performance with OLS is the R-squared, which measures the fraction of the total variation of the output variable that is captured by the model
from statistics import de_mean
# +
def total_sum_squares(y: Vector) -> float:
"""the total squared variation of y_i's from their mean"""
return sum(v **2 for v in de_mean(y))
def r_squared(theta_0: float, theta_1: float, x: Vector, y: Vector) -> float:
"""
the fraction of variation in y captured by the model which equals
1 - the fraction of variation in y not captured by the model
"""
return 1.0 - (sum_of_sqerrors(theta_0, theta_1, x, y) /
total_sum_squares(y))
rsq = r_squared(theta_0, theta_1, num_friends_good, daily_minutes_good)
assert 0.328 < rsq < 0.330
# -
# Using gradient descent
import random
from gradient_descent import gradient_step
# +
num_epochs = 10000
random.seed(0)
theta_guess = [random.random(), random.random()] # choose random value to start
learning_rate = 0.00001
for _ in range(num_epochs):
theta_0, theta_1 = theta_guess
    # Partial derivatives with respect to theta_0 and theta_1
grad_theta_0 = sum(2 * error(theta_0, theta_1, x_i, y_i)
for x_i, y_i in zip(num_friends_good, daily_minutes_good))
grad_theta_1 = sum(2 * error(theta_0, theta_1, x_i, y_i) * x_i
for x_i, y_i in zip(num_friends_good, daily_minutes_good))
# compute loss
loss = sum_of_sqerrors(theta_0, theta_1, num_friends_good, daily_minutes_good)
print(f"loss: {loss:.3f}")
# update the guess
theta_guess = gradient_step(theta_guess, [grad_theta_0, grad_theta_1], -learning_rate)
theta_guess = theta_0, theta_1
assert 22.9 < theta_0 < 23.0
assert 0.9 < theta_1 < 0.905
# -
| [
"statistics.mean",
"statistics.standard_deviation",
"statistics.de_mean",
"random.seed",
"random.random",
"gradient_descent.gradient_step",
"statistics.correlation"
] | [((3187, 3201), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3198, 3201), False, 'import random\n'), ((3218, 3233), 'random.random', 'random.random', ([], {}), '()\n', (3231, 3233), False, 'import random\n'), ((3235, 3250), 'random.random', 'random.random', ([], {}), '()\n', (3248, 3250), False, 'import random\n'), ((3920, 3992), 'gradient_descent.gradient_step', 'gradient_step', (['theta_guess', '[grad_theta_0, grad_theta_1]', '(-learning_rate)'], {}), '(theta_guess, [grad_theta_0, grad_theta_1], -learning_rate)\n', (3933, 3992), False, 'from gradient_descent import gradient_step\n'), ((1539, 1560), 'statistics.standard_deviation', 'standard_deviation', (['x'], {}), '(x)\n', (1557, 1560), False, 'from statistics import correlation, standard_deviation, mean\n'), ((1575, 1582), 'statistics.mean', 'mean', (['y'], {}), '(y)\n', (1579, 1582), False, 'from statistics import correlation, standard_deviation, mean\n'), ((1495, 1512), 'statistics.correlation', 'correlation', (['x', 'y'], {}), '(x, y)\n', (1506, 1512), False, 'from statistics import correlation, standard_deviation, mean\n'), ((1515, 1536), 'statistics.standard_deviation', 'standard_deviation', (['y'], {}), '(y)\n', (1533, 1536), False, 'from statistics import correlation, standard_deviation, mean\n'), ((1595, 1602), 'statistics.mean', 'mean', (['x'], {}), '(x)\n', (1599, 1602), False, 'from statistics import correlation, standard_deviation, mean\n'), ((2633, 2643), 'statistics.de_mean', 'de_mean', (['y'], {}), '(y)\n', (2640, 2643), False, 'from statistics import de_mean\n')] |
import Rahimcalc as r
def sample():
a=int(input("Enter the a:"))
b=int(input("Enter the b:"))
print(r.sum(a,b))
print(r.sub(a,b))
print(r.mul(a,b))
print(r.complex(a,b))
print(r.div(a,b))
if __name__=='__main__':
sample()
| [
"Rahimcalc.mul",
"Rahimcalc.sub",
"Rahimcalc.sum",
"Rahimcalc.div",
"Rahimcalc.complex"
] | [((116, 127), 'Rahimcalc.sum', 'r.sum', (['a', 'b'], {}), '(a, b)\n', (121, 127), True, 'import Rahimcalc as r\n'), ((139, 150), 'Rahimcalc.sub', 'r.sub', (['a', 'b'], {}), '(a, b)\n', (144, 150), True, 'import Rahimcalc as r\n'), ((162, 173), 'Rahimcalc.mul', 'r.mul', (['a', 'b'], {}), '(a, b)\n', (167, 173), True, 'import Rahimcalc as r\n'), ((185, 200), 'Rahimcalc.complex', 'r.complex', (['a', 'b'], {}), '(a, b)\n', (194, 200), True, 'import Rahimcalc as r\n'), ((212, 223), 'Rahimcalc.div', 'r.div', (['a', 'b'], {}), '(a, b)\n', (217, 223), True, 'import Rahimcalc as r\n')] |
import os
import zipfile
def zipdir(path, ziph):
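    # Walk the tree under path and add every file to the archive, storing names relative to the parent of path so the top-level directory name is preserved.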
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),
os.path.relpath(os.path.join(root, file),
os.path.join(path, '..')))
def do_zip(directory, fname):
zipf = zipfile.ZipFile(fname, 'w', zipfile.ZIP_DEFLATED)
zipdir(directory, zipf)
zipf.close()
| [
"zipfile.ZipFile",
"os.path.join",
"os.walk"
] | [((79, 92), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (86, 92), False, 'import os\n'), ((344, 393), 'zipfile.ZipFile', 'zipfile.ZipFile', (['fname', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(fname, 'w', zipfile.ZIP_DEFLATED)\n", (359, 393), False, 'import zipfile\n'), ((144, 168), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (156, 168), False, 'import os\n'), ((209, 233), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (221, 233), False, 'import os\n'), ((274, 298), 'os.path.join', 'os.path.join', (['path', '""".."""'], {}), "(path, '..')\n", (286, 298), False, 'import os\n')] |
'''
A program that performs training and testing on an image dataset with unified labeling.
'''
__author__ = 'will'
from keras.models import Sequential
from keras.layers import Dense
#from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.keras.backend import conv2d
#import pickle
outputs = 1
from get_image_self import *
trX,trY = get_training_data()
teX,teY = get_test_data()
seed = 0
np.random.seed(seed)
tf.random.set_seed(seed)
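# Fully-connected regression model: 256 inputs -> 512 -> 64 -> 1 output, trained with MSE loss.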
model=Sequential()
#model.add(Dense(512, input_dim=np.shape(trX)[1], activation='relu'))
model.add(Dense(512, input_dim=256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trX, trY, epochs=30, batch_size=1)
Y_prediction = model.predict(teX).flatten()
for i in range(1000):
label = teY[i]
pred = Y_prediction[i]
print("label:{:.2f}, pred:{:.2f}".format(label, pred))
# def get_direction(img):
# print(img.shape)
# img = np.array([np.reshape(img, img.shape**2)])
# ret = model.predict(np.array([img]))
# return ret
# Predict direction with single image
#dir=get_direction([teX[10]])
#print(dir[0][0])
model.save("mlt_model2")
| [
"keras.layers.Dense",
"tensorflow.random.set_seed",
"numpy.random.seed",
"keras.models.Sequential"
] | [((432, 452), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (446, 452), True, 'import numpy as np\n'), ((453, 477), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (471, 477), True, 'import tensorflow as tf\n'), ((485, 497), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (495, 497), False, 'from keras.models import Sequential\n'), ((578, 622), 'keras.layers.Dense', 'Dense', (['(512)'], {'input_dim': '(256)', 'activation': '"""relu"""'}), "(512, input_dim=256, activation='relu')\n", (583, 622), False, 'from keras.layers import Dense\n'), ((634, 662), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (639, 662), False, 'from keras.layers import Dense\n'), ((674, 682), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (679, 682), False, 'from keras.layers import Dense\n')] |
#!/usr/bin/python3
from hypothesis import given
from hypothesis.strategies._internal.deferred import DeferredStrategy
from brownie.network.account import Account
from brownie.test import strategy
def test_strategy():
assert isinstance(strategy("address"), DeferredStrategy)
@given(value=strategy("address"))
def test_given(accounts, value):
assert value in accounts
assert isinstance(value, Account)
@given(value=strategy("address", length=3))
def test_length(accounts, value):
assert list(accounts).index(value) < 3
def test_repr():
assert repr(strategy("address")) == "sampled_from(accounts)"
| [
"brownie.test.strategy"
] | [((243, 262), 'brownie.test.strategy', 'strategy', (['"""address"""'], {}), "('address')\n", (251, 262), False, 'from brownie.test import strategy\n'), ((297, 316), 'brownie.test.strategy', 'strategy', (['"""address"""'], {}), "('address')\n", (305, 316), False, 'from brownie.test import strategy\n'), ((433, 462), 'brownie.test.strategy', 'strategy', (['"""address"""'], {'length': '(3)'}), "('address', length=3)\n", (441, 462), False, 'from brownie.test import strategy\n'), ((576, 595), 'brownie.test.strategy', 'strategy', (['"""address"""'], {}), "('address')\n", (584, 595), False, 'from brownie.test import strategy\n')] |
#!/usr/bin/python3 -u
import os
import time
import RPi.GPIO as GPIO
import lcd_i2c
from encoder import Encoder
GPIO.setmode(GPIO.BCM)
switch_pin = 13
encoder_down_pin = 6
encoder_up_pin = 5
minutes = 0
dirty = True
state = 'set'
mylcd = lcd_i2c.lcd()
timer_end = None
def start_timer():
global timer_end, state
timer_end = time.perf_counter() + (minutes * 60)
state = 'timer'
print("Starting timer")
os.system('say Starting timer')
def check_timer():
global timer_end
remaining = int(timer_end - time.perf_counter())
return int(remaining)
def switch_pressed(v):
global dirty, state
print("Pressed")
if state == 'set':
start_timer()
elif state == 'done':
state = 'set'
dirty = True
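# Push button on switch_pin starts the timer, or returns to the set screen once done; falling edge with 500 ms debounce.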
GPIO.setup(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(switch_pin, GPIO.FALLING, callback=switch_pressed, bouncetime=500)
def update():
global state
if state == 'set':
global minutes, dirty
if not dirty: return
mylcd.lcd_clear()
mylcd.lcd_display_string(f"Timer: {minutes} min", 1)
dirty = False
elif state == 'timer':
mylcd.lcd_clear()
remaining = check_timer()
minutes = int(remaining / 60)
seconds = remaining % 60
mylcd.lcd_display_string(f"Timer: {minutes}:{seconds:02d}")
elif state == 'done':
mylcd.lcd_clear()
mylcd.lcd_display_string("Timer done.")
def valueChanged(value):
global minutes
global dirty
dirty = True
minutes = max(0, value)
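# Rotary encoder on encoder_down_pin/encoder_up_pin adjusts the minute count through valueChanged.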
e1 = Encoder(encoder_down_pin, encoder_up_pin, callback=valueChanged)
update()
try:
while True:
if state == 'timer' and check_timer() <= 0:
print("Timer done")
os.system(f'say timer {minutes} minutes done')
state = 'done'
update()
time.sleep(0.5)
if state != 'set':
time.sleep(.45) # 0.5 + 0.45 = approx 1 second refresh rate
finally:
print("Cleanup")
GPIO.cleanup()
| [
"encoder.Encoder",
"RPi.GPIO.cleanup",
"RPi.GPIO.add_event_detect",
"RPi.GPIO.setup",
"time.perf_counter",
"time.sleep",
"lcd_i2c.lcd",
"os.system",
"RPi.GPIO.setmode"
] | [((115, 137), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (127, 137), True, 'import RPi.GPIO as GPIO\n'), ((242, 255), 'lcd_i2c.lcd', 'lcd_i2c.lcd', ([], {}), '()\n', (253, 255), False, 'import lcd_i2c\n'), ((761, 818), 'RPi.GPIO.setup', 'GPIO.setup', (['switch_pin', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (771, 818), True, 'import RPi.GPIO as GPIO\n'), ((819, 911), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['switch_pin', 'GPIO.FALLING'], {'callback': 'switch_pressed', 'bouncetime': '(500)'}), '(switch_pin, GPIO.FALLING, callback=switch_pressed,\n bouncetime=500)\n', (840, 911), True, 'import RPi.GPIO as GPIO\n'), ((1587, 1651), 'encoder.Encoder', 'Encoder', (['encoder_down_pin', 'encoder_up_pin'], {'callback': 'valueChanged'}), '(encoder_down_pin, encoder_up_pin, callback=valueChanged)\n', (1594, 1651), False, 'from encoder import Encoder\n'), ((427, 458), 'os.system', 'os.system', (['"""say Starting timer"""'], {}), "('say Starting timer')\n", (436, 458), False, 'import os\n'), ((2032, 2046), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (2044, 2046), True, 'import RPi.GPIO as GPIO\n'), ((338, 357), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (355, 357), False, 'import time\n'), ((1881, 1896), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1891, 1896), False, 'import time\n'), ((533, 552), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (550, 552), False, 'import time\n'), ((1780, 1826), 'os.system', 'os.system', (['f"""say timer {minutes} minutes done"""'], {}), "(f'say timer {minutes} minutes done')\n", (1789, 1826), False, 'import os\n'), ((1936, 1952), 'time.sleep', 'time.sleep', (['(0.45)'], {}), '(0.45)\n', (1946, 1952), False, 'import time\n')] |
from flask import Blueprint
bp = Blueprint('payment', __name__)
from app.payment import routes, forms | [
"flask.Blueprint"
] | [((34, 64), 'flask.Blueprint', 'Blueprint', (['"""payment"""', '__name__'], {}), "('payment', __name__)\n", (43, 64), False, 'from flask import Blueprint\n')] |
#//----------------------------------------------------------------------
#// Copyright 2007-2010 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
from uvm.base.uvm_object import UVMObject
from uvm.macros import *
from uvm import sv
RANGE_32B = range(0, (1 << 32) - 1)
class packet(UVMObject):
# rand int addr
# rand int data
# Use the macro in a class to implement factory registration along with other
# utilities (create, get_type_name). For only factory registration, use
# the macro `uvm_object_registry(packet,"packet").
# Base constraints TODO
# constraint c1 { addr inside { [ 0x0: 0x40], [ 0x100: 0x200], [ 0x1000: 0x1fff], [ 0x4000: 0x4fff] }; }
# constraint c2 { (addr <= 0x40) -> (data inside { [10:20] } ); }
# constraint c3 { (addr >= 0x100 and addr <= 0x200) -> (data inside { [100:200] } ); }
# constraint c4 { (addr >= 0x1000 and addr <= 0x1fff) -> (data inside { [300:400] } ); }
# constraint c5 { (addr >= 0x4000 and addr <= 0x4fff) -> (data inside { [600:800] } ); }
# do printing, comparing, etc. These functions can also be automated inside
# the `uvm_object_utils_begin/end macros if desired. Below show the manual
# approach.
def do_print(self, printer):
printer.print_field("addr", self.addr, sv.bits(self.addr))
printer.print_field("data", self.data, sv.bits(self.data))
def do_compare(self, rhs, comparer):
        rhs_ = rhs
        if rhs_ is None:
return 0
do_compare = 1
do_compare &= comparer.compare_field("addr", self.addr, rhs_.addr,
sv.bits(self.addr))
do_compare &= comparer.compare_field("data", self.data, rhs_.data,
sv.bits(self.data))
return do_compare
def do_copy(self, rhs):
rhs_ = rhs
if rhs is None:
return
self.addr = rhs_.addr
self.data = rhs_.data
def __init__(self, name="packet"):
super().__init__(name)
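        # addr and data are declared as randomizable fields spanning the full 32-bit range.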
self.addr = 0
self.rand('addr', RANGE_32B)
self.data = 0
self.rand('data', RANGE_32B)
uvm_object_utils(packet)
| [
"uvm.sv.bits"
] | [((2164, 2182), 'uvm.sv.bits', 'sv.bits', (['self.addr'], {}), '(self.addr)\n', (2171, 2182), False, 'from uvm import sv\n'), ((2231, 2249), 'uvm.sv.bits', 'sv.bits', (['self.data'], {}), '(self.data)\n', (2238, 2249), False, 'from uvm import sv\n'), ((2468, 2486), 'uvm.sv.bits', 'sv.bits', (['self.addr'], {}), '(self.addr)\n', (2475, 2486), False, 'from uvm import sv\n'), ((2575, 2593), 'uvm.sv.bits', 'sv.bits', (['self.data'], {}), '(self.data)\n', (2582, 2593), False, 'from uvm import sv\n')] |
import sys
from tello.tello import Tello
from tello.tello_command import Command, GenericCommand
if __name__ == "__main__":
tello = Tello()
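    # Build the command list: the initial Command, followed by one GenericCommand per CLI argument.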
tello_commands = [Command()] + [GenericCommand(cmd) for cmd in sys.argv[1:]]
for tello_command in tello_commands:
tello.execute_command(tello_command)
tello.join()
| [
"tello.tello_command.GenericCommand",
"tello.tello_command.Command",
"tello.tello.Tello"
] | [((138, 145), 'tello.tello.Tello', 'Tello', ([], {}), '()\n', (143, 145), False, 'from tello.tello import Tello\n'), ((168, 177), 'tello.tello_command.Command', 'Command', ([], {}), '()\n', (175, 177), False, 'from tello.tello_command import Command, GenericCommand\n'), ((182, 201), 'tello.tello_command.GenericCommand', 'GenericCommand', (['cmd'], {}), '(cmd)\n', (196, 201), False, 'from tello.tello_command import Command, GenericCommand\n')] |
__author__ = 'guorongxu'
import subprocess
def download(workspace, data_set):
with open(workspace + "/" + data_set + "/gene_list.txt") as f:
content = f.readlines()
length = len(content)
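    # Submit one qsub job per slice of the gene list; each job downloads the PubMed records for its index range.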
for i in range(0, 20):
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/pubmed.sh", "download",
workspace + "/" + data_set, str(i * length/8), str((i + 1) * length/8 - 1)])
def print_json(workspace, data_set):
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/pubmed.sh", "print_json", workspace + "/" + data_set])
def print_label(workspace, data_set):
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/pubmed.sh", "print_label", workspace + "/" + data_set])
def print_edge(workspace, data_set):
subprocess.call(["qsub", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/pubmed.sh", "print_edge", workspace + "/" + data_set])
def print_schema(workspace, data_set):
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/pubmed.sh", "print_schema", workspace, data_set])
## To append id into json files
def append_id(workspace, data_set):
subprocess.call(["qsub", "-pe", "smp", "1", "-o", "search_engine.log", "-e", "search_engine.log",
workspace + "/codes/" + data_set + "/pubmed.sh", "append_id", workspace, data_set])
| [
"subprocess.call"
] | [((570, 748), 'subprocess.call', 'subprocess.call', (["['qsub', '-o', 'search_engine.log', '-e', 'search_engine.log', workspace +\n '/codes/' + data_set + '/pubmed.sh', 'print_json', workspace + '/' +\n data_set]"], {}), "(['qsub', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'print_json', workspace + '/' + data_set])\n", (585, 748), False, 'import subprocess\n'), ((805, 984), 'subprocess.call', 'subprocess.call', (["['qsub', '-o', 'search_engine.log', '-e', 'search_engine.log', workspace +\n '/codes/' + data_set + '/pubmed.sh', 'print_label', workspace + '/' +\n data_set]"], {}), "(['qsub', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'print_label', workspace + '/' + data_set])\n", (820, 984), False, 'import subprocess\n'), ((1040, 1218), 'subprocess.call', 'subprocess.call', (["['qsub', '-o', 'search_engine.log', '-e', 'search_engine.log', workspace +\n '/codes/' + data_set + '/pubmed.sh', 'print_edge', workspace + '/' +\n data_set]"], {}), "(['qsub', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'print_edge', workspace + '/' + data_set])\n", (1055, 1218), False, 'import subprocess\n'), ((1276, 1468), 'subprocess.call', 'subprocess.call', (["['qsub', '-pe', 'smp', '1', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'print_schema', workspace, data_set]"], {}), "(['qsub', '-pe', 'smp', '1', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'print_schema', workspace, data_set])\n", (1291, 1468), False, 'import subprocess\n'), ((1554, 1743), 'subprocess.call', 'subprocess.call', (["['qsub', '-pe', 'smp', '1', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'append_id', workspace, data_set]"], {}), "(['qsub', '-pe', 'smp', '1', '-o', 'search_engine.log', '-e',\n 'search_engine.log', workspace + '/codes/' + data_set + '/pubmed.sh',\n 'append_id', workspace, data_set])\n", (1569, 1743), False, 'import subprocess\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 17:53:58 2017
@author: jiahuei
"""
from link_dirs import CURR_DIR, BASE_DIR, pjoin
import argparse
import os
import logging
import platform
from copy import deepcopy
from time import localtime, strftime
from src import infer_fn_v2 as infer
from src.compat_v2 import update_config
from common import configuration_v1 as cfg
from common.natural_sort import natural_keys as nat_key
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--infer_set', type=str, default='test',
choices=['test', 'valid', 'coco_test', 'coco_valid'],
help='The split to perform inference on.')
parser.add_argument(
'--infer_checkpoints_dir', type=str,
default=pjoin('mscoco', 'radix_b256_add_LN_softmax_h8_tie_lstm_run_01'),
help='The directory containing the checkpoint files.')
parser.add_argument(
'--infer_checkpoints', type=str, default='all',
help='The checkpoint numbers to be evaluated. Comma-separated.')
parser.add_argument(
'--annotations_file', type=str, default='captions_val2014.json',
help='The annotations / reference file for calculating scores.')
parser.add_argument(
'--dataset_dir', type=str, default='',
help='Dataset directory.')
parser.add_argument(
'--ckpt_prefix', type=str, default='model_compact-',
help='Prefix of checkpoint names.')
parser.add_argument(
'--run_inference', type=bool, default=True,
help='Whether to perform inference.')
parser.add_argument(
'--get_metric_score', type=bool, default=True,
help='Whether to perform metric score calculations.')
parser.add_argument(
'--save_attention_maps', type=bool, default=False,
help='Whether to save attention maps to disk as pickle file.')
parser.add_argument(
'--gpu', type=str, default='0',
help='The gpu number.')
parser.add_argument(
'--per_process_gpu_memory_fraction', type=float, default=0.75,
help='The fraction of GPU memory allocated.')
parser.add_argument(
'--verbosity', type=int, default=10, choices=[10, 20])
parser.add_argument(
'--infer_beam_size', type=int, default=3,
help='The beam size.')
parser.add_argument(
'--infer_length_penalty_weight', type=float, default=0.0,
help='The length penalty weight used in beam search.')
parser.add_argument(
'--infer_max_length', type=int, default=30,
help='The maximum caption length allowed during inference.')
parser.add_argument(
'--batch_size_infer', type=int, default=25,
help='The batch size.')
args = parser.parse_args()
return args
def main(args):
args = deepcopy(args)
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
default_exp_dir = pjoin(os.path.dirname(CURR_DIR), 'experiments')
args.infer_checkpoints_dir = pjoin(default_exp_dir, args.infer_checkpoints_dir)
args.annotations_file = pjoin(BASE_DIR, 'common', 'coco_caption', 'annotations', args.annotations_file)
if args.dataset_dir == '':
args.dataset_dir = pjoin(BASE_DIR, 'datasets', 'mscoco')
ckpt_prefix = args.ckpt_prefix
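    # When 'all' is requested, enumerate checkpoint files on disk and keep only the 12 most recent.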
if args.infer_checkpoints == 'all':
ckpt_files = sorted(os.listdir(args.infer_checkpoints_dir), key=nat_key)
ckpt_files = [f for f in ckpt_files if ckpt_prefix in f]
ckpt_files = [f.replace('.index', '') for f in ckpt_files if '.index' in f]
ckpt_files = [f.replace(ckpt_prefix, '') for f in ckpt_files]
# if len(ckpt_files) > 20:
ckpt_files = ckpt_files[-12:]
args.infer_checkpoints = ckpt_files
else:
args.infer_checkpoints = args.infer_checkpoints.split(',')
if len(args.infer_checkpoints) < 1:
raise ValueError('`infer_checkpoints` must be either `all` or '
'a list of comma-separated checkpoint numbers.')
###
c = cfg.load_config(pjoin(args.infer_checkpoints_dir, 'config.pkl'))
c = update_config(c)
c.__dict__.update(args.__dict__)
save_name = 'b{}_lp{:2.1f}___{}'.format(c.infer_beam_size,
c.infer_length_penalty_weight,
strftime('%m-%d_%H-%M', localtime()))
set_name = c.infer_set[0] + ''.join(x.title() for x in c.infer_set.split('_'))[1:] # camelCase
c.infer_save_path = '_'.join([c.infer_checkpoints_dir, '__infer', set_name, save_name])
# c.infer_save_path = pjoin(c.infer_checkpoints_dir, '_'.join(['infer', set_name, save_name])
###
if not os.path.exists(c.infer_save_path):
os.mkdir(c.infer_save_path)
# Loop through the checkpoint files
scores_combined = {}
for ckpt_num in c.infer_checkpoints:
curr_ckpt_path = pjoin(c.infer_checkpoints_dir, ckpt_prefix + ckpt_num)
infer.evaluate_model(config=c,
curr_ckpt_path=curr_ckpt_path,
scores_combined=scores_combined)
print('\n')
if __name__ == '__main__':
_args = parse_args()
logging.basicConfig(level=_args.verbosity)
logger = logging.getLogger(__name__)
logger.debug('Python version: {}'.format(platform.python_version()))
main(_args)
| [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"src.compat_v2.update_config",
"src.infer_fn_v2.evaluate_model",
"link_dirs.pjoin",
"os.path.dirname",
"os.mkdir",
"copy.deepcopy",
"time.localtime",
"platform.python_version"
] | [((486, 563), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(formatter_class=argparse.RawDescriptionHelpFormatter)\n', (509, 563), False, 'import argparse\n'), ((2902, 2916), 'copy.deepcopy', 'deepcopy', (['args'], {}), '(args)\n', (2910, 2916), False, 'from copy import deepcopy\n'), ((3126, 3176), 'link_dirs.pjoin', 'pjoin', (['default_exp_dir', 'args.infer_checkpoints_dir'], {}), '(default_exp_dir, args.infer_checkpoints_dir)\n', (3131, 3176), False, 'from link_dirs import CURR_DIR, BASE_DIR, pjoin\n'), ((3205, 3284), 'link_dirs.pjoin', 'pjoin', (['BASE_DIR', '"""common"""', '"""coco_caption"""', '"""annotations"""', 'args.annotations_file'], {}), "(BASE_DIR, 'common', 'coco_caption', 'annotations', args.annotations_file)\n", (3210, 3284), False, 'from link_dirs import CURR_DIR, BASE_DIR, pjoin\n'), ((4248, 4264), 'src.compat_v2.update_config', 'update_config', (['c'], {}), '(c)\n', (4261, 4264), False, 'from src.compat_v2 import update_config\n'), ((5347, 5389), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '_args.verbosity'}), '(level=_args.verbosity)\n', (5366, 5389), False, 'import logging\n'), ((5403, 5430), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5420, 5430), False, 'import logging\n'), ((3051, 3076), 'os.path.dirname', 'os.path.dirname', (['CURR_DIR'], {}), '(CURR_DIR)\n', (3066, 3076), False, 'import os\n'), ((3343, 3380), 'link_dirs.pjoin', 'pjoin', (['BASE_DIR', '"""datasets"""', '"""mscoco"""'], {}), "(BASE_DIR, 'datasets', 'mscoco')\n", (3348, 3380), False, 'from link_dirs import CURR_DIR, BASE_DIR, pjoin\n'), ((4191, 4238), 'link_dirs.pjoin', 'pjoin', (['args.infer_checkpoints_dir', '"""config.pkl"""'], {}), "(args.infer_checkpoints_dir, 'config.pkl')\n", (4196, 4238), False, 'from link_dirs import CURR_DIR, BASE_DIR, pjoin\n'), ((4846, 4879), 'os.path.exists', 'os.path.exists', (['c.infer_save_path'], {}), '(c.infer_save_path)\n', (4860, 4879), False, 'import os\n'), ((4889, 4916), 'os.mkdir', 'os.mkdir', (['c.infer_save_path'], {}), '(c.infer_save_path)\n', (4897, 4916), False, 'import os\n'), ((5053, 5107), 'link_dirs.pjoin', 'pjoin', (['c.infer_checkpoints_dir', '(ckpt_prefix + ckpt_num)'], {}), '(c.infer_checkpoints_dir, ckpt_prefix + ckpt_num)\n', (5058, 5107), False, 'from link_dirs import CURR_DIR, BASE_DIR, pjoin\n'), ((5116, 5214), 'src.infer_fn_v2.evaluate_model', 'infer.evaluate_model', ([], {'config': 'c', 'curr_ckpt_path': 'curr_ckpt_path', 'scores_combined': 'scores_combined'}), '(config=c, curr_ckpt_path=curr_ckpt_path,\n scores_combined=scores_combined)\n', (5136, 5214), True, 'from src import infer_fn_v2 as infer\n'), ((851, 914), 'link_dirs.pjoin', 'pjoin', (['"""mscoco"""', '"""radix_b256_add_LN_softmax_h8_tie_lstm_run_01"""'], {}), "('mscoco', 'radix_b256_add_LN_softmax_h8_tie_lstm_run_01')\n", (856, 914), False, 'from link_dirs import CURR_DIR, BASE_DIR, pjoin\n'), ((3485, 3523), 'os.listdir', 'os.listdir', (['args.infer_checkpoints_dir'], {}), '(args.infer_checkpoints_dir)\n', (3495, 3523), False, 'import os\n'), ((4513, 4524), 'time.localtime', 'localtime', ([], {}), '()\n', (4522, 4524), False, 'from time import localtime, strftime\n'), ((5476, 5501), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (5499, 5501), False, 'import platform\n')] |
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import (
Sequential as Seq,
Dropout,
Linear as Lin,
LeakyReLU,
ReLU,
BatchNorm1d as BN,
)
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.nn import (
DynamicEdgeConv,
PointConv,
XConv,
fps,
radius,
global_max_pool,
knn_interpolate,
)
from pykeops.torch import LazyTensor
from benchmark_layers import MyDynamicEdgeConv, MyXConv
from geometry_processing import dMaSIFConv, mesh_normals_areas, tangent_vectors
from helper import diagonal_ranges
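# Select the DynamicEdgeConv backend: stock torch_geometric ("torch") or the KeOps variant ("keops").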
DEConv = {"torch": DynamicEdgeConv, "keops": MyDynamicEdgeConv}
# Dynamic Graph CNNs ===========================================================
# Adapted from the PyTorch_geometric gallery to get a close fit to
# the original paper.
def MLP(channels, batch_norm=True):
"""Multi-layer perceptron, with ReLU non-linearities and batch normalization."""
return Seq(
*[
Seq(
Lin(channels[i - 1], channels[i]),
BN(channels[i]) if batch_norm else nn.Identity(),
LeakyReLU(negative_slope=0.2),
)
for i in range(1, len(channels))
]
)
class DGCNN_seg(torch.nn.Module):
def __init__(
self, in_channels, out_channels, n_layers, k=40, aggr="max", backend="keops"
):
super(DGCNN_seg, self).__init__()
self.name = "DGCNN_seg_" + backend
self.I, self.O = (
in_channels + 3,
out_channels,
) # Add coordinates to input channels
self.n_layers = n_layers
self.transform_1 = DEConv[backend](MLP([2 * 3, 64, 128]), k, aggr)
self.transform_2 = MLP([128, 1024])
self.transform_3 = MLP([1024, 512, 256], batch_norm=False)
self.transform_4 = Lin(256, 3 * 3)
self.conv_layers = nn.ModuleList(
[DEConv[backend](MLP([2 * self.I, self.O, self.O]), k, aggr)]
+ [
DEConv[backend](MLP([2 * self.O, self.O, self.O]), k, aggr)
for i in range(n_layers - 1)
]
)
self.linear_layers = nn.ModuleList(
[
nn.Sequential(
nn.Linear(self.O, self.O), nn.ReLU(), nn.Linear(self.O, self.O)
)
for i in range(n_layers)
]
)
self.linear_transform = nn.ModuleList(
[nn.Linear(self.I, self.O)]
+ [nn.Linear(self.O, self.O) for i in range(n_layers - 1)]
)
def forward(self, positions, features, batch_indices):
# Lab: (B,), Pos: (N, 3), Batch: (N,)
pos, feat, batch = positions, features, batch_indices
# TransformNet:
x = pos # Don't use the normals!
x = self.transform_1(x, batch) # (N, 3) -> (N, 128)
x = self.transform_2(x) # (N, 128) -> (N, 1024)
x = global_max_pool(x, batch) # (B, 1024)
x = self.transform_3(x) # (B, 256)
x = self.transform_4(x) # (B, 3*3)
x = x[batch] # (N, 3*3)
x = x.view(-1, 3, 3) # (N, 3, 3)
# Apply the transform:
x0 = torch.einsum("ni,nij->nj", pos, x) # (N, 3)
# Add features to coordinates
x = torch.cat([x0, feat], dim=-1).contiguous()
for i in range(self.n_layers):
x_i = self.conv_layers[i](x, batch)
x_i = self.linear_layers[i](x_i)
x = self.linear_transform[i](x)
x = x + x_i
return x
# Reference PointNet models, from the PyTorch_geometric gallery =========================
class SAModule(torch.nn.Module):
"""Set abstraction module."""
def __init__(self, ratio, r, nn, max_num_neighbors=64):
super(SAModule, self).__init__()
self.ratio = ratio
self.r = r
self.conv = PointConv(nn)
self.max_num_neighbors = max_num_neighbors
def forward(self, x, pos, batch):
# Subsample with Farthest Point Sampling:
# idx = fps(pos, batch, ratio=self.ratio) # Extract self.ratio indices TURN OFF FOR NOW
idx = torch.arange(0, len(pos), device=pos.device)
# For each "cluster", get the list of (up to 64) neighbors in a ball of radius r:
row, col = radius(
pos,
pos[idx],
self.r,
batch,
batch[idx],
max_num_neighbors=self.max_num_neighbors,
)
# Applies the PointNet++ Conv:
edge_index = torch.stack([col, row], dim=0)
x = self.conv(x, (pos, pos[idx]), edge_index)
# Return the features and sub-sampled point clouds:
pos, batch = pos[idx], batch[idx]
return x, pos, batch
class GlobalSAModule(torch.nn.Module):
def __init__(self, nn):
super(GlobalSAModule, self).__init__()
self.nn = nn
def forward(self, x, pos, batch):
x = self.nn(torch.cat([x, pos], dim=1))
x = global_max_pool(x, batch)
pos = pos.new_zeros((x.size(0), 3))
batch = torch.arange(x.size(0), device=batch.device)
return x, pos, batch
class FPModule(torch.nn.Module):
def __init__(self, k, nn):
super(FPModule, self).__init__()
self.k = k
self.nn = nn
def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):
x = knn_interpolate(x, pos, pos_skip, batch, batch_skip, k=self.k)
if x_skip is not None:
x = torch.cat([x, x_skip], dim=1)
x = self.nn(x)
return x, pos_skip, batch_skip
class PointNet2_seg(torch.nn.Module):
def __init__(self, args, in_channels, out_channels):
super(PointNet2_seg, self).__init__()
self.name = "PointNet2"
self.I, self.O = in_channels, out_channels
self.radius = args.radius
self.k = 10000 # We don't restrict the number of points in a patch
self.n_layers = args.n_layers
# self.sa1_module = SAModule(1.0, self.radius, MLP([self.I+3, self.O, self.O]),self.k)
self.layers = nn.ModuleList(
[SAModule(1.0, self.radius, MLP([self.I + 3, self.O, self.O]), self.k)]
+ [
SAModule(1.0, self.radius, MLP([self.O + 3, self.O, self.O]), self.k)
for i in range(self.n_layers - 1)
]
)
self.linear_layers = nn.ModuleList(
[
nn.Sequential(
nn.Linear(self.O, self.O), nn.ReLU(), nn.Linear(self.O, self.O)
)
for i in range(self.n_layers)
]
)
self.linear_transform = nn.ModuleList(
[nn.Linear(self.I, self.O)]
+ [nn.Linear(self.O, self.O) for i in range(self.n_layers - 1)]
)
def forward(self, positions, features, batch_indices):
x = (features, positions, batch_indices)
for i, layer in enumerate(self.layers):
x_i, pos, b_ind = layer(*x)
x_i = self.linear_layers[i](x_i)
x = self.linear_transform[i](x[0])
x = x + x_i
x = (x, pos, b_ind)
return x[0]
## TangentConv benchmark segmentation
class dMaSIFConv_seg(torch.nn.Module):
def __init__(self, args, in_channels, out_channels, n_layers, radius=9.0):
super(dMaSIFConv_seg, self).__init__()
self.name = "dMaSIFConv_seg_keops"
self.radius = radius
self.I, self.O = in_channels, out_channels
self.layers = nn.ModuleList(
[dMaSIFConv(self.I, self.O, radius, self.O)]
+ [dMaSIFConv(self.O, self.O, radius, self.O) for i in range(n_layers - 1)]
)
self.linear_layers = nn.ModuleList(
[
nn.Sequential(
nn.Linear(self.O, self.O), nn.ReLU(), nn.Linear(self.O, self.O)
)
for i in range(n_layers)
]
)
self.linear_transform = nn.ModuleList(
[nn.Linear(self.I, self.O)]
+ [nn.Linear(self.O, self.O) for i in range(n_layers - 1)]
)
def forward(self, features):
# Lab: (B,), Pos: (N, 3), Batch: (N,)
points, nuv, ranges = self.points, self.nuv, self.ranges
x = features
for i, layer in enumerate(self.layers):
x_i = layer(points, nuv, x, ranges)
x_i = self.linear_layers[i](x_i)
x = self.linear_transform[i](x)
x = x + x_i
return x
def load_mesh(self, xyz, triangles=None, normals=None, weights=None, batch=None):
"""Loads the geometry of a triangle mesh.
Input arguments:
- xyz, a point cloud encoded as an (N, 3) Tensor.
        - triangles, a connectivity matrix encoded as an (N, 3) integer tensor.
        - normals, optional precomputed unit normals encoded as an (N, 3) Tensor.
- weights, importance weights for the orientation estimation, encoded as an (N, 1) Tensor.
- radius, the scale used to estimate the local normals.
- a batch vector, following PyTorch_Geometric's conventions.
The routine updates the model attributes:
- points, i.e. the point cloud itself,
- nuv, a local oriented basis in R^3 for every point,
- ranges, custom KeOps syntax to implement batch processing.
"""
# 1. Save the vertices for later use in the convolutions ---------------
self.points = xyz
self.batch = batch
self.ranges = diagonal_ranges(
batch
) # KeOps support for heterogeneous batch processing
self.triangles = triangles
self.normals = normals
self.weights = weights
# 2. Estimate the normals and tangent frame ----------------------------
# Normalize the scale:
points = xyz / self.radius
# Normals and local areas:
if normals is None:
normals, areas = mesh_normals_areas(points, triangles, 0.5, batch)
tangent_bases = tangent_vectors(normals) # Tangent basis (N, 2, 3)
# 3. Steer the tangent bases according to the gradient of "weights" ----
# 3.a) Encoding as KeOps LazyTensors:
# Orientation scores:
weights_j = LazyTensor(weights.view(1, -1, 1)) # (1, N, 1)
# Vertices:
x_i = LazyTensor(points[:, None, :]) # (N, 1, 3)
x_j = LazyTensor(points[None, :, :]) # (1, N, 3)
# Normals:
n_i = LazyTensor(normals[:, None, :]) # (N, 1, 3)
n_j = LazyTensor(normals[None, :, :]) # (1, N, 3)
# Tangent basis:
uv_i = LazyTensor(tangent_bases.view(-1, 1, 6)) # (N, 1, 6)
# 3.b) Pseudo-geodesic window:
# Pseudo-geodesic squared distance:
rho2_ij = ((x_j - x_i) ** 2).sum(-1) * ((2 - (n_i | n_j)) ** 2) # (N, N, 1)
# Gaussian window:
window_ij = (-rho2_ij).exp() # (N, N, 1)
# 3.c) Coordinates in the (u, v) basis - not oriented yet:
X_ij = uv_i.matvecmult(x_j - x_i) # (N, N, 2)
# 3.d) Local average in the tangent plane:
orientation_weight_ij = window_ij * weights_j # (N, N, 1)
orientation_vector_ij = orientation_weight_ij * X_ij # (N, N, 2)
# Support for heterogeneous batch processing:
orientation_vector_ij.ranges = self.ranges # Block-diagonal sparsity mask
orientation_vector_i = orientation_vector_ij.sum(dim=1) # (N, 2)
orientation_vector_i = (
orientation_vector_i + 1e-5
) # Just in case someone's alone...
# 3.e) Normalize stuff:
orientation_vector_i = F.normalize(orientation_vector_i, p=2, dim=-1) # (N, 2)
ex_i, ey_i = (
orientation_vector_i[:, 0][:, None],
orientation_vector_i[:, 1][:, None],
) # (N,1)
# 3.f) Re-orient the (u,v) basis:
uv_i = tangent_bases # (N, 2, 3)
u_i, v_i = uv_i[:, 0, :], uv_i[:, 1, :] # (N, 3)
tangent_bases = torch.cat(
(ex_i * u_i + ey_i * v_i, -ey_i * u_i + ex_i * v_i), dim=1
).contiguous() # (N, 6)
# 4. Store the local 3D frame as an attribute --------------------------
self.nuv = torch.cat(
(normals.view(-1, 1, 3), tangent_bases.view(-1, 2, 3)), dim=1
)
| [
"torch.nn.ReLU",
"torch_geometric.nn.global_max_pool",
"torch_geometric.nn.knn_interpolate",
"torch.nn.LeakyReLU",
"geometry_processing.tangent_vectors",
"geometry_processing.dMaSIFConv",
"torch_geometric.nn.PointConv",
"torch.stack",
"helper.diagonal_ranges",
"torch.nn.functional.normalize",
"torch.nn.BatchNorm1d",
"torch.einsum",
"geometry_processing.mesh_normals_areas",
"torch.nn.Linear",
"torch_geometric.nn.radius",
"torch.nn.Identity",
"pykeops.torch.LazyTensor",
"torch.cat"
] | [((1882, 1897), 'torch.nn.Linear', 'Lin', (['(256)', '(3 * 3)'], {}), '(256, 3 * 3)\n', (1885, 1897), True, 'from torch.nn import Sequential as Seq, Dropout, Linear as Lin, LeakyReLU, ReLU, BatchNorm1d as BN\n'), ((2968, 2993), 'torch_geometric.nn.global_max_pool', 'global_max_pool', (['x', 'batch'], {}), '(x, batch)\n', (2983, 2993), False, 'from torch_geometric.nn import DynamicEdgeConv, PointConv, XConv, fps, radius, global_max_pool, knn_interpolate\n'), ((3216, 3250), 'torch.einsum', 'torch.einsum', (['"""ni,nij->nj"""', 'pos', 'x'], {}), "('ni,nij->nj', pos, x)\n", (3228, 3250), False, 'import torch\n'), ((3903, 3916), 'torch_geometric.nn.PointConv', 'PointConv', (['nn'], {}), '(nn)\n', (3912, 3916), False, 'from torch_geometric.nn import DynamicEdgeConv, PointConv, XConv, fps, radius, global_max_pool, knn_interpolate\n'), ((4323, 4418), 'torch_geometric.nn.radius', 'radius', (['pos', 'pos[idx]', 'self.r', 'batch', 'batch[idx]'], {'max_num_neighbors': 'self.max_num_neighbors'}), '(pos, pos[idx], self.r, batch, batch[idx], max_num_neighbors=self.\n max_num_neighbors)\n', (4329, 4418), False, 'from torch_geometric.nn import DynamicEdgeConv, PointConv, XConv, fps, radius, global_max_pool, knn_interpolate\n'), ((4558, 4588), 'torch.stack', 'torch.stack', (['[col, row]'], {'dim': '(0)'}), '([col, row], dim=0)\n', (4569, 4588), False, 'import torch\n'), ((5011, 5036), 'torch_geometric.nn.global_max_pool', 'global_max_pool', (['x', 'batch'], {}), '(x, batch)\n', (5026, 5036), False, 'from torch_geometric.nn import DynamicEdgeConv, PointConv, XConv, fps, radius, global_max_pool, knn_interpolate\n'), ((5399, 5461), 'torch_geometric.nn.knn_interpolate', 'knn_interpolate', (['x', 'pos', 'pos_skip', 'batch', 'batch_skip'], {'k': 'self.k'}), '(x, pos, pos_skip, batch, batch_skip, k=self.k)\n', (5414, 5461), False, 'from torch_geometric.nn import DynamicEdgeConv, PointConv, XConv, fps, radius, global_max_pool, knn_interpolate\n'), ((9445, 9467), 'helper.diagonal_ranges', 'diagonal_ranges', (['batch'], {}), '(batch)\n', (9460, 9467), False, 'from helper import diagonal_ranges\n'), ((9954, 9978), 'geometry_processing.tangent_vectors', 'tangent_vectors', (['normals'], {}), '(normals)\n', (9969, 9978), False, 'from geometry_processing import dMaSIFConv, mesh_normals_areas, tangent_vectors\n'), ((10267, 10297), 'pykeops.torch.LazyTensor', 'LazyTensor', (['points[:, None, :]'], {}), '(points[:, None, :])\n', (10277, 10297), False, 'from pykeops.torch import LazyTensor\n'), ((10325, 10355), 'pykeops.torch.LazyTensor', 'LazyTensor', (['points[None, :, :]'], {}), '(points[None, :, :])\n', (10335, 10355), False, 'from pykeops.torch import LazyTensor\n'), ((10402, 10433), 'pykeops.torch.LazyTensor', 'LazyTensor', (['normals[:, None, :]'], {}), '(normals[:, None, :])\n', (10412, 10433), False, 'from pykeops.torch import LazyTensor\n'), ((10461, 10492), 'pykeops.torch.LazyTensor', 'LazyTensor', (['normals[None, :, :]'], {}), '(normals[None, :, :])\n', (10471, 10492), False, 'from pykeops.torch import LazyTensor\n'), ((11557, 11603), 'torch.nn.functional.normalize', 'F.normalize', (['orientation_vector_i'], {'p': '(2)', 'dim': '(-1)'}), '(orientation_vector_i, p=2, dim=-1)\n', (11568, 11603), True, 'import torch.nn.functional as F\n'), ((4971, 4997), 'torch.cat', 'torch.cat', (['[x, pos]'], {'dim': '(1)'}), '([x, pos], dim=1)\n', (4980, 4997), False, 'import torch\n'), ((5509, 5538), 'torch.cat', 'torch.cat', (['[x, x_skip]'], {'dim': '(1)'}), '([x, x_skip], dim=1)\n', (5518, 5538), False, 'import torch\n'), 
((9880, 9929), 'geometry_processing.mesh_normals_areas', 'mesh_normals_areas', (['points', 'triangles', '(0.5)', 'batch'], {}), '(points, triangles, 0.5, batch)\n', (9898, 9929), False, 'from geometry_processing import dMaSIFConv, mesh_normals_areas, tangent_vectors\n'), ((3312, 3341), 'torch.cat', 'torch.cat', (['[x0, feat]'], {'dim': '(-1)'}), '([x0, feat], dim=-1)\n', (3321, 3341), False, 'import torch\n'), ((11922, 11991), 'torch.cat', 'torch.cat', (['(ex_i * u_i + ey_i * v_i, -ey_i * u_i + ex_i * v_i)'], {'dim': '(1)'}), '((ex_i * u_i + ey_i * v_i, -ey_i * u_i + ex_i * v_i), dim=1)\n', (11931, 11991), False, 'import torch\n'), ((1051, 1084), 'torch.nn.Linear', 'Lin', (['channels[i - 1]', 'channels[i]'], {}), '(channels[i - 1], channels[i])\n', (1054, 1084), True, 'from torch.nn import Sequential as Seq, Dropout, Linear as Lin, LeakyReLU, ReLU, BatchNorm1d as BN\n'), ((1168, 1197), 'torch.nn.LeakyReLU', 'LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (1177, 1197), False, 'from torch.nn import Sequential as Seq, Dropout, Linear as Lin, LeakyReLU, ReLU, BatchNorm1d as BN\n'), ((2286, 2311), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (2295, 2311), True, 'import torch.nn as nn\n'), ((2313, 2322), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2320, 2322), True, 'import torch.nn as nn\n'), ((2324, 2349), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (2333, 2349), True, 'import torch.nn as nn\n'), ((2494, 2519), 'torch.nn.Linear', 'nn.Linear', (['self.I', 'self.O'], {}), '(self.I, self.O)\n', (2503, 2519), True, 'import torch.nn as nn\n'), ((2536, 2561), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (2545, 2561), True, 'import torch.nn as nn\n'), ((6479, 6504), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (6488, 6504), True, 'import torch.nn as nn\n'), ((6506, 6515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6513, 6515), True, 'import torch.nn as nn\n'), ((6517, 6542), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (6526, 6542), True, 'import torch.nn as nn\n'), ((6692, 6717), 'torch.nn.Linear', 'nn.Linear', (['self.I', 'self.O'], {}), '(self.I, self.O)\n', (6701, 6717), True, 'import torch.nn as nn\n'), ((6734, 6759), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (6743, 6759), True, 'import torch.nn as nn\n'), ((7553, 7595), 'geometry_processing.dMaSIFConv', 'dMaSIFConv', (['self.I', 'self.O', 'radius', 'self.O'], {}), '(self.I, self.O, radius, self.O)\n', (7563, 7595), False, 'from geometry_processing import dMaSIFConv, mesh_normals_areas, tangent_vectors\n'), ((7612, 7654), 'geometry_processing.dMaSIFConv', 'dMaSIFConv', (['self.O', 'self.O', 'radius', 'self.O'], {}), '(self.O, self.O, radius, self.O)\n', (7622, 7654), False, 'from geometry_processing import dMaSIFConv, mesh_normals_areas, tangent_vectors\n'), ((7805, 7830), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (7814, 7830), True, 'import torch.nn as nn\n'), ((7832, 7841), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7839, 7841), True, 'import torch.nn as nn\n'), ((7843, 7868), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (7852, 7868), True, 'import torch.nn as nn\n'), ((8013, 8038), 'torch.nn.Linear', 'nn.Linear', (['self.I', 'self.O'], {}), '(self.I, self.O)\n', (8022, 8038), True, 'import torch.nn as nn\n'), 
((8055, 8080), 'torch.nn.Linear', 'nn.Linear', (['self.O', 'self.O'], {}), '(self.O, self.O)\n', (8064, 8080), True, 'import torch.nn as nn\n'), ((1102, 1117), 'torch.nn.BatchNorm1d', 'BN', (['channels[i]'], {}), '(channels[i])\n', (1104, 1117), True, 'from torch.nn import Sequential as Seq, Dropout, Linear as Lin, LeakyReLU, ReLU, BatchNorm1d as BN\n'), ((1137, 1150), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1148, 1150), True, 'import torch.nn as nn\n')] |
import collections as cl
import oyaml as yaml
def read_init_file(init_file_name):
"""Reads in the model specification from yaml file."""
# Import yaml initialization file as dictionary init_dict
with open(init_file_name) as y:
init_dict = yaml.load(y, Loader=yaml.FullLoader)
init_dict = expand_init_dict(init_dict)
model_params = create_namedtuple(init_dict)
return model_params
def expand_init_dict(init_dict):
"""Enhances read in initialization dictionary by
adding model parameters derived from the
specified initialisation file"""
# Calculate range of years of education in the (simulated) sample
educ_min = init_dict["INITIAL_CONDITIONS"]["educ_min"]
educ_max = init_dict["INITIAL_CONDITIONS"]["educ_max"]
educ_range = educ_max - educ_min + 1
# Calculate covariances of the error terms given standard deviations
shocks_cov = init_dict["PARAMETERS"]["optim_paras"][14:17]
shocks_cov = [shocks_cov[0] ** 2, shocks_cov[1] ** 2, shocks_cov[2] ** 2]
init_dict["DERIVED_ATTR"] = {"educ_range": educ_range, "shocks_cov": shocks_cov}
# Return function output
return init_dict
def create_namedtuple(init_dict):
"""Transfers model specification from a dictionary
to a named tuple class object."""
model_params = cl.namedtuple("model_parameters", "")
model_params.num_periods = init_dict["GENERAL"]["num_periods"]
model_params.num_choices = init_dict["GENERAL"]["num_choices"]
model_params.delta = init_dict["CONSTANTS"]["delta"]
model_params.mu = init_dict["CONSTANTS"]["mu"]
model_params.benefits = init_dict["CONSTANTS"]["benefits"]
model_params.educ_max = init_dict["INITIAL_CONDITIONS"]["educ_max"]
model_params.educ_min = init_dict["INITIAL_CONDITIONS"]["educ_min"]
model_params.seed_sim = init_dict["SIMULATION"]["seed_sim"]
model_params.num_agents_sim = init_dict["SIMULATION"]["num_agents_sim"]
model_params.seed_emax = init_dict["SOLUTION"]["seed_emax"]
model_params.num_draws_emax = init_dict["SOLUTION"]["num_draws_emax"]
model_params.optim_paras = init_dict["PARAMETERS"]["optim_paras"]
model_params.educ_range = init_dict["DERIVED_ATTR"]["educ_range"]
model_params.shocks_cov = init_dict["DERIVED_ATTR"]["shocks_cov"]
return model_params
| [
"oyaml.load",
"collections.namedtuple"
] | [((1320, 1357), 'collections.namedtuple', 'cl.namedtuple', (['"""model_parameters"""', '""""""'], {}), "('model_parameters', '')\n", (1333, 1357), True, 'import collections as cl\n'), ((263, 299), 'oyaml.load', 'yaml.load', (['y'], {'Loader': 'yaml.FullLoader'}), '(y, Loader=yaml.FullLoader)\n', (272, 299), True, 'import oyaml as yaml\n')] |
'''
This example shows how to load the dataset using python and how to evaluate a method
For each video in the test set it
- randomly scores frames
- computes the average precision and nMSD
'''
__author__ = 'michaelgygli'
import pandas as pd
import numpy as np
import sys
# Import v2g_evaluation
# Needs to be done from the root of the repository
# or the package is installed via python setup.py install
import v2g_evaluation
# Read csv file using pandas
#For more info on pandas check http://pandas.pydata.org/pandas-docs/stable/10min.html
dataset=pd.read_csv('metadata.txt',sep=';\t',engine='python')
# Read test IDs
with open('testset.txt','r') as f:
test_ids=[l.rstrip('\n') for l in f]
def evaluate_random():
'''
This function shows how a method can be evaluated
'''
all_ap=np.zeros(len(test_ids))
all_msd=np.zeros(len(test_ids))
for idx,youtube_id in enumerate(test_ids):
y_gt=v2g_evaluation.get_gt_score(youtube_id, dataset)
y_predicted=np.random.rand(len(y_gt[0]))
all_ap[idx] = v2g_evaluation.get_ap(np.array(y_gt).max(axis=0), y_predicted)
all_msd[idx] = v2g_evaluation.meaningful_summary_duration(y_gt, y_predicted)
print('AP=%.2f%%; MSD=%.2f%%' % (100*np.mean(all_ap),100*np.mean(all_msd)))
if __name__=='__main__':
sys.stdout.write('Evaluate random performance\n')
sys.stdout.flush()
evaluate_random() | [
"numpy.mean",
"pandas.read_csv",
"v2g_evaluation.get_gt_score",
"numpy.array",
"sys.stdout.flush",
"v2g_evaluation.meaningful_summary_duration",
"sys.stdout.write"
] | [((560, 615), 'pandas.read_csv', 'pd.read_csv', (['"""metadata.txt"""'], {'sep': '""";\t"""', 'engine': '"""python"""'}), "('metadata.txt', sep=';\\t', engine='python')\n", (571, 615), True, 'import pandas as pd\n'), ((1314, 1363), 'sys.stdout.write', 'sys.stdout.write', (['"""Evaluate random performance\n"""'], {}), "('Evaluate random performance\\n')\n", (1330, 1363), False, 'import sys\n'), ((1368, 1386), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1384, 1386), False, 'import sys\n'), ((934, 982), 'v2g_evaluation.get_gt_score', 'v2g_evaluation.get_gt_score', (['youtube_id', 'dataset'], {}), '(youtube_id, dataset)\n', (961, 982), False, 'import v2g_evaluation\n'), ((1141, 1202), 'v2g_evaluation.meaningful_summary_duration', 'v2g_evaluation.meaningful_summary_duration', (['y_gt', 'y_predicted'], {}), '(y_gt, y_predicted)\n', (1183, 1202), False, 'import v2g_evaluation\n'), ((1077, 1091), 'numpy.array', 'np.array', (['y_gt'], {}), '(y_gt)\n', (1085, 1091), True, 'import numpy as np\n'), ((1244, 1259), 'numpy.mean', 'np.mean', (['all_ap'], {}), '(all_ap)\n', (1251, 1259), True, 'import numpy as np\n'), ((1264, 1280), 'numpy.mean', 'np.mean', (['all_msd'], {}), '(all_msd)\n', (1271, 1280), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .decoder import DecoderBase
def he_init(m):
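    # He (Kaiming) initialization: zero-mean normal with std sqrt(2 / fan_in).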
s = np.sqrt(2./ m.in_features)
m.weight.data.normal_(0, s)
class GatedMaskedConv2d(nn.Module):
def __init__(self, in_dim, out_dim=None, kernel_size = 3, mask = 'B'):
super(GatedMaskedConv2d, self).__init__()
if out_dim is None:
out_dim = in_dim
self.dim = out_dim
self.size = kernel_size
self.mask = mask
pad = self.size // 2
#vertical stack
self.v_conv = nn.Conv2d(in_dim, 2*self.dim, kernel_size=(pad+1, self.size))
self.v_pad1 = nn.ConstantPad2d((pad, pad, pad, 0), 0)
self.v_pad2 = nn.ConstantPad2d((0, 0, 1, 0), 0)
self.vh_conv = nn.Conv2d(2*self.dim, 2*self.dim, kernel_size = 1)
#horizontal stack
self.h_conv = nn.Conv2d(in_dim, 2*self.dim, kernel_size=(1, pad+1))
self.h_pad1 = nn.ConstantPad2d((self.size // 2, 0, 0, 0), 0)
self.h_pad2 = nn.ConstantPad2d((1, 0, 0, 0), 0)
self.h_conv_res = nn.Conv2d(self.dim, self.dim, 1)
def forward(self, v_map, h_map):
v_out = self.v_pad2(self.v_conv(self.v_pad1(v_map)))[:, :, :-1, :]
v_map_out = F.tanh(v_out[:, :self.dim])*F.sigmoid(v_out[:, self.dim:])
vh = self.vh_conv(v_out)
h_out = self.h_conv(self.h_pad1(h_map))
if self.mask == 'A':
h_out = self.h_pad2(h_out)[:, :, :, :-1]
h_out = h_out + vh
h_out = F.tanh(h_out[:, :self.dim])*F.sigmoid(h_out[:, self.dim:])
h_map_out = self.h_conv_res(h_out)
if self.mask == 'B':
h_map_out = h_map_out + h_map
return v_map_out, h_map_out
class StackedGatedMaskedConv2d(nn.Module):
def __init__(self,
img_size = [1, 28, 28], layers = [64,64,64],
kernel_size = [7,7,7], latent_dim=64, latent_feature_map = 1):
super(StackedGatedMaskedConv2d, self).__init__()
input_dim = img_size[0]
self.conv_layers = []
if latent_feature_map > 0:
self.latent_feature_map = latent_feature_map
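            # Project the latent code into latent_feature_map channels of 28x28 so it can be concatenated with the input image.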
self.z_linear = nn.Linear(latent_dim, latent_feature_map*28*28)
for i in range(len(kernel_size)):
if i == 0:
self.conv_layers.append(GatedMaskedConv2d(input_dim+latent_feature_map,
layers[i], kernel_size[i], 'A'))
else:
self.conv_layers.append(GatedMaskedConv2d(layers[i-1], layers[i], kernel_size[i]))
self.modules = nn.ModuleList(self.conv_layers)
def forward(self, img, q_z=None):
"""
Args:
img: (batch, nc, H, W)
q_z: (batch, nsamples, nz)
"""
batch_size, nsamples, _ = q_z.size()
if q_z is not None:
z_img = self.z_linear(q_z)
z_img = z_img.view(img.size(0), nsamples, self.latent_feature_map, img.size(2), img.size(3))
# (batch, nsamples, nc, H, W)
img = img.unsqueeze(1).expand(batch_size, nsamples, *img.size()[1:])
for i in range(len(self.conv_layers)):
if i == 0:
if q_z is not None:
# (batch, nsamples, nc + fm, H, W) --> (batch * nsamples, nc + fm, H, W)
v_map = torch.cat([img, z_img], 2)
v_map = v_map.view(-1, *v_map.size()[2:])
else:
v_map = img
h_map = v_map
v_map, h_map = self.conv_layers[i](v_map, h_map)
return h_map
class PixelCNNDecoder(DecoderBase):
"""docstring for PixelCNNDecoder"""
def __init__(self, args):
super(PixelCNNDecoder, self).__init__()
self.dec_cnn = StackedGatedMaskedConv2d(img_size=args.img_size, layers = args.dec_layers,
latent_dim= args.nz, kernel_size = args.dec_kernel_size,
latent_feature_map = args.latent_feature_map)
self.dec_linear = nn.Conv2d(args.dec_layers[-1], args.img_size[0], kernel_size = 1)
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
he_init(m)
def decode(self, img, q_z):
dec_cnn_output = self.dec_cnn(img, q_z)
pred = F.sigmoid(self.dec_linear(dec_cnn_output))
return pred
def reconstruct_error(self, x, z):
"""Cross Entropy in the language case
Args:
x: (batch_size, nc, H, W)
z: (batch_size, n_sample, nz)
Returns:
loss: (batch_size, n_sample). Loss
across different sentence and z
"""
batch_size, nsamples, _ = z.size()
# (batch * nsamples, nc, H, W)
pred = self.decode(x, z)
prob = torch.clamp(pred.view(pred.size(0), -1), min=1e-5, max=1.-1e-5)
# (batch, nsamples, nc, H, W) --> (batch * nsamples, nc, H, W)
x = x.unsqueeze(1).expand(batch_size, nsamples, *x.size()[1:]).contiguous()
tgt_vec = x.view(-1, *x.size()[2:])
# (batch * nsamples, *)
tgt_vec = tgt_vec.view(tgt_vec.size(0), -1)
log_bernoulli = tgt_vec * torch.log(prob) + (1. - tgt_vec)*torch.log(1. - prob)
log_bernoulli = log_bernoulli.view(batch_size, nsamples, -1)
return -torch.sum(log_bernoulli, 2)
def log_probability(self, x, z):
"""Cross Entropy in the language case
Args:
x: (batch_size, nc, H, W)
z: (batch_size, n_sample, nz)
Returns:
log_p: (batch_size, n_sample).
log_p(x|z) across different x and z
"""
return -self.reconstruct_error(x, z)
| [
"torch.nn.functional.tanh",
"numpy.sqrt",
"torch.log",
"torch.nn.ModuleList",
"torch.nn.functional.sigmoid",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Linear",
"torch.nn.ConstantPad2d",
"torch.cat"
] | [((145, 173), 'numpy.sqrt', 'np.sqrt', (['(2.0 / m.in_features)'], {}), '(2.0 / m.in_features)\n', (152, 173), True, 'import numpy as np\n'), ((583, 648), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', '(2 * self.dim)'], {'kernel_size': '(pad + 1, self.size)'}), '(in_dim, 2 * self.dim, kernel_size=(pad + 1, self.size))\n', (592, 648), True, 'import torch.nn as nn\n'), ((667, 706), 'torch.nn.ConstantPad2d', 'nn.ConstantPad2d', (['(pad, pad, pad, 0)', '(0)'], {}), '((pad, pad, pad, 0), 0)\n', (683, 706), True, 'import torch.nn as nn\n'), ((729, 762), 'torch.nn.ConstantPad2d', 'nn.ConstantPad2d', (['(0, 0, 1, 0)', '(0)'], {}), '((0, 0, 1, 0), 0)\n', (745, 762), True, 'import torch.nn as nn\n'), ((786, 838), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * self.dim)', '(2 * self.dim)'], {'kernel_size': '(1)'}), '(2 * self.dim, 2 * self.dim, kernel_size=1)\n', (795, 838), True, 'import torch.nn as nn\n'), ((886, 943), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', '(2 * self.dim)'], {'kernel_size': '(1, pad + 1)'}), '(in_dim, 2 * self.dim, kernel_size=(1, pad + 1))\n', (895, 943), True, 'import torch.nn as nn\n'), ((962, 1008), 'torch.nn.ConstantPad2d', 'nn.ConstantPad2d', (['(self.size // 2, 0, 0, 0)', '(0)'], {}), '((self.size // 2, 0, 0, 0), 0)\n', (978, 1008), True, 'import torch.nn as nn\n'), ((1031, 1064), 'torch.nn.ConstantPad2d', 'nn.ConstantPad2d', (['(1, 0, 0, 0)', '(0)'], {}), '((1, 0, 0, 0), 0)\n', (1047, 1064), True, 'import torch.nn as nn\n'), ((1091, 1123), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.dim', 'self.dim', '(1)'], {}), '(self.dim, self.dim, 1)\n', (1100, 1123), True, 'import torch.nn as nn\n'), ((2611, 2642), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.conv_layers'], {}), '(self.conv_layers)\n', (2624, 2642), True, 'import torch.nn as nn\n'), ((4098, 4161), 'torch.nn.Conv2d', 'nn.Conv2d', (['args.dec_layers[-1]', 'args.img_size[0]'], {'kernel_size': '(1)'}), '(args.dec_layers[-1], args.img_size[0], kernel_size=1)\n', (4107, 4161), True, 'import torch.nn as nn\n'), ((1257, 1284), 'torch.nn.functional.tanh', 'F.tanh', (['v_out[:, :self.dim]'], {}), '(v_out[:, :self.dim])\n', (1263, 1284), True, 'import torch.nn.functional as F\n'), ((1285, 1315), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['v_out[:, self.dim:]'], {}), '(v_out[:, self.dim:])\n', (1294, 1315), True, 'import torch.nn.functional as F\n'), ((1523, 1550), 'torch.nn.functional.tanh', 'F.tanh', (['h_out[:, :self.dim]'], {}), '(h_out[:, :self.dim])\n', (1529, 1550), True, 'import torch.nn.functional as F\n'), ((1551, 1581), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['h_out[:, self.dim:]'], {}), '(h_out[:, self.dim:])\n', (1560, 1581), True, 'import torch.nn.functional as F\n'), ((2180, 2231), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', '(latent_feature_map * 28 * 28)'], {}), '(latent_dim, latent_feature_map * 28 * 28)\n', (2189, 2231), True, 'import torch.nn as nn\n'), ((5446, 5473), 'torch.sum', 'torch.sum', (['log_bernoulli', '(2)'], {}), '(log_bernoulli, 2)\n', (5455, 5473), False, 'import torch\n'), ((5305, 5320), 'torch.log', 'torch.log', (['prob'], {}), '(prob)\n', (5314, 5320), False, 'import torch\n'), ((5338, 5359), 'torch.log', 'torch.log', (['(1.0 - prob)'], {}), '(1.0 - prob)\n', (5347, 5359), False, 'import torch\n'), ((3364, 3390), 'torch.cat', 'torch.cat', (['[img, z_img]', '(2)'], {}), '([img, z_img], 2)\n', (3373, 3390), False, 'import torch\n')] |
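# --- Usage sketch for PixelCNNDecoder above -----------------------------------
# A minimal smoke test, assuming the module is importable (it needs DecoderBase
# from .decoder) and that `args` carries the fields the constructor reads; the
# concrete values below are illustrative, not the original training config.
from argparse import Namespace
import torch

args = Namespace(img_size=[1, 28, 28], dec_layers=[64, 64, 64],
                 dec_kernel_size=[7, 7, 7], nz=32, latent_feature_map=4)
decoder = PixelCNNDecoder(args)

x = (torch.rand(8, 1, 28, 28) > 0.5).float()   # (batch, nc, H, W) binarized images
z = torch.randn(8, 5, args.nz)              # (batch, nsamples, nz) latent samples
loss = decoder.reconstruct_error(x, z)       # (batch, nsamples)
print(loss.shape)
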
from abc import ABC, abstractmethod
import networkx as nx
import cargonet.utils.geo as geo
class TransportGraphBuilder(ABC):
def __init__(self, result):
self.result = result
@classmethod
@abstractmethod
def from_nodes_edges(
cls, station_id_node_mapping, transport_edges, undirected=False
):
pass
def build(self):
return self.result
class NXTGBuilder(TransportGraphBuilder):
@classmethod
def from_nodes_edges(
cls, station_id_node_mapping, transport_edges, undirected=False
):
"""
Builds networkX transport graph
"""
tg = nx.Graph() if undirected else nx.DiGraph()
for s_id, mapping in station_id_node_mapping.items():
index = mapping["index"]
if index is None:
continue
node_attrs = mapping.copy()
node_attrs["stationId"] = s_id
tg.add_node(index, **node_attrs)
tg.add_edges_from(transport_edges)
for u, v in tg.edges:
# Add distance edge feature
p1, p2 = tg.nodes[u].get("pos"), tg.nodes[v].get("pos")
if None not in [p1, p2]:
tg.edges[u, v]["distance"] = geo.dist_m_v2(p1, p2) / 1000.0
# TODO: Add more delay metrics
tg.edges[u, v]["delay"] = tg.nodes[v].get("delayRelPercent") # or delayRel
# Add planned duration edge feature
p_d, p_a = (
tg.nodes[u].get("plannedDepartureTime"),
tg.nodes[v].get("plannedArrivalTime"),
)
if None not in [p_a, p_d]:
tg.edges[u, v]["plannedDuration"] = p_a - p_d
# Add real duration edge feature
r_d, r_a = (
tg.nodes[u].get("departureTime"),
tg.nodes[v].get("arrivalTime"),
)
if None not in [r_a, r_d]:
tg.edges[u, v]["duration"] = r_a - r_d
return cls(tg)
| [
"cargonet.utils.geo.dist_m_v2",
"networkx.DiGraph",
"networkx.Graph"
] | [((638, 648), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (646, 648), True, 'import networkx as nx\n'), ((668, 680), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (678, 680), True, 'import networkx as nx\n'), ((1227, 1248), 'cargonet.utils.geo.dist_m_v2', 'geo.dist_m_v2', (['p1', 'p2'], {}), '(p1, p2)\n', (1240, 1248), True, 'import cargonet.utils.geo as geo\n')] |
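# --- Usage sketch for NXTGBuilder above ----------------------------------------
# Builds a two-node transport graph; station ids, (lat, lon) positions and
# timestamps are made up for illustration, and the exact position format
# expected by geo.dist_m_v2 is an assumption.
station_id_node_mapping = {
    "S1": {"index": 0, "pos": (52.52, 13.40),
           "plannedDepartureTime": 0, "departureTime": 60},
    "S2": {"index": 1, "pos": (52.50, 13.37), "delayRelPercent": 0.17,
           "plannedArrivalTime": 600, "arrivalTime": 700},
}
transport_edges = [(0, 1)]
tg = NXTGBuilder.from_nodes_edges(station_id_node_mapping, transport_edges).build()
print(tg.edges[0, 1])   # distance, delay, plannedDuration, duration
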
import signal
from howdy import signal_handler
signal.signal( signal.SIGINT, signal_handler )
import logging, os, re, time
from itertools import chain
from multiprocessing import Pool
from argparse import ArgumentParser
#
from howdy.core import core_deluge, core, core_torrents
def get_items_jackett( name, maxnum = 1000, verify = True ):
assert( maxnum >= 5 )
logging.info( 'started getting book torrents with jackett %s.' % name )
items, status = core_torrents.get_book_torrent_jackett(
name, maxnum = maxnum, verify = verify )
if status != 'SUCCESS':
logging.info( 'ERROR, JACKETT COULD NOT FIND %s.' % name )
return None
return items
def get_book_torrent_items(
items, filename = None, to_torrent_server = False ):
if len( items ) != 1:
sortdict = { idx + 1 : item for ( idx, item ) in enumerate( items ) }
bs = 'Choose candidate book item:\n%s\n' % '\n'.join(
map(lambda idx: '%d: %s (%d SE, %d LE)' % (
idx, sortdict[ idx ][ 'title'], sortdict[ idx ][ 'seeders' ],
sortdict[ idx ][ 'leechers' ] ),
sorted( sortdict ) ) )
iidx = input( bs )
try:
iidx = int( iidx.strip( ) )
if iidx not in sortdict:
print('Error, need to choose one of the candidate books. Exiting...')
return
magnet_link = sortdict[ iidx ][ 'link' ]
actbook = sortdict[ iidx ][ 'title' ]
except Exception as e:
print( 'Error, did not give a valid integer value. Exiting...' )
return
else:
actbook = max( items )[ 'title' ]
magnet_link = max( items )[ 'link' ]
print( 'Chosen book: %s' % actbook )
if to_torrent_server: # upload to deluge server
client, status = core_deluge.get_deluge_client( )
if status != 'SUCCESS':
print( status )
return
core_deluge.deluge_add_magnet_file(
client, magnet_link )
elif filename is None:
print( 'magnet link: %s' % magnet_link )
else:
with open( filename, 'w' ) as openfile:
openfile.write( '%s\n' % magnet_link )
def main( ):
parser = ArgumentParser( )
parser.add_argument('-n', '--name', dest='name', type=str, action='store', required = True,
help = 'Name of the book to get.')
parser.add_argument('--maxnum', dest='maxnum', type=int, action='store', default = 10,
help = 'Maximum number of torrents to look through. Default is 10.')
parser.add_argument('-f', '--filename', dest='filename', action='store', type=str,
help = 'If defined, put magnet link into filename.')
parser.add_argument('--add', dest='do_add', action='store_true', default = False,
help = 'If chosen, push the magnet link into the deluge server.' )
parser.add_argument('--info', dest='do_info', action='store_true', default = False,
help = 'If chosen, run in info mode.' )
parser.add_argument('--noverify', dest='do_verify', action='store_false', default = True,
help = 'If chosen, do not verify SSL connections.' )
args = parser.parse_args( )
logger = logging.getLogger( )
if args.do_info: logger.setLevel( logging.INFO )
#
time0 = time.time( )
if core.get_jackett_credentials( ) is None:
print( 'Error, Jackett server does not work. Exiting...' )
        return
items = get_items_jackett( args.name, maxnum = args.maxnum, verify = args.do_verify )
logging.info( 'search for %s took %0.3f seconds.' % ( args.name, time.time( ) - time0 ) )
if items is None: return
#
## sort from most seeders + leecher to least
items_sorted = sorted( items, key = lambda tup: (
-tup['seeders'] - tup['leechers'], tup['title'] ) )[:args.maxnum]
get_book_torrent_items( items_sorted, filename = args.filename, to_torrent_server = args.do_add )
| [
"logging.getLogger",
"signal.signal",
"argparse.ArgumentParser",
"howdy.core.core_torrents.get_book_torrent_jackett",
"howdy.core.core.get_jackett_credentials",
"howdy.core.core_deluge.get_deluge_client",
"howdy.core.core_deluge.deluge_add_magnet_file",
"time.time",
"logging.info"
] | [((47, 91), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (60, 91), False, 'import signal\n'), ((370, 439), 'logging.info', 'logging.info', (["('started getting book torrents with jackett %s.' % name)"], {}), "('started getting book torrents with jackett %s.' % name)\n", (382, 439), False, 'import logging, os, re, time\n'), ((462, 536), 'howdy.core.core_torrents.get_book_torrent_jackett', 'core_torrents.get_book_torrent_jackett', (['name'], {'maxnum': 'maxnum', 'verify': 'verify'}), '(name, maxnum=maxnum, verify=verify)\n', (500, 536), False, 'from howdy.core import core_deluge, core, core_torrents\n'), ((2231, 2247), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2245, 2247), False, 'from argparse import ArgumentParser\n'), ((3287, 3306), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3304, 3306), False, 'import logging, os, re, time\n'), ((3379, 3390), 'time.time', 'time.time', ([], {}), '()\n', (3388, 3390), False, 'import logging, os, re, time\n'), ((587, 643), 'logging.info', 'logging.info', (["('ERROR, JACKETT COULD NOT FIND %s.' % name)"], {}), "('ERROR, JACKETT COULD NOT FIND %s.' % name)\n", (599, 643), False, 'import logging, os, re, time\n'), ((1829, 1860), 'howdy.core.core_deluge.get_deluge_client', 'core_deluge.get_deluge_client', ([], {}), '()\n', (1858, 1860), False, 'from howdy.core import core_deluge, core, core_torrents\n'), ((1949, 2004), 'howdy.core.core_deluge.deluge_add_magnet_file', 'core_deluge.deluge_add_magnet_file', (['client', 'magnet_link'], {}), '(client, magnet_link)\n', (1983, 2004), False, 'from howdy.core import core_deluge, core, core_torrents\n'), ((3399, 3429), 'howdy.core.core.get_jackett_credentials', 'core.get_jackett_credentials', ([], {}), '()\n', (3427, 3429), False, 'from howdy.core import core_deluge, core, core_torrents\n'), ((3680, 3691), 'time.time', 'time.time', ([], {}), '()\n', (3689, 3691), False, 'import logging, os, re, time\n')] |
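# --- Usage sketch for the jackett helpers above ---------------------------------
# Programmatic use outside the CLI main(); the book title is a placeholder and
# a reachable Jackett server (configured via howdy) is assumed.
items = get_items_jackett("The Name of the Wind", maxnum=20, verify=True)
if items is not None:
    items_sorted = sorted(items, key=lambda tup: (
        -tup['seeders'] - tup['leechers'], tup['title']))
    # writes the chosen magnet link to a file instead of pushing it to deluge
    get_book_torrent_items(items_sorted, filename="book.magnet", to_torrent_server=False)
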
from django.conf import settings
from django.db import models
from posts.models import Post
class Comment(models.Model):
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
parent = models.ForeignKey(
'self',
null=True,
blank=True,
on_delete=models.CASCADE,
)
content = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return f'Comment on {self.post}'
| [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.ForeignKey"
] | [((138, 207), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (155, 207), False, 'from django.db import models\n'), ((243, 292), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Post'], {'on_delete': 'models.CASCADE'}), '(Post, on_delete=models.CASCADE)\n', (260, 292), False, 'from django.db import models\n'), ((307, 381), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE'}), "('self', null=True, blank=True, on_delete=models.CASCADE)\n", (324, 381), False, 'from django.db import models\n'), ((436, 454), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (452, 454), False, 'from django.db import models\n'), ((470, 509), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (490, 509), False, 'from django.db import models\n'), ((524, 559), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (544, 559), False, 'from django.db import models\n')] |
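# --- Usage sketch for the Comment model above -----------------------------------
# Creating a top-level comment and a threaded reply, e.g. from a view or the
# Django shell; `user` and `post` are assumed to be existing instances.
comment = Comment.objects.create(author=user, post=post, content="Nice write-up!")
reply = Comment.objects.create(author=user, post=post, parent=comment,
                               content="Thanks!")
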
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class CreateNodegroupTemplate(tables.LinkAction):
name = "create"
verbose_name = _("Create Template")
url = ("horizon:project:data_processing.nodegroup_templates:"
"create-nodegroup-template")
classes = ("ajax-modal", "btn-create", "create-nodegrouptemplate-btn")
class ConfigureNodegroupTemplate(tables.LinkAction):
name = "configure"
verbose_name = _("Configure Template")
url = ("horizon:project:data_processing.nodegroup_templates:"
"configure-nodegroup-template")
classes = ("ajax-modal", "btn-create", "configure-nodegrouptemplate-btn")
attrs = {"style": "display: none"}
class CopyTemplate(tables.LinkAction):
name = "copy"
verbose_name = _("Copy Template")
url = "horizon:project:data_processing.nodegroup_templates:copy"
classes = ("ajax-modal", )
class DeleteTemplate(tables.BatchAction):
name = "delete_nodegroup_template"
verbose_name = _("Delete")
classes = ("btn-terminate", "btn-danger")
action_present = _("Delete")
action_past = _("Deleted")
data_type_singular = _("Template")
data_type_plural = _("Templates")
def action(self, request, template_id):
saharaclient.nodegroup_template_delete(request, template_id)
def render_processes(nodegroup_template):
template_name = (
'project/data_processing.nodegroup_templates/_processes_list.html')
context = {"processes": nodegroup_template.node_processes}
return template.loader.render_to_string(template_name, context)
class NodegroupTemplatesTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link=("horizon:project:data_processing.nodegroup_templates:details"))
plugin_name = tables.Column("plugin_name",
verbose_name=_("Plugin"))
hadoop_version = tables.Column("hadoop_version",
verbose_name=_("Hadoop Version"))
node_processes = tables.Column(render_processes,
verbose_name=_("Node Processes"))
class Meta:
name = "nodegroup_templates"
verbose_name = _("Node Group Templates")
table_actions = (CreateNodegroupTemplate,
ConfigureNodegroupTemplate,
DeleteTemplate)
row_actions = (CopyTemplate,
DeleteTemplate,)
| [
"logging.getLogger",
"openstack_dashboard.api.sahara.nodegroup_template_delete",
"django.utils.translation.ugettext_lazy",
"django.template.loader.render_to_string"
] | [((740, 767), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (757, 767), False, 'import logging\n'), ((859, 879), 'django.utils.translation.ugettext_lazy', '_', (['"""Create Template"""'], {}), "('Create Template')\n", (860, 879), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1158, 1181), 'django.utils.translation.ugettext_lazy', '_', (['"""Configure Template"""'], {}), "('Configure Template')\n", (1159, 1181), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1486, 1504), 'django.utils.translation.ugettext_lazy', '_', (['"""Copy Template"""'], {}), "('Copy Template')\n", (1487, 1504), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1707, 1718), 'django.utils.translation.ugettext_lazy', '_', (['"""Delete"""'], {}), "('Delete')\n", (1708, 1718), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1787, 1798), 'django.utils.translation.ugettext_lazy', '_', (['"""Delete"""'], {}), "('Delete')\n", (1788, 1798), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1817, 1829), 'django.utils.translation.ugettext_lazy', '_', (['"""Deleted"""'], {}), "('Deleted')\n", (1818, 1829), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1855, 1868), 'django.utils.translation.ugettext_lazy', '_', (['"""Template"""'], {}), "('Template')\n", (1856, 1868), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1892, 1906), 'django.utils.translation.ugettext_lazy', '_', (['"""Templates"""'], {}), "('Templates')\n", (1893, 1906), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2237, 2293), 'django.template.loader.render_to_string', 'template.loader.render_to_string', (['template_name', 'context'], {}), '(template_name, context)\n', (2269, 2293), False, 'from django import template\n'), ((1960, 2020), 'openstack_dashboard.api.sahara.nodegroup_template_delete', 'saharaclient.nodegroup_template_delete', (['request', 'template_id'], {}), '(request, template_id)\n', (1998, 2020), True, 'from openstack_dashboard.api import sahara as saharaclient\n'), ((2914, 2939), 'django.utils.translation.ugettext_lazy', '_', (['"""Node Group Templates"""'], {}), "('Node Group Templates')\n", (2915, 2939), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2399, 2408), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (2400, 2408), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2580, 2591), 'django.utils.translation.ugettext_lazy', '_', (['"""Plugin"""'], {}), "('Plugin')\n", (2581, 2591), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2694, 2713), 'django.utils.translation.ugettext_lazy', '_', (['"""Hadoop Version"""'], {}), "('Hadoop Version')\n", (2695, 2713), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2816, 2835), 'django.utils.translation.ugettext_lazy', '_', (['"""Node Processes"""'], {}), "('Node Processes')\n", (2817, 2835), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
#!/usr/bin/python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unittest requires method names starting in 'test'
# pylint:disable=invalid-name
"""Unit tests for dhcp.py implementation."""
__author__ = '<EMAIL> (<NAME>)'
import google3
from tr.wvtest import unittest
import tr.cwmpdate
import tr.handle
import dhcp
class DhcpTest(unittest.TestCase):
"""Tests for dhcp.py."""
def testClient(self):
client = dhcp.Client(ipaddr='1.2.3.4', chaddr='00:01:02:03:04:05',
expiry=1389022961, clientid='clientid_1',
hostname='hostname_1', userclassid='userclassid_1',
vendorclassid='vendorclassid_1')
self.assertEqual(client.Chaddr, '00:01:02:03:04:05')
self.assertEqual(client.IPv4AddressNumberOfEntries, 1)
client.AddIP(ipaddr='1.2.3.5', expiry=1389022962)
self.assertEqual(client.IPv4AddressNumberOfEntries, 2)
self.assertEqual(client.IPv4AddressList['1'].IPAddress, '1.2.3.4')
d = tr.cwmpdate.format(client.IPv4AddressList['1'].LeaseTimeRemaining)
self.assertEqual(d, '2014-01-06T15:42:41Z')
self.assertEqual(client.IPv4AddressList['2'].IPAddress, '1.2.3.5')
d = tr.cwmpdate.format(client.IPv4AddressList['2'].LeaseTimeRemaining)
self.assertEqual(d, '2014-01-06T15:42:42Z')
self.assertEqual(client.OptionNumberOfEntries, 4)
self.assertEqual(client.OptionList['1'].Tag, dhcp.CL)
self.assertEqual(client.OptionList['1'].Value, 'clientid_1')
self.assertEqual(client.OptionList['2'].Tag, dhcp.HN)
self.assertEqual(client.OptionList['2'].Value, 'hostname_1')
self.assertEqual(client.OptionList['3'].Tag, dhcp.UC)
self.assertEqual(client.OptionList['3'].Value, 'userclassid_1')
self.assertEqual(client.OptionList['4'].Tag, dhcp.VC)
self.assertEqual(client.OptionList['4'].Value, 'vendorclassid_1')
tr.handle.ValidateExports(client)
if __name__ == '__main__':
unittest.main()
| [
"dhcp.Client",
"tr.wvtest.unittest.main"
] | [((2469, 2484), 'tr.wvtest.unittest.main', 'unittest.main', ([], {}), '()\n', (2482, 2484), False, 'from tr.wvtest import unittest\n'), ((969, 1162), 'dhcp.Client', 'dhcp.Client', ([], {'ipaddr': '"""1.2.3.4"""', 'chaddr': '"""00:01:02:03:04:05"""', 'expiry': '(1389022961)', 'clientid': '"""clientid_1"""', 'hostname': '"""hostname_1"""', 'userclassid': '"""userclassid_1"""', 'vendorclassid': '"""vendorclassid_1"""'}), "(ipaddr='1.2.3.4', chaddr='00:01:02:03:04:05', expiry=1389022961,\n clientid='clientid_1', hostname='hostname_1', userclassid=\n 'userclassid_1', vendorclassid='vendorclassid_1')\n", (980, 1162), False, 'import dhcp\n')] |
#!/usr/bin/env python
from ferromtm.models.coupled2D import *
# from ferromtm.models.electrostatics.per2D import femmodel
from ferromtm.models.electrostatics.nonper2D import femmodel
import importlib
importlib.reload(femmodel)
from ferromtm.tools.utils import *
from ferromtm.models.theo import *
from pytheas.homogenization import *
from theo import *
if __name__ == "__main__":
fem = femmodel.FemModel()
fem.rm_tmp_dir()
# fem.gmsh_verbose=4
# fem.getdp_verbose=4
fem.parmesh = 11
fem.parmesh_des = 11
fem.parmesh_incl = 11
fem.E_static = 1
epsi0 = epsilonr_ferroelectric(0)
epsiE = epsilonr_ferroelectric(fem.E_static)
fem.eps_host = 1
fem.eps_incl = 2.4
fem.b_pml = 0
fem.h_pml = 1
fem.space2pml_L = 1
fem.space2pml_R = 1
fem.space2pml_B = 1
fem.space2pml_T = 1
fem.inclusion_flag = True
r = 0.5 * 1.1
dholes = 0.3 + r * 2
dx, dy = 2 * r + dholes, 2 * r + dholes
nb_inclx, nb_incly = 2, 3
fem.hx_des = 3.8
fem.hy_des = 4.4
nb_incl = nb_inclx * nb_incly
fem.nb_incl = nb_incl
lboxx = fem.hx_des + fem.space2pml_L + fem.space2pml_R + 2 * fem.h_pml
lboxy = fem.hy_des + fem.space2pml_T + fem.space2pml_B + 2 * fem.h_pml
# Rx = (0.1 + np.random.random(nb_incl) * 0.3) * dx
# Ry = (0.1 + np.random.random(nb_incl) * 0.3) * dy
# rot_ = np.random.random(nb_incl) * 2 * pi
x00 = -dholes / 2
y00 = -dholes
X0 = np.linspace(-x00, x00, nb_inclx)
Y0 = np.linspace(-y00, y00, nb_incly)
X0, Y0 = np.meshgrid(X0, Y0)
X0 = X0.ravel()
Y0 = Y0.ravel()
# Y0 = np.ones(nb_incl) * 0
# Y0 = (-0.5 + np.random.random(nb_incl) * 1) * dy
Rx = np.ones(nb_incl) * r * 0.5
Ry = np.ones(nb_incl) * r
rot_ = np.linspace(-pi / 2, pi / 2, nb_incl)
# rot_ = np.ones(nb_incl) * 0
fem.initialize()
if fem.inclusion_flag:
i = 0
for Rinclx, Rincly, rot_incl, x0, y0 in zip(Rx, Ry, rot_, X0, Y0):
# x0 += (1 - 2 * np.random.rand()) * Rinclx / 3
# y0 += (1 - 2 * np.random.rand()) * Rincly / 3
points = ellipse(Rinclx, Rincly, rot_incl, x0, y0)
fem.inclusion_filename_ = "ellipse{0}.geo".format(i)
fem.make_inclusion(points, startpoint=1000 * (i + 1))
# fem.make_inclusion(points, startpoint=1000 )
i += 1
fem.make_mesh()
# fem.open_gmsh_gui()
nvar = len(fem.des[0])
# epsixx = np.ones(nvar)*epsiE
# epsiyy = np.ones(nvar)*epsi0
# epsizz = np.ones(nvar)*epsi0
# epsi = epsixx, epsiyy, epsizz
def couple(E):
i = 0
while True:
epsixx = epsilonr_ferroelectric(E[0].real)
epsiyy = epsilonr_ferroelectric(E[1].real)
epsizz = epsilonr_ferroelectric(E[2].real)
epsi = epsixx, epsiyy, epsizz
# e = np.ones_like(epsixx) * fem.eps_incl
# epsi = e, e, e
make_pos_tensor_eps(fem, epsi, interp=False)
fem.compute_solution()
Emean, Pmean = fem.postpro_mean_fields()
eps_hom_xx = Pmean[0] / Emean[0]
print("eps_hom_xx = ", eps_hom_xx)
E = fem.postpro_electrostatic_field()
if i == 0:
eps_hom_xx_no_coupling = np.copy(eps_hom_xx)
if i > 0:
cv = np.abs(1 - eps_hom_xx / eps_hom_xx_)
print(" cv = ", cv)
if cv < 1e-2:
break
eps_hom_xx_ = np.copy(eps_hom_xx)
i += 1
return eps_hom_xx, eps_hom_xx_no_coupling
eps_hom_xx = []
eps_hom_xx_no_coupling = []
Ebias = np.linspace(2, 2, 1)
# Ebias = np.linspace(2, 2, 1)
S = fem.hx_des * fem.hy_des
f = nb_incl * pi * r ** 2 / (S)
eps_host = epsilonr_ferroelectric(Ebias)
# eps_host = eps_Theo_0
epsmg = maxwell_garnett(f, fem.eps_incl, eps_Theo_0, dim=2)
print("Maxwell-Garnett = ", epsmg)
print("eps_Theo_h_0 = ", eps_Theo_h_0)
run = True
save = False
if run:
for fem.E_static in Ebias:
E = np.ones(nvar) * fem.E_static, np.ones(nvar) * 0, np.ones(nvar) * 0
print("-------------------")
eps = couple(E)
eps_hom_xx.append(eps[0])
eps_hom_xx_no_coupling.append(eps[1])
if save:
np.savez(
"test_theo.npz",
Ebias=Ebias,
eps_hom_xx=eps_hom_xx,
eps_hom_xx_no_coupling=eps_hom_xx_no_coupling,
)
else:
arch = np.load("test_theo.npz")
Ebias = arch["Ebias"]
eps_hom_xx = arch["eps_hom_xx"]
eps_hom_xx_no_coupling = arch["eps_hom_xx_no_coupling"]
# fem.postpro_fields(filetype="pos")
# fem.open_gmsh_gui()
from aotomat.tools.plottools import *
plt.close("all")
col1 = "#6078cf"
col2 = "#c13f3f"
plt.figure()
plt.plot(E_Theo, eps_Theo, "s", color=col1, alpha=0.3, label="bulk meas")
plt.plot(E_Theo_h, eps_Theo_h, "o", color=col2, alpha=0.3, label="mtm meas")
plt.plot(
Ebias,
epsilonr_ferroelectric(Ebias) / epsilonr_ferroelectric(Ebias)[0],
label="bulk",
color=col1,
)
plt.plot(Ebias, eps_hom_xx / eps_hom_xx[0], label="coupling", color=col2)
plt.plot(
Ebias,
eps_hom_xx_no_coupling / eps_hom_xx_no_coupling[0],
"--",
label="no coupling",
color=col2,
)
plt.xlabel("$E$ (kV/mm)")
plt.ylabel("normalized permittivity")
plt.legend()
plt.figure()
# plt.plot(E_Theo, eps_Theo, "s", color=col1, alpha=0.3, label="bulk meas")
plt.plot(
E_Theo_h,
eps_Theo_h * eps_Theo_h_0,
"o",
color=col2,
alpha=0.3,
label="mtm meas",
)
# plt.plot(Ebias, epsilonr_ferroelectric(Ebias)/epsilonr_ferroelectric(Ebias)[0],label="bulk", color=col1)
plt.plot(Ebias, eps_hom_xx, label="coupling", color=col2)
plt.plot(Ebias, eps_hom_xx_no_coupling, "--", label="no coupling", color=col2)
plt.xlabel("$E$ (kV/mm)")
plt.ylabel("relative permittivity")
plt.legend()
| [
"ferromtm.models.electrostatics.nonper2D.femmodel.FemModel",
"importlib.reload"
] | [((204, 230), 'importlib.reload', 'importlib.reload', (['femmodel'], {}), '(femmodel)\n', (220, 230), False, 'import importlib\n'), ((399, 418), 'ferromtm.models.electrostatics.nonper2D.femmodel.FemModel', 'femmodel.FemModel', ([], {}), '()\n', (416, 418), False, 'from ferromtm.models.electrostatics.nonper2D import femmodel\n')] |
# Coded by @HS <NAME>
# follow me on instagram :) - https://instagram.com/hs_devansh_raghav.75
import threading
from concurrent.futures import ThreadPoolExecutor
from grepX.core.args import PATTERN, FILE, CONCURRENCY
from grepX.core.colors import RED, RESET
from grepX.core.greper import greper ,xss_patterns, sqli_patterns, ssrf_patterns, ssti_patterns, lfi_patterns, rce_patterns, idor_patterns, redirect_patterns
def grep():
if PATTERN == 'xss':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in xss_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'sqli':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in sqli_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'lfi':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in lfi_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'rce':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in rce_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'idor':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in idor_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'ssrf':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in ssrf_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'redirect':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in redirect_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
elif PATTERN == 'ssti':
try:
with ThreadPoolExecutor(max_workers=CONCURRENCY) as executor:
for each_pattern in ssti_patterns:
executor.submit(greper, each_pattern)
except KeyboardInterrupt:
quit()
else:
print(RED + '[-] ' + RESET + "No such pattern :(")
| [
"concurrent.futures.ThreadPoolExecutor"
] | [((486, 529), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (504, 529), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((767, 810), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (785, 810), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1048, 1091), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (1066, 1091), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1328, 1371), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (1346, 1371), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1605, 1648), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (1623, 1648), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1883, 1926), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (1901, 1926), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((2165, 2208), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (2183, 2208), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((2447, 2490), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'CONCURRENCY'}), '(max_workers=CONCURRENCY)\n', (2465, 2490), False, 'from concurrent.futures import ThreadPoolExecutor\n')] |
import os
from setuptools import setup
version_file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'jettison', '_version.py')
with open(version_file_path, 'r') as version_file:
exec(compile(version_file.read(), version_file_path, 'exec'))
setup(
name='jettison',
version=__version__, # noqa -- flake8 should ignore this line
description=('Encode binary data in a way compatible with the jettison '
'JavaScript library'),
url='https://github.com/noonat/jettison-python',
packages=['jettison'],
install_requires=[
'six',
],
extras_require={
'docs': [
'sphinx',
],
'tests': [
'coverage',
'flake8',
'mock',
'pytest',
],
}
)
| [
"os.path.dirname",
"setuptools.setup"
] | [((295, 634), 'setuptools.setup', 'setup', ([], {'name': '"""jettison"""', 'version': '__version__', 'description': '"""Encode binary data in a way compatible with the jettison JavaScript library"""', 'url': '"""https://github.com/noonat/jettison-python"""', 'packages': "['jettison']", 'install_requires': "['six']", 'extras_require': "{'docs': ['sphinx'], 'tests': ['coverage', 'flake8', 'mock', 'pytest']}"}), "(name='jettison', version=__version__, description=\n 'Encode binary data in a way compatible with the jettison JavaScript library'\n , url='https://github.com/noonat/jettison-python', packages=['jettison'\n ], install_requires=['six'], extras_require={'docs': ['sphinx'],\n 'tests': ['coverage', 'flake8', 'mock', 'pytest']})\n", (300, 634), False, 'from setuptools import setup\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n')] |
import numpy as np
import math
import cv2
from skimage import transform as stf
def transform(data, center, output_size, scale, rotation):
scale_ratio = float(output_size)/scale
rot = float(rotation)*np.pi/180.0
#translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
t1 = stf.SimilarityTransform(scale=scale_ratio)
cx = center[0]*scale_ratio
cy = center[1]*scale_ratio
t2 = stf.SimilarityTransform(translation=(-1*cx, -1*cy))
t3 = stf.SimilarityTransform(rotation=rot)
t4 = stf.SimilarityTransform(translation=(output_size/2, output_size/2))
t = t1+t2+t3+t4
trans = t.params[0:2]
#print('M', scale, rotation, trans)
cropped = cv2.warpAffine(data,trans,(output_size, output_size), borderValue = 0.0)
return cropped, trans
def transform2(data, label, output_size, bbox=None, dataset='ibug'):
if bbox is None:
record = np.zeros((4,), dtype=np.float32)
        for b in range(label.shape[0]):
ind_gt = label[b]
if b==0:
record[0:2] = ind_gt
record[2:4] = ind_gt
else:
record[0:2] = np.minimum(record[0:2], ind_gt)
record[2:4] = np.maximum(record[2:4], ind_gt)
if dataset=='ibug':
record[1] = 0 if record[1]<36 else record[1]-36 # ibug
elif dataset=='cofw_testset':
record[1] = 0 if record[1]<45 else record[1]-45 # cofw_testset
elif dataset=='300W':
record[1] = 0 if record[1]<40 else record[1]-40 # 300W
else:
record[1] = 0 if record[1]<30 else record[1]-30 # AFLW2000-3D
bbox = record
trans = estimate_trans_bbox(bbox, output_size, s = 1.2)
#print('M', scale, rotation, trans)
cropped = cv2.warpAffine(data,trans,(output_size, output_size), borderValue = 0.0)
# cv2.rectangle(data, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 2)
# cv2.imshow("detection result", data)
# cv2.waitKey(0)
return cropped, trans
def transform_pt(pt, trans):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(trans, new_pt)
#print('new_pt', new_pt.shape, new_pt)
return new_pt[:2]
def gaussian(img, pt, sigma):
# Draw a 2D gaussian
assert(sigma>=0)
if sigma==0:
img[pt[1], pt[0]] = 1.0
return True
#assert pt[0]<=img.shape[1]
#assert pt[1]<=img.shape[0]
# Check that any part of the gaussian is in-bounds
ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
if (ul[0] > img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
# If not, just return the image as is
#print('gaussian error')
return False
#return img
# Generate gaussian
size = 6 * sigma + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 20
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) * 20 # multiply by 20
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return True
#return img
def estimate_trans_bbox(face, input_size, s = 2.0):
w = face[2] - face[0]
h = face[3] - face[1]
wc = int( (face[2]+face[0])/2 )
hc = int( (face[3]+face[1])/2 )
im_size = max(w, h)
#size = int(im_size*1.2)
scale = input_size/(max(w,h)*s)
M = [
[scale, 0, input_size/2-wc*scale],
[0, scale, input_size/2-hc*scale],
]
M = np.array(M)
return M
def preprocess(data, label, output_size):
M = None
image_size = [data.shape[1], data.shape[0]]
if label.shape[0]==68:
landmark = np.zeros((5,2), dtype=np.float32)
landmark[0,:] = (label[36,:]+label[39,:])/2 #left eye
landmark[1,:] = (label[42,:]+label[45,:])/2 #right eye
landmark[2,:] = label[30,:] #nose
landmark[3,:] = label[48,:] #left mouth
landmark[4,:] = label[54,:] #right mouth
elif label.shape[0]==5:
landmark = np.zeros((5,2), dtype=np.float32)
landmark[0,:] = label[0,:] #left eye
landmark[1,:] = label[1,:] #right eye
landmark[2,:] = label[2,:] #nose
landmark[3,:] = label[3,:] #left mouth
landmark[4,:] = label[4,:] #right mouth
# for i in range(5):
# cv2.circle(data, (landmark[i][0], landmark[i][1]), 1, (0, 0, 255), 2)
# cv2.imshow("landmark", data)
# cv2.waitKey(0)
if landmark is not None:
assert len(image_size)==2
src = np.array([
[38.2946, 41.6963],
[73.5318, 41.5014],
[56.0252, 61.7366],
[41.5493, 82.3655],
[70.7299, 82.2041] ], dtype=np.float32 )
if output_size==384:
src = src * 2 + 80.0
dst = landmark.astype(np.float32)
# for i in range(5):
# cv2.circle(data, (src[i][0], src[i][1]), 1, (0, 0, 255), 2)
# cv2.imshow("landmark", data)
# cv2.waitKey(0)
tform = stf.SimilarityTransform()
tform.estimate(dst, src)
trans = tform.params[0:2,:]
warped = cv2.warpAffine(data, trans, (output_size,output_size), borderValue = 0.0)
label_out = np.zeros(label.shape, dtype=np.float32)
        for i in range(label.shape[0]):
label_out[i] = transform_pt(label[i], trans)
# for i in range(label.shape[0]):
# cv2.circle(warped, (label_out[i][0], label_out[i][1]), 1, (0, 0, 255), 2)
# cv2.imshow("label", warped)
# cv2.waitKey(0)
    return warped, label_out, trans
 | [
"cv2.warpAffine",
"numpy.minimum",
"skimage.transform.SimilarityTransform",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.maximum",
"numpy.arange"
] | [((332, 374), 'skimage.transform.SimilarityTransform', 'stf.SimilarityTransform', ([], {'scale': 'scale_ratio'}), '(scale=scale_ratio)\n', (355, 374), True, 'from skimage import transform as stf\n'), ((449, 504), 'skimage.transform.SimilarityTransform', 'stf.SimilarityTransform', ([], {'translation': '(-1 * cx, -1 * cy)'}), '(translation=(-1 * cx, -1 * cy))\n', (472, 504), True, 'from skimage import transform as stf\n'), ((511, 548), 'skimage.transform.SimilarityTransform', 'stf.SimilarityTransform', ([], {'rotation': 'rot'}), '(rotation=rot)\n', (534, 548), True, 'from skimage import transform as stf\n'), ((559, 630), 'skimage.transform.SimilarityTransform', 'stf.SimilarityTransform', ([], {'translation': '(output_size / 2, output_size / 2)'}), '(translation=(output_size / 2, output_size / 2))\n', (582, 630), True, 'from skimage import transform as stf\n'), ((731, 803), 'cv2.warpAffine', 'cv2.warpAffine', (['data', 'trans', '(output_size, output_size)'], {'borderValue': '(0.0)'}), '(data, trans, (output_size, output_size), borderValue=0.0)\n', (745, 803), False, 'import cv2\n'), ((1775, 1847), 'cv2.warpAffine', 'cv2.warpAffine', (['data', 'trans', '(output_size, output_size)'], {'borderValue': '(0.0)'}), '(data, trans, (output_size, output_size), borderValue=0.0)\n', (1789, 1847), False, 'import cv2\n'), ((2135, 2156), 'numpy.dot', 'np.dot', (['trans', 'new_pt'], {}), '(trans, new_pt)\n', (2141, 2156), True, 'import numpy as np\n'), ((2905, 2933), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)', 'float'], {}), '(0, size, 1, float)\n', (2914, 2933), True, 'import numpy as np\n'), ((3922, 3933), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (3930, 3933), True, 'import numpy as np\n'), ((941, 973), 'numpy.zeros', 'np.zeros', (['(4,)'], {'dtype': 'np.float32'}), '((4,), dtype=np.float32)\n', (949, 973), True, 'import numpy as np\n'), ((2090, 2119), 'numpy.array', 'np.array', (['[pt[0], pt[1], 1.0]'], {}), '([pt[0], pt[1], 1.0])\n', (2098, 2119), True, 'import numpy as np\n'), ((3070, 3129), 'numpy.exp', 'np.exp', (['(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))'], {}), '(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n', (3076, 3129), True, 'import numpy as np\n'), ((4096, 4130), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {'dtype': 'np.float32'}), '((5, 2), dtype=np.float32)\n', (4104, 4130), True, 'import numpy as np\n'), ((5043, 5176), 'numpy.array', 'np.array', (['[[38.2946, 41.6963], [73.5318, 41.5014], [56.0252, 61.7366], [41.5493, \n 82.3655], [70.7299, 82.2041]]'], {'dtype': 'np.float32'}), '([[38.2946, 41.6963], [73.5318, 41.5014], [56.0252, 61.7366], [\n 41.5493, 82.3655], [70.7299, 82.2041]], dtype=np.float32)\n', (5051, 5176), True, 'import numpy as np\n'), ((5471, 5496), 'skimage.transform.SimilarityTransform', 'stf.SimilarityTransform', ([], {}), '()\n', (5494, 5496), True, 'from skimage import transform as stf\n'), ((5574, 5646), 'cv2.warpAffine', 'cv2.warpAffine', (['data', 'trans', '(output_size, output_size)'], {'borderValue': '(0.0)'}), '(data, trans, (output_size, output_size), borderValue=0.0)\n', (5588, 5646), False, 'import cv2\n'), ((5667, 5706), 'numpy.zeros', 'np.zeros', (['label.shape'], {'dtype': 'np.float32'}), '(label.shape, dtype=np.float32)\n', (5675, 5706), True, 'import numpy as np\n'), ((4480, 4514), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {'dtype': 'np.float32'}), '((5, 2), dtype=np.float32)\n', (4488, 4514), True, 'import numpy as np\n'), ((1163, 1194), 'numpy.minimum', 'np.minimum', (['record[0:2]', 'ind_gt'], {}), '(record[0:2], 
ind_gt)\n', (1173, 1194), True, 'import numpy as np\n'), ((1220, 1251), 'numpy.maximum', 'np.maximum', (['record[2:4]', 'ind_gt'], {}), '(record[2:4], ind_gt)\n', (1230, 1251), True, 'import numpy as np\n')] |
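# --- Usage sketch for transform() / gaussian() above ----------------------------
# Crops a square patch around a centre point and renders a landmark heatmap;
# "face.jpg" and the crop parameters are placeholders.
img = cv2.imread("face.jpg")                             # any BGR image
center = (img.shape[1] // 2, img.shape[0] // 2)
cropped, trans = transform(img, center, output_size=128, scale=200, rotation=0)
pt = transform_pt(center, trans)                        # map a point into the crop
heatmap = np.zeros((128, 128), dtype=np.float32)
gaussian(heatmap, (int(pt[0]), int(pt[1])), sigma=2)   # 2-D gaussian at the landmark
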
import pymysql
from flask import g
from config import config
def dbcnn():
if not hasattr(g, 'cnn'):
g.cnn = pymysql.connect(**config.db1)
| [
"pymysql.connect"
] | [((122, 151), 'pymysql.connect', 'pymysql.connect', ([], {}), '(**config.db1)\n', (137, 151), False, 'import pymysql\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assets', '0001_initial'),
('heritage', '0022_auto_20160209_1413'),
]
operations = [
migrations.CreateModel(
name='HeritageAsset',
fields=[
('secureasset_ptr',
models.OneToOneField(parent_link=True, primary_key=True, to='assets.SecureAsset', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('assets.secureasset',),
),
migrations.RemoveField(
model_name='projectasset',
name='secureasset_ptr',
),
migrations.AddField(
model_name='projectasset',
name='heritageasset_ptr',
field=models.OneToOneField(parent_link=True, primary_key=True, default=1, to='heritage.HeritageAsset', auto_created=True,
serialize=False),
preserve_default=False,
),
]
| [
"django.db.models.OneToOneField",
"django.db.migrations.RemoveField"
] | [((688, 761), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""projectasset"""', 'name': '"""secureasset_ptr"""'}), "(model_name='projectasset', name='secureasset_ptr')\n", (710, 761), False, 'from django.db import migrations, models\n'), ((922, 1059), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'primary_key': '(True)', 'default': '(1)', 'to': '"""heritage.HeritageAsset"""', 'auto_created': '(True)', 'serialize': '(False)'}), "(parent_link=True, primary_key=True, default=1, to=\n 'heritage.HeritageAsset', auto_created=True, serialize=False)\n", (942, 1059), False, 'from django.db import migrations, models\n'), ((419, 541), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'parent_link': '(True)', 'primary_key': '(True)', 'to': '"""assets.SecureAsset"""', 'auto_created': '(True)', 'serialize': '(False)'}), "(parent_link=True, primary_key=True, to=\n 'assets.SecureAsset', auto_created=True, serialize=False)\n", (439, 541), False, 'from django.db import migrations, models\n')] |
import mock
import unittest
from labs.lab01 import example as l1_example
class TestObject(unittest.TestCase):
def setUp(self):
self.mocked_param = mock.Mock()
self.tested_object = l1_example.Object(self.mocked_param)
def test_str(self):
self.assertEqual(str(self.tested_object),
"Object: %s" % self.mocked_param)
def test_repr(self):
self.assertEqual(repr(self.tested_object),
"<Object: __private_field=%s>" %
self.mocked_param)
def test_hash(self):
self.assertEqual(hash(self.tested_object),
hash(self.mocked_param))
def test_bool(self):
self.assertEqual(bool(self.tested_object),
bool(self.mocked_param))
class TestDynamicObject(unittest.TestCase):
def setUp(self):
self.tested_object = l1_example.DynamicObject()
def test_attr(self):
mocked_attr = mock.Mock()
self.tested_object.attr = mocked_attr
self.assertEqual(self.tested_object.attr, mocked_attr)
del self.tested_object.attr
self.assertRaises(ValueError, self.tested_object.attr)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"labs.lab01.example.Object",
"labs.lab01.example.DynamicObject",
"mock.Mock"
] | [((1233, 1248), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1246, 1248), False, 'import unittest\n'), ((163, 174), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (172, 174), False, 'import mock\n'), ((204, 240), 'labs.lab01.example.Object', 'l1_example.Object', (['self.mocked_param'], {}), '(self.mocked_param)\n', (221, 240), True, 'from labs.lab01 import example as l1_example\n'), ((905, 931), 'labs.lab01.example.DynamicObject', 'l1_example.DynamicObject', ([], {}), '()\n', (929, 931), True, 'from labs.lab01 import example as l1_example\n'), ((980, 991), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (989, 991), False, 'import mock\n')] |
from memory_profiler import profile
@profile
def load():
import conllu
data_file = open("cs-ud-train-l.conllu", "r", encoding="utf-8")
    # parse_incr is recommended for large files (more than 1 MB) since it
    # returns a generator; it is converted to a list here so the sentences
    # can be iterated over more than once
tokenlist = list(conllu.parse_incr(data_file))
for sentence in tokenlist:
for token in sentence:
form_lemma = token['form'] + token['lemma']
for sentence in tokenlist:
chain = []
for token in sentence:
if token['head']:
parent = sentence[token['head'] - 1]
if token['deprel'] == "case" and parent['deprel'] == "nmod":
chain.append(token)
for sentence in tokenlist:
for token in sentence:
token['deprel'] = 'dep'
with open('out.conllu', 'w', encoding='utf8') as f:
f.writelines([sentence.serialize() + "\n" for sentence in tokenlist])
if __name__ == '__main__':
load()
| [
"conllu.parse_incr"
] | [((301, 329), 'conllu.parse_incr', 'conllu.parse_incr', (['data_file'], {}), '(data_file)\n', (318, 329), False, 'import conllu\n')] |
from bson.objectid import ObjectId
from app.database.mongodb import (
growjo_collection,
# indeed_collection,
# linkedin_collection
)
from app.database.mongodb_helper import (
growjo_helper,
# indeed_helper,
# linkedin_helper,
)
"""
CRUD Operations
"""
# top 25 fastest growing companies in the US params
filter = {'country': 'United States'}
# duplicate keys collapse in a dict literal, so the effective sort is simply
# ascending by ranking
sort = list({'ranking': 1}.items())
limit=25
# retrieve top 25 fastest growing US companies in the database
async def retrieve_companies():
companies = []
async for entity in growjo_collection.find(
filter=filter,
sort=sort,
limit=limit,
):
companies.append(growjo_helper(entity))
return companies
# add a new company into the database
async def add_company(company_data: dict) -> dict:
entity = await growjo_collection.insert_one(company_data)
new_company = await growjo_collection.find_one({"_id": entity.inserted_id})
return growjo_helper(new_company)
# retrieve a company with a matching ID
async def retrieve_company(id: str) -> dict:
entity = await growjo_collection.find_one({"_id": ObjectId(id)})
if entity:
return growjo_helper(entity)
# update a company with a matching ID
async def update_company(id: str, data: dict):
# return false if any empty request body is sent
if not data:
return False
entity = await growjo_collection.find_one({"_id": ObjectId(id)})
if entity:
updated_company = await growjo_collection.update_one(
{"_id": ObjectId(id)}, {"$set": data})
return bool(updated_company)
# delete a company from the database
async def delete_company(id: str):
entity = await growjo_collection.find_one({"_id": ObjectId(id)})
if entity:
await growjo_collection.delete_one({"_id": ObjectId(id)})
return True
# add indeed jobs to the database
#async def add_indeed_data(indeed_data: dict) -> dict:
# entity = await indeed_collection.insertMany(
# [{'i': i} for i in range(len(indeed_response))]
# )
# indeed_data = await indeed_collection.find_one({"id": entity.inserted_id})
# return indeed_helper(indeed_data)
# add linkedin jobs to the database
# async def add_linkedin_data(linkedin_data: dict) -> dict:
# entity = await linkedin_collection.insertMany(
# [{'i': i} for i in range(len(linkedin_response))]
# )
# linkedin_data = await linkedin_collection.find_one(
# {"id": entity.inserted_id})
# return linkedin_helper(linkedin_data)
| [
"app.database.mongodb_helper.growjo_helper",
"app.database.mongodb.growjo_collection.insert_one",
"bson.objectid.ObjectId",
"app.database.mongodb.growjo_collection.find_one",
"app.database.mongodb.growjo_collection.find"
] | [((607, 668), 'app.database.mongodb.growjo_collection.find', 'growjo_collection.find', ([], {'filter': 'filter', 'sort': 'sort', 'limit': 'limit'}), '(filter=filter, sort=sort, limit=limit)\n', (629, 668), False, 'from app.database.mongodb import growjo_collection\n'), ((1013, 1039), 'app.database.mongodb_helper.growjo_helper', 'growjo_helper', (['new_company'], {}), '(new_company)\n', (1026, 1039), False, 'from app.database.mongodb_helper import growjo_helper\n'), ((879, 921), 'app.database.mongodb.growjo_collection.insert_one', 'growjo_collection.insert_one', (['company_data'], {}), '(company_data)\n', (907, 921), False, 'from app.database.mongodb import growjo_collection\n'), ((946, 1001), 'app.database.mongodb.growjo_collection.find_one', 'growjo_collection.find_one', (["{'_id': entity.inserted_id}"], {}), "({'_id': entity.inserted_id})\n", (972, 1001), False, 'from app.database.mongodb import growjo_collection\n'), ((1225, 1246), 'app.database.mongodb_helper.growjo_helper', 'growjo_helper', (['entity'], {}), '(entity)\n', (1238, 1246), False, 'from app.database.mongodb_helper import growjo_helper\n'), ((726, 747), 'app.database.mongodb_helper.growjo_helper', 'growjo_helper', (['entity'], {}), '(entity)\n', (739, 747), False, 'from app.database.mongodb_helper import growjo_helper\n'), ((1180, 1192), 'bson.objectid.ObjectId', 'ObjectId', (['id'], {}), '(id)\n', (1188, 1192), False, 'from bson.objectid import ObjectId\n'), ((1478, 1490), 'bson.objectid.ObjectId', 'ObjectId', (['id'], {}), '(id)\n', (1486, 1490), False, 'from bson.objectid import ObjectId\n'), ((1802, 1814), 'bson.objectid.ObjectId', 'ObjectId', (['id'], {}), '(id)\n', (1810, 1814), False, 'from bson.objectid import ObjectId\n'), ((1607, 1619), 'bson.objectid.ObjectId', 'ObjectId', (['id'], {}), '(id)\n', (1615, 1619), False, 'from bson.objectid import ObjectId\n'), ((1884, 1896), 'bson.objectid.ObjectId', 'ObjectId', (['id'], {}), '(id)\n', (1892, 1896), False, 'from bson.objectid import ObjectId\n')] |
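# --- Usage sketch for the CRUD helpers above ------------------------------------
# The helpers are coroutines, so they are awaited from async code; the FastAPI
# routes below are illustrative (the surrounding app is assumed to be FastAPI,
# which the async style and app.* layout suggest) and require a reachable
# MongoDB behind growjo_collection.
from fastapi import APIRouter

router = APIRouter()

@router.get("/companies/fastest-growing")
async def fastest_growing():
    return await retrieve_companies()

@router.post("/companies")
async def create_company(payload: dict):
    return await add_company(payload)
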
import codecademylib3_seaborn
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
samples = iris.data
x = samples[:,0]
y = samples[:,1]
plt.scatter(x, y, alpha=0.5)
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.show()
| [
"sklearn.datasets.load_iris",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((99, 119), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (117, 119), False, 'from sklearn import datasets\n'), ((177, 205), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'alpha': '(0.5)'}), '(x, y, alpha=0.5)\n', (188, 205), True, 'import matplotlib.pyplot as plt\n'), ((207, 238), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sepal length (cm)"""'], {}), "('sepal length (cm)')\n", (217, 238), True, 'import matplotlib.pyplot as plt\n'), ((239, 270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""petal length (cm)"""'], {}), "('petal length (cm)')\n", (249, 270), True, 'import matplotlib.pyplot as plt\n'), ((272, 282), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (280, 282), True, 'import matplotlib.pyplot as plt\n')] |
from django import forms
from django.contrib import admin
from apprentice_learner.models import Agent
from apprentice_learner.models import Project
from apprentice_learner.models import Operator
from codemirror2.widgets import CodeMirrorEditor
class AgentAdmin(admin.ModelAdmin):
pass
# class OperatorAdminForm(forms.ModelForm):
# model = Operator
# class Meta:
# fields = '__all__'
# widgets = {
# 'code': PythonEditor(attrs={'style':'width: 90%; height: 100%;'}),
# }
class OperatorAdmin(admin.ModelAdmin):
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.attname in ['head', 'conditions', 'effects']:
kwargs['widget'] = CodeMirrorEditor(options={'mode':'python', 'lineNumbers':True})
return super(OperatorAdmin, self).formfield_for_dbfield(db_field, **kwargs)
# Register your models here.
admin.site.register(Agent, AgentAdmin)
admin.site.register(Project)
admin.site.register(Operator, OperatorAdmin)
| [
"django.contrib.admin.site.register",
"codemirror2.widgets.CodeMirrorEditor"
] | [((896, 934), 'django.contrib.admin.site.register', 'admin.site.register', (['Agent', 'AgentAdmin'], {}), '(Agent, AgentAdmin)\n', (915, 934), False, 'from django.contrib import admin\n'), ((935, 963), 'django.contrib.admin.site.register', 'admin.site.register', (['Project'], {}), '(Project)\n', (954, 963), False, 'from django.contrib import admin\n'), ((964, 1008), 'django.contrib.admin.site.register', 'admin.site.register', (['Operator', 'OperatorAdmin'], {}), '(Operator, OperatorAdmin)\n', (983, 1008), False, 'from django.contrib import admin\n'), ((716, 781), 'codemirror2.widgets.CodeMirrorEditor', 'CodeMirrorEditor', ([], {'options': "{'mode': 'python', 'lineNumbers': True}"}), "(options={'mode': 'python', 'lineNumbers': True})\n", (732, 781), False, 'from codemirror2.widgets import CodeMirrorEditor\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 26 16:41:01 2017
@author: andi
"""
import sys
sys.path.append( '/home/andi/dev/visr-build-debug/python' )
import visr
import rcl
import rrl
# exec(open("./runAtomicComponent.py").read())
import numpy as np
import matplotlib.pyplot as plt
fs = 48000
blockSize = 128
numBlocks = 16
numSamples = numBlocks*blockSize
t = np.arange(0,numSamples,dtype=np.float32)/fs
inputSignal = np.zeros( [4,numSamples], dtype=np.float32 )
inputSignal[0,:] = np.sin( 2*np.pi*440 * t )
inputSignal[1,:] = 0.5*np.sin( 2*np.pi*880 * t )
inputSignal[2,:] = 0.15*np.sin( 2*np.pi*1340 * t )
referenceOutput = inputSignal[0:2,:] + inputSignal[2:,:]
outputSignal = np.zeros( (2, numSamples), dtype = np.float32 )
c = visr.SignalFlowContext(blockSize, fs )
adder = rcl.Add( c, 'add', numInputs = 2, width=2)
flow = rrl.AudioSignalFlow( adder )
for blockIdx in range(0,numBlocks):
inputBlock = inputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize]
outputBlock = flow.process( inputBlock )
outputSignal[:, blockIdx*blockSize:(blockIdx+1)*blockSize] = outputBlock
plt.figure(1)
plt.plot( t, referenceOutput[0,:], 'bo-', t, outputSignal[0,:], 'rx-' )
plt.show()
| [
"matplotlib.pyplot.plot",
"visr.SignalFlowContext",
"numpy.zeros",
"rcl.Add",
"matplotlib.pyplot.figure",
"numpy.sin",
"rrl.AudioSignalFlow",
"sys.path.append",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((95, 152), 'sys.path.append', 'sys.path.append', (['"""/home/andi/dev/visr-build-debug/python"""'], {}), "('/home/andi/dev/visr-build-debug/python')\n", (110, 152), False, 'import sys\n'), ((432, 475), 'numpy.zeros', 'np.zeros', (['[4, numSamples]'], {'dtype': 'np.float32'}), '([4, numSamples], dtype=np.float32)\n', (440, 475), True, 'import numpy as np\n'), ((496, 523), 'numpy.sin', 'np.sin', (['(2 * np.pi * 440 * t)'], {}), '(2 * np.pi * 440 * t)\n', (502, 523), True, 'import numpy as np\n'), ((697, 740), 'numpy.zeros', 'np.zeros', (['(2, numSamples)'], {'dtype': 'np.float32'}), '((2, numSamples), dtype=np.float32)\n', (705, 740), True, 'import numpy as np\n'), ((750, 787), 'visr.SignalFlowContext', 'visr.SignalFlowContext', (['blockSize', 'fs'], {}), '(blockSize, fs)\n', (772, 787), False, 'import visr\n'), ((799, 838), 'rcl.Add', 'rcl.Add', (['c', '"""add"""'], {'numInputs': '(2)', 'width': '(2)'}), "(c, 'add', numInputs=2, width=2)\n", (806, 838), False, 'import rcl\n'), ((851, 877), 'rrl.AudioSignalFlow', 'rrl.AudioSignalFlow', (['adder'], {}), '(adder)\n', (870, 877), False, 'import rrl\n'), ((1115, 1128), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1125, 1128), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1200), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'referenceOutput[0, :]', '"""bo-"""', 't', 'outputSignal[0, :]', '"""rx-"""'], {}), "(t, referenceOutput[0, :], 'bo-', t, outputSignal[0, :], 'rx-')\n", (1137, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1201, 1211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1209, 1211), True, 'import matplotlib.pyplot as plt\n'), ((373, 415), 'numpy.arange', 'np.arange', (['(0)', 'numSamples'], {'dtype': 'np.float32'}), '(0, numSamples, dtype=np.float32)\n', (382, 415), True, 'import numpy as np\n'), ((545, 572), 'numpy.sin', 'np.sin', (['(2 * np.pi * 880 * t)'], {}), '(2 * np.pi * 880 * t)\n', (551, 572), True, 'import numpy as np\n'), ((595, 623), 'numpy.sin', 'np.sin', (['(2 * np.pi * 1340 * t)'], {}), '(2 * np.pi * 1340 * t)\n', (601, 623), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import datetime
from scipy.stats import bernoulli, norm
sigmoid = lambda x: 1/(1+np.exp(-x))
softplus = lambda x: np.log(1+np.exp(x))
def timestamp():
now = datetime.datetime.now()
return now.strftime("%Y%m%d%H%M%S")
def generate_data(N, D, T, beta0, beta, alpha):
"""
    Generate N samples of data from the true model with parameter [beta0, beta, alpha].
Returns:
x: 3-d array of size [N, T, D]
y: 2-d array of size [N, T]
        z: 1-d array of size [N]
"""
z = np.random.randn(N) * softplus(alpha)**(1/2.)
x = np.random.randn(N*T*D).reshape([N,T,D])
y = bernoulli(p=sigmoid(beta0+x@beta+z.reshape([N,1]))).rvs()
return x,y,z | [
"numpy.exp",
"datetime.datetime.now",
"numpy.random.randn"
] | [((202, 225), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (223, 225), False, 'import datetime\n'), ((545, 563), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (560, 563), True, 'import numpy as np\n'), ((121, 131), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (127, 131), True, 'import numpy as np\n'), ((163, 172), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (169, 172), True, 'import numpy as np\n'), ((598, 624), 'numpy.random.randn', 'np.random.randn', (['(N * T * D)'], {}), '(N * T * D)\n', (613, 624), True, 'import numpy as np\n')] |
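The generator above combines a logistic link with a per-subject random intercept z; a minimal usage sketch, assuming generate_data from the sample above is in scope (the parameter values below are illustrative assumptions, not from the source):
import numpy as np
N, D, T = 100, 3, 5                                 # subjects, covariates, repeated measures (assumed)
beta0, beta, alpha = -0.5, np.array([0.8, -0.3, 0.1]), 0.2
x, y, z = generate_data(N, D, T, beta0, beta, alpha)
print(x.shape, y.shape, z.shape)                    # (100, 5, 3) (100, 5) (100,)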
import time, itertools
from stdiomask import getpass, chrlist
def cracker(password, chrlist, showprcs):
guess = ''
for i in range(1, 6):
for g in itertools.product(chrlist, repeat=i):
if showprcs:
print('Guess: ' + ''.join(g), end='\r')
if (guess := ''.join(g)) == password:
return ''.join(guess)
time.sleep(.000001)
password = getpass()
showprcs = True if input('Show cracking-process? (Y/N) ').upper() == 'Y' else False
start = time.time()
guess = cracker(password, chrlist, showprcs)
end = time.time()
print('------------------')
print('Guess: ' + guess)
print('Time: ' + str(round(end - start, 5)) + 's')
| [
"stdiomask.getpass",
"itertools.product",
"time.time",
"time.sleep"
] | [((388, 397), 'stdiomask.getpass', 'getpass', ([], {}), '()\n', (395, 397), False, 'from stdiomask import getpass, chrlist\n'), ((491, 502), 'time.time', 'time.time', ([], {}), '()\n', (500, 502), False, 'import time, itertools\n'), ((554, 565), 'time.time', 'time.time', ([], {}), '()\n', (563, 565), False, 'import time, itertools\n'), ((159, 195), 'itertools.product', 'itertools.product', (['chrlist'], {'repeat': 'i'}), '(chrlist, repeat=i)\n', (176, 195), False, 'import time, itertools\n'), ((356, 373), 'time.sleep', 'time.sleep', (['(1e-06)'], {}), '(1e-06)\n', (366, 373), False, 'import time, itertools\n')] |
# Domain –Education
import glob
import csv
import pandas as pd
csvfiles = glob.glob('c:/temp/score/*.csv')
wf = csv.writer(open('c:/temp/score/ScoreFinalTemp.csv','wb'),delimiter = ',')
for files in csvfiles:
rd = csv.reader(open(files,'r'), delimiter = ',')
# rd.next() # To skip header
for row in rd:
# print row
wf.writerow(row)
data = pd.read_csv('c:/temp/score/ScoreFinalTemp.csv')
data = data.drop(['Name','Ethinicity'],axis=1)
data.to_csv('c:/temp/score/ScoreFinal.csv')
rd = csv.reader(open('c:/temp/score/ScoreFinal.csv','r'), delimiter = ',')
for row in rd:
    # csv rows are lists and str.replace is not in-place, so rebuild each row instead
    row = ['1' if cell == 'M' else '0' if cell == 'F' else cell for cell in row]
| [
"glob.glob",
"pandas.read_csv"
] | [((74, 106), 'glob.glob', 'glob.glob', (['"""c:/temp/score/*.csv"""'], {}), "('c:/temp/score/*.csv')\n", (83, 106), False, 'import glob\n'), ((368, 415), 'pandas.read_csv', 'pd.read_csv', (['"""c:/temp/score/ScoreFinalTemp.csv"""'], {}), "('c:/temp/score/ScoreFinalTemp.csv')\n", (379, 415), True, 'import pandas as pd\n')] |
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from cobra import Model
from driven.data_sets.fluxes import FluxConstraints
from driven.flux_analysis.results import FluxBasedFluxDistribution
def fba(model, objective=None, distribution=None, relax=0.01, *args, **kwargs):
"""
Runs a Flux Balance Analysis using a set of measured fluxes.
Arguments
---------
model : cobra.Model
A constraint-based model
distribution : FluxConstraints
A set of experimental or computational determined flux constraints
objective : objective
Optional objective for the model
relax : float
A relax value to make the computation feasible
"""
if not isinstance(model, Model):
raise ValueError("Argument model must be instance of SolverBasedModel, not %s" % model.__class__)
if not isinstance(distribution, FluxConstraints):
raise ValueError("Argument distribution must be instance of FluxConstraints, not %s" % distribution.__class__)
with model:
for reaction_id in distribution:
reaction = model.reactions.get_by_id(reaction_id)
bounds = distribution[reaction_id]
reaction.bounds = bounds[0] - bounds[0] * relax, bounds[1] + bounds[1] * relax
if objective is not None:
model.objective = objective
solution = model.optimize()
return FluxBasedFluxDistribution(solution.fluxes, distribution) # TODO bug here, missing arg
| [
"driven.flux_analysis.results.FluxBasedFluxDistribution"
] | [((2012, 2068), 'driven.flux_analysis.results.FluxBasedFluxDistribution', 'FluxBasedFluxDistribution', (['solution.fluxes', 'distribution'], {}), '(solution.fluxes, distribution)\n', (2037, 2068), False, 'from driven.flux_analysis.results import FluxBasedFluxDistribution\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import tifffile as tiff
import cv2
import os
from tqdm.notebook import tqdm
import zipfile
import rasterio
from rasterio.windows import Window
from torch.utils.data import Dataset
import gc
from pathlib import Path
import warnings
warnings.filterwarnings("ignore")
import time
start = time.time()
sz = 256 #the size of tiles
reduce = 4 #reduce the original images by 4 times
BASE_PATH = '/N/slate/soodn/'
dataset = "kidney"
# dataset = "colon"
MASKS = Path(BASE_PATH+'hubmap-'+dataset+'-segmentation/train.csv')
df_masks = pd.read_csv(MASKS, index_col = 'id')
DATA = Path(BASE_PATH+'hubmap-'+dataset+'-segmentation/train')
INFO = Path(BASE_PATH+'hubmap-'+dataset+'-segmentation/HuBMAP-20-dataset_information.csv')
df_info = pd.read_csv(INFO)
OUT_TRAIN = Path(r'train.zip')
OUT_MASKS = Path(r'masks.zip')
def elapsed_time(start_time):
return time.time() - start_time
def enc2mask(encs, shape):
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for m,enc in enumerate(encs):
        if isinstance(enc, float) and np.isnan(enc): continue  # np.float was removed in newer NumPy; builtin float works here
s = enc.split()
for i in range(len(s)//2):
start = int(s[2*i]) - 1
length = int(s[2*i+1])
img[start:start+length] = 1 + m
return img.reshape(shape).T
def mask2enc(mask, n=1):
pixels = mask.T.flatten()
encs = []
for i in range(1,n+1):
p = (pixels == i).astype(np.int8)
if p.sum() == 0: encs.append(np.nan)
else:
p = np.concatenate([[0], p, [0]])
runs = np.where(p[1:] != p[:-1])[0] + 1
runs[1::2] -= runs[::2]
encs.append(' '.join(str(x) for x in runs))
return encs
s_th = 40 #saturation blanking threshold
p_th = 1000*(sz//256)**2 #threshold for the minimum number of pixels
class HuBMAPDataset(Dataset):
def __init__(self, idx, sz=sz, reduce=reduce, encs=None):
super().__init__()
self.data = rasterio.open(os.path.join(DATA,idx+'.tiff'))
if self.data.count != 3:
subdatasets = self.data.subdatasets
self.layers = []
if len(subdatasets) > 0:
for i, subdataset in enumerate(subdatasets, 0):
self.layers.append(rasterio.open(subdataset))
self.shape = self.data.shape
self.reduce = reduce
self.sz = reduce*sz
self.pad0 = (self.sz - self.shape[0]%self.sz)%self.sz
self.pad1 = (self.sz - self.shape[1]%self.sz)%self.sz
self.n0max = (self.shape[0] + self.pad0)//self.sz
self.n1max = (self.shape[1] + self.pad1)//self.sz
self.mask = enc2mask(encs,(self.shape[1],self.shape[0])) if encs is not None else None
def __len__(self):
return self.n0max*self.n1max
def __getitem__(self, idx):
n0,n1 = idx//self.n1max, idx%self.n1max
x0,y0 = -self.pad0//2 + n0*self.sz, -self.pad1//2 + n1*self.sz
# make sure that the region to read is within the image
p00,p01 = max(0,x0), min(x0+self.sz,self.shape[0])
p10,p11 = max(0,y0), min(y0+self.sz,self.shape[1])
img = np.zeros((self.sz,self.sz,3),np.uint8)
mask = np.zeros((self.sz,self.sz),np.uint8)
        # mapping the loaded region to the tile
if self.data.count == 3:
img[(p00-x0):(p01-x0),(p10-y0):(p11-y0)] = np.moveaxis(self.data.read([1,2,3],
window=Window.from_slices((p00,p01),(p10,p11))), 0, -1)
else:
for i,layer in enumerate(self.layers):
img[(p00-x0):(p01-x0),(p10-y0):(p11-y0),i] =\
layer.read(1,window=Window.from_slices((p00,p01),(p10,p11)))
if self.mask is not None: mask[(p00-x0):(p01-x0),(p10-y0):(p11-y0)] = self.mask[p00:p01,p10:p11]
if self.reduce != 1:
img = cv2.resize(img,(self.sz//reduce,self.sz//reduce),
interpolation = cv2.INTER_AREA)
mask = cv2.resize(mask,(self.sz//reduce,self.sz//reduce),
interpolation = cv2.INTER_NEAREST)
        #check for empty images
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h,s,v = cv2.split(hsv)
#return -1 for empty images
return img, mask, (-1 if (s>s_th).sum() <= p_th or img.sum() <= p_th else idx)
x_tot,x2_tot = [],[]
with zipfile.ZipFile(OUT_TRAIN, 'w') as img_out,\
zipfile.ZipFile(OUT_MASKS, 'w') as mask_out:
for index, encs in tqdm(df_masks.iterrows(),total=len(df_masks)):
#image+mask dataset
ds = HuBMAPDataset(index,encs=encs)
for i in range(len(ds)):
im,m,idx = ds[i]
if idx < 0: continue
x_tot.append((im/255.0).reshape(-1,3).mean(0))
x2_tot.append(((im/255.0)**2).reshape(-1,3).mean(0))
#write data
im = cv2.imencode('.png',cv2.cvtColor(im, cv2.COLOR_RGB2BGR))[1]
img_out.writestr(f'{index}_{idx:04d}.png', im)
m = cv2.imencode('.png',m)[1]
mask_out.writestr(f'{index}_{idx:04d}.png', m)
#image stats
img_avr = np.array(x_tot).mean(0)
img_std = np.sqrt(np.array(x2_tot).mean(0) - img_avr**2)
print('mean:',img_avr, ', std:', img_std)
print ("Run time = ", elapsed_time(start)) | [
"cv2.imencode",
"pandas.read_csv",
"pathlib.Path",
"zipfile.ZipFile",
"numpy.where",
"rasterio.open",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"cv2.cvtColor",
"cv2.split",
"numpy.concatenate",
"cv2.resize",
"time.time",
"warnings.filterwarnings",
"rasterio.windows.Window.from_slices"
] | [((325, 358), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (348, 358), False, 'import warnings\n'), ((380, 391), 'time.time', 'time.time', ([], {}), '()\n', (389, 391), False, 'import time\n'), ((553, 618), 'pathlib.Path', 'Path', (["(BASE_PATH + 'hubmap-' + dataset + '-segmentation/train.csv')"], {}), "(BASE_PATH + 'hubmap-' + dataset + '-segmentation/train.csv')\n", (557, 618), False, 'from pathlib import Path\n'), ((624, 658), 'pandas.read_csv', 'pd.read_csv', (['MASKS'], {'index_col': '"""id"""'}), "(MASKS, index_col='id')\n", (635, 658), True, 'import pandas as pd\n'), ((668, 729), 'pathlib.Path', 'Path', (["(BASE_PATH + 'hubmap-' + dataset + '-segmentation/train')"], {}), "(BASE_PATH + 'hubmap-' + dataset + '-segmentation/train')\n", (672, 729), False, 'from pathlib import Path\n'), ((731, 824), 'pathlib.Path', 'Path', (["(BASE_PATH + 'hubmap-' + dataset +\n '-segmentation/HuBMAP-20-dataset_information.csv')"], {}), "(BASE_PATH + 'hubmap-' + dataset +\n '-segmentation/HuBMAP-20-dataset_information.csv')\n", (735, 824), False, 'from pathlib import Path\n'), ((825, 842), 'pandas.read_csv', 'pd.read_csv', (['INFO'], {}), '(INFO)\n', (836, 842), True, 'import pandas as pd\n'), ((856, 873), 'pathlib.Path', 'Path', (['"""train.zip"""'], {}), "('train.zip')\n", (860, 873), False, 'from pathlib import Path\n'), ((887, 904), 'pathlib.Path', 'Path', (['"""masks.zip"""'], {}), "('masks.zip')\n", (891, 904), False, 'from pathlib import Path\n'), ((1011, 1056), 'numpy.zeros', 'np.zeros', (['(shape[0] * shape[1])'], {'dtype': 'np.uint8'}), '(shape[0] * shape[1], dtype=np.uint8)\n', (1019, 1056), True, 'import numpy as np\n'), ((4399, 4430), 'zipfile.ZipFile', 'zipfile.ZipFile', (['OUT_TRAIN', '"""w"""'], {}), "(OUT_TRAIN, 'w')\n", (4414, 4430), False, 'import zipfile\n'), ((4445, 4476), 'zipfile.ZipFile', 'zipfile.ZipFile', (['OUT_MASKS', '"""w"""'], {}), "(OUT_MASKS, 'w')\n", (4460, 4476), False, 'import zipfile\n'), ((948, 959), 'time.time', 'time.time', ([], {}), '()\n', (957, 959), False, 'import time\n'), ((3190, 3231), 'numpy.zeros', 'np.zeros', (['(self.sz, self.sz, 3)', 'np.uint8'], {}), '((self.sz, self.sz, 3), np.uint8)\n', (3198, 3231), True, 'import numpy as np\n'), ((3244, 3282), 'numpy.zeros', 'np.zeros', (['(self.sz, self.sz)', 'np.uint8'], {}), '((self.sz, self.sz), np.uint8)\n', (3252, 3282), True, 'import numpy as np\n'), ((4181, 4217), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (4193, 4217), False, 'import cv2\n'), ((4234, 4248), 'cv2.split', 'cv2.split', (['hsv'], {}), '(hsv)\n', (4243, 4248), False, 'import cv2\n'), ((5191, 5206), 'numpy.array', 'np.array', (['x_tot'], {}), '(x_tot)\n', (5199, 5206), True, 'import numpy as np\n'), ((1129, 1142), 'numpy.isnan', 'np.isnan', (['enc'], {}), '(enc)\n', (1137, 1142), True, 'import numpy as np\n'), ((1573, 1602), 'numpy.concatenate', 'np.concatenate', (['[[0], p, [0]]'], {}), '([[0], p, [0]])\n', (1587, 1602), True, 'import numpy as np\n'), ((2030, 2063), 'os.path.join', 'os.path.join', (['DATA', "(idx + '.tiff')"], {}), "(DATA, idx + '.tiff')\n", (2042, 2063), False, 'import os\n'), ((3891, 3981), 'cv2.resize', 'cv2.resize', (['img', '(self.sz // reduce, self.sz // reduce)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (self.sz // reduce, self.sz // reduce), interpolation=cv2.\n INTER_AREA)\n', (3901, 3981), False, 'import cv2\n'), ((4021, 4115), 'cv2.resize', 'cv2.resize', (['mask', '(self.sz // reduce, self.sz // reduce)'], 
{'interpolation': 'cv2.INTER_NEAREST'}), '(mask, (self.sz // reduce, self.sz // reduce), interpolation=cv2.\n INTER_NEAREST)\n', (4031, 4115), False, 'import cv2\n'), ((5073, 5096), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'm'], {}), "('.png', m)\n", (5085, 5096), False, 'import cv2\n'), ((5234, 5250), 'numpy.array', 'np.array', (['x2_tot'], {}), '(x2_tot)\n', (5242, 5250), True, 'import numpy as np\n'), ((1622, 1647), 'numpy.where', 'np.where', (['(p[1:] != p[:-1])'], {}), '(p[1:] != p[:-1])\n', (1630, 1647), True, 'import numpy as np\n'), ((4945, 4980), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (4957, 4980), False, 'import cv2\n'), ((2312, 2337), 'rasterio.open', 'rasterio.open', (['subdataset'], {}), '(subdataset)\n', (2325, 2337), False, 'import rasterio\n'), ((3475, 3517), 'rasterio.windows.Window.from_slices', 'Window.from_slices', (['(p00, p01)', '(p10, p11)'], {}), '((p00, p01), (p10, p11))\n', (3493, 3517), False, 'from rasterio.windows import Window\n'), ((3689, 3731), 'rasterio.windows.Window.from_slices', 'Window.from_slices', (['(p00, p01)', '(p10, p11)'], {}), '((p00, p01), (p10, p11))\n', (3707, 3731), False, 'from rasterio.windows import Window\n')] |
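enc2mask and mask2enc in the sample above are the usual Kaggle-style run-length-encoding helpers (1-based, column-major runs of "start length"); a small round-trip sketch, assuming both functions are in scope:
import numpy as np
mask = np.zeros((3, 4), dtype=np.uint8)       # H=3, W=4
mask[1, 1:3] = 1
enc = mask2enc(mask, n=1)                     # -> ['5 1 8 1']
decoded = enc2mask(enc, (4, 3))               # shape argument is (width, height); result comes back as (H, W)
assert (decoded == mask).all()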
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AllowedPhaseTwoParameters(object):
"""
Allowed phase two parameters.
"""
def __init__(self, **kwargs):
"""
Initializes a new AllowedPhaseTwoParameters object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param encryption_algorithms:
The value to assign to the encryption_algorithms property of this AllowedPhaseTwoParameters.
:type encryption_algorithms: list[str]
:param authentication_algorithms:
The value to assign to the authentication_algorithms property of this AllowedPhaseTwoParameters.
:type authentication_algorithms: list[str]
:param pfs_dh_groups:
The value to assign to the pfs_dh_groups property of this AllowedPhaseTwoParameters.
:type pfs_dh_groups: list[str]
"""
self.swagger_types = {
'encryption_algorithms': 'list[str]',
'authentication_algorithms': 'list[str]',
'pfs_dh_groups': 'list[str]'
}
self.attribute_map = {
'encryption_algorithms': 'encryptionAlgorithms',
'authentication_algorithms': 'authenticationAlgorithms',
'pfs_dh_groups': 'pfsDhGroups'
}
self._encryption_algorithms = None
self._authentication_algorithms = None
self._pfs_dh_groups = None
@property
def encryption_algorithms(self):
"""
Gets the encryption_algorithms of this AllowedPhaseTwoParameters.
Allowed phase two encryption algorithms.
:return: The encryption_algorithms of this AllowedPhaseTwoParameters.
:rtype: list[str]
"""
return self._encryption_algorithms
@encryption_algorithms.setter
def encryption_algorithms(self, encryption_algorithms):
"""
Sets the encryption_algorithms of this AllowedPhaseTwoParameters.
Allowed phase two encryption algorithms.
:param encryption_algorithms: The encryption_algorithms of this AllowedPhaseTwoParameters.
:type: list[str]
"""
self._encryption_algorithms = encryption_algorithms
@property
def authentication_algorithms(self):
"""
Gets the authentication_algorithms of this AllowedPhaseTwoParameters.
Allowed phase two authentication algorithms.
:return: The authentication_algorithms of this AllowedPhaseTwoParameters.
:rtype: list[str]
"""
return self._authentication_algorithms
@authentication_algorithms.setter
def authentication_algorithms(self, authentication_algorithms):
"""
Sets the authentication_algorithms of this AllowedPhaseTwoParameters.
Allowed phase two authentication algorithms.
:param authentication_algorithms: The authentication_algorithms of this AllowedPhaseTwoParameters.
:type: list[str]
"""
self._authentication_algorithms = authentication_algorithms
@property
def pfs_dh_groups(self):
"""
Gets the pfs_dh_groups of this AllowedPhaseTwoParameters.
Allowed perfect forward secrecy Diffie-Hellman groups.
:return: The pfs_dh_groups of this AllowedPhaseTwoParameters.
:rtype: list[str]
"""
return self._pfs_dh_groups
@pfs_dh_groups.setter
def pfs_dh_groups(self, pfs_dh_groups):
"""
Sets the pfs_dh_groups of this AllowedPhaseTwoParameters.
Allowed perfect forward secrecy Diffie-Hellman groups.
:param pfs_dh_groups: The pfs_dh_groups of this AllowedPhaseTwoParameters.
:type: list[str]
"""
self._pfs_dh_groups = pfs_dh_groups
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [
"oci.util.formatted_flat_dict"
] | [((4337, 4362), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (4356, 4362), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
import logging.config
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.autoreload
from tornado.options import define, options
import os
from memory.application import application
from memory.settings import LOGGING, NEED_CERTIFICATE, SERVER_PORT
from memory.util import refresh_certification, safety_certification
from memory.model.settings import MODEL_DIR
define("port", default=SERVER_PORT, help="run on the given port", type=int)
def main():
logging.config.dictConfig(LOGGING)
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
print("Development server is running at http://127.0.0.1:%s" % options.port)
print("Quit the server with Control-C")
if NEED_CERTIFICATE:
safety_certification()
# refresh every hour
tornado.ioloop.PeriodicCallback(refresh_certification, 1000 * 3600).start()
# refresh every five days
tornado.ioloop.PeriodicCallback(safety_certification, 1000 * 3600 * 24 * 5).start()
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
| [
"memory.util.safety_certification",
"tornado.options.define"
] | [((398, 473), 'tornado.options.define', 'define', (['"""port"""'], {'default': 'SERVER_PORT', 'help': '"""run on the given port"""', 'type': 'int'}), "('port', default=SERVER_PORT, help='run on the given port', type=int)\n", (404, 473), False, 'from tornado.options import define, options\n'), ((826, 848), 'memory.util.safety_certification', 'safety_certification', ([], {}), '()\n', (846, 848), False, 'from memory.util import refresh_certification, safety_certification\n')] |
# Generated by Django 2.0.5 on 2018-06-06 07:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('it_purchase_app', '0016_purchase_support_user'),
]
operations = [
migrations.RemoveField(
model_name='purchase',
name='support_user',
),
]
| [
"django.db.migrations.RemoveField"
] | [((238, 304), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""purchase"""', 'name': '"""support_user"""'}), "(model_name='purchase', name='support_user')\n", (260, 304), False, 'from django.db import migrations\n')] |
# View Modules
from .common import *
from .ajax import *
from .admin import *
from .other import *
import smtplib
def awards(request):
data = {}
today = date.fromtimestamp(time.time())
data["running_contest"] = False
data["contest"] = Contest.objects.filter(startDate__lte=today, endDate__gte=today).order_by('-id')
if data["contest"]:
data["running_contest"] = True
data["contest"] = data["contest"][0]
data["time"] = str(data["contest"].endDate - today)
if data["time"][0] == "-":
data["time"] = "no time remaining"
else:
data["time"] = data["time"].split(",")[0]
return render(request, "awards.html", data)
def browse(request, page=1):
page = int(page)
data = {"title":" - Browse Items"}
data["cookie"] = request.COOKIES
# Get news
data["news"] = News.objects.all().order_by("-id")
data["show_news"] = False
# Fetch the latest news post and see if it needs to be displayed
if data["news"]:
data["news"] = data["news"][0]
if str(data["news"].timestamp)[:10] > request.COOKIES.get("latest_news", "1970-01-01"):
data["show_news"] = True
#Get items
data["type"] = "browse"
data["items"] = loadItems(page, data["type"])
data["itemCount"] = Submissions.objects.filter(Q(set="0") | Q(slot="set")).count()
data["maxPage"] = int(math.ceil(data["itemCount"] / 10.0))
#Navigation
data["pageCurrent"] = page
data["pageList"] = range(max(1, page-3), min(page+4, data["maxPage"]+1))
return render(request, "browse.html", data)
def browseUser(request, steamID="0", page=1):
page = int(page)
data = {"title":" - User Items"}
#Get items
data["type"] = "browse-"+steamID
data["items"] = loadItems(page, data["type"])
data["itemCount"] = Submissions.objects.filter(Q(set="0") | Q(slot="set"), user__steamID=steamID).count()
data["maxPage"] = int(math.ceil(data["itemCount"] / 10.0))
#Navigation
data["pageCurrent"] = page
data["pageList"] = range(max(1, page-3), min(page+4, data["maxPage"]+1))
return render(request, "browse.html", data)
def contest(request, theme, page=1):
page = int(page)
data = {"title":" - Contest Entries"}
#Get items
data["type"] = "contest/" + theme
data["items"] = loadItems(page, data["type"])
contest = Contest.objects.filter(theme=theme).order_by("-id")[0]
data["itemCount"] = Submissions.objects.filter(Q(set="0") | Q(slot="set"), keywords__contains=theme, timestamp__gte=contest.startDate, timestamp__lte=contest.endDate).count()
data["maxPage"] = int(math.ceil(data["itemCount"] / 10.0))
#Navigation
data["pageCurrent"] = page
data["pageList"] = range(max(1, page-3), min(page+4, data["maxPage"]+1))
return render(request, "browse.html", data)
def create(request):
data = {'tf':tf}
data["user"] = getUser(request.COOKIES.get("session", None))
if data["user"]["steamID"] == "0":
return redirect("/openid/login")
if request.GET.get("debug"):
data["debug"] = True
# Check the user isn't banned
if banned(data["user"]["steamID"], request):
return redirect("/error/banned")
# Check for Spycheck mode
#if SPYCHECK and data["user"]["steamID"] == "0":
# return redirect("/error/spycheck")
# Check the user can still submit items today
if data["user"]["submitted"] == data["user"]["max_submitted"]:
return redirect("/error/max_submissions")
return render(request, "create.html", data)
def delete_comment(request):
return HttpResponse("Delete", context_instance=RequestContext(request))
def error(request, type="unknown"):
data = {"error":type}
data["user"] = getUser(request.COOKIES.get("session", None))
if type == "banned":
data["ban"] = Bans.objects.filter(Q(ip=request.META["REMOTE_ADDR"]) | Q(steamID=data["user"]["steamID"])).order_by('-id')[0]
return render(request, "error.html", data)
def generic(request, template, title=""):
return render(request, template, {"title": title})
def images(request, item):
data = {"title": " - Image Audit"}
#data["item"] = tf.itemInfo(int(item))["item_name"]
data["item"] = Item.objects.get(defindex=int(item)).item_name
data["images"] = []
images = glob.glob(ROOT + "assets/items/"+str(item)+"/*.png")
for image in images:
data["images"].append(image[25:])
data["images"].sort()
data["count"] = len(data["images"])
#data["next"] =
return render(request, "images.html", data)
def login(request):
choices = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
shuffle(choices)
nonce = (datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ") + "".join(choices[:6]))
post_params = "openid.return_to="+URL+"openid/complete/?janrain_nonce="+nonce
post_params += "&openid.realm="+URL+"&openid.ns=http://specs.openid.net/auth/2.0&openid.sreg.optional=email,fullname,nickname"
post_params += "&openid.claimed_id=http://specs.openid.net/auth/2.0/identifier_select&openid.ns.sreg=http://openid.net/extensions/sreg/1.1"
post_params += "&openid.identity=http://specs.openid.net/auth/2.0/identifier_select&openid.mode=checkid_setup"
if not request.GET.get("janrain_nonce"):
# Send request
url = "https://steamcommunity.com/openid/login?"+(post_params)
return redirect(url)
else:
# Check the response is valid
new_params = request.GET.copy()
new_params["openid.mode"] = "check_authentication"
resp = urllib.request.urlopen("https://steamcommunity.com/openid/login"+"?"+new_params.urlencode())
body = resp.read().decode("utf-8")
if "is_valid:true" not in body:
return redirect("/error/login")
# Valid login
data = {}
data["response"] = request.GET["openid.claimed_id"]
data["id"] = request.GET["openid.claimed_id"][36:]
# Set your Steam API key here
data["url"] = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key=TODO&steamids="+data["id"]
#Get public info
try:
data["info"] = json.loads(urllib.request.urlopen(data["url"]).read().decode("utf-8"))["response"]["players"][0]
except:
error_log = "==========\n"
error_log += str(datetime.now()) + "\n"
error_log += "ID: " + data["id"] + "\n"
try:
error_log += urllib.urlopen(data["url"]).read() + "\n"
except:
error_log + "Can't parse url " + data["url"] + "\n"
output = open("/var/projects/tf2tags.com/assets/data/login_errors.log", "w")
output.write(error_log)
output.close()
return redirect("/error/login")
if not data["info"].get("profilestate"):
return redirect("/error/login")
#Get/Set User data
user = Users.objects.filter(steamID=data["info"]["steamid"])
#Prepare response page
response = redirect("/")
if len(user) == 0: #New User
s = SessionStore()
s.save()
data["key"] = s.session_key
user = Users(steamID=data["info"]["steamid"], name=data["info"]["personaname"], profile=data["info"]["profileurl"], avatar=data["info"]["avatar"], session=data["key"])
else:
s = SessionStore()
s.save()
user = user[0]
data["key"] = user.session
if data["key"] == "":
data["key"] = s.session_key
#Update user session in DB
user = Users.objects.filter(steamID=data["info"]["steamid"])[0]
user.session = data["key"]
user.profile = data["info"]["profileurl"]
user.avatar = data["info"]["avatar"]
try:
response.set_cookie('session', data["key"], max_age=(3600*24*30))
user.save()
except:
return redirect("/error/login")
return response
def logout(request):
response = redirect("/")
#Clear DB session
user = Users.objects.filter(session=request.COOKIES.get("session", None))
if user:
user[0].session = ""
user[0].save()
#Clear cookie
response.delete_cookie('session')
response.delete_cookie('sessionid')
return response
def newsArchive(request, id=None):
data = {}
if id == None:
data["news"] = News.objects.all().order_by('-timestamp')
return render(request, "newsArchive.html", data)
elif id == "latest":
data["news"] = News.objects.filter().order_by('-timestamp')[0]
data["set_cookie"] = True
return render(request, "newsPost.html", data)
else:
data["news"] = News.objects.get(pk=id)
return render(request, "newsPost.html", data)
def modifyItem(request, item):
data = {}
data["user"] = getUser(request.COOKIES.get("session", None))
# First confirm the item is the user's or the user is an admin
itemID = item
items = Submissions.objects.filter(pk=item)
if items[0].set != "0":
items = Submissions.objects.filter(set=items[0].set)
if ((data["user"]["steamID"] != items[0].user.steamID) or (items[0].user.steamID == "0")):
if not data["user"]["admin"]:
return redirect("/")
if request.POST.get("action"):
if request.POST["action"] == "Confirm Deletion":
set = request.POST["set_id"]
if set == "0":
tf2.deleteItem(request.POST["id"])
else: # Delete the set
items = Submissions.objects.filter(set=set)
for item in items:
tf2.deleteItem(item.id)
return redirect("/")
else:
item_ids = request.POST.getlist("item_id")
items = Submissions.objects.filter(pk__in=item_ids)
count = 0
names = request.POST.getlist("name")
descs = request.POST.getlist("desc")
keywords = request.POST.get("keywords")
for item in items:
# See if the name has been changed
if (item.name != names[count] or item.desc != descs[count]):
posted = datetime.strptime(str(item.timestamp)[:19], "%Y-%m-%d %H:%M:%S")
now = datetime.now()
days = (now - posted)
# Change fields
item.name = names[count]
item.desc = descs[count]
# If the item is a day old, delete votes/comments
if days.days >= 1:
item.upVotes = 0
item.downVotes = 0
item.score = 0
tf2.deleteVotes(itemID)
item.keywords = keywords
item.save()
count += 1
return redirect(viewItem, item=itemID)
posted = datetime.strptime(str(items[0].timestamp)[:19], "%Y-%m-%d %H:%M:%S")
now = datetime.now()
days = (now - posted)
data["reset"] = (days.days >= 1)
itemslist = []
for item in items:
item.prepare()
itemslist.append(item)
# Remove set head from
if len(itemslist) > 1:
itemslist = itemslist[:-1]
data['items'] = itemslist
data["count"] = len(data["items"])
return render(request, "modifyItem.html", data)
def profile(request, steamID="0"):
data = {}
data["user"] = getUser(request.COOKIES.get("session", None))
page = 1
comment_list = 10 # Comments to show per page
comment_page = int(request.GET.get("cpage", 1))
data["next"] = comment_page + 1
data["prev"] = max(comment_page - 1, 1)
if request.POST.get("name") and steamID != "0":
if len(request.POST["name"].strip()) > 0:
you = Users.objects.get(steamID=steamID)
you.name = request.POST["name"]
you.save()
data["stats"] = getProfile(steamID)
data["you"] = False
if (data["user"]["steamID"] == data["stats"]["steamID"]) and data["user"]["steamID"] != "0":
data["you"] = True
# Latest Comments
if data["you"]:
comment_ids = {}
commented_on = Submissions.objects.filter(Q(set="0") | Q(slot="set"), user__steamID=data["stats"]["steamID"], comments__gte=1).order_by("-id")
for submission in commented_on:
if not comment_ids.get(submission.id):
comment_ids[submission.id] = submission.name
comments = Comments.objects.filter(itemID__in=comment_ids).order_by("-timestamp")[(comment_list*(comment_page-1)):(comment_list*(comment_page-1))+comment_list]
parsed_comments = []
parsed_ids = []
for comment in comments:
if True or comment.itemID not in parsed_ids:
comment.name = comment_ids[comment.itemID]
parsed_comments.append(comment)
parsed_ids.append(comment.itemID)
data["comments"] = parsed_comments
# Submissions - Only the latest 10
#data["submissions"] = Submissions.objects.filter(Q(set="0") | Q(slot="set"), user__steamID=data["stats"]["steamID"]).order_by("-id")[(10*(page-1)):(10*(page-1))+10]
return render(request, "profile.html", data)
def random(request):
data = {"title":" - Random Items"}
#Get items
data["type"] = "random"
data["items"] = loadItems(1, data["type"])
return render(request, "browse.html", data)
def results(request, page=1):
data = {}
page = int(page)
results = Submissions.objects.all()
if request.GET.get("class") and not request.GET.get("base"):
results = results.filter(role=request.GET["class"])
if request.GET.get("slot") and not request.GET.get("base"):
results = results.filter(slot=request.GET["slot"])
if request.GET.get("base"):
results = results.filter(base=request.GET["base"])
if request.GET.get("name"):
results = results.filter(name__icontains=request.GET["name"])
if request.GET.get("desc"):
results = results.filter(desc__icontains=request.GET["desc"])
if request.GET.get("start"):
results = results.filter(timestamp__gte=request.GET["start"])
if request.GET.get("end"):
results = results.filter(timestamp__lte=request.GET["end"])
if request.GET.get("keywords"):
keywords = request.GET.get("keywords").lower().replace(", ", ",")
results = results.filter(keywords__contains=keywords)
if request.GET.get("rating"):
results = results.filter(score__gte=request.GET["rating"])
# Order
if request.GET.get("order"):
if request.GET["order"] in ("timestamp", "score"):
descend = "-"
else:
descend = ""
results = results.order_by(descend + request.GET["order"], "-id")
# Page
data["itemCount"] = results.count()
results = results[(page-1)*10:(page-1)*10+10]
for item in results:
item.prepare()
#Get items
data["type"] = "results"
data["items"] = results
data["maxPage"] = int(math.ceil(data["itemCount"] / 10.0))
#Navigation
data["page"] = page
data["pageCurrent"] = page
data["pageList"] = range(max(1, page-2), min(page+7, data["maxPage"]+1))
data["qs"] = "?" + request.META["QUERY_STRING"]
return render(request, "browse.html", data)
def streak_search(request):
""" This feature has been removed """
return redirect("/")
"""
start = datetime.now()
data = {}
data["post"] = request.POST
if not request.POST.get("action"):
data["results"] = []
return render_to_response('streak_search.html', data, context_instance=RequestContext(request))
market_data = json.loads(open("/var/projects/tf2tags.com/assets/data/streak_search.json").read())
# Process results
results = []
data["meta"] = market_data["meta"]
max_price = request.POST.get("max_price", 32767)
if max_price == "":
max_price = 32767
max_price = float(max_price)
if market_data["meta"]["success"] != 1:
return render_to_response('streak_search.html', data, context_instance=RequestContext(request))
for row in market_data["data"]:
# General Filters
if row["name"][:13] == "Strange Part:":
continue
if float(row["price"][1:]) > max_price:
continue
#else:
# print float(row["price"][1:]), max_price
# print float(row["price"][1:]) > float(max_price), " greater than max"
# Item filters
if not request.POST.get("show_specialized_fabricator") and "Specialized" == row["name"][:11] and " Kit Fabricator" in row["name"]:
continue
if not request.POST.get("show_professional_fabricator") and "Professional" == row["name"][:12] and " Kit Fabricator" in row["name"]:
continue
if not request.POST.get("show_killstreak_kit") and "Kit" == row["name"][-3:] and "Killstreak" == row["name"][:10]:
continue
if not request.POST.get("show_specialized_kit") and "Kit" == row["name"][-3:] and "Specialized Killstreak" == row["name"][:22]:
continue
if not request.POST.get("show_professional_kit") and "Kit" == row["name"][-3:] and "Professional Killstreak" == row["name"][:23]:
continue
if not request.POST.get("show_killstreak_item") and " Kit" not in row["name"] and " Fabricator" not in row["name"] and "Killstreak" in row["name"] and "Professional Killstreak" not in row["name"] and "Specialized Killstreak" not in row["name"]:
continue
if not request.POST.get("show_specialized_killstreak_item") and " Kit" not in row["name"] and " Fabricator" not in row["name"] and "Specialized Killstreak" in row["name"]:
continue
if not request.POST.get("show_professional_killstreak_item") and " Kit" not in row["name"] and " Fabricator" not in row["name"] and "Professional Killstreak" in row["name"]:
continue
# Rarity filters
if not request.POST.get("show_unique") and "Vintage" not in row["name"] and "Genuine" not in row["name"] and "Strange" not in row["name"] and "Collector's" not in row["name"] and "Haunted" not in row["name"]:
continue
if not request.POST.get("show_vintage") and "Vintage" in row["name"]:
continue
if not request.POST.get("show_genuine") and "Genuine" in row["name"]:
continue
if not request.POST.get("show_strange") and "Strange" in row["name"]:
continue
if not request.POST.get("show_collectors") and "Collector's" in row["name"]:
continue
if not request.POST.get("show_haunted") and "Haunted" in row["name"]:
continue
results.append({"name":row["name"], "quantity":row["quantity"], "price":row["price"], "link":row["link"]})
data["results"] = sorted(results, key=lambda results: float(results["price"][1:]))
data["displaying"] = len(results)
return render_to_response('streak_search.html', data, context_instance=RequestContext(request))
"""
def submitComment(request):
user = getUser(request.COOKIES.get("session", None))
data = {}
data["user"] = user
if user["steamID"] == 0:
return redirect("/")
# Check the user isn't banned
if banned(user["steamID"], request):
return redirect("/error/banned")
if request.POST["comment"] == "":
return redirect("/")
# Check the user still has comments left for today
if data["user"]["posted_comments"] >= data["user"]["max_posted_comments"]:
return redirect("/error/max_submissions")
# Get user and update submission count
user_account = Users.objects.get(pk=data["user"]["id"])
user_account.posted_comments += 1
user_account.save()
data["user"]["posted_comments"] += 1
# This is "temporary"
if "toggaf "[::-1] in request.POST["comment"].lower():
return redirect("/")
# Save comment
comment = Comments(itemID=int(request.POST["item"]), user_id=user["id"], ip=request.META["REMOTE_ADDR"], comment=request.POST["comment"][:500])
comment.save()
# Update item's comments counter
item = Submissions.objects.get(pk=int(request.POST["item"]))
item.comments += 1
item.save()
return redirect("/view-"+request.POST["item"])
def submitItem(request):
data = {}
data["user"] = getUser(request.COOKIES.get("session", None))
ip = request.META["REMOTE_ADDR"]
# Check the user isn't banned
if banned(data["user"]["steamID"], request):
return redirect("/error/banned")
# Check the user still has submissions left for today
if data["user"]["steamID"] == "0":
if SPYCHECK:
return redirect("/") # Spy Checking mode
today = date.fromtimestamp(time.time())
submitted = Submissions.objects.filter(ip=ip, timestamp__gte=today).count()
if submitted >= 3:
return redirect("/error/max_submissions")
elif data["user"]["submitted"] >= data["user"]["max_submitted"]:
return redirect("/error/max_submissions")
# Get user
user_account = Users.objects.get(pk=data["user"]["id"])
# Prepare Data
items = []
post = json.loads(request.POST["submission"])
now = time.time()
# Manage set info
if len(post["items"]) > 1:
item_set = True
if post["meta"]["set_name"] == "":
post["meta"]["set_name"] == "The Set With No Name"
if post["meta"]["set_icon"] == "":
post["meta"]["set_icon"] = "-10"
set_id = str(now).replace(".","") + "-" + ip.replace(".", "")
else:
item_set = False
set_id = "0"
# Check for an item with the same name + desc + base if it's not a set.
#print("POST IS ", post)
if not item_set and duplicate_check(post["items"][0]["name"], post["items"][0]["desc"], post["items"][0]["base"]):
return redirect("/error/duplicate")
# Loop over each item in the submission
for item in post["items"]:
if item["style"] == "":
item["style"] = 0
if item["color"] == "":
item["color"] = "FFD700"
if item["prefix"] == "Unique":
item["prefix"] = ""
# Confirm no-paint yes-style image exists
if (int(item["style"]) > 0) and (item["paint"] == ""):
if not os.path.isfile("/var/projects/tf2tags.com/assets/items/"+str(item["defindex"])+"-"+str(item["style"])+".png"):
item["style"] = 0
item = Submissions(set=set_id, defindex=item["defindex"], role=item["role"], slot=item["slot"], base=item["base"], name=item["name"][:40], desc=item["desc"][:80], prefix=item["prefix"],
filter=item["filter"], color=item["color"], paint=item["paint"], particles=item["particles"], style=item["style"], keywords=post["meta"]["keywords"].lower(), user=user_account, ip=ip)
# Slur check
words = item.name.split(" ") + item.desc.split(" ")
for slur in SLURS:
if slur.lower() in words:
# Congrats, you're getting banned
if data["user"]["steamID"] == "0": # Ban anon
ban = Bans(ip=ip, steamID="-", notes="Automatic ban for slurs.", begins=date.fromtimestamp(time.time()), ends=date.fromtimestamp(time.time() + 86400))
else:
ban = Bans(ip=ip, steamID=data["user"]["steamID"], notes="Automatic ban for slurs.", begins=date.fromtimestamp(time.time()), ends=date.fromtimestamp(time.time() + 43200))
ban.save()
return redirect("/error/banned")
# Validate or Break
valid = tf.validate(item)
if valid == "SUCCESS":
items.append(item)
else:
data["failure"] = json.dumps(item)
data["errors"] = valid
return render_to_response('submissionError.html', data, context_instance=RequestContext(request))
# Save items
for item in items:
item.save()
outgoing_id = items[-1].id
# Save set head
if item_set:
item = Submissions(set=set_id, defindex=post["meta"]["set_icon"], slot="set", base="Set Head", name=post["meta"]["set_name"][:40], desc="Contains "+str(len(post["items"]))+" Items", keywords=post["meta"]["keywords"].lower(), user=user_account, ip=ip)
item.save()
outgoing_id = item.id
if (len(items) > 0):
# Update submission count
user_account.submitted += 1
user_account.save()
data["user"]["submitted"] += 1
return redirect(viewItem, item=outgoing_id)
return redirect("/")
def submitReport(request):
user = getUser(request.COOKIES.get("session", None))
# Save report
report = Flagged(itemID=int(request.POST["item"]), type=request.POST["reason"], explanation=request.POST["explanation"], ip=request.META["REMOTE_ADDR"], steamID=user["steamID"])
report.save()
# Get item
item = Submissions.objects.filter(pk=int(request.POST["item"]))
if len(item) != 1:
return redirect("/view-"+request.POST["item"])
item = item[0]
start_date = datetime.now()
end_date = start_date
# Email me about it!
REPORT_MAIL = "An item has been reported on tf2tags.\n\nThe item in question can be found here: http://tf2tags.com/view-{}\n\nSUBMISSION INFORMATION:\nBase: {}\nName: {}\nDesc: {}\n\nIP: {}\nSteamID: {}\nAuthor: {}\n\n\nREPORT INFORMATION:\nItem #: {}\nReason: {}\nExplanation: {}\nIP: {}\nSteamID: {}\nSteam Name: {}\n\nBAN QUERY:\n\nINSERT INTO tf2tags_bans (ip, steamID, notes, begins, ends) VALUES ('{}', '{}', 'NOTES', '{}', '{}');"
#SERVER = "tf2tags.com"
SERVER = "localhost"
FROM = "TODO"
TO = ["TODO"]
SUBJ = "Item #"+request.POST["item"]+" - Reported for " + request.POST["reason"]
TEXT = REPORT_MAIL.format(item.id, item.base, item.name, item.desc, item.ip, item.user.steamID, "---", item.id, request.POST["reason"], request.POST["explanation"], request.META["REMOTE_ADDR"], user["steamID"], user["name"], item.ip, item.user.steamID, start_date, end_date)
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJ, TEXT)
#print message
try:
server = smtplib.SMTP(SERVER)
server.sendmail(FROM, TO, message)
server.quit()
except:
print("Could not send email")
print(message)
None
return redirect("/view-"+request.POST["item"])
def summary(request):
steam_id = request.GET.get("steam_id")
profile = getProfile(steam_id)
return HttpResponse(json.dumps(profile))
def topItems(request, days=0, page=1):
page = int(page)
data = {}
#Get items
data["type"] = "top/"+str(days)
data["items"] = loadItems(page, data["type"])
if days == 0:
data["itemCount"] = Submissions.objects.filter(Q(set="0") | Q(slot="set")).count()
data["type"] = "top"
else:
days = int(days)
now = datetime.utcnow().replace(tzinfo=utc)
timediff = now - timedelta(days=days)
data["itemCount"] = Submissions.objects.filter(Q(set="0") | Q(slot="set"), timestamp__gte=(timediff)).count()
data["maxPage"] = int(math.ceil(data["itemCount"] / 10.0))
#Navigation
data["pageCurrent"] = page
data["pageList"] = range(max(1, page-2), min(page+7, data["maxPage"]+1))
return render(request, "browse.html", data)
def viewItem(request, item=1):
data = {"viewing":True}
items = Submissions.objects.filter(pk=item)
if len(items) < 1:
return redirect("/browse")
if items[0].set != "0":
items = Submissions.objects.filter(set=items[0].set)
itemslist = []
for item in items:
item.prepare()
if item.paint != "":
for x in range(0,len(tf.hex)):
if item.paint == tf.hex[x]:
item.paint_name = tf.paints[x]
break
if item.style != 0:
data["style_name"] = Style.objects.filter(defindex_id=item.defindex, style_num=item.style)[0].name
itemslist.append(item)
data["identifier"] = "Item #"+str(items[len(items)-1].id)
data["upVotes"] = items[len(items)-1].upVotes
data["downVotes"] = items[len(items)-1].downVotes
data["ktd"] = items[len(items)-1].ktd
data["ktdColor"] = items[len(items)-1].ktdColor
if items[0].set != "0":
data["identifier"] = items[len(items)-1].name
# Get comments
if (itemslist[-1].comments > 0):
data["comments"] = Comments.objects.filter(itemID=itemslist[-1].id).order_by("-id")
else:
data["comments"] = []
data['items'] = itemslist
data["count"] = len(data["items"])
return render(request, "viewItem.html", data)
def votes_item(request, item):
data = {}
offset = int(request.GET.get("offset", 0))
data["next"] = offset + 25
data["prev"] = max(offset - 25, 0)
votes = Votes.objects.filter(itemID=item).order_by("-timestamp")[offset:offset+25]
data["votes"] = votes
return render(request, "votes_item.html", data)
def votes_user(request, vote_id):
data = {}
offset = int(request.GET.get("offset", 0))
data["next"] = offset + 25
data["prev"] = max(offset - 25, 0)
source = Votes.objects.get(id=vote_id)
votes = Votes.objects.filter(ip=source.ip).order_by("-timestamp")[offset:offset+25]
# Find the submission authors
item_ids = []
for vote in votes:
item_ids.append(vote.itemID)
items = Submissions.objects.filter(id__in=item_ids)
for vote in votes:
for item in items:
if vote.itemID == item.id:
vote.author = item.user.name
break
data["votes"] = votes
data["voter_key"] = source.public()
data["item"] = source.itemID
return render(request, "votes_user.html", data)
def winners(request, year=2013):
data = {}
#contests = Contest.objects.filter(endDate__gte=str(year)+"-01-01", endDate__lte=str(year)+"-12-31")
contests = Contest.objects.all().order_by("-id")
winners = []
themes = []
for winner in contests:
if winner.winner != 0:
winners.append(winner.winner)
themes.append(winner.theme)
# Get winning items
items = Submissions.objects.filter(pk__in=winners).order_by('-id')
prevDate = "1970-01-01"
count = 0
for item in items:
prevDate = item.prepare(prevDate)
item.theme = themes[count]
count += 1
data["items"] = items
return render(request, "winners.html", data)
def test(request, page=1):
from django import VERSION
return HttpResponse("V"+str(VERSION))
| [
"smtplib.SMTP"
] | [((27139, 27159), 'smtplib.SMTP', 'smtplib.SMTP', (['SERVER'], {}), '(SERVER)\n', (27151, 27159), False, 'import smtplib\n')] |
import matplotlib.pyplot as plt
import numpy as np
a = np.loadtxt('/home/doublepoints/Documents/selfdriving-T3/CarND-Semantic-Segmentation/loss/20180919222812.log')
print(a)
x = np.arange(0,50,1,dtype=int)
y =a
plt.plot(x,y)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.loadtxt",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((56, 175), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/doublepoints/Documents/selfdriving-T3/CarND-Semantic-Segmentation/loss/20180919222812.log"""'], {}), "(\n '/home/doublepoints/Documents/selfdriving-T3/CarND-Semantic-Segmentation/loss/20180919222812.log'\n )\n", (66, 175), True, 'import numpy as np\n'), ((181, 211), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(1)'], {'dtype': 'int'}), '(0, 50, 1, dtype=int)\n', (190, 211), True, 'import numpy as np\n'), ((215, 229), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (223, 229), True, 'import matplotlib.pyplot as plt\n'), ((229, 248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (239, 248), True, 'import matplotlib.pyplot as plt\n'), ((249, 267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (259, 267), True, 'import matplotlib.pyplot as plt\n'), ((269, 279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (277, 279), True, 'import matplotlib.pyplot as plt\n')] |
from client import Client
import numpy as np
import configparser
from utilities import *
# Handle configuration file
config = configparser.ConfigParser()
config.read('config.ini')
binance_config = config['Binance Futures']
order_layout_mode = binance_config['OrderLayoutMode']
if order_layout_mode not in ('constant', 'incrementalRising', 'incrementalFalling'):
print('Wrong layout mode value in config.ini')
exit(-1)
api_key = binance_config['ApiKey']
api_secret = binance_config['ApiSecret']
is_testnet = str2bool(binance_config['Testnet'])
margin_type = binance_config['MarginType'].upper()
is_post_only = str2bool(binance_config['PostOnly'])
print('Connecting to Binance...')
client = Client(api_key=api_key, api_secret=api_secret, testnet=is_testnet)
exchange_info = client.futures_exchange_info()
futures_account_positions = client.futures_account()['positions']
available_balance_usdt = float(client.futures_account()['availableBalance'])
symbol = input('Trading pair [BTCUSDT]: ').upper() or 'BTCUSDT'
# Show ticker info
tickers = client.get_symbol_ticker()
user_ticker = find_object(tickers, symbol, 'symbol')
try:
print("%s price: %s" % (symbol, user_ticker['price']))
except NameError:
print('Wrong symbol, use the same format as \'BTCUSDT\'')
exit(-1)
print('Current balance: %g USDT' % available_balance_usdt)
leverage = int(input('Leverage to use [1]: ') or '1')
if order_layout_mode == 'constant':
single_amount_crypto = float(input('Constant amount to layer in %s: ' % symbol))
delta_amount_crypto = 0
else:
single_amount_crypto = float(input('Initial amount to layer in %s: ' % symbol))
delta_amount_crypto = float(input('Increment for each order: '))
from_price = float(input('First price: '))
to_price = float(input('Last price: '))
side = input('Long or Short? [short] ').lower()
futures_side = 'BUY' if side == 'long' else 'SELL'
# start layering from prices closer to market price
if (futures_side == 'BUY' and from_price < to_price) or (futures_side == 'SELL' and from_price > to_price):
from_price, to_price = to_price, from_price
available_margin = available_balance_usdt * leverage
max_n = max_orders_per_interval(from_price, to_price, available_margin, single_amount_crypto, order_layout_mode, delta_amount_crypto)
max_n = max_n if max_n < 101 else '100+'
orders_input = 'How many orders [Max = {0}]? '.format(str(max_n))
order_num = int(input(orders_input))
cur_leverage = find_object(futures_account_positions, symbol, 'symbol')['leverage']
cur_isolated = find_object(futures_account_positions, symbol, 'symbol')['isolated']
exchange_symbols = exchange_info['symbols']
asset_precision = find_object(exchange_symbols, symbol, 'symbol')['quantityPrecision']
usdt_precision = find_object(exchange_symbols, symbol, 'symbol')['pricePrecision']
# Truncate inputs
if order_layout_mode == 'constant':
single_amount_crypto = float(truncate(single_amount_crypto, asset_precision))
else:
single_amount_crypto = float(truncate(single_amount_crypto, asset_precision))
delta_amount_crypto = float(truncate(delta_amount_crypto, asset_precision))
if int(cur_leverage) != leverage:
client.futures_change_leverage(symbol=symbol, leverage=leverage)
print('Changed leverage to ' + str(leverage))
cur_margin = 'CROSS' if cur_isolated == False else 'ISOLATED'
if cur_margin != margin_type:
client.futures_change_margin_type(symbol=symbol, marginType=margin_type)
print('Changed margin type to ' + margin_type)
order_prices = np.linspace(from_price, to_price, num=order_num).round(usdt_precision)
if order_layout_mode == 'incrementalFalling':
order_prices = np.flipud(order_prices)
time_in_force = 'GTC' if is_post_only == False else 'GTX'
if order_layout_mode == 'constant':
for order_price in order_prices:
print('Setting order: %s %g of %s @%g USDT' % (futures_side, single_amount_crypto, symbol, order_price))
client.futures_create_order(symbol=symbol, side=futures_side, type='LIMIT', quantity=single_amount_crypto, price=order_price, timeInForce=time_in_force)
else:
order_quantity = float(truncate(single_amount_crypto, asset_precision)) # python randomly adds decimals
for x in range(order_num):
print('Setting order: %s %g of %s @%g USDT' % (futures_side, order_quantity, symbol, order_prices[x]))
client.futures_create_order(symbol=symbol, side=futures_side, type='LIMIT', quantity=order_quantity, price=order_prices[x], timeInForce=time_in_force)
order_quantity = float(truncate(order_quantity + delta_amount_crypto, asset_precision))
print('Done')
| [
"numpy.linspace",
"configparser.ConfigParser",
"client.Client",
"numpy.flipud"
] | [((127, 154), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (152, 154), False, 'import configparser\n'), ((699, 765), 'client.Client', 'Client', ([], {'api_key': 'api_key', 'api_secret': 'api_secret', 'testnet': 'is_testnet'}), '(api_key=api_key, api_secret=api_secret, testnet=is_testnet)\n', (705, 765), False, 'from client import Client\n'), ((3646, 3669), 'numpy.flipud', 'np.flipud', (['order_prices'], {}), '(order_prices)\n', (3655, 3669), True, 'import numpy as np\n'), ((3510, 3558), 'numpy.linspace', 'np.linspace', (['from_price', 'to_price'], {'num': 'order_num'}), '(from_price, to_price, num=order_num)\n', (3521, 3558), True, 'import numpy as np\n')] |
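The layering logic above spaces limit orders linearly between two prices and, in the incremental modes, grows the order size step by step; a standalone sketch of that schedule using only NumPy (all numbers are illustrative assumptions):
import numpy as np
from_price, to_price, order_num = 30000.0, 29500.0, 5
base_qty, delta_qty = 0.010, 0.005
prices = np.linspace(from_price, to_price, num=order_num).round(2)
for i, price in enumerate(prices):
    qty = round(base_qty + i * delta_qty, 3)  # constant mode would reuse base_qty for every order
    print(f"SELL {qty} BTCUSDT @ {price}")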
#!/usr/bin/env python3
# Usage: $./find_uuids_by_addr.py address path_to_jsons
import os
import json
import sys
def get_files_in_directory(dirpath):
    # Join with dirpath so the isfile check (and the later reads) also work when
    # the script is run from outside the JSON directory.
    files = [os.path.join(dirpath, f) for f in os.listdir(dirpath)
             if os.path.isfile(os.path.join(dirpath, f))]
return files
def json_files_loader(jsons_list): # jsons_list - list of files to read
dicts_list = []
for json_file in jsons_list:
if ".json" in json_file:
with open(json_file, 'r') as f:
fstring = (f.read()).encode('utf-8').strip()
jsf = json.loads(fstring)
dicts_list.append(jsf)
return dicts_list
def find_uuids_by_addr(dirpath, address):
"""Returns list with all swaps(uuids) made to/from address"""
jflist = get_files_in_directory(dirpath)
dicts = json_files_loader(jflist)
swaps_list = []
for d in dicts:
swap_uuid = d.get('uuid')
print('.. checking ' + swap_uuid)
events = d.get('events')
for event in events:
try:
from_field = event.get('event').get('data').get('from')
if address in from_field:
swaps_list.append(swap_uuid)
break
to_field = event.get('event').get('data').get('to')
if address in to_field:
swaps_list.append(swap_uuid)
break
except Exception as e:
pass
return swaps_list
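# Note (added for clarity): find_uuids_by_addr assumes each swap JSON file has the
# shape {"uuid": "...", "events": [{"event": {"data": {"from": ..., "to": ...}}}, ...]};
# events that lack from/to data are simply skipped by the bare except above.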
def main():
"""Prints all swaps(uuids) made to/from address if any"""
try:
address = sys.argv[1]
try:
path = sys.argv[2]
swaps = find_uuids_by_addr(path, address)
if swaps:
print("\nfound in swaps : " + str(swaps))
else:
print("\n" + address + " not found")
except Exception as e:
print("Error: " + str(e))
except Exception as e:
print(e)
print("Usage: ./find_uuids_by_addr.py address path_to_jsons")
if __name__ == '__main__':
main()
| [
"os.path.isfile",
"json.loads",
"os.listdir"
] | [((185, 204), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (195, 204), False, 'import os\n'), ((208, 225), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (222, 225), False, 'import os\n'), ((542, 561), 'json.loads', 'json.loads', (['fstring'], {}), '(fstring)\n', (552, 561), False, 'import json\n')] |
import re
__all__ = ['strip_newlines']
def strip_newlines(contents, max_consecutive_newlines):
"""Removes consecutive newlines in excess of max_consecutive_newlines.
If max_consecutive_newlines < 0, the contents is returned
unchanged.
If max_consecutive_newlines == 0, all strings of consecutive
newlines are replaced by a single space.
If max_consecutive_newlines > 0, all strings of consecutive
newlines in excess of max_consecutive_newlines are replaced by
max_consecutive_newlines newlines.
"""
if max_consecutive_newlines < 0:
return contents
elif max_consecutive_newlines == 0:
return re.sub(r'\n+', ' ', contents)
else:
newlines = r'\n' * max_consecutive_newlines
return re.sub(newlines + '+', newlines, contents)
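# Minimal self-check of the behaviour described in the docstring
# (added for illustration; not part of the original module).
if __name__ == '__main__':
    assert strip_newlines('a\n\n\n\nb', 2) == 'a\n\nb'
    assert strip_newlines('a\n\n\n\nb', 0) == 'a b'
    assert strip_newlines('a\n\n\n\nb', -1) == 'a\n\n\n\nb'
    print('strip_newlines examples OK')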
| [
"re.sub"
] | [((656, 685), 're.sub', 're.sub', (['"""\\\\n+"""', '""" """', 'contents'], {}), "('\\\\n+', ' ', contents)\n", (662, 685), False, 'import re\n'), ((763, 805), 're.sub', 're.sub', (["(newlines + '+')", 'newlines', 'contents'], {}), "(newlines + '+', newlines, contents)\n", (769, 805), False, 'import re\n')] |
from flask import Flask
from flask_restful import Resource, Api, reqparse
import glob
import pandas as pd
import json
import os
import sys
app = Flask(__name__)
api = Api(app)
DATA_PATH = sys.argv[1]
IMG_PATHS = os.path.join(DATA_PATH, 'img', '*.jpg')
TAG_STORE_FILE = os.path.join(DATA_PATH, 'is_car_tag.dict')
ipaths = glob.iglob(IMG_PATHS)
car_models = pd.DataFrame(
data = [p for p in ipaths],
columns = ['img_path']
)
car_models['car_model'] = car_models.img_path.map(lambda s: '-'.join(s.split('/')[-1].split('__')[0].split('-')[1:-2]))
car_models['img_name'] = car_models.img_path.str.split('/').map(lambda x: x[-1])
with open(TAG_STORE_FILE) as fl:
is_car = json.load(fl)
def generate_dataset():
dataset = car_models.sample(frac=1).to_dict(orient='records')
for data in dataset:
if data['img_path'] in is_car:
continue
yield data
dataset_generator = generate_dataset()
parser = reqparse.RequestParser()
parser.add_argument('img_path')
parser.add_argument('label')
class ImageLabeler(Resource):
def get(self):
        payload = next(dataset_generator)  # use the builtin so this works on Python 3 as well
N = len(is_car)
car_count = sum(is_car.values())
not_car_count = N - car_count
payload['car_count'] = car_count
payload['not_car_count'] = not_car_count
return payload
def post(self):
args = parser.parse_args()
is_car[args['img_path']] = int(args['label'])
data = {'img_path': args['img_path'], 'label': int(args['label'])}
if len(is_car) % 10 == 0:
print('Saving {} labeled data...'.format(len(is_car)))
with open(TAG_STORE_FILE, 'w') as fl:
json.dump(is_car, fl)
print(data)
return data, 201
api.add_resource(ImageLabeler, '/')
if __name__ == '__main__':
# Start a SimpleHttpServer in the image directory
# python -m SimpleHTTPServer 8800
# <ip>:8800/<image name>
app.run(host='0.0.0.0', debug=True)
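# Illustrative client usage (assumes the default Flask port 5000 on localhost):
#   curl http://localhost:5000/                                   -> next unlabeled image plus label counts
#   curl -d "img_path=<path>" -d "label=1" http://localhost:5000/  -> record a label
# Labels are flushed to TAG_STORE_FILE whenever the number of stored labels is a multiple of 10.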
| [
"flask_restful.reqparse.RequestParser",
"glob.iglob",
"flask_restful.Api",
"flask.Flask",
"os.path.join",
"json.load",
"pandas.DataFrame",
"json.dump"
] | [((148, 163), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (153, 163), False, 'from flask import Flask\n'), ((170, 178), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (173, 178), False, 'from flask_restful import Resource, Api, reqparse\n'), ((216, 255), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""img"""', '"""*.jpg"""'], {}), "(DATA_PATH, 'img', '*.jpg')\n", (228, 255), False, 'import os\n'), ((273, 315), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""is_car_tag.dict"""'], {}), "(DATA_PATH, 'is_car_tag.dict')\n", (285, 315), False, 'import os\n'), ((326, 347), 'glob.iglob', 'glob.iglob', (['IMG_PATHS'], {}), '(IMG_PATHS)\n', (336, 347), False, 'import glob\n'), ((361, 421), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[p for p in ipaths]', 'columns': "['img_path']"}), "(data=[p for p in ipaths], columns=['img_path'])\n", (373, 421), True, 'import pandas as pd\n'), ((949, 973), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (971, 973), False, 'from flask_restful import Resource, Api, reqparse\n'), ((685, 698), 'json.load', 'json.load', (['fl'], {}), '(fl)\n', (694, 698), False, 'import json\n'), ((1701, 1722), 'json.dump', 'json.dump', (['is_car', 'fl'], {}), '(is_car, fl)\n', (1710, 1722), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Ice core AR(n) fits
Created on Wed May 12 14:00:10 2021
@author: lizz
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
## Read in Greenland time series
core_accum_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Ice_core_accum/Andersen_etal_2006_Annual_Accumulation_22Mar2011-trimmed.csv'
core_tseries = pd.read_csv(core_accum_fpath, index_col=0, parse_dates=[0])
core_names = core_tseries.columns
series_to_test = core_tseries
## Pre-process data
anomaly_series = series_to_test - series_to_test.mean()
def adf_test(timeseries):
print('A timeseries ready for AR(n) fitting should have ADF test statistic more negative than critical value (reject the null hypothesis).')
print ('Results of Dickey-Fuller Test:')
dftest = sm.tsa.stattools.adfuller(timeseries, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
def kpss_test(timeseries):
print('A timeseries ready for AR(n) fitting should have KPSS statistic lower than the critical value (fail to reject the null hypothesis).')
print ('Results of KPSS Test:')
kpsstest = sm.tsa.stattools.kpss(timeseries, regression='c', nlags="auto")
kpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic','p-value','Lags Used'])
for key,value in kpsstest[3].items():
kpss_output['Critical Value (%s)'%key] = value
print (kpss_output)
stationarity_test_case = anomaly_series[core_names[0]][~np.isnan(anomaly_series[core_names[0]])]
adf_test(stationarity_test_case)
kpss_test(stationarity_test_case)
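# Interpretation note (added for clarity): the series is treated as stationary,
# and hence suitable for AR(n) fitting, when the ADF test rejects its unit-root
# null (statistic more negative than the critical value) and the KPSS test fails
# to reject its stationarity null (statistic below the critical value).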
# ## Fit an AR[n] model
# n = 1
# for m in model_names:
# mod = AutoReg(anomaly_series[m], n, seasonal=True)
# results = mod.fit()
# print('Bayesian Information Criterion for model {}, AR({}): {}'.format(
# m, n, results.bic))
## Compile BIC for different AR[n] models
test_period = anomaly_series.iloc[150:180]
comparison_n = range(1,6)
bic_per_n = pd.DataFrame(index=comparison_n, columns=core_names)
for c in core_names:
for n in comparison_n:
mod = AutoReg(test_period[c], n, seasonal=False)
results = mod.fit()
bic_per_n[c][n] = results.bic
b = bic_per_n.astype(float)
## Is there a best fit AR[n], as judged by BIC?
bic_difference = b.transform(lambda x: x-x.min())
print('Among AR(n) fits with n in {}: \r'.format(comparison_n))
print('Best fit n per model is as follows: \n',
b.idxmin())
for c in core_names:
bic_difference[c] = pd.to_numeric(bic_difference[c])
if any(bic_difference[c] > 2):
print('Difference is statistically significant for core {}.'.format(c))
else:
print('No significant difference among fits to {} output'.format(c))
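# The threshold of 2 used above follows the common BIC rule of thumb: a difference
# smaller than about 2 between candidate models is not treated as meaningful
# evidence in favour of the lower-BIC model.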
| [
"pandas.Series",
"pandas.read_csv",
"statsmodels.api.tsa.stattools.adfuller",
"statsmodels.api.tsa.stattools.kpss",
"pandas.to_numeric",
"numpy.isnan",
"statsmodels.tsa.ar_model.AutoReg",
"pandas.DataFrame"
] | [((480, 539), 'pandas.read_csv', 'pd.read_csv', (['core_accum_fpath'], {'index_col': '(0)', 'parse_dates': '[0]'}), '(core_accum_fpath, index_col=0, parse_dates=[0])\n', (491, 539), True, 'import pandas as pd\n'), ((2235, 2287), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'comparison_n', 'columns': 'core_names'}), '(index=comparison_n, columns=core_names)\n', (2247, 2287), True, 'import pandas as pd\n'), ((912, 964), 'statsmodels.api.tsa.stattools.adfuller', 'sm.tsa.stattools.adfuller', (['timeseries'], {'autolag': '"""AIC"""'}), "(timeseries, autolag='AIC')\n", (937, 964), True, 'import statsmodels.api as sm\n'), ((980, 1088), 'pandas.Series', 'pd.Series', (['dftest[0:4]'], {'index': "['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used']"}), "(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used',\n 'Number of Observations Used'])\n", (989, 1088), True, 'import pandas as pd\n'), ((1420, 1483), 'statsmodels.api.tsa.stattools.kpss', 'sm.tsa.stattools.kpss', (['timeseries'], {'regression': '"""c"""', 'nlags': '"""auto"""'}), "(timeseries, regression='c', nlags='auto')\n", (1441, 1483), True, 'import statsmodels.api as sm\n'), ((1502, 1576), 'pandas.Series', 'pd.Series', (['kpsstest[0:3]'], {'index': "['Test Statistic', 'p-value', 'Lags Used']"}), "(kpsstest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])\n", (1511, 1576), True, 'import pandas as pd\n'), ((2757, 2789), 'pandas.to_numeric', 'pd.to_numeric', (['bic_difference[c]'], {}), '(bic_difference[c])\n', (2770, 2789), True, 'import pandas as pd\n'), ((1753, 1792), 'numpy.isnan', 'np.isnan', (['anomaly_series[core_names[0]]'], {}), '(anomaly_series[core_names[0]])\n', (1761, 1792), True, 'import numpy as np\n'), ((2350, 2392), 'statsmodels.tsa.ar_model.AutoReg', 'AutoReg', (['test_period[c]', 'n'], {'seasonal': '(False)'}), '(test_period[c], n, seasonal=False)\n', (2357, 2392), False, 'from statsmodels.tsa.ar_model import AutoReg, ar_select_order\n')] |
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
Some utility functions.
"""
from builtins import str
# from builtins import object
from future.types import newstr
import six
import datetime
from functools import partial
from etgen import etree
from django.utils.functional import Promise
from django.utils.encoding import force_text
def join_elems(elems, sep=' '):
"""
Examples:
>>> join_elems([1, 2, 3])
[1, ' ', 2, ' ', 3]
>>> join_elems([1, 2, 3],' / ')
[1, ' / ', 2, ' / ', 3]
>>> join_elems([])
[]
"""
if not callable(sep):
sep_value = sep
def sep():
return sep_value
l = []
s = None
for e in elems:
if s is not None:
l.append(s)
s = sep()
l.append(e)
return l
def forcetext(iter):
"""
Resolve any lazy translatable strings. Utility function for lxml
element trees.
"""
for i in iter:
if isinstance(i, Promise):
yield str(i)
else:
yield i
def pretty_print(elem):
"""
Return a pretty-printed XML string for the Element.
"""
return prettify(etree.tostring(elem, 'utf-8'))
# the following also indented:
# from http://renesd.blogspot.com/2007/05/pretty-print-xml-with-python.html
# via http://broadcast.oreilly.com/2010/03/pymotw-creating-xml-documents.html
#~ from xml.dom import minidom
#~ rough_string = etree.tostring(elem, 'utf-8')
#~ reparsed = minidom.parseString(rough_string)
#~ return reparsed.toprettyxml(indent=" ")
def prettify(s):
return s.replace('><', '>\n<')
def compatstr(s):
"""
The `python-future <http://python-future.org/>`__ package
introduces a special helper class `newstr` which simulates, under
Python 2, the behaviour of Python 3 strings. But
`xml.etree.ElementTree
<https://docs.python.org/2/library/xml.etree.elementtree.html>`__
in Python 2 doesn't know about `python-future` and produces
invalid XML when you feed it with such a string.
    So this function converts any `newstr` back to a real `unicode`
    string (`six.text_type`).
    TODO: Not yet tested under Python 3. At best it is just
    inefficient.
"""
# assert_pure(s)
if six.PY2 and isinstance(s, newstr):
return six.text_type(s)
return s
RESERVED_WORDS = frozenset("""
and del from not while
as elif global or with
assert else if pass yield
break except import print
class exec in raise
continue finally is return
def for lambda try
""".split())
TYPEMAP = {
#~ datetime.datetime: py2str,
#~ IncompleteDate : lambda e,v : str(v),
datetime.datetime: lambda e, v: v.strftime("%Y%m%dT%H%M%S"),
datetime.date: lambda e, v: v.strftime("%Y-%m-%d"),
int: lambda e, v: str(v),
}
class Namespace(object):
"""
An XML namespace. Base class for :class:`etgen.html.HtmlNamespace`
and the namespaces defined in :mod:`etgen.intervat`.
"""
prefix = None
targetNamespace = None
names = None
def __init__(self, targetNamespace=None, names=None, prefix=None):
#~ if prefix is not None:
#~ self.prefix = prefix
#~ kw.setdefault('typemap',TYPEMAP)
#~ kw.setdefault('makeelement',self.makeelement)
#~ nsmap = kw.setdefault('nsmap',{})
if prefix is not None:
self.prefix = prefix
if names is not None:
self.names = names
if targetNamespace is not None:
self.targetNamespace = targetNamespace
if self.targetNamespace is not None:
#~ kw.update(namespace=self.targetNamespace)
self._ns = '{' + self.targetNamespace + '}'
if self.prefix is not None:
etree.register_namespace(self.prefix, self.targetNamespace)
#~ if prefix:
#~ nsmap[prefix] = self.targetNamespace
#~ if used_namespaces is not None:
#~ self.used_namespaces = used_namespaces
#~ if self.used_namespaces is not None:
#~ for ns in self.used_namespaces:
#~ nsmap[ns.prefix] = ns.targetNamespace
#~ self._element_maker = ElementMaker(**kw)
#~ self._source_elements = {}
if self.names is not None:
self.define_names(self.names)
self.setup_namespace()
def iselement(self, *args, **kw):
return etree.iselement(*args, **kw)
def setup_namespace(self):
pass
def tostring(self, element, *args, **kw):
class dummy(object):
pass
data = []
file = dummy()
file.write = data.append
if self.targetNamespace is not None:
kw.setdefault('default_namespace', self.targetNamespace)
etree.ElementTree(element).write(file, *args, **kw)
return b"".join(data).decode("utf-8")
def tostring_pretty(self, *args, **kw):
#~ kw.setdefault('xml_declaration',False)
#~ kw.setdefault('encoding','utf-8')
#~ kw.update(xml_declaration=False)
#~ kw.update(encoding='utf-8')
s = self.tostring(*args, **kw)
#~ return s
#~ return minidom.parseString(s).toprettyxml(indent=" ")
return prettify(s)
def addns(self, tag):
if self.targetNamespace is None or tag[0] == "{":
return tag
return self._ns + tag
def makeattribs(self, **kw):
#~ ns = self._element_maker._namespace
#~ if ns is None: return kw
xkw = dict()
for k, v in list(kw.items()):
k = getattr(self, k).args[0] # convert iname to tagname
xkw[self.addns(compatstr(k))] = compatstr(v)
return xkw
def create_element(self, tag, *children, **attrib):
nsattrib = self.makeattribs(**attrib)
tag = self.addns(tag)
elem = etree.Element(tag, nsattrib)
for item in children:
if isinstance(item, Promise):
item = force_text(item)
# assert_pure(item)
if isinstance(item, dict):
elem.attrib.update(self.makeattribs(**item))
elif isinstance(item, six.string_types):
# assert_pure(item)
#~ if len(elem) and len(elem[-1]) == 0:
if len(elem):
last = elem[-1]
last.tail = (last.tail or "") + item
else:
elem.text = (elem.text or "") + item
elif etree.iselement(item):
elem.append(item)
else:
raise TypeError("bad argument: %r" % item)
#~ print "20130805 added %s --> %s" % (item,self.tostring(elem))
return elem
def define_names(self, names):
if isinstance(names, six.string_types):
raise Exception("{} must now call itself split().".format(
self))
for tag in names:
iname = tag.replace("-", "_")
iname = iname.replace(".", "_")
#~ if iname in ('class','for','in','def'):
if iname in RESERVED_WORDS:
iname += "_"
#~ setattr(self,iname,getattr(self._element_maker,name))
p = partial(self.create_element, tag)
setattr(self, iname, p)
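        # Example of the mangling above: a tag such as "foo-bar.baz" becomes the
        # attribute name "foo_bar_baz", and a reserved word such as "class" becomes
        # "class_", so elements can be created via e.g. ns.foo_bar_baz(...) or ns.class_(...).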
    def getnsattr(self, elem, name):
        # self._element_maker is no longer created in __init__, so build the
        # qualified attribute name from the namespace computed there instead.
        if self.targetNamespace is None or name.startswith('{'):
            return elem.get(name)
        return elem.get(self._ns + name)
#~ def update_attribs(self,root,**kw):
def update(self, root, **kw):
root.attrib.update(self.makeattribs(**kw))
def add_child(self, parent, _name, *args, **kw):
ecl = getattr(self, _name)
#~ kw = self.makeattribs(**kw)
#~ print 20120420, kw
e = ecl(*args, **kw)
parent.append(e)
return e
def fromstring(self, s, **kwargs):
"""Build an element tree from the given XML source string.
This just forwards to the
:meth:`xml.etree.ElementTree.fromstring` library function.
See the `Parsing XML
<https://docs.python.org/2.7/library/xml.etree.elementtree.html#parsing-xml>`__
section of the Python docs.
"""
return etree.etree.fromstringlist([s], **kwargs)
def raw(self, *args):
"""Parses the given string into an XML Element."""
return RAW(*args)
RAW = etree.XML
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| [
"etgen.etree.iselement",
"etgen.etree.register_namespace",
"builtins.str",
"etgen.etree.ElementTree",
"django.utils.encoding.force_text",
"etgen.etree.Element",
"etgen.etree.etree.fromstringlist",
"doctest.testmod",
"etgen.etree.tostring",
"functools.partial",
"six.text_type"
] | [((8559, 8576), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (8574, 8576), False, 'import doctest\n'), ((1187, 1216), 'etgen.etree.tostring', 'etree.tostring', (['elem', '"""utf-8"""'], {}), "(elem, 'utf-8')\n", (1201, 1216), False, 'from etgen import etree\n'), ((2319, 2335), 'six.text_type', 'six.text_type', (['s'], {}), '(s)\n', (2332, 2335), False, 'import six\n'), ((2909, 2915), 'builtins.str', 'str', (['v'], {}), '(v)\n', (2912, 2915), False, 'from builtins import str\n'), ((4512, 4540), 'etgen.etree.iselement', 'etree.iselement', (['*args'], {}), '(*args, **kw)\n', (4527, 4540), False, 'from etgen import etree\n'), ((5955, 5983), 'etgen.etree.Element', 'etree.Element', (['tag', 'nsattrib'], {}), '(tag, nsattrib)\n', (5968, 5983), False, 'from etgen import etree\n'), ((8350, 8391), 'etgen.etree.etree.fromstringlist', 'etree.etree.fromstringlist', (['[s]'], {}), '([s], **kwargs)\n', (8376, 8391), False, 'from etgen import etree\n'), ((7326, 7359), 'functools.partial', 'partial', (['self.create_element', 'tag'], {}), '(self.create_element, tag)\n', (7333, 7359), False, 'from functools import partial\n'), ((1029, 1035), 'builtins.str', 'str', (['i'], {}), '(i)\n', (1032, 1035), False, 'from builtins import str\n'), ((3873, 3932), 'etgen.etree.register_namespace', 'etree.register_namespace', (['self.prefix', 'self.targetNamespace'], {}), '(self.prefix, self.targetNamespace)\n', (3897, 3932), False, 'from etgen import etree\n'), ((4875, 4901), 'etgen.etree.ElementTree', 'etree.ElementTree', (['element'], {}), '(element)\n', (4892, 4901), False, 'from etgen import etree\n'), ((6079, 6095), 'django.utils.encoding.force_text', 'force_text', (['item'], {}), '(item)\n', (6089, 6095), False, 'from django.utils.encoding import force_text\n'), ((6596, 6617), 'etgen.etree.iselement', 'etree.iselement', (['item'], {}), '(item)\n', (6611, 6617), False, 'from etgen import etree\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Class for working with point in frame
Author <NAME>, 2019
"""
import numpy as np
from typing import List, Union
class Point(object):
"""
This class describes 3D point.
"""
def __init__(self, coord_xyz: Union[List[float], np.ndarray], intensity: float = None):
"""
:param coord_xyz: iterable with 3 values
:param intensity: float value
"""
if type(coord_xyz) is not np.ndarray:
coord_xyz = np.array(coord_xyz, dtype=np.float64)
assert coord_xyz.size == 3
self.coord_xyz = coord_xyz # 3x,
self.coord_xyz_homogenous = np.append(self.coord_xyz, 1.0)
self.intensity = intensity # scalar
def print_info(self) -> None:
pass
def get_xyz(self) -> np.ndarray:
"""
:return: 3x,
"""
return self.coord_xyz
class FramePoint(Point):
"""
Describes point in a keyframe that has UV coordinates, inv depth and other properties
"""
def __init__(self,
coord_u: float,
coord_v: float,
inv_depth: float,
inv_depth_hessian: float,
max_rel_baseline: float,
num_good_res: float,
status: float,
neighb_intensity_info: Union[List[float], np.ndarray]):
"""
:param coord_u:
:param coord_v:
:param inv_depth:
:param inv_depth_hessian:
:param max_rel_baseline:
:param num_good_res:
:param status:
:param neighb_intensity_info:
"""
assert coord_u >= 0 # in pixels starting from 0, hence no negative
assert coord_v >= 0 # in pixels starting from 0, hence no negative
assert len(neighb_intensity_info) >= 5
# assert inv_depth > 0, 'Depth cannot be negative, because otherwise 3D coordinates cannot be computed.'
# TODO double check if we allow inv_depth=0?
self.coord_u = coord_u
self.coord_v = coord_v
self.inv_depth = inv_depth
self.inv_depth_hessian = inv_depth_hessian
self.max_rel_baseline = max_rel_baseline
self.num_good_res = num_good_res
self.status = status
self.neighb_intensity_info = neighb_intensity_info
self.intensity = neighb_intensity_info[4] # intensity information of current point being point 5 of 8
super(FramePoint, self).__init__([0, 0, 0], self.intensity)
def get_array_of_fields(self) -> List[float]:
array_fields = [self.coord_u, self.coord_v,
self.inv_depth, self.inv_depth_hessian,
self.max_rel_baseline, self.num_good_res,
self.status]
array_fields.extend(self.neighb_intensity_info)
array_fields.append(self.intensity)
assert len(array_fields) == 16
return array_fields
@staticmethod
def get_field_order() -> List[str]:
"""
:return: list of strings containing the order of fields for this structure
"""
fields = ['coord_u', 'coord_v',
'inv_depth', 'inv_depth_hessian',
'max_rel_baseline', 'num_good_res',
'status',
'neighb_intensity_info_0',
'neighb_intensity_info_1',
'neighb_intensity_info_2',
'neighb_intensity_info_3',
'neighb_intensity_info_4',
'neighb_intensity_info_5',
'neighb_intensity_info_6',
'neighb_intensity_info_7',
'intensity']
return fields
def coord_xyz_hom(self, calib_matrix: np.ndarray) -> np.ndarray:
"""
Get homogenous coordinates of the point given calibration matrix
:param calib_matrix: calibration matrix 3x3
:return: homogenous coordinates 4x1
"""
assert(calib_matrix.shape == (3, 3))
coord_xyz = np.dot(np.linalg.inv(calib_matrix), np.array([self.coord_u, self.coord_v, 1.0]) / self.inv_depth)
# compute 3D position
coord_xyz_homogenous = np.append(coord_xyz, 1.0)
return coord_xyz_homogenous
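    # Worked form of the back-projection above (added for illustration): for a
    # standard pinhole calibration matrix K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
    # the computation reduces to
    #     x = (u - cx) / (fx * inv_depth),  y = (v - cy) / (fy * inv_depth),
    #     z = 1 / inv_depth,
    # with a trailing 1 appended to form the homogeneous coordinate vector.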
@classmethod
def init_from_lines(cls, lines: List[str]) -> 'FramePoint':
"""
:param lines: list of strings
:return: frame point
"""
assert(len(lines) == 2)
point_info = list(map(float, lines[0].strip().split(',')))
assert len(point_info) > 6
coord_u = point_info[0]
coord_v = point_info[1]
inv_depth = point_info[2]
inv_depth_hessian = point_info[3]
max_rel_baseline = point_info[4]
num_good_res = point_info[5]
status = point_info[6]
intensity_strs = lines[1].strip().split(',')
neighb_intensity_info = list(map(int, intensity_strs[:-1]))
assert(len(neighb_intensity_info) == 8)
return cls(coord_u, coord_v, inv_depth, inv_depth_hessian,
max_rel_baseline, num_good_res, status, neighb_intensity_info)
| [
"numpy.append",
"numpy.array",
"numpy.linalg.inv"
] | [((669, 699), 'numpy.append', 'np.append', (['self.coord_xyz', '(1.0)'], {}), '(self.coord_xyz, 1.0)\n', (678, 699), True, 'import numpy as np\n'), ((4202, 4227), 'numpy.append', 'np.append', (['coord_xyz', '(1.0)'], {}), '(coord_xyz, 1.0)\n', (4211, 4227), True, 'import numpy as np\n'), ((517, 554), 'numpy.array', 'np.array', (['coord_xyz'], {'dtype': 'np.float64'}), '(coord_xyz, dtype=np.float64)\n', (525, 554), True, 'import numpy as np\n'), ((4049, 4076), 'numpy.linalg.inv', 'np.linalg.inv', (['calib_matrix'], {}), '(calib_matrix)\n', (4062, 4076), True, 'import numpy as np\n'), ((4078, 4121), 'numpy.array', 'np.array', (['[self.coord_u, self.coord_v, 1.0]'], {}), '([self.coord_u, self.coord_v, 1.0])\n', (4086, 4121), True, 'import numpy as np\n')] |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.flatpages import views as flatpageviews
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^pages/', include('django.contrib.flatpages.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^i18n/', include('django.conf.urls.i18n')),
# 3rd party
# Own Apps
url(r'^news/', include('news.urls', namespace='news')),
url(r'^kataloge/', include('downloads.urls', namespace='downloads')),
url(r'^produkte/', include('products.urls', namespace='products')),
url(r'^messen/', include('fairs.urls', namespace='fairs')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Named Staticpages
urlpatterns += [
url(r'^unternehmen/$', views.company, name='company'),
url(r'^impressum/$', flatpageviews.flatpage, {'url': '/impressum/'}, name='impressum'),
url(r'^datenschutz/$', flatpageviews.flatpage, {'url': '/datenschutz/'}, name='datenschutz'),
url(r'^(?P<url>.*/)$', flatpageviews.flatpage, name='page'),
]
| [
"django.conf.urls.include",
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((1000, 1061), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1006, 1061), False, 'from django.conf.urls.static import static\n'), ((1104, 1156), 'django.conf.urls.url', 'url', (['"""^unternehmen/$"""', 'views.company'], {'name': '"""company"""'}), "('^unternehmen/$', views.company, name='company')\n", (1107, 1156), False, 'from django.conf.urls import include, url\n'), ((1163, 1253), 'django.conf.urls.url', 'url', (['"""^impressum/$"""', 'flatpageviews.flatpage', "{'url': '/impressum/'}"], {'name': '"""impressum"""'}), "('^impressum/$', flatpageviews.flatpage, {'url': '/impressum/'}, name=\n 'impressum')\n", (1166, 1253), False, 'from django.conf.urls import include, url\n'), ((1255, 1350), 'django.conf.urls.url', 'url', (['"""^datenschutz/$"""', 'flatpageviews.flatpage', "{'url': '/datenschutz/'}"], {'name': '"""datenschutz"""'}), "('^datenschutz/$', flatpageviews.flatpage, {'url': '/datenschutz/'},\n name='datenschutz')\n", (1258, 1350), False, 'from django.conf.urls import include, url\n'), ((1354, 1412), 'django.conf.urls.url', 'url', (['"""^(?P<url>.*/)$"""', 'flatpageviews.flatpage'], {'name': '"""page"""'}), "('^(?P<url>.*/)$', flatpageviews.flatpage, name='page')\n", (1357, 1412), False, 'from django.conf.urls import include, url\n'), ((918, 981), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (924, 981), False, 'from django.conf.urls.static import static\n'), ((267, 303), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (270, 303), False, 'from django.conf.urls import include, url\n'), ((340, 380), 'django.conf.urls.include', 'include', (['"""django.contrib.flatpages.urls"""'], {}), "('django.contrib.flatpages.urls')\n", (347, 380), False, 'from django.conf.urls import include, url\n'), ((417, 441), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (424, 441), False, 'from django.conf.urls import include, url\n'), ((477, 509), 'django.conf.urls.include', 'include', (['"""django.conf.urls.i18n"""'], {}), "('django.conf.urls.i18n')\n", (484, 509), False, 'from django.conf.urls import include, url\n'), ((606, 644), 'django.conf.urls.include', 'include', (['"""news.urls"""'], {'namespace': '"""news"""'}), "('news.urls', namespace='news')\n", (613, 644), False, 'from django.conf.urls import include, url\n'), ((684, 732), 'django.conf.urls.include', 'include', (['"""downloads.urls"""'], {'namespace': '"""downloads"""'}), "('downloads.urls', namespace='downloads')\n", (691, 732), False, 'from django.conf.urls import include, url\n'), ((772, 818), 'django.conf.urls.include', 'include', (['"""products.urls"""'], {'namespace': '"""products"""'}), "('products.urls', namespace='products')\n", (779, 818), False, 'from django.conf.urls import include, url\n'), ((856, 896), 'django.conf.urls.include', 'include', (['"""fairs.urls"""'], {'namespace': '"""fairs"""'}), "('fairs.urls', namespace='fairs')\n", (863, 896), False, 'from django.conf.urls import include, url\n')] |
from utilities import *
import csv  # csv.writer is used below; import it explicitly rather than relying on the star import
from main_person_course_data import main as generate_person_course_data
try:
from custom_utilities import get_course_metadata
except ImportError:
from custom_utilities_example import get_course_metadata
def main():
print("---")
print("Starting generation of course metadata file ...")
# Ensure that the results directory exists
create_results_dir()
course_ids = get_course_ids()
# 1. Generate the course metadata file
all_course_metadata = get_course_metadata()
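    # Expected shape of all_course_metadata (inferred from its use below): a dict
    # keyed by course_id, where each value is a dict containing at least the keys
    # "semester", "course_launch", "course_id" and "4-way".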
courses_without_metadata = [course_id for course_id in course_ids if course_id not in all_course_metadata]
if len(courses_without_metadata) > 0:
print("""
Error: Some courses have no metadata entries. Consider:
- Addressing the cases for courses that should be included in the analysis in your get_course_metadata function.
- Adding courses that shouldn't be included in the analysis to the \"excluded_course_ids\" setting in settings.py
List of courses with no metadata:
{}""".format(courses_without_metadata))
return
output_file_path = SETTINGS["results_path"] + "course_metadata.csv"
f = open(output_file_path, "w")
csvw = csv.writer(f, delimiter=",")
header = ["semester", "course_launch", "course_id", "4-way"]
rows = [header]
for course_id in course_ids:
metadata = all_course_metadata[course_id]
rows.append([metadata[k] for k in header])
csvw.writerows(rows)
print("Generated successfully. Please find it in the results directory specified in your settings.py")
print("---")
# 2. Run the course-person data file generator
generate_person_course_data()
if __name__ == "__main__":
main()
| [
"custom_utilities_example.get_course_metadata",
"main_person_course_data.main"
] | [((505, 526), 'custom_utilities_example.get_course_metadata', 'get_course_metadata', ([], {}), '()\n', (524, 526), False, 'from custom_utilities_example import get_course_metadata\n'), ((1652, 1681), 'main_person_course_data.main', 'generate_person_course_data', ([], {}), '()\n', (1679, 1681), True, 'from main_person_course_data import main as generate_person_course_data\n')] |