| code (string, 22 – 1.05M chars) | apis (list, 1 – 3.31k items) | extract_api (string, 75 – 3.25M chars) |
---|---|---|
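Each record below repeats the same three fields: the raw code sample, the apis list of fully-qualified calls it makes, and an extract_api string of per-call tuples whose leading pair appears to be character offsets into the code. A minimal sketch of decoding one row, assuming the extract_api column is a Python-literal string and that the tuple layout inferred from the samples (offset span, qualified name, call text, parsed args/kwargs, argument source, name offsets, a boolean flag, import statement) holds in general:

import ast

def decode_row(code, apis, extract_api):
    """Decode one dataset row; the tuple field order is inferred, not an official schema."""
    calls = ast.literal_eval(extract_api)      # string column -> list of call tuples
    for call in calls:
        (start, end), qualified_name = call[0], call[1]
        snippet = code[start:end]              # should reproduce the recorded call site
        import_stmt = call[-1]                 # e.g. 'import pytest\n'
        print(qualified_name, repr(snippet), import_stmt.strip())
    return apis, calls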
import os
import pandas as pd
import pytest
from user_similarity_model.config.core import DATASET_DIR, config


@pytest.fixture()
def sample_local_data():
    """AI is creating summary for sample_local_data

    Returns:
        [Dict]: This function returns a dictionary with CSV files which
        in dataset folder. The data will be compared in tests against data
        that are pulled from Azure PostgreSQL server.
    """
    sample_data = {}
    for file in config.app_config.csv_files:
        sample_data[file[0:-4]] = pd.read_csv(os.path.join(DATASET_DIR, file))
    return sample_data
| [
"pytest.fixture",
"os.path.join"
]
| [((115, 131), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (129, 131), False, 'import pytest\n'), ((544, 575), 'os.path.join', 'os.path.join', (['DATASET_DIR', 'file'], {}), '(DATASET_DIR, file)\n', (556, 575), False, 'import os\n')] |
from django.apps import AppConfig
import logging
logger = logging.getLogger(__name__)


class WeatherConfig(AppConfig):
    name = 'weather'

    def ready(self):
        from forecastUpdater import updater
        updater.start()
| [
"logging.getLogger",
"forecastUpdater.updater.start"
]
| [((59, 86), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (76, 86), False, 'import logging\n'), ((216, 231), 'forecastUpdater.updater.start', 'updater.start', ([], {}), '()\n', (229, 231), False, 'from forecastUpdater import updater\n')] |
import logging
import os
from datetime import datetime
from inspect import signature, Parameter
from pathlib import Path
from pprint import pprint
from textwrap import dedent
from typing import Optional, Union
import fire
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN
from tensorflow.keras import Model
from spellnn import models
from spellnn.data import alphabet
from spellnn.data.alphabet import get_chars
from spellnn.data.processing import DataProcessor
from spellnn.data.util import nb_lines
from spellnn.layers.mapping import CharMapping
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
class Gym:
    def __init__(self):
        self.train_dataset: Optional[tf.data.Dataset] = None
        self.valid_dataset: Optional[tf.data.Dataset] = None
        self.char2int: Optional[CharMapping] = None
        self.model: Optional[Model] = None
        self.nb_train_samples: int = 0
        self.nb_valid_samples: int = 0
        self.batch_size = 0

    def construct_dataset(self, path: str, locale: str, batch_size: int = 32, validation_split: float = 0.3):
        pprint(locals())
        all_chars = [alphabet.START, alphabet.END] + get_chars(locale)
        char_weights = [0.5 if c.isalpha() and c.islower() else
                        0.2 if c.isalpha() else
                        0.1 if c not in {alphabet.START, alphabet.END} else
                        0 for c in all_chars]
        self.char2int = CharMapping(chars=all_chars, include_unknown=True)
        data_processor = DataProcessor(locale=locale, char2id=self.char2int,
                                       alphabet=all_chars, alphabet_weighs=char_weights)

        print('Calculating number of lines in the file...', end=' ')
        all_samples = nb_lines(path)
        print(all_samples)

        self.batch_size = batch_size
        self.nb_train_samples = int((1 - validation_split) * all_samples)
        self.nb_valid_samples = all_samples - self.nb_train_samples

        dataset = tf.data.TextLineDataset(path)

        self.train_dataset = dataset.take(self.nb_train_samples)
        self.train_dataset = self.train_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
        self.train_dataset = self.train_dataset.batch(batch_size, drop_remainder=True)
        self.train_dataset = self.train_dataset.map(
            lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
        self.train_dataset = self.train_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
        self.train_dataset = self.train_dataset.repeat()

        self.valid_dataset = dataset.skip(self.nb_train_samples)
        self.valid_dataset = self.valid_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
        self.valid_dataset = self.valid_dataset.batch(batch_size, drop_remainder=True)
        self.valid_dataset = self.valid_dataset.map(
            lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
        self.valid_dataset = self.valid_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
        self.valid_dataset = self.valid_dataset.repeat()
        return self

    def create_model(self, name):
        arguments = signature(getattr(models, name).__init__)
        arguments = {k: v.default for k, v in arguments.parameters.items()
                     if v.default is not Parameter.empty and k != 'self'}
        arguments['nb_symbols'] = len(self.char2int)
        arg_str = ', '.join([f'{k}=' + str(v) if type(v) != str else f'{k}=' '"' + str(v) + '"'
                             for k, v in arguments.items()])
        # print(arg_str)
        exec(dedent(f'''
        def create({arg_str}):
            self.model = {name}(**locals())
            return self
        create.__name__ = {name}.__name__
        create.__doc__ = {name}.__init__.__doc__
        setattr(self, create.__name__, create)
        '''), {'self': self, name: getattr(models, name), arg_str: arg_str})
        return getattr(self, name)

    def train(self, epochs: int, monitor_metric='val_acc', patience: int = 5,
              steps_per_epoch: Union[int, str] = 'auto', validation_steps: Union[int, str] = 'auto',
              log_dir: str = 'logs',
              use_multiprocessing: bool = False):
        pprint(locals())
        log_dir = Path(log_dir).joinpath(datetime.now().replace(microsecond=0).isoformat())
        model_path = Path(log_dir).joinpath('checkpoints').joinpath('best-model.h5py')
        model_path = str(model_path)

        if steps_per_epoch == 'auto':
            steps_per_epoch = self.nb_train_samples // self.batch_size
        if validation_steps == 'auto':
            validation_steps = self.nb_valid_samples // self.batch_size

        self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
        history = self.model.fit_generator(
            self.train_dataset.as_numpy_iterator(), steps_per_epoch=steps_per_epoch,
            validation_data=self.valid_dataset.as_numpy_iterator(), validation_steps=validation_steps,
            epochs=epochs,
            use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1,
            callbacks=[
                TerminateOnNaN(),
                TensorBoard(log_dir=log_dir),
                ModelCheckpoint(model_path, monitor=monitor_metric, verbose=1, save_best_only=True),
                EarlyStopping(monitor=monitor_metric, patience=patience),
            ])
        return history.history


if __name__ == '__main__':
    cli = Gym()
    fire.Fire(cli)
| [
"logging.getLogger",
"textwrap.dedent",
"tensorflow.keras.callbacks.TerminateOnNaN",
"spellnn.data.processing.DataProcessor",
"tensorflow.keras.callbacks.TensorBoard",
"fire.Fire",
"pathlib.Path",
"tensorflow.numpy_function",
"tensorflow.data.TextLineDataset",
"spellnn.data.util.nb_lines",
"tensorflow.keras.callbacks.EarlyStopping",
"datetime.datetime.now",
"os.cpu_count",
"tensorflow.keras.callbacks.ModelCheckpoint",
"spellnn.data.alphabet.get_chars",
"spellnn.layers.mapping.CharMapping"
]
| [((5759, 5773), 'fire.Fire', 'fire.Fire', (['cli'], {}), '(cli)\n', (5768, 5773), False, 'import fire\n'), ((674, 705), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (691, 705), False, 'import logging\n'), ((1555, 1605), 'spellnn.layers.mapping.CharMapping', 'CharMapping', ([], {'chars': 'all_chars', 'include_unknown': '(True)'}), '(chars=all_chars, include_unknown=True)\n', (1566, 1605), False, 'from spellnn.layers.mapping import CharMapping\n'), ((1631, 1736), 'spellnn.data.processing.DataProcessor', 'DataProcessor', ([], {'locale': 'locale', 'char2id': 'self.char2int', 'alphabet': 'all_chars', 'alphabet_weighs': 'char_weights'}), '(locale=locale, char2id=self.char2int, alphabet=all_chars,\n alphabet_weighs=char_weights)\n', (1644, 1736), False, 'from spellnn.data.processing import DataProcessor\n'), ((1864, 1878), 'spellnn.data.util.nb_lines', 'nb_lines', (['path'], {}), '(path)\n', (1872, 1878), False, 'from spellnn.data.util import nb_lines\n'), ((2105, 2134), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['path'], {}), '(path)\n', (2128, 2134), True, 'import tensorflow as tf\n'), ((1279, 1296), 'spellnn.data.alphabet.get_chars', 'get_chars', (['locale'], {}), '(locale)\n', (1288, 1296), False, 'from spellnn.data.alphabet import get_chars\n'), ((3852, 4123), 'textwrap.dedent', 'dedent', (['f"""\n def create({arg_str}):\n self.model = {name}(**locals())\n return self\n create.__name__ = {name}.__name__\n create.__doc__ = {name}.__init__.__doc__\n setattr(self, create.__name__, create)\n """'], {}), '(\n f"""\n def create({arg_str}):\n self.model = {name}(**locals())\n return self\n create.__name__ = {name}.__name__\n create.__doc__ = {name}.__init__.__doc__\n setattr(self, create.__name__, create)\n """\n )\n', (3858, 4123), False, 'from textwrap import dedent\n'), ((2475, 2574), 'tensorflow.numpy_function', 'tf.numpy_function', ([], {'func': 'data_processor.process_batch', 'inp': '[b]', 'Tout': "['int32', 'int32', 'int32']"}), "(func=data_processor.process_batch, inp=[b], Tout=['int32',\n 'int32', 'int32'])\n", (2492, 2574), True, 'import tensorflow as tf\n'), ((3077, 3176), 'tensorflow.numpy_function', 'tf.numpy_function', ([], {'func': 'data_processor.process_batch', 'inp': '[b]', 'Tout': "['int32', 'int32', 'int32']"}), "(func=data_processor.process_batch, inp=[b], Tout=['int32',\n 'int32', 'int32'])\n", (3094, 3176), True, 'import tensorflow as tf\n'), ((4523, 4536), 'pathlib.Path', 'Path', (['log_dir'], {}), '(log_dir)\n', (4527, 4536), False, 'from pathlib import Path\n'), ((5365, 5379), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (5377, 5379), False, 'import os\n'), ((5425, 5441), 'tensorflow.keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), '()\n', (5439, 5441), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((5459, 5487), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (5470, 5487), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((5505, 5592), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_path'], {'monitor': 'monitor_metric', 'verbose': '(1)', 'save_best_only': '(True)'}), '(model_path, monitor=monitor_metric, verbose=1,\n save_best_only=True)\n', (5520, 5592), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((5606, 
5662), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'monitor_metric', 'patience': 'patience'}), '(monitor=monitor_metric, patience=patience)\n', (5619, 5662), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN\n'), ((4618, 4631), 'pathlib.Path', 'Path', (['log_dir'], {}), '(log_dir)\n', (4622, 4631), False, 'from pathlib import Path\n'), ((4546, 4560), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4558, 4560), False, 'from datetime import datetime\n')] |
import fileinput
counts = {}
for line in fileinput.input():
    line = line.strip()
    p1, p2 = line.split('>')
    p1 = p1[:-2]
    x1, y1 = p1.split(',')
    x1 = int(x1)
    y1 = int(y1)
    p2 = p2[1:]
    x2, y2 = p2.split(',')
    x2 = int(x2)
    y2 = int(y2)
    if x1 == x2:
        dx = 0
    elif x1 > x2:
        dx = -1
    else:
        dx = 1
    if y1 == y2:
        dy = 0
    elif y1 > y2:
        dy = -1
    else:
        dy = 1
    x = x1
    y = y1
    while True:
        pt = (x, y)
        counts[pt] = counts.get(pt, 0) + 1
        if x == x2 and y == y2:
            break
        x += dx
        y += dy

n = 0
for _, ct in counts.items():
    if ct > 1:
        n += 1
print(n)
| [
"fileinput.input"
]
| [((42, 59), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (57, 59), False, 'import fileinput\n')] |
import logging
from web3 import Web3
import sys
import time
import meditation.meditation as meditation
if __name__ == "__main__":
    log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'
    logger = logging.getLogger("DFK-meditation")
    logger.setLevel(logging.DEBUG)
    logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)

    rpc_server = 'https://api.harmony.one'
    logger.info("Using RPC server " + rpc_server)

    private_key = None  # set private key
    account_address = '0x2E7669F61eA77F02445A015FBdcFe2DE47083E02'
    gas_price_gwei = 10
    tx_timeout_seconds = 30

    w3 = Web3(Web3.HTTPProvider(rpc_server))

    active_meditations = meditation.get_active_meditations(account_address, rpc_server)
    logger.info("Pending meditation on address " + str(account_address) + ": " + str(active_meditations))

    level = 1
    hero_id = 1
    required_runes = meditation.get_required_runes(level, rpc_server)
    meditation.start_meditation(1, meditation.stat2id('strength'), meditation.stat2id('endurance'), meditation.stat2id('luck'),
                                meditation.ZERO_ADDRESS, private_key, w3.eth.getTransactionCount(account_address),
                                gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
    hero_meditation = meditation.get_hero_meditation(hero_id, rpc_server)
    logger.info("Pending meditation " + str(hero_meditation))
    time.sleep(5)
    meditation.complete_meditation(hero_id, private_key, w3.eth.getTransactionCount(account_address),
                                   gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
| [
"logging.getLogger",
"logging.basicConfig",
"meditation.meditation.get_hero_meditation",
"meditation.meditation.stat2id",
"meditation.meditation.get_active_meditations",
"time.sleep",
"meditation.meditation.get_required_runes",
"web3.Web3.HTTPProvider"
]
| [((213, 248), 'logging.getLogger', 'logging.getLogger', (['"""DFK-meditation"""'], {}), "('DFK-meditation')\n", (230, 248), False, 'import logging\n'), ((288, 365), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_format', 'stream': 'sys.stdout'}), '(level=logging.INFO, format=log_format, stream=sys.stdout)\n', (307, 365), False, 'import logging\n'), ((693, 755), 'meditation.meditation.get_active_meditations', 'meditation.get_active_meditations', (['account_address', 'rpc_server'], {}), '(account_address, rpc_server)\n', (726, 755), True, 'import meditation.meditation as meditation\n'), ((912, 960), 'meditation.meditation.get_required_runes', 'meditation.get_required_runes', (['level', 'rpc_server'], {}), '(level, rpc_server)\n', (941, 960), True, 'import meditation.meditation as meditation\n'), ((1312, 1363), 'meditation.meditation.get_hero_meditation', 'meditation.get_hero_meditation', (['hero_id', 'rpc_server'], {}), '(hero_id, rpc_server)\n', (1342, 1363), True, 'import meditation.meditation as meditation\n'), ((1428, 1441), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1438, 1441), False, 'import time\n'), ((636, 665), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', (['rpc_server'], {}), '(rpc_server)\n', (653, 665), False, 'from web3 import Web3\n'), ((996, 1026), 'meditation.meditation.stat2id', 'meditation.stat2id', (['"""strength"""'], {}), "('strength')\n", (1014, 1026), True, 'import meditation.meditation as meditation\n'), ((1028, 1059), 'meditation.meditation.stat2id', 'meditation.stat2id', (['"""endurance"""'], {}), "('endurance')\n", (1046, 1059), True, 'import meditation.meditation as meditation\n'), ((1061, 1087), 'meditation.meditation.stat2id', 'meditation.stat2id', (['"""luck"""'], {}), "('luck')\n", (1079, 1087), True, 'import meditation.meditation as meditation\n')] |
import pygame
import random
pygame.init()
clock = pygame.time.Clock()
fps = 60
#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False
#define fonts
font = pygame.font.SysFont('Times New Roman', 26)
#define colours
red = (255, 0, 0)
green = (0, 255, 0)
#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
#create function for drawing text
def draw_text(text, font, text_col, x, y):
    img = font.render(text, True, text_col)
    screen.blit(img, (x, y))

#function for drawing background
def draw_bg():
    screen.blit(background_img, (0, 0))

#function for drawing panel
def draw_panel():
    #draw panel rectangle
    screen.blit(panel_img, (0, screen_height - bottom_panel))
    #show knight stats
    draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
    for count, i in enumerate(bandit_list):
        #show name and health
        draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)
#fighter class
class Fighter():
    def __init__(self, x, y, name, max_hp, strength, potions):
        self.name = name
        self.max_hp = max_hp
        self.hp = max_hp
        self.strength = strength
        self.start_potions = potions
        self.potions = potions
        self.alive = True
        self.animation_list = []
        self.frame_index = 0
        self.action = 0#0:idle, 1:attack, 2:hurt, 3:dead
        self.update_time = pygame.time.get_ticks()
        #load idle images
        temp_list = []
        for i in range(8):
            img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
            img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
            temp_list.append(img)
        self.animation_list.append(temp_list)
        #load attack images
        temp_list = []
        for i in range(8):
            img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
            img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
            temp_list.append(img)
        self.animation_list.append(temp_list)
        self.image = self.animation_list[self.action][self.frame_index]
        self.rect = self.image.get_rect()
        self.rect.center = (x, y)

    def update(self):
        animation_cooldown = 100
        #handle animation
        #update image
        self.image = self.animation_list[self.action][self.frame_index]
        #check if enough time has passed since the last update
        if pygame.time.get_ticks() - self.update_time > animation_cooldown:
            self.update_time = pygame.time.get_ticks()
            self.frame_index += 1
        #if the animation has run out then reset back to the start
        if self.frame_index >= len(self.animation_list[self.action]):
            self.idle()

    def idle(self):
        #set variables to attack animation
        self.action = 0
        self.frame_index = 0
        self.update_time = pygame.time.get_ticks()

    def attack(self, target):
        #deal damage to enemy
        rand = random.randint(-5, 5)
        damage = self.strength + rand
        target.hp -= damage
        #check if target has died
        if target.hp < 1:
            target.hp = 0
            target.alive = False
        #set variables to attack animation
        self.action = 1
        self.frame_index = 0
        self.update_time = pygame.time.get_ticks()

    def draw(self):
        screen.blit(self.image, self.rect)


class HealthBar():
    def __init__(self, x, y, hp, max_hp):
        self.x = x
        self.y = y
        self.hp = hp
        self.max_hp = max_hp

    def draw(self, hp):
        #update with new health
        self.hp = hp
        #calculate health ratio
        ratio = self.hp / self.max_hp
        pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
        pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
run = True
while run:

    clock.tick(fps)

    #draw background
    draw_bg()

    #draw panel
    draw_panel()
    knight_health_bar.draw(knight.hp)
    bandit1_health_bar.draw(bandit1.hp)
    bandit2_health_bar.draw(bandit2.hp)

    #draw fighters
    knight.update()
    knight.draw()
    for bandit in bandit_list:
        bandit.update()
        bandit.draw()

    #control player actions
    #reset action variables
    attack = False
    potion = False
    target = None
    #make sure mouse is visible
    pygame.mouse.set_visible(True)
    pos = pygame.mouse.get_pos()
    for count, bandit in enumerate(bandit_list):
        if bandit.rect.collidepoint(pos):
            #hide mouse
            pygame.mouse.set_visible(False)
            #show sword in place of mouse cursor
            screen.blit(sword_img, pos)
            if clicked == True:
                attack = True
                target = bandit_list[count]

    #player action
    if knight.alive == True:
        if current_fighter == 1:
            action_cooldown += 1
            if action_cooldown >= action_wait_time:
                #look for player action
                #attack
                if attack == True and target != None:
                    knight.attack(target)
                    current_fighter += 1
                    action_cooldown = 0

    #enemy action
    for count, bandit in enumerate(bandit_list):
        if current_fighter == 2 + count:
            if bandit.alive == True:
                action_cooldown += 1
                if action_cooldown >= action_wait_time:
                    #attack
                    bandit.attack(knight)
                    current_fighter += 1
                    action_cooldown = 0
            else:
                current_fighter += 1

    #if all fighters have had a turn then reset
    if current_fighter > total_fighters:
        current_fighter = 1

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            clicked = True
        else:
            clicked = False

    pygame.display.update()
pygame.quit()
| [
"random.randint",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.time.get_ticks",
"pygame.display.set_mode",
"pygame.mouse.get_pos",
"pygame.time.Clock",
"pygame.mouse.set_visible",
"pygame.draw.rect",
"pygame.display.set_caption",
"pygame.image.load",
"pygame.display.update",
"pygame.font.SysFont"
]
| [((29, 42), 'pygame.init', 'pygame.init', ([], {}), '()\n', (40, 42), False, 'import pygame\n'), ((52, 71), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (69, 71), False, 'import pygame\n'), ((178, 232), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(screen_width, screen_height)'], {}), '((screen_width, screen_height))\n', (201, 232), False, 'import pygame\n'), ((233, 269), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Battle"""'], {}), "('Battle')\n", (259, 269), False, 'import pygame\n'), ((445, 487), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Times New Roman"""', '(26)'], {}), "('Times New Roman', 26)\n", (464, 487), False, 'import pygame\n'), ((6178, 6191), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6189, 6191), False, 'import pygame\n'), ((4919, 4949), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (4943, 4949), False, 'import pygame\n'), ((4957, 4979), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (4977, 4979), False, 'import pygame\n'), ((5996, 6014), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6012, 6014), False, 'import pygame\n'), ((6153, 6176), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6174, 6176), False, 'import pygame\n'), ((592, 642), 'pygame.image.load', 'pygame.image.load', (['"""img/Background/background.png"""'], {}), "('img/Background/background.png')\n", (609, 642), False, 'import pygame\n'), ((684, 724), 'pygame.image.load', 'pygame.image.load', (['"""img/Icons/panel.png"""'], {}), "('img/Icons/panel.png')\n", (701, 724), False, 'import pygame\n'), ((766, 806), 'pygame.image.load', 'pygame.image.load', (['"""img/Icons/sword.png"""'], {}), "('img/Icons/sword.png')\n", (783, 806), False, 'import pygame\n'), ((1851, 1874), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (1872, 1874), False, 'import pygame\n'), ((3147, 3170), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (3168, 3170), False, 'import pygame\n'), ((3233, 3254), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (3247, 3254), False, 'import random\n'), ((3497, 3520), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (3518, 3520), False, 'import pygame\n'), ((3826, 3882), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'red', '(self.x, self.y, 150, 20)'], {}), '(screen, red, (self.x, self.y, 150, 20))\n', (3842, 3882), False, 'import pygame\n'), ((3885, 3951), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'green', '(self.x, self.y, 150 * ratio, 20)'], {}), '(screen, green, (self.x, self.y, 150 * ratio, 20))\n', (3901, 3951), False, 'import pygame\n'), ((1942, 1992), 'pygame.image.load', 'pygame.image.load', (['f"""img/{self.name}/Idle/{i}.png"""'], {}), "(f'img/{self.name}/Idle/{i}.png')\n", (1959, 1992), False, 'import pygame\n'), ((2209, 2261), 'pygame.image.load', 'pygame.image.load', (['f"""img/{self.name}/Attack/{i}.png"""'], {}), "(f'img/{self.name}/Attack/{i}.png')\n", (2226, 2261), False, 'import pygame\n'), ((2838, 2861), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (2859, 2861), False, 'import pygame\n'), ((5080, 5111), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (5104, 5111), False, 'import pygame\n'), ((2751, 2774), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (2772, 2774), False, 'import pygame\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: test_uidattr
# Purpose: Test driver for module 'uidattr'
#
# Author: <NAME> (<EMAIL>)
#
# Copyright: (c) 2018 <NAME>
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
"""Test driver for module 'uidattr'"""
import unittest
from uuid import uuid1
from camd3.infrastructure.component import (
Component, register_utility, UniqueIdAttribute)
from camd3.infrastructure.component.idfactories import (
UUIDGenerator, uuid_generator)
# factory for UUIDs
def custom_uuid_generator() -> UUIDGenerator:  # noqa: D103
    while True:
        yield uuid1()


class ExplID(Component):
    id = UniqueIdAttribute(uid_gen=custom_uuid_generator())

    def __init__(self):
        self.__class__.id.set_once(self)


class ImplID(Component):
    id = UniqueIdAttribute()

    def __init__(self):
        self.__class__.id.set_once(self)


class UniqueIdAttributeTest(unittest.TestCase):

    def setUp(self):
        register_utility(uuid_generator(), UUIDGenerator)
        self.cid = ImplID()

    def test_init(self):
        cid = ImplID()
        self.assertIsNotNone(cid.id)
        self.assertIsNotNone(cid._id)

    def test_uniqueness(self):
        ids = {self.cid.id}
        for i in range(10):
            cid = ExplID()
            self.assertNotIn(cid.id, ids)
            ids.add(cid.id)


if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| [
"unittest.main",
"camd3.infrastructure.component.UniqueIdAttribute",
"camd3.infrastructure.component.idfactories.uuid_generator",
"uuid.uuid1"
]
| [((972, 991), 'camd3.infrastructure.component.UniqueIdAttribute', 'UniqueIdAttribute', ([], {}), '()\n', (989, 991), False, 'from camd3.infrastructure.component import Component, register_utility, UniqueIdAttribute\n'), ((1606, 1621), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1619, 1621), False, 'import unittest\n'), ((773, 780), 'uuid.uuid1', 'uuid1', ([], {}), '()\n', (778, 780), False, 'from uuid import uuid1\n'), ((1155, 1171), 'camd3.infrastructure.component.idfactories.uuid_generator', 'uuid_generator', ([], {}), '()\n', (1169, 1171), False, 'from camd3.infrastructure.component.idfactories import UUIDGenerator, uuid_generator\n')] |
#!//anaconda/envs/py36/bin/python
#
# File name: kmc_pld.py
# Date: 2018/08/03 09:07
# Author: <NAME>
#
# Description:
#
import numpy as np
from collections import Counter
class EventTree:
    """
    Class maintaining a binary tree for random event type lookup
    and arrays for choosing specific event.
    """

    def __init__(self, rates, events):
        self.rates = rates
        self.events = events
        self.__setup()

    def __build_tree(self, e_ratio):
        self.event_tree = []

        # create event ratio array level 0 - bottom
        if len(e_ratio) % 2 == 1:
            e_ratio.extend([0.0])

        # create the bottom level (rates*numbers)
        self.event_tree.append(np.array(e_ratio))

        # create partial summs (iteratively) up to the 2nd highest level
        while len(e_ratio) > 2:
            e_ratio = [e_ratio[i]+e_ratio[i+1] for i in range(0, len(e_ratio), 2)]
            if len(e_ratio) % 2 == 1:
                e_ratio.extend([0.0])
            self.event_tree.append(np.array(e_ratio))

        # create top level = sum of all rates
        self.event_tree.append(np.array(sum(e_ratio)))

    def __setup(self):
        # Get dictionary of event type counts
        e_counts = Counter([e['type'] for e in self.events])
        print(e_counts)

        # create a list of events based on event types
        self.event_counts = [[] for _ in range(len(self.rates))]
        for e in self.events:
            self.event_counts[e['type']].append(e)

        e_ratio = [e_counts.get(t, 0)*r for t, r in enumerate(self.rates)]
        print('e_ratio', e_ratio)

        self.__build_tree(e_ratio)

    def update_events(self, old_events, new_events):
        """
        Update tree: remove old events and add new events
        """
        pass

    def find_event(self):
        """Find and return an event"""

        # generate a random number [0,Rs)
        q = self.Rs*np.random.random()

        # cycle through levels (top->down)
        # start with top-level child (k-2) end with level above bottom (1)
        j = 0
        for k in range(len(self.event_tree)-2, 0, -1):
            # left child value
            left = self.event_tree[k][j]
            if q < left:
                j = 2*j
            else:
                q -= left
                j = 2*j + 1

        # bottom level - return selected event type
        if q < self.event_tree[0][j]:
            event_type = self.events[j]
        else:
            event_type = self.events[j+1]

        # select a random event index of a given type
        event_number = np.random.randint(len(self.event_counts[event_type]))
        # get the event object (event_counts is an attribute, so qualify with self)
        event = self.event_counts[event_type][event_number]
        return event
| [
"collections.Counter",
"numpy.array",
"numpy.random.random"
]
| [((1264, 1305), 'collections.Counter', 'Counter', (["[e['type'] for e in self.events]"], {}), "([e['type'] for e in self.events])\n", (1271, 1305), False, 'from collections import Counter\n'), ((724, 741), 'numpy.array', 'np.array', (['e_ratio'], {}), '(e_ratio)\n', (732, 741), True, 'import numpy as np\n'), ((1958, 1976), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1974, 1976), True, 'import numpy as np\n'), ((1044, 1061), 'numpy.array', 'np.array', (['e_ratio'], {}), '(e_ratio)\n', (1052, 1061), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 <NAME>, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from odf.namespaces import METANS
from odf.element import Element
# Autogenerated
def AutoReload(**args):
    return Element(qname = (METANS,'auto-reload'), **args)

def CreationDate(**args):
    return Element(qname = (METANS,'creation-date'), **args)

def DateString(**args):
    return Element(qname = (METANS,'date-string'), **args)

def DocumentStatistic(**args):
    return Element(qname = (METANS,'document-statistic'), **args)

def EditingCycles(**args):
    return Element(qname = (METANS,'editing-cycles'), **args)

def EditingDuration(**args):
    return Element(qname = (METANS,'editing-duration'), **args)

def Generator(**args):
    return Element(qname = (METANS,'generator'), **args)

def HyperlinkBehaviour(**args):
    return Element(qname = (METANS,'hyperlink-behaviour'), **args)

def InitialCreator(**args):
    return Element(qname = (METANS,'initial-creator'), **args)

def Keyword(**args):
    return Element(qname = (METANS,'keyword'), **args)

def PrintDate(**args):
    return Element(qname = (METANS,'print-date'), **args)

def PrintedBy(**args):
    return Element(qname = (METANS,'printed-by'), **args)

def Template(**args):
    args.setdefault('type', 'simple')
    return Element(qname = (METANS,'template'), **args)

def UserDefined(**args):
    return Element(qname = (METANS,'user-defined'), **args)
| [
"odf.element.Element"
]
| [((957, 1003), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'auto-reload')"}), "(qname=(METANS, 'auto-reload'), **args)\n", (964, 1003), False, 'from odf.element import Element\n'), ((1043, 1091), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'creation-date')"}), "(qname=(METANS, 'creation-date'), **args)\n", (1050, 1091), False, 'from odf.element import Element\n'), ((1129, 1175), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'date-string')"}), "(qname=(METANS, 'date-string'), **args)\n", (1136, 1175), False, 'from odf.element import Element\n'), ((1220, 1273), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'document-statistic')"}), "(qname=(METANS, 'document-statistic'), **args)\n", (1227, 1273), False, 'from odf.element import Element\n'), ((1314, 1363), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'editing-cycles')"}), "(qname=(METANS, 'editing-cycles'), **args)\n", (1321, 1363), False, 'from odf.element import Element\n'), ((1406, 1457), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'editing-duration')"}), "(qname=(METANS, 'editing-duration'), **args)\n", (1413, 1457), False, 'from odf.element import Element\n'), ((1494, 1538), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'generator')"}), "(qname=(METANS, 'generator'), **args)\n", (1501, 1538), False, 'from odf.element import Element\n'), ((1584, 1638), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'hyperlink-behaviour')"}), "(qname=(METANS, 'hyperlink-behaviour'), **args)\n", (1591, 1638), False, 'from odf.element import Element\n'), ((1680, 1730), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'initial-creator')"}), "(qname=(METANS, 'initial-creator'), **args)\n", (1687, 1730), False, 'from odf.element import Element\n'), ((1765, 1807), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'keyword')"}), "(qname=(METANS, 'keyword'), **args)\n", (1772, 1807), False, 'from odf.element import Element\n'), ((1844, 1889), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'print-date')"}), "(qname=(METANS, 'print-date'), **args)\n", (1851, 1889), False, 'from odf.element import Element\n'), ((1926, 1971), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'printed-by')"}), "(qname=(METANS, 'printed-by'), **args)\n", (1933, 1971), False, 'from odf.element import Element\n'), ((2045, 2088), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'template')"}), "(qname=(METANS, 'template'), **args)\n", (2052, 2088), False, 'from odf.element import Element\n'), ((2127, 2174), 'odf.element.Element', 'Element', ([], {'qname': "(METANS, 'user-defined')"}), "(qname=(METANS, 'user-defined'), **args)\n", (2134, 2174), False, 'from odf.element import Element\n')] |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes
# Initialize
device = select_device('')
half = device.type != 'cpu' # half precision only supported on CUDA
def prepare_input(img1, img_size=416, half=True):
    img2 = cv2.resize(img1, (img_size, img_size))  # W x H
    img2 = img2.transpose(2,0,1)
    img2 = img2[np.newaxis, ...]
    img2 = torch.from_numpy(img2).to(device)  # torch image is ch x H x W
    img2 = img2.half() if half else img2.float()  # FP16 input when the model was converted with model.half()
    img2 /= 255.0
    return img2
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0,1,2,3,4,5]
class_names = ["blossom_end_rot", "graymold","powdery_mildew","spider_mite",
"spotting_disease", "snails_and_slugs"]
# deleting files in op_dir
filelist = [ f for f in os.listdir(out)]# if f.endswith(".png") ]
for f in tqdm(filelist, desc = 'Deleting old files fro directory'):
    os.remove(os.path.join(out, f))
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
    model.half()  # to FP16
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
# Run inference
if device.type != 'cpu':
    model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
#%%
for i in trange(len(img_paths)):
    path = img_paths[i]
    img1 = cv2.imread(path)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = img1.shape
    img2 = prepare_input(img1, 416, half)
    # get file name
    name = os.path.basename(path)[:-4]
    # Inference
    t1 = time_synchronized()
    pred = model(img2, augment=False)[0]
    # Apply NMS
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
    if pred[0] is not None:
        boxes = pred[0].cpu().detach().numpy()  # <xmin><ymin><xmax><ymax><confd><class_id>
    else:
        boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1,6)  # dummy values
    coords_minmax = np.zeros((boxes.shape[0], 4))  # droping 5th value
    confd = np.zeros((boxes.shape[0], 1))
    class_ids = np.zeros((boxes.shape[0], 1))
    # assign
    coords_minmax = boxes[:,0:4]  # coords
    confd = boxes[:,4]  # confidence
    class_ids = boxes[:,5]  # class id
    coords_xyminmax = []
    det_classes = []
    for i in range(boxes.shape[0]):
        coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
        det_classes.append(class_names[int(class_ids[i])])
    all_bounding_boxnind = []
    for i in range(boxes.shape[0]):
        bounding_box = [0.0] * 6
        bounding_box[0] = det_classes[i]
        bounding_box[1] = confd[i]
        bounding_box[2] = coords_xyminmax[i][0]
        bounding_box[3] = coords_xyminmax[i][1]
        bounding_box[4] = coords_xyminmax[i][2]
        bounding_box[5] = coords_xyminmax[i][3]
        bounding_box = str(bounding_box)[1:-1]  # remove square brackets
        bounding_box = bounding_box.replace("'",'')  # removing inverted commas around class name
        bounding_box = "".join(bounding_box.split())  # remove spaces in between **here dont give space inbetween the inverted commas "".
        all_bounding_boxnind.append(bounding_box)
    all_bounding_boxnind = ' '.join(map(str, all_bounding_boxnind))  # convert list to string
    all_bounding_boxnind = list(all_bounding_boxnind.split(' '))  # convert strin to list
    # replacing commas with spaces
    for i in range(len(all_bounding_boxnind)):
        all_bounding_boxnind[i] = all_bounding_boxnind[i].replace(',',' ')
    for i in range(len(all_bounding_boxnind)):
        # check if file exiscts else make new
        with open(out +'{}.txt'.format(name), "a+") as file_object:
            # Move read cursor to the start of file.
            file_object.seek(0)
            # If file is not empty then append '\n'
            data = file_object.read(100)
            if len(data) > 0:
                file_object.write("\n")
            # Append text at the end of file
            file_object.write(all_bounding_boxnind[i])
#%%
import glob, random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
img_path = random.choice(img_paths)
img1 = cv2.imread(img_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
coords_minmax = np.zeros((boxes.shape[0], 4)) # droping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
    coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
    det_classes.append(class_names[int(class_ids[i])])
t = np.asarray(coords_xyminmax)
op = draw_boxes(img1, confd, t, det_classes, class_names, order='xy_minmax', analysis=False)
plt.imshow(op)
print('='*50)
print('Image Name: ', os.path.basename(img_path),img1.shape)
print('\nClass_name ', '| B_box Coords ', '| Confidence')
print('_'*50)
for k in range(len(det_classes)):
    print(det_classes[k], t[k], confd[k])
print('='*50)
| [
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"matplotlib.pyplot.imshow",
"os.listdir",
"numpy.asarray",
"torch.cuda.memory_reserved",
"my_utils.xyxy_2_xyxyo",
"torch.cuda.current_device",
"glob.glob",
"random.choice",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"utils.torch_utils.time_synchronized",
"torch.cuda.get_device_name",
"models.experimental.attempt_load",
"my_utils.draw_boxes",
"utils.torch_utils.select_device",
"torch.cuda.memory_allocated",
"tqdm.tqdm",
"os.path.join",
"numpy.zeros",
"utils.general.non_max_suppression",
"os.path.basename",
"torch.zeros",
"torch.rand"
]
| [((64, 78), 'torch.rand', 'torch.rand', (['(10)'], {}), '(10)\n', (74, 78), False, 'import torch\n'), ((1189, 1206), 'utils.torch_utils.select_device', 'select_device', (['""""""'], {}), "('')\n", (1202, 1206), False, 'from utils.torch_utils import select_device, load_classifier, time_synchronized\n'), ((2183, 2238), 'tqdm.tqdm', 'tqdm', (['filelist'], {'desc': '"""Deleting old files fro directory"""'}), "(filelist, desc='Deleting old files fro directory')\n", (2187, 2238), False, 'from tqdm import tqdm, trange\n'), ((2300, 2342), 'models.experimental.attempt_load', 'attempt_load', (['weights'], {'map_location': 'device'}), '(weights, map_location=device)\n', (2312, 2342), False, 'from models.experimental import attempt_load\n'), ((2492, 2534), 'models.experimental.attempt_load', 'attempt_load', (['weights'], {'map_location': 'device'}), '(weights, map_location=device)\n', (2504, 2534), False, 'from models.experimental import attempt_load\n'), ((6058, 6082), 'random.choice', 'random.choice', (['img_paths'], {}), '(img_paths)\n', (6071, 6082), False, 'import glob, random\n'), ((6092, 6112), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (6102, 6112), False, 'import os, cv2\n'), ((6120, 6157), 'cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_BGR2RGB'], {}), '(img1, cv2.COLOR_BGR2RGB)\n', (6132, 6157), False, 'import os, cv2\n'), ((6284, 6369), 'utils.general.non_max_suppression', 'non_max_suppression', (['pred', 'conf_thres', 'iou_thres'], {'classes': 'classes', 'agnostic': '(True)'}), '(pred, conf_thres, iou_thres, classes=classes, agnostic=True\n )\n', (6303, 6369), False, 'from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer\n'), ((6465, 6494), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 4)'], {}), '((boxes.shape[0], 4))\n', (6473, 6494), True, 'import numpy as np\n'), ((6523, 6552), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 1)'], {}), '((boxes.shape[0], 1))\n', (6531, 6552), True, 'import numpy as np\n'), ((6565, 6594), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 1)'], {}), '((boxes.shape[0], 1))\n', (6573, 6594), True, 'import numpy as np\n'), ((6916, 6943), 'numpy.asarray', 'np.asarray', (['coords_xyminmax'], {}), '(coords_xyminmax)\n', (6926, 6943), True, 'import numpy as np\n'), ((6949, 7040), 'my_utils.draw_boxes', 'draw_boxes', (['img1', 'confd', 't', 'det_classes', 'class_names'], {'order': '"""xy_minmax"""', 'analysis': '(False)'}), "(img1, confd, t, det_classes, class_names, order='xy_minmax',\n analysis=False)\n", (6959, 7040), False, 'from my_utils import xyxy_2_xyxyo, draw_boxes\n'), ((7037, 7051), 'matplotlib.pyplot.imshow', 'plt.imshow', (['op'], {}), '(op)\n', (7047, 7051), True, 'import matplotlib.pyplot as plt\n'), ((181, 206), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (204, 206), False, 'import torch\n'), ((214, 242), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {}), '()\n', (240, 242), False, 'import torch\n'), ((250, 277), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (275, 277), False, 'import torch\n'), ((1343, 1381), 'cv2.resize', 'cv2.resize', (['img1', '(img_size, img_size)'], {}), '(img1, (img_size, img_size))\n', (1353, 1381), False, 'import os, cv2\n'), ((2637, 2711), 'glob.glob', 'glob.glob', (['"""/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png"""'], {}), "('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png')\n", (2646, 2711), False, 
'import glob, random\n'), ((2728, 2802), 'glob.glob', 'glob.glob', (['"""/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg"""'], {}), "('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')\n", (2737, 2802), False, 'import glob, random\n'), ((3028, 3044), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3038, 3044), False, 'import os, cv2\n'), ((3056, 3093), 'cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_BGR2RGB'], {}), '(img1, cv2.COLOR_BGR2RGB)\n', (3068, 3093), False, 'import os, cv2\n'), ((3258, 3277), 'utils.torch_utils.time_synchronized', 'time_synchronized', ([], {}), '()\n', (3275, 3277), False, 'from utils.torch_utils import select_device, load_classifier, time_synchronized\n'), ((3347, 3432), 'utils.general.non_max_suppression', 'non_max_suppression', (['pred', 'conf_thres', 'iou_thres'], {'classes': 'classes', 'agnostic': '(True)'}), '(pred, conf_thres, iou_thres, classes=classes, agnostic=True\n )\n', (3366, 3432), False, 'from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer\n'), ((3670, 3699), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 4)'], {}), '((boxes.shape[0], 4))\n', (3678, 3699), True, 'import numpy as np\n'), ((3732, 3761), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 1)'], {}), '((boxes.shape[0], 1))\n', (3740, 3761), True, 'import numpy as np\n'), ((3778, 3807), 'numpy.zeros', 'np.zeros', (['(boxes.shape[0], 1)'], {}), '((boxes.shape[0], 1))\n', (3786, 3807), True, 'import numpy as np\n'), ((5881, 5955), 'glob.glob', 'glob.glob', (['"""/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png"""'], {}), "('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png')\n", (5890, 5955), False, 'import glob, random\n'), ((5972, 6046), 'glob.glob', 'glob.glob', (['"""/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg"""'], {}), "('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')\n", (5981, 6046), False, 'import glob, random\n'), ((7088, 7114), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (7104, 7114), False, 'import os, cv2\n'), ((312, 337), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (335, 337), False, 'import torch\n'), ((459, 488), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (485, 488), False, 'import torch\n'), ((2132, 2147), 'os.listdir', 'os.listdir', (['out'], {}), '(out)\n', (2142, 2147), False, 'import os, cv2\n'), ((2256, 2276), 'os.path.join', 'os.path.join', (['out', 'f'], {}), '(out, f)\n', (2268, 2276), False, 'import os, cv2\n'), ((3205, 3227), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3221, 3227), False, 'import os, cv2\n'), ((6810, 6854), 'my_utils.xyxy_2_xyxyo', 'xyxy_2_xyxyo', (['img_w', 'img_h', 'coords_minmax[i]'], {}), '(img_w, img_h, coords_minmax[i])\n', (6822, 6854), False, 'from my_utils import xyxy_2_xyxyo, draw_boxes\n'), ((1467, 1489), 'torch.from_numpy', 'torch.from_numpy', (['img2'], {}), '(img2)\n', (1483, 1489), False, 'import torch\n'), ((4055, 4099), 'my_utils.xyxy_2_xyxyo', 'xyxy_2_xyxyo', (['img_w', 'img_h', 'coords_minmax[i]'], {}), '(img_w, img_h, coords_minmax[i])\n', (4067, 4099), False, 'from my_utils import xyxy_2_xyxyo, draw_boxes\n'), ((547, 577), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['(0)'], {}), '(0)\n', (574, 577), False, 'import torch\n'), ((626, 655), 'torch.cuda.memory_reserved', 'torch.cuda.memory_reserved', 
(['(0)'], {}), '(0)\n', (652, 655), False, 'import torch\n'), ((3573, 3616), 'numpy.array', 'np.array', (['[10.0, 20.0, 30.0, 50.0, 0.75, 0]'], {}), '([10.0, 20.0, 30.0, 50.0, 0.75, 0])\n', (3581, 3616), True, 'import numpy as np\n'), ((2855, 2886), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', 'imgsz', 'imgsz'], {}), '(1, 3, imgsz, imgsz)\n', (2866, 2886), False, 'import torch\n')] |
# Copyright 2015 Carnegie Mellon University
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import json
from oslo.config import cfg
from stevedore import driver
from monasca.common import es_conn
from monasca.common import email_sender
from monasca.common import kafka_conn
from monasca.openstack.common import log
from monasca.openstack.common import service as os_service
es_opts = [
cfg.StrOpt('topic',
default='alarm',
help=('The topic that messages will be retrieved from.'
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('topic2',
default='notification_methods',
help=('The topic that messages will be retrieved from.'
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('doc_type',
default='',
help=('The document type which defines what document '
'type the messages will be save into. If not '
'specified, then the topic will be used.')),
cfg.StrOpt('processor',
default='',
help=('The message processer to load to process the message.'
'If the message does not need to be process anyway,'
'leave the default')),
]
es_group = cfg.OptGroup(name='notification', title='notification')
cfg.CONF.register_group(es_group)
cfg.CONF.register_opts(es_opts, es_group)
LOG = log.getLogger(__name__)
class NotificationEngine(os_service.Service):
    def __init__(self, threads=1000):
        super(NotificationEngine, self).__init__(threads)
        self._kafka_conn = kafka_conn.KafkaConnection(
            cfg.CONF.notification.topic)

        # Use doc_type if it is defined.
        if cfg.CONF.notification.doc_type:
            self._es_conn = es_conn.ESConnection(
                cfg.CONF.notification.doc_type)
        else:
            self._es_conn = es_conn.ESConnection(
                cfg.CONF.notification.topic2)

    def handle_alarm_msg(self, msg):
        if msg and msg.message:
            LOG.debug("Message received for alarm: " + msg.message.value)
            value = msg.message.value
            if value:
                # value's format is:
                # {
                #     "metrics": {
                #         "timestamp": 1432672915.409,
                #         "name": "biz",
                #         "value": 1500,
                #         "dimensions": {
                #             "key2": "value2",
                #             "key1": "value1"
                #         }
                #     },
                #     "state_updated_timestamp": 1432672915,
                #     "state": "ALARM",
                #     "alarm-definition": {
                #         "alarm_actions": [
                #             "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
                #         ],
                #         "undetermined_actions": [
                #             "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
                #         ],
                #         "name": "Average CPU percent greater than 10",
                #         "match_by": [
                #             "hostname"
                #         ],
                #         "description": "The average CPU percent is greater than 10",
                #         "ok_actions": [
                #             "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
                #         ],
                #         "expression": "max(foo{hostname=mini-mon,mu=na}, 120) > 1100
                #             and max(bar { asd = asd} )>1200 or avg(biz)>1300",
                #         "id": "c60ec47e-5038-4bf1-9f95-4046c6e91111",
                #         "severity": "LOW"
                #     }
                # }

                # convert to dict, and get state to determine the actions(notification method id) needed.
                # the method id can be used to match the notification method in elasticSearch
                # Then an email will be sent (TODO: phone txt msg are not dealt with for now)
                dict_msg = ast.literal_eval(value)
                state = dict_msg["state"]
                if state not in ["ALARM", "OK", "UNDETERMINED"]:
                    LOG.error("state of alarm is not defined as expected")
                    return
                actions = []
                if state == 'ALARM':
                    actions = dict_msg["alarm-definition"]["alarm_actions"]
                if state == 'OK':
                    actions = dict_msg["alarm-definition"]["ok_actions"]
                if state == 'UNDETERMINED':
                    actions = dict_msg["alarm-definition"]["undetermined_actions"]

                addresses = []
                types = []
                # the action_id is an id of notification method
                # there can be multiple ids in one alarm message with different types
                for action_id in actions:
                    es_res = self._es_conn.get_message_by_id(action_id)

                    def _get_notification_method_response(res):
                        if res and res.status_code == 200:
                            obj = res.json()
                            if obj:
                                return obj.get('hits')
                            return None
                        else:
                            return None

                    es_res = _get_notification_method_response(es_res)
                    LOG.debug('Query to ElasticSearch returned: %s' % es_res)
                    if es_res is None:
                        LOG.error("The provided is not defined as expected")
                        return
                    name = es_res["hits"][0]["_source"]["name"]
                    type = es_res["hits"][0]["_source"]["type"]
                    address = es_res["hits"][0]["_source"]["address"]
                    types.append(type)
                    addresses.append(address)

                email_addresses = []
                for i in range(len(types)):
                    if types[i] == "EMAIL":
                        email_addresses.append(addresses[i])

                email_sender.send_emails(email_addresses, "Alarm to User", dict_msg["alarm-definition"]["description"])

    def start(self):
        while True:
            try:
                for msg in self._kafka_conn.get_messages():
                    self.handle_alarm_msg(msg)

                # if autocommit is set, this will be a no-op call.
                self._kafka_conn.commit()
            except Exception:
                LOG.exception('Error occurred while handling kafka messages.')

    def stop(self):
        self._kafka_conn.close()
        super(NotificationEngine, self).stop()
| [
"oslo.config.cfg.StrOpt",
"monasca.common.kafka_conn.KafkaConnection",
"monasca.openstack.common.log.getLogger",
"ast.literal_eval",
"oslo.config.cfg.CONF.register_group",
"monasca.common.email_sender.send_emails",
"oslo.config.cfg.CONF.register_opts",
"oslo.config.cfg.OptGroup",
"monasca.common.es_conn.ESConnection"
]
| [((1954, 2009), 'oslo.config.cfg.OptGroup', 'cfg.OptGroup', ([], {'name': '"""notification"""', 'title': '"""notification"""'}), "(name='notification', title='notification')\n", (1966, 2009), False, 'from oslo.config import cfg\n'), ((2010, 2043), 'oslo.config.cfg.CONF.register_group', 'cfg.CONF.register_group', (['es_group'], {}), '(es_group)\n', (2033, 2043), False, 'from oslo.config import cfg\n'), ((2044, 2085), 'oslo.config.cfg.CONF.register_opts', 'cfg.CONF.register_opts', (['es_opts', 'es_group'], {}), '(es_opts, es_group)\n', (2066, 2085), False, 'from oslo.config import cfg\n'), ((2093, 2116), 'monasca.openstack.common.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (2106, 2116), False, 'from monasca.openstack.common import log\n'), ((932, 1099), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (['"""topic"""'], {'default': '"""alarm"""', 'help': '"""The topic that messages will be retrieved from.This also will be used as a doc type when saved to ElasticSearch."""'}), "('topic', default='alarm', help=\n 'The topic that messages will be retrieved from.This also will be used as a doc type when saved to ElasticSearch.'\n )\n", (942, 1099), False, 'from oslo.config import cfg\n'), ((1176, 1359), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (['"""topic2"""'], {'default': '"""notification_methods"""', 'help': '"""The topic that messages will be retrieved from.This also will be used as a doc type when saved to ElasticSearch."""'}), "('topic2', default='notification_methods', help=\n 'The topic that messages will be retrieved from.This also will be used as a doc type when saved to ElasticSearch.'\n )\n", (1186, 1359), False, 'from oslo.config import cfg\n'), ((1436, 1618), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (['"""doc_type"""'], {'default': '""""""', 'help': '"""The document type which defines what document type the messages will be save into. If not specified, then the topic will be used."""'}), "('doc_type', default='', help=\n 'The document type which defines what document type the messages will be save into. If not specified, then the topic will be used.'\n )\n", (1446, 1618), False, 'from oslo.config import cfg\n'), ((1694, 1868), 'oslo.config.cfg.StrOpt', 'cfg.StrOpt', (['"""processor"""'], {'default': '""""""', 'help': '"""The message processer to load to process the message.If the message does not need to be process anyway,leave the default"""'}), "('processor', default='', help=\n 'The message processer to load to process the message.If the message does not need to be process anyway,leave the default'\n )\n", (1704, 1868), False, 'from oslo.config import cfg\n'), ((2288, 2343), 'monasca.common.kafka_conn.KafkaConnection', 'kafka_conn.KafkaConnection', (['cfg.CONF.notification.topic'], {}), '(cfg.CONF.notification.topic)\n', (2314, 2343), False, 'from monasca.common import kafka_conn\n'), ((2470, 2522), 'monasca.common.es_conn.ESConnection', 'es_conn.ESConnection', (['cfg.CONF.notification.doc_type'], {}), '(cfg.CONF.notification.doc_type)\n', (2490, 2522), False, 'from monasca.common import es_conn\n'), ((2582, 2632), 'monasca.common.es_conn.ESConnection', 'es_conn.ESConnection', (['cfg.CONF.notification.topic2'], {}), '(cfg.CONF.notification.topic2)\n', (2602, 2632), False, 'from monasca.common import es_conn\n'), ((4657, 4680), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (4673, 4680), False, 'import ast\n'), ((6747, 6855), 'monasca.common.email_sender.send_emails', 'email_sender.send_emails', (['email_addresses', '"""Alarm to User"""', "dict_msg['alarm-definition']['description']"], {}), "(email_addresses, 'Alarm to User', dict_msg[\n 'alarm-definition']['description'])\n", (6771, 6855), False, 'from monasca.common import email_sender\n')]
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
from main.models import UserInfo, User, Child, Volunteer, Donor, Letter, Need, PurchaseForInstitute, PurchaseForNeed, \
Activity, OngoingUserInfo
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
class UserInfoInline(admin.TabularInline):
model = UserInfo
extra = 1
max_num = 1
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
)
list_display = ('email', 'userinfo', 'is_staff')
search_fields = ('email', 'userinfo__first_name', 'userinfo__last_name')
ordering = ('email',)
inlines = [UserInfoInline]
admin.site.unregister(Group)
admin.site.register(Child)
admin.site.register(Volunteer)
admin.site.register(Donor)
admin.site.register(Letter)
admin.site.register(Need)
admin.site.register(PurchaseForInstitute)
admin.site.register(PurchaseForNeed)
admin.site.register(Activity)
admin.site.register(OngoingUserInfo)
| [
"django.contrib.admin.site.unregister",
"django.contrib.admin.register",
"django.utils.translation.ugettext_lazy",
"django.contrib.admin.site.register"
]
| [((355, 375), 'django.contrib.admin.register', 'admin.register', (['User'], {}), '(User)\n', (369, 375), False, 'from django.contrib import admin\n'), ((1095, 1123), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['Group'], {}), '(Group)\n', (1116, 1123), False, 'from django.contrib import admin\n'), ((1124, 1150), 'django.contrib.admin.site.register', 'admin.site.register', (['Child'], {}), '(Child)\n', (1143, 1150), False, 'from django.contrib import admin\n'), ((1151, 1181), 'django.contrib.admin.site.register', 'admin.site.register', (['Volunteer'], {}), '(Volunteer)\n', (1170, 1181), False, 'from django.contrib import admin\n'), ((1182, 1208), 'django.contrib.admin.site.register', 'admin.site.register', (['Donor'], {}), '(Donor)\n', (1201, 1208), False, 'from django.contrib import admin\n'), ((1209, 1236), 'django.contrib.admin.site.register', 'admin.site.register', (['Letter'], {}), '(Letter)\n', (1228, 1236), False, 'from django.contrib import admin\n'), ((1237, 1262), 'django.contrib.admin.site.register', 'admin.site.register', (['Need'], {}), '(Need)\n', (1256, 1262), False, 'from django.contrib import admin\n'), ((1263, 1304), 'django.contrib.admin.site.register', 'admin.site.register', (['PurchaseForInstitute'], {}), '(PurchaseForInstitute)\n', (1282, 1304), False, 'from django.contrib import admin\n'), ((1305, 1341), 'django.contrib.admin.site.register', 'admin.site.register', (['PurchaseForNeed'], {}), '(PurchaseForNeed)\n', (1324, 1341), False, 'from django.contrib import admin\n'), ((1342, 1371), 'django.contrib.admin.site.register', 'admin.site.register', (['Activity'], {}), '(Activity)\n', (1361, 1371), False, 'from django.contrib import admin\n'), ((1372, 1408), 'django.contrib.admin.site.register', 'admin.site.register', (['OngoingUserInfo'], {}), '(OngoingUserInfo)\n', (1391, 1408), False, 'from django.contrib import admin\n'), ((599, 615), 'django.utils.translation.ugettext_lazy', '_', (['"""Permissions"""'], {}), "('Permissions')\n", (600, 615), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((682, 702), 'django.utils.translation.ugettext_lazy', '_', (['"""Important dates"""'], {}), "('Important dates')\n", (683, 702), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import tdl
import time
import hunting.constants as c
class Renderer:
def __init__(self, main_console=None, level_display_width=c.SCREEN_WIDTH, level_display_height=c.SCREEN_HEIGHT):
if main_console is None:
self.main_console = tdl.init(level_display_width, level_display_height, 'From Renderer Default Constructor')
else:
self.main_console = main_console
self.level_display_width = level_display_width
self.level_display_height = level_display_height
self._level_console = tdl.Console(level_display_width, level_display_height)
def _render_level(self, con, level):
for x in range(level.width):
for y in range(level.height):
if level[x][y].blocks is not False:
self._level_console.draw_rect(x, y, 1, 1, None, bg=[120, 0, 50])
else:
self._level_console.draw_rect(x, y, 1, 1, None, bg=[30, 255, 30])
# TODO: This is pretty hacky!
i = 1
for o in level._all_objects:
if o.faction == '1': # TODO: Better faction implementation!
color = [255, 0, 0]
else:
color = [0, 0, 255]
self._level_console.draw_char(o.x, o.y, i, color)
i += 1
con.blit(self._level_console)
def render_all(self, level):
self._render_level(self.main_console, level)
tdl.flush()
def clear(self, level):
for o in level._all_objects:
self._level_console.draw_char(o.x, o.y, ' ')
def render_event(self, level, event):
if event[c.EVENT_TYPE] == c.MOVEMENT_EVENT:
# Clear previous location
self._level_console.draw_char(event[c.MOVEMENT_PREV_X], event[c.MOVEMENT_PREV_Y], ' ', bg=[0, 15, 7])
# Retrieve faction and color
o = level.get_object_by_id(event[c.OBJ_ID])
if o.faction == '1': # TODO: Better faction implementation!
color = [255, 0, 0]
else:
color = [0, 0, 255]
self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], o.faction, fg=color)
elif event[c.EVENT_TYPE] == c.OBJECT_DESTRUCTION_EVENT:
self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], ' ', bg=[0, 15, 7])
# Render
self.main_console.blit(self._level_console)
tdl.flush()
def visualize(level, show_time=1):
Renderer().render_all(level)
time.sleep(show_time) | [
"tdl.Console",
"tdl.init",
"time.sleep",
"tdl.flush"
]
| [((2494, 2515), 'time.sleep', 'time.sleep', (['show_time'], {}), '(show_time)\n', (2504, 2515), False, 'import time\n'), ((543, 597), 'tdl.Console', 'tdl.Console', (['level_display_width', 'level_display_height'], {}), '(level_display_width, level_display_height)\n', (554, 597), False, 'import tdl\n'), ((1432, 1443), 'tdl.flush', 'tdl.flush', ([], {}), '()\n', (1441, 1443), False, 'import tdl\n'), ((2408, 2419), 'tdl.flush', 'tdl.flush', ([], {}), '()\n', (2417, 2419), False, 'import tdl\n'), ((253, 345), 'tdl.init', 'tdl.init', (['level_display_width', 'level_display_height', '"""From Renderer Default Constructor"""'], {}), "(level_display_width, level_display_height,\n 'From Renderer Default Constructor')\n", (261, 345), False, 'import tdl\n')] |
from zenslackchat.zendesk_base_webhook import BaseWebHook
from zenslackchat.zendesk_email_to_slack import email_from_zendesk
from zenslackchat.zendesk_comments_to_slack import comments_from_zendesk
class CommentsWebHook(BaseWebHook):
"""Handle Zendesk Comment Events.
"""
def handle_event(self, event, slack_client, zendesk_client):
"""Handle the comment trigger event we have been POSTed.
Recover and update the comments with lastest from Zendesk.
"""
comments_from_zendesk(event, slack_client, zendesk_client)
class EmailWebHook(BaseWebHook):
"""Handle Zendesk Email Events.
"""
def handle_event(self, event, slack_client, zendesk_client):
"""Handle an email created issue and create it on slack.
"""
email_from_zendesk(event, slack_client, zendesk_client)
| [
"zenslackchat.zendesk_email_to_slack.email_from_zendesk",
"zenslackchat.zendesk_comments_to_slack.comments_from_zendesk"
]
| [((501, 559), 'zenslackchat.zendesk_comments_to_slack.comments_from_zendesk', 'comments_from_zendesk', (['event', 'slack_client', 'zendesk_client'], {}), '(event, slack_client, zendesk_client)\n', (522, 559), False, 'from zenslackchat.zendesk_comments_to_slack import comments_from_zendesk\n'), ((789, 844), 'zenslackchat.zendesk_email_to_slack.email_from_zendesk', 'email_from_zendesk', (['event', 'slack_client', 'zendesk_client'], {}), '(event, slack_client, zendesk_client)\n', (807, 844), False, 'from zenslackchat.zendesk_email_to_slack import email_from_zendesk\n')] |
y_pred=ml.predict(x_test)
print(y_pred)
from sklearn.metrics import r2_score
r2_score(y_test,y_pred)
pred_y_df=pd.DataFrame({'Actual Value':y_test,'Predicted Value':y_pred, 'Difference': y_test-y_pred})
pred_y_df[0:20] | [
"sklearn.metrics.r2_score"
]
| [((78, 102), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (86, 102), False, 'from sklearn.metrics import r2_score\n')] |
import asyncio
import discord
from discord.ext import commands
import re
import sqlite3
from urllib.parse import quote as uriquote
import html
CURR = ["AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR",
"GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN",
"MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD",
"THB", "TRY", "TWD", "ZAR"]
class Finance(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def coin(self, ctx, *, line: str):
"""Look up a cryptocurrency such as Bitcoin
Optionally specify a quantity such as `0.6 ETH`
Optionally specify a conversion value such as `2 BTC in ETH` or `ETH in CAD`"""
coin = await self.parse_coinline(line)
if not coin:
await ctx.send(f"Unable to find coin {line}")
return
url = f"https://api.coinmarketcap.com/v1/ticker/{coin['coin']}{coin['currency']}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data[0]
cid = data['symbol'].upper()
name = data['name']
pUSD = data['price_usd']
pC24 = data['percent_change_24h']
pC1 = data['percent_change_1h']
output = ""
if coin.get('cvtto', ''):
cvtval = await self.convert_coin(coin, data)
if not cvtval:
await ctx.send(f"Failed to look up {coin['cvtto']}")
return
if coin['qty'] == 1:
output = "{} {} | Value: {} {} (${} USD) | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, cvtval, coin['cvtto'].upper(), pUSD, pC1, pC24)
else:
usdfinal = float(pUSD) * coin['qty']
output = "{} {} : {} {} (${:.2f} USD)".format(coin['qty'], cid, cvtval, coin['cvtto'].upper(), usdfinal)
else:
if coin['qty'] == 1:
output = "{} {} | Value: ${} | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, pUSD, pC1, pC24)
else:
finalprice = float(pUSD) * coin['qty']
output = "{} {} : ${:.2f}".format(coin['qty'], cid, finalprice)
if output:
await ctx.send(output)
async def convert_coin(self, coin, data):
if coin['currency']:
cvtval = "{:.2f}".format(float(data['price_{}'.format(coin['cvtto'].lower())]) * coin['qty'])
else:
if not coin['cvtto']:
cvtval = ''
if coin['cvtto'] == "bitcoin":
#api gives us BTC by default
cvtval = self.ffstr(float(data['price_btc']) * coin['qty'])
coin['cvtto'] = "BTC"
else:
pUSD = data['price_usd']
url = "https://api.coinmarketcap.com/v1/ticker/{}".format(coin['cvtto'])
async with self.bot.session.get(url) as resp:
tojson = await resp.json()
coin['cvtto'] = tojson[0]['symbol'].upper()
toval = float(tojson[0]['price_usd'])
cvtval = self.ffstr((float(pUSD) * coin['qty']) / toval)
return cvtval
def ffstr(self, number):
return "{:.8f}".format(float(number)).rstrip('0').rstrip('.')
async def parse_coinline(self, line):
coinqty = 1
qtycheck = re.search(r"(^(\d*\.)?\d+)\s?(\w.+)", line)
if qtycheck:
coinqty = float(qtycheck.group(1))
line = qtycheck.group(3).strip()
curr = ""
cvtto = ""
if " in " in line or " to " in line:
if " in " in line:
coin, cvtto = line.split(" in ")
elif " to " in line:
coin, cvtto = line.split(" to ")
coinid = await self.findcoin(coin)
if cvtto.upper() in CURR:
curr = "?convert={}".format(cvtto)
else:
cvtto = await self.findcoin(cvtto)
else:
coin = line
coinid = await self.findcoin(coin)
if not coinid:
return None
return {'coin': coinid,
'qty': coinqty,
'currency': curr,
'cvtto': cvtto}
async def findcoin(self, coin):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT coinid FROM coins WHERE coinid = (?) OR symbol = (?)", (coin, coin)).fetchone()
if not result:
like = "%{}%".format(coin)
result = cursor.execute("SELECT coinid FROM coins WHERE name LIKE (?)", [like]).fetchone()
if result:
return result[0]
@commands.command(hidden=True)
@commands.is_owner()
async def newcoins(self, ctx):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='coins';").fetchone()
if not result:
cursor.execute("CREATE TABLE 'coins' ('symbol' TEXT, 'coinid' TEXT UNIQUE ON CONFLICT REPLACE, 'name' TEXT);")
conn.commit()
url = "https://api.coinmarketcap.com/v1/ticker/?limit=0"
async with self.bot.session.get(url) as resp:
data = await resp.json()
for coin in data:
sym = coin['symbol'].lower()
cid = coin['id'].lower()
name = coin['name'].lower()
cursor.execute("insert into coins values (?, ?, ?)", (sym,cid,name))
conn.commit()
conn.close()
@commands.command(aliases=['stonks', 'stocks'])
async def stock (self, ctx, name: str):
"""Look up a stock and show its current price, change, etc"""
symbol = ""
url = f"https://autoc.finance.yahoo.com/autoc?query={uriquote(name)}®ion=1&lang=en&guccounter=1"
async with self.bot.session.get(url) as resp:
data = await resp.json()
symbol = data['ResultSet']['Result'][0]['symbol']
if not symbol:
await ctx.send(f"Unable to find a stonk named `{name}`")
return
url = f"http://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data["quoteResponse"]["result"][0]
downup = "\N{CHART WITH UPWARDS TREND}" if data['regularMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr = "{}{}: {} {} :: Today's change: {:.2f} ({:.2f}%) {}"
longn = ' ({})'.format(data['shortName']) if 'shortName' in data else ''
outstr = outstr.format(data['symbol'], longn, data['regularMarketPrice'], data['currency'],
float(data['regularMarketChange']), float(data['regularMarketChangePercent']),
downup)
if 'postMarketPrice' in data and (data['marketState'] == "CLOSED" or "POST" in data['marketState']):
pdu = "\N{CHART WITH UPWARDS TREND}" if data['postMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr += " :: After Hours: {:.2f} - Change: {:.2f} {}".format(data['postMarketPrice'],
data['postMarketChange'], pdu)
await ctx.send(html.unescape(outstr))
def setup(bot):
bot.add_cog(Finance(bot))
| [
"sqlite3.connect",
"urllib.parse.quote",
"html.unescape",
"discord.ext.commands.is_owner",
"discord.ext.commands.command",
"re.search"
]
| [((487, 505), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (503, 505), False, 'from discord.ext import commands\n'), ((4741, 4770), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (4757, 4770), False, 'from discord.ext import commands\n'), ((4776, 4795), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (4793, 4795), False, 'from discord.ext import commands\n'), ((5644, 5690), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['stonks', 'stocks']"}), "(aliases=['stonks', 'stocks'])\n", (5660, 5690), False, 'from discord.ext import commands\n'), ((3410, 3457), 're.search', 're.search', (['"""(^(\\\\d*\\\\.)?\\\\d+)\\\\s?(\\\\w.+)"""', 'line'], {}), "('(^(\\\\d*\\\\.)?\\\\d+)\\\\s?(\\\\w.+)', line)\n", (3419, 3457), False, 'import re\n'), ((4338, 4370), 'sqlite3.connect', 'sqlite3.connect', (['"""coins.sqlite3"""'], {}), "('coins.sqlite3')\n", (4353, 4370), False, 'import sqlite3\n'), ((4846, 4878), 'sqlite3.connect', 'sqlite3.connect', (['"""coins.sqlite3"""'], {}), "('coins.sqlite3')\n", (4861, 4878), False, 'import sqlite3\n'), ((5886, 5900), 'urllib.parse.quote', 'uriquote', (['name'], {}), '(name)\n', (5894, 5900), True, 'from urllib.parse import quote as uriquote\n'), ((7420, 7441), 'html.unescape', 'html.unescape', (['outstr'], {}), '(outstr)\n', (7433, 7441), False, 'import html\n')] |
import unittest
import unittest.mock as mock
import asyncio
import pyx.http as http
def create_dummy_message():
msg = http.HttpMessage(None)
msg.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Cookie', 'a'),
http.HttpHeader('Cookie', 'b'),
]
return msg
def create_dummy_connection():
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader(loop=loop)
@asyncio.coroutine
def dummy_drain():
yield from asyncio.sleep(0.001)
writer = mock.Mock(spec=asyncio.StreamWriter)
writer.attach_mock(mock.Mock(wraps=dummy_drain), 'drain')
conn = http.HttpConnection(reader, writer)
return conn
def create_dummy_request():
conn = create_dummy_connection()
req = http.HttpRequest(conn)
return req
class TestHttpMessage(unittest.TestCase):
def test_get_header(self):
msg = create_dummy_message()
self.assertEqual(msg.get_header("server"), ["Pyx"])
self.assertEqual(msg.get_header("SERVER"), ["Pyx"])
self.assertEqual(msg.get_header("pragma"), [])
self.assertEqual(msg.get_header("cookie"), ["a", "b"])
self.assertEqual(msg.get_first_header("cookie"), "a")
self.assertTrue(msg.get_first_header("pragma") is None)
def test_write_headers(self):
msg = create_dummy_message()
self.assertEqual(msg.write_headers(),
['Server: Pyx', 'Cookie: a', 'Cookie: b'])
msg.headers = []
self.assertEqual(msg.write_headers(), [])
class TestHttpRequest(unittest.TestCase):
def test_parse_req_line(self):
req = create_dummy_request()
req._parse_req_line(b'POST / HTTP/1.1\r\n')
self.assertEqual(req.method, 'POST')
self.assertEqual(req.path, '/')
self.assertTrue(req.query is None)
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
req._parse_req_line(
b'GET /some/path?some=query&some_other=query HTTP/1.1\r\n')
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/some/path')
self.assertEqual(req.query, 'some=query&some_other=query')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET /\r\n')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET / GARBAGE\r\n')
req._parse_req_line(b'GET / HTTP/1\r\n')
self.assertEqual(req.version, (1, 0))
def test_parse_header(self):
req = create_dummy_request()
req._parse_header(b'Server: Pyx\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', 'Pyx')])
req.headers = []
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b'Server\r\n')
req.headers = []
req._parse_header(b'Server:\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Server: \r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Host: some.badasshost.com:8080\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Host', 'some.badasshost.com:8080')])
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b': pyx\r\n')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' : pyx')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' \t : pyx')
def test_parse(self):
loop = asyncio.get_event_loop()
conn = create_dummy_connection()
reader = conn.reader
reader.feed_data(
b'GET /?q=p&s=t HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: Keep-Alive\r\n'
b'Pragma: Test\r\n'
b' : Test\r\n'
b'\r\n')
req = loop.run_until_complete(http.HttpRequest.parse(conn))
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/')
self.assertEqual(req.query, 'q=p&s=t')
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
self.assertEqual(req.headers,
[
http.HttpHeader('Host', 'localhost'),
http.HttpHeader('Connection', 'Keep-Alive'),
http.HttpHeader('Pragma', 'Test'),
])
def test_respond(self):
req = create_dummy_request()
req.version = (1, 1)
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertEqual(resp.version, (1, 1))
req.version = (1, 0)
resp = req.respond(400)
self.assertEqual(resp.code, 400)
self.assertEqual(resp.version, (1, 0))
class TestHttpResponse(unittest.TestCase):
def test_write(self):
resp = http.HttpResponse(200, None)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Connection', 'keep-alive')
]
self.assertEqual(resp.write(),
['HTTP/1.1 200 OK',
'Server: Pyx',
'Connection: keep-alive',
'\r\n'])
self.assertEqual(str(resp),
'HTTP/1.1 200 OK\r\n'
'Server: Pyx\r\n'
'Connection: keep-alive\r\n'
'\r\n')
def test_send(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertFalse(req.responded)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Content-Length', '100'),
http.HttpHeader('Content-Type', 'text/plain'),
]
loop.run_until_complete(resp.send())
resp.connection.writer.write.assert_called_with(str(resp).encode())
self.assertTrue(req.responded)
def test_send_body(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
loop.run_until_complete(resp.send())
self.assertTrue(req.responded)
loop.run_until_complete(resp.send_body(b'Yes, this is the body.'))
resp.connection.writer.write.assert_called_with(b'Yes, this is the body.')
loop.run_until_complete(resp.send_body('This is another string body.'))
resp.connection.writer.write.assert_called_with(b'This is another string body.')
class DummyResource(http.UrlResource):
def get_child(self, key):
if key == 'hello':
return self
elif key == "static":
return http.StaticRootResource('.')
else:
raise http.HttpError(404, '{} not found'.format(key))
class TestUrlResource(unittest.TestCase):
def test_traverse(self):
res = DummyResource()
self.assertEqual(res.traverse(''), res)
self.assertEqual(res.traverse('/'), res)
self.assertEqual(res.traverse('/hello'), res)
with self.assertRaises(http.HttpError):
res.traverse('/does/not/exist')
sres = res.traverse('/static')
self.assertEqual(sres.root, '.')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/some/path')
self.assertEqual(sres._build_real_path(), './some/path')
def test_not_implemented(self):
res = http.UrlResource()
with self.assertRaises(NotImplementedError):
res.traverse('/hello')
req = create_dummy_request()
with self.assertRaises(NotImplementedError):
res.handle_request(req)
class TestStaticRootResource(unittest.TestCase):
def test_build_real_path(self):
res = http.StaticRootResource('local_root')
res = res.traverse('/some/long/path/where/ever/it/leads/')
self.assertEqual(res._build_real_path(),
'local_root/some/long/path/where/ever/it/leads')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/%2e%2e%2f%2e%2e/dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
| [
"pyx.http.HttpResponse",
"unittest.mock.Mock",
"asyncio.StreamReader",
"pyx.http.HttpRequest",
"pyx.http.HttpRequest.parse",
"pyx.http.HttpConnection",
"pyx.http.UrlResource",
"pyx.http.HttpHeader",
"pyx.http.HttpMessage",
"asyncio.sleep",
"asyncio.get_event_loop",
"pyx.http.StaticRootResource"
]
| [((124, 146), 'pyx.http.HttpMessage', 'http.HttpMessage', (['None'], {}), '(None)\n', (140, 146), True, 'import pyx.http as http\n'), ((354, 378), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (376, 378), False, 'import asyncio\n'), ((393, 424), 'asyncio.StreamReader', 'asyncio.StreamReader', ([], {'loop': 'loop'}), '(loop=loop)\n', (413, 424), False, 'import asyncio\n'), ((525, 561), 'unittest.mock.Mock', 'mock.Mock', ([], {'spec': 'asyncio.StreamWriter'}), '(spec=asyncio.StreamWriter)\n', (534, 561), True, 'import unittest.mock as mock\n'), ((636, 671), 'pyx.http.HttpConnection', 'http.HttpConnection', (['reader', 'writer'], {}), '(reader, writer)\n', (655, 671), True, 'import pyx.http as http\n'), ((765, 787), 'pyx.http.HttpRequest', 'http.HttpRequest', (['conn'], {}), '(conn)\n', (781, 787), True, 'import pyx.http as http\n'), ((175, 207), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Server"""', '"""Pyx"""'], {}), "('Server', 'Pyx')\n", (190, 207), True, 'import pyx.http as http\n'), ((217, 247), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Cookie"""', '"""a"""'], {}), "('Cookie', 'a')\n", (232, 247), True, 'import pyx.http as http\n'), ((257, 287), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Cookie"""', '"""b"""'], {}), "('Cookie', 'b')\n", (272, 287), True, 'import pyx.http as http\n'), ((585, 613), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'dummy_drain'}), '(wraps=dummy_drain)\n', (594, 613), True, 'import unittest.mock as mock\n'), ((3733, 3757), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3755, 3757), False, 'import asyncio\n'), ((5098, 5126), 'pyx.http.HttpResponse', 'http.HttpResponse', (['(200)', 'None'], {}), '(200, None)\n', (5115, 5126), True, 'import pyx.http as http\n'), ((5730, 5754), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5752, 5754), False, 'import asyncio\n'), ((6306, 6330), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6328, 6330), False, 'import asyncio\n'), ((7839, 7857), 'pyx.http.UrlResource', 'http.UrlResource', ([], {}), '()\n', (7855, 7857), True, 'import pyx.http as http\n'), ((8174, 8211), 'pyx.http.StaticRootResource', 'http.StaticRootResource', (['"""local_root"""'], {}), "('local_root')\n", (8197, 8211), True, 'import pyx.http as http\n'), ((8417, 8454), 'pyx.http.StaticRootResource', 'http.StaticRootResource', (['"""local_root"""'], {}), "('local_root')\n", (8440, 8454), True, 'import pyx.http as http\n'), ((8627, 8664), 'pyx.http.StaticRootResource', 'http.StaticRootResource', (['"""local_root"""'], {}), "('local_root')\n", (8650, 8664), True, 'import pyx.http as http\n'), ((8840, 8877), 'pyx.http.StaticRootResource', 'http.StaticRootResource', (['"""local_root"""'], {}), "('local_root')\n", (8863, 8877), True, 'import pyx.http as http\n'), ((491, 511), 'asyncio.sleep', 'asyncio.sleep', (['(0.001)'], {}), '(0.001)\n', (504, 511), False, 'import asyncio\n'), ((4093, 4121), 'pyx.http.HttpRequest.parse', 'http.HttpRequest.parse', (['conn'], {}), '(conn)\n', (4115, 4121), True, 'import pyx.http as http\n'), ((5164, 5196), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Server"""', '"""Pyx"""'], {}), "('Server', 'Pyx')\n", (5179, 5196), True, 'import pyx.http as http\n'), ((5210, 5253), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Connection"""', '"""keep-alive"""'], {}), "('Connection', 'keep-alive')\n", (5225, 5253), True, 'import pyx.http as http\n'), ((5943, 5975), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Server"""', '"""Pyx"""'], {}), "('Server', 'Pyx')\n", (5958, 5975), True, 'import pyx.http as http\n'), ((5989, 6029), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Content-Length"""', '"""100"""'], {}), "('Content-Length', '100')\n", (6004, 6029), True, 'import pyx.http as http\n'), ((6043, 6088), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Content-Type"""', '"""text/plain"""'], {}), "('Content-Type', 'text/plain')\n", (6058, 6088), True, 'import pyx.http as http\n'), ((2762, 2794), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Server"""', '"""Pyx"""'], {}), "('Server', 'Pyx')\n", (2777, 2794), True, 'import pyx.http as http\n'), ((3032, 3061), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Server"""', '""""""'], {}), "('Server', '')\n", (3047, 3061), True, 'import pyx.http as http\n'), ((3172, 3201), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Server"""', '""""""'], {}), "('Server', '')\n", (3187, 3201), True, 'import pyx.http as http\n'), ((3334, 3385), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Host"""', '"""some.badasshost.com:8080"""'], {}), "('Host', 'some.badasshost.com:8080')\n", (3349, 3385), True, 'import pyx.http as http\n'), ((4442, 4478), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Host"""', '"""localhost"""'], {}), "('Host', 'localhost')\n", (4457, 4478), True, 'import pyx.http as http\n'), ((4509, 4552), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Connection"""', '"""Keep-Alive"""'], {}), "('Connection', 'Keep-Alive')\n", (4524, 4552), True, 'import pyx.http as http\n'), ((4583, 4616), 'pyx.http.HttpHeader', 'http.HttpHeader', (['"""Pragma"""', '"""Test"""'], {}), "('Pragma', 'Test')\n", (4598, 4616), True, 'import pyx.http as http\n'), ((6985, 7013), 'pyx.http.StaticRootResource', 'http.StaticRootResource', (['"""."""'], {}), "('.')\n", (7008, 7013), True, 'import pyx.http as http\n')]
from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers
from stix_shifter_modules.aws_athena.entry_point import EntryPoint
import unittest
MODULE = "aws_athena"
entry_point = EntryPoint()
map_data = entry_point.get_results_translator().map_data
data_source = {
"type": "identity",
"id": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"name": "aws_athena",
"identity_class": "events"
}
options = {}
class TestAwsResultsToStix(unittest.TestCase):
"""
class to perform unit test case for Aws Athena logs translate results
"""
@staticmethod
def get_first(itr, constraint):
"""
return the obj in the itr if constraint is true
"""
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
"""
to check whether the object belongs to respective stix object
"""
return TestAwsResultsToStix.get_first(itr, lambda o: isinstance(o, dict) and o.get('type') == typ)
def test_common_prop(self):
"""
to test the common stix object properties
"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "\u00d6rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
assert result_bundle_identity['id'] == data_source['id']
assert result_bundle_identity['name'] == data_source['name']
assert result_bundle_identity['identity_class'] == data_source['identity_class']
observed_data = result_bundle_objects[1]
assert observed_data['id'] is not None
assert observed_data['type'] == "observed-data"
assert observed_data['created_by_ref'] == result_bundle_identity['id']
assert observed_data['created'] is not None
assert observed_data['modified'] is not None
assert observed_data['number_observed'] is not None
def test_vpc_flow_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "192.168.127.12",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'src_ref', 'dst_ref', 'src_port', 'dst_port', 'protocols', 'start', 'end'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['src_ref'] == '1'
assert network_obj['dst_ref'] == '4'
assert network_obj['src_port'] == 58387
assert network_obj['dst_port'] == 51289
assert network_obj['protocols'] == ['tcp']
assert network_obj['start'] == '2020-06-19T06:23:16.000Z'
assert network_obj['end'] == '2020-06-19T06:23:18.000Z'
def test_vpc_flow_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "192.168.127.12",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'interfaceid', 'date', 'logstatus', 'numbytes', 'region', 'version'}
assert custom_object['date'] == '2020-06-19'
assert custom_object['logstatus'] == 'OK'
assert custom_object['numbytes'] == 40
assert custom_object['region'] == 'us-east-1'
assert custom_object['version'] == 2
def test_guardduty_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding"
"/7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "<NAME>"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'dst_port', 'src_ref', 'dst_ref', 'src_port', 'protocols'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['dst_port'] == 38420
assert network_obj['src_ref'] == '3'
assert network_obj['dst_ref'] == '9'
assert network_obj['src_port'] == 22
assert network_obj['protocols'] == ['tcp']
def test_guardduty_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1."
" Brute force attacks are used to gain unauthorized access to your instance by guessing "
"the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'service_action_networkconnectionaction_remoteipdetails_country_countryname',
'finding_id', 'arn', 'createdat', 'partition', 'resource',
'schemaversion', 'service', 'updatedat'}
assert custom_object['arn'] == 'arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed' \
'494f3b7ca56acdc74df/finding/7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['finding_id'] == '7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['createdat'] == '2020-07-31T06:37:13.745Z'
assert custom_object['partition'] == 'aws'
assert custom_object['schemaversion'] == 2.0
assert custom_object['updatedat'] == '2020-09-12T09:25:34.086Z'
| [
"stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers",
"stix_shifter_modules.aws_athena.entry_point.EntryPoint"
]
| [((309, 321), 'stix_shifter_modules.aws_athena.entry_point.EntryPoint', 'EntryPoint', ([], {}), '()\n', (319, 321), False, 'from stix_shifter_modules.aws_athena.entry_point import EntryPoint\n'), ((6400, 6431), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', (['MODULE'], {}), '(MODULE)\n', (6423, 6431), False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((8180, 8211), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', (['MODULE'], {}), '(MODULE)\n', (8203, 8211), False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((10199, 10230), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', (['MODULE'], {}), '(MODULE)\n', (10222, 10230), False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((16224, 16255), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', (['MODULE'], {}), '(MODULE)\n', (16247, 16255), False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((22418, 22449), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', (['MODULE'], {}), '(MODULE)\n', (22441, 22449), False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was created using the DirectGUI Designer
from direct.gui import DirectGuiGlobals as DGG
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectLabel import DirectLabel
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectOptionMenu import DirectOptionMenu
from panda3d.core import (
LPoint3f,
LVecBase3f,
LVecBase4f,
TextNode
)
class GUI:
def __init__(self, rootParent=None):
self.frmMain = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-1.777778, 1.77777778, -1.1638, 1.1638),
hpr=LVecBase3f(0, 0, 0),
image='assets/menu/Background.png',
pos=LPoint3f(0, 0, 0),
image_scale=LVecBase3f(1.77778, 1, 1.1638),
image_pos=LPoint3f(0, 0, 0),
parent=rootParent,
)
self.frmMain.setTransparency(0)
self.frmSinglePlayerCreateGame = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.65, 0.65, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.425, 0, 0),
relief=5,
parent=self.frmMain,
)
self.frmSinglePlayerCreateGame.setTransparency(0)
self.pg703 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.425),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg703.setTransparency(0)
self.pg13803 = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.35, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Start',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_start"],
)
self.pg13803.setTransparency(0)
self.pg5219 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.6, 0, 0.02),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Class',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg5219.setTransparency(0)
self.optionPlayerClass = DirectOptionMenu(
items=['item1'],
frameSize=(0.07500000298023224, 3.012500149011612, -0.11250001192092896, 0.75),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.2, 0, 0.005),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='item1',
cancelframe_frameSize=(-1, 1, -1, 1),
cancelframe_hpr=LVecBase3f(0, 0, 0),
cancelframe_pos=LPoint3f(0, 0, 0),
cancelframe_relief=None,
item_frameSize=(0.07500000298023224, 2.4125001430511475, -0.11250001192092896, 0.75),
item_hpr=LVecBase3f(0, 0, 0),
item_pos=LPoint3f(-0.075, 0, -0.75),
item_text='item1',
item0_text_align=TextNode.A_left,
item0_text_scale=(1, 1),
item0_text_pos=(0, 0),
item0_text_fg=LVecBase4f(0, 0, 0, 1),
item0_text_bg=LVecBase4f(0, 0, 0, 0),
item0_text_wordwrap=None,
popupMarker_frameSize=(-0.5, 0.5, -0.2, 0.2),
popupMarker_hpr=LVecBase3f(0, 0, 0),
popupMarker_pos=LPoint3f(2.7125, 0, 0.31875),
popupMarker_relief=2,
popupMarker_scale=LVecBase3f(0.4, 0.4, 0.4),
popupMenu_frameSize=(0, 2.3375001400709152, -0.862500011920929, 0),
popupMenu_hpr=LVecBase3f(0, 0, 0),
popupMenu_pos=LPoint3f(0, 0, 0),
popupMenu_relief='raised',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.optionPlayerClass.setTransparency(0)
self.btnCancel = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.325, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Cancel',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_cancel"],
)
self.btnCancel.setTransparency(0)
self.frmPlayerInfo = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.5, 0.5, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.765, 0, 0),
relief=3,
parent=self.frmMain,
)
self.frmPlayerInfo.setTransparency(0)
self.lblInfoHeader = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblInfoHeader.setTransparency(0)
self.frmImageHero = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-0.15, 0.15, -0.2, 0.2),
hpr=LVecBase3f(0, 0, 0),
image='/home/fireclaw/workspace/Ankandora/AnkandoraLight/design/guiGraphics/heroArcher.png',
pos=LPoint3f(-0.275, 0, 0.195),
image_scale=LVecBase3f(0.15, 1, 0.2),
image_pos=LPoint3f(0, 0, 0),
parent=self.frmPlayerInfo,
)
self.frmImageHero.setTransparency(1)
self.lblClassDescription = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.12, 0, 0.31),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='The archer shoots from afar and gains the first-strike',
text_align=TextNode.A_left,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=10.0,
parent=self.frmPlayerInfo,
)
self.lblClassDescription.setTransparency(0)
self.lblHealth = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.28, 0, -0.1),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Health',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealth.setTransparency(0)
self.lblAttack = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.285),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Attack',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttack.setTransparency(0)
self.lblHealthValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.17),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='7',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealthValue.setTransparency(0)
self.lblAttackValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.36),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='4',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttackValue.setTransparency(0)
def show(self):
self.frmMain.show()
def hide(self):
self.frmMain.hide()
def destroy(self):
self.frmMain.destroy()
| [
"panda3d.core.LVecBase3f",
"panda3d.core.LVecBase4f",
"panda3d.core.LPoint3f"
]
| [((648, 667), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (658, 667), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((733, 750), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (741, 750), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((776, 806), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(1.77778)', '(1)', '(1.1638)'], {}), '(1.77778, 1, 1.1638)\n', (786, 806), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((830, 847), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (838, 847), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1126, 1145), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1136, 1145), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1163, 1185), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.425)', '(0)', '(0)'], {}), '(-0.425, 0, 0)\n', (1171, 1185), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1361, 1380), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1371, 1380), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1398, 1419), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0.425)'], {}), '(0, 0, 0.425)\n', (1406, 1419), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1439, 1464), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (1449, 1464), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1620, 1642), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (1630, 1642), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1664, 1686), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (1674, 1686), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1873, 1892), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1883, 1892), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1910, 1935), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.35)', '(0)', '(-0.45)'], {}), '(-0.35, 0, -0.45)\n', (1918, 1935), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((1955, 1980), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (1965, 1980), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2130, 2152), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (2140, 2152), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2174, 2196), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (2184, 2196), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2479, 2498), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2489, 2498), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2516, 2539), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.6)', '(0)', '(0.02)'], {}), '(-0.6, 0, 0.02)\n', (2524, 2539), False, 'from panda3d.core 
import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2559, 2584), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (2569, 2584), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2739, 2761), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (2749, 2761), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((2783, 2805), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (2793, 2805), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3128, 3147), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3138, 3147), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3165, 3188), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0.2)', '(0)', '(0.005)'], {}), '(0.2, 0, 0.005)\n', (3173, 3188), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3208, 3233), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (3218, 3233), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3339, 3358), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3349, 3358), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3388, 3405), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3396, 3405), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3563, 3582), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3573, 3582), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3605, 3631), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.075)', '(0)', '(-0.75)'], {}), '(-0.075, 0, -0.75)\n', (3613, 3631), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3808, 3830), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (3818, 3830), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((3858, 3880), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (3868, 3880), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4006, 4025), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4016, 4025), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4055, 4083), 'panda3d.core.LPoint3f', 'LPoint3f', (['(2.7125)', '(0)', '(0.31875)'], {}), '(2.7125, 0, 0.31875)\n', (4063, 4083), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4149, 4174), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.4)', '(0.4)', '(0.4)'], {}), '(0.4, 0.4, 0.4)\n', (4159, 4174), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4282, 4301), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4292, 4301), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4329, 4346), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4337, 4346), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4507, 4529), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', 
'(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (4517, 4529), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4551, 4573), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (4561, 4573), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4774, 4793), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4784, 4793), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4811, 4836), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0.325)', '(0)', '(-0.45)'], {}), '(0.325, 0, -0.45)\n', (4819, 4836), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((4856, 4881), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (4866, 4881), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5032, 5054), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (5042, 5054), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5076, 5098), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (5086, 5098), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5514, 5533), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (5524, 5533), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5551, 5572), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0.765)', '(0)', '(0)'], {}), '(0.765, 0, 0)\n', (5559, 5572), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5789, 5808), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (5799, 5808), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5826, 5846), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0.45)'], {}), '(0, 0, 0.45)\n', (5834, 5846), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((5866, 5891), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (5876, 5891), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6040, 6062), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (6050, 6062), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6084, 6106), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (6094, 6106), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6378, 6397), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6388, 6397), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6520, 6546), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.275)', '(0)', '(0.195)'], {}), '(-0.275, 0, 0.195)\n', (6528, 6546), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6572, 6596), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.15)', '(1)', '(0.2)'], {}), '(0.15, 1, 0.2)\n', (6582, 6596), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6620, 6637), 'panda3d.core.LPoint3f', 'LPoint3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6628, 6637), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, 
TextNode\n'), ((6843, 6862), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6853, 6862), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6880, 6904), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.12)', '(0)', '(0.31)'], {}), '(-0.12, 0, 0.31)\n', (6888, 6904), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((6924, 6949), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (6934, 6949), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7150, 7172), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (7160, 7172), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7194, 7216), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (7204, 7216), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7451, 7470), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (7461, 7470), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7488, 7512), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.28)', '(0)', '(-0.1)'], {}), '(-0.28, 0, -0.1)\n', (7496, 7512), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7532, 7557), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (7542, 7557), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7712, 7734), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (7722, 7734), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((7756, 7778), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (7766, 7778), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8003, 8022), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (8013, 8022), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8040, 8067), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.275)', '(0)', '(-0.285)'], {}), '(-0.275, 0, -0.285)\n', (8048, 8067), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8087, 8112), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (8097, 8112), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8267, 8289), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (8277, 8289), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8311, 8333), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (8321, 8333), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8563, 8582), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (8573, 8582), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8600, 8626), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.275)', '(0)', '(-0.17)'], {}), '(-0.275, 0, -0.17)\n', (8608, 8626), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8646, 8671), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', 
'(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (8656, 8671), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8821, 8843), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (8831, 8843), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((8865, 8887), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (8875, 8887), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((9122, 9141), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (9132, 9141), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((9159, 9185), 'panda3d.core.LPoint3f', 'LPoint3f', (['(-0.275)', '(0)', '(-0.36)'], {}), '(-0.275, 0, -0.36)\n', (9167, 9185), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((9205, 9230), 'panda3d.core.LVecBase3f', 'LVecBase3f', (['(0.1)', '(0.1)', '(0.1)'], {}), '(0.1, 0.1, 0.1)\n', (9215, 9230), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((9380, 9402), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(1)'], {}), '(0, 0, 0, 1)\n', (9390, 9402), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((9424, 9446), 'panda3d.core.LVecBase4f', 'LVecBase4f', (['(0)', '(0)', '(0)', '(0)'], {}), '(0, 0, 0, 0)\n', (9434, 9446), False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n')] |
# Advent of Code 2020
# Day 21
# Author: irobin591
import os
import doctest
import re
re_entry = re.compile(r'^([a-z ]+) \(contains ([a-z, ]*)\)$')
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read().strip().split('\n')
def part1(input_data):
"""
>>> part1(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
5
"""
    # allergens maps each allergen name to the candidate ingredients that could contain it
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
        if not r:
            raise RuntimeError("Input line does not match the expected format: " + entry)
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = contents
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# print(ingredients)
ingredients_with_allergens = set([y for x in allergens.values() for y in x])
# print(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
return len(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
def part2(input_data):
"""
>>> part2(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
'mxmxvkd,sqjhc,fvjkl'
"""
    # allergens maps each allergen name to the candidate ingredients that could contain it
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
        if not r:
            raise RuntimeError("Input line does not match the expected format: " + entry)
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = list(contents)
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# (allergen, ingredient)
assigned_allergens = []
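    # Resolve allergens by elimination: any allergen with exactly one remaining candidate ingredient
    # is fixed to that ingredient, which is then removed from every other allergen's candidate list,
    # until all candidate lists are empty.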
while sum([len(ingreds) for ingreds in allergens.values()]) > 0:
for allergen in allergens:
if len(allergens[allergen]) == 1:
ingredient = allergens[allergen][0]
assigned_allergens.append((allergen, ingredient))
for allergen2 in allergens:
if ingredient in allergens[allergen2]:
allergens[allergen2].remove(ingredient)
assigned_allergens.sort(key=lambda x: x[0])
return ",".join([x[1] for x in assigned_allergens])
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
pass | [
"os.path.dirname",
"doctest.testmod",
"re.compile"
]
| [((99, 150), 're.compile', 're.compile', (['"""^([a-z ]+) \\\\(contains ([a-z, ]*)\\\\)$"""'], {}), "('^([a-z ]+) \\\\(contains ([a-z, ]*)\\\\)$')\n", (109, 150), False, 'import re\n'), ((2887, 2904), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (2902, 2904), False, 'import doctest\n'), ((174, 199), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (189, 199), False, 'import os\n')] |
"""
Test that escaping characters for HTML is disabled.
"""
import os, subprocess
def test_escape_singlequote(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world ' universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world ' universe\n"
def test_escape_gt(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world > universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world > universe\n"
def test_escape_ampersand(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world & universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world & universe\n"
| [
"subprocess.check_output"
]
| [((800, 930), 'subprocess.check_output', 'subprocess.check_output', (["['pandoc', doc['path'].strpath, '--filter', 'pandoc-mustache', '--to=plain']"], {'universal_newlines': '(True)'}), "(['pandoc', doc['path'].strpath, '--filter',\n 'pandoc-mustache', '--to=plain'], universal_newlines=True)\n", (823, 930), False, 'import os, subprocess\n'), ((1703, 1833), 'subprocess.check_output', 'subprocess.check_output', (["['pandoc', doc['path'].strpath, '--filter', 'pandoc-mustache', '--to=plain']"], {'universal_newlines': '(True)'}), "(['pandoc', doc['path'].strpath, '--filter',\n 'pandoc-mustache', '--to=plain'], universal_newlines=True)\n", (1726, 1833), False, 'import os, subprocess\n'), ((2613, 2743), 'subprocess.check_output', 'subprocess.check_output', (["['pandoc', doc['path'].strpath, '--filter', 'pandoc-mustache', '--to=plain']"], {'universal_newlines': '(True)'}), "(['pandoc', doc['path'].strpath, '--filter',\n 'pandoc-mustache', '--to=plain'], universal_newlines=True)\n", (2636, 2743), False, 'import os, subprocess\n')] |
from flask import Flask, jsonify, request
from w3lib.html import get_base_url
import extruct
import requests
app = Flask(__name__)
def extract_osm_tags(data):
tags = {}
schema_org_type = data.get('@type')
if schema_org_type == 'Restaurant':
tags['amenity'] = 'restaurant'
        # servesCuisine comes from the extracted schema.org data, not from the tags dict being built
        serves_cuisine = data.get('servesCuisine')
        if serves_cuisine:
            cuisine = []
            if 'Burgers' in serves_cuisine:
                cuisine.append('burger')
            if 'Fast Casual' in serves_cuisine:
                tags['amenity'] = 'fast_food'
            if cuisine:
                tags['cuisine'] = ';'.join(cuisine)
elif schema_org_type == 'Hotel':
tags['tourism'] = 'hotel'
elif schema_org_type == 'ExerciseGym':
tags['leisure'] = 'fitness_centre'
elif schema_org_type == 'BankOrCreditUnion':
tags['amenity'] = 'bank'
else:
return {}
address = data.get('address', {}).get('streetAddress')
if address:
tags['addr:full'] = address
address = data.get('address', {}).get('addressLocality')
if address:
tags['addr:city'] = address
address = data.get('address', {}).get('addressRegion')
if address:
tags['addr:state'] = address
address = data.get('address', {}).get('postalCode')
if address:
        tags['addr:postcode'] = address
address = data.get('address', {}).get('addressCountry')
if address:
tags['addr:country'] = address
brand = data.get('brand')
if brand:
tags['brand'] = brand
name = data.get('name')
if name:
tags['name'] = name
telephone = data.get('telephone')
if telephone:
tags['phone'] = telephone
faxNumber = data.get('faxNumber')
if faxNumber:
tags['fax'] = faxNumber
url = data.get('url')
if url:
tags['website'] = url
return tags
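# Illustrative mapping (example values are made up): a schema.org entry like
#   {"@type": "Restaurant", "name": "Foo", "address": {"streetAddress": "1 Main St"}}
# becomes the OSM-style tags {"amenity": "restaurant", "name": "Foo", "addr:full": "1 Main St"}.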
@app.route("/extract")
def extract():
url = request.args.get('url')
if not url:
return jsonify(error="Must specify url parameter"), 400
app.logger.info("Extracting json-ld from %s", url)
r = requests.get(url)
if r.status_code != 200:
app.logger.info("HTTP %s from %s", r.status_code, url)
return jsonify(error="Error fetching url"), 502
base_url = get_base_url(r.text, r.url)
data = extruct.extract(r.text, base_url=base_url, syntaxes=["json-ld"])
data = data.get('json-ld')
output = {}
suggested_tags = {}
for entry in data:
suggested_tags.update(extract_osm_tags(entry))
output = {
'status': {
'url': url,
'success': len(suggested_tags) > 0,
},
'suggested_tags': suggested_tags,
}
if request.args.get('include_extracted', type=bool):
output['extracted'] = data
return jsonify(output)
| [
"flask.request.args.get",
"flask.Flask",
"requests.get",
"w3lib.html.get_base_url",
"extruct.extract",
"flask.jsonify"
]
| [((116, 131), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (121, 131), False, 'from flask import Flask, jsonify, request\n'), ((1866, 1889), 'flask.request.args.get', 'request.args.get', (['"""url"""'], {}), "('url')\n", (1882, 1889), False, 'from flask import Flask, jsonify, request\n'), ((2036, 2053), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2048, 2053), False, 'import requests\n'), ((2219, 2246), 'w3lib.html.get_base_url', 'get_base_url', (['r.text', 'r.url'], {}), '(r.text, r.url)\n', (2231, 2246), False, 'from w3lib.html import get_base_url\n'), ((2258, 2322), 'extruct.extract', 'extruct.extract', (['r.text'], {'base_url': 'base_url', 'syntaxes': "['json-ld']"}), "(r.text, base_url=base_url, syntaxes=['json-ld'])\n", (2273, 2322), False, 'import extruct\n'), ((2648, 2696), 'flask.request.args.get', 'request.args.get', (['"""include_extracted"""'], {'type': 'bool'}), "('include_extracted', type=bool)\n", (2664, 2696), False, 'from flask import Flask, jsonify, request\n'), ((2745, 2760), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (2752, 2760), False, 'from flask import Flask, jsonify, request\n'), ((1922, 1965), 'flask.jsonify', 'jsonify', ([], {'error': '"""Must specify url parameter"""'}), "(error='Must specify url parameter')\n", (1929, 1965), False, 'from flask import Flask, jsonify, request\n'), ((2162, 2197), 'flask.jsonify', 'jsonify', ([], {'error': '"""Error fetching url"""'}), "(error='Error fetching url')\n", (2169, 2197), False, 'from flask import Flask, jsonify, request\n')] |
"""
A simple Python DAG using the Taskflow API.
"""
import logging
import time
from datetime import datetime
from airflow import DAG
from airflow.decorators import task
log = logging.getLogger(__name__)
with DAG(
dag_id='simple_python_taskflow_api',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['airflow101'],
) as dag:
@task(task_id="hello_message")
def say_hello():
"""Print a hello message"""
print("Hello, World!")
hello_task = say_hello()
@task(task_id="go_to_sleep")
def sleep_for_1():
"""Go to sleep"""
time.sleep(1)
sleeping_task = sleep_for_1()
hello_task >> sleeping_task
| [
"logging.getLogger",
"datetime.datetime",
"time.sleep",
"airflow.decorators.task"
]
| [((177, 204), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import logging\n'), ((381, 410), 'airflow.decorators.task', 'task', ([], {'task_id': '"""hello_message"""'}), "(task_id='hello_message')\n", (385, 410), False, 'from airflow.decorators import task\n'), ((535, 562), 'airflow.decorators.task', 'task', ([], {'task_id': '"""go_to_sleep"""'}), "(task_id='go_to_sleep')\n", (539, 562), False, 'from airflow.decorators import task\n'), ((620, 633), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (630, 633), False, 'import time\n'), ((300, 320), 'datetime.datetime', 'datetime', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (308, 320), False, 'from datetime import datetime\n')] |
from . import model
import numpy as np
from scipy import special, stats
class RoyleNicholsModel(model.UnmarkedModel):
def __init__(self, det_formula, abun_formula, data):
self.response = model.Response(data.y)
abun = model.Submodel("Abundance", "abun", abun_formula, np.exp, data.site_covs)
det = model.Submodel("Detection", "det", det_formula, special.expit, data.obs_covs)
self.submodels = model.SubmodelDict(abun=abun, det=det)
def negloglik(self, x, mod, K):
x = np.array(x)
beta_abun = x[mod["abun"].index]
beta_det = x[mod["det"].index]
y = mod.response.y
N, J = y.shape
lam = mod["abun"].predict(beta=beta_abun, interval=False)
r = mod["det"].predict(beta=beta_det, interval=False).reshape(N, J)
q = 1 - r
nll = 0.0
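        # Royle-Nichols marginal likelihood: for each site i the latent abundance k is summed out,
        # L_i = sum_k Poisson(k | lam_i) * prod_j Bernoulli(y_ij | 1 - (1 - r_ij)^k),
        # with k running from the observed minimum Kmin[i] up to the truncation bound K.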
for i in range(N):
kvals = range(int(mod.response.Kmin[i]), int(K)+1)
f = stats.poisson.pmf(kvals, lam[i])
ymat = np.tile(y[i,], (len(kvals), 1))
qmat = np.tile(q[i,], (len(kvals), 1))
kmat = np.tile(kvals, (J, 1)).transpose()
pmat = 1 - qmat**kmat
g = stats.binom.logpmf(ymat, 1, pmat).sum(axis=1)
fg = f * np.exp(g)
nll -= np.log(fg.sum())
return nll
def simulate(self):
N, J = self.response.y.shape
lam = self.predict("abun", interval=False)
q = 1 - self.predict("det", interval=False).reshape(N, J)
z = np.random.poisson(lam, N)
zrep = np.tile(z, (J,1)).transpose()
p = 1 - q**zrep
y = np.empty((N, J))
for i in range(N):
y[i,] = np.random.binomial(1, p[i,], J)
return y
| [
"numpy.tile",
"scipy.stats.poisson.pmf",
"numpy.random.poisson",
"numpy.exp",
"numpy.array",
"scipy.stats.binom.logpmf",
"numpy.empty",
"numpy.random.binomial"
]
| [((522, 533), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (530, 533), True, 'import numpy as np\n'), ((1524, 1549), 'numpy.random.poisson', 'np.random.poisson', (['lam', 'N'], {}), '(lam, N)\n', (1541, 1549), True, 'import numpy as np\n'), ((1631, 1647), 'numpy.empty', 'np.empty', (['(N, J)'], {}), '((N, J))\n', (1639, 1647), True, 'import numpy as np\n'), ((948, 980), 'scipy.stats.poisson.pmf', 'stats.poisson.pmf', (['kvals', 'lam[i]'], {}), '(kvals, lam[i])\n', (965, 980), False, 'from scipy import special, stats\n'), ((1695, 1726), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p[i,]', 'J'], {}), '(1, p[i,], J)\n', (1713, 1726), True, 'import numpy as np\n'), ((1255, 1264), 'numpy.exp', 'np.exp', (['g'], {}), '(g)\n', (1261, 1264), True, 'import numpy as np\n'), ((1565, 1583), 'numpy.tile', 'np.tile', (['z', '(J, 1)'], {}), '(z, (J, 1))\n', (1572, 1583), True, 'import numpy as np\n'), ((1103, 1125), 'numpy.tile', 'np.tile', (['kvals', '(J, 1)'], {}), '(kvals, (J, 1))\n', (1110, 1125), True, 'import numpy as np\n'), ((1188, 1221), 'scipy.stats.binom.logpmf', 'stats.binom.logpmf', (['ymat', '(1)', 'pmat'], {}), '(ymat, 1, pmat)\n', (1206, 1221), False, 'from scipy import special, stats\n')] |
#Contains the functions needed to process both chords and regularized beards
# proc_chords is used for chords
#proc_beard_regularize for generating beards
#proc_pdf saves pdfs of a variable below cloud base
#Both have a large overlap, but I split them in two to keep the one script from getting too confusing.
import numpy as np
import math
from netCDF4 import Dataset
import os
import time as ttiimmee
from scipy.interpolate import interp1d
from scipy.interpolate import interp2d
#from scipy.interpolate import griddata
#from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
import sys
#sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/")
#from unionfind import UnionFind
from cusize_functions import *
#import matplotlib.pyplot as plt
import pandas as pd
import gc
import glob
import xarray as xr
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank
#want to keep the automatic x and y calculation
#Scaling shouldn't be needed, as all chord properties should be independent of wind direction (right?)
#Similarly, no base definition is needed, all values are relative to cloud base
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#Changing 3D output
#Default is now to always go over x and y directions
#TODO
#plot_flag disabled for the time being
def proc_chords( date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output='/data/testbed/lasso/chords/',
data_dim_flag=1,
base_percentile = 25,
special_name='',
chord_times = 0,
N_it_min=0,
N_it_max=1e9):
    # plot_curtains_flag: 0 nothing, 1 plots pre-regularization plots, currently disabled
    # data_dim_flag: 1 = column, 3 = 3D snapshot
    # chord_times: 0 use Neil's values, 1 use values that fit the model output exactly with no gap possible
    # directory_input         = '/data/testbed/lasso/sims/' #+date
    # N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # N_it_min = start number of iterables, 3D timesteps or column files. Only really makes sense for 3D to avoid some weird initial fields.
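    # Example call (illustrative settings, mirroring the defaults above):
    #   proc_chords(date_str='20160611', directory_input='/data/testbed/lasso/sims/',
    #               directory_output='/data/testbed/lasso/chords/', data_dim_flag=3, N_it_max=10)
    # which saves the detected chord properties as a pandas pickle in directory_output.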
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded
dx = 25.0
date = date_str
n_percentiles = 7 #Number of percentiles
percentiles = np.array([5,10,35,50,65,90,95])
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 1200*100 #Made a 100 times longer
cell_min = 3 #Minimal number of cells needed per chord
# #1D clustering parameters,
#set super strict, but goes on for a loooong time as well
if chord_times == 1:
t_gap = 0. #should be pretty strict, no gaps allowed!
t_min = 0.0
t_max = 1e9
cell_min = 3 #Minimal number of cells needed per chord
ql_min = 1e-5 #value used to determine existence of cloud
z_min = 10 #Index of minimum z_vlvl of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
filename_qt = directory_input+date+'/qt.nc'
filename_thl = directory_input+date+'/thl.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
file_thl = Dataset(filename_thl,read='r')
file_qt = Dataset(filename_qt,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
n_chords = 0
#I will try lists first, which I will then convert to arrays in the end before saving in pandas
chord_timesteps = []
chord_length = []
chord_duration = []
chord_time = []
chord_height = [] #percentile of cloud base
chord_w = []
chord_w_up = [] #mean over updrafts
chord_w_base = []
chord_w_star = []
chord_thl_star = []
chord_qt_star = []
chord_thl = []
chord_thl_25 = []
chord_thl_75 = []
chord_qt = []
chord_qt_25 = []
chord_qt_75 = []
chord_w_flux = [] #Sum of w below
#Coming next
chord_w_per = np.zeros([0,n_percentiles])
chord_w_per_up = np.zeros([0,n_percentiles])
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #Then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
thl_prof = file_prof['thl'][:,:]
qt_prof = file_prof['qt'][:,:]
nz_prof = w2.shape[1]
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
print('dz: ',dz)
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
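    #The approach: saturation vapour pressure from a Magnus-type formula gives relative humidity,
    #the dew point follows from inverting the Magnus formula (A=17.27, B=237.7),
    #and the LCL height is approximated as roughly 125 m per K of dew point depression.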
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
thl_2d = file_col.variables['thl'][:]
thl_2d = thl_2d.transpose()
qt_2d = file_col.variables['qt'][:]
qt_2d = qt_2d.transpose()
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
#lets try saving memory by closing files
#file_col.close()
#The needed cbl height
cbl_1d = t_1d*0
#The needed surface_bouyancy_flux
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d = t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
#to get anomalies of thl and qt we subtract the closet mean profile
for tt in range(len(time_prof)):
#globals().update(locals())
tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = thl_prof[tt,:]
#because the vectors don't perfectly align
thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = qt_prof[tt,:]
#because the vectors don't perfectly align
qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
qt_3d = grab_3d_field(file_qt ,it,'qt')
thl_3d = grab_3d_field(file_thl ,it,'thl')
                #Here we reshape the 3D fields into 2D slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
qt_2d = np.array(qt_3d.reshape((nz,nx*ny)))
thl_2d = np.array(thl_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field; it used to be an either/or, now we just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
qt_3d = np.transpose(qt_3d, (0, 2, 1))
thl_3d = np.transpose(thl_3d, (0, 2, 1))
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))])
qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del thl_3d
del qt_3d
#hopefully this helps
gc.collect()
#Getting anomalies of thl and qt
qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose()
thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose()
#to get the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], str(time_resolution)[:4] )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
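                #Each of the 2*nx*ny columns is treated as if advected past a fixed point at V_ref,
                #so the snapshot becomes a pseudo time series with a spacing of dx/V_ref seconds.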
#dt_1d = t_1d*0
#dt_1d[1:] = t_1d[1:]-t_1d[:-1]
else:
                #If no clouds are present we pass very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
thl_2d = np.zeros((nz,1))
qt_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting lowest cloud cell is within 300 m of CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using nans that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>1e-6)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
#Now find c base lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
### Clustering 1D
#Now we simply go through all cloudy timesteps and detect chords
        #If they fulfil the chord time requirements and have a number of values which fulfills cell_min they are counted as a chord
        #and their properties are calculated immediately
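        #In short: neighbouring cloudy columns are merged as long as they are directly adjacent or the
        #time gap between them stays below t_gap; the merged segment counts as a chord if it spans more
        #than cell_min cells, its duration lies between t_min and t_max, and its lowest cloud base lies
        #above model level 4.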
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
chord_idx_list = []
        while t_cloudy_idx < len(cbl_cl_idx)-1:  # a cap on the number of detections (e.g. "and n_curtain<100*it") could be added here
#print(t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
#Originally only cared if they fulfilled cloud criteria, but now I also hard coded that neighboring cells always count
##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
        #Checking if it fulfils chord criteria regarding time
#we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
ch_duration = 0
if ch_duration>t_min and ch_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_chord_end]))
#list of relevant chord indexes
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
### Now appending chord properties
chord_timesteps.append(t_chord_end-t_chord_begin)
chord_duration.append(ch_duration)
chord_length.append(ch_duration*V_ref)
tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz
chord_height.append(tmp_base_height) #25th percentile of cloud base
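                    #Convective scaling: w* = (z_b * surface buoyancy flux)^(1/3) with z_b the chord base height,
                    #and thl* and qt* are the corresponding surface-flux scales, flux/w*.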
surf_b_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
w_star = (tmp_base_height*surf_b_flux)**(1./3.)
surf_qt_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
qt_star = surf_qt_flux/w_star
surf_thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
thl_star = surf_thl_flux/w_star
chord_w_star.append(w_star )
chord_thl_star.append(thl_star )
chord_qt_star.append(qt_star )
chord_w_base.append(np.mean(w_2d[cl_base[ch_idx_l],ch_idx_l]))
chord_w.append(np.mean(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_thl.append(np.mean(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
                    #get one quarter and three quarters of the cloud-base height
cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.)
cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.)
#print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0])
chord_thl_25.append(np.mean(thl_2d[cl_base_25_idx,ch_idx_l]))
chord_thl_75.append(np.mean(thl_2d[cl_base_75_idx,ch_idx_l]))
chord_qt.append(np.mean(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_qt_75.append(np.mean(qt_2d[cl_base_75_idx,ch_idx_l]))
chord_qt_25.append(np.mean(qt_2d[cl_base_25_idx,ch_idx_l]))
chord_w_flux.append(np.sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l]
chord_w_up.append(np.mean(w_base_vec[w_base_vec>0.0]))
tmp_w_per = np.percentile(w_base_vec,percentiles)
if len(w_base_vec[w_base_vec>0.0])>0:
tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles)
else:
tmp_w_per_up = np.zeros(n_percentiles)
                        tmp_w_per_up[:] = np.nan
chord_w_per = np.vstack([chord_w_per,tmp_w_per])
                    chord_w_per_up = np.vstack([chord_w_per_up,tmp_w_per_up])
if data_dim_flag==1:
chord_time.append(np.mean(t_1d[ch_idx_l]))
if data_dim_flag==3:
chord_time.append(time_prof[it])
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('iterable: ',it)
print('n_chords: ',n_chords)
print('number of time points included: ',len(cbl_cl_idx))
    #Convert the lists to arrays before saving in pandas
chord_timesteps=np.asarray(chord_timesteps)
chord_duration =np.asarray(chord_duration)
chord_length =np.asarray(chord_length)
chord_height =np.asarray(chord_height)
chord_w_base =np.asarray(chord_w_base)
chord_w_star =np.asarray(chord_w_star)
chord_thl_star =np.asarray(chord_thl_star)
chord_qt_star =np.asarray(chord_qt_star)
chord_w =np.asarray(chord_w)
chord_w_up =np.asarray(chord_w_up)
chord_w_flux =np.asarray(chord_w_flux)
chord_thl =np.asarray(chord_thl)
chord_thl_25 =np.asarray(chord_thl_25)
chord_thl_75 =np.asarray(chord_thl_75)
chord_qt =np.asarray(chord_qt)
chord_qt_25 =np.asarray(chord_qt_25)
chord_qt_75 =np.asarray(chord_qt_75)
chord_time =np.asarray(chord_time)
#Saving
print('all chords: ',len(chord_duration))
save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords)
filename_chord_panda = directory_output+save_string_base+'.pkl'
data_for_panda = list(zip(chord_timesteps,chord_duration,chord_length,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up,
chord_w_star,chord_thl_star,chord_qt_star,
chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75))
df = pd.DataFrame(data = data_for_panda, columns=['timesteps','duration','length','height','w_base','w','w_flux','time','w up','w per','w per up',
'w star','thl star','qt star',
'thl','thl 25','thl 75','qt','qt 25','qt 75'])
df.to_pickle(filename_chord_panda)
time_end = ttiimmee.time()
print('total run time of proc_chords in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print('chordlength properties saved as panda in ',filename_chord_panda)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#If the input data is a 3D field it will always go over x and y directions
#Two different scale_flags added to rotate the curtain to point upwind.
#TODO
#plot_flag disabled for the time being
def proc_beard_regularize(reg_var = 'w',
date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output = 'data_curtains/',
data_dim_flag=1,
base_smoothing_flag=2,
plot_curtains_flag = 0,
base_percentile = 25,
special_name='',
scale_flag=2,
chord_times = 0,
anomaly_flag = 0,
N_it_max=1e9,
N_it_min=0,
size_bin_flag=0,
N_bins=12,
bin_size = 250,
curtain_extra = 1.0,
chord_max = 1e9,
boundary_scaling_flag = 0
):
# reg_var = variable that will be regularized
# plot_curtains_flag: 0 nothing, 1 plots pre and post regularization plots of reg_var
# data_dim_flag: 1 = column, 3 = 3D snapshot
    # time_slice_curtain: 0 only puts out the total sums, 1: adds a separate output for each time slice, is needed for scale_flag
    # scale_flag: If 0, nothing, if 1, it scales the output by u/sqrt(u^2+v^2) and flips the vector if u>0. Is set to 0 if data_dim_flag==1
    #             1 the ref_lvl used is determined from the mean cloud base height
    #             2, similar to 1 but now using a profile
    #
    # base_smoothing_flag: 0 use mix of percentile and cloud base as done by Neil, 1: smooth out base after setting it with running average 2: just use percentile defined by base_percentile
    # base_percentile: percentile used to find chordlength bottom
    # chord_times: 0 use Neil's values, 1 use values that fit the model output exactly with no gap possible
    # anomaly_flag: 0 use reg_var as it is. 1 use reg_var - profile. Works easiest for 3d output, the 1d version needs to use the closest mean profile
    # directory_input         = '/data/testbed/lasso/sims/' #+date
    # N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # size_bin_flag bins the beards by their chord_length into N_bins bins of 250 meters width. The lowest bin should be empty, because we only calculate curtains when at least curtain_min is used
# curtain_extra: Regularized chord length before and after in the curtain, default is 1
# chord_max: Maximum number of chords. If data_dim_flag=3 it will jump to the y direction when chord_max/2 is reached
# boundary_scaling_flag: 0 nothing, 1 uses the surface fluxes and cloud base height to calculate either w/w*, thl'/thl*, or qt'/qt*
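    # Example call (illustrative settings, mirroring the defaults above):
    #   proc_beard_regularize(reg_var='w', date_str='20160611',
    #                         directory_input='/data/testbed/lasso/sims/',
    #                         data_dim_flag=3, anomaly_flag=1, N_it_max=10)
    # which accumulates cloud-base-regularized curtains of reg_var and pickles them to directory_output.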
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #Is recalculated from the profile file later on
dx = 25.0
date = date_str
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 120000
cell_min = 3 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed to convert into a curtain
# #1D clustering parameters,
#set super strict
if chord_times == 1:
t_gap = 0.#No gaps allowed!
t_min = 0
t_max = 1e9
cell_min = 10 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed per curtain
#value used to determine existence of cloud
ql_min = 1e-5
z_min = 10 #Index of minimum z_vlvl of the cbl
#z_min = 0 #Index of minimum z_vlvl of the cbl
#Flag clean up
if data_dim_flag==1:
scale_flag=0
#Creating dictionary to save all properties
settings_dict = {
'reg_var': reg_var,
'date_str':date_str,
'directory_input':directory_input,
'data_dim_flag':data_dim_flag,
'base_smoothing_flag':base_smoothing_flag,
'plot_curtains_flag' :plot_curtains_flag,
'base_percentile':base_percentile,
'special_name':special_name,
'scale_flag':scale_flag,
'chord_times':chord_times,
'anomaly_flag':anomaly_flag,
'N_it_max':N_it_max,
'N_it_min':N_it_min,
'size_bin_flag':size_bin_flag,
'bin_size':bin_size,
'N_bins':N_bins,
'curtain_extra':curtain_extra
}
#moved to an inner function to avoid issues with global and local variables
def func_curtain_reg(input_2d_field):
#function regularizes to cloud base
        #2019-03-20: added smoother to hopefully avoid impact of harsh jumps
#2019-03-28: Added simplified version for base_smoothing_flag == 2 which gets rid of 1D pre interpolation
#I originally used interp2d, tried griddata but it was a lot slower
#Calculating the regularized t axis but for original resolution
#It is expected to go a bit beyond -1.5 and 1.5, total width defined by curtain_extra
#takes the original time vector, subtracts it by mean time, then scales it by 1/(time_end_chord-time_beg_chord)
t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2.
t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord)
#Now we calculate the new regularized grid with the correct vertical but low/original horizontal/time resolution
#mesh_t_low_z_high_x,mesh_t_low_z_high_z = np.meshgrid(t_reg_orig,z_reg_mid) #seems not to be needed
var_t_low_z_high = np.zeros([curtain_cells,n_z_reg])
#introducing z_idx_base vector
#Assigning reference cloud base where no cloud present
z_idx_base=cl_base*1.0+0.0
z_idx_base[:] = z_idx_base_default
for i in range(idx_beg_chord,idx_end_chord):
if i>idx_beg_chord-1 and i<idx_end_chord and cl_base[i]<cbl_1d[i]:
z_idx_base[i] = cl_base[i]
#Here the smoother comes into play:
#We started with a simple 5 cell running mean,
            #But now we are making it a function of the chord length, using a running mean over 10% of the chord
if base_smoothing_flag ==1:
z_idx_base_smooth = z_idx_base*1.0
N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1)
for i in range(idx_beg_chord-N,idx_end_chord+N):
z_idx_base_smooth[i] = sum(z_idx_base[i-N:i+N])/(2*N)
z_idx_base[:] = z_idx_base_smooth[:]
if base_smoothing_flag==2:
#just put the percentile back
z_idx_base[:] = z_idx_base_default
#default version for variable base height
if base_smoothing_flag<2:
#Now for each of the columns of the original curtain a vertical interpolation is done
for i in range(idx_beg_curtain,idx_end_curtain):
                #assigning column value
var_orig_col = input_2d_field[:,i]
#Regularizing the z axes so that cloud base is at 1
d_z_tmp = 1.0/z_idx_base[i]
nz = var_orig_col.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
                #Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_col = np.hstack([var_orig_col[0],var_orig_col])
#1D vertical interpolation to get the right columns and asign them one by one to w_x_low_z_high
#f = interp1d(z_reg_orig, var_orig_col, kind='next')
f = interp1d(z_reg_orig, var_orig_col, kind='nearest')
try:
var_reg_inter = f(z_reg_mid)
except:
print(z_idx_base[i])
print(z_reg_orig)
print(z_reg_mid)
var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter
            #Now that var_t_low_z_high is filled we have to interpolate in 2D onto the full regularized grid
#print(t_reg_orig.shape,z_reg_mid.shape)
f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
#constant base height version
if base_smoothing_flag==2:
#Regularizing the z axes so that cloud base is at 1, since z_idx_base is the same everywhere I just use idx_beg_curtain as one.
i=idx_beg_curtain
d_z_tmp = 1.0/z_idx_base[i]
var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain]
nz = var_orig_2d.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d])
f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
return var_curtain
#Creating regularized grid.
d_reg = 0.005
n_z_reg = int(1.5/d_reg)
n_t_reg = int((1+2*curtain_extra)/d_reg)
t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1)
t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg)
z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1)
z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg)
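    #With d_reg = 0.005 this gives n_z_reg = 300 vertical cells covering 0 to 1.5 cloud-base heights and,
    #for the default curtain_extra = 1.0, n_t_reg = 600 cells covering -1.5 to 1.5 chord lengths.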
mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid)
var_curtain = np.zeros([n_t_reg,n_z_reg])
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_curtain_up = 0
n_curtain_dw = 0
if size_bin_flag==1:
N_bins = 12
n_curtain_bin = np.zeros([N_bins])
n_curtain_bin_up = np.zeros([N_bins])
n_curtain_bin_dw = np.zeros([N_bins])
var_curtain_bin_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_up_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_dw_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
mid_bin_size = np.linspace(125,-125+N_bins*250,N_bins)
print('mid_bin_size',mid_bin_size)
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filename_var,read='r')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
extra_string = ''
n_chords = 0
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #Then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#Setting curtains for var
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_chord = 0
n_curtain_up = 0
n_curtain_dw = 0
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
print('n_curtain: ',n_curtain)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
print('t_1d',t_1d)
            #Load the var file, even if it means that we load w_2d or ql_2d twice
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
#Here we have to do all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field, used to be an either/or, now we just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data, which divided by the grid spacing gives us a fake time resolution
                #we use the calculated cbl+300 meter or lcl as the reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution )
print('ref_lvl used to determine reference winds',ref_lvl )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
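                #This space-to-time conversion is essentially Taylor's frozen-turbulence hypothesis: neighbouring columns are dx/V_ref seconds apart,
                #e.g. (illustrative numbers) dx = 25 m advected at V_ref = 5 m/s gives a fake time resolution of 5 s per column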
else:
                #If no clouds are present we pass a very short empty field over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
            #The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
        ### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
        #Scaling between x and y is calculated here if required. It is skipped if there are fewer than 4 timesteps, which is what is assigned when no clouds are present
if scale_flag > 0 and t_1d.shape[0]>3:
#calculate the profiles of u and v and their scaling
u_ref_prof = file_prof['u'][it,:]
v_ref_prof = file_prof['v'][it,:]
V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2)
scaling_factor_x_prof = u_ref_prof/V_ref_prof
scaling_factor_y_prof = v_ref_prof/V_ref_prof
#Using the mean cloud base height as the reference lvl
ref_idx = np.mean(cl_base[cbl_cl_idx])
if scale_flag == 1:
                #the scaling factors are simply taken at the reference level
scaling_factor_x = scaling_factor_x_prof[int(ref_idx)]
scaling_factor_y = scaling_factor_y_prof[int(ref_idx)]
print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx))
if scale_flag == 2:
                #Regularizing the scaling profiles and interpolating them onto the regularized z axis
d_z_tmp = 1.0/ref_idx
nz = scaling_factor_x_prof.shape[0]
z_reg_orig_top = d_z_tmp*nz-d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
                #Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof])
scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof])
                #1D vertical interpolation to get the right columns and assign them one by one to w_x_low_z_high
f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')
f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')
scaling_factor_x_inter = f_x(z_reg_mid)
scaling_factor_y_inter = f_y(z_reg_mid)
print('Scaling flag 2:, mean scaling_factor_x_inter: ',np.mean(scaling_factor_x_inter),
' mean scaling_factor_y_inter: ',np.mean(scaling_factor_y_inter))
### Clustering 1D
#Now we simply go through all cloudy timesteps
#As long as the difference to the next cloudy timestep is lower than t_gap it counts as the same cloud
        #As an additional constraint, if the cloudy cells are right next to each other they are always counted as consecutive, no matter the time distance between them.
        #if the difference is larger than 20s (t_gap) the cloud is over, and a chordlength is created which is a list of all timesteps that belong to that chordlength
        #However if the duration of the chordlength is lower than t_min or higher than t_max seconds it isn't kept
        #I added an additional constraint that each chord must include at least cell_min cells, because it is possible to get
        #small chord lengths longer than t_min which are mostly gaps.
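        #Example (with t_gap = 20 s): cloudy columns at t = 100, 112 and 125 s are merged into a single chord,
        #while a next cloudy column at t = 300 s starts a new one (each chord still has to pass the duration and cell-count checks).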
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
while t_cloudy_idx < len(cbl_cl_idx)-1 and n_chords<chord_max:
#print('t_chord_begin',t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
#print('t_chord_end',t_chord_end)
            #Checking if it fulfils the chord criteria regarding time
#we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
chord_duration = 0
if chord_duration>t_min and chord_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx]))
#Here we start the interpolation stuff
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
                    #Calculate the beginning and end of the curtain, we add a bit to each side to make interpolation easy
idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).argmin()-1
idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).argmin()+2
idx_end_curtain = min(idx_end_curtain,nt-1)
time_beg_curtain = t_1d[idx_beg_curtain]
time_end_curtain = t_1d[idx_end_curtain]
chord_cells = t_chord_end-t_chord_begin
curtain_cells = idx_end_curtain-idx_beg_curtain
                    #If the curtain has more than curtain_min cells and the curtain tail does not extend beyond the end of the 2d field or before its beginning
                    #I added a 2 cell buffer at the beginning and end, because for the interpolation a bit of overlap is used.
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and len(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_min-1:
n_curtain += 1
                        #First thing to do is calculate the chord base using the 25th percentile in agreement with Neil
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile))
#Regularized curtains, I am too lazy to pass on all my variables to func_curtain_reg so I instead made it a nested function
var_curtain_tmp = (func_curtain_reg(var_2d)).transpose()
if boundary_scaling_flag == 1:
#Now adding the boundary scaling using w*
surf_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
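                            #w_star is essentially the Deardorff convective velocity scale (base height times surface buoyancy flux)**(1/3),
                            #e.g. (illustrative numbers) base_height = 1000 m and surf_flux = 5e-3 m2/s3 give w_star ~ 1.7 m/s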
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
                                boundary_scaling = thl_flux/w_star #scale thl with its own surface flux rather than the buoyancy flux
var_curtain_tmp = var_curtain_tmp/boundary_scaling
#Finally add it to the mean one and track one more curtain
#detecting if chord base has a positive or negative w, then adds to the sum of up or downdraft chords
w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]]
#print(w_tmp)
#Scaling is now added here,
                        #Things are applied twice so that after dividing by n it comes out fine
                        #We assume here that n_x and n_y are roughly the same
#Could be made cleaner later on
if scale_flag>0 and data_dim_flag==3:
if scale_flag==1:
#find out if we need scaling_factor_x or y by seeing if we are in the first or second half
if idx_end_curtain<nt/2:
scaling_factor = 2*scaling_factor_x
else:
scaling_factor = 2*scaling_factor_y
if scaling_factor>0:
var_curtain_tmp = var_curtain_tmp[::-1,:]
var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp
if scale_flag==2:
if idx_end_curtain<nt/2:
scaling_factor_prof = 2*scaling_factor_x_inter
else:
scaling_factor_prof = 2*scaling_factor_y_inter
for n_prof in range(scaling_factor_prof.shape[0]):
if scaling_factor_prof[n_prof]>0:
var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof]
var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof]
#Now adding the var_curtain_tmp to the sums
var_curtain_sum = var_curtain_sum+var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_up += 1
var_curtain_up_sum += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_dw += 1
var_curtain_dw_sum += var_curtain_tmp
else:
print('wtf how is this zero: ',np.mean(w_tmp),w_tmp)
#globals().update(locals())
###############################################################################################################################################
################## SIZE BINNING ##############################################################################################################
###############################################################################################################################################
if size_bin_flag:
                            #getting V_ref if data_dim_flag==1. It is calculated directly from the cloud base speeds
if data_dim_flag==1:
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
chord_length = ch_duration*V_ref
#if scale_flag==0:
# scaling_factor=1.
#find index of bin close to mid size bin
bin_idx = np.where(np.abs(chord_length-mid_bin_size)<125)[0]
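                            #The bins are 250 m wide and centred at 125, 375, ..., 2875 m; e.g. (illustrative numbers) a 120 s chord
                            #advected at V_ref = 8 m/s is ~960 m long and ends up in the bin centred at 875 m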
if bin_idx.size>0:
#print('bin_idx,chord_length',bin_idx,chord_length)
n_curtain_bin[bin_idx] += 1
var_curtain_bin_sum[bin_idx,:,:] = var_curtain_bin_sum[bin_idx,:,:] + var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_bin_up[bin_idx] += 1
var_curtain_bin_up_sum[bin_idx,:,:] += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_bin_dw[bin_idx] += 1
var_curtain_bin_dw_sum[bin_idx,:,:] += var_curtain_tmp
else:
print('wtf how is this zero: ',np.mean(w_tmp),w_tmp)
##############################################################################################################################
#PLOTS
##############################################################################################################################
#If the plot flag is set the pre regularization curtains are plotted.
if plot_curtains_flag ==1:
print('plotting not implemented yet')
##############################################################################################################################
#switching to y direction if half of max chords reached
##############################################################################################################################
if n_chords == int(chord_max/2):
t_cloudy_idx = int(len(cbl_cl_idx)/2)
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('curtain processing:',(time3-time2)/60.0,'minutes')
print(':')
print(':')
print(':')
time_end = ttiimmee.time()
print('total run time of proc_beard_regularize in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print(':')
#Replacing saving with xarray
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time'), var_curtain_sum.transpose()/n_curtain),
reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_sum.transpose()/n_curtain_up),
reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_sum.transpose()/n_curtain_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid})
xr_dataset[reg_var].attrs['n']=n_curtain
xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up
xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw
xr_dataset.attrs = settings_dict
#Making save string
save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra))
if data_dim_flag==3:
save_string_base = save_string_base+'_sf'+str(scale_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain)
save_string = directory_output+ reg_var+save_string_base +'.nc'
xr_dataset.to_netcdf(save_string)
print('saved beard data to '+save_string)
if size_bin_flag==1:
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time','length'), var_curtain_bin_sum.transpose()/n_curtain_bin),
reg_var+'_up':(('regularized height', 'regularized time','length'), var_curtain_bin_up_sum.transpose()/n_curtain_bin_up),
reg_var+'_dw':(('regularized height', 'regularized time','length'), var_curtain_bin_dw_sum.transpose()/n_curtain_bin_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'length':mid_bin_size})
xr_dataset[reg_var].attrs['n'] =n_curtain_bin
xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up
xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw
xr_dataset.attrs = settings_dict
save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc'
xr_dataset.to_netcdf(save_string)
print('saved size binned beards to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
return
#A simple script which calculates a histogram below the cloud base and saves it
#I will try to keep it at least somewhat general with a flexible variable
def proc_pdf(reg_var = 'w',
date_str='20160611',
directory_input ='/data/testbed/lasso/sims/',
directory_output ='data_pdfs/',
data_dim_flag=3,
special_name='',
N_it_max=1e9,
N_it_min=0,
anomaly_flag =0,
N_bins=400,
base_percentile = 25,
boundary_scaling_flag = 1,
range_var = [-10,10] ):
    #We are starting out with histograms of w from -10 to 10, with the spacing set by N_bins (0.05 for the default 400 bins)
var_hist_sum=np.zeros(N_bins)
date = date_str
#value used to determine existence of cloud
ql_min = 1e-5
z_min = 10 #Index of minimum z_vlvl of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filename_var,read='r')
filename_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0]
#filename_prof=directory_input+date+'/testbed.default.0000000.nc'
if date=='bomex':
filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
extra_string = ''
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #Then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6) #cap on the profile grid, nz is only defined for 3D input
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
            #Load the var file, even if it means that we load w_2d or ql_2d twice
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
#Here we have to do all the fuckery to turn the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field, used to be an either/or, now we just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#This might save a bit of memory
if reg_var == 'w':
var_2d = w_2d
if reg_var == 'ql':
var_2d = ql_2d
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#fake t vector,
t_1d = np.linspace(0,2*nx*ny,2*nx*ny)
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data, which divided by the grid spacing gives us a fake time resolution
                #we use the calculated cbl+300 meter or lcl as the reference height
ref_lvl = cbl_1d_prof[it]
else:
                #If no clouds are present we pass a very short empty field over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
            #The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
        ### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
if len(cbl_cl_idx)>0:
#Now calculating the var at cloud base
var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx]
#If boundary scaling is used, the variable is scaled accordingly
#Only called if there are any clouds
if boundary_scaling_flag == 1 and len(cbl_cl_idx)>1:
                #First thing to do is calculate the chord base using the 25th percentile in agreement with Neil
if data_dim_flag==3:
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile))
                # Can't think of a good way to do this, will throw up an error for the time being.
if data_dim_flag==1:
print('sorry, but I havent implemented star scaling for 1d data')
sys.exit()
#Now adding the boundary scaling using w*
#Is a bit overcooked currently as it only works with 3D data and thus all surface fluxes are the same everywhere.
surf_flux = np.mean(bflux_s_1d)
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d)
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d)
                    boundary_scaling = thl_flux/w_star #scale thl with its own surface flux rather than the buoyancy flux
var_cl_base = var_cl_base/boundary_scaling
#Calculating the histogram, and adding it to the total histogram
var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins)
var_hist_sum = var_hist_sum+var_hist
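            #var_hist_sum accumulates raw counts over all timesteps; dividing by its sum and the bin width would turn it into a proper PDF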
else:
print('no cloudy columns apparently')
var_pdf = var_hist_sum
save_string_base = '_pdf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string = directory_output+ reg_var+save_string_base
save_string = save_string+'.npz'
np.savez(save_string,var_pdf=var_pdf,range_var=range_var)
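    #The resulting .npz can be read back with e.g. np.load(save_string)['var_pdf'] and np.load(save_string)['range_var']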
print('saved pdf with ', sum(var_pdf), 'points to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
| [
"numpy.sqrt",
"math.floor",
"numpy.hstack",
"numpy.log",
"scipy.interpolate.interp1d",
"numpy.array",
"sys.exit",
"scipy.interpolate.interp2d",
"numpy.mean",
"numpy.savez",
"numpy.histogram",
"numpy.where",
"netCDF4.Dataset",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"numpy.vstack",
"numpy.min",
"pandas.DataFrame",
"numpy.meshgrid",
"glob.glob",
"numpy.abs",
"numpy.floor",
"numpy.argmax",
"gc.collect",
"numpy.transpose",
"time.time",
"numpy.sum",
"numpy.zeros",
"numpy.percentile"
]
| [((2599, 2614), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (2612, 2614), True, 'import time as ttiimmee\n'), ((2799, 2836), 'numpy.array', 'np.array', (['[5, 10, 35, 50, 65, 90, 95]'], {}), '([5, 10, 35, 50, 65, 90, 95])\n', (2807, 2836), True, 'import numpy as np\n'), ((4604, 4636), 'netCDF4.Dataset', 'Dataset', (['filename_prof'], {'read': '"""r"""'}), "(filename_prof, read='r')\n", (4611, 4636), False, 'from netCDF4 import Dataset\n'), ((5507, 5535), 'numpy.zeros', 'np.zeros', (['[0, n_percentiles]'], {}), '([0, n_percentiles])\n', (5515, 5535), True, 'import numpy as np\n'), ((5567, 5595), 'numpy.zeros', 'np.zeros', (['[0, n_percentiles]'], {}), '([0, n_percentiles])\n', (5575, 5595), True, 'import numpy as np\n'), ((6928, 6946), 'numpy.floor', 'np.floor', (['(LCL / dz)'], {}), '(LCL / dz)\n', (6936, 6946), True, 'import numpy as np\n'), ((21605, 21632), 'numpy.asarray', 'np.asarray', (['chord_timesteps'], {}), '(chord_timesteps)\n', (21615, 21632), True, 'import numpy as np\n'), ((21653, 21679), 'numpy.asarray', 'np.asarray', (['chord_duration'], {}), '(chord_duration)\n', (21663, 21679), True, 'import numpy as np\n'), ((21700, 21724), 'numpy.asarray', 'np.asarray', (['chord_length'], {}), '(chord_length)\n', (21710, 21724), True, 'import numpy as np\n'), ((21745, 21769), 'numpy.asarray', 'np.asarray', (['chord_height'], {}), '(chord_height)\n', (21755, 21769), True, 'import numpy as np\n'), ((21790, 21814), 'numpy.asarray', 'np.asarray', (['chord_w_base'], {}), '(chord_w_base)\n', (21800, 21814), True, 'import numpy as np\n'), ((21835, 21859), 'numpy.asarray', 'np.asarray', (['chord_w_star'], {}), '(chord_w_star)\n', (21845, 21859), True, 'import numpy as np\n'), ((21880, 21906), 'numpy.asarray', 'np.asarray', (['chord_thl_star'], {}), '(chord_thl_star)\n', (21890, 21906), True, 'import numpy as np\n'), ((21927, 21952), 'numpy.asarray', 'np.asarray', (['chord_qt_star'], {}), '(chord_qt_star)\n', (21937, 21952), True, 'import numpy as np\n'), ((21973, 21992), 'numpy.asarray', 'np.asarray', (['chord_w'], {}), '(chord_w)\n', (21983, 21992), True, 'import numpy as np\n'), ((22013, 22035), 'numpy.asarray', 'np.asarray', (['chord_w_up'], {}), '(chord_w_up)\n', (22023, 22035), True, 'import numpy as np\n'), ((22056, 22080), 'numpy.asarray', 'np.asarray', (['chord_w_flux'], {}), '(chord_w_flux)\n', (22066, 22080), True, 'import numpy as np\n'), ((22101, 22122), 'numpy.asarray', 'np.asarray', (['chord_thl'], {}), '(chord_thl)\n', (22111, 22122), True, 'import numpy as np\n'), ((22143, 22167), 'numpy.asarray', 'np.asarray', (['chord_thl_25'], {}), '(chord_thl_25)\n', (22153, 22167), True, 'import numpy as np\n'), ((22188, 22212), 'numpy.asarray', 'np.asarray', (['chord_thl_75'], {}), '(chord_thl_75)\n', (22198, 22212), True, 'import numpy as np\n'), ((22233, 22253), 'numpy.asarray', 'np.asarray', (['chord_qt'], {}), '(chord_qt)\n', (22243, 22253), True, 'import numpy as np\n'), ((22274, 22297), 'numpy.asarray', 'np.asarray', (['chord_qt_25'], {}), '(chord_qt_25)\n', (22284, 22297), True, 'import numpy as np\n'), ((22318, 22341), 'numpy.asarray', 'np.asarray', (['chord_qt_75'], {}), '(chord_qt_75)\n', (22328, 22341), True, 'import numpy as np\n'), ((22362, 22384), 'numpy.asarray', 'np.asarray', (['chord_time'], {}), '(chord_time)\n', (22372, 22384), True, 'import numpy as np\n'), ((23253, 23499), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_for_panda', 'columns': "['timesteps', 'duration', 'length', 'height', 'w_base', 'w', 'w_flux',\n 'time', 'w up', 'w per', 'w per up', 'w 
star', 'thl star', 'qt star',\n 'thl', 'thl 25', 'thl 75', 'qt', 'qt 25', 'qt 75']"}), "(data=data_for_panda, columns=['timesteps', 'duration',\n 'length', 'height', 'w_base', 'w', 'w_flux', 'time', 'w up', 'w per',\n 'w per up', 'w star', 'thl star', 'qt star', 'thl', 'thl 25', 'thl 75',\n 'qt', 'qt 25', 'qt 75'])\n", (23265, 23499), True, 'import pandas as pd\n'), ((23633, 23648), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (23646, 23648), True, 'import time as ttiimmee\n'), ((27299, 27314), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (27312, 27314), True, 'import time as ttiimmee\n'), ((34082, 34149), 'numpy.linspace', 'np.linspace', (['(-0.5 - curtain_extra)', '(0.5 + curtain_extra)', '(n_t_reg + 1)'], {}), '(-0.5 - curtain_extra, 0.5 + curtain_extra, n_t_reg + 1)\n', (34093, 34149), True, 'import numpy as np\n'), ((34166, 34257), 'numpy.linspace', 'np.linspace', (['(-0.5 - curtain_extra + d_reg / 2)', '(0.5 + curtain_extra - d_reg / 2)', 'n_t_reg'], {}), '(-0.5 - curtain_extra + d_reg / 2, 0.5 + curtain_extra - d_reg /\n 2, n_t_reg)\n', (34177, 34257), True, 'import numpy as np\n'), ((34265, 34297), 'numpy.linspace', 'np.linspace', (['(0)', '(1.5)', '(n_z_reg + 1)'], {}), '(0, 1.5, n_z_reg + 1)\n', (34276, 34297), True, 'import numpy as np\n'), ((34338, 34390), 'numpy.linspace', 'np.linspace', (['(0 + d_reg / 2)', '(1.5 - d_reg / 2)', 'n_z_reg'], {}), '(0 + d_reg / 2, 1.5 - d_reg / 2, n_z_reg)\n', (34349, 34390), True, 'import numpy as np\n'), ((34423, 34456), 'numpy.meshgrid', 'np.meshgrid', (['t_reg_mid', 'z_reg_mid'], {}), '(t_reg_mid, z_reg_mid)\n', (34434, 34456), True, 'import numpy as np\n'), ((34478, 34506), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (34486, 34506), True, 'import numpy as np\n'), ((34528, 34556), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (34536, 34556), True, 'import numpy as np\n'), ((34586, 34614), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (34594, 34614), True, 'import numpy as np\n'), ((34639, 34667), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (34647, 34667), True, 'import numpy as np\n'), ((36299, 36331), 'netCDF4.Dataset', 'Dataset', (['filename_prof'], {'read': '"""r"""'}), "(filename_prof, read='r')\n", (36306, 36331), False, 'from netCDF4 import Dataset\n'), ((37773, 37791), 'numpy.floor', 'np.floor', (['(LCL / dz)'], {}), '(LCL / dz)\n', (37781, 37791), True, 'import numpy as np\n'), ((38812, 38840), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (38820, 38840), True, 'import numpy as np\n'), ((38865, 38893), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (38873, 38893), True, 'import numpy as np\n'), ((38918, 38946), 'numpy.zeros', 'np.zeros', (['[n_t_reg, n_z_reg]'], {}), '([n_t_reg, n_z_reg])\n', (38926, 38946), True, 'import numpy as np\n'), ((59961, 59976), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (59974, 59976), True, 'import time as ttiimmee\n'), ((63607, 63623), 'numpy.zeros', 'np.zeros', (['N_bins'], {}), '(N_bins)\n', (63615, 63623), True, 'import numpy as np\n'), ((64852, 64884), 'netCDF4.Dataset', 'Dataset', (['filename_prof'], {'read': '"""r"""'}), "(filename_prof, read='r')\n", (64859, 64884), False, 'from netCDF4 import Dataset\n'), ((66301, 66319), 'numpy.floor', 'np.floor', (['(LCL / dz)'], {}), '(LCL / dz)\n', (66309, 66319), True, 'import numpy as np\n'), ((75612, 75671), 'numpy.savez', 
'np.savez', (['save_string'], {'var_pdf': 'var_pdf', 'range_var': 'range_var'}), '(save_string, var_pdf=var_pdf, range_var=range_var)\n', (75620, 75671), True, 'import numpy as np\n'), ((3686, 3736), 'glob.glob', 'glob.glob', (["(directory_input + date + '/*column*.nc')"], {}), "(directory_input + date + '/*column*.nc')\n", (3695, 3736), False, 'import glob\n'), ((4136, 4165), 'netCDF4.Dataset', 'Dataset', (['filename_w'], {'read': '"""r"""'}), "(filename_w, read='r')\n", (4143, 4165), False, 'from netCDF4 import Dataset\n'), ((4188, 4217), 'netCDF4.Dataset', 'Dataset', (['filename_l'], {'read': '"""r"""'}), "(filename_l, read='r')\n", (4195, 4217), False, 'from netCDF4 import Dataset\n'), ((4240, 4271), 'netCDF4.Dataset', 'Dataset', (['filename_thl'], {'read': '"""r"""'}), "(filename_thl, read='r')\n", (4247, 4271), False, 'from netCDF4 import Dataset\n'), ((4294, 4324), 'netCDF4.Dataset', 'Dataset', (['filename_qt'], {'read': '"""r"""'}), "(filename_qt, read='r')\n", (4301, 4324), False, 'from netCDF4 import Dataset\n'), ((4423, 4476), 'glob.glob', 'glob.glob', (["(directory_input + date + '/*default?0*.nc')"], {}), "(directory_input + date + '/*default?0*.nc')\n", (4432, 4476), False, 'import glob\n'), ((6554, 6596), 'numpy.exp', 'np.exp', (['(17.67 * (T - 273.15) / (T - 29.65))'], {}), '(17.67 * (T - 273.15) / (T - 29.65))\n', (6560, 6596), True, 'import numpy as np\n'), ((6794, 6809), 'numpy.log', 'np.log', (['rel_hum'], {}), '(rel_hum)\n', (6800, 6809), True, 'import numpy as np\n'), ((8086, 8101), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (8099, 8101), True, 'import time as ttiimmee\n'), ((14321, 14336), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (14334, 14336), True, 'import time as ttiimmee\n'), ((14497, 14509), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (14505, 14509), True, 'import numpy as np\n'), ((21278, 21293), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (21291, 21293), True, 'import time as ttiimmee\n'), ((30054, 30088), 'numpy.zeros', 'np.zeros', (['[curtain_cells, n_z_reg]'], {}), '([curtain_cells, n_z_reg])\n', (30062, 30088), True, 'import numpy as np\n'), ((34822, 34840), 'numpy.zeros', 'np.zeros', (['[N_bins]'], {}), '([N_bins])\n', (34830, 34840), True, 'import numpy as np\n'), ((34874, 34892), 'numpy.zeros', 'np.zeros', (['[N_bins]'], {}), '([N_bins])\n', (34882, 34892), True, 'import numpy as np\n'), ((34926, 34944), 'numpy.zeros', 'np.zeros', (['[N_bins]'], {}), '([N_bins])\n', (34934, 34944), True, 'import numpy as np\n'), ((34978, 35014), 'numpy.zeros', 'np.zeros', (['[N_bins, n_t_reg, n_z_reg]'], {}), '([N_bins, n_t_reg, n_z_reg])\n', (34986, 35014), True, 'import numpy as np\n'), ((35046, 35082), 'numpy.zeros', 'np.zeros', (['[N_bins, n_t_reg, n_z_reg]'], {}), '([N_bins, n_t_reg, n_z_reg])\n', (35054, 35082), True, 'import numpy as np\n'), ((35114, 35150), 'numpy.zeros', 'np.zeros', (['[N_bins, n_t_reg, n_z_reg]'], {}), '([N_bins, n_t_reg, n_z_reg])\n', (35122, 35150), True, 'import numpy as np\n'), ((35181, 35226), 'numpy.linspace', 'np.linspace', (['(125)', '(-125 + N_bins * 250)', 'N_bins'], {}), '(125, -125 + N_bins * 250, N_bins)\n', (35192, 35226), True, 'import numpy as np\n'), ((35449, 35499), 'glob.glob', 'glob.glob', (["(directory_input + date + '/*column*.nc')"], {}), "(directory_input + date + '/*column*.nc')\n", (35458, 35499), False, 'import glob\n'), ((35792, 35821), 'netCDF4.Dataset', 'Dataset', (['filename_w'], {'read': '"""r"""'}), "(filename_w, read='r')\n", (35799, 35821), False, 'from netCDF4 import Dataset\n'), 
((35844, 35873), 'netCDF4.Dataset', 'Dataset', (['filename_l'], {'read': '"""r"""'}), "(filename_l, read='r')\n", (35851, 35873), False, 'from netCDF4 import Dataset\n'), ((36078, 36109), 'netCDF4.Dataset', 'Dataset', (['filename_var'], {'read': '"""r"""'}), "(filename_var, read='r')\n", (36085, 36109), False, 'from netCDF4 import Dataset\n'), ((36132, 36185), 'glob.glob', 'glob.glob', (["(directory_input + date + '/*default?0*.nc')"], {}), "(directory_input + date + '/*default?0*.nc')\n", (36141, 36185), False, 'import glob\n'), ((37399, 37441), 'numpy.exp', 'np.exp', (['(17.67 * (T - 273.15) / (T - 29.65))'], {}), '(17.67 * (T - 273.15) / (T - 29.65))\n', (37405, 37441), True, 'import numpy as np\n'), ((37639, 37654), 'numpy.log', 'np.log', (['rel_hum'], {}), '(rel_hum)\n', (37645, 37654), True, 'import numpy as np\n'), ((39250, 39265), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (39263, 39265), True, 'import time as ttiimmee\n'), ((44797, 44812), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (44810, 44812), True, 'import time as ttiimmee\n'), ((44973, 44985), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (44981, 44985), True, 'import numpy as np\n'), ((59808, 59823), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (59821, 59823), True, 'import time as ttiimmee\n'), ((63945, 64001), 'glob.glob', 'glob.glob', (["(directory_input + date + '/*.column.*.*.*.nc')"], {}), "(directory_input + date + '/*.column.*.*.*.nc')\n", (63954, 64001), False, 'import glob\n'), ((64285, 64314), 'netCDF4.Dataset', 'Dataset', (['filename_w'], {'read': '"""r"""'}), "(filename_w, read='r')\n", (64292, 64314), False, 'from netCDF4 import Dataset\n'), ((64337, 64366), 'netCDF4.Dataset', 'Dataset', (['filename_l'], {'read': '"""r"""'}), "(filename_l, read='r')\n", (64344, 64366), False, 'from netCDF4 import Dataset\n'), ((64555, 64586), 'netCDF4.Dataset', 'Dataset', (['filename_var'], {'read': '"""r"""'}), "(filename_var, read='r')\n", (64562, 64586), False, 'from netCDF4 import Dataset\n'), ((64609, 64669), 'glob.glob', 'glob.glob', (["(directory_input + date + '/testbed?default?0*.nc')"], {}), "(directory_input + date + '/testbed?default?0*.nc')\n", (64618, 64669), False, 'import glob\n'), ((65927, 65969), 'numpy.exp', 'np.exp', (['(17.67 * (T - 273.15) / (T - 29.65))'], {}), '(17.67 * (T - 273.15) / (T - 29.65))\n', (65933, 65969), True, 'import numpy as np\n'), ((66167, 66182), 'numpy.log', 'np.log', (['rel_hum'], {}), '(rel_hum)\n', (66173, 66182), True, 'import numpy as np\n'), ((67409, 67424), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (67422, 67424), True, 'import time as ttiimmee\n'), ((72305, 72320), 'time.time', 'ttiimmee.time', ([], {}), '()\n', (72318, 72320), True, 'import time as ttiimmee\n'), ((72481, 72493), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (72489, 72493), True, 'import numpy as np\n'), ((7619, 7639), 'math.floor', 'math.floor', (['(nz * 0.6)'], {}), '(nz * 0.6)\n', (7629, 7639), False, 'import math\n'), ((8213, 8251), 'netCDF4.Dataset', 'Dataset', (['filename_column[it]'], {'read': '"""r"""'}), "(filename_column[it], read='r')\n", (8220, 8251), False, 'from netCDF4 import Dataset\n'), ((14978, 15020), 'numpy.where', 'np.where', (['((cl_base - cbl_1d[:nt]) * dz < 0)'], {}), '((cl_base - cbl_1d[:nt]) * dz < 0)\n', (14986, 15020), True, 'import numpy as np\n'), ((33436, 33484), 'numpy.linspace', 'np.linspace', (['(0 + d_z_tmp / 2)', 'z_reg_orig_top', 'nz'], {}), '(0 + d_z_tmp / 2, z_reg_orig_top, nz)\n', (33447, 33484), True, 'import numpy as np\n'), ((33591, 
33619), 'numpy.hstack', 'np.hstack', (['[[0], z_reg_orig]'], {}), '([[0], z_reg_orig])\n', (33600, 33619), True, 'import numpy as np\n'), ((33647, 33690), 'numpy.vstack', 'np.vstack', (['[var_orig_2d[0, :], var_orig_2d]'], {}), '([var_orig_2d[0, :], var_orig_2d])\n', (33656, 33690), True, 'import numpy as np\n'), ((33723, 33783), 'scipy.interpolate.interp2d', 'interp2d', (['t_reg_orig', 'z_reg_orig', 'var_orig_2d'], {'kind': '"""linear"""'}), "(t_reg_orig, z_reg_orig, var_orig_2d, kind='linear')\n", (33731, 33783), False, 'from scipy.interpolate import interp2d\n'), ((38464, 38484), 'math.floor', 'math.floor', (['(nz * 0.6)'], {}), '(nz * 0.6)\n', (38474, 38484), False, 'import math\n'), ((39377, 39415), 'netCDF4.Dataset', 'Dataset', (['filename_column[it]'], {'read': '"""r"""'}), "(filename_column[it], read='r')\n", (39384, 39415), False, 'from netCDF4 import Dataset\n'), ((45456, 45498), 'numpy.where', 'np.where', (['((cl_base - cbl_1d[:nt]) * dz < 0)'], {}), '((cl_base - cbl_1d[:nt]) * dz < 0)\n', (45464, 45498), True, 'import numpy as np\n'), ((46024, 46066), 'numpy.sqrt', 'np.sqrt', (['(u_ref_prof ** 2 + v_ref_prof ** 2)'], {}), '(u_ref_prof ** 2 + v_ref_prof ** 2)\n', (46031, 46066), True, 'import numpy as np\n'), ((46280, 46308), 'numpy.mean', 'np.mean', (['cl_base[cbl_cl_idx]'], {}), '(cl_base[cbl_cl_idx])\n', (46287, 46308), True, 'import numpy as np\n'), ((66992, 67012), 'math.floor', 'math.floor', (['(nz * 0.6)'], {}), '(nz * 0.6)\n', (67002, 67012), False, 'import math\n'), ((67536, 67574), 'netCDF4.Dataset', 'Dataset', (['filename_column[it]'], {'read': '"""r"""'}), "(filename_column[it], read='r')\n", (67543, 67574), False, 'from netCDF4 import Dataset\n'), ((72964, 73006), 'numpy.where', 'np.where', (['((cl_base - cbl_1d[:nt]) * dz < 0)'], {}), '((cl_base - cbl_1d[:nt]) * dz < 0)\n', (72972, 73006), True, 'import numpy as np\n'), ((74967, 75022), 'numpy.histogram', 'np.histogram', (['var_cl_base'], {'range': 'range_var', 'bins': 'N_bins'}), '(var_cl_base, range=range_var, bins=N_bins)\n', (74979, 75022), True, 'import numpy as np\n'), ((11505, 11534), 'numpy.transpose', 'np.transpose', (['w_3d', '(0, 2, 1)'], {}), '(w_3d, (0, 2, 1))\n', (11517, 11534), True, 'import numpy as np\n'), ((11562, 11592), 'numpy.transpose', 'np.transpose', (['ql_3d', '(0, 2, 1)'], {}), '(ql_3d, (0, 2, 1))\n', (11574, 11592), True, 'import numpy as np\n'), ((11619, 11649), 'numpy.transpose', 'np.transpose', (['qt_3d', '(0, 2, 1)'], {}), '(qt_3d, (0, 2, 1))\n', (11631, 11649), True, 'import numpy as np\n'), ((11676, 11707), 'numpy.transpose', 'np.transpose', (['thl_3d', '(0, 2, 1)'], {}), '(thl_3d, (0, 2, 1))\n', (11688, 11707), True, 'import numpy as np\n'), ((12420, 12432), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12430, 12432), False, 'import gc\n'), ((13094, 13126), 'numpy.sqrt', 'np.sqrt', (['(u_ref ** 2 + v_ref ** 2)'], {}), '(u_ref ** 2 + v_ref ** 2)\n', (13101, 13126), True, 'import numpy as np\n'), ((13333, 13391), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * nx * ny * time_resolution)', '(2 * nx * ny)'], {}), '(0, 2 * nx * ny * time_resolution, 2 * nx * ny)\n', (13344, 13391), True, 'import numpy as np\n'), ((13726, 13743), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (13734, 13743), True, 'import numpy as np\n'), ((13770, 13787), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (13778, 13787), True, 'import numpy as np\n'), ((13814, 13831), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (13822, 13831), True, 'import numpy as np\n'), 
((13858, 13875), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (13866, 13875), True, 'import numpy as np\n'), ((13902, 13913), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (13910, 13913), True, 'import numpy as np\n'), ((14732, 14751), 'numpy.max', 'np.max', (['ql_2d[:, t]'], {}), '(ql_2d[:, t])\n', (14738, 14751), True, 'import numpy as np\n'), ((14787, 14817), 'numpy.argmax', 'np.argmax', (['(ql_2d[:, t] > 1e-06)'], {}), '(ql_2d[:, t] > 1e-06)\n', (14796, 14817), True, 'import numpy as np\n'), ((16654, 16708), 'numpy.min', 'np.min', (['cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]]'], {}), '(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])\n', (16660, 16708), True, 'import numpy as np\n'), ((31838, 31886), 'numpy.linspace', 'np.linspace', (['(0 + d_z_tmp / 2)', 'z_reg_orig_top', 'nz'], {}), '(0 + d_z_tmp / 2, z_reg_orig_top, nz)\n', (31849, 31886), True, 'import numpy as np\n'), ((31990, 32018), 'numpy.hstack', 'np.hstack', (['[[0], z_reg_orig]'], {}), '([[0], z_reg_orig])\n', (31999, 32018), True, 'import numpy as np\n'), ((32051, 32093), 'numpy.hstack', 'np.hstack', (['[var_orig_col[0], var_orig_col]'], {}), '([var_orig_col[0], var_orig_col])\n', (32060, 32093), True, 'import numpy as np\n'), ((32304, 32354), 'scipy.interpolate.interp1d', 'interp1d', (['z_reg_orig', 'var_orig_col'], {'kind': '"""nearest"""'}), "(z_reg_orig, var_orig_col, kind='nearest')\n", (32312, 32354), False, 'from scipy.interpolate import interp1d\n'), ((42099, 42128), 'numpy.transpose', 'np.transpose', (['w_3d', '(0, 2, 1)'], {}), '(w_3d, (0, 2, 1))\n', (42111, 42128), True, 'import numpy as np\n'), ((42156, 42186), 'numpy.transpose', 'np.transpose', (['ql_3d', '(0, 2, 1)'], {}), '(ql_3d, (0, 2, 1))\n', (42168, 42186), True, 'import numpy as np\n'), ((42213, 42244), 'numpy.transpose', 'np.transpose', (['var_3d', '(0, 2, 1)'], {}), '(var_3d, (0, 2, 1))\n', (42225, 42244), True, 'import numpy as np\n'), ((42886, 42898), 'gc.collect', 'gc.collect', ([], {}), '()\n', (42896, 42898), False, 'import gc\n'), ((43616, 43648), 'numpy.sqrt', 'np.sqrt', (['(u_ref ** 2 + v_ref ** 2)'], {}), '(u_ref ** 2 + v_ref ** 2)\n', (43623, 43648), True, 'import numpy as np\n'), ((43913, 43971), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * nx * ny * time_resolution)', '(2 * nx * ny)'], {}), '(0, 2 * nx * ny * time_resolution, 2 * nx * ny)\n', (43924, 43971), True, 'import numpy as np\n'), ((44250, 44267), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (44258, 44267), True, 'import numpy as np\n'), ((44292, 44309), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (44300, 44309), True, 'import numpy as np\n'), ((44334, 44351), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (44342, 44351), True, 'import numpy as np\n'), ((44376, 44387), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (44384, 44387), True, 'import numpy as np\n'), ((45208, 45227), 'numpy.max', 'np.max', (['ql_2d[:, t]'], {}), '(ql_2d[:, t])\n', (45214, 45227), True, 'import numpy as np\n'), ((45263, 45294), 'numpy.argmax', 'np.argmax', (['(ql_2d[:, t] > ql_min)'], {}), '(ql_2d[:, t] > ql_min)\n', (45272, 45294), True, 'import numpy as np\n'), ((47015, 47063), 'numpy.linspace', 'np.linspace', (['(0 + d_z_tmp / 2)', 'z_reg_orig_top', 'nz'], {}), '(0 + d_z_tmp / 2, z_reg_orig_top, nz)\n', (47026, 47063), True, 'import numpy as np\n'), ((47163, 47191), 'numpy.hstack', 'np.hstack', (['[[0], z_reg_orig]'], {}), '([[0], z_reg_orig])\n', (47172, 47191), True, 'import numpy as np\n'), ((47237, 47297), 'numpy.hstack', 
'np.hstack', (['[scaling_factor_x_prof[0], scaling_factor_x_prof]'], {}), '([scaling_factor_x_prof[0], scaling_factor_x_prof])\n', (47246, 47297), True, 'import numpy as np\n'), ((47343, 47403), 'numpy.hstack', 'np.hstack', (['[scaling_factor_y_prof[0], scaling_factor_y_prof]'], {}), '([scaling_factor_y_prof[0], scaling_factor_y_prof])\n', (47352, 47403), True, 'import numpy as np\n'), ((47539, 47602), 'scipy.interpolate.interp1d', 'interp1d', (['z_reg_orig', 'scaling_factor_x_prof_ext'], {'kind': '"""nearest"""'}), "(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')\n", (47547, 47602), False, 'from scipy.interpolate import interp1d\n'), ((47625, 47688), 'scipy.interpolate.interp1d', 'interp1d', (['z_reg_orig', 'scaling_factor_y_prof_ext'], {'kind': '"""nearest"""'}), "(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')\n", (47633, 47688), False, 'from scipy.interpolate import interp1d\n'), ((49765, 49819), 'numpy.min', 'np.min', (['cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]]'], {}), '(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])\n', (49771, 49819), True, 'import numpy as np\n'), ((69978, 70007), 'numpy.transpose', 'np.transpose', (['w_3d', '(0, 2, 1)'], {}), '(w_3d, (0, 2, 1))\n', (69990, 70007), True, 'import numpy as np\n'), ((70035, 70065), 'numpy.transpose', 'np.transpose', (['ql_3d', '(0, 2, 1)'], {}), '(ql_3d, (0, 2, 1))\n', (70047, 70065), True, 'import numpy as np\n'), ((70092, 70123), 'numpy.transpose', 'np.transpose', (['var_3d', '(0, 2, 1)'], {}), '(var_3d, (0, 2, 1))\n', (70104, 70123), True, 'import numpy as np\n'), ((70896, 70908), 'gc.collect', 'gc.collect', ([], {}), '()\n', (70906, 70908), False, 'import gc\n'), ((70969, 71009), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * nx * ny)', '(2 * nx * ny)'], {}), '(0, 2 * nx * ny, 2 * nx * ny)\n', (70980, 71009), True, 'import numpy as np\n'), ((71758, 71775), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (71766, 71775), True, 'import numpy as np\n'), ((71800, 71817), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (71808, 71817), True, 'import numpy as np\n'), ((71842, 71859), 'numpy.zeros', 'np.zeros', (['(nz, 1)'], {}), '((nz, 1))\n', (71850, 71859), True, 'import numpy as np\n'), ((71884, 71895), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (71892, 71895), True, 'import numpy as np\n'), ((72716, 72735), 'numpy.max', 'np.max', (['ql_2d[:, t]'], {}), '(ql_2d[:, t])\n', (72722, 72735), True, 'import numpy as np\n'), ((72771, 72802), 'numpy.argmax', 'np.argmax', (['(ql_2d[:, t] > ql_min)'], {}), '(ql_2d[:, t] > ql_min)\n', (72780, 72802), True, 'import numpy as np\n'), ((74299, 74318), 'numpy.mean', 'np.mean', (['bflux_s_1d'], {}), '(bflux_s_1d)\n', (74306, 74318), True, 'import numpy as np\n'), ((18446, 18494), 'numpy.mean', 'np.mean', (['bflux_s_1d[idx_beg_chord:idx_end_chord]'], {}), '(bflux_s_1d[idx_beg_chord:idx_end_chord])\n', (18453, 18494), True, 'import numpy as np\n'), ((18598, 18647), 'numpy.mean', 'np.mean', (['qtflux_s_1d[idx_beg_chord:idx_end_chord]'], {}), '(qtflux_s_1d[idx_beg_chord:idx_end_chord])\n', (18605, 18647), True, 'import numpy as np\n'), ((18734, 18784), 'numpy.mean', 'np.mean', (['thlflux_s_1d[idx_beg_chord:idx_end_chord]'], {}), '(thlflux_s_1d[idx_beg_chord:idx_end_chord])\n', (18741, 18784), True, 'import numpy as np\n'), ((20439, 20477), 'numpy.percentile', 'np.percentile', (['w_base_vec', 'percentiles'], {}), '(w_base_vec, percentiles)\n', (20452, 20477), True, 'import numpy as np\n'), ((20901, 20936), 'numpy.vstack', 'np.vstack', 
(['[chord_w_per, tmp_w_per]'], {}), '([chord_w_per, tmp_w_per])\n', (20910, 20936), True, 'import numpy as np\n'), ((20984, 21022), 'numpy.vstack', 'np.vstack', (['[chord_w_per, tmp_w_per_up]'], {}), '([chord_w_per, tmp_w_per_up])\n', (20993, 21022), True, 'import numpy as np\n'), ((30790, 30829), 'numpy.floor', 'np.floor', (['(idx_end_chord - idx_beg_chord)'], {}), '(idx_end_chord - idx_beg_chord)\n', (30798, 30829), True, 'import numpy as np\n'), ((47873, 47904), 'numpy.mean', 'np.mean', (['scaling_factor_x_inter'], {}), '(scaling_factor_x_inter)\n', (47880, 47904), True, 'import numpy as np\n'), ((47961, 47992), 'numpy.mean', 'np.mean', (['scaling_factor_y_inter'], {}), '(scaling_factor_y_inter)\n', (47968, 47992), True, 'import numpy as np\n'), ((74051, 74061), 'sys.exit', 'sys.exit', ([], {}), '()\n', (74059, 74061), False, 'import sys\n'), ((74574, 74594), 'numpy.mean', 'np.mean', (['qtflux_s_1d'], {}), '(qtflux_s_1d)\n', (74581, 74594), True, 'import numpy as np\n'), ((74718, 74739), 'numpy.mean', 'np.mean', (['thlflux_s_1d'], {}), '(thlflux_s_1d)\n', (74725, 74739), True, 'import numpy as np\n'), ((17784, 17826), 'numpy.mean', 'np.mean', (['u_2d[cl_base[ch_idx_l], ch_idx_l]'], {}), '(u_2d[cl_base[ch_idx_l], ch_idx_l])\n', (17791, 17826), True, 'import numpy as np\n'), ((17856, 17898), 'numpy.mean', 'np.mean', (['v_2d[cl_base[ch_idx_l], ch_idx_l]'], {}), '(v_2d[cl_base[ch_idx_l], ch_idx_l])\n', (17863, 17898), True, 'import numpy as np\n'), ((17928, 17960), 'numpy.sqrt', 'np.sqrt', (['(u_ref ** 2 + v_ref ** 2)'], {}), '(u_ref ** 2 + v_ref ** 2)\n', (17935, 17960), True, 'import numpy as np\n'), ((18251, 18300), 'numpy.percentile', 'np.percentile', (['cl_base[ch_idx_l]', 'base_percentile'], {}), '(cl_base[ch_idx_l], base_percentile)\n', (18264, 18300), True, 'import numpy as np\n'), ((19093, 19135), 'numpy.mean', 'np.mean', (['w_2d[cl_base[ch_idx_l], ch_idx_l]'], {}), '(w_2d[cl_base[ch_idx_l], ch_idx_l])\n', (19100, 19135), True, 'import numpy as np\n'), ((19171, 19217), 'numpy.mean', 'np.mean', (['w_2d[cl_base[ch_idx_l] - 1, ch_idx_l]'], {}), '(w_2d[cl_base[ch_idx_l] - 1, ch_idx_l])\n', (19178, 19217), True, 'import numpy as np\n'), ((19253, 19301), 'numpy.mean', 'np.mean', (['thl_2d[cl_base[ch_idx_l] - 1, ch_idx_l]'], {}), '(thl_2d[cl_base[ch_idx_l] - 1, ch_idx_l])\n', (19260, 19301), True, 'import numpy as np\n'), ((19810, 19851), 'numpy.mean', 'np.mean', (['thl_2d[cl_base_25_idx, ch_idx_l]'], {}), '(thl_2d[cl_base_25_idx, ch_idx_l])\n', (19817, 19851), True, 'import numpy as np\n'), ((19892, 19933), 'numpy.mean', 'np.mean', (['thl_2d[cl_base_75_idx, ch_idx_l]'], {}), '(thl_2d[cl_base_75_idx, ch_idx_l])\n', (19899, 19933), True, 'import numpy as np\n'), ((19970, 20017), 'numpy.mean', 'np.mean', (['qt_2d[cl_base[ch_idx_l] - 1, ch_idx_l]'], {}), '(qt_2d[cl_base[ch_idx_l] - 1, ch_idx_l])\n', (19977, 20017), True, 'import numpy as np\n'), ((20055, 20095), 'numpy.mean', 'np.mean', (['qt_2d[cl_base_75_idx, ch_idx_l]'], {}), '(qt_2d[cl_base_75_idx, ch_idx_l])\n', (20062, 20095), True, 'import numpy as np\n'), ((20135, 20175), 'numpy.mean', 'np.mean', (['qt_2d[cl_base_25_idx, ch_idx_l]'], {}), '(qt_2d[cl_base_25_idx, ch_idx_l])\n', (20142, 20175), True, 'import numpy as np\n'), ((20216, 20261), 'numpy.sum', 'np.sum', (['w_2d[cl_base[ch_idx_l] - 1, ch_idx_l]'], {}), '(w_2d[cl_base[ch_idx_l] - 1, ch_idx_l])\n', (20222, 20261), True, 'import numpy as np\n'), ((20367, 20404), 'numpy.mean', 'np.mean', (['w_base_vec[w_base_vec > 0.0]'], {}), '(w_base_vec[w_base_vec > 0.0])\n', (20374, 20404), 
True, 'import numpy as np\n'), ((20595, 20651), 'numpy.percentile', 'np.percentile', (['w_base_vec[w_base_vec > 0.0]', 'percentiles'], {}), '(w_base_vec[w_base_vec > 0.0], percentiles)\n', (20608, 20651), True, 'import numpy as np\n'), ((20714, 20737), 'numpy.zeros', 'np.zeros', (['n_percentiles'], {}), '(n_percentiles)\n', (20722, 20737), True, 'import numpy as np\n'), ((73757, 73808), 'numpy.percentile', 'np.percentile', (['cl_base[cbl_cl_idx]', 'base_percentile'], {}), '(cl_base[cbl_cl_idx], base_percentile)\n', (73770, 73808), True, 'import numpy as np\n'), ((21107, 21130), 'numpy.mean', 'np.mean', (['t_1d[ch_idx_l]'], {}), '(t_1d[ch_idx_l])\n', (21114, 21130), True, 'import numpy as np\n'), ((52025, 52104), 'numpy.percentile', 'np.percentile', (['cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]', 'base_percentile'], {}), '(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]], base_percentile)\n', (52038, 52104), True, 'import numpy as np\n'), ((52518, 52566), 'numpy.mean', 'np.mean', (['bflux_s_1d[idx_beg_chord:idx_end_chord]'], {}), '(bflux_s_1d[idx_beg_chord:idx_end_chord])\n', (52525, 52566), True, 'import numpy as np\n'), ((55553, 55567), 'numpy.mean', 'np.mean', (['w_tmp'], {}), '(w_tmp)\n', (55560, 55567), True, 'import numpy as np\n'), ((19444, 19493), 'numpy.percentile', 'np.percentile', (['cl_base[ch_idx_l]', 'base_percentile'], {}), '(cl_base[ch_idx_l], base_percentile)\n', (19457, 19493), True, 'import numpy as np\n'), ((50839, 50926), 'numpy.abs', 'np.abs', (['(t_1d - (time_beg_chord - curtain_extra * (time_end_chord - time_beg_chord)))'], {}), '(t_1d - (time_beg_chord - curtain_extra * (time_end_chord -\n time_beg_chord)))\n', (50845, 50926), True, 'import numpy as np\n'), ((50968, 51055), 'numpy.abs', 'np.abs', (['(t_1d - (time_end_chord + curtain_extra * (time_end_chord - time_beg_chord)))'], {}), '(t_1d - (time_end_chord + curtain_extra * (time_end_chord -\n time_beg_chord)))\n', (50974, 51055), True, 'import numpy as np\n'), ((52894, 52943), 'numpy.mean', 'np.mean', (['qtflux_s_1d[idx_beg_chord:idx_end_chord]'], {}), '(qtflux_s_1d[idx_beg_chord:idx_end_chord])\n', (52901, 52943), True, 'import numpy as np\n'), ((53103, 53153), 'numpy.mean', 'np.mean', (['thlflux_s_1d[idx_beg_chord:idx_end_chord]'], {}), '(thlflux_s_1d[idx_beg_chord:idx_end_chord])\n', (53110, 53153), True, 'import numpy as np\n'), ((55714, 55728), 'numpy.mean', 'np.mean', (['w_tmp'], {}), '(w_tmp)\n', (55721, 55728), True, 'import numpy as np\n'), ((56948, 56990), 'numpy.mean', 'np.mean', (['u_2d[cl_base[ch_idx_l], ch_idx_l]'], {}), '(u_2d[cl_base[ch_idx_l], ch_idx_l])\n', (56955, 56990), True, 'import numpy as np\n'), ((57028, 57070), 'numpy.mean', 'np.mean', (['v_2d[cl_base[ch_idx_l], ch_idx_l]'], {}), '(v_2d[cl_base[ch_idx_l], ch_idx_l])\n', (57035, 57070), True, 'import numpy as np\n'), ((57108, 57140), 'numpy.sqrt', 'np.sqrt', (['(u_ref ** 2 + v_ref ** 2)'], {}), '(u_ref ** 2 + v_ref ** 2)\n', (57115, 57140), True, 'import numpy as np\n'), ((19560, 19609), 'numpy.percentile', 'np.percentile', (['cl_base[ch_idx_l]', 'base_percentile'], {}), '(cl_base[ch_idx_l], base_percentile)\n', (19573, 19609), True, 'import numpy as np\n'), ((55935, 55949), 'numpy.mean', 'np.mean', (['w_tmp'], {}), '(w_tmp)\n', (55942, 55949), True, 'import numpy as np\n'), ((58036, 58050), 'numpy.mean', 'np.mean', (['w_tmp'], {}), '(w_tmp)\n', (58043, 58050), True, 'import numpy as np\n'), ((57595, 57630), 'numpy.abs', 'np.abs', (['(chord_length - mid_bin_size)'], {}), '(chord_length - mid_bin_size)\n', (57601, 57630), True, 
'import numpy as np\n'), ((58251, 58265), 'numpy.mean', 'np.mean', (['w_tmp'], {}), '(w_tmp)\n', (58258, 58265), True, 'import numpy as np\n'), ((58534, 58548), 'numpy.mean', 'np.mean', (['w_tmp'], {}), '(w_tmp)\n', (58541, 58548), True, 'import numpy as np\n')] |
"""D. mel housekeeping genes based on tau.
Uses the intersection of w1118 and orgR to create a list of
D. mel housekeeping genes.
"""
import os
from functools import partial
import pandas as pd
from larval_gonad.io import pickle_load, pickle_dump
def main():
# Load mapping of YOgn to FBgn
annot = pickle_load(snakemake.input.annot[0])
pickle_dump(intersect_fbgns(snakemake.input.male, annot), snakemake.output.male)
pickle_dump(intersect_fbgns(snakemake.input.female, annot), snakemake.output.female)
def intersect_fbgns(file_names, annot):
return list(set.intersection(*list(map(partial(convert_to_fbgn, annot=annot), file_names))))
def convert_to_fbgn(file_name, annot):
return set(
[
fbgn
for fbgn in map(lambda x: annot.get(x, None), pickle_load(file_name))
if fbgn is not None
]
)
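# Illustrative sketch (toy FBgn identifiers, not real data): a gene is kept
# only when it appears in every converted set, i.e. a plain set intersection.
assert set.intersection({"FBgn1", "FBgn2"}, {"FBgn2", "FBgn3"}) == {"FBgn2"}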
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input=dict(
male=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_male.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_male.pkl",
],
female=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_female.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_female.pkl",
],
annot="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl",
),
)
main()
| [
"larval_gonad.io.pickle_load",
"functools.partial",
"os.getenv"
]
| [((310, 347), 'larval_gonad.io.pickle_load', 'pickle_load', (['snakemake.input.annot[0]'], {}), '(snakemake.input.annot[0])\n', (321, 347), False, 'from larval_gonad.io import pickle_load, pickle_dump\n'), ((912, 943), 'os.getenv', 'os.getenv', (['"""SNAKE_DEBUG"""', '(False)'], {}), "('SNAKE_DEBUG', False)\n", (921, 943), False, 'import os\n'), ((804, 826), 'larval_gonad.io.pickle_load', 'pickle_load', (['file_name'], {}), '(file_name)\n', (815, 826), False, 'from larval_gonad.io import pickle_load, pickle_dump\n'), ((608, 645), 'functools.partial', 'partial', (['convert_to_fbgn'], {'annot': 'annot'}), '(convert_to_fbgn, annot=annot)\n', (615, 645), False, 'from functools import partial\n')] |
#!/usr/bin/env python3
"""
Usage::
usage: auth.py [-h] [{google,apple,github,jwt}] [jwt]
Login to your comma account
positional arguments:
{google,apple,github,jwt}
jwt
optional arguments:
-h, --help show this help message and exit
Examples::
./auth.py # Log in with google account
./auth.py github # Log in with GitHub Account
./auth.py jwt ey......hw # Log in with a JWT from https://jwt.comma.ai, for use in CI
"""
import argparse
import sys
import pprint
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Dict
from urllib.parse import parse_qs, urlencode
from tools.lib.api import APIError, CommaApi, UnauthorizedError
from tools.lib.auth_config import set_token, get_token
PORT = 3000
class ClientRedirectServer(HTTPServer):
query_params: Dict[str, Any] = {}
class ClientRedirectHandler(BaseHTTPRequestHandler):
def do_GET(self):
if not self.path.startswith('/auth'):
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'Return to the CLI to continue')
def log_message(self, format, *args): # pylint: disable=redefined-builtin
    pass  # this prevents the HTTP server from dumping messages to stdout
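# Illustrative sketch (hypothetical query string): parse_qs, as used in do_GET
# above, maps every key to a list of values, so 'code' and 'provider' arrive
# as one-element lists.
assert parse_qs('code=abc123&provider=h', keep_blank_values=True) == {'code': ['abc123'], 'provider': ['h']}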
def auth_redirect_link(method):
provider_id = {
'google': 'g',
'apple': 'a',
'github': 'h',
}[method]
params = {
'redirect_uri': f"https://api.comma.ai/v2/auth/{provider_id}/redirect/",
'state': f'service,localhost:{PORT}',
}
if method == 'google':
params.update({
'type': 'web_server',
'client_id': '45471411055-ornt4svd2miog6dnopve7qtmh5mnu6id.apps.googleusercontent.com',
'response_type': 'code',
'scope': 'https://www.googleapis.com/auth/userinfo.email',
'prompt': 'select_account',
})
return 'https://accounts.google.com/o/oauth2/auth?' + urlencode(params)
elif method == 'github':
params.update({
'client_id': '28c4ecb54bb7272cb5a4',
'scope': 'read:user',
})
return 'https://github.com/login/oauth/authorize?' + urlencode(params)
elif method == 'apple':
params.update({
'client_id': 'ai.comma.login',
'response_type': 'code',
'response_mode': 'form_post',
'scope': 'name email',
})
return 'https://appleid.apple.com/auth/authorize?' + urlencode(params)
else:
raise NotImplementedError(f"no redirect implemented for method {method}")
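# Flow sketch (inferred from the parameters above, not verified): the
# redirect_uri points at the comma API, and the 'state' value of
# 'service,localhost:PORT' is what eventually brings the browser back to the
# local ClientRedirectServer, whose handler only reacts to paths beginning
# with /auth and pulls 'code' and 'provider' out of the query string.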
def login(method):
oauth_uri = auth_redirect_link(method)
web_server = ClientRedirectServer(('localhost', PORT), ClientRedirectHandler)
print(f'To sign in, use your browser and navigate to {oauth_uri}')
webbrowser.open(oauth_uri, new=2)
while True:
web_server.handle_request()
if 'code' in web_server.query_params:
break
elif 'error' in web_server.query_params:
print('Authentication Error: "%s". Description: "%s" ' % (
web_server.query_params['error'],
web_server.query_params.get('error_description')), file=sys.stderr)
break
try:
auth_resp = CommaApi().post('v2/auth/', data={'code': web_server.query_params['code'], 'provider': web_server.query_params['provider']})
set_token(auth_resp['access_token'])
except APIError as e:
print(f'Authentication Error: {e}', file=sys.stderr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Login to your comma account')
parser.add_argument('method', default='google', const='google', nargs='?', choices=['google', 'apple', 'github', 'jwt'])
parser.add_argument('jwt', nargs='?')
args = parser.parse_args()
if args.method == 'jwt':
if args.jwt is None:
print("method JWT selected, but no JWT was provided")
exit(1)
set_token(args.jwt)
else:
login(args.method)
try:
me = CommaApi(token=get_token()).get('/v1/me')
print("Authenticated!")
pprint.pprint(me)
except UnauthorizedError:
print("Got invalid JWT")
exit(1)
| [
"argparse.ArgumentParser",
"tools.lib.auth_config.set_token",
"webbrowser.open",
"urllib.parse.parse_qs",
"tools.lib.api.CommaApi",
"tools.lib.auth_config.get_token",
"urllib.parse.urlencode",
"pprint.pprint"
]
| [((2877, 2910), 'webbrowser.open', 'webbrowser.open', (['oauth_uri'], {'new': '(2)'}), '(oauth_uri, new=2)\n', (2892, 2910), False, 'import webbrowser\n'), ((3563, 3629), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Login to your comma account"""'}), "(description='Login to your comma account')\n", (3586, 3629), False, 'import argparse\n'), ((1082, 1121), 'urllib.parse.parse_qs', 'parse_qs', (['query'], {'keep_blank_values': '(True)'}), '(query, keep_blank_values=True)\n', (1090, 1121), False, 'from urllib.parse import parse_qs, urlencode\n'), ((3405, 3441), 'tools.lib.auth_config.set_token', 'set_token', (["auth_resp['access_token']"], {}), "(auth_resp['access_token'])\n", (3414, 3441), False, 'from tools.lib.auth_config import set_token, get_token\n'), ((3954, 3973), 'tools.lib.auth_config.set_token', 'set_token', (['args.jwt'], {}), '(args.jwt)\n', (3963, 3973), False, 'from tools.lib.auth_config import set_token, get_token\n'), ((4096, 4113), 'pprint.pprint', 'pprint.pprint', (['me'], {}), '(me)\n', (4109, 4113), False, 'import pprint\n'), ((2084, 2101), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (2093, 2101), False, 'from urllib.parse import parse_qs, urlencode\n'), ((2284, 2301), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (2293, 2301), False, 'from urllib.parse import parse_qs, urlencode\n'), ((3276, 3286), 'tools.lib.api.CommaApi', 'CommaApi', ([], {}), '()\n', (3284, 3286), False, 'from tools.lib.api import APIError, CommaApi, UnauthorizedError\n'), ((2559, 2576), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (2568, 2576), False, 'from urllib.parse import parse_qs, urlencode\n'), ((4037, 4048), 'tools.lib.auth_config.get_token', 'get_token', ([], {}), '()\n', (4046, 4048), False, 'from tools.lib.auth_config import set_token, get_token\n')] |
#! /usr/bin/env python3
"""Parse through the simulated sequencing group specific kmer counts."""
import argparse as ap
from collections import OrderedDict
import glob
import gzip
import os
import sys
import time
import numpy as np
import multiprocessing as mp
SAMPLES = OrderedDict()
KMERS = {}
HAMMING = OrderedDict()
SAMPLE_COLS = [
'sample', 'is_bcg', 'is_ba', 'has_lethal', 'simulated_coverage', 'group',
'total_kmers', 'tp', 'tn', 'fp', 'fn',
'kmer_cov_min', 'kmer_cov_mean', 'kmer_cov_median', 'kmer_cov_max',
'non_zero_kmer_cov_min', 'non_zero_kmer_cov_mean',
'non_zero_kmer_cov_median', 'non_zero_kmer_cov_max'
]
KMER_COLS = [
'kmer', 'simulated_coverage', 'group', 'hamming_distance',
'tp', 'tn', 'fp', 'fn',
'group_kmer_cov_min',
'group_kmer_cov_mean',
'group_kmer_cov_median',
'group_kmer_cov_max',
'non_zero_group_kmer_cov_min',
'non_zero_group_kmer_cov_mean',
'non_zero_group_kmer_cov_median',
'non_zero_group_kmer_cov_max',
'outgroup_kmer_cov_min',
'outgroup_kmer_cov_mean',
'outgroup_kmer_cov_median',
'outgroup_kmer_cov_max',
'non_zero_outgroup_kmer_cov_min',
'non_zero_outgroup_kmer_cov_mean',
'non_zero_outgroup_kmer_cov_median',
'non_zero_outgroup_kmer_cov_max'
]
def get_group_status(sample, group):
"""Return if a sample is within a group or not."""
within_group = None
if group == 'ba':
within_group = True if SAMPLES[sample]['is_ba'] == 'True' else False
elif group == 'bcg':
within_group = True if SAMPLES[sample]['is_bcg'] == 'True' else False
else:
# lef
within_group = True if SAMPLES[sample]['has_lethal'] else False
return within_group
def get_coverage_stats(coverage):
"""Return summary stats of a set of coverages."""
non_zero = [c for c in coverage if c]
np_array = np.array(coverage)
non_zero_array = np.array(non_zero)
return {
'min': min(coverage) if coverage else 0,
'median': int(np.median(np_array)) if coverage else 0,
'mean': "{0:.4f}".format(np.mean(np_array)) if coverage else 0,
'max': max(coverage) if coverage else 0,
'non_zero_min': min(non_zero_array) if non_zero else 0,
'non_zero_median': int(np.median(non_zero_array)) if non_zero else 0,
'non_zero_mean': int(round(np.mean(non_zero_array))) if non_zero else 0,
'non_zero_max': max(non_zero_array) if non_zero else 0,
}
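# Illustrative sketch (hypothetical coverages): stats are computed over all
# counts, with a parallel set of non_zero_* stats that ignore zero-coverage
# kmers; note that 'mean' is returned as a formatted string.
_example_stats = get_coverage_stats([0, 2, 4])
assert _example_stats['min'] == 0 and _example_stats['max'] == 4
assert _example_stats['non_zero_min'] == 2 and _example_stats['mean'] == '2.0000'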
def reverse_complement(seq):
"""Reverse complement a DNA sequence."""
complement = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
'a': 't', 't': 'a', 'g': 'c', 'c': 'g'
}
return ''.join([complement[b] for b in seq[::-1]])
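# Illustrative sketch: counts are canonicalised against the reverse
# complement, so a kmer and its reverse complement are treated as the same
# sequence (see parse_counts below).
assert reverse_complement('AAGT') == 'ACTT'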
def parse_counts(counts, sample, coverage, group, skip_kmers=False,
filter_kmers=False):
"""Parse kmer counts."""
within_group = get_group_status(sample, group)
sample_row = {'coverages': [], 'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
with gzip.open(counts, 'r') as count_handle:
for line in count_handle:
kmer, count = line.decode().rstrip().split()
count = int(count)
parse = True
if filter_kmers:
parse = kmer in KMERS or reverse_complement(kmer) in KMERS
elif not skip_kmers:
if kmer not in KMERS:
kmer = reverse_complement(kmer)
if within_group:
KMERS[kmer][coverage]['group_coverages'].append(count)
if count:
KMERS[kmer][coverage]['tp'] += 1
else:
KMERS[kmer][coverage]['fn'] += 1
else:
KMERS[kmer][coverage]['outgroup_coverages'].append(count)
if count:
KMERS[kmer][coverage]['fp'] += 1
else:
KMERS[kmer][coverage]['tn'] += 1
if parse:
sample_row['coverages'].append(count)
if within_group:
if count:
sample_row['tp'] += 1
else:
sample_row['fn'] += 1
else:
if count:
sample_row['fp'] += 1
else:
sample_row['tn'] += 1
coverage_stats = get_coverage_stats(sample_row['coverages'])
SAMPLES[sample]['results'].append({
'simulated_coverage': coverage,
'within_group': within_group,
'tp': sample_row['tp'],
'tn': sample_row['tn'],
'fp': sample_row['fp'],
'fn': sample_row['fn'],
'kmer_cov_min': coverage_stats['min'],
'kmer_cov_mean': coverage_stats['mean'],
'kmer_cov_median': coverage_stats['median'],
'kmer_cov_max': coverage_stats['max'],
'non_zero_kmer_cov_min': coverage_stats['non_zero_min'],
'non_zero_kmer_cov_mean': coverage_stats['non_zero_mean'],
'non_zero_kmer_cov_median': coverage_stats['non_zero_median'],
'non_zero_kmer_cov_max': coverage_stats['non_zero_max'],
})
def parse_kmers(kmers, coverages, skip_kmers=False, has_hamming=True):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
kmer, distance = line.split("-")
if not has_hamming:
distance = False
KMERS[kmer] = OrderedDict()
HAMMING[kmer] = distance
if not skip_kmers:
for coverage in coverages:
KMERS[kmer][coverage] = {
'group_coverages': [], 'outgroup_coverages': [],
'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0
}
def parse_summary(summary):
"""Parse Summary file."""
cols = None
with open(summary, 'r') as summary_handle:
# Column Names:
# accession, gi, is_bcg, is_ba, species, genome_size, description
for line in summary_handle:
line = line.rstrip()
if line.startswith('#'):
cols = line.replace('#', '').split('\t')
else:
row = dict(zip(cols, line.split('\t')))
SAMPLES[row['accession']] = row
if row['accession'] == 'NZ_CP009941':
# NZ_CP009941 - Bacillus cereus w/ lef on chromosome
SAMPLES[row['accession']]['has_lethal'] = True
else:
SAMPLES[row['accession']]['has_lethal'] = False
SAMPLES[row['accession']]['results'] = []
def print_sample_summary(file_output):
"""Print the final per sample summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(SAMPLE_COLS)))
output_handle.write("\n")
for sample in SAMPLES:
if SAMPLES[sample]['results']:
for result in SAMPLES[sample]['results']:
row = {
'sample': sample,
'is_bcg': SAMPLES[sample]['is_bcg'],
'is_ba': SAMPLES[sample]['is_ba'],
'has_lethal': SAMPLES[sample]['has_lethal'],
'simulated_coverage': result['simulated_coverage'],
'group': args.group,
'within_group': result['within_group'],
'total_kmers': total_kmers,
'tp': result['tp'],
'tn': result['tn'],
'fp': result['fp'],
'fn': result['fn'],
'kmer_cov_min': result['kmer_cov_min'],
'kmer_cov_mean': result['kmer_cov_mean'],
'kmer_cov_median': result['kmer_cov_median'],
'kmer_cov_max': result['kmer_cov_max'],
'non_zero_kmer_cov_min': result['non_zero_kmer_cov_min'],
'non_zero_kmer_cov_mean': result['non_zero_kmer_cov_mean'],
'non_zero_kmer_cov_median': result['non_zero_kmer_cov_median'],
'non_zero_kmer_cov_max': result['non_zero_kmer_cov_max']
}
output_handle.write(("\t".join([
str(row[col]) for col in SAMPLE_COLS
])))
output_handle.write("\n")
def print_kmer_summary(file_output):
"""Print the final per kmer summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(KMER_COLS)))
output_handle.write("\n")
for kmer, coverages in KMERS.items():
for coverage in coverages:
within_group = get_coverage_stats(
KMERS[kmer][coverage]['group_coverages']
)
outgroup = get_coverage_stats(
KMERS[kmer][coverage]['outgroup_coverages']
)
row = {
'kmer': kmer,
'simulated_coverage': coverage,
'group': args.group,
'hamming_distance': HAMMING[kmer],
'tp': KMERS[kmer][coverage]['tp'],
'tn': KMERS[kmer][coverage]['tn'],
'fp': KMERS[kmer][coverage]['fp'],
'fn': KMERS[kmer][coverage]['fn'],
'group_kmer_cov_min': within_group['min'],
'group_kmer_cov_mean': within_group['mean'],
'group_kmer_cov_median': within_group['median'],
'group_kmer_cov_max': within_group['max'],
'non_zero_group_kmer_cov_min': within_group['non_zero_min'],
'non_zero_group_kmer_cov_mean': within_group['non_zero_mean'],
'non_zero_group_kmer_cov_median': within_group['non_zero_median'],
'non_zero_group_kmer_cov_max': within_group['non_zero_max'],
'outgroup_kmer_cov_min': outgroup['min'],
'outgroup_kmer_cov_mean': outgroup['mean'],
'outgroup_kmer_cov_median': outgroup['median'],
'outgroup_kmer_cov_max': outgroup['max'],
'non_zero_outgroup_kmer_cov_min': outgroup['non_zero_min'],
'non_zero_outgroup_kmer_cov_mean': outgroup['non_zero_mean'],
'non_zero_outgroup_kmer_cov_median': outgroup['non_zero_median'],
'non_zero_outgroup_kmer_cov_max': outgroup['non_zero_max'],
}
output_handle.write(("\t".join([
str(row[col]) for col in KMER_COLS
])))
output_handle.write("\n")
def read_lines(input_file):
"""Return lines in a text file as a list."""
lines = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
lines.append(line.rstrip())
return lines
def parse_filter_kmers(kmers):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
KMERS[line.split("-")[0]] = True
if __name__ == '__main__':
parser = ap.ArgumentParser(
prog='summarize-kmer-counts.py', conflict_handler='resolve',
description=("Summarize kmer counts of each simulation.")
)
parser.add_argument('summary', type=str, metavar="SUMMARY",
help='Summary of Bacillus genomes.')
parser.add_argument('directory', type=str, metavar="SIMUALTION_DIR",
help='Directory with group specific 31-mer counts.')
parser.add_argument('group', type=str, metavar="GROUP",
help='Which group to parse (ba, bcg or lef).')
parser.add_argument('kmers', type=str, metavar="KMERS",
help='Group specific k-mers.')
parser.add_argument('coverages', type=str, metavar="COVERAGES",
help=('Coverages to subsample to.'))
parser.add_argument('outdir', type=str, metavar="OUTDIR",
help='Directory to output to.')
parser.add_argument('--cpu', default=1, type=int, metavar="INT",
help='Number of cores to use (Default: 1)')
parser.add_argument('--single_sample', type=str, metavar="STR",
help='Process a single sample.')
parser.add_argument('--skip_kmers', action='store_true', default=False,
help='Skip kmer processing.')
parser.add_argument('--filter', action='store_true', default=False,
help='Filter counts based on input kmers.')
args = parser.parse_args()
if args.group not in ['ba', 'bcg', 'lef']:
raise Exception("GROUPS must be 'ba', 'bcg' or 'lef'")
coverages = read_lines(args.coverages)
print("Parsing Summary")
parse_summary(args.summary)
print("Parsing Kmers")
if args.filter:
print("Filtering Kmers")
args.skip_kmers = True
parse_filter_kmers(args.kmers)
else:
print("Parsing Kmers")
parse_kmers(args.kmers, coverages, skip_kmers=args.skip_kmers,
has_hamming=False if args.group == 'lef' else True)
total_kmers = len(KMERS)
current = 1
samples = list(SAMPLES.keys())
if args.single_sample:
samples = [args.single_sample]
total = len(samples)
for sample in samples:
path = "{0}/{1}".format(args.directory, sample)
if os.path.exists(path):
print("Working on {0} ({1} of {2})".format(sample, current, total))
current += 1
count_files = sorted(glob.glob(
"{0}/*-{1}.txt.gz".format(path, args.group)
))
for count_file in count_files:
coverage = os.path.basename(count_file).split('-')[1]
parse_counts(count_file, sample, coverage, args.group,
skip_kmers=args.skip_kmers,
filter_kmers=args.filter)
print("Output sample summary")
if args.single_sample:
print_sample_summary("{0}/count-summary-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_sample_summary("{0}/count-summary-sample-{1}.txt".format(
args.outdir, args.group
))
if not args.skip_kmers:
print("Output kmer summary")
if args.single_sample:
print_kmer_summary("{0}/count-summary-kmer-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_kmer_summary("{0}/count-summary-kmer-{1}.txt".format(
args.outdir, args.group
))
| [
"os.path.exists",
"collections.OrderedDict",
"numpy.median",
"numpy.mean",
"argparse.ArgumentParser",
"gzip.open",
"numpy.array",
"os.path.basename"
]
| [((271, 284), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (282, 284), False, 'from collections import OrderedDict\n'), ((306, 319), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (317, 319), False, 'from collections import OrderedDict\n'), ((1866, 1884), 'numpy.array', 'np.array', (['coverage'], {}), '(coverage)\n', (1874, 1884), True, 'import numpy as np\n'), ((1906, 1924), 'numpy.array', 'np.array', (['non_zero'], {}), '(non_zero)\n', (1914, 1924), True, 'import numpy as np\n'), ((11465, 11605), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'prog': '"""summarize-kmer-counts.py"""', 'conflict_handler': '"""resolve"""', 'description': '"""Summarize kmer counts of each simulation."""'}), "(prog='summarize-kmer-counts.py', conflict_handler=\n 'resolve', description='Summarize kmer counts of each simulation.')\n", (11482, 11605), True, 'import argparse as ap\n'), ((2983, 3005), 'gzip.open', 'gzip.open', (['counts', '"""r"""'], {}), "(counts, 'r')\n", (2992, 3005), False, 'import gzip\n'), ((13773, 13793), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (13787, 13793), False, 'import os\n'), ((2009, 2028), 'numpy.median', 'np.median', (['np_array'], {}), '(np_array)\n', (2018, 2028), True, 'import numpy as np\n'), ((2083, 2100), 'numpy.mean', 'np.mean', (['np_array'], {}), '(np_array)\n', (2090, 2100), True, 'import numpy as np\n'), ((2266, 2291), 'numpy.median', 'np.median', (['non_zero_array'], {}), '(non_zero_array)\n', (2275, 2291), True, 'import numpy as np\n'), ((5548, 5561), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5559, 5561), False, 'from collections import OrderedDict\n'), ((2348, 2371), 'numpy.mean', 'np.mean', (['non_zero_array'], {}), '(non_zero_array)\n', (2355, 2371), True, 'import numpy as np\n'), ((14089, 14117), 'os.path.basename', 'os.path.basename', (['count_file'], {}), '(count_file)\n', (14105, 14117), False, 'import os\n')] |
from django.contrib import admin
from .models import Songs
admin.site.register(Songs)
# Register your models here.
| [
"django.contrib.admin.site.register"
]
| [((60, 86), 'django.contrib.admin.site.register', 'admin.site.register', (['Songs'], {}), '(Songs)\n', (79, 86), False, 'from django.contrib import admin\n')] |
# -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
import wx
import ConfigParser
from wx.lib.mixins.listctrl import getListCtrlSelection
from wx.lib.pubsub import pub
from gui.RootGUI import RootGUI
from StepsDialog import StepsDialog
from PlotFrame import PlotFuncFrame, PlotCorrFrame
import interface
import mbox
class RootFrame(RootGUI):
calcs = []
plot_frame = None
def __init__(self, *args, **kwds):
super(RootFrame, self).__init__(*args, **kwds)
# set root
self.root = self.set_root()
# initialize choices
self.propChoices = interface.dataClasses()
calc_data_types = self.propChoices.types()
calc_data_classes = self.propChoices.classes(calc_data_types[0])
corr_classes = self.propChoices.classes("Histogram")
self.propType.SetItems(calc_data_types)
self.propChoice.SetItems(calc_data_classes)
self.xCorr.SetItems(corr_classes)
self.yCorr.SetItems(corr_classes)
self.propType.SetSelection(0)
self.propChoice.SetSelection(0)
self.xCorr.SetSelection(0)
self.yCorr.SetSelection(0)
# initialize calc tree
self.build_tree(self.root, self.typeRBox.GetItemLabel(self.typeRBox.GetSelection()))
# initialize calc list
self.calcList.InsertColumn(0, 'Directory', width=180)
self.calcList.InsertColumn(1, 'Type', width=70)
self.calcList.InsertColumn(2, 'NSteps', width=100)
def set_root(self):
"""
        Sets the root directory for the GUI based on the config file
:return: Root directory
"""
config_dir = os.path.expanduser("~/.local/shs")
config_file = os.path.join(config_dir, "shs_gui.cfg")
# check the file and create one if it's not there
if not os.path.isfile(config_file):
os.makedirs(config_dir)
open(config_file, 'w').close()
config = ConfigParser.ConfigParser()
config.read(config_file)
# if config exists and has needed option
if config.has_option("general", "root_dir"):
return config.get("general", "root_dir")
# make config
if not config.has_section("general"):
config.add_section("general")
dlg = wx.DirDialog(self, "Select root directory")
if dlg.ShowModal() == wx.ID_OK:
root_dir = dlg.GetPath()
config.set("general", "root_dir", root_dir)
else:
sys.exit(1)
with open(config_file, 'w') as f:
config.write(f)
return root_dir
def build_tree(self, root, calc_type):
"""
Adds a new root element and then its children
:param root: root directory for the tree
:param calc_type: calculation type
"""
self.calcTree.DeleteAllItems()
r = len(root.split(os.sep))
ids = {root: self.calcTree.AddRoot(root)}
for (dir_path, dir_names, file_names) in os.walk(root):
if interface.isCalcOfType(calc_type, dn=dir_names, fn=file_names):
# find the number of steps in MDE file, quickly
nsteps = interface.GetNumMDESteps(dir_path)
ancdirs = dir_path.split(os.sep)[r:]
if nsteps is not None:
ancdirs[-1] += ' [%i]' % nsteps
ad = root
for ancdir in ancdirs:
d = os.path.join(ad, ancdir)
if not d in ids:
ids[d] = self.calcTree.AppendItem(ids[ad], ancdir)
self.calcTree.SortChildren(ids[ad])
ad = d
def get_selection_dir(self):
item = self.calcTree.GetSelection()
parent = self.calcTree.GetItemParent(item)
path = [self.calcTree.GetItemText(item)]
while parent.IsOk():
path.append(self.calcTree.GetItemText(parent))
parent = self.calcTree.GetItemParent(parent)
# calculation directory
calc_dir = os.sep.join(path[::-1]).split()[0]
return calc_dir
# return os.sep.join((self.root, calc_dir))
def onSelChange(self, event):
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if interface.isCalcOfType(ctype, dir=cdir):
self.enqueueBtn.Enable()
else:
self.enqueueBtn.Enable(False)
def propTypeChange(self, event):
# property type
pt_num = self.propType.GetSelection()
pt = self.propType.GetItems()[pt_num]
self.propChoice.SetItems(self.propChoices.classes(pt))
self.propChoice.SetSelection(0)
def typeChange(self, event):
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
self.build_tree(self.root, ctype)
def upBtnPress(self, event):
# selection indices
sind = getListCtrlSelection(self.calcList)
if sind:
# number of deleted strings
ds = 0
for si in sind:
self.calcs.pop(si - ds)
self.calcList.DeleteItem(si - ds)
ds += 1
return 0
return 1
def downBtnPress(self, event):
# current list count
clc = self.calcList.GetItemCount()
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if not interface.isCalcOfType(ctype, dir=cdir):
mbox.NoResults(cdir, ctype)
return 1
# init steps range
r = None
if ctype in ('.output', '.ANI'):
# enter dialog
dlg = StepsDialog(None)
if dlg.ShowModal() == wx.ID_OK:
r = dlg.GetRange()
dlg.Destroy()
self.calcs.append(interface.getCalc(cdir, ctype, r))
self.calcList.InsertStringItem(clc, cdir[len(self.root)+1:])
self.calcList.SetStringItem(clc, 1, ctype)
self.calcList.SetStringItem(clc, 2, str(len(r)) if r is not None else '')
return 0
def on_enqueue_press(self, _):
from sshutils import getMount, getDevice, getRemoteDir
# on which device are we?
calc_dir = self.get_selection_dir()
mount_path = getMount(calc_dir)
device_name, device_type = getDevice(mount_path)
if 'ssh' in device_type:
user, host_dir = device_name.split('@')
hostname, remote_mount_path = host_dir.split(':')
remote_dir = getRemoteDir(calc_dir, mount_path, remote_mount_path)
self.enqueue_remote(remote_dir, hostname, user)
else:
self.enqueue_local(calc_dir)
@staticmethod
def enqueue_local(calc_dir):
"""
Enqueue a task on a local filesystem
:param calc_dir: calculation directory on a local filesystem
:return: error_code (0 is OK)
"""
import distutils.spawn
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
if distutils.spawn.find_executable('qstat') is not None:
q = 'pbs'
elif distutils.spawn.find_executable('sinfo') is not None:
q = 'slurm'
else:
mbox.JobSubmit(None, ())
return -1
comm = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q, q + '.sh'))
submit = subprocess.Popen(['/bin/bash', comm, '-d=' + calc_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
mbox.JobSubmit(q, submit.communicate())
@staticmethod
def enqueue_remote(calc_dir, host, user):
"""
Enqueue a task on a remote filesystem
:param calc_dir: calculation directory on a remote filesystem
:param host: host where to enqueue a task
:param user: user of a remote system who enqueues a task
:return: error code (0 is OK)
"""
from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
ssh = getSSHClient(host, user)
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
q = getQueue(ssh)
if q is None:
mbox.JobSubmit(None, ())
return None
# queue putter on a local machine
local_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q))
putter = q + '.sh'
sftp = copyFile(ssh, putter, local_dir, calc_dir)
remote_file = os.path.join(calc_dir, putter)
stdout, stderr = runCommand(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)
mbox.JobSubmit(q, ('\n'.join(stdout.readlines()), '\n'.join(stderr.readlines())))
removeFile(sftp, remote_file)
ssh.close()
def plotBtnPress(self, event):
if self.noteBook.GetSelection() == 0:
self.plot_property()
else:
self.plot_correlation()
def plot_property(self):
# plot options - get all the data to plot
ptype = self.propType.GetItems()[self.propType.GetSelection()]
pchoice = self.propChoice.GetItems()[self.propChoice.GetSelection()]
data_class = self.propChoices.dataClass(ptype, pchoice)
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
t1 = time.clock()
plot_data = interface.getData(ptype, data_class, leg,
[self.calcs[i] for i in getListCtrlSelection(self.calcList)])
self.SetStatusText('Calculation time: %7.2f s.' % (time.clock() - t1))
msg = plot_data
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotFuncFrame(self)
self.plot_frame.Show()
pub.sendMessage('data.plot', message=msg)
def plot_correlation(self):
# correlate options - get all the data to plot
xchoice = self.xCorr.GetSelection()
ychoice = self.yCorr.GetSelection()
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
data, info = interface.getCorr(xchoice, ychoice, [self.calcs[i] for i in getListCtrlSelection(self.calcList)])
msg = [leg, data, info]
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotCorrFrame(self)
self.plot_frame.Show()
pub.sendMessage('corr.plot', message=msg)
| [
"sshutils.getRemoteDir",
"time.clock",
"interface.dataClasses",
"sshutils.getMount",
"StepsDialog.StepsDialog",
"ConfigParser.ConfigParser",
"sshutils.removeFile",
"os.sep.join",
"sys.exit",
"wx.lib.pubsub.pub.sendMessage",
"sshutils.copyFile",
"os.walk",
"PlotFrame.PlotCorrFrame",
"interface.isCalcOfType",
"wx.DirDialog",
"subprocess.Popen",
"sshutils.runCommand",
"sshutils.getQueue",
"interface.GetNumMDESteps",
"mbox.NoResults",
"os.path.expanduser",
"interface.getCalc",
"sshutils.getDevice",
"os.path.isfile",
"os.path.dirname",
"os.makedirs",
"os.path.join",
"mbox.JobSubmit",
"sshutils.getSSHClient",
"PlotFrame.PlotFuncFrame",
"wx.lib.mixins.listctrl.getListCtrlSelection"
]
| [((614, 637), 'interface.dataClasses', 'interface.dataClasses', ([], {}), '()\n', (635, 637), False, 'import interface\n'), ((1645, 1679), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.local/shs"""'], {}), "('~/.local/shs')\n", (1663, 1679), False, 'import os\n'), ((1702, 1741), 'os.path.join', 'os.path.join', (['config_dir', '"""shs_gui.cfg"""'], {}), "(config_dir, 'shs_gui.cfg')\n", (1714, 1741), False, 'import os\n'), ((1940, 1967), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (1965, 1967), False, 'import ConfigParser\n'), ((2280, 2323), 'wx.DirDialog', 'wx.DirDialog', (['self', '"""Select root directory"""'], {}), "(self, 'Select root directory')\n", (2292, 2323), False, 'import wx\n'), ((2977, 2990), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (2984, 2990), False, 'import os\n'), ((4344, 4383), 'interface.isCalcOfType', 'interface.isCalcOfType', (['ctype'], {'dir': 'cdir'}), '(ctype, dir=cdir)\n', (4366, 4383), False, 'import interface\n'), ((4962, 4997), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', (['self.calcList'], {}), '(self.calcList)\n', (4982, 4997), False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((6366, 6384), 'sshutils.getMount', 'getMount', (['calc_dir'], {}), '(calc_dir)\n', (6374, 6384), False, 'from sshutils import getMount, getDevice, getRemoteDir\n'), ((6420, 6441), 'sshutils.getDevice', 'getDevice', (['mount_path'], {}), '(mount_path)\n', (6429, 6441), False, 'from sshutils import getMount, getDevice, getRemoteDir\n'), ((7492, 7600), 'subprocess.Popen', 'subprocess.Popen', (["['/bin/bash', comm, '-d=' + calc_dir]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['/bin/bash', comm, '-d=' + calc_dir], stdout=subprocess.\n PIPE, stderr=subprocess.PIPE)\n", (7508, 7600), False, 'import subprocess\n'), ((8102, 8126), 'sshutils.getSSHClient', 'getSSHClient', (['host', 'user'], {}), '(host, user)\n', (8114, 8126), False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((8228, 8241), 'sshutils.getQueue', 'getQueue', (['ssh'], {}), '(ssh)\n', (8236, 8241), False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((8504, 8546), 'sshutils.copyFile', 'copyFile', (['ssh', 'putter', 'local_dir', 'calc_dir'], {}), '(ssh, putter, local_dir, calc_dir)\n', (8512, 8546), False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((8569, 8599), 'os.path.join', 'os.path.join', (['calc_dir', 'putter'], {}), '(calc_dir, putter)\n', (8581, 8599), False, 'import os\n'), ((8625, 8683), 'sshutils.runCommand', 'runCommand', (['ssh', "('bash ' + remote_file + ' -d=' + calc_dir)"], {}), "(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)\n", (8635, 8683), False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((8782, 8811), 'sshutils.removeFile', 'removeFile', (['sftp', 'remote_file'], {}), '(sftp, remote_file)\n', (8792, 8811), False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((9400, 9412), 'time.clock', 'time.clock', ([], {}), '()\n', (9410, 9412), False, 'import time\n'), ((9875, 9916), 'wx.lib.pubsub.pub.sendMessage', 'pub.sendMessage', (['"""data.plot"""'], {'message': 'msg'}), "('data.plot', message=msg)\n", (9890, 9916), False, 'from wx.lib.pubsub import pub\n'), ((10531, 10572), 'wx.lib.pubsub.pub.sendMessage', 'pub.sendMessage', (['"""corr.plot"""'], {'message': 'msg'}), "('corr.plot', message=msg)\n", 
(10546, 10572), False, 'from wx.lib.pubsub import pub\n'), ((1815, 1842), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1829, 1842), False, 'import os\n'), ((1856, 1879), 'os.makedirs', 'os.makedirs', (['config_dir'], {}), '(config_dir)\n', (1867, 1879), False, 'import os\n'), ((2483, 2494), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2491, 2494), False, 'import sys\n'), ((3007, 3069), 'interface.isCalcOfType', 'interface.isCalcOfType', (['calc_type'], {'dn': 'dir_names', 'fn': 'file_names'}), '(calc_type, dn=dir_names, fn=file_names)\n', (3029, 3069), False, 'import interface\n'), ((5541, 5580), 'interface.isCalcOfType', 'interface.isCalcOfType', (['ctype'], {'dir': 'cdir'}), '(ctype, dir=cdir)\n', (5563, 5580), False, 'import interface\n'), ((5594, 5621), 'mbox.NoResults', 'mbox.NoResults', (['cdir', 'ctype'], {}), '(cdir, ctype)\n', (5608, 5621), False, 'import mbox\n'), ((5765, 5782), 'StepsDialog.StepsDialog', 'StepsDialog', (['None'], {}), '(None)\n', (5776, 5782), False, 'from StepsDialog import StepsDialog\n'), ((5914, 5947), 'interface.getCalc', 'interface.getCalc', (['cdir', 'ctype', 'r'], {}), '(cdir, ctype, r)\n', (5931, 5947), False, 'import interface\n'), ((6614, 6667), 'sshutils.getRemoteDir', 'getRemoteDir', (['calc_dir', 'mount_path', 'remote_mount_path'], {}), '(calc_dir, mount_path, remote_mount_path)\n', (6626, 6667), False, 'from sshutils import getMount, getDevice, getRemoteDir\n'), ((8276, 8300), 'mbox.JobSubmit', 'mbox.JobSubmit', (['None', '()'], {}), '(None, ())\n', (8290, 8300), False, 'import mbox\n'), ((3160, 3194), 'interface.GetNumMDESteps', 'interface.GetNumMDESteps', (['dir_path'], {}), '(dir_path)\n', (3184, 3194), False, 'import interface\n'), ((7335, 7359), 'mbox.JobSubmit', 'mbox.JobSubmit', (['None', '()'], {}), '(None, ())\n', (7349, 7359), False, 'import mbox\n'), ((7426, 7451), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7441, 7451), False, 'import os\n'), ((8416, 8441), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8431, 8441), False, 'import os\n'), ((9350, 9385), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', (['self.calcList'], {}), '(self.calcList)\n', (9370, 9385), False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((9812, 9831), 'PlotFrame.PlotFuncFrame', 'PlotFuncFrame', (['self'], {}), '(self)\n', (9825, 9831), False, 'from PlotFrame import PlotFuncFrame, PlotCorrFrame\n'), ((10146, 10181), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', (['self.calcList'], {}), '(self.calcList)\n', (10166, 10181), False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((10468, 10487), 'PlotFrame.PlotCorrFrame', 'PlotCorrFrame', (['self'], {}), '(self)\n', (10481, 10487), False, 'from PlotFrame import PlotFuncFrame, PlotCorrFrame\n'), ((3428, 3452), 'os.path.join', 'os.path.join', (['ad', 'ancdir'], {}), '(ad, ancdir)\n', (3440, 3452), False, 'import os\n'), ((4018, 4041), 'os.sep.join', 'os.sep.join', (['path[::-1]'], {}), '(path[::-1])\n', (4029, 4041), False, 'import os\n'), ((9537, 9572), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', (['self.calcList'], {}), '(self.calcList)\n', (9557, 9572), False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((9634, 9646), 'time.clock', 'time.clock', ([], {}), '()\n', (9644, 9646), False, 'import time\n'), ((10264, 10299), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', 
(['self.calcList'], {}), '(self.calcList)\n', (10284, 10299), False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models
class Migration(migrations.Migration):
dependencies = [
('order', '0014_auto_20161028_0955'),
]
operations = [
migrations.AlterModelOptions(
name='deliverygroup',
options={'verbose_name': 'Delivery Group', 'verbose_name_plural': 'Delivery Groups'},
),
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-last_status_change',), 'verbose_name': 'Order', 'verbose_name_plural': 'Orders'},
),
migrations.AlterModelOptions(
name='ordereditem',
options={'verbose_name': 'Ordered item', 'verbose_name_plural': 'Ordered items'},
),
migrations.AlterModelOptions(
name='orderhistoryentry',
options={'ordering': ('date',), 'verbose_name': 'Order history entry', 'verbose_name_plural': 'Order history entries'},
),
migrations.AlterModelOptions(
name='ordernote',
options={'verbose_name': 'Order note', 'verbose_name_plural': 'Order notes'},
),
migrations.AlterModelOptions(
name='payment',
options={'ordering': ('-pk',), 'verbose_name': 'Payment', 'verbose_name_plural': 'Payments'},
),
migrations.AlterField(
model_name='deliverygroup',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='last updated'),
),
migrations.AlterField(
model_name='deliverygroup',
name='shipping_method_name',
field=models.CharField(blank=True, default=None, editable=False, max_length=255, null=True, verbose_name='shipping method name'),
),
migrations.AlterField(
model_name='deliverygroup',
name='tracking_number',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='tracking number'),
),
migrations.AlterField(
model_name='order',
name='billing_address',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='billing address'),
),
migrations.AlterField(
model_name='order',
name='discount_amount',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='discount amount'),
),
migrations.AlterField(
model_name='order',
name='discount_name',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='discount name'),
),
migrations.AlterField(
model_name='order',
name='shipping_address',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='shipping address'),
),
migrations.AlterField(
model_name='order',
name='total_net',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total net'),
),
migrations.AlterField(
model_name='order',
name='total_tax',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total tax'),
),
migrations.AlterField(
model_name='order',
name='tracking_client_id',
field=models.CharField(blank=True, editable=False, max_length=36, verbose_name='tracking client id'),
),
migrations.AlterField(
model_name='order',
name='user_email',
field=models.EmailField(blank=True, default='', editable=False, max_length=254, verbose_name='user email'),
),
migrations.AlterField(
model_name='order',
name='voucher',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='discount.Voucher', verbose_name='voucher'),
),
migrations.AlterField(
model_name='ordereditem',
name='delivery_group',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='order.DeliveryGroup', verbose_name='delivery group'),
),
migrations.AlterField(
model_name='ordereditem',
name='stock',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.Stock', verbose_name='stock'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='comment',
field=models.CharField(blank=True, default='', max_length=100, verbose_name='comment'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='order.Order', verbose_name='order'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='payment',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='payments', to='order.Order', verbose_name='order'),
),
]
| [
"django.db.models.EmailField",
"django.db.models.ForeignKey",
"django.db.migrations.AlterModelOptions",
"django.db.models.DateTimeField",
"django.db.models.CharField"
]
| [((392, 532), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""deliverygroup"""', 'options': "{'verbose_name': 'Delivery Group', 'verbose_name_plural': 'Delivery Groups'}"}), "(name='deliverygroup', options={'verbose_name':\n 'Delivery Group', 'verbose_name_plural': 'Delivery Groups'})\n", (420, 532), False, 'from django.db import migrations, models\n'), ((573, 730), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""order"""', 'options': "{'ordering': ('-last_status_change',), 'verbose_name': 'Order',\n 'verbose_name_plural': 'Orders'}"}), "(name='order', options={'ordering': (\n '-last_status_change',), 'verbose_name': 'Order', 'verbose_name_plural':\n 'Orders'})\n", (601, 730), False, 'from django.db import migrations, models\n'), ((766, 900), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""ordereditem"""', 'options': "{'verbose_name': 'Ordered item', 'verbose_name_plural': 'Ordered items'}"}), "(name='ordereditem', options={'verbose_name':\n 'Ordered item', 'verbose_name_plural': 'Ordered items'})\n", (794, 900), False, 'from django.db import migrations, models\n'), ((941, 1123), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""orderhistoryentry"""', 'options': "{'ordering': ('date',), 'verbose_name': 'Order history entry',\n 'verbose_name_plural': 'Order history entries'}"}), "(name='orderhistoryentry', options={'ordering':\n ('date',), 'verbose_name': 'Order history entry', 'verbose_name_plural':\n 'Order history entries'})\n", (969, 1123), False, 'from django.db import migrations, models\n'), ((1160, 1288), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""ordernote"""', 'options': "{'verbose_name': 'Order note', 'verbose_name_plural': 'Order notes'}"}), "(name='ordernote', options={'verbose_name':\n 'Order note', 'verbose_name_plural': 'Order notes'})\n", (1188, 1288), False, 'from django.db import migrations, models\n'), ((1329, 1471), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""payment"""', 'options': "{'ordering': ('-pk',), 'verbose_name': 'Payment', 'verbose_name_plural':\n 'Payments'}"}), "(name='payment', options={'ordering': ('-pk',),\n 'verbose_name': 'Payment', 'verbose_name_plural': 'Payments'})\n", (1357, 1471), False, 'from django.db import migrations, models\n'), ((1626, 1701), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)', 'verbose_name': '"""last updated"""'}), "(auto_now=True, null=True, verbose_name='last updated')\n", (1646, 1701), False, 'from django.db import migrations, models\n'), ((1844, 1970), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'None', 'editable': '(False)', 'max_length': '(255)', 'null': '(True)', 'verbose_name': '"""shipping method name"""'}), "(blank=True, default=None, editable=False, max_length=255,\n null=True, verbose_name='shipping method name')\n", (1860, 1970), False, 'from django.db import migrations, models\n'), ((2104, 2197), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(255)', 'verbose_name': '"""tracking number"""'}), "(blank=True, default='', max_length=255, verbose_name=\n 'tracking number')\n", (2120, 2197), False, 'from django.db import migrations, models\n'), ((2322, 2482), 'django.db.models.ForeignKey', 
'models.ForeignKey', ([], {'editable': '(False)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""account.Address"""', 'verbose_name': '"""billing address"""'}), "(editable=False, on_delete=django.db.models.deletion.\n CASCADE, related_name='+', to='account.Address', verbose_name=\n 'billing address')\n", (2339, 2482), False, 'from django.db import migrations, models\n'), ((2885, 2976), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(255)', 'verbose_name': '"""discount name"""'}), "(blank=True, default='', max_length=255, verbose_name=\n 'discount name')\n", (2901, 2976), False, 'from django.db import migrations, models\n'), ((3102, 3274), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""account.Address"""', 'verbose_name': '"""shipping address"""'}), "(editable=False, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='+', to='account.Address', verbose_name=\n 'shipping address')\n", (3119, 3274), False, 'from django.db import migrations, models\n'), ((3943, 4042), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'editable': '(False)', 'max_length': '(36)', 'verbose_name': '"""tracking client id"""'}), "(blank=True, editable=False, max_length=36, verbose_name=\n 'tracking client id')\n", (3959, 4042), False, 'from django.db import migrations, models\n'), ((4162, 4266), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'default': '""""""', 'editable': '(False)', 'max_length': '(254)', 'verbose_name': '"""user email"""'}), "(blank=True, default='', editable=False, max_length=254,\n verbose_name='user email')\n", (4179, 4266), False, 'from django.db import migrations, models\n'), ((4384, 4527), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""discount.Voucher"""', 'verbose_name': '"""voucher"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='+', to='discount.Voucher', verbose_name='voucher')\n", (4401, 4527), False, 'from django.db import migrations, models\n'), ((4658, 4825), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""items"""', 'to': '"""order.DeliveryGroup"""', 'verbose_name': '"""delivery group"""'}), "(editable=False, on_delete=django.db.models.deletion.\n CASCADE, related_name='items', to='order.DeliveryGroup', verbose_name=\n 'delivery group')\n", (4675, 4825), False, 'from django.db import migrations, models\n'), ((4941, 5061), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""product.Stock"""', 'verbose_name': '"""stock"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='product.Stock', verbose_name='stock')\n", (4958, 5061), False, 'from django.db import migrations, models\n'), ((5191, 5276), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(100)', 'verbose_name': '"""comment"""'}), "(blank=True, default='', max_length=100, verbose_name='comment'\n )\n", (5207, 5276), False, 'from django.db import migrations, models\n'), ((5403, 5534), 
'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""history"""', 'to': '"""order.Order"""', 'verbose_name': '"""order"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='history', to='order.Order', verbose_name='order')\n", (5420, 5534), False, 'from django.db import migrations, models\n'), ((5660, 5800), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""user"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')\n", (5677, 5800), False, 'from django.db import migrations, models\n'), ((5917, 6049), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""payments"""', 'to': '"""order.Order"""', 'verbose_name': '"""order"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='payments', to='order.Order', verbose_name='order')\n", (5934, 6049), False, 'from django.db import migrations, models\n')] |
from llvmlite import ir
import xml.etree.ElementTree as et
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int1 = ir.IntType(1)
void_type = ir.VoidType()
function_names = []
registers, functions, uniques, extracts = {}, {}, {}, {}
internal_functions = {}
memory = {}
flags = ["ZF", "CF", "OF", "SF"]
pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"]
def lift(filename):
root = et.parse(filename).getroot()
module = ir.Module(name="lifted")
for register in root.find('globals').findall('register'):
if register.get('name') in flags:
var = ir.GlobalVariable(module, ir.IntType(1), register.get('name'))
var.initializer = ir.Constant(ir.IntType(1), None)
var.linkage = 'internal'
registers[register.get('name')] = var
elif register.get('name') in pointers:
var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name'))
var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None)
var.linkage = 'internal'
registers[register.get('name')] = var
else:
var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None)
var.linkage = 'internal'
registers[register.get('name')] = var
for memory_location in root.find('memory').findall('memory'):
var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None)
var.linkage = 'internal'
memory[memory_location.get('name')] = var
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "intra_function_branch")
internal_functions["intra_function_branch"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "call_indirect")
internal_functions["call_indirect"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "bit_extraction")
internal_functions["bit_extraction"] = ir_func
for function in root.findall('function'):
name = function.get('name')
x = 1
while name in function_names:
name = name + "_" + str(x)
x += 1
function_names.append(name)
address = function.get('address')
functions[address] = [build_function(name, module), function]
for address in functions:
ir_func, function = functions[address]
populate_func(ir_func, function)
return module
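# Usage sketch (hypothetical file name; the XML is assumed to be a Ghidra-style
# p-code dump matching the tags read above): the lifter returns an llvmlite
# module whose textual IR can simply be printed.
#
#   module = lift("program.xml")
#   print(str(module))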
def populate_func(ir_func, function):
builders, blocks = build_cfg(function, ir_func)
if blocks == {}:
return
populate_cfg(function, builders, blocks)
def build_function(name, module):
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, name)
return ir_func
def build_cfg(function, ir_func):
builders, blocks = {}, {}
instructions = function.find("instructions")
if instructions:
block = ir_func.append_basic_block("entry")
blocks["entry"] = block
builders["entry"] = ir.IRBuilder(block)
for instruction in instructions:
address = instruction.find("address").text
block = ir_func.append_basic_block(address)
blocks[address] = block
builders[address] = ir.IRBuilder(block)
return builders, blocks
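# Sketch of the CFG layout built above and consumed below: every instruction
# address gets its own basic block plus a synthetic "entry" block;
# populate_cfg starts by allocating a 10 MiB byte array in "entry", storing a
# pointer near its top into RSP, and then branching to the first real block.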
# noinspection DuplicatedCode
def populate_cfg(function, builders, blocks):
builder = builders["entry"]
stack_size = 10 * 1024 * 1024
stack = builder.alloca(ir.IntType(8), stack_size, name="stack")
stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top")
builder.store(stack_top, registers["RSP"])
builder.branch(list(blocks.values())[1])
block_iterator = 1
instr = 0
quiter = False
for instruction in function.find("instructions"):
if quiter:
break
address = instruction.find("address").text
if address in builders:
builder = builders[address]
pcodes = instruction.find("pcodes")
pc = 0
no_branch = True
for pcode in pcodes:
pc += 1
mnemonic = pcode.find("name")
if mnemonic.text == "COPY":
output = pcode.find("output")
if output.text in flags and pcode.find("input_0").get("storage") == "constant":
source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0))
else:
source = fetch_input_varnode(builder, pcode.find("input_0"))
update_output(builder, pcode.find("output"), source)
elif mnemonic.text == "LOAD":
input_1 = pcode.find("input_1")
output = pcode.find("output")
rhs = fetch_input_varnode(builder, input_1)
if input_1.get("storage") == "unique" and output.get("storage") == "unique":
                    # This is incorrect. This treats it as a copy; it should load from the memory address in input_1
update_output(builder, output, rhs)
else:
if input_1.text in pointers:
rhs = builder.gep(rhs, [ir.Constant(int64, 0)])
result = builder.load(rhs)
update_output(builder, output, result)
elif mnemonic.text == "STORE":
input_1 = pcode.find("input_1") # target
input_2 = pcode.find("input_2") # source
rhs = fetch_input_varnode(builder, input_2)
lhs = fetch_output_varnode(input_1)
lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)])
if lhs2.type != rhs.type.as_pointer():
lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer())
builder.store(rhs, lhs2)
elif mnemonic.text == "BRANCH":
value = pcode.find("input_0").text[2:-2]
if value in functions:
target = functions[value][0]
builder.call(target, [])
elif value in blocks:
target = blocks[value]
builder.branch(target)
no_branch = False
else:
# weird jump into some label in another function
# might be solved with callbr instruction?
builder.call(internal_functions["intra_function_branch"], [])
elif mnemonic.text == "CBRANCH":
true_target = blocks[pcode.find("input_0").text[2:-2]]
false_target = list(blocks.values())[block_iterator + 1]
condition = fetch_input_varnode(builder, pcode.find("input_1"))
no_branch = False
builder.cbranch(condition, true_target, false_target)
elif mnemonic.text == "BRANCHIND":
no_branch = False
target = fetch_input_varnode(builder, pcode.find("input_0"))
if not target.type.is_pointer:
target = builder.inttoptr(target, target.type.as_pointer())
builder.branch_indirect(target)
elif mnemonic.text == "CALL":
target = functions[pcode.find("input_0").text[2:-2]][0]
builder.call(target, [])
elif mnemonic.text == "CALLIND":
# target = pcode.find("input_0").text[2:-2]
builder.call(internal_functions["call_indirect"], [])
elif mnemonic.text == "USERDEFINED":
raise Exception("Not implemented")
elif mnemonic.text == "RETURN":
input_1 = pcode.find("input_1")
no_branch = False
if input_1 is None:
builder.ret_void()
else:
raise Exception("Return value being passed")
elif mnemonic.text == "PIECE":
raise Exception("PIECE operation needs to be tested")
elif mnemonic.text == "SUBPIECE":
output = pcode.find("output")
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
if input_1.text == "0x0":
val = fetch_input_varnode(builder, input_0)
result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8))
update_output(builder, output, result)
else:
builder.call(internal_functions['bit_extraction'], [])
elif mnemonic.text == "INT_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('==', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NOTEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('!=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESSEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_ZEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_ADD":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.add(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SUB":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.sub(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_CARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.uadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SCARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SBORROW":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
                    # INT_SBORROW tests signed overflow of the subtraction lhs - rhs
                    result = builder.ssub_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_2COMP":
val = fetch_input_varnode(builder, pcode.find("input_0"))
                    # INT_2COMP is two's-complement (arithmetic) negation
                    result = builder.neg(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NEGATE":
val = fetch_input_varnode(builder, pcode.find("input_0"))
                    # INT_NEGATE is bitwise NOT
                    result = builder.not_(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_LEFT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.shl(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_RIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.lshr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SRIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.ashr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_MULT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.mul(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_DIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
                    # llvmlite's IRBuilder has no generic div(); pcode INT_DIV is unsigned division
                    output = builder.udiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_REM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.urem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SDIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.sdiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SREM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.srem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "BOOL_NEGATE":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
                    # BOOL_NEGATE is a logical NOT on an i1 value
                    result = builder.not_(lhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "FLOAT_EQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NOTEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESSEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ADD":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SUB":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_MULT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_DIV":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NEG":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ABS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SQRT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_CEIL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_FLOOR":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ROUND":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NAN":
raise Exception("Not implemented")
elif mnemonic.text == "INT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "TRUNC":
raise Exception("Not implemented")
elif mnemonic.text == "CPOOLREF":
raise Exception("Not implemented")
elif mnemonic.text == "NEW":
raise Exception("Not implemented")
elif mnemonic.text == "MULTIEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "INDIRECT":
raise Exception("Not implemented")
elif mnemonic.text == "PTRADD":
raise Exception("Not implemented")
elif mnemonic.text == "PTRSUB":
raise Exception("Not implemented")
elif mnemonic.text == "CAST":
raise Exception("Not implemented")
else:
raise Exception("Not a standard pcode instruction")
block_iterator += 1
instr += 1
if block_iterator < len(blocks) and no_branch:
builder.branch(list(blocks.values())[block_iterator])
def fetch_input_varnode(builder, name):
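    """Resolve an input varnode (register / unique / constant / memory) to an LLVM value."""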
var_type = name.get("storage")
var_size = int(name.get("size")) * 8
if var_type == "register":
return builder.load(registers[name.text])
elif var_type == "unique":
if name.text not in list(uniques.keys()):
raise Exception("Temporary variable referenced before defined")
return uniques[name.text]
elif var_type == "constant":
var = ir.Constant(ir.IntType(var_size), int(name.text, 0))
return var
elif var_type == "memory":
return memory[name.text]
def update_output(builder, name, output):
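    """Store a result into a register's global slot or record it as a unique temporary."""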
var_type = name.get("storage")
if var_type == "register":
reg = registers[name.text]
if reg.type != output.type.as_pointer():
reg = builder.bitcast(reg, output.type.as_pointer())
builder.store(output, reg)
elif var_type == "unique":
uniques[name.text] = output
def fetch_output_varnode(name):
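    """Return the storage slot backing an output varnode (register global or unique entry)."""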
var_type = name.get("storage")
if var_type == "register":
return registers[name.text]
elif var_type == "unique":
if name.text not in uniques:
uniques[name.text] = None
return uniques[name.text]
def int_check_inputs(builder, lhs, rhs, target):
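    """Coerce pointer operands to integers of the target width before integer arithmetic."""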
if lhs.type != target:
if lhs.type.is_pointer:
lhs2 = lhs
lhs = builder.ptrtoint(lhs, target)
if lhs2 == rhs:
rhs = lhs
if rhs.type != target and lhs != rhs:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
return lhs, rhs
def check_shift_inputs(builder, lhs, rhs, target):
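    """Bring both shift operands to the target integer type via ptrtoint or zext."""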
if lhs.type != target:
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, target)
else:
lhs = builder.zext(lhs, target)
if rhs.type != target:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
else:
rhs = builder.zext(rhs, target)
return lhs, rhs
def int_comparison_check_inputs(builder, lhs, rhs):
# For integer comparison operations. We assume rhs is the correct type.
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, rhs.type)
return lhs, rhs | [
"llvmlite.ir.VoidType",
"xml.etree.ElementTree.parse",
"llvmlite.ir.Constant",
"llvmlite.ir.FunctionType",
"llvmlite.ir.IRBuilder",
"llvmlite.ir.IntType",
"llvmlite.ir.Function",
"llvmlite.ir.Module"
]
| [((68, 82), 'llvmlite.ir.IntType', 'ir.IntType', (['(32)'], {}), '(32)\n', (78, 82), False, 'from llvmlite import ir\n'), ((91, 105), 'llvmlite.ir.IntType', 'ir.IntType', (['(64)'], {}), '(64)\n', (101, 105), False, 'from llvmlite import ir\n'), ((113, 126), 'llvmlite.ir.IntType', 'ir.IntType', (['(1)'], {}), '(1)\n', (123, 126), False, 'from llvmlite import ir\n'), ((139, 152), 'llvmlite.ir.VoidType', 'ir.VoidType', ([], {}), '()\n', (150, 152), False, 'from llvmlite import ir\n'), ((421, 445), 'llvmlite.ir.Module', 'ir.Module', ([], {'name': '"""lifted"""'}), "(name='lifted')\n", (430, 445), False, 'from llvmlite import ir\n'), ((1775, 1788), 'llvmlite.ir.VoidType', 'ir.VoidType', ([], {}), '()\n', (1786, 1788), False, 'from llvmlite import ir\n'), ((1800, 1832), 'llvmlite.ir.FunctionType', 'ir.FunctionType', (['func_return', '[]'], {}), '(func_return, [])\n', (1815, 1832), False, 'from llvmlite import ir\n'), ((1847, 1897), 'llvmlite.ir.Function', 'ir.Function', (['module', 'fnty', '"""intra_function_branch"""'], {}), "(module, fnty, 'intra_function_branch')\n", (1858, 1897), False, 'from llvmlite import ir\n'), ((1975, 1988), 'llvmlite.ir.VoidType', 'ir.VoidType', ([], {}), '()\n', (1986, 1988), False, 'from llvmlite import ir\n'), ((2000, 2032), 'llvmlite.ir.FunctionType', 'ir.FunctionType', (['func_return', '[]'], {}), '(func_return, [])\n', (2015, 2032), False, 'from llvmlite import ir\n'), ((2047, 2089), 'llvmlite.ir.Function', 'ir.Function', (['module', 'fnty', '"""call_indirect"""'], {}), "(module, fnty, 'call_indirect')\n", (2058, 2089), False, 'from llvmlite import ir\n'), ((2159, 2172), 'llvmlite.ir.VoidType', 'ir.VoidType', ([], {}), '()\n', (2170, 2172), False, 'from llvmlite import ir\n'), ((2184, 2216), 'llvmlite.ir.FunctionType', 'ir.FunctionType', (['func_return', '[]'], {}), '(func_return, [])\n', (2199, 2216), False, 'from llvmlite import ir\n'), ((2231, 2274), 'llvmlite.ir.Function', 'ir.Function', (['module', 'fnty', '"""bit_extraction"""'], {}), "(module, fnty, 'bit_extraction')\n", (2242, 2274), False, 'from llvmlite import ir\n'), ((3032, 3045), 'llvmlite.ir.VoidType', 'ir.VoidType', ([], {}), '()\n', (3043, 3045), False, 'from llvmlite import ir\n'), ((3057, 3089), 'llvmlite.ir.FunctionType', 'ir.FunctionType', (['func_return', '[]'], {}), '(func_return, [])\n', (3072, 3089), False, 'from llvmlite import ir\n'), ((3104, 3135), 'llvmlite.ir.Function', 'ir.Function', (['module', 'fnty', 'name'], {}), '(module, fnty, name)\n', (3115, 3135), False, 'from llvmlite import ir\n'), ((3403, 3422), 'llvmlite.ir.IRBuilder', 'ir.IRBuilder', (['block'], {}), '(block)\n', (3415, 3422), False, 'from llvmlite import ir\n'), ((3862, 3875), 'llvmlite.ir.IntType', 'ir.IntType', (['(8)'], {}), '(8)\n', (3872, 3875), False, 'from llvmlite import ir\n'), ((379, 397), 'xml.etree.ElementTree.parse', 'et.parse', (['filename'], {}), '(filename)\n', (387, 397), True, 'import xml.etree.ElementTree as et\n'), ((3643, 3662), 'llvmlite.ir.IRBuilder', 'ir.IRBuilder', (['block'], {}), '(block)\n', (3655, 3662), False, 'from llvmlite import ir\n'), ((3939, 3973), 'llvmlite.ir.Constant', 'ir.Constant', (['int64', '(stack_size - 8)'], {}), '(int64, stack_size - 8)\n', (3950, 3973), False, 'from llvmlite import ir\n'), ((595, 608), 'llvmlite.ir.IntType', 'ir.IntType', (['(1)'], {}), '(1)\n', (605, 608), False, 'from llvmlite import ir\n'), ((674, 687), 'llvmlite.ir.IntType', 'ir.IntType', (['(1)'], {}), '(1)\n', (684, 687), False, 'from llvmlite import ir\n'), ((24798, 24818), 
'llvmlite.ir.IntType', 'ir.IntType', (['var_size'], {}), '(var_size)\n', (24808, 24818), False, 'from llvmlite import ir\n'), ((888, 901), 'llvmlite.ir.IntType', 'ir.IntType', (['(8)'], {}), '(8)\n', (898, 901), False, 'from llvmlite import ir\n'), ((983, 996), 'llvmlite.ir.IntType', 'ir.IntType', (['(8)'], {}), '(8)\n', (993, 996), False, 'from llvmlite import ir\n'), ((4754, 4767), 'llvmlite.ir.IntType', 'ir.IntType', (['(1)'], {}), '(1)\n', (4764, 4767), False, 'from llvmlite import ir\n'), ((6001, 6022), 'llvmlite.ir.Constant', 'ir.Constant', (['int64', '(0)'], {}), '(int64, 0)\n', (6012, 6022), False, 'from llvmlite import ir\n'), ((5559, 5580), 'llvmlite.ir.Constant', 'ir.Constant', (['int64', '(0)'], {}), '(int64, 0)\n', (5570, 5580), False, 'from llvmlite import ir\n')] |
import wx
import wx.adv
import random
import util
import config
import time
import datetime
import threading
import requests
import json
from functools import partial
class RequesterThread(threading.Thread):
# https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s03.html
def __init__(self, name, parent_thread, parent_panel):
threading.Thread.__init__(self, name=name)
self._stopevent = threading.Event()
self.parent_panel = parent_panel
self.parent_thread = parent_thread
def run(self):
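        # Re-send the current query every 5 seconds until this thread is
        # stopped or the parent (GUI) thread exits.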
while (not self._stopevent.is_set()) and self.parent_thread.is_alive():
print("hello")
# print(self.parent_panel.info_widget_dict)
# print(self.parent_panel.info)
# chnage to real time
end = datetime.datetime.now()
start = end - datetime.timedelta(minutes=1)
self.parent_panel.info["start"] = util.convert_to_GMT_zone(start)
self.parent_panel.info["end"] = util.convert_to_GMT_zone(end)
self.parent_panel._send_request(self.parent_panel.info)
self._stopevent.wait(5.0)
def join(self, timeout=None):
self._stopevent.set()
print("thread stop")
threading.Thread.join(self, timeout)
class RightPanel(wx.Panel):
def __init__(self, parent, info={}):
wx.Panel.__init__(self, parent=parent)
self.drop_down_menu_ID = None
self.result_visual_ID = None
self.info = info
self._init_UI()
def _init_UI(self):
self.SetBackgroundColour("#BAB86C")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
# add question label
st1 = wx.StaticText(self, label='Question')
st1.SetFont(font)
hbox1.Add(st1, proportion=2, flag=wx.RIGHT, border=10)
# add drop down menu
question_list = [
"1. How many people are in the building?",
"2. How many people are in a specific room?",
"3. Where is someone?",
# "4. Which room has someone visited?",
"4. What is the utilization of a specific room?"
]
drop_down_menu = wx.ComboBox(self, choices=question_list)
hbox1.Add(drop_down_menu, proportion=8, flag=wx.TOP, border=5)
vbox1 = wx.BoxSizer(wx.VERTICAL)
# add result label
# st2 = wx.StaticText(self, label='Result')
# st2.SetFont(font)
# vbox1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
# add canvas panel
# canvas_panel = CanvasPanel(self)
# vbox1.Add(canvas_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
result_panel = ResultPanel(self)
# result_panel.SetBackgroundColour("#000000")
vbox1.Add(result_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
vbox.Add(hbox1, proportion=1, flag=wx.EXPAND|wx.ALL, border=10)
vbox.Add(vbox1, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
self.SetSizer(vbox)
# listen combo
drop_down_menu.Bind(wx.EVT_COMBOBOX, partial(self.on_selection,
combo_box=drop_down_menu,
panel=result_panel))
def on_selection(self, event, combo_box, panel):
# print(self.drop_down_menu.GetValue())
print(combo_box.GetValue())
panel.init_question_UI(combo_box.GetValue()[0])
# st2 = wx.StaticText(self, label=combo_box.GetValue())
# st2.SetFont(font)
# sizer1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
class ResultPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# self._init_UI()
self._q_dict = {"1": self._q1_panel,
"2": self._q2_panel,
"3": self._q3_panel,
# "4": self._q4_panel,
"4": self._q5_panel,}
self.info_widget_dict = {"feeder": {}, "consumer": {}}
self.worker = None
self.server = config.SERVER
self._set_font()
def _set_font(self):
self.font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
self.font.SetPointSize(12)
self.font.MakeBold()
def init_question_UI(self, q_idx):
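        """Rebuild the panel for the selected question and stop any running worker."""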
# clean the panel
for child in self.GetChildren():
child.Destroy()
# stop the worker
if self.worker:
# print("the worker has been stop")
self.worker.join()
self.worker = None
self.info_widget_dict["feeder"].clear()
self.info_widget_dict["consumer"].clear()
decorate_panel = self._q_dict[q_idx]
decorate_panel()
def add_date_time_picker_layout(self):
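        """Build a sizer with start/end date-time pickers and a real-time checkbox."""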
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
# Start
start_label = wx.StaticText(self, label="START TIME")
start_label.SetFont(self.font)
dpc1 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc1 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox1.Add(start_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox1.Add(dpc1, proportion=3, flag=wx.RIGHT, border=5)
hbox1.Add(tpc1, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# End
end_label = wx.StaticText(self, label="END TIME")
end_label.SetFont(self.font)
dpc2 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc2 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox2.Add(end_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox2.Add(dpc2, proportion=3, flag=wx.RIGHT, border=5)
hbox2.Add(tpc2, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox2, proportion=0, flag=wx.ALL, border=5)
# Real time box
real_label = wx.StaticText(self, label="REAL TIME")
real_label.SetFont(self.font)
cb = wx.CheckBox(self)
hbox3.Add(real_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox3.Add(cb, proportion=3, flag=wx.RIGHT|wx.TOP, border=5)
vbox.Add(hbox3, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["feeder"]["start_date"] = dpc1
self.info_widget_dict["feeder"]["start_time"] = tpc1
self.info_widget_dict["feeder"]["end_date"] = dpc2
self.info_widget_dict["feeder"]["end_time"] = tpc2
self.info_widget_dict["feeder"]["real_time"] = cb
# self.SetBackgroundColour("#000000")
# r = lambda: random.randint(0,255)
# color = '#%02X%02X%02X' % (r(),r(),r())
return vbox
def _add_confirm_button(self, sizer, question_index):
"""
        question_index => {1, 2, 3, 4, 5}
"""
comfirm_btn = wx.Button(self, id=-1, label="Confirm")
sizer.Add(comfirm_btn, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
# self.Bind(wx.EVT_BUTTON, self.OnClick, comfirm_btn)
self.Bind(wx.EVT_BUTTON, lambda event: self.OnClick(event, question_index), comfirm_btn)
def _add_result_label(self, sizer):
result_label = wx.StaticText(self, label="RESULT")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
font.MakeBold()
result_label.SetFont(font)
sizer.Add(result_label, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL, border=20)
def OnClick(self, event, question_index):
info = {}
# handle date and time
if question_index in [1, 2, 3, 4]:
start_date = self.info_widget_dict["feeder"]["start_date"].GetValue()
start_time = self.info_widget_dict["feeder"]["start_time"].GetValue()
end_date = self.info_widget_dict["feeder"]["end_date"].GetValue()
end_time = self.info_widget_dict["feeder"]["end_time"].GetValue()
info["start"] = util.combine_datetime(start_date, start_time)
info["end"] = util.combine_datetime(end_date, end_time)
# print("start time = {}".format(info["start"]))
# print("end time = {}".format(info["end"]))
if_real_time = self.info_widget_dict["feeder"]["real_time"].GetValue()
if question_index == 1:
# requester send request to server
pass
elif question_index == 2:
# requester send request to server
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
print(room)
info["room"] = room
elif question_index == 3:
# requester send request to server
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index = 4
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index == 5
if_real_time = False
date = self.info_widget_dict["feeder"]["date_picker"].GetValue()
            picked_time = self.info_widget_dict["feeder"]["time_picker"].GetValue()  # avoid shadowing the time module
            room = self.info_widget_dict["feeder"]["room_select"].GetValue()
            info["date"] = util.combine_datetime(date, picked_time)
info["room"] = room
# requester send request to server
info["question_index"] = question_index
self.info = info
if if_real_time:
if not self.worker:
                self.worker = RequesterThread(name="question_{}_requester".format(question_index), parent_thread=threading.currentThread(), parent_panel=self)
self.worker.start()
print("start worker")
else:
# first check if the worker is working
if self.worker:
self.worker.join()
self.worker = None
self._send_request(info)
def _request_handle(self, url, body={}, params={}, METHOD="post"):
# https://stackoverflow.com/questions/15900338/python-request-post-with-param-data
print("url", url)
print("body", body)
print("params", params)
resp = {}
if METHOD == "post":
r = requests.post(url, data=body)
else:
r = requests.get(url, params=params)
print(r.status_code)
if r.status_code == 200:
resp = r.json()
print(resp)
print(type(resp))
return resp
def _send_request(self, info):
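        """Query the server for the selected question and fill the result widgets."""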
question_index = int(info["question_index"])
if question_index == 1:
## get ##
url = self.server + "/people_building/"
body = {"start": info["start"], "end": info["end"]}
# body = {'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
except:
occu = str(0)
## received##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
elif question_index == 2:
## get ##
url = self.server + "/people_room/"
body = {"room": info["room"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
occupancy_info = response['occupancy_info']
except:
occu = str(0)
occupancy_info = []
## received ##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
nlb = self.info_widget_dict["consumer"]["name_list"]
nlb.Clear()
for name in occupancy_info:
nlb.Append(name)
elif question_index == 3:
## get ##
url = self.server + "/person_room/"
body = {"name": info["name"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
room_list = response['room']
count = str(len(room_list))
except:
count = str(0)
room_list = []
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 4:
## get ##
url = self.server + "question/4"
body = {"name": info["name"],
# "start_time": info["start"],
# "end_time": info["end"],
"time": info["start"],
}
response = self._request_handle(url=url, body=body, METHOD="post")
count = str(random.randint(0, 20))
room_list = ["Room_1_1_140", "Room_1_1_141"]
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 5:
## get ##
url = self.server + "/utilization/"
body = {"room": info["room"],
"date": info["date"],
# 'date': '2020-04-05 20:00:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
# self.request_handle(url, body, METHOD="post")
try:
response = json.loads(response)
utilization = "{:.2f}".format(response["utilization"]*100) + "%"
except:
utilization = "0%"
## received##
self.info_widget_dict["consumer"]["utilization_label"].SetLabel(utilization)
def _q1_panel(self):
print("q1")
main_vbox = self.add_date_time_picker_layout()
# confirm button
self._add_confirm_button(main_vbox, 1)
# add result label
self._add_result_label(main_vbox)
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q2_panel(self):
print("q2")
main_vbox = self.add_date_time_picker_layout()
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
# room_info = wx.TextCtrl(self)
# room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 2)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
namelb = wx.ListBox(self)
main_vbox.Add(namelb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.info_widget_dict["consumer"]["name_list"] = namelb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q3_panel(self):
print("q3")
vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 3)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q4_panel(self):
print("q4")
main_vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 4)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
main_vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q5_panel(self):
print("q5")
vbox = wx.BoxSizer(wx.VERTICAL)
# datetime
date_hbox = wx.BoxSizer(wx.HORIZONTAL)
date_label = wx.StaticText(self, label="Datetime")
date_label.SetFont(self.font)
dpc = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
date_hbox.Add(date_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
date_hbox.Add(dpc, proportion=3, flag=wx.RIGHT, border=5)
date_hbox.Add(tpc, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(date_hbox, proportion=0, flag=wx.ALL, border=5)
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 5)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["date_picker"] = dpc
self.info_widget_dict["feeder"]["time_picker"] = tpc
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Utilization")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["utilization_label"] = occu_label
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
| [
"requests.post",
"util.combine_datetime",
"wx.ListBox",
"threading.Thread.join",
"wx.Panel.__init__",
"datetime.timedelta",
"threading.Thread.__init__",
"wx.CheckBox",
"wx.SystemSettings.GetFont",
"wx.adv.TimePickerCtrl",
"random.randint",
"wx.adv.DatePickerCtrl",
"threading.currentThread",
"json.loads",
"requests.get",
"wx.StaticText",
"wx.TextCtrl",
"util.convert_to_GMT_zone",
"wx.Button",
"wx.ComboBox",
"wx.BoxSizer",
"threading.Event",
"datetime.datetime.now",
"functools.partial"
]
| [((364, 406), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {'name': 'name'}), '(self, name=name)\n', (389, 406), False, 'import threading\n'), ((442, 459), 'threading.Event', 'threading.Event', ([], {}), '()\n', (457, 459), False, 'import threading\n'), ((1280, 1316), 'threading.Thread.join', 'threading.Thread.join', (['self', 'timeout'], {}), '(self, timeout)\n', (1301, 1316), False, 'import threading\n'), ((1401, 1439), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (1418, 1439), False, 'import wx\n'), ((1649, 1694), 'wx.SystemSettings.GetFont', 'wx.SystemSettings.GetFont', (['wx.SYS_SYSTEM_FONT'], {}), '(wx.SYS_SYSTEM_FONT)\n', (1674, 1694), False, 'import wx\n'), ((1741, 1765), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1752, 1765), False, 'import wx\n'), ((1783, 1809), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (1794, 1809), False, 'import wx\n'), ((1853, 1890), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Question"""'}), "(self, label='Question')\n", (1866, 1890), False, 'import wx\n'), ((2434, 2474), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'choices': 'question_list'}), '(self, choices=question_list)\n', (2445, 2474), False, 'import wx\n'), ((2563, 2587), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (2574, 2587), False, 'import wx\n'), ((4019, 4050), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (4036, 4050), False, 'import wx\n'), ((4522, 4567), 'wx.SystemSettings.GetFont', 'wx.SystemSettings.GetFont', (['wx.SYS_SYSTEM_FONT'], {}), '(wx.SYS_SYSTEM_FONT)\n', (4547, 4567), False, 'import wx\n'), ((5166, 5190), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (5177, 5190), False, 'import wx\n'), ((5207, 5233), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (5218, 5233), False, 'import wx\n'), ((5250, 5276), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (5261, 5276), False, 'import wx\n'), ((5293, 5319), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (5304, 5319), False, 'import wx\n'), ((5359, 5398), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""START TIME"""'}), "(self, label='START TIME')\n", (5372, 5398), False, 'import wx\n'), ((5453, 5504), 'wx.adv.DatePickerCtrl', 'wx.adv.DatePickerCtrl', (['self', '(-1)', 'wx.DefaultDateTime'], {}), '(self, -1, wx.DefaultDateTime)\n', (5474, 5504), False, 'import wx\n'), ((5520, 5571), 'wx.adv.TimePickerCtrl', 'wx.adv.TimePickerCtrl', (['self', '(-1)', 'wx.DefaultDateTime'], {}), '(self, -1, wx.DefaultDateTime)\n', (5541, 5571), False, 'import wx\n'), ((5880, 5919), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""END TIME"""'}), "(self, label='END TIME')\n", (5893, 5919), False, 'import wx\n'), ((5972, 6023), 'wx.adv.DatePickerCtrl', 'wx.adv.DatePickerCtrl', (['self', '(-1)', 'wx.DefaultDateTime'], {}), '(self, -1, wx.DefaultDateTime)\n', (5993, 6023), False, 'import wx\n'), ((6039, 6090), 'wx.adv.TimePickerCtrl', 'wx.adv.TimePickerCtrl', (['self', '(-1)', 'wx.DefaultDateTime'], {}), '(self, -1, wx.DefaultDateTime)\n', (6060, 6090), False, 'import wx\n'), ((6399, 6437), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""REAL TIME"""'}), "(self, label='REAL TIME')\n", (6412, 6437), False, 'import wx\n'), ((6489, 6506), 'wx.CheckBox', 'wx.CheckBox', (['self'], {}), '(self)\n', 
(6500, 6506), False, 'import wx\n'), ((7329, 7368), 'wx.Button', 'wx.Button', (['self'], {'id': '(-1)', 'label': '"""Confirm"""'}), "(self, id=-1, label='Confirm')\n", (7338, 7368), False, 'import wx\n'), ((7671, 7706), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""RESULT"""'}), "(self, label='RESULT')\n", (7684, 7706), False, 'import wx\n'), ((7722, 7767), 'wx.SystemSettings.GetFont', 'wx.SystemSettings.GetFont', (['wx.SYS_SYSTEM_FONT'], {}), '(wx.SYS_SYSTEM_FONT)\n', (7747, 7767), False, 'import wx\n'), ((15347, 15373), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (15358, 15373), False, 'import wx\n'), ((15390, 15428), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Occupancy"""'}), "(self, label='Occupancy')\n", (15403, 15428), False, 'import wx\n'), ((15553, 15584), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""__"""'}), "(self, label='__')\n", (15566, 15584), False, 'import wx\n'), ((16194, 16220), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (16205, 16220), False, 'import wx\n'), ((16242, 16275), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Room"""'}), "(self, label='Room')\n", (16255, 16275), False, 'import wx\n'), ((16702, 16738), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'choices': 'room_list'}), '(self, choices=room_list)\n', (16713, 16738), False, 'import wx\n'), ((17327, 17353), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (17338, 17353), False, 'import wx\n'), ((17370, 17408), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Occupancy"""'}), "(self, label='Occupancy')\n", (17383, 17408), False, 'import wx\n'), ((17533, 17564), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""__"""'}), "(self, label='__')\n", (17546, 17564), False, 'import wx\n'), ((17785, 17801), 'wx.ListBox', 'wx.ListBox', (['self'], {}), '(self)\n', (17795, 17801), False, 'import wx\n'), ((18333, 18359), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (18344, 18359), False, 'import wx\n'), ((18382, 18415), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Name"""'}), "(self, label='Name')\n", (18395, 18415), False, 'import wx\n'), ((18564, 18581), 'wx.TextCtrl', 'wx.TextCtrl', (['self'], {}), '(self)\n', (18575, 18581), False, 'import wx\n'), ((19087, 19113), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (19098, 19113), False, 'import wx\n'), ((19130, 19169), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Room Count"""'}), "(self, label='Room Count')\n", (19143, 19169), False, 'import wx\n'), ((19294, 19325), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""__"""'}), "(self, label='__')\n", (19307, 19325), False, 'import wx\n'), ((19541, 19557), 'wx.ListBox', 'wx.ListBox', (['self'], {}), '(self)\n', (19551, 19557), False, 'import wx\n'), ((20094, 20120), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (20105, 20120), False, 'import wx\n'), ((20143, 20176), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Name"""'}), "(self, label='Name')\n", (20156, 20176), False, 'import wx\n'), ((20325, 20342), 'wx.TextCtrl', 'wx.TextCtrl', (['self'], {}), '(self)\n', (20336, 20342), False, 'import wx\n'), ((20863, 20889), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (20874, 20889), False, 'import wx\n'), ((20906, 20945), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Room Count"""'}), "(self, 
label='Room Count')\n", (20919, 20945), False, 'import wx\n'), ((21070, 21101), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""__"""'}), "(self, label='__')\n", (21083, 21101), False, 'import wx\n'), ((21322, 21338), 'wx.ListBox', 'wx.ListBox', (['self'], {}), '(self)\n', (21332, 21338), False, 'import wx\n'), ((21819, 21843), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (21830, 21843), False, 'import wx\n'), ((21884, 21910), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (21895, 21910), False, 'import wx\n'), ((21932, 21969), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Datetime"""'}), "(self, label='Datetime')\n", (21945, 21969), False, 'import wx\n'), ((22022, 22073), 'wx.adv.DatePickerCtrl', 'wx.adv.DatePickerCtrl', (['self', '(-1)', 'wx.DefaultDateTime'], {}), '(self, -1, wx.DefaultDateTime)\n', (22043, 22073), False, 'import wx\n'), ((22088, 22139), 'wx.adv.TimePickerCtrl', 'wx.adv.TimePickerCtrl', (['self', '(-1)', 'wx.DefaultDateTime'], {}), '(self, -1, wx.DefaultDateTime)\n', (22109, 22139), False, 'import wx\n'), ((22458, 22484), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (22469, 22484), False, 'import wx\n'), ((22506, 22539), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Room"""'}), "(self, label='Room')\n", (22519, 22539), False, 'import wx\n'), ((22966, 23002), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'choices': 'room_list'}), '(self, choices=room_list)\n', (22977, 23002), False, 'import wx\n'), ((23554, 23580), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (23565, 23580), False, 'import wx\n'), ((23597, 23637), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""Utilization"""'}), "(self, label='Utilization')\n", (23610, 23637), False, 'import wx\n'), ((23762, 23793), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""__"""'}), "(self, label='__')\n", (23775, 23793), False, 'import wx\n'), ((825, 848), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (846, 848), False, 'import datetime\n'), ((951, 982), 'util.convert_to_GMT_zone', 'util.convert_to_GMT_zone', (['start'], {}), '(start)\n', (975, 982), False, 'import util\n'), ((1027, 1056), 'util.convert_to_GMT_zone', 'util.convert_to_GMT_zone', (['end'], {}), '(end)\n', (1051, 1056), False, 'import util\n'), ((3407, 3479), 'functools.partial', 'partial', (['self.on_selection'], {'combo_box': 'drop_down_menu', 'panel': 'result_panel'}), '(self.on_selection, combo_box=drop_down_menu, panel=result_panel)\n', (3414, 3479), False, 'from functools import partial\n'), ((8438, 8483), 'util.combine_datetime', 'util.combine_datetime', (['start_date', 'start_time'], {}), '(start_date, start_time)\n', (8459, 8483), False, 'import util\n'), ((8510, 8551), 'util.combine_datetime', 'util.combine_datetime', (['end_date', 'end_time'], {}), '(end_date, end_time)\n', (8531, 8551), False, 'import util\n'), ((9874, 9907), 'util.combine_datetime', 'util.combine_datetime', (['date', 'time'], {}), '(date, time)\n', (9895, 9907), False, 'import util\n'), ((10862, 10891), 'requests.post', 'requests.post', (['url'], {'data': 'body'}), '(url, data=body)\n', (10875, 10891), False, 'import requests\n'), ((10922, 10954), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (10934, 10954), False, 'import requests\n'), ((875, 904), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (893, 904), 
False, 'import datetime\n'), ((10228, 10253), 'threading.currentThread', 'threading.currentThread', ([], {}), '()\n', (10251, 10253), False, 'import threading\n'), ((13970, 13991), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (13984, 13991), False, 'import random\n'), ((14763, 14783), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (14773, 14783), False, 'import json\n')] |
import copy
import sys
PLAYER1, PLAYER2, EMPTY, BLOCKED = [0, 1, 2, 3]
S_PLAYER1, S_PLAYER2, S_EMPTY, S_BLOCKED = ['0', '1', '.', 'x']
CHARTABLE = [(PLAYER1, S_PLAYER1), (PLAYER2, S_PLAYER2), (EMPTY, S_EMPTY), (BLOCKED, S_BLOCKED)]
DIRS = [
((-1, 0), "up"),
((1, 0), "down"),
((0, 1), "right"),
((0, -1), "left")
]
# The information of the whole grid
class Board:
def __init__(self, width, height):
self.width = width
self.height = height
self.cell = [[EMPTY for col in range (0, width)] for row in range(0, height)]
def parse_cell_char(self, players, row, col, char):
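        """Decode one board character, recording player positions as a side effect."""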
result = -1
if char == S_PLAYER1:
            players[0].row = row
            players[0].col = col
        elif char == S_PLAYER2:
            players[1].row = row
            players[1].col = col
for (i, symbol) in CHARTABLE:
if symbol == char:
result = i
break
return result
def parse_cell(self, players, row, col, data):
cell = []
for char in data:
item = self.parse_cell_char(players, row, col, char)
cell.append(item)
return cell
def parse(self, players, data):
cells = data.split(',')
col = 0
row = 0
for cell in cells:
if (col >= self.width):
col = 0
row +=1
self.cell[row][col] = self.parse_cell(players, row, col, cell)
col += 1
def in_bounds (self, row, col):
return row >= 0 and col >= 0 and col < self.width and row < self.height
def is_legal(self, row, col, my_id):
enemy_id = my_id ^ 1
return (self.in_bounds(row, col)) and (not BLOCKED == self.cell[row][col]) and (not enemy_id == self.cell[row][col])
    def is_legal_tuple(self, loc, my_id):
        row, col = loc
        return self.is_legal(row, col, my_id)
    def get_adjacent(self, row, col, my_id):
        result = []
        for (o_row, o_col), _ in DIRS:
            t_row, t_col = o_row + row, o_col + col
            if self.is_legal(t_row, t_col, my_id):
result.append((t_row, t_col))
return result
def legal_moves(self, my_id, players):
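        """Return the (offset, name) pairs of directions that player my_id may move in."""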
my_player = players[my_id]
result = []
for ((o_row, o_col), order) in DIRS:
t_row = my_player.row + o_row
t_col = my_player.col + o_col
if self.is_legal(t_row, t_col, my_id):
result.append(((o_row, o_col), order))
return result
def update_cell(self, row, col, data):
self.cell[row][col] = data
def output_cell(self, cell):
done = False
for (i, symbol) in CHARTABLE:
if i == cell:
if not done:
sys.stderr.write(symbol)
done = True
break
if not done:
sys.stderr.write("!")
done = True
def output(self):
for row in self.cell:
sys.stderr.write("\n")
for cell in row:
self.output_cell(cell)
sys.stderr.write("\n")
sys.stderr.flush()
def tostring(self):
res = ""
        for row in range(self.height):
            for col in range(self.width):
res += str(self.cell[row][col])
res += ","
return res
| [
"sys.stderr.write",
"sys.stderr.flush"
]
| [((3157, 3179), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (3173, 3179), False, 'import sys\n'), ((3188, 3206), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (3204, 3206), False, 'import sys\n'), ((2931, 2952), 'sys.stderr.write', 'sys.stderr.write', (['"""!"""'], {}), "('!')\n", (2947, 2952), False, 'import sys\n'), ((3058, 3080), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (3074, 3080), False, 'import sys\n'), ((2823, 2847), 'sys.stderr.write', 'sys.stderr.write', (['symbol'], {}), '(symbol)\n', (2839, 2847), False, 'import sys\n')] |
import sys
sys.setrecursionlimit(10000)
def dfs(r, c):
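    # Flood-fill one group of adjacent 1-cells (4-directional neighbours).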
global visit
visit[r][c] = True
mov = [(-1, 0), (0, -1), (1, 0), (0, 1)]
for i in range(4):
dr, dc = mov[i]
nr, nc = r + dr, c + dc
if 0 <= nr < N and 0 <= nc < M and visit[nr][nc] == False and board[nr][nc] == 1:
dfs(nr, nc)
T = int(input())
for _ in range(T):
M, N, K = map(int, input().split())
board = [[0] * M for _ in range(N)]
for _ in range(K):
c, r = map(int, input().split())
board[r][c] = 1
visit = [[False] * M for _ in range(N)]
cnt = 0
for r in range(N):
for c in range(M):
if not visit[r][c] and board[r][c] == 1:
cnt += 1
dfs(r, c)
    # debug output: dump the visited matrix (commented out so only the count is printed)
    # for ele in visit:
    #     print(ele)
    # print()
print(cnt) | [
"sys.setrecursionlimit"
]
| [((12, 40), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (33, 40), False, 'import sys\n')] |
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PATH, CONF_USERNAME
DOMAIN = "vaddio_conferenceshot"
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
SERVICE_RECALL_PRESET = "move_to_preset"
ATTR_PRESET_ID = "preset"
| [
"voluptuous.Required"
]
| [((237, 260), 'voluptuous.Required', 'vol.Required', (['CONF_HOST'], {}), '(CONF_HOST)\n', (249, 260), True, 'import voluptuous as vol\n'), ((281, 308), 'voluptuous.Required', 'vol.Required', (['CONF_USERNAME'], {}), '(CONF_USERNAME)\n', (293, 308), True, 'import voluptuous as vol\n'), ((329, 356), 'voluptuous.Required', 'vol.Required', (['CONF_PASSWORD'], {}), '(CONF_PASSWORD)\n', (341, 356), True, 'import voluptuous as vol\n')] |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import os
import re
import time
import socket
import sys
from scapy import VERSION, base_classes
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
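    """Data descriptor that stores a per-instance value and runs a hook whenever it is set."""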
def __init__(self, name=None, default=None,
hook=None, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
def _readonly(name):
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
class ProgPath(ConfClass):
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
return self[item] if item in self else default
def __repr__(self):
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(list):
def __init__(self):
list.__init__(self)
self.ldict = {}
def __repr__(self):
return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self)
def register(self, layer):
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
class CommandsList(list):
def __repr__(self):
s = []
for l in sorted(self, key=lambda x: x.__name__):
doc = l.__doc__.split("\n")[0] if l.__doc__ else "--"
s.append("%-20s: %s" % (l.__name__, doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self, item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item, v)
def update(self, other):
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
if self.timeout is None:
return dict.items(self)
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
"""Checks that module has a higher version that minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags = re.match(version_regexp, module.__version__)
if not version_tags:
return False
version_tags = version_tags.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags)
return version_tags >= minver
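# e.g. _version_checker(cryptography, (1, 7)) is True for version "1.7.1" or
# "2.8" and False for "1.6.2"; a trailing ".devN" tag is stripped before comparing.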
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (1, 7))
def isCryptographyRecent():
"""
Check if the cryptography library is recent (2.0 and later)
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0))
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
def _set_conf_sockets():
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
from scapy.main import _load
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap or conf.use_dnet:
try:
from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
# Update globals
_load("scapy.arch.pcapdnet")
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
# Update globals
_load("scapy.arch.bpf")
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
# Update globals
_load("scapy.arch.linux")
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val):
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
def _loglevel_changer(attr, val):
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
stealth : if 1, prevents any unwanted packet to go out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501
if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulates another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501
    iff : selects the default output interface for srp() and sendp(). default: "eth0"  # noqa: E501
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501
debug_tls:When 1, print some TLS session secrets when they are computed.
recv_poll_rate: how often to check for new packets. Defaults to 0.05s.
"""
version = ReadOnlyAttribute("version", VERSION)
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
checkIPID = False
checkIPsrc = True
checkIPaddr = True
checkIPinIP = True
check_TCPerror_seqack = False
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = True
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L3socket6 = None
L2socket = None
L2listen = None
BTsocket = None
USBsocket = None
min_pkt_size = 60
bufsize = 2**16
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = False
debug_tls = False
wepkey = ""
cache_iflist = {}
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = True
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"),
_socket_changer
)
# XXX use_dnet is deprecated
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
extensions_paths = "."
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = None
# can, tls, http are not loaded by default
load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns',
'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet',
'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp',
'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios',
'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip',
'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp',
'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee']
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_recent = isCryptographyRecent()
crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
recv_poll_rate = 0.05
def __getattr__(self, attr):
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
return object.__getattr__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf()
def crypto_validator(func):
"""
This a decorator to be used for any method relying on the cryptography library. # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
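
# Hedged usage sketch (illustration only, not part of this module):
#
#   @crypto_validator
#   def build_cipher(self, key):
#       ...  # body may safely assume python-cryptography >= 1.7 is importable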
| [
"scapy.modules.six.iterkeys",
"scapy.error.log_scapy.warning",
"cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate",
"scapy.error.warning",
"os.getenv",
"re.match",
"scapy.error.ScapyInvalidPlatformException",
"functools.partial",
"scapy.modules.six.iteritems",
"scapy.error.log_scapy.setLevel",
"scapy.themes.NoTheme",
"scapy.modules.six.itervalues",
"time.time",
"os.path.expanduser",
"scapy.main._load"
]
| [((11229, 11273), 're.match', 're.match', (['version_regexp', 'module.__version__'], {}), '(version_regexp, module.__version__)\n', (11237, 11273), False, 'import re\n'), ((16234, 16257), 'scapy.error.log_scapy.setLevel', 'log_scapy.setLevel', (['val'], {}), '(val)\n', (16252, 16257), False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((22185, 22274), 'scapy.error.log_scapy.warning', 'log_scapy.warning', (['"""IPv6 support disabled in Python. Cannot load Scapy IPv6 layers."""'], {}), "(\n 'IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.')\n", (22202, 22274), False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((4611, 4640), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.num2layer'], {}), '(self.num2layer)\n', (4624, 4640), False, 'from scapy.modules import six\n'), ((4962, 4991), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.layer2num'], {}), '(self.layer2num)\n', (4975, 4991), False, 'from scapy.modules import six\n'), ((7563, 7574), 'time.time', 'time.time', ([], {}), '()\n', (7572, 7574), False, 'import time\n'), ((7671, 7691), 'scapy.modules.six.iteritems', 'six.iteritems', (['other'], {}), '(other)\n', (7684, 7691), False, 'from scapy.modules import six\n'), ((8146, 8157), 'time.time', 'time.time', ([], {}), '()\n', (8155, 8157), False, 'import time\n'), ((8399, 8410), 'time.time', 'time.time', ([], {}), '()\n', (8408, 8410), False, 'import time\n'), ((8563, 8590), 'scapy.modules.six.iterkeys', 'six.iterkeys', (['self.__dict__'], {}), '(self.__dict__)\n', (8575, 8590), False, 'from scapy.modules import six\n'), ((8713, 8724), 'time.time', 'time.time', ([], {}), '()\n', (8722, 8724), False, 'import time\n'), ((8947, 8958), 'time.time', 'time.time', ([], {}), '()\n', (8956, 8958), False, 'import time\n'), ((9184, 9195), 'time.time', 'time.time', ([], {}), '()\n', (9193, 9195), False, 'import time\n'), ((9423, 9434), 'time.time', 'time.time', ([], {}), '()\n', (9432, 9434), False, 'import time\n'), ((12292, 12319), 'cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate', 'X25519PrivateKey.generate', ([], {}), '()\n', (12317, 12319), False, 'from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey\n'), ((13114, 13177), 'scapy.error.ScapyInvalidPlatformException', 'ScapyInvalidPlatformException', (['"""BSD-like (OSX, *BSD...) only !"""'], {}), "('BSD-like (OSX, *BSD...) 
only !')\n", (13143, 13177), False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((13288, 13361), 'scapy.error.ScapyInvalidPlatformException', 'ScapyInvalidPlatformException', (['"""Scapy only supports libpcap on Solaris !"""'], {}), "('Scapy only supports libpcap on Solaris !')\n", (13317, 13361), False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((14290, 14334), 'functools.partial', 'functools.partial', (['L3bpfSocket'], {'filter': '"""ip6"""'}), "(L3bpfSocket, filter='ip6')\n", (14307, 14334), False, 'import functools\n'), ((14446, 14469), 'scapy.main._load', '_load', (['"""scapy.arch.bpf"""'], {}), "('scapy.arch.bpf')\n", (14451, 14469), False, 'from scapy.main import _load\n'), ((14641, 14688), 'functools.partial', 'functools.partial', (['L3PacketSocket'], {'filter': '"""ip6"""'}), "(L3PacketSocket, filter='ip6')\n", (14658, 14688), False, 'import functools\n'), ((14794, 14819), 'scapy.main._load', '_load', (['"""scapy.arch.linux"""'], {}), "('scapy.arch.linux')\n", (14799, 14819), False, 'from scapy.main import _load\n'), ((19933, 19942), 'scapy.themes.NoTheme', 'NoTheme', ([], {}), '()\n', (19940, 19942), False, 'from scapy.themes import NoTheme, apply_ipython_style\n'), ((8104, 8132), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.__dict__'], {}), '(self.__dict__)\n', (8117, 8132), False, 'from scapy.modules import six\n'), ((8358, 8385), 'scapy.modules.six.iterkeys', 'six.iterkeys', (['self.__dict__'], {}), '(self.__dict__)\n', (8370, 8385), False, 'from scapy.modules import six\n'), ((8670, 8699), 'scapy.modules.six.itervalues', 'six.itervalues', (['self.__dict__'], {}), '(self.__dict__)\n', (8684, 8699), False, 'from scapy.modules import six\n'), ((9995, 10023), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.__dict__'], {}), '(self.__dict__)\n', (10008, 10023), False, 'from scapy.modules import six\n'), ((13880, 13925), 'functools.partial', 'functools.partial', (['L3pcapSocket'], {'filter': '"""ip6"""'}), "(L3pcapSocket, filter='ip6')\n", (13897, 13925), False, 'import functools\n'), ((14055, 14083), 'scapy.main._load', '_load', (['"""scapy.arch.pcapdnet"""'], {}), "('scapy.arch.pcapdnet')\n", (14060, 14083), False, 'from scapy.main import _load\n'), ((19552, 19575), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (19570, 19575), False, 'import os\n'), ((8195, 8223), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.__dict__'], {}), '(self.__dict__)\n', (8208, 8223), False, 'from scapy.modules import six\n'), ((8438, 8465), 'scapy.modules.six.iterkeys', 'six.iterkeys', (['self.__dict__'], {}), '(self.__dict__)\n', (8450, 8465), False, 'from scapy.modules import six\n'), ((8757, 8785), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.__dict__'], {}), '(self.__dict__)\n', (8770, 8785), False, 'from scapy.modules import six\n'), ((8996, 9024), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.__dict__'], {}), '(self.__dict__)\n', (9009, 9024), False, 'from scapy.modules import six\n'), ((9223, 9250), 'scapy.modules.six.iterkeys', 'six.iterkeys', (['self.__dict__'], {}), '(self.__dict__)\n', (9235, 9250), False, 'from scapy.modules import six\n'), ((9388, 9408), 'scapy.modules.six.itervalues', 'six.itervalues', (['self'], {}), '(self)\n', (9402, 9408), False, 'from scapy.modules import six\n'), ((9467, 9495), 'scapy.modules.six.iteritems', 'six.iteritems', (['self.__dict__'], {}), '(self.__dict__)\n', (9480, 9495), False, 'from scapy.modules import 
six\n'), ((13671, 13732), 'scapy.error.warning', 'warning', (['"""No libpcap provider available ! pcap won\'t be used"""'], {}), '("No libpcap provider available ! pcap won\'t be used")\n', (13678, 13732), False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((7077, 7088), 'time.time', 'time.time', ([], {}), '()\n', (7086, 7088), False, 'import time\n'), ((20329, 20364), 'os.getenv', 'os.getenv', (['"""SCAPY_USE_PCAPDNET"""', '""""""'], {}), "('SCAPY_USE_PCAPDNET', '')\n", (20338, 20364), False, 'import os\n'), ((9900, 9927), 'scapy.modules.six.iterkeys', 'six.iterkeys', (['self.__dict__'], {}), '(self.__dict__)\n', (9912, 9927), False, 'from scapy.modules import six\n'), ((20190, 20225), 'os.getenv', 'os.getenv', (['"""SCAPY_USE_PCAPDNET"""', '""""""'], {}), "('SCAPY_USE_PCAPDNET', '')\n", (20199, 20225), False, 'import os\n')] |
import kopf
from .functions import global_logger, reconcile_secret
@kopf.on.event("", "v1", "secrets")
def injector_secret_event(type, event, logger, **_):
obj = event["object"]
namespace = obj["metadata"]["namespace"]
name = obj["metadata"]["name"]
    # Whether the secret already exists (indicated by type being None) or is
    # added or modified later, do a full reconciliation to ensure that the
    # secret is injected wherever it now matches.
with global_logger(logger):
if type in (None, "ADDED", "MODIFIED"):
reconcile_secret(name, namespace, obj)
| [
"kopf.on.event"
]
| [((71, 105), 'kopf.on.event', 'kopf.on.event', (['""""""', '"""v1"""', '"""secrets"""'], {}), "('', 'v1', 'secrets')\n", (84, 105), False, 'import kopf\n')] |
from django.contrib import admin
from django.urls import path,include
from django.views.generic import TemplateView
from .views import Index,SignUp,UserDashboard,AdminDashboard,logout,showAdminData,deleteuser,activeUser,deactiveUser,UserDetailEdit,uploadImage
# from .views import Index,UserDashboard,SignUp,AdminDashboard
app_name='management'
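# With app_name set, these routes reverse through the "management" namespace,
# e.g. reverse('management:index'), once this module is include()d in the project urls.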
urlpatterns = [
# path('',homepage,name="index"),
path('',Index.as_view(), name='index'),
path('signup',SignUp.as_view(),name="signup"),
path('userdashboard',UserDashboard.as_view(),name="userDashboard"),
path('admindashboard',AdminDashboard.as_view(),name="adminDashboard"),
path('admindashboard/showuserdata/',showAdminData.as_view(),name='showAdminData'),
path('admindashboard/showuserdata/deleteuser/<userId>',deleteuser,name='deleteuser'),
path('admindashboard/showuserdata/activeUser/<userId>', activeUser, name='activeUser'),
path('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser, name='deactiveUser'),
path('uploadimage/',uploadImage,name="uploadImage"),
path('editUserDetail/',UserDetailEdit.as_view(),name='userEditDetail'),
path('logout',logout,name='logout')
]
| [
"django.urls.path"
]
| [((734, 825), 'django.urls.path', 'path', (['"""admindashboard/showuserdata/deleteuser/<userId>"""', 'deleteuser'], {'name': '"""deleteuser"""'}), "('admindashboard/showuserdata/deleteuser/<userId>', deleteuser, name=\n 'deleteuser')\n", (738, 825), False, 'from django.urls import path, include\n'), ((824, 915), 'django.urls.path', 'path', (['"""admindashboard/showuserdata/activeUser/<userId>"""', 'activeUser'], {'name': '"""activeUser"""'}), "('admindashboard/showuserdata/activeUser/<userId>', activeUser, name=\n 'activeUser')\n", (828, 915), False, 'from django.urls import path, include\n'), ((916, 1012), 'django.urls.path', 'path', (['"""admindashboard/showuserdata/deactiveUser/<userId>"""', 'deactiveUser'], {'name': '"""deactiveUser"""'}), "('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser,\n name='deactiveUser')\n", (920, 1012), False, 'from django.urls import path, include\n'), ((1014, 1067), 'django.urls.path', 'path', (['"""uploadimage/"""', 'uploadImage'], {'name': '"""uploadImage"""'}), "('uploadimage/', uploadImage, name='uploadImage')\n", (1018, 1067), False, 'from django.urls import path, include\n'), ((1147, 1184), 'django.urls.path', 'path', (['"""logout"""', 'logout'], {'name': '"""logout"""'}), "('logout', logout, name='logout')\n", (1151, 1184), False, 'from django.urls import path, include\n')] |
from datetime import datetime
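# Scan the Neo4j debug log for "Index population started" / "Index creation
# finished" timestamps and report the total index-build time once all are done.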
with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
begin = []
end = []
for line in log:
if 'Index population started' in line:
begin.append(line[:23])
elif 'Index creation finished' in line:
end.append(line[:23])
if len(begin) == 0 or len(begin) > 9:
print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
elapsed_time = 0
        for i in range(len(begin)):
begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
end_tmp = datetime.strptime(end[i],'%Y-%m-%d %H:%M:%S.%f')
elapsed_time += (end_tmp-begin_tmp).total_seconds()
print("Done in {} s".format(elapsed_time))
| [
"datetime.datetime.strptime"
]
| [((573, 624), 'datetime.datetime.strptime', 'datetime.strptime', (['begin[i]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(begin[i], '%Y-%m-%d %H:%M:%S.%f')\n", (590, 624), False, 'from datetime import datetime\n'), ((641, 690), 'datetime.datetime.strptime', 'datetime.strptime', (['end[i]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(end[i], '%Y-%m-%d %H:%M:%S.%f')\n", (658, 690), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq import meters
from fairseq.checkpoint_utils import checkpoint_paths
from fairseq.data import iterators
from fairseq.file_io import PathManager
from fairseq.logging import metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
class Saver:
def __init__(self):
self.best = None
self.keep_best = []
def save_checkpoint(self, args, trainer, epoch_itr, val_loss):
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
prev_best = val_loss if self.best is None else self.best
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
self.best = best_function(val_loss, prev_best)
if args.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = getattr(args, "checkpoint_suffix", "")
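        # map each candidate checkpoint filename to whether it should be written
        # on this call (epoch, update-interval, best and last checkpoints)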
checkpoint_conds = collections.OrderedDict()
save_epoch_checkpoint = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = save_epoch_checkpoint
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not save_epoch_checkpoint
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
self.best is None
or is_better(val_loss, self.best)
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if self.best is not None:
extra_state.update({"best": self.best})
if args.keep_best_checkpoints > 0 and (len(self.keep_best) < args.keep_best_checkpoints or (
val_loss is not None and not is_better(self.keep_best[-1][0], val_loss))):
ckpt_name = "checkpoint{}{}.best_{:.4f}.pt".format(epoch, suffix, val_loss) if save_epoch_checkpoint \
else "checkpoint_{}_{}{}.best_{:.4f}.pt".format(epoch, updates, suffix, val_loss)
checkpoint_conds[ckpt_name] = True
self.keep_best.append((val_loss, ckpt_name))
self.keep_best = sorted(self.keep_best)
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if len(self.keep_best) > args.keep_best_checkpoints:
for _, x in self.keep_best[args.keep_best_checkpoints:]:
x = os.path.join(args.save_dir, x)
if os.path.lexists(x):
os.remove(x)
self.keep_best = self.keep_best[:args.keep_best_checkpoints]
def main(args):
saver = Saver()
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr, saver)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr, saver):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, saver)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
saver.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets, saver):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values(), saver)
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats, saver):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(saver.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
saver.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| [
"logging.getLogger",
"fairseq.options.parse_args_and_arch",
"fairseq.logging.metrics.get_meter",
"fairseq.options.get_training_parser",
"fairseq.checkpoint_utils.checkpoint_paths",
"torch.autograd.profiler.record_function",
"fairseq.logging.metrics.reset",
"os.remove",
"os.path.lexists",
"torch.cuda.profiler.profile",
"numpy.random.seed",
"fairseq.file_io.PathManager.copy",
"fairseq.logging.metrics.aggregate",
"collections.OrderedDict",
"fairseq.distributed_utils.call_main",
"fairseq.trainer.Trainer",
"fairseq.utils.import_user_module",
"fairseq.data.iterators.GroupedIterator",
"fairseq.checkpoint_utils.verify_checkpoint_directory",
"fairseq.distributed_utils.is_master",
"fairseq.quantization_utils.Quantizer",
"fairseq.meters.StopwatchMeter",
"torch.autograd.profiler.emit_nvtx",
"os.makedirs",
"fairseq.tasks.setup_task",
"os.path.join",
"os.environ.get",
"fairseq.utils.set_torch_seed",
"fairseq.model_parallel.megatron_trainer.MegatronTrainer",
"fairseq.utils.tpu_data_loader",
"fairseq.logging.metrics.get_smoothed_values",
"fairseq.logging.metrics.reset_meters"
]
| [((1010, 1048), 'logging.getLogger', 'logging.getLogger', (['"""fairseq_cli.train"""'], {}), "('fairseq_cli.train')\n", (1027, 1048), False, 'import logging\n'), ((9829, 9855), 'fairseq.logging.metrics.aggregate', 'metrics.aggregate', (['"""train"""'], {}), "('train')\n", (9846, 9855), False, 'from fairseq.logging import metrics, progress_bar\n'), ((5502, 5532), 'fairseq.utils.import_user_module', 'utils.import_user_module', (['args'], {}), '(args)\n', (5526, 5532), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((5697, 5712), 'fairseq.logging.metrics.reset', 'metrics.reset', ([], {}), '()\n', (5710, 5712), False, 'from fairseq.logging import metrics, progress_bar\n'), ((5718, 5743), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5732, 5743), True, 'import numpy as np\n'), ((5748, 5779), 'fairseq.utils.set_torch_seed', 'utils.set_torch_seed', (['args.seed'], {}), '(args.seed)\n', (5768, 5779), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((5788, 5821), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', (['args'], {}), '(args)\n', (5815, 5821), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((6004, 6026), 'fairseq.tasks.setup_task', 'tasks.setup_task', (['args'], {}), '(args)\n', (6020, 6026), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((8098, 8121), 'fairseq.meters.StopwatchMeter', 'meters.StopwatchMeter', ([], {}), '()\n', (8119, 8121), False, 'from fairseq import meters\n'), ((10333, 10376), 'fairseq.data.iterators.GroupedIterator', 'iterators.GroupedIterator', (['itr', 'update_freq'], {}), '(itr, update_freq)\n', (10358, 10376), False, 'from fairseq.data import iterators\n'), ((12293, 12322), 'fairseq.logging.metrics.reset_meters', 'metrics.reset_meters', (['"""train"""'], {}), "('train')\n", (12313, 12322), False, 'from fairseq.logging import metrics, progress_bar\n'), ((16463, 16492), 'fairseq.options.get_training_parser', 'options.get_training_parser', ([], {}), '()\n', (16490, 16492), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((16504, 16568), 'fairseq.options.parse_args_and_arch', 'options.parse_args_and_arch', (['parser'], {'modify_parser': 'modify_parser'}), '(parser, modify_parser=modify_parser)\n', (16531, 16568), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((1894, 1917), 'fairseq.meters.StopwatchMeter', 'meters.StopwatchMeter', ([], {}), '()\n', (1915, 1917), False, 'from fairseq import meters\n'), ((2155, 2180), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2178, 2180), False, 'import collections\n'), ((5831, 5890), 'fairseq.checkpoint_utils.verify_checkpoint_directory', 'checkpoint_utils.verify_checkpoint_directory', (['args.save_dir'], {}), '(args.save_dir)\n', (5875, 5890), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((6968, 7097), 'fairseq.quantization_utils.Quantizer', 'quantization_utils.Quantizer', ([], {'config_path': 'args.quantization_config_path', 'max_epoch': 'args.max_epoch', 'max_update': 'args.max_update'}), '(config_path=args.quantization_config_path,\n max_epoch=args.max_epoch, max_update=args.max_update)\n', (6996, 
7097), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((7253, 7301), 'fairseq.trainer.Trainer', 'Trainer', (['args', 'task', 'model', 'criterion', 'quantizer'], {}), '(args, task, model, criterion, quantizer)\n', (7260, 7301), False, 'from fairseq.trainer import Trainer\n'), ((7330, 7375), 'fairseq.model_parallel.megatron_trainer.MegatronTrainer', 'MegatronTrainer', (['args', 'task', 'model', 'criterion'], {}), '(args, task, model, criterion)\n', (7345, 7375), False, 'from fairseq.model_parallel.megatron_trainer import MegatronTrainer\n'), ((10427, 10453), 'fairseq.utils.tpu_data_loader', 'utils.tpu_data_loader', (['itr'], {}), '(itr)\n', (10448, 10453), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((12162, 12198), 'fairseq.logging.metrics.get_smoothed_values', 'metrics.get_smoothed_values', (['"""train"""'], {}), "('train')\n", (12189, 12198), False, 'from fairseq.logging import metrics, progress_bar\n'), ((14568, 14616), 'fairseq.utils.set_torch_seed', 'utils.set_torch_seed', (['args.fixed_validation_seed'], {}), '(args.fixed_validation_seed)\n', (14588, 14616), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((16762, 16801), 'fairseq.distributed_utils.call_main', 'distributed_utils.call_main', (['args', 'main'], {}), '(args, main)\n', (16789, 16801), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((1328, 1369), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {'exist_ok': '(True)'}), '(args.save_dir, exist_ok=True)\n', (1339, 1369), False, 'import os\n'), ((3775, 3806), 'os.path.join', 'os.path.join', (['args.save_dir', 'fn'], {}), '(args.save_dir, fn)\n', (3787, 3806), False, 'import os\n'), ((4517, 4587), 'fairseq.checkpoint_utils.checkpoint_paths', 'checkpoint_paths', (['args.save_dir'], {'pattern': '"""checkpoint_\\\\d+_(\\\\d+)\\\\.pt"""'}), "(args.save_dir, pattern='checkpoint_\\\\d+_(\\\\d+)\\\\.pt')\n", (4533, 4587), False, 'from fairseq.checkpoint_utils import checkpoint_paths\n'), ((4921, 4985), 'fairseq.checkpoint_utils.checkpoint_paths', 'checkpoint_paths', (['args.save_dir'], {'pattern': '"""checkpoint(\\\\d+)\\\\.pt"""'}), "(args.save_dir, pattern='checkpoint(\\\\d+)\\\\.pt')\n", (4937, 4985), False, 'from fairseq.checkpoint_utils import checkpoint_paths\n'), ((11066, 11098), 'fairseq.logging.metrics.aggregate', 'metrics.aggregate', (['"""train_inner"""'], {}), "('train_inner')\n", (11083, 11098), False, 'from fairseq.logging import metrics, progress_bar\n'), ((11100, 11160), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (["('train_step-%d' % i)"], {}), "('train_step-%d' % i)\n", (11139, 11160), False, 'import torch\n'), ((14254, 14290), 'fairseq.logging.metrics.get_meter', 'metrics.get_meter', (['"""default"""', '"""wall"""'], {}), "('default', 'wall')\n", (14271, 14290), False, 'from fairseq.logging import metrics, progress_bar\n'), ((14957, 14983), 'fairseq.utils.tpu_data_loader', 'utils.tpu_data_loader', (['itr'], {}), '(itr)\n', (14978, 14983), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((15590, 15622), 'fairseq.logging.metrics.aggregate', 'metrics.aggregate', ([], {'new_root': '(True)'}), '(new_root=True)\n', (15607, 15622), False, 'from fairseq.logging import metrics, progress_bar\n'), ((16603, 16632), 
'torch.cuda.profiler.profile', 'torch.cuda.profiler.profile', ([], {}), '()\n', (16630, 16632), False, 'import torch\n'), ((932, 966), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (946, 966), False, 'import os\n'), ((4019, 4071), 'fairseq.file_io.PathManager.copy', 'PathManager.copy', (['checkpoints[0]', 'cp'], {'overwrite': '(True)'}), '(checkpoints[0], cp, overwrite=True)\n', (4035, 4071), False, 'from fairseq.file_io import PathManager\n'), ((4704, 4728), 'os.path.lexists', 'os.path.lexists', (['old_chk'], {}), '(old_chk)\n', (4719, 4728), False, 'import os\n'), ((5068, 5092), 'os.path.lexists', 'os.path.lexists', (['old_chk'], {}), '(old_chk)\n', (5083, 5092), False, 'import os\n'), ((5284, 5314), 'os.path.join', 'os.path.join', (['args.save_dir', 'x'], {}), '(args.save_dir, x)\n', (5296, 5314), False, 'import os\n'), ((5334, 5352), 'os.path.lexists', 'os.path.lexists', (['x'], {}), '(x)\n', (5349, 5352), False, 'import os\n'), ((10684, 10717), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', (['args'], {}), '(args)\n', (10711, 10717), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((11748, 11783), 'fairseq.logging.metrics.reset_meters', 'metrics.reset_meters', (['"""train_inner"""'], {}), "('train_inner')\n", (11768, 11783), False, 'from fairseq.logging import metrics, progress_bar\n'), ((16651, 16686), 'torch.autograd.profiler.emit_nvtx', 'torch.autograd.profiler.emit_nvtx', ([], {}), '()\n', (16684, 16686), False, 'import torch\n'), ((16704, 16743), 'fairseq.distributed_utils.call_main', 'distributed_utils.call_main', (['args', 'main'], {}), '(args, main)\n', (16731, 16743), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((4750, 4768), 'os.remove', 'os.remove', (['old_chk'], {}), '(old_chk)\n', (4759, 4768), False, 'import os\n'), ((5114, 5132), 'os.remove', 'os.remove', (['old_chk'], {}), '(old_chk)\n', (5123, 5132), False, 'import os\n'), ((5374, 5386), 'os.remove', 'os.remove', (['x'], {}), '(x)\n', (5383, 5386), False, 'import os\n'), ((11485, 11527), 'fairseq.logging.metrics.get_smoothed_values', 'metrics.get_smoothed_values', (['"""train_inner"""'], {}), "('train_inner')\n", (11512, 11527), False, 'from fairseq.logging import metrics, progress_bar\n'), ((15292, 15325), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', (['args'], {}), '(args)\n', (15319, 15325), False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n')] |
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
import cv2 as cv
from matplotlib import cm
import numpy as np
class GradCAM:
"""
    Args:
        layer_name: module name (not child name). If None (the default),
            the last layer before average pooling is used.
"""
def __init__(self, model, device, layer_name=None, close_some_grad=True):
if layer_name is None:
layer_name = self.get_layer_name(model)
if layer_name is None:
raise ValueError(
"There is no global average pooling layer, plz specify 'layer_name'"
)
for n, m in model.named_children():
if close_some_grad:
m.requires_grad_(False)
for sub_n, sub_m in m.named_modules():
if '.'.join((n, sub_n)) == layer_name:
sub_m.register_forward_hook(self.forward_hook)
sub_m.register_full_backward_hook(self.backward_hook)
m.requires_grad_(True)
break
model = model.to(device)
self.model = model
self.device = device
self.feature_maps = {}
self.gradients = {}
def get_heatmap(self, img, img_tensor):
self.model.zero_grad()
img_tensor = img_tensor.to(self.device)
outputs = self.model(img_tensor)
_, pred_label = outputs.max(1)
# outputs shape = 1x2
outputs[0][pred_label].backward()
with torch.no_grad():
feature_maps = self.feature_maps["output"]
# "gradients" is a tuple with one item
grad_weights = self.gradients["output"][0]
h, w = grad_weights.size()[-2:]
grad_weights = grad_weights.sum((2,3), True) / (h * w)
cam = (grad_weights * feature_maps).sum(1)
F.relu(cam, True)
cam = cam / cam.max() * 255
cam = cam.to(dtype=torch.uint8, device="cpu")
cam = cam.numpy().transpose(1,2,0)
cam = cv.resize(cam, img.size[:2], interpolation=4)
cam = np.uint8(255 * cm.get_cmap("jet")(cam.squeeze()))
if not isinstance(img, np.ndarray):
img = np.asarray(img)
img_size = img.shape[:2][::-1] # w, h
overlay = np.uint8(0.6*img + 0.4 * cam[:,:,:3])
overlay = Image.fromarray(overlay)
if overlay.size != img_size:
overlay = overlay.resize(img_size, Image.BILINEAR)
return outputs.detach(), overlay
def get_layer_name(self, model):
        layer_name = None
        # initialized so the lookup degrades gracefully (returns None) if the
        # very first sub-module encountered is already a pooling layer
        tmp = None
for n, m in model.named_children():
for sub_n, sub_m in m.named_modules():
if isinstance(sub_m, (nn.AdaptiveAvgPool2d, nn.AvgPool2d)):
layer_name = tmp
tmp = '.'.join((n, sub_n))
return layer_name
def forward_hook(self, module, x, y):
#self.feature_maps["input"] = x
self.feature_maps["output"] = y
def backward_hook(self, module, x, y):
#self.gradients["input"] = x
self.gradients["output"] = y
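
# Hedged usage sketch (added for illustration; the torchvision model, image
# path and preprocessing below are assumptions, not part of the original file):
if __name__ == "__main__":
    from torchvision import models, transforms
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = models.resnet18(pretrained=True).eval()
    # layer_name=None lets GradCAM pick the layer just before average pooling
    cam = GradCAM(net, device)
    img = Image.open("example.jpg").convert("RGB")
    img_tensor = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])(img).unsqueeze(0)
    outputs, overlay = cam.get_heatmap(img, img_tensor)
    overlay.save("gradcam_overlay.jpg")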
| [
"numpy.uint8",
"PIL.Image.fromarray",
"numpy.asarray",
"torch.nn.functional.relu",
"torch.no_grad",
"cv2.resize",
"matplotlib.cm.get_cmap"
]
| [((1644, 1659), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1657, 1659), False, 'import torch\n'), ((2015, 2032), 'torch.nn.functional.relu', 'F.relu', (['cam', '(True)'], {}), '(cam, True)\n', (2021, 2032), True, 'from torch.nn import functional as F\n'), ((2200, 2245), 'cv2.resize', 'cv.resize', (['cam', 'img.size[:2]'], {'interpolation': '(4)'}), '(cam, img.size[:2], interpolation=4)\n', (2209, 2245), True, 'import cv2 as cv\n'), ((2481, 2522), 'numpy.uint8', 'np.uint8', (['(0.6 * img + 0.4 * cam[:, :, :3])'], {}), '(0.6 * img + 0.4 * cam[:, :, :3])\n', (2489, 2522), True, 'import numpy as np\n'), ((2542, 2566), 'PIL.Image.fromarray', 'Image.fromarray', (['overlay'], {}), '(overlay)\n', (2557, 2566), False, 'from PIL import Image\n'), ((2389, 2404), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2399, 2404), True, 'import numpy as np\n'), ((2280, 2298), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (2291, 2298), False, 'from matplotlib import cm\n')] |
"""Generates a random terrain at Minitaur gym environment reset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import itertools
import math
import enum
import numpy as np
from pybullet_envs.minitaur.envs import env_randomizer_base
_GRID_LENGTH = 15
_GRID_WIDTH = 10
_MAX_SAMPLE_SIZE = 30
_MIN_BLOCK_DISTANCE = 0.7
_MAX_BLOCK_LENGTH = _MIN_BLOCK_DISTANCE
_MIN_BLOCK_LENGTH = _MAX_BLOCK_LENGTH / 2
_MAX_BLOCK_HEIGHT = 0.05
_MIN_BLOCK_HEIGHT = _MAX_BLOCK_HEIGHT / 2
class PoissonDisc2D(object):
"""Generates 2D points using Poisson disk sampling method.
Implements the algorithm described in:
http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf
  Unlike uniform sampling, which tends to create small clusters of points, the
  Poisson disk method enforces a minimum distance between points and is more
  suitable for generating a spatial distribution of non-overlapping objects.
"""
def __init__(self, grid_length, grid_width, min_radius, max_sample_size):
"""Initializes the algorithm.
Args:
grid_length: The length of the bounding square in which points are
sampled.
grid_width: The width of the bounding square in which points are
sampled.
min_radius: The minimum distance between any pair of points.
      max_sample_size: The maximum number of sample points around an active site.
See details in the algorithm description.
"""
self._cell_length = min_radius / math.sqrt(2)
self._grid_length = grid_length
self._grid_width = grid_width
self._grid_size_x = int(grid_length / self._cell_length) + 1
self._grid_size_y = int(grid_width / self._cell_length) + 1
self._min_radius = min_radius
self._max_sample_size = max_sample_size
    # Flatten the 2D grid into a 1D array. The grid is used for fast nearest
# point searching.
self._grid = [None] * self._grid_size_x * self._grid_size_y
# Generate the first sample point and set it as an active site.
first_sample = np.array(np.random.random_sample(2)) * [grid_length, grid_width]
self._active_list = [first_sample]
# Also store the sample point in the grid.
self._grid[self._point_to_index_1d(first_sample)] = first_sample
def _point_to_index_1d(self, point):
"""Computes the index of a point in the grid array.
Args:
point: A 2D point described by its coordinates (x, y).
Returns:
The index of the point within the self._grid array.
"""
return self._index_2d_to_1d(self._point_to_index_2d(point))
def _point_to_index_2d(self, point):
"""Computes the 2D index (aka cell ID) of a point in the grid.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
x_index: The x index of the cell the point belongs to.
y_index: The y index of the cell the point belongs to.
"""
x_index = int(point[0] / self._cell_length)
y_index = int(point[1] / self._cell_length)
return x_index, y_index
def _index_2d_to_1d(self, index2d):
"""Converts the 2D index to the 1D position in the grid array.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
The 1D position of the cell within the self._grid array.
"""
return index2d[0] + index2d[1] * self._grid_size_x
def _is_in_grid(self, point):
"""Checks if the point is inside the grid boundary.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
Whether the point is inside the grid.
"""
return (0 <= point[0] < self._grid_length) and (0 <= point[1] < self._grid_width)
def _is_in_range(self, index2d):
"""Checks if the cell ID is within the grid.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
Whether the cell (2D index) is inside the grid.
"""
return (0 <= index2d[0] < self._grid_size_x) and (0 <= index2d[1] < self._grid_size_y)
def _is_close_to_existing_points(self, point):
"""Checks if the point is close to any already sampled (and stored) points.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
True iff the distance of the point to any existing points is smaller than
the min_radius
"""
px, py = self._point_to_index_2d(point)
# Now we can check nearby cells for existing points
    for neighbor_cell in itertools.product(range(px - 1, px + 2), range(py - 1, py + 2)):
if not self._is_in_range(neighbor_cell):
continue
maybe_a_point = self._grid[self._index_2d_to_1d(neighbor_cell)]
if maybe_a_point is not None and np.linalg.norm(maybe_a_point - point) < self._min_radius:
return True
return False
def sample(self):
"""Samples new points around some existing point.
    Removes the sampling base point and also stores the newly sampled points if
they are far enough from all existing points.
"""
active_point = self._active_list.pop()
    for _ in range(self._max_sample_size):
# Generate random points near the current active_point between the radius
random_radius = np.random.uniform(self._min_radius, 2 * self._min_radius)
random_angle = np.random.uniform(0, 2 * math.pi)
# The sampled 2D points near the active point
sample = random_radius * np.array([np.cos(random_angle),
np.sin(random_angle)]) + active_point
if not self._is_in_grid(sample):
continue
if self._is_close_to_existing_points(sample):
continue
self._active_list.append(sample)
self._grid[self._point_to_index_1d(sample)] = sample
def generate(self):
"""Generates the Poisson disc distribution of 2D points.
Although the while loop looks scary, the algorithm is in fact O(N), where N
is the number of cells within the grid. When we sample around a base point
(in some base cell), new points will not be pushed into the base cell
because of the minimum distance constraint. Once the current base point is
removed, all future searches cannot start from within the same base cell.
Returns:
      All sampled points. The points are inside the square [0, grid_length] x [0,
grid_width]
"""
while self._active_list:
self.sample()
all_sites = []
for p in self._grid:
if p is not None:
all_sites.append(p)
return all_sites
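# Hedged usage sketch (not part of the original file): sample well-separated 2D points.
#
#   sampler = PoissonDisc2D(grid_length=_GRID_LENGTH, grid_width=_GRID_WIDTH,
#                           min_radius=_MIN_BLOCK_DISTANCE, max_sample_size=_MAX_SAMPLE_SIZE)
#   centers = sampler.generate()  # list of 2D arrays, pairwise distance >= min_radius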
class TerrainType(enum.Enum):
  """The randomized terrain types we can use in the gym env."""
RANDOM_BLOCKS = 1
TRIANGLE_MESH = 2
class MinitaurTerrainRandomizer(env_randomizer_base.EnvRandomizerBase):
"""Generates an uneven terrain in the gym env."""
def __init__(self,
terrain_type=TerrainType.TRIANGLE_MESH,
mesh_filename="robotics/reinforcement_learning/minitaur/envs/testdata/"
"triangle_mesh_terrain/terrain9735.obj",
mesh_scale=None):
"""Initializes the randomizer.
Args:
terrain_type: Whether to generate random blocks or load a triangle mesh.
mesh_filename: The mesh file to be used. The mesh will only be loaded if
terrain_type is set to TerrainType.TRIANGLE_MESH.
mesh_scale: the scaling factor for the triangles in the mesh file.
"""
self._terrain_type = terrain_type
self._mesh_filename = mesh_filename
self._mesh_scale = mesh_scale if mesh_scale else [1.0, 1.0, 0.3]
def randomize_env(self, env):
"""Generate a random terrain for the current env.
Args:
env: A minitaur gym environment.
"""
if self._terrain_type is TerrainType.TRIANGLE_MESH:
self._load_triangle_mesh(env)
if self._terrain_type is TerrainType.RANDOM_BLOCKS:
self._generate_convex_blocks(env)
def _load_triangle_mesh(self, env):
"""Represents the random terrain using a triangle mesh.
    It is possible for a Minitaur leg to get stuck at the common edge of two triangle
pieces. To prevent this from happening, we recommend using hard contacts
(or high stiffness values) for Minitaur foot in sim.
Args:
env: A minitaur gym environment.
"""
env.pybullet_client.removeBody(env.ground_id)
terrain_collision_shape_id = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_MESH,
fileName=self._mesh_filename,
flags=1,
meshScale=self._mesh_scale)
env.ground_id = env.pybullet_client.createMultiBody(
baseMass=0, baseCollisionShapeIndex=terrain_collision_shape_id, basePosition=[0, 0, 0])
def _generate_convex_blocks(self, env):
"""Adds random convex blocks to the flat ground.
    We use the Poisson disk algorithm to add some random blocks on the ground.
    The Poisson disk algorithm sets the minimum distance between two sampling
    points, thus avoiding the clustering effect of a uniform N-D distribution.
Args:
env: A minitaur gym environment.
"""
poisson_disc = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE)
block_centers = poisson_disc.generate()
for center in block_centers:
# We want the blocks to be in front of the robot.
shifted_center = np.array(center) - [2, _GRID_WIDTH / 2]
# Do not place blocks near the point [0, 0], where the robot will start.
if abs(shifted_center[0]) < 1.0 and abs(shifted_center[1]) < 1.0:
continue
half_length = np.random.uniform(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH) / (2 * math.sqrt(2))
half_height = np.random.uniform(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT) / 2
box_id = env.pybullet_client.createCollisionShape(
env.pybullet_client.GEOM_BOX, halfExtents=[half_length, half_length, half_height])
env.pybullet_client.createMultiBody(
baseMass=0,
baseCollisionShapeIndex=box_id,
basePosition=[shifted_center[0], shifted_center[1], half_height])
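# Hedged usage sketch (not part of the original file): the randomizer is meant to be handed
# to a minitaur gym environment, which calls randomize_env(env) on reset.
#
#   randomizer = MinitaurTerrainRandomizer(terrain_type=TerrainType.RANDOM_BLOCKS)
#   randomizer.randomize_env(env)  # `env` is an already-constructed minitaur gym env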
| [
"numpy.random.random_sample",
"inspect.currentframe",
"math.sqrt",
"numpy.linalg.norm",
"os.sys.path.insert",
"os.path.dirname",
"numpy.array",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin"
]
| [((398, 430), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (416, 430), False, 'import os, inspect\n'), ((313, 340), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (328, 340), False, 'import os, inspect\n'), ((370, 396), 'os.path.dirname', 'os.path.dirname', (['parentdir'], {}), '(parentdir)\n', (385, 396), False, 'import os, inspect\n'), ((259, 281), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (279, 281), False, 'import os, inspect\n'), ((1776, 1788), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1785, 1788), False, 'import math\n'), ((5485, 5542), 'numpy.random.uniform', 'np.random.uniform', (['self._min_radius', '(2 * self._min_radius)'], {}), '(self._min_radius, 2 * self._min_radius)\n', (5502, 5542), True, 'import numpy as np\n'), ((5564, 5597), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (5581, 5597), True, 'import numpy as np\n'), ((2328, 2354), 'numpy.random.random_sample', 'np.random.random_sample', (['(2)'], {}), '(2)\n', (2351, 2354), True, 'import numpy as np\n'), ((9556, 9572), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (9564, 9572), True, 'import numpy as np\n'), ((9785, 9840), 'numpy.random.uniform', 'np.random.uniform', (['_MIN_BLOCK_LENGTH', '_MAX_BLOCK_LENGTH'], {}), '(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH)\n', (9802, 9840), True, 'import numpy as np\n'), ((9882, 9937), 'numpy.random.uniform', 'np.random.uniform', (['_MIN_BLOCK_HEIGHT', '_MAX_BLOCK_HEIGHT'], {}), '(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT)\n', (9899, 9937), True, 'import numpy as np\n'), ((4986, 5023), 'numpy.linalg.norm', 'np.linalg.norm', (['(maybe_a_point - point)'], {}), '(maybe_a_point - point)\n', (5000, 5023), True, 'import numpy as np\n'), ((9848, 9860), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (9857, 9860), False, 'import math\n'), ((5692, 5712), 'numpy.cos', 'np.cos', (['random_angle'], {}), '(random_angle)\n', (5698, 5712), True, 'import numpy as np\n'), ((5755, 5775), 'numpy.sin', 'np.sin', (['random_angle'], {}), '(random_angle)\n', (5761, 5775), True, 'import numpy as np\n')] |
import asyncio
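# Enumerate subdomains of `domain` by shelling out to dnsrecon; every resolved A record's IP
# is pushed to the asyncio queue `q` (when given) and collected into the returned list.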
async def searchDomains(domain, q):
domains = []
proc = await asyncio.create_subprocess_shell(f"dnsrecon -d {domain} -t crt", stdout=asyncio.subprocess.PIPE)
line = True
while line:
line = (await proc.stdout.readline()).decode('utf-8')
fields = line.split()
if len(fields)>1 and fields[1]=="A":
if q:
await q.put(fields[2])
domains.append(fields[2])
return domains
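# Scan `ip` with nmap for the given ports; every open port is pushed to `q` (when given)
# and collected into the returned list as {"port": ..., "protocol": ...} dicts.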
async def findOpenPorts(ip, ports, q):
openPorts = []
proc = await asyncio.create_subprocess_shell(f"nmap -p {ports} --open {ip}",stdout=asyncio.subprocess.PIPE)
line = True
while line:
line = (await proc.stdout.readline()).decode('utf-8')
fields = line.split()
if len(fields)>1 and fields[1]=="open":
openPort = fields[0].split("/")
if q:
await q.put({"ip": ip, "port": openPort[0], "protocol": openPort[1]})
openPorts.append({"port": openPort[0], "protocol": openPort[1]})
return openPorts
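# Hedged usage sketch (not part of the original file): chains the two helpers for a single
# domain. "example.com" and the port list are placeholders; dnsrecon and nmap must be
# installed for the subprocess calls above to succeed.
async def _demo(domain="example.com", ports="80,443"):
    for ip in await searchDomains(domain, None):
        print(ip, await findOpenPorts(ip, ports, None))

if __name__ == "__main__":
    asyncio.run(_demo())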
| [
"asyncio.create_subprocess_shell"
]
| [((86, 186), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', (['f"""dnsrecon -d {domain} -t crt"""'], {'stdout': 'asyncio.subprocess.PIPE'}), "(f'dnsrecon -d {domain} -t crt', stdout=\n asyncio.subprocess.PIPE)\n", (117, 186), False, 'import asyncio\n'), ((541, 641), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', (['f"""nmap -p {ports} --open {ip}"""'], {'stdout': 'asyncio.subprocess.PIPE'}), "(f'nmap -p {ports} --open {ip}', stdout=\n asyncio.subprocess.PIPE)\n", (572, 641), False, 'import asyncio\n')] |
"""Riemannian and pseudo-Riemannian metrics."""
import math
import warnings
import autograd
import geomstats.backend as gs
from geomstats.geometry.connection import Connection
EPSILON = 1e-4
N_CENTERS = 10
TOLERANCE = 1e-5
N_REPETITIONS = 20
N_MAX_ITERATIONS = 50000
N_STEPS = 10
def loss(y_pred, y_true, metric):
"""Compute loss function between prediction and ground truth.
Loss function given by a Riemannian metric,
expressed as the squared geodesic distance between the prediction
and the ground truth.
Parameters
----------
y_pred
y_true
metric
Returns
-------
loss
"""
loss = metric.squared_dist(y_pred, y_true)
return loss
def grad(y_pred, y_true, metric):
"""Closed-form for the gradient of the loss function."""
tangent_vec = metric.log(base_point=y_pred, point=y_true)
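    # Riemannian gradient of the squared distance: grad d^2(y_pred, y_true) = -2 * log_{y_pred}(y_true);
    # the inner-product matrix below lowers the index to get coordinate components.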
grad_vec = - 2. * tangent_vec
inner_prod_mat = metric.inner_product_matrix(base_point=y_pred)
grad = gs.einsum('ni,nij->ni',
grad_vec,
gs.transpose(inner_prod_mat, axes=(0, 2, 1)))
return grad
class RiemannianMetric(Connection):
"""Class for Riemannian and pseudo-Riemannian metrics."""
def __init__(self, dimension, signature=None):
assert isinstance(dimension, int) or dimension == math.inf
assert dimension > 0
super().__init__(dimension=dimension)
self.signature = signature
def inner_product_matrix(self, base_point=None):
"""Inner product matrix at the tangent space at a base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
raise NotImplementedError(
'The computation of the inner product matrix'
' is not implemented.')
def inner_product_inverse_matrix(self, base_point=None):
"""Inner product matrix at the tangent space at a base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
metric_matrix = self.inner_product_matrix(base_point)
cometric_matrix = gs.linalg.inv(metric_matrix)
return cometric_matrix
def inner_product_derivative_matrix(self, base_point=None):
"""Compute derivative of the inner prod matrix at base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
metric_derivative = autograd.jacobian(self.inner_product_matrix)
return metric_derivative(base_point)
def christoffels(self, base_point):
"""Compute Christoffel symbols associated with the connection.
Parameters
----------
base_point: array-like, shape=[n_samples, dimension]
Returns
-------
christoffels: array-like,
shape=[n_samples, dimension, dimension, dimension]
"""
cometric_mat_at_point = self.inner_product_inverse_matrix(base_point)
metric_derivative_at_point = self.inner_product_derivative_matrix(
base_point)
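        # Coordinate formula: Gamma^i_{kl} = 0.5 * g^{im} (d_k g_{ml} + d_l g_{mk} - d_m g_{kl}).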
term_1 = gs.einsum('nim,nmkl->nikl',
cometric_mat_at_point,
metric_derivative_at_point)
term_2 = gs.einsum('nim,nmlk->nilk',
cometric_mat_at_point,
metric_derivative_at_point)
term_3 = - gs.einsum('nim,nklm->nikl',
cometric_mat_at_point,
metric_derivative_at_point)
christoffels = 0.5 * (term_1 + term_2 + term_3)
return christoffels
def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
"""Inner product between two tangent vectors at a base point.
Parameters
----------
tangent_vec_a: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
tangent_vec_b: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
base_point: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
Returns
-------
inner_product : array-like, shape=[n_samples,]
"""
tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2)
tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2)
n_tangent_vec_a = gs.shape(tangent_vec_a)[0]
n_tangent_vec_b = gs.shape(tangent_vec_b)[0]
inner_prod_mat = self.inner_product_matrix(base_point)
inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3)
n_mats = gs.shape(inner_prod_mat)[0]
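        # Choose einsum strings so a single tangent vector can be paired with a batch of
        # metric matrices (and vice versa) without explicit tiling.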
if n_tangent_vec_a != n_mats:
if n_tangent_vec_a == 1:
tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0)
einsum_str_a = 'j,njk->nk'
elif n_mats == 1:
inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0)
einsum_str_a = 'nj,jk->nk'
else:
raise ValueError('Shape mismatch for einsum.')
else:
einsum_str_a = 'nj,njk->nk'
aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat)
n_auxs, _ = gs.shape(aux)
if n_tangent_vec_b != n_auxs:
if n_auxs == 1:
aux = gs.squeeze(aux, axis=0)
einsum_str_b = 'k,nk->n'
elif n_tangent_vec_b == 1:
tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0)
einsum_str_b = 'nk,k->n'
else:
raise ValueError('Shape mismatch for einsum.')
else:
einsum_str_b = 'nk,nk->n'
inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b)
inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)
assert gs.ndim(inner_prod) == 2, inner_prod.shape
return inner_prod
def squared_norm(self, vector, base_point=None):
"""Compute the square of the norm of a vector.
Squared norm of a vector associated to the inner product
at the tangent space at a base point.
Parameters
----------
vector : array-like, shape=[n_samples, dimension]
base_point : array-like, shape=[n_samples, dimension]
Returns
-------
sq_norm : array-like, shape=[n_samples,]
"""
sq_norm = self.inner_product(vector, vector, base_point)
return sq_norm
def norm(self, vector, base_point=None):
"""Compute norm of a vector.
Norm of a vector associated to the inner product
at the tangent space at a base point.
Note: This only works for positive-definite
Riemannian metrics and inner products.
Parameters
----------
vector : array-like, shape=[n_samples, dimension]
base_point : array-like, shape=[n_samples, dimension]
Returns
-------
norm : array-like, shape=[n_samples,]
"""
sq_norm = self.squared_norm(vector, base_point)
norm = gs.sqrt(sq_norm)
return norm
def geodesic(self, initial_point,
end_point=None, initial_tangent_vec=None,
point_type='vector'):
"""Return the geodesic as function of t.
Geodesic curve defined by either:
- an initial point and an initial tangent vector, or
- an initial point and an end point.
The geodesic is returned as a function parameterized by t.
Parameters
----------
initial_point : array-like, shape=[n_samples, dimension]
end_point : array-like, shape=[n_samples, dimension], optional
initial_tangent_vec : array-like, shape=[n_samples, dimension],
optional
point_type : str, optional
Returns
-------
path : callable
"""
point_ndim = 1
if point_type == 'matrix':
point_ndim = 2
initial_point = gs.to_ndarray(initial_point,
to_ndim=point_ndim + 1)
if end_point is None and initial_tangent_vec is None:
raise ValueError('Specify an end point or an initial tangent '
'vector to define the geodesic.')
if end_point is not None:
end_point = gs.to_ndarray(end_point,
to_ndim=point_ndim + 1)
shooting_tangent_vec = self.log(point=end_point,
base_point=initial_point)
if initial_tangent_vec is not None:
assert gs.allclose(shooting_tangent_vec, initial_tangent_vec)
initial_tangent_vec = shooting_tangent_vec
initial_tangent_vec = gs.array(initial_tangent_vec)
initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
to_ndim=point_ndim + 1)
def path(t):
"""Generate a function parameterizing the geodesic.
Parameters
----------
t : parameter value of the geodesic
Returns
-------
point_at_time_t : callable
"""
t = gs.cast(t, gs.float32)
t = gs.to_ndarray(t, to_ndim=1)
t = gs.to_ndarray(t, to_ndim=2, axis=1)
new_initial_point = gs.to_ndarray(
initial_point,
to_ndim=point_ndim + 1)
new_initial_tangent_vec = gs.to_ndarray(
initial_tangent_vec,
to_ndim=point_ndim + 1)
if point_type == 'vector':
tangent_vecs = gs.einsum('il,nk->ik',
t,
new_initial_tangent_vec)
elif point_type == 'matrix':
tangent_vecs = gs.einsum('il,nkm->ikm',
t,
new_initial_tangent_vec)
point_at_time_t = self.exp(tangent_vec=tangent_vecs,
base_point=new_initial_point)
return point_at_time_t
return path
def squared_dist(self, point_a, point_b):
"""Squared geodesic distance between two points.
Parameters
----------
point_a : array-like, shape=[n_samples, dimension]
point_b : array-like, shape=[n_samples, dimension]
Returns
-------
sq_dist : array-like, shape=[n_samples,]
"""
log = self.log(point=point_b, base_point=point_a)
sq_dist = self.squared_norm(vector=log, base_point=point_a)
return sq_dist
def dist(self, point_a, point_b):
"""Geodesic distance between two points.
Note: It only works for positive definite
Riemannian metrics.
Parameters
----------
point_a : array-like, shape=[n_samples, dimension]
point_b : array-like, shape=[n_samples, dimension]
Returns
-------
dist : array-like, shape=[n_samples,]
"""
sq_dist = self.squared_dist(point_a, point_b)
dist = gs.sqrt(sq_dist)
return dist
def variance(self,
points,
weights=None,
base_point=None,
point_type='vector'):
"""Variance of (weighted) points wrt a base point.
Parameters
----------
points: array-like, shape=[n_samples, dimension]
weights: array-like, shape=[n_samples, 1], optional
"""
if point_type == 'vector':
points = gs.to_ndarray(points, to_ndim=2)
if point_type == 'matrix':
points = gs.to_ndarray(points, to_ndim=3)
n_points = gs.shape(points)[0]
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
if base_point is None:
base_point = self.mean(points, weights)
variance = 0.
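        # Weighted Frechet variance: sum_i w_i * dist^2(base_point, x_i) / sum_i w_i.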
sq_dists = self.squared_dist(base_point, points)
variance += gs.einsum('nk,nj->j', weights, sq_dists)
variance = gs.array(variance)
variance /= sum_weights
variance = gs.to_ndarray(variance, to_ndim=1)
variance = gs.to_ndarray(variance, to_ndim=2, axis=1)
return variance
def mean(self, points,
weights=None,
n_max_iterations=32,
epsilon=EPSILON,
point_type='vector',
mean_method='default',
verbose=False):
"""Frechet mean of (weighted) points.
Parameters
----------
points : array-like, shape=[n_samples, dimension]
weights : array-like, shape=[n_samples, 1], optional
verbose : bool, optional
Returns
-------
mean : array-like
the Frechet mean of points, a point on the manifold
"""
if mean_method == 'default':
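            # Karcher-flow fixed-point iteration: shoot from the current mean along the
            # weighted average of the logs until the update becomes negligible.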
# TODO(nina): Profile this code to study performance,
# i.e. what to do with sq_dists_between_iterates.
def while_loop_cond(iteration, mean, variance, sq_dist):
result = ~gs.logical_or(
gs.isclose(variance, 0.),
gs.less_equal(sq_dist, epsilon * variance))
return result[0, 0] or iteration == 0
def while_loop_body(iteration, mean, variance, sq_dist):
logs = self.log(point=points, base_point=mean)
tangent_mean = gs.einsum('nk,nj->j', weights, logs)
tangent_mean /= sum_weights
mean_next = self.exp(
tangent_vec=tangent_mean,
base_point=mean)
sq_dist = self.squared_dist(mean_next, mean)
sq_dists_between_iterates.append(sq_dist)
variance = self.variance(points=points,
weights=weights,
base_point=mean_next)
mean = mean_next
iteration += 1
return [iteration, mean, variance, sq_dist]
if point_type == 'vector':
points = gs.to_ndarray(points, to_ndim=2)
if point_type == 'matrix':
points = gs.to_ndarray(points, to_ndim=3)
n_points = gs.shape(points)[0]
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
mean = points[0]
if point_type == 'vector':
mean = gs.to_ndarray(mean, to_ndim=2)
if point_type == 'matrix':
mean = gs.to_ndarray(mean, to_ndim=3)
if n_points == 1:
return mean
sq_dists_between_iterates = []
iteration = 0
sq_dist = gs.array([[0.]])
variance = gs.array([[0.]])
last_iteration, mean, variance, sq_dist = gs.while_loop(
lambda i, m, v, sq: while_loop_cond(i, m, v, sq),
lambda i, m, v, sq: while_loop_body(i, m, v, sq),
loop_vars=[iteration, mean, variance, sq_dist],
maximum_iterations=n_max_iterations)
if last_iteration == n_max_iterations:
print('Maximum number of iterations {} reached.'
'The mean may be inaccurate'.format(n_max_iterations))
if verbose:
print('n_iter: {}, final variance: {}, final dist: {}'.format(
last_iteration, variance, sq_dist))
mean = gs.to_ndarray(mean, to_ndim=2)
return mean
if mean_method == 'frechet-poincare-ball':
lr = 1e-3
tau = 5e-3
if len(points) == 1:
return points
iteration = 0
convergence = math.inf
barycenter = points.mean(0, keepdims=True) * 0
while convergence > tau and n_max_iterations > iteration:
iteration += 1
expand_barycenter = gs.repeat(barycenter, points.shape[0], 0)
grad_tangent = 2 * self.log(points, expand_barycenter)
cc_barycenter = self.exp(lr * grad_tangent.sum(0,
keepdims=True),
barycenter)
convergence = self.dist(cc_barycenter, barycenter).max().item()
barycenter = cc_barycenter
if iteration == n_max_iterations:
warnings.warn(
'Maximum number of iterations {} reached. The '
'mean may be inaccurate'.format(n_max_iterations))
return barycenter
def adaptive_gradientdescent_mean(self, points,
weights=None,
n_max_iterations=40,
epsilon=1e-12,
init_points=[],
verbose=False):
"""Compute Frechet mean of (weighted) points using adaptive time-steps.
Frechet mean of (weighted) points using adaptive time-steps
The loss function optimized is ||M_1(x)||_x (where M_1(x) is
the tangent mean at x) rather than the mean-square-distance (MSD)
because this saves computation time.
Parameters
----------
points: array-like, shape=[n_samples, dimension]
weights: array-like, shape=[n_samples, 1], optional
init_points: array-like, shape=[n_init, dimension]
epsilon: tolerance for stopping the gradient descent
verbose: verbose mode printing the surrogate value
"""
# TODO(Xavier): This function assumes that all points are lists
# of vectors and not of matrices
n_points = gs.shape(points)[0]
if n_points == 1:
return gs.to_ndarray(points[0], to_ndim=2)
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
n_init = len(init_points)
if n_init == 0:
current_mean = points[0]
else:
current_mean = init_points[0]
tau = 1.0
iteration = 0
logs = self.log(point=points, base_point=current_mean)
current_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
current_tangent_mean /= sum_weights
norm_current_tangent_mean = gs.linalg.norm(current_tangent_mean)
while (norm_current_tangent_mean > epsilon
and iteration < n_max_iterations):
iteration = iteration + 1
shooting_vector = gs.to_ndarray(
tau * current_tangent_mean,
to_ndim=2)
next_mean = self.exp(
tangent_vec=shooting_vector,
base_point=current_mean)
logs = self.log(point=points, base_point=next_mean)
next_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
next_tangent_mean /= sum_weights
norm_next_tangent_mean = gs.linalg.norm(next_tangent_mean)
if verbose:
print(
"Iter {0}: tau= {1}, "
"norm_current_tangent_mean = {2}".format(
                        iteration, tau, norm_current_tangent_mean))
if norm_next_tangent_mean < norm_current_tangent_mean:
current_mean = next_mean
current_tangent_mean = next_tangent_mean
norm_current_tangent_mean = norm_next_tangent_mean
tau = max(1.0, 1.0511111 * tau)
else:
tau = tau * 0.8
if iteration == n_max_iterations:
warnings.warn(
'Maximum number of iterations {} reached.'
'The mean may be inaccurate'.format(n_max_iterations))
return gs.to_ndarray(current_mean, to_ndim=2)
def diameter(self, points):
"""Give the distance between two farthest points.
Distance between the two points that are farthest away from each other
in points.
Parameters
----------
points
Returns
-------
diameter
"""
diameter = 0.0
n_points = points.shape[0]
for i in range(n_points - 1):
dist_to_neighbors = self.dist(points[i, :], points[i + 1:, :])
dist_to_farthest_neighbor = gs.amax(dist_to_neighbors)
diameter = gs.maximum(diameter, dist_to_farthest_neighbor)
return diameter
def closest_neighbor_index(self, point, neighbors):
"""Closest neighbor of point among neighbors.
Parameters
----------
point
neighbors
Returns
-------
closest_neighbor_index
"""
dist = self.dist(point, neighbors)
closest_neighbor_index = gs.argmin(dist)
return closest_neighbor_index
| [
"geomstats.backend.amax",
"geomstats.backend.sqrt",
"autograd.jacobian",
"geomstats.backend.transpose",
"geomstats.backend.shape",
"geomstats.backend.to_ndarray",
"geomstats.backend.maximum",
"geomstats.backend.argmin",
"geomstats.backend.allclose",
"geomstats.backend.ndim",
"geomstats.backend.cast",
"geomstats.backend.einsum",
"geomstats.backend.isclose",
"geomstats.backend.ones",
"geomstats.backend.squeeze",
"geomstats.backend.repeat",
"geomstats.backend.linalg.inv",
"geomstats.backend.linalg.norm",
"geomstats.backend.sum",
"geomstats.backend.less_equal",
"geomstats.backend.array"
]
| [((1052, 1096), 'geomstats.backend.transpose', 'gs.transpose', (['inner_prod_mat'], {'axes': '(0, 2, 1)'}), '(inner_prod_mat, axes=(0, 2, 1))\n', (1064, 1096), True, 'import geomstats.backend as gs\n'), ((2163, 2191), 'geomstats.backend.linalg.inv', 'gs.linalg.inv', (['metric_matrix'], {}), '(metric_matrix)\n', (2176, 2191), True, 'import geomstats.backend as gs\n'), ((2509, 2553), 'autograd.jacobian', 'autograd.jacobian', (['self.inner_product_matrix'], {}), '(self.inner_product_matrix)\n', (2526, 2553), False, 'import autograd\n'), ((3164, 3242), 'geomstats.backend.einsum', 'gs.einsum', (['"""nim,nmkl->nikl"""', 'cometric_mat_at_point', 'metric_derivative_at_point'], {}), "('nim,nmkl->nikl', cometric_mat_at_point, metric_derivative_at_point)\n", (3173, 3242), True, 'import geomstats.backend as gs\n'), ((3314, 3392), 'geomstats.backend.einsum', 'gs.einsum', (['"""nim,nmlk->nilk"""', 'cometric_mat_at_point', 'metric_derivative_at_point'], {}), "('nim,nmlk->nilk', cometric_mat_at_point, metric_derivative_at_point)\n", (3323, 3392), True, 'import geomstats.backend as gs\n'), ((4363, 4402), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['tangent_vec_a'], {'to_ndim': '(2)'}), '(tangent_vec_a, to_ndim=2)\n', (4376, 4402), True, 'import geomstats.backend as gs\n'), ((4427, 4466), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['tangent_vec_b'], {'to_ndim': '(2)'}), '(tangent_vec_b, to_ndim=2)\n', (4440, 4466), True, 'import geomstats.backend as gs\n'), ((4662, 4702), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['inner_prod_mat'], {'to_ndim': '(3)'}), '(inner_prod_mat, to_ndim=3)\n', (4675, 4702), True, 'import geomstats.backend as gs\n'), ((5224, 5278), 'geomstats.backend.einsum', 'gs.einsum', (['einsum_str_a', 'tangent_vec_a', 'inner_prod_mat'], {}), '(einsum_str_a, tangent_vec_a, inner_prod_mat)\n', (5233, 5278), True, 'import geomstats.backend as gs\n'), ((5299, 5312), 'geomstats.backend.shape', 'gs.shape', (['aux'], {}), '(aux)\n', (5307, 5312), True, 'import geomstats.backend as gs\n'), ((5768, 5811), 'geomstats.backend.einsum', 'gs.einsum', (['einsum_str_b', 'aux', 'tangent_vec_b'], {}), '(einsum_str_b, aux, tangent_vec_b)\n', (5777, 5811), True, 'import geomstats.backend as gs\n'), ((5833, 5877), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['inner_prod'], {'to_ndim': '(2)', 'axis': '(1)'}), '(inner_prod, to_ndim=2, axis=1)\n', (5846, 5877), True, 'import geomstats.backend as gs\n'), ((7133, 7149), 'geomstats.backend.sqrt', 'gs.sqrt', (['sq_norm'], {}), '(sq_norm)\n', (7140, 7149), True, 'import geomstats.backend as gs\n'), ((8055, 8107), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['initial_point'], {'to_ndim': '(point_ndim + 1)'}), '(initial_point, to_ndim=point_ndim + 1)\n', (8068, 8107), True, 'import geomstats.backend as gs\n'), ((8834, 8863), 'geomstats.backend.array', 'gs.array', (['initial_tangent_vec'], {}), '(initial_tangent_vec)\n', (8842, 8863), True, 'import geomstats.backend as gs\n'), ((8894, 8952), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['initial_tangent_vec'], {'to_ndim': '(point_ndim + 1)'}), '(initial_tangent_vec, to_ndim=point_ndim + 1)\n', (8907, 8952), True, 'import geomstats.backend as gs\n'), ((11249, 11265), 'geomstats.backend.sqrt', 'gs.sqrt', (['sq_dist'], {}), '(sq_dist)\n', (11256, 11265), True, 'import geomstats.backend as gs\n'), ((11977, 11994), 'geomstats.backend.array', 'gs.array', (['weights'], {}), '(weights)\n', (11985, 11994), True, 'import geomstats.backend as gs\n'), ((12013, 12054), 
'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['weights'], {'to_ndim': '(2)', 'axis': '(1)'}), '(weights, to_ndim=2, axis=1)\n', (12026, 12054), True, 'import geomstats.backend as gs\n'), ((12078, 12093), 'geomstats.backend.sum', 'gs.sum', (['weights'], {}), '(weights)\n', (12084, 12093), True, 'import geomstats.backend as gs\n'), ((12279, 12319), 'geomstats.backend.einsum', 'gs.einsum', (['"""nk,nj->j"""', 'weights', 'sq_dists'], {}), "('nk,nj->j', weights, sq_dists)\n", (12288, 12319), True, 'import geomstats.backend as gs\n'), ((12340, 12358), 'geomstats.backend.array', 'gs.array', (['variance'], {}), '(variance)\n', (12348, 12358), True, 'import geomstats.backend as gs\n'), ((12411, 12445), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['variance'], {'to_ndim': '(1)'}), '(variance, to_ndim=1)\n', (12424, 12445), True, 'import geomstats.backend as gs\n'), ((12465, 12507), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['variance'], {'to_ndim': '(2)', 'axis': '(1)'}), '(variance, to_ndim=2, axis=1)\n', (12478, 12507), True, 'import geomstats.backend as gs\n'), ((18505, 18522), 'geomstats.backend.array', 'gs.array', (['weights'], {}), '(weights)\n', (18513, 18522), True, 'import geomstats.backend as gs\n'), ((18541, 18582), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['weights'], {'to_ndim': '(2)', 'axis': '(1)'}), '(weights, to_ndim=2, axis=1)\n', (18554, 18582), True, 'import geomstats.backend as gs\n'), ((18606, 18621), 'geomstats.backend.sum', 'gs.sum', (['weights'], {}), '(weights)\n', (18612, 18621), True, 'import geomstats.backend as gs\n'), ((18911, 18947), 'geomstats.backend.einsum', 'gs.einsum', (['"""nk,nj->j"""', 'weights', 'logs'], {}), "('nk,nj->j', weights, logs)\n", (18920, 18947), True, 'import geomstats.backend as gs\n'), ((19028, 19064), 'geomstats.backend.linalg.norm', 'gs.linalg.norm', (['current_tangent_mean'], {}), '(current_tangent_mean)\n', (19042, 19064), True, 'import geomstats.backend as gs\n'), ((20452, 20490), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['current_mean'], {'to_ndim': '(2)'}), '(current_mean, to_ndim=2)\n', (20465, 20490), True, 'import geomstats.backend as gs\n'), ((21467, 21482), 'geomstats.backend.argmin', 'gs.argmin', (['dist'], {}), '(dist)\n', (21476, 21482), True, 'import geomstats.backend as gs\n'), ((3466, 3544), 'geomstats.backend.einsum', 'gs.einsum', (['"""nim,nklm->nikl"""', 'cometric_mat_at_point', 'metric_derivative_at_point'], {}), "('nim,nklm->nikl', cometric_mat_at_point, metric_derivative_at_point)\n", (3475, 3544), True, 'import geomstats.backend as gs\n'), ((4493, 4516), 'geomstats.backend.shape', 'gs.shape', (['tangent_vec_a'], {}), '(tangent_vec_a)\n', (4501, 4516), True, 'import geomstats.backend as gs\n'), ((4546, 4569), 'geomstats.backend.shape', 'gs.shape', (['tangent_vec_b'], {}), '(tangent_vec_b)\n', (4554, 4569), True, 'import geomstats.backend as gs\n'), ((4720, 4744), 'geomstats.backend.shape', 'gs.shape', (['inner_prod_mat'], {}), '(inner_prod_mat)\n', (4728, 4744), True, 'import geomstats.backend as gs\n'), ((5894, 5913), 'geomstats.backend.ndim', 'gs.ndim', (['inner_prod'], {}), '(inner_prod)\n', (5901, 5913), True, 'import geomstats.backend as gs\n'), ((8405, 8453), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['end_point'], {'to_ndim': '(point_ndim + 1)'}), '(end_point, to_ndim=point_ndim + 1)\n', (8418, 8453), True, 'import geomstats.backend as gs\n'), ((9291, 9313), 'geomstats.backend.cast', 'gs.cast', (['t', 'gs.float32'], {}), '(t, gs.float32)\n', (9298, 9313), True, 'import 
geomstats.backend as gs\n'), ((9330, 9357), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['t'], {'to_ndim': '(1)'}), '(t, to_ndim=1)\n', (9343, 9357), True, 'import geomstats.backend as gs\n'), ((9374, 9409), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['t'], {'to_ndim': '(2)', 'axis': '(1)'}), '(t, to_ndim=2, axis=1)\n', (9387, 9409), True, 'import geomstats.backend as gs\n'), ((9442, 9494), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['initial_point'], {'to_ndim': '(point_ndim + 1)'}), '(initial_point, to_ndim=point_ndim + 1)\n', (9455, 9494), True, 'import geomstats.backend as gs\n'), ((9566, 9624), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['initial_tangent_vec'], {'to_ndim': '(point_ndim + 1)'}), '(initial_tangent_vec, to_ndim=point_ndim + 1)\n', (9579, 9624), True, 'import geomstats.backend as gs\n'), ((11723, 11755), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['points'], {'to_ndim': '(2)'}), '(points, to_ndim=2)\n', (11736, 11755), True, 'import geomstats.backend as gs\n'), ((11812, 11844), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['points'], {'to_ndim': '(3)'}), '(points, to_ndim=3)\n', (11825, 11844), True, 'import geomstats.backend as gs\n'), ((11864, 11880), 'geomstats.backend.shape', 'gs.shape', (['points'], {}), '(points)\n', (11872, 11880), True, 'import geomstats.backend as gs\n'), ((11935, 11957), 'geomstats.backend.ones', 'gs.ones', (['(n_points, 1)'], {}), '((n_points, 1))\n', (11942, 11957), True, 'import geomstats.backend as gs\n'), ((14698, 14715), 'geomstats.backend.array', 'gs.array', (['weights'], {}), '(weights)\n', (14706, 14715), True, 'import geomstats.backend as gs\n'), ((14738, 14779), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['weights'], {'to_ndim': '(2)', 'axis': '(1)'}), '(weights, to_ndim=2, axis=1)\n', (14751, 14779), True, 'import geomstats.backend as gs\n'), ((14807, 14822), 'geomstats.backend.sum', 'gs.sum', (['weights'], {}), '(weights)\n', (14813, 14822), True, 'import geomstats.backend as gs\n'), ((15190, 15207), 'geomstats.backend.array', 'gs.array', (['[[0.0]]'], {}), '([[0.0]])\n', (15198, 15207), True, 'import geomstats.backend as gs\n'), ((15230, 15247), 'geomstats.backend.array', 'gs.array', (['[[0.0]]'], {}), '([[0.0]])\n', (15238, 15247), True, 'import geomstats.backend as gs\n'), ((15940, 15970), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['mean'], {'to_ndim': '(2)'}), '(mean, to_ndim=2)\n', (15953, 15970), True, 'import geomstats.backend as gs\n'), ((18310, 18326), 'geomstats.backend.shape', 'gs.shape', (['points'], {}), '(points)\n', (18318, 18326), True, 'import geomstats.backend as gs\n'), ((18376, 18411), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['points[0]'], {'to_ndim': '(2)'}), '(points[0], to_ndim=2)\n', (18389, 18411), True, 'import geomstats.backend as gs\n'), ((18463, 18485), 'geomstats.backend.ones', 'gs.ones', (['(n_points, 1)'], {}), '((n_points, 1))\n', (18470, 18485), True, 'import geomstats.backend as gs\n'), ((19236, 19288), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['(tau * current_tangent_mean)'], {'to_ndim': '(2)'}), '(tau * current_tangent_mean, to_ndim=2)\n', (19249, 19288), True, 'import geomstats.backend as gs\n'), ((19538, 19574), 'geomstats.backend.einsum', 'gs.einsum', (['"""nk,nj->j"""', 'weights', 'logs'], {}), "('nk,nj->j', weights, logs)\n", (19547, 19574), True, 'import geomstats.backend as gs\n'), ((19657, 19690), 'geomstats.backend.linalg.norm', 'gs.linalg.norm', (['next_tangent_mean'], {}), '(next_tangent_mean)\n', (19671, 19690), 
True, 'import geomstats.backend as gs\n'), ((21010, 21036), 'geomstats.backend.amax', 'gs.amax', (['dist_to_neighbors'], {}), '(dist_to_neighbors)\n', (21017, 21036), True, 'import geomstats.backend as gs\n'), ((21060, 21107), 'geomstats.backend.maximum', 'gs.maximum', (['diameter', 'dist_to_farthest_neighbor'], {}), '(diameter, dist_to_farthest_neighbor)\n', (21070, 21107), True, 'import geomstats.backend as gs\n'), ((4856, 4889), 'geomstats.backend.squeeze', 'gs.squeeze', (['tangent_vec_a'], {'axis': '(0)'}), '(tangent_vec_a, axis=0)\n', (4866, 4889), True, 'import geomstats.backend as gs\n'), ((5402, 5425), 'geomstats.backend.squeeze', 'gs.squeeze', (['aux'], {'axis': '(0)'}), '(aux, axis=0)\n', (5412, 5425), True, 'import geomstats.backend as gs\n'), ((8694, 8748), 'geomstats.backend.allclose', 'gs.allclose', (['shooting_tangent_vec', 'initial_tangent_vec'], {}), '(shooting_tangent_vec, initial_tangent_vec)\n', (8705, 8748), True, 'import geomstats.backend as gs\n'), ((9729, 9779), 'geomstats.backend.einsum', 'gs.einsum', (['"""il,nk->ik"""', 't', 'new_initial_tangent_vec'], {}), "('il,nk->ik', t, new_initial_tangent_vec)\n", (9738, 9779), True, 'import geomstats.backend as gs\n'), ((13728, 13764), 'geomstats.backend.einsum', 'gs.einsum', (['"""nk,nj->j"""', 'weights', 'logs'], {}), "('nk,nj->j', weights, logs)\n", (13737, 13764), True, 'import geomstats.backend as gs\n'), ((14420, 14452), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['points'], {'to_ndim': '(2)'}), '(points, to_ndim=2)\n', (14433, 14452), True, 'import geomstats.backend as gs\n'), ((14517, 14549), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['points'], {'to_ndim': '(3)'}), '(points, to_ndim=3)\n', (14530, 14549), True, 'import geomstats.backend as gs\n'), ((14573, 14589), 'geomstats.backend.shape', 'gs.shape', (['points'], {}), '(points)\n', (14581, 14589), True, 'import geomstats.backend as gs\n'), ((14652, 14674), 'geomstats.backend.ones', 'gs.ones', (['(n_points, 1)'], {}), '((n_points, 1))\n', (14659, 14674), True, 'import geomstats.backend as gs\n'), ((14915, 14945), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['mean'], {'to_ndim': '(2)'}), '(mean, to_ndim=2)\n', (14928, 14945), True, 'import geomstats.backend as gs\n'), ((15008, 15038), 'geomstats.backend.to_ndarray', 'gs.to_ndarray', (['mean'], {'to_ndim': '(3)'}), '(mean, to_ndim=3)\n', (15021, 15038), True, 'import geomstats.backend as gs\n'), ((16418, 16459), 'geomstats.backend.repeat', 'gs.repeat', (['barycenter', 'points.shape[0]', '(0)'], {}), '(barycenter, points.shape[0], 0)\n', (16427, 16459), True, 'import geomstats.backend as gs\n'), ((4996, 5030), 'geomstats.backend.squeeze', 'gs.squeeze', (['inner_prod_mat'], {'axis': '(0)'}), '(inner_prod_mat, axis=0)\n', (5006, 5030), True, 'import geomstats.backend as gs\n'), ((5538, 5571), 'geomstats.backend.squeeze', 'gs.squeeze', (['tangent_vec_b'], {'axis': '(0)'}), '(tangent_vec_b, axis=0)\n', (5548, 5571), True, 'import geomstats.backend as gs\n'), ((9934, 9986), 'geomstats.backend.einsum', 'gs.einsum', (['"""il,nkm->ikm"""', 't', 'new_initial_tangent_vec'], {}), "('il,nkm->ikm', t, new_initial_tangent_vec)\n", (9943, 9986), True, 'import geomstats.backend as gs\n'), ((13418, 13443), 'geomstats.backend.isclose', 'gs.isclose', (['variance', '(0.0)'], {}), '(variance, 0.0)\n', (13428, 13443), True, 'import geomstats.backend as gs\n'), ((13464, 13506), 'geomstats.backend.less_equal', 'gs.less_equal', (['sq_dist', '(epsilon * variance)'], {}), '(sq_dist, epsilon * variance)\n', (13477, 13506), 
True, 'import geomstats.backend as gs\n')] |
import pandas as pd
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
def get_position_source(start_date, end_date, obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
.format(obsmode=obsmode)
sql = "select Date, y_upper, HrsOrder, CONVERT(Date,char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
colors = []
if len(df) > 0:
ord_min = df['HrsOrder'].min()
ord_max = df['HrsOrder'].max()
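        # map each HrsOrder value linearly onto the 256-colour Plasma palette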
colors = [Plasma256[int((y - ord_min) * (len(Plasma256) - 1) / float(ord_max - ord_min))] for y in
df["HrsOrder"]]
df['colors'] = colors
source = ColumnDataSource(df)
return source
@data_quality(name='hrs_order', caption='HRS Order')
def hrs_order_plot(start_date, end_date):
"""Return a <div> element with the Order plot.
The plot shows the HRS order for obsmode High, low and medium over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order plot.
"""
def get_source(obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
" group by Date " \
.format(obsmode=obsmode)
sql = "select Date, (Max(HrsOrder) - Min(HrsOrder)) as ord, CONVERT(Date, char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
source = ColumnDataSource(df)
return source
low_source = get_source(1) # HrsMode_Id = 1 low
med_source = get_source(2) # HrsMode_Id = 2 med
high_source = get_source(3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>
<span style="font-size: 15px;"> @ord</span>
</div>
</div>
"""
)
p = figure(title="HRS Order",
x_axis_label='Date',
y_axis_label='Max(HrsOrder) - Min(HrsOrder)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=low_source, x='Date', y='ord', color='red', fill_alpha=0.2, legend='Low', size=10)
p.scatter(source=med_source, x='Date', y='ord', color='orange', fill_alpha=0.2, legend='Medium', size=10)
p.scatter(source=high_source, x='Date', y='ord', color='green', fill_alpha=0.2, legend='High', size=10)
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_high', caption=' ')
def hrs_order_position_plot_high(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
The plot shows the HRS order for obsmode High resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
high_source = get_position_source(start_date, end_date, 3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position High Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_medium', caption=' ')
def hrs_order_position_plot_medium(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode medium resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    med_source = get_position_source(start_date, end_date, 2)  # HrsMode_Id = 2 medium
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Medium Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=med_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_low', caption=' ')
def hrs_order_position_plot_low(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode low resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    low_source = get_position_source(start_date, end_date, 1)  # HrsMode_Id = 1 low
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Low Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=low_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p | [
"bokeh.plotting.ColumnDataSource",
"bokeh.plotting.figure",
"bokeh.models.formatters.DatetimeTickFormatter",
"app.decorators.data_quality",
"pandas.read_sql",
"bokeh.models.HoverTool"
]
| [((301, 535), 'bokeh.models.formatters.DatetimeTickFormatter', 'DatetimeTickFormatter', ([], {'microseconds': "['%f']", 'milliseconds': "['%S.%2Ns']", 'seconds': "[':%Ss']", 'minsec': "[':%Mm:%Ss']", 'minutes': "['%H:%M:%S']", 'hourmin': "['%H:%M:']", 'hours': "['%H:%M']", 'days': "['%d %b']", 'months': "['%d %b %Y']", 'years': "['%b %Y']"}), "(microseconds=['%f'], milliseconds=['%S.%2Ns'],\n seconds=[':%Ss'], minsec=[':%Mm:%Ss'], minutes=['%H:%M:%S'], hourmin=[\n '%H:%M:'], hours=['%H:%M'], days=['%d %b'], months=['%d %b %Y'], years=\n ['%b %Y'])\n", (322, 535), False, 'from bokeh.models.formatters import DatetimeTickFormatter\n'), ((1731, 1782), 'app.decorators.data_quality', 'data_quality', ([], {'name': '"""hrs_order"""', 'caption': '"""HRS Order"""'}), "(name='hrs_order', caption='HRS Order')\n", (1743, 1782), False, 'from app.decorators import data_quality\n'), ((4360, 4417), 'app.decorators.data_quality', 'data_quality', ([], {'name': '"""hrs_order_position_high"""', 'caption': '""" """'}), "(name='hrs_order_position_high', caption=' ')\n", (4372, 4417), False, 'from app.decorators import data_quality\n'), ((6188, 6247), 'app.decorators.data_quality', 'data_quality', ([], {'name': '"""hrs_order_position_medium"""', 'caption': '""" """'}), "(name='hrs_order_position_medium', caption=' ')\n", (6200, 6247), False, 'from app.decorators import data_quality\n'), ((8020, 8076), 'app.decorators.data_quality', 'data_quality', ([], {'name': '"""hrs_order_position_low"""', 'caption': '""" """'}), "(name='hrs_order_position_low', caption=' ')\n", (8032, 8076), False, 'from app.decorators import data_quality\n'), ((1365, 1392), 'pandas.read_sql', 'pd.read_sql', (['sql', 'db.engine'], {}), '(sql, db.engine)\n', (1376, 1392), True, 'import pandas as pd\n'), ((1689, 1709), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (1705, 1709), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((3076, 3608), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': '"""\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>\n <span style="font-size: 15px;"> @ord</span>\n </div>\n </div>\n """'}), '(tooltips=\n """\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>\n <span style="font-size: 15px;"> @ord</span>\n </div>\n </div>\n """\n )\n', (3085, 3608), False, 'from bokeh.models import HoverTool\n'), ((3622, 3775), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""HRS Order"""', 'x_axis_label': '"""Date"""', 'y_axis_label': '"""Max(HrsOrder) - Min(HrsOrder)"""', 'x_axis_type': '"""datetime"""', 'tools': '[tool_list, _hover]'}), "(title='HRS Order', x_axis_label='Date', y_axis_label=\n 'Max(HrsOrder) - Min(HrsOrder)', x_axis_type='datetime', tools=[\n tool_list, _hover])\n", (3628, 3775), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((5066, 5809), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': '"""\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>\n <span style="font-size: 15px;"> @y_upper</span>\n </div>\n <div>\n <span 
style="font-size: 15px; font-weight: bold;">HRS Order: </span>\n <span style="font-size: 15px;"> @HrsOrder</span>\n </div>\n </div>\n """'}), '(tooltips=\n """\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>\n <span style="font-size: 15px;"> @y_upper</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>\n <span style="font-size: 15px;"> @HrsOrder</span>\n </div>\n </div>\n """\n )\n', (5075, 5809), False, 'from bokeh.models import HoverTool\n'), ((5823, 5973), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""HRS Order Position High Resolution"""', 'x_axis_label': '"""Date"""', 'y_axis_label': '"""y_upper"""', 'x_axis_type': '"""datetime"""', 'tools': '[tool_list, _hover]'}), "(title='HRS Order Position High Resolution', x_axis_label='Date',\n y_axis_label='y_upper', x_axis_type='datetime', tools=[tool_list, _hover])\n", (5829, 5973), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((6896, 7639), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': '"""\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>\n <span style="font-size: 15px;"> @y_upper</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>\n <span style="font-size: 15px;"> @HrsOrder</span>\n </div>\n </div>\n """'}), '(tooltips=\n """\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>\n <span style="font-size: 15px;"> @y_upper</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>\n <span style="font-size: 15px;"> @HrsOrder</span>\n </div>\n </div>\n """\n )\n', (6905, 7639), False, 'from bokeh.models import HoverTool\n'), ((7653, 7805), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""HRS Order Position Medium Resolution"""', 'x_axis_label': '"""Date"""', 'y_axis_label': '"""y_upper"""', 'x_axis_type': '"""datetime"""', 'tools': '[tool_list, _hover]'}), "(title='HRS Order Position Medium Resolution', x_axis_label='Date',\n y_axis_label='y_upper', x_axis_type='datetime', tools=[tool_list, _hover])\n", (7659, 7805), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((8725, 9468), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': '"""\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>\n <span style="font-size: 15px;"> @y_upper</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>\n <span style="font-size: 15px;"> @HrsOrder</span>\n </div>\n </div>\n """'}), '(tooltips=\n """\n <div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Date: </span>\n <span style="font-size: 15px;"> @Time</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">Y Upper: </span>\n <span style="font-size: 15px;"> @y_upper</span>\n </div>\n <div>\n <span style="font-size: 15px; font-weight: bold;">HRS Order: </span>\n <span style="font-size: 15px;"> @HrsOrder</span>\n 
</div>\n </div>\n """\n )\n', (8734, 9468), False, 'from bokeh.models import HoverTool\n'), ((9482, 9631), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""HRS Order Position Low Resolution"""', 'x_axis_label': '"""Date"""', 'y_axis_label': '"""y_upper"""', 'x_axis_type': '"""datetime"""', 'tools': '[tool_list, _hover]'}), "(title='HRS Order Position Low Resolution', x_axis_label='Date',\n y_axis_label='y_upper', x_axis_type='datetime', tools=[tool_list, _hover])\n", (9488, 9631), False, 'from bokeh.plotting import figure, ColumnDataSource\n'), ((2757, 2784), 'pandas.read_sql', 'pd.read_sql', (['sql', 'db.engine'], {}), '(sql, db.engine)\n', (2768, 2784), True, 'import pandas as pd\n'), ((2803, 2823), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (2819, 2823), False, 'from bokeh.plotting import figure, ColumnDataSource\n')] |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
path_results = '../results/images/'
# This function receives a dataset with a binary target column and plots a histogram of its value distribution.
def graph_target(data,name="target",figsize=(6,4),title_name=None,color_text="white",save=False,name_file='target_distribution'):
plt.figure(figsize=figsize)
total = float(len(data)) # one person per row
title_name = "Target distribution"+" of "+str(int(total))+" users" if title_name is None else title_name+" of "+str(int(total))+" users"
ax = sns.countplot(x=name, data=data) # for Seaborn version 0.7 and more
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height/3,
'{:.2f}%\n{:d}'.format(100*height/total,height),
ha="center",color=color_text,fontweight='bold')#fontsize=10
plt.title(title_name)
plt.show()
if save:
figure = ax.get_figure()
figure.savefig(path_results+name_file+'.png',dpi=400, bbox_inches = 'tight')
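# Example usage (illustrative sketch only; assumes a DataFrame with a binary "target" column):
# df = pd.DataFrame({"target": np.random.binomial(1, 0.3, size=1000)})
# graph_target(df, name="target", title_name="Churn", save=False)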
# plot histograms of train and test to understand the differences between them
def plot_comp_hist(data1,data2,l_range=[-np.inf,np.inf],labels=['x','y'],title='histogram',bins=20,alpha=0.5):
x = data1[(data1>=l_range[0])&(data1<l_range[1])]
y = data2[(data2>=l_range[0])&(data2<l_range[1])]
plt.hist([x, y],label=labels, bins = bins, alpha=alpha)
plt.legend(loc='upper right')
plt.title(title)
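# Example usage (illustrative sketch; `train` and `test` stand in for any two numeric Series):
# plot_comp_hist(train["saldo"], test["saldo"], l_range=[0.2, 3], labels=["train", "test"], title="saldo < 3")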
#rcc_train[(rcc_train.saldo>=0.2)&(rcc_train.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5)
#rcc_train[(rcc_test.saldo>=0.2)&(rcc_test.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5) | [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure",
"seaborn.countplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((354, 381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (364, 381), True, 'import matplotlib.pyplot as plt\n'), ((583, 615), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'name', 'data': 'data'}), '(x=name, data=data)\n', (596, 615), True, 'import seaborn as sns\n'), ((923, 944), 'matplotlib.pyplot.title', 'plt.title', (['title_name'], {}), '(title_name)\n', (932, 944), True, 'import matplotlib.pyplot as plt\n'), ((949, 959), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (957, 959), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1453), 'matplotlib.pyplot.hist', 'plt.hist', (['[x, y]'], {'label': 'labels', 'bins': 'bins', 'alpha': 'alpha'}), '([x, y], label=labels, bins=bins, alpha=alpha)\n', (1407, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1488), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1469, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1509), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1502, 1509), True, 'import matplotlib.pyplot as plt\n')] |
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _
class BlacklistValidator:
blacklist = []
def __call__(self, value):
# Validation logic
if value in self.blacklist:
raise ValidationError(
_('This slug has an invalid value: %(value)s.'),
code='invalid',
params={'value': value},
)
@deconstructible
class EventSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'api',
'events',
]
@deconstructible
class OrganizerSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'pretixdroid',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'about',
'api',
]
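# Illustrative usage sketch (hypothetical model, not part of this module): attach a
# validator instance to a slug field so blacklisted values are rejected on clean().
# class Event(models.Model):
#     slug = models.CharField(max_length=50, validators=[EventSlugBlacklistValidator()])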
| [
"django.utils.translation.ugettext_lazy"
]
| [((354, 401), 'django.utils.translation.ugettext_lazy', '_', (['"""This slug has an invalid value: %(value)s."""'], {}), "('This slug has an invalid value: %(value)s.')\n", (355, 401), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import _winreg
import os
def get_shared_cache_folder():
"""
Look in the registry for the configured cache folder.
    If there is no entry, None is returned.
    :return: the configured cache folder path, or None
"""
_winreg.aReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
try:
key = _winreg.OpenKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
path, _ = _winreg.QueryValueEx(key, "CACHEFOLDER")
except OSError:
return None
return path
def set_shared_cache_folder(folder_path):
if not os.path.isdir(folder_path):
try:
os.makedirs(folder_path)
except OSError:
raise ValueError("Could not create directory {}".format(folder_path))
folder_path = os.path.normpath(folder_path) + os.sep
key_eveonline = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
_winreg.SetValueEx(key_eveonline, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
key_eveprobe = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEPROBE")
_winreg.SetValueEx(key_eveprobe, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
def get_index_path(hint):
return hint
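# Illustrative usage sketch (Windows only; the path below is hypothetical):
# set_shared_cache_folder(r"C:\EVE\SharedCache")
# print(get_shared_cache_folder())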
| [
"_winreg.CreateKey",
"os.makedirs",
"_winreg.QueryValueEx",
"_winreg.OpenKey",
"os.path.normpath",
"os.path.isdir",
"_winreg.ConnectRegistry",
"_winreg.SetValueEx"
]
| [((210, 266), '_winreg.ConnectRegistry', '_winreg.ConnectRegistry', (['None', '_winreg.HKEY_CURRENT_USER'], {}), '(None, _winreg.HKEY_CURRENT_USER)\n', (233, 266), False, 'import _winreg\n'), ((779, 838), '_winreg.CreateKey', '_winreg.CreateKey', (['_winreg.aReg', '"""SOFTWARE\\\\CCP\\\\EVEONLINE"""'], {}), "(_winreg.aReg, 'SOFTWARE\\\\CCP\\\\EVEONLINE')\n", (796, 838), False, 'import _winreg\n'), ((842, 927), '_winreg.SetValueEx', '_winreg.SetValueEx', (['key_eveonline', '"""CACHEFOLDER"""', '(0)', '_winreg.REG_SZ', 'folder_path'], {}), "(key_eveonline, 'CACHEFOLDER', 0, _winreg.REG_SZ, folder_path\n )\n", (860, 927), False, 'import _winreg\n'), ((943, 1001), '_winreg.CreateKey', '_winreg.CreateKey', (['_winreg.aReg', '"""SOFTWARE\\\\CCP\\\\EVEPROBE"""'], {}), "(_winreg.aReg, 'SOFTWARE\\\\CCP\\\\EVEPROBE')\n", (960, 1001), False, 'import _winreg\n'), ((1005, 1084), '_winreg.SetValueEx', '_winreg.SetValueEx', (['key_eveprobe', '"""CACHEFOLDER"""', '(0)', '_winreg.REG_SZ', 'folder_path'], {}), "(key_eveprobe, 'CACHEFOLDER', 0, _winreg.REG_SZ, folder_path)\n", (1023, 1084), False, 'import _winreg\n'), ((290, 347), '_winreg.OpenKey', '_winreg.OpenKey', (['_winreg.aReg', '"""SOFTWARE\\\\CCP\\\\EVEONLINE"""'], {}), "(_winreg.aReg, 'SOFTWARE\\\\CCP\\\\EVEONLINE')\n", (305, 347), False, 'import _winreg\n'), ((365, 405), '_winreg.QueryValueEx', '_winreg.QueryValueEx', (['key', '"""CACHEFOLDER"""'], {}), "(key, 'CACHEFOLDER')\n", (385, 405), False, 'import _winreg\n'), ((517, 543), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (530, 543), False, 'import os\n'), ((719, 748), 'os.path.normpath', 'os.path.normpath', (['folder_path'], {}), '(folder_path)\n', (735, 748), False, 'import os\n'), ((570, 594), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (581, 594), False, 'import os\n')] |
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(name='awspk',
version='0.1',
      description='An AWS CLI pen knife with loads of interesting stuff',
author='<NAME>',
author_email='<EMAIL>',
py_modules=['awspk'],
license='LICENSE',
)
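# Illustrative usage (run from the directory containing this file):
#   pip install .        # or: python setup.py install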
| [
"setuptools.setup"
]
| [((69, 260), 'setuptools.setup', 'setup', ([], {'name': '"""awspk"""', 'version': '"""0.1"""', 'description': '"""A aws cli pen knife with loads of interested stuff"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'py_modules': "['awspk']", 'license': '"""LICENSE"""'}), "(name='awspk', version='0.1', description=\n 'A aws cli pen knife with loads of interested stuff', author='<NAME>',\n author_email='<EMAIL>', py_modules=['awspk'], license='LICENSE')\n", (74, 260), False, 'from setuptools import setup, find_packages\n')] |
# Xlib.display -- high level display object
#
# Copyright (C) 2000 <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python modules
import types
# Python 2/3 compatibility.
from six import create_unbound_method
# Xlib modules
from . import error
from . import ext
from . import X
# Xlib.protocol modules
from .protocol import display as protocol_display
from .protocol import request, event, rq
# Xlib.xobjects modules
from .xobject import resource
from .xobject import drawable
from .xobject import fontable
from .xobject import colormap
from .xobject import cursor
_resource_baseclasses = {
'resource': resource.Resource,
'drawable': drawable.Drawable,
'window': drawable.Window,
'pixmap': drawable.Pixmap,
'fontable': fontable.Fontable,
'font': fontable.Font,
'gc': fontable.GC,
'colormap': colormap.Colormap,
'cursor': cursor.Cursor,
}
_resource_hierarchy = {
'resource': ('drawable', 'window', 'pixmap',
'fontable', 'font', 'gc',
'colormap', 'cursor'),
'drawable': ('window', 'pixmap'),
'fontable': ('font', 'gc')
}
class _BaseDisplay(protocol_display.Display):
resource_classes = _resource_baseclasses.copy()
# Implement a cache of atom names, used by Window objects when
# dealing with some ICCCM properties not defined in Xlib.Xatom
def __init__(self, *args, **keys):
protocol_display.Display.__init__(self, *args, **keys)
self._atom_cache = {}
def get_atom(self, atomname, only_if_exists=0):
if atomname in self._atom_cache:
return self._atom_cache[atomname]
r = request.InternAtom(display = self, name = atomname, only_if_exists = only_if_exists)
# don't cache NONE responses in case someone creates this later
if r.atom != X.NONE:
self._atom_cache[atomname] = r.atom
return r.atom
class Display(object):
def __init__(self, display = None):
self.display = _BaseDisplay(display)
# Create the keymap cache
self._keymap_codes = [()] * 256
self._keymap_syms = {}
self._update_keymap(self.display.info.min_keycode,
(self.display.info.max_keycode
- self.display.info.min_keycode + 1))
# Translations for keysyms to strings.
self.keysym_translations = {}
# Find all supported extensions
self.extensions = []
self.class_extension_dicts = {}
self.display_extension_methods = {}
# a dict that maps the event name to the code
# or, when it's an event with a subcode, to a tuple of (event,subcode)
# note this wraps the dict so you address it as
# extension_event.EXTENSION_EVENT_NAME rather than
# extension_event["EXTENSION_EVENT_NAME"]
self.extension_event = rq.DictWrapper({})
exts = self.list_extensions()
# Go through all extension modules
for extname, modname in ext.__extensions__:
if extname in exts:
# Import the module and fetch it
__import__('Xlib.ext.' + modname)
mod = getattr(ext, modname)
info = self.query_extension(extname)
self.display.set_extension_major(extname, info.major_opcode)
# Call initialiasation function
mod.init(self, info)
self.extensions.append(extname)
# Finalize extensions by creating new classes
for class_name, dictionary in self.class_extension_dicts.items():
origcls = self.display.resource_classes[class_name]
self.display.resource_classes[class_name] = type(origcls.__name__,
(origcls,),
dictionary)
# Problem: we have already created some objects without the
# extensions: the screen roots and default colormaps.
# Fix that by reinstantiating them.
for screen in self.display.info.roots:
screen.root = self.display.resource_classes['window'](self.display, screen.root.id)
screen.default_colormap = self.display.resource_classes['colormap'](self.display, screen.default_colormap.id)
def get_display_name(self):
"""Returns the name used to connect to the server, either
provided when creating the Display object, or fetched from the
environmental variable $DISPLAY."""
return self.display.get_display_name()
def fileno(self):
"""Returns the file descriptor number of the underlying socket.
This method is provided to allow Display objects to be passed
        to select.select()."""
return self.display.fileno()
def close(self):
"""Close the display, freeing the resources that it holds."""
self.display.close()
def set_error_handler(self, handler):
"""Set the default error handler which will be called for all
unhandled errors. handler should take two arguments as a normal
request error handler, but the second argument (the request) will
be None. See section Error Handling."""
self.display.set_error_handler(handler)
def flush(self):
"""Flush the request queue, building and sending the queued
requests. This can be necessary in applications that never wait
for events, and in threaded applications."""
self.display.flush()
def sync(self):
"""Flush the queue and wait until the server has processed all
the queued requests. Use this e.g. when it is important that
errors caused by a certain request is trapped."""
# Do a light-weight replyrequest to sync. There must
# be a better way to do it...
self.get_pointer_control()
def next_event(self):
"""Return the next event. If there are no events queued, it will
block until the next event is fetched from the server."""
return self.display.next_event()
def pending_events(self):
"""Return the number of events queued, i.e. the number of times
that Display.next_event() can be called without blocking."""
return self.display.pending_events()
def has_extension(self, extension):
"""Check if both the server and the client library support the X
extension named extension."""
return extension in self.extensions
def create_resource_object(self, type, id):
"""Create a resource object of type for the integer id. type
should be one of the following strings:
resource
drawable
window
pixmap
fontable
font
gc
colormap
cursor
This function can be used when a resource ID has been fetched
        e.g. from a resource or a command line argument. Resource
objects should never be created by instantiating the appropriate
class directly, since any X extensions dynamically added by the
library will not be available.
"""
return self.display.resource_classes[type](self.display, id)
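    # Illustrative sketch (the id below is hypothetical): wrap an integer window id
    # fetched from a property so that dynamically added extension methods are available:
    #   win = disp.create_resource_object('window', 0x2a00005)   # disp is a Display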
# We need this to handle display extension methods
def __getattr__(self, attr):
try:
function = self.display_extension_methods[attr]
return types.MethodType(function, self)
except KeyError:
raise AttributeError(attr)
###
### display information retrieval
###
def screen(self, sno = None):
if sno is None:
return self.display.info.roots[self.display.default_screen]
else:
return self.display.info.roots[sno]
def screen_count(self):
"""Return the total number of screens on the display."""
return len(self.display.info.roots)
def get_default_screen(self):
"""Return the number of the default screen, extracted from the
display name."""
return self.display.get_default_screen()
###
### Extension module interface
###
def extension_add_method(self, object, name, function):
"""extension_add_method(object, name, function)
Add an X extension module method. OBJECT is the type of
object to add the function to, a string from this list:
display
resource
drawable
window
pixmap
fontable
font
gc
colormap
cursor
NAME is the name of the method, a string. FUNCTION is a
normal function whose first argument is a 'self'.
"""
if object == 'display':
if hasattr(self, name):
raise AssertionError('attempting to replace display method: %s' % name)
self.display_extension_methods[name] = function
else:
class_list = (object, ) + _resource_hierarchy.get(object, ())
for class_name in class_list:
cls = _resource_baseclasses[class_name]
if hasattr(cls, name):
raise AssertionError('attempting to replace %s method: %s' % (class_name, name))
method = create_unbound_method(function, cls)
# Maybe should check extension overrides too
try:
self.class_extension_dicts[class_name][name] = method
except KeyError:
self.class_extension_dicts[class_name] = { name: method }
def extension_add_event(self, code, evt, name = None):
"""extension_add_event(code, evt, [name])
Add an extension event. CODE is the numeric code, and EVT is
the event class. EVT will be cloned, and the attribute _code
of the new event class will be set to CODE.
If NAME is omitted, it will be set to the name of EVT. This
name is used to insert an entry in the DictWrapper
extension_event.
"""
newevt = type(evt.__name__, evt.__bases__,
evt.__dict__.copy())
newevt._code = code
self.display.add_extension_event(code, newevt)
if name is None:
name = evt.__name__
setattr(self.extension_event, name, code)
def extension_add_subevent(self, code, subcode, evt, name = None):
"""extension_add_subevent(code, evt, [name])
Add an extension subevent. CODE is the numeric code, subcode
is the sub-ID of this event that shares the code ID with other
sub-events and EVT is the event class. EVT will be cloned, and
the attribute _code of the new event class will be set to CODE.
If NAME is omitted, it will be set to the name of EVT. This
name is used to insert an entry in the DictWrapper
extension_event.
"""
newevt = type(evt.__name__, evt.__bases__,
evt.__dict__.copy())
newevt._code = code
self.display.add_extension_event(code, newevt, subcode)
if name is None:
name = evt.__name__
# store subcodes as a tuple of (event code, subcode) in the
# extension dict maintained in the display object
setattr(self.extension_event, name, (code,subcode))
def add_extension_error(self, code, err):
"""add_extension_error(code, err)
Add an extension error. CODE is the numeric code, and ERR is
the error class.
"""
self.display.add_extension_error(code, err)
###
### keymap cache implementation
###
# The keycode->keysym map is stored in a list with 256 elements.
# Each element represents a keycode, and the tuple elements are
# the keysyms bound to the key.
# The keysym->keycode map is stored in a mapping, where the keys
# are keysyms. The values are a sorted list of tuples with two
# elements each: (index, keycode)
# keycode is the code for a key to which this keysym is bound, and
# index is the keysyms index in the map for that keycode.
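    # For example (illustrative values only): if keycode 38 is bound to the keysyms
    # (XK_a, XK_A), then _keymap_codes[38] == (XK_a, XK_A), while _keymap_syms[XK_a]
    # contains (0, 38) and _keymap_syms[XK_A] contains (1, 38).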
def keycode_to_keysym(self, keycode, index):
"""Convert a keycode to a keysym, looking in entry index.
Normally index 0 is unshifted, 1 is shifted, 2 is alt grid, and 3
is shift+alt grid. If that key entry is not bound, X.NoSymbol is
returned."""
try:
return self._keymap_codes[keycode][index]
except IndexError:
return X.NoSymbol
def keysym_to_keycode(self, keysym):
"""Look up the primary keycode that is bound to keysym. If
several keycodes are found, the one with the lowest index and
lowest code is returned. If keysym is not bound to any key, 0 is
returned."""
try:
return self._keymap_syms[keysym][0][1]
except (KeyError, IndexError):
return 0
    def keysym_to_keycodes(self, keysym):
        """Look up all the keycodes that are bound to keysym. A list of
tuples (keycode, index) is returned, sorted primarily on the
lowest index and secondarily on the lowest keycode."""
try:
# Copy the map list, reversing the arguments
return map(lambda x: (x[1], x[0]), self._keymap_syms[keysym])
except KeyError:
return []
def refresh_keyboard_mapping(self, evt):
"""This method should be called once when a MappingNotify event
is received, to update the keymap cache. evt should be the event
object."""
if isinstance(evt, event.MappingNotify):
if evt.request == X.MappingKeyboard:
self._update_keymap(evt.first_keycode, evt.count)
else:
raise TypeError('expected a MappingNotify event')
def _update_keymap(self, first_keycode, count):
"""Internal function, called to refresh the keymap cache.
"""
# Delete all sym->code maps for the changed codes
lastcode = first_keycode + count
for keysym, codes in self._keymap_syms.items():
i = 0
while i < len(codes):
code = codes[i][1]
if code >= first_keycode and code < lastcode:
del codes[i]
else:
i = i + 1
# Get the new keyboard mapping
keysyms = self.get_keyboard_mapping(first_keycode, count)
# Replace code->sym map with the new map
self._keymap_codes[first_keycode:lastcode] = keysyms
# Update sym->code map
code = first_keycode
for syms in keysyms:
index = 0
for sym in syms:
if sym != X.NoSymbol:
if sym in self._keymap_syms:
symcodes = self._keymap_syms[sym]
symcodes.append((index, code))
symcodes.sort()
else:
self._keymap_syms[sym] = [(index, code)]
index = index + 1
code = code + 1
###
### client-internal keysym to string translations
###
def lookup_string(self, keysym):
"""Return a string corresponding to KEYSYM, or None if no
reasonable translation is found.
"""
s = self.keysym_translations.get(keysym)
if s is not None:
return s
import Xlib.XK
return Xlib.XK.keysym_to_string(keysym)
def rebind_string(self, keysym, newstring):
"""Change the translation of KEYSYM to NEWSTRING.
If NEWSTRING is None, remove old translation if any.
"""
if newstring is None:
try:
del self.keysym_translations[keysym]
except KeyError:
pass
else:
self.keysym_translations[keysym] = newstring
###
### X requests
###
def intern_atom(self, name, only_if_exists = 0):
"""Intern the string name, returning its atom number. If
only_if_exists is true and the atom does not already exist, it
will not be created and X.NONE is returned."""
r = request.InternAtom(display = self.display,
name = name,
only_if_exists = only_if_exists)
return r.atom
def get_atom(self, atom, only_if_exists = 0):
"""Alias for intern_atom, using internal cache"""
return self.display.get_atom(atom, only_if_exists)
def get_atom_name(self, atom):
"""Look up the name of atom, returning it as a string. Will raise
BadAtom if atom does not exist."""
r = request.GetAtomName(display = self.display,
atom = atom)
return r.name
def get_selection_owner(self, selection):
"""Return the window that owns selection (an atom), or X.NONE if
there is no owner for the selection. Can raise BadAtom."""
r = request.GetSelectionOwner(display = self.display,
selection = selection)
return r.owner
def send_event(self, destination, event, event_mask = 0, propagate = 0,
onerror = None):
"""Send a synthetic event to the window destination which can be
a window object, or X.PointerWindow or X.InputFocus. event is the
event object to send, instantiated from one of the classes in
protocol.events. See XSendEvent(3X11) for details.
There is also a Window.send_event() method."""
request.SendEvent(display = self.display,
onerror = onerror,
propagate = propagate,
destination = destination,
event_mask = event_mask,
event = event)
    def ungrab_pointer(self, time, onerror = None):
        """Release a grabbed pointer and any queued events. See
XUngrabPointer(3X11)."""
request.UngrabPointer(display = self.display,
onerror = onerror,
time = time)
def change_active_pointer_grab(self, event_mask, cursor, time, onerror = None):
"""Change the dynamic parameters of a pointer grab. See
XChangeActivePointerGrab(3X11)."""
request.ChangeActivePointerGrab(display = self.display,
onerror = onerror,
cursor = cursor,
time = time,
event_mask = event_mask)
def ungrab_keyboard(self, time, onerror = None):
"""Ungrab a grabbed keyboard and any queued events. See
XUngrabKeyboard(3X11)."""
request.UngrabKeyboard(display = self.display,
onerror = onerror,
time = time)
def allow_events(self, mode, time, onerror = None):
"""Release some queued events. mode should be one of
X.AsyncPointer, X.SyncPointer, X.AsyncKeyboard, X.SyncKeyboard,
X.ReplayPointer, X.ReplayKeyboard, X.AsyncBoth, or X.SyncBoth.
time should be a timestamp or X.CurrentTime."""
request.AllowEvents(display = self.display,
onerror = onerror,
mode = mode,
time = time)
def grab_server(self, onerror = None):
"""Disable processing of requests on all other client connections
until the server is ungrabbed. Server grabbing should be avoided
as much as possible."""
request.GrabServer(display = self.display,
onerror = onerror)
def ungrab_server(self, onerror = None):
"""Release the server if it was previously grabbed by this client."""
request.UngrabServer(display = self.display,
onerror = onerror)
def warp_pointer(self, x, y, src_window = X.NONE, src_x = 0, src_y = 0,
src_width = 0, src_height = 0, onerror = None):
"""Move the pointer relative its current position by the offsets
(x, y). However, if src_window is a window the pointer is only
moved if the specified rectangle in src_window contains it. If
src_width is 0 it will be replaced with the width of src_window -
src_x. src_height is treated in a similar way.
To move the pointer to absolute coordinates, use Window.warp_pointer()."""
request.WarpPointer(display = self.display,
onerror = onerror,
src_window = src_window,
dst_window = X.NONE,
src_x = src_x,
src_y = src_y,
src_width = src_width,
src_height = src_height,
dst_x = x,
dst_y = y)
def set_input_focus(self, focus, revert_to, time, onerror = None):
"""Set input focus to focus, which should be a window,
X.PointerRoot or X.NONE. revert_to specifies where the focus
reverts to if the focused window becomes not visible, and should
be X.RevertToParent, RevertToPointerRoot, or RevertToNone. See
XSetInputFocus(3X11) for details.
There is also a Window.set_input_focus()."""
request.SetInputFocus(display = self.display,
onerror = onerror,
revert_to = revert_to,
focus = focus,
time = time)
def get_input_focus(self):
"""Return an object with the following attributes:
focus
The window which currently holds the input
focus, X.NONE or X.PointerRoot.
revert_to
Where the focus will revert, one of X.RevertToParent,
RevertToPointerRoot, or RevertToNone. """
return request.GetInputFocus(display = self.display)
def query_keymap(self):
"""Return a bit vector for the logical state of the keyboard,
where each bit set to 1 indicates that the corresponding key is
currently pressed down. The vector is represented as a list of 32
integers. List item N contains the bits for keys 8N to 8N + 7
with the least significant bit in the byte representing key 8N."""
r = request.QueryKeymap(display = self.display)
return r.map
    def open_font(self, name):
        """Open the font identified by the pattern name and return its
font object. If name does not match any font, None is returned."""
fid = self.display.allocate_resource_id()
ec = error.CatchError(error.BadName)
request.OpenFont(display = self.display,
onerror = ec,
fid = fid,
name = name)
self.sync()
if ec.get_error():
self.display.free_resource_id(fid)
return None
else:
cls = self.display.get_resource_class('font', fontable.Font)
return cls(self.display, fid, owner = 1)
def list_fonts(self, pattern, max_names):
"""Return a list of font names matching pattern. No more than
max_names will be returned."""
r = request.ListFonts(display = self.display,
max_names = max_names,
pattern = pattern)
return r.fonts
def list_fonts_with_info(self, pattern, max_names):
"""Return a list of fonts matching pattern. No more than
max_names will be returned. Each list item represents one font
and has the following properties:
name
The name of the font.
min_bounds
max_bounds
min_char_or_byte2
max_char_or_byte2
default_char
draw_direction
min_byte1
max_byte1
all_chars_exist
font_ascent
font_descent
replies_hint
        See the description of XFontStruct in XGetFontProperty(3X11)
for details on these values.
properties
A list of properties. Each entry has two attributes:
name
The atom identifying this property.
value
A 32-bit unsigned value.
"""
return request.ListFontsWithInfo(display = self.display,
max_names = max_names,
pattern = pattern)
def set_font_path(self, path, onerror = None):
"""Set the font path to path, which should be a list of strings.
If path is empty, the default font path of the server will be
restored."""
request.SetFontPath(display = self.display,
onerror = onerror,
path = path)
def get_font_path(self):
"""Return the current font path as a list of strings."""
r = request.GetFontPath(display = self.display)
return r.paths
def query_extension(self, name):
"""Ask the server if it supports the extension name. If it is
supported an object with the following attributes is returned:
major_opcode
            The major opcode that the requests of this extension use.
        first_event
            The base event code if the extension has additional events, or 0.
        first_error
            The base error code if the extension has additional errors, or 0.
If the extension is not supported, None is returned."""
r = request.QueryExtension(display = self.display,
name = name)
if r.present:
return r
else:
return None
def list_extensions(self):
"""Return a list of all the extensions provided by the server."""
r = request.ListExtensions(display = self.display)
return r.names
def change_keyboard_mapping(self, first_keycode, keysyms, onerror = None):
"""Modify the keyboard mapping, starting with first_keycode.
keysyms is a list of tuples of keysyms. keysyms[n][i] will be
assigned to keycode first_keycode+n at index i."""
request.ChangeKeyboardMapping(display = self.display,
onerror = onerror,
first_keycode = first_keycode,
keysyms = keysyms)
def get_keyboard_mapping(self, first_keycode, count):
"""Return the current keyboard mapping as a list of tuples,
        starting at first_keycode and containing no more than count entries."""
r = request.GetKeyboardMapping(display = self.display,
first_keycode = first_keycode,
count = count)
return r.keysyms
def change_keyboard_control(self, onerror = None, **keys):
"""Change the parameters provided as keyword arguments:
key_click_percent
The volume of key clicks between 0 (off) and 100 (load).
-1 will restore default setting.
bell_percent
The base volume of the bell, coded as above.
bell_pitch
The pitch of the bell in Hz, -1 restores the default.
bell_duration
The duration of the bell in milliseconds, -1 restores
the default.
led
led_mode
led_mode should be X.LedModeOff or X.LedModeOn. If led is
provided, it should be a 32-bit mask listing the LEDs that
should change. If led is not provided, all LEDs are changed.
key
auto_repeat_mode
auto_repeat_mode should be one of X.AutoRepeatModeOff,
X.AutoRepeatModeOn, or X.AutoRepeatModeDefault. If key is
provided, that key will be modified, otherwise the global
state for the entire keyboard will be modified."""
request.ChangeKeyboardControl(display = self.display,
onerror = onerror,
attrs = keys)
def get_keyboard_control(self):
"""Return an object with the following attributes:
global_auto_repeat
X.AutoRepeatModeOn or X.AutoRepeatModeOff.
auto_repeats
A list of 32 integers. List item N contains the bits for keys
8N to 8N + 7 with the least significant bit in the byte
representing key 8N. If a bit is on, autorepeat is enabled
for the corresponding key.
led_mask
A 32-bit mask indicating which LEDs are on.
key_click_percent
The volume of key click, from 0 to 100.
bell_percent
bell_pitch
bell_duration
The volume, pitch and duration of the bell. """
return request.GetKeyboardControl(display = self.display)
    def bell(self, percent = 0, onerror = None):
        """Ring the bell at the volume percent, which is relative to the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent)
def change_pointer_control(self, accel = None, threshold = None, onerror = None):
"""To change the pointer acceleration, set accel to a tuple (num,
denum). The pointer will then move num/denum times the normal
speed if it moves beyond the threshold number of pixels at once.
To change the threshold, set it to the number of pixels. -1
restores the default."""
if accel is None:
do_accel = 0
accel_num = 0
accel_denum = 0
else:
do_accel = 1
accel_num, accel_denum = accel
if threshold is None:
do_threshold = 0
else:
do_threshold = 1
request.ChangePointerControl(display = self.display,
onerror = onerror,
do_accel = do_accel,
do_thres = do_threshold,
accel_num = accel_num,
accel_denum = accel_denum,
threshold = threshold)
def get_pointer_control(self):
"""Return an object with the following attributes:
accel_num
accel_denom
The acceleration as numerator/denumerator.
threshold
The number of pixels the pointer must move before the
acceleration kicks in."""
return request.GetPointerControl(display = self.display)
def set_screen_saver(self, timeout, interval, prefer_blank, allow_exposures, onerror = None):
"""See XSetScreenSaver(3X11)."""
request.SetScreenSaver(display = self.display,
onerror = onerror,
timeout = timeout,
interval = interval,
prefer_blank = prefer_blank,
allow_exposures = allow_exposures)
def get_screen_saver(self):
"""Return an object with the attributes timeout, interval,
prefer_blanking, allow_exposures. See XGetScreenSaver(3X11) for
details."""
return request.GetScreenSaver(display = self.display)
def change_hosts(self, mode, host_family, host, onerror = None):
"""mode is either X.HostInsert or X.HostDelete. host_family is
one of X.FamilyInternet, X.FamilyDECnet or X.FamilyChaos.
host is a list of bytes. For the Internet family, it should be the
four bytes of an IPv4 address."""
request.ChangeHosts(display = self.display,
onerror = onerror,
mode = mode,
host_family = host_family,
host = host)
def list_hosts(self):
"""Return an object with the following attributes:
mode
X.EnableAccess if the access control list is used, X.DisableAccess otherwise.
hosts
The hosts on the access list. Each entry has the following attributes:
family
X.FamilyInternet, X.FamilyDECnet, or X.FamilyChaos.
name
A list of byte values, the coding depends on family. For the Internet family, it is the 4 bytes of an IPv4 address.
"""
return request.ListHosts(display = self.display)
def set_access_control(self, mode, onerror = None):
"""Enable use of access control lists at connection setup if mode
is X.EnableAccess, disable if it is X.DisableAccess."""
request.SetAccessControl(display = self.display,
onerror = onerror,
mode = mode)
def set_close_down_mode(self, mode, onerror = None):
"""Control what will happen with the client's resources at
connection close. The default is X.DestroyAll, the other values
are X.RetainPermanent and X.RetainTemporary."""
request.SetCloseDownMode(display = self.display,
onerror = onerror,
mode = mode)
def force_screen_saver(self, mode, onerror = None):
"""If mode is X.ScreenSaverActive the screen saver is activated.
If it is X.ScreenSaverReset, the screen saver is deactivated as
if device input had been received."""
request.ForceScreenSaver(display = self.display,
onerror = onerror,
mode = mode)
def set_pointer_mapping(self, map):
"""Set the mapping of the pointer buttons. map is a list of
logical button numbers. map must be of the same length as the
list returned by Display.get_pointer_mapping().
map[n] sets the
logical number for the physical button n+1. Logical number 0
disables the button. Two physical buttons cannot be mapped to the
same logical number.
        If one of the buttons to be altered is
logically in the down state, X.MappingBusy is returned and the
mapping is not changed. Otherwise the mapping is changed and
X.MappingSuccess is returned."""
r = request.SetPointerMapping(display = self.display,
map = map)
return r.status
def get_pointer_mapping(self):
"""Return a list of the pointer button mappings. Entry N in the
list sets the logical button number for the physical button N+1."""
r = request.GetPointerMapping(display = self.display)
return r.map
def set_modifier_mapping(self, keycodes):
"""Set the keycodes for the eight modifiers X.Shift, X.Lock,
X.Control, X.Mod1, X.Mod2, X.Mod3, X.Mod4 and X.Mod5. keycodes
        should be an eight-element list where each entry is a list of the
keycodes that should be bound to that modifier.
If any changed
key is logically in the down state, X.MappingBusy is returned and
the mapping is not changed. If the mapping violates some server
restriction, X.MappingFailed is returned. Otherwise the mapping
is changed and X.MappingSuccess is returned."""
r = request.SetModifierMapping(display = self.display,
keycodes = keycodes)
return r.status
def get_modifier_mapping(self):
"""Return a list of eight lists, one for each modifier. The list
can be indexed using X.ShiftMapIndex, X.Mod1MapIndex, and so on.
The sublists list the keycodes bound to that modifier."""
r = request.GetModifierMapping(display = self.display)
return r.keycodes
def no_operation(self, onerror = None):
"""Do nothing but send a request to the server."""
request.NoOperation(display = self.display,
onerror = onerror)
| [
"six.create_unbound_method",
"types.MethodType"
]
| [((8110, 8142), 'types.MethodType', 'types.MethodType', (['function', 'self'], {}), '(function, self)\n', (8126, 8142), False, 'import types\n'), ((9973, 10009), 'six.create_unbound_method', 'create_unbound_method', (['function', 'cls'], {}), '(function, cls)\n', (9994, 10009), False, 'from six import create_unbound_method\n')] |
from xagents import a2c, acer, ddpg, dqn, ppo, td3, trpo
from xagents.a2c.agent import A2C
from xagents.acer.agent import ACER
from xagents.base import OffPolicy
from xagents.ddpg.agent import DDPG
from xagents.dqn.agent import DQN
from xagents.ppo.agent import PPO
from xagents.td3.agent import TD3
from xagents.trpo.agent import TRPO
from xagents.utils.cli import play_args, train_args, tune_args
from xagents.utils.common import register_models
__author__ = 'schissmantics'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__version__ = '1.0.1'
agents = {
'a2c': {'module': a2c, 'agent': A2C},
'acer': {'module': acer, 'agent': ACER},
'dqn': {'module': dqn, 'agent': DQN},
'ppo': {'module': ppo, 'agent': PPO},
'td3': {'module': td3, 'agent': TD3},
'trpo': {'module': trpo, 'agent': TRPO},
'ddpg': {'module': ddpg, 'agent': DDPG},
}
register_models(agents)
commands = {
'train': (train_args, 'fit', 'Train given an agent and environment'),
'play': (
play_args,
'play',
'Play a game given a trained agent and environment',
),
'tune': (
tune_args,
'',
'Tune hyperparameters given an agent, hyperparameter specs, and environment',
),
}
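# Illustrative sketch (not part of this package): the registries above let a CLI
# dispatcher resolve an agent class and a command entry point by name, e.g.
#   agent_cls = agents['ppo']['agent']                    # -> PPO
#   arg_spec, method_name, help_text = commands['train']  # -> (train_args, 'fit', ...)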
| [
"xagents.utils.common.register_models"
]
| [((859, 882), 'xagents.utils.common.register_models', 'register_models', (['agents'], {}), '(agents)\n', (874, 882), False, 'from xagents.utils.common import register_models\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mmpt.utils import recursive_config
class BaseJob(object):
def __init__(self, yaml_file, dryrun=False):
self.yaml_file = yaml_file
self.config = recursive_config(yaml_file)
self.dryrun = dryrun
def submit(self, **kwargs):
raise NotImplementedError
def _normalize_cmd(self, cmd_list):
cmd_list = list(cmd_list)
yaml_index = cmd_list.index("[yaml]")
cmd_list[yaml_index] = self.yaml_file
return cmd_list
class LocalJob(BaseJob):
CMD_CONFIG = {
"local_single": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
],
"local_small": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "2"
],
"local_big": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "4"
],
"local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
}
def __init__(self, yaml_file, job_type=None, dryrun=False):
super().__init__(yaml_file, dryrun)
if job_type is None:
self.job_type = "local_single"
if self.config.task_type is not None:
self.job_type = self.config.task_type
else:
self.job_type = job_type
if self.job_type in ["local_single", "local_small"]:
if self.config.fairseq.dataset.batch_size > 32:
print("decreasing batch_size to 32 for local testing?")
def submit(self):
cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
if "predict" not in self.job_type:
# append fairseq args.
from mmpt.utils import load_config
config = load_config(config_file=self.yaml_file)
for field in config.fairseq:
for key in config.fairseq[field]:
                    if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]: # a list of binary flags.
param = ["--" + key.replace("_", "-")]
else:
if key == "lr":
value = str(config.fairseq[field][key][0])
elif key == "adam_betas":
value = "'"+str(config.fairseq[field][key])+"'"
else:
value = str(config.fairseq[field][key])
param = [
"--" + key.replace("_", "-"),
value
]
cmd_list.extend(param)
print("launching", " ".join(cmd_list))
if not self.dryrun:
os.system(" ".join(cmd_list))
return JobStatus("12345678")
class JobStatus(object):
def __init__(self, job_id):
self.job_id = job_id
def __repr__(self):
return self.job_id
def __str__(self):
return self.job_id
def done(self):
return False
def running(self):
return False
def result(self):
if self.done():
return "{} is done.".format(self.job_id)
else:
return "{} is running.".format(self.job_id)
def stderr(self):
return self.result()
def stdout(self):
return self.result()
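# Illustrative usage sketch (the yaml path is hypothetical):
#   job = LocalJob("projects/task/config.yaml", job_type="local_single", dryrun=True)
#   status = job.submit()      # with dryrun=True, prints the fairseq-train command without running it
#   print(status.result())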
| [
"mmpt.utils.recursive_config",
"mmpt.utils.load_config"
]
| [((359, 386), 'mmpt.utils.recursive_config', 'recursive_config', (['yaml_file'], {}), '(yaml_file)\n', (375, 386), False, 'from mmpt.utils import recursive_config\n'), ((2216, 2255), 'mmpt.utils.load_config', 'load_config', ([], {'config_file': 'self.yaml_file'}), '(config_file=self.yaml_file)\n', (2227, 2255), False, 'from mmpt.utils import load_config\n')] |
import os.path
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.TCCShellNub import TCCShellNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'tcc'
def start(poller):
stop()
initCmds = ('show version', 'show users', 'show time', 'show status', 'show inst/full',
'show object/full', 'show axisconfig', 'show focus', 'axis status', 'show scale',
'mir status')
safeCmds = r'(^show )|(status$)'
d = ASCIIReplyDecoder(EOL='\r', stripChars='\n', CIDfirst=False, debug=1)
e = ASCIICmdEncoder(EOL='\r', debug=1, CIDfirst=False)
tcc = TCCShellNub(poller, [
'/usr/bin/ssh', '-1', '-e', 'none', '-a', '-x', '-i',
os.path.expanduser('~/.ssh/tron'), '-T', 'tccuser@tcc25m'
],
initCmds=initCmds,
safeCmds=safeCmds,
needsAuth=True,
name=name,
encoder=e,
decoder=d,
logDir=os.path.join(g.logDir, name),
debug=1)
hub.addActor(tcc)
def stop():
n = hub.findActor(name)
if n:
hub.dropActor(n)
del n
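# Illustrative usage sketch (the poller comes from tron's event loop; hypothetical here):
#   start(poller)   # builds the ssh-backed TCCShellNub and registers it with the hub
#   stop()          # drops the actor again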
| [
"tron.Hub.Reply.Decoders.ASCIIReplyDecoder.ASCIIReplyDecoder",
"tron.hub.dropActor",
"tron.Hub.Command.Encoders.ASCIICmdEncoder.ASCIICmdEncoder",
"tron.hub.addActor",
"tron.hub.findActor"
]
| [((547, 616), 'tron.Hub.Reply.Decoders.ASCIIReplyDecoder.ASCIIReplyDecoder', 'ASCIIReplyDecoder', ([], {'EOL': "'\\r'", 'stripChars': '"""\n"""', 'CIDfirst': '(False)', 'debug': '(1)'}), "(EOL='\\r', stripChars='\\n', CIDfirst=False, debug=1)\n", (564, 616), False, 'from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder\n'), ((625, 675), 'tron.Hub.Command.Encoders.ASCIICmdEncoder.ASCIICmdEncoder', 'ASCIICmdEncoder', ([], {'EOL': "'\\r'", 'debug': '(1)', 'CIDfirst': '(False)'}), "(EOL='\\r', debug=1, CIDfirst=False)\n", (640, 675), False, 'from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder\n'), ((1045, 1062), 'tron.hub.addActor', 'hub.addActor', (['tcc'], {}), '(tcc)\n', (1057, 1062), False, 'from tron import g, hub\n'), ((1085, 1104), 'tron.hub.findActor', 'hub.findActor', (['name'], {}), '(name)\n', (1098, 1104), False, 'from tron import g, hub\n'), ((1123, 1139), 'tron.hub.dropActor', 'hub.dropActor', (['n'], {}), '(n)\n', (1136, 1139), False, 'from tron import g, hub\n')] |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import filecmp
import json
import pytest
import oci
import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage
import os
import random
import shutil
import six
import string
from tests import util
from tests import test_config_container
from mimetypes import guess_type
OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET = 100
OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT = 20
CONTENT_STRING_LENGTH = 5000
MID_SIZED_FILE_IN_MEBIBTYES = 20
LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES = 150 # Default multipart is 128MiB
# Holds the objects we create and their content so that we can verify results
bulk_get_object_to_content = {}
bulk_get_prefix_to_object = {
'a/b/c/d': [],
'a/b/c': [],
'a/b': [],
'/a': [],
'': []
}
bulk_get_bucket_name = None
bulk_put_large_files = set()
bulk_put_mid_sized_files = set()
root_bulk_put_folder = None
bulk_put_bucket_name = None
@pytest.fixture
def vcr_fixture(request):
with test_config_container.create_vcr(cassette_library_dir='services/object_storage/tests/cassettes').use_cassette('object_storage_bulk_operations_{name}.yml'.format(name=request.function.__name__)):
yield
# Generate test data for different operations:
#
# Bulk Get: create a new bucket and populate it with some objects, then tear it all down afterwards
# Bulk Put: create a folder structure containing small and large files, then tear it all down afterwards
# Bulk Delete: uses the folders and files generated for bulk put
@pytest.fixture(scope='module', autouse=True)
def generate_test_data(object_storage_client):
global bulk_get_object_to_content, bulk_get_bucket_name, root_bulk_put_folder, bulk_put_large_files, bulk_put_mid_sized_files, bulk_put_bucket_name
# Create a test bucket
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_get_bucket_name = create_bucket_request.name
    # Create items at various hierarchy levels (to be surfaced as different directories on disk)
for i in range(OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET):
if i % 5 == 4:
object_name = 'a/b/c/d/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c/d'].append(object_name)
elif i % 5 == 3:
object_name = 'a/b/c/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c'].append(object_name)
elif i % 5 == 2:
object_name = 'a/b/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b'].append(object_name)
elif i % 5 == 1:
# This is equivalent to a/ on the file system because we drop the leading slash (we drop path separators from the front to avoid unexpected results)
object_name = '/a/Object_{}'.format(i)
bulk_get_prefix_to_object['/a'].append(object_name)
else:
# At the root of the bucket
object_name = 'Object_{}'.format(i)
bulk_get_prefix_to_object[''].append(object_name)
object_content = generate_random_string(CONTENT_STRING_LENGTH)
object_storage_client.put_object(util.NAMESPACE, create_bucket_request.name, object_name, object_content)
bulk_get_object_to_content[object_name] = object_content
# makedirs creates all subfolders recursively
root_bulk_put_folder = 'tests/temp/bulk_put_{}'.format(util.random_number_string())
bulk_put_folder_leaf = '{}/subfolder1/subfolder2/subfolder3'.format(root_bulk_put_folder)
if not os.path.exists(bulk_put_folder_leaf):
os.makedirs(bulk_put_folder_leaf)
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_put_bucket_name = create_bucket_request.name
subfolders = ['', 'subfolder1', 'subfolder1/subfolder2', 'subfolder1/subfolder2/subfolder3']
for subfolder in subfolders:
if subfolder == '':
full_folder = root_bulk_put_folder
else:
full_folder = os.path.join(root_bulk_put_folder, subfolder)
for i in range(OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT + 1):
file_path = '{}/object_{}'.format(full_folder, i)
if i != 0 and i % OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT == 0:
# Put in one big file per subfolder
util.create_large_file(file_path, LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
bulk_put_large_files.add(file_path)
elif i != 0 and i % 10 == 0:
# Put in the occasional file with a reasonable size so that we can force multipart
util.create_large_file(file_path, MID_SIZED_FILE_IN_MEBIBTYES)
bulk_put_mid_sized_files.add(file_path)
else:
with open(file_path, 'w') as f:
f.write(generate_random_string(CONTENT_STRING_LENGTH))
yield
# Tear down stuff by deleting all the things and then deleting the buckets
delete_bucket_and_all_items(object_storage_client, bulk_get_bucket_name)
delete_bucket_and_all_items(object_storage_client, bulk_put_bucket_name)
# Remove all directories recursively
shutil.rmtree(root_bulk_put_folder)
@util.skip_while_rerecording
def test_normalize_object_name_path():
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path', '/')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this\\is\\a\\path', '\\')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this/is/a\\path', '\\')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '/')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '\\')
@util.skip_while_rerecording
def test_get_all_objects_in_bucket(vcr_fixture):
download_folder = 'tests/temp/get_all_{}'.format(bulk_get_bucket_name)
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
print(result.output)
# Ensure that content matches
for object_name in bulk_get_object_to_content:
if object_name[0] == '/' or object_name[0] == '\\':
file_path = os.path.join(download_folder, object_name[1:])
else:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_and_subdirectories(vcr_fixture):
download_folder = 'tests/temp/get_directory_and_subdirectories_{}'.format(bulk_get_bucket_name)
# This should get us a/b/<object>, a/b/c/<object> and a/b/c/d/<object>
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b'])
for object_name in bulk_get_prefix_to_object['a/b']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c/d']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b']) + len(bulk_get_prefix_to_object['a/b/c']) + len(bulk_get_prefix_to_object['a/b/c/d']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_no_subdirectory(vcr_fixture):
download_folder = 'tests/temp/get_directory_only_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b/c/', '--delimiter', '/'])
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b/c']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_files_skipped():
download_folder = 'tests/temp/skip_and_replace_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
# Sanity check
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
# We should skip over all objects since there is no --overwrite. There should be prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over all objects since we say --no-overwrite. Additionally there should be no prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over no objects since we --overwrite
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_result['skipped-objects']) == 0
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_no_objects(vcr_fixture):
download_folder = 'tests/temp/no_objects_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'batman'])
assert 0 == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_multipart(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
large_file_root_dir = os.path.join('tests', 'temp', 'multipart_get_large_files')
if not os.path.exists(large_file_root_dir):
os.makedirs(large_file_root_dir)
util.create_large_file(os.path.join(large_file_root_dir, '1.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '2.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '3.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '4.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '5.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '6.bin'), 1) # Creates a 1 MiB file for variety
invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', large_file_root_dir
])
large_file_verify_dir = os.path.join('tests', 'temp', 'multipart_get_large_files_verify')
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--download-dir', large_file_verify_dir, '--multipart-download-threshold', '128'])
assert get_count_of_files_in_folder_and_subfolders(large_file_verify_dir) == 6
assert filecmp.cmp(os.path.join(large_file_root_dir, '1.bin'), os.path.join(large_file_verify_dir, '1.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '2.bin'), os.path.join(large_file_verify_dir, '2.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '3.bin'), os.path.join(large_file_verify_dir, '3.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '4.bin'), os.path.join(large_file_verify_dir, '4.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '5.bin'), os.path.join(large_file_verify_dir, '5.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '6.bin'), os.path.join(large_file_verify_dir, '6.bin'))
shutil.rmtree(large_file_root_dir)
shutil.rmtree(large_file_verify_dir)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
# Since we've created a reasonable number of objects in this test suite, it's a good opportunity to test using the --all and --limit parameters
@util.skip_while_rerecording
def test_list_all_objects_operations(vcr_fixture):
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all', '--page-size', '20'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '47'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 47
assert 'next-start-with' in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '33', '--page-size', '3'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 33
assert 'next-start-with' in result.output
# Bulk puts objects, uses multipart where appropriate (when we breach the default of 128MiB)
@util.skip_while_rerecording
def test_bulk_put_default_options():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
# If we try to put the same files into the same bucket without --overwrite then everything should be skipped. There should be prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# If we pass --no-overwrite then everything should be skipped. There should be no prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# Now we force it
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == len(object_name_set)
for object_name in object_name_set:
assert object_name in parsed_result['uploaded-objects']
shutil.rmtree(download_folder)
# Bulk puts objects with --content-type as auto
@util.skip_while_rerecording
def test_bulk_put_auto_content_type():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--content-type', 'auto', '--overwrite'])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
assert guess_type(source_file_path) == guess_type(downloaded_file_path)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
shutil.rmtree(download_folder)
# Tests that multipart params are applied:
#
# - Try to upload with a part size of 10MiB (this will force the large and mid-sized files to be multipart uploaded)
# - Try to upload with multipart disabled
@util.skip_while_rerecording
def test_bulk_put_with_multipart_params(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', root_bulk_put_folder,
'--part-size', '10'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
result = invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', root_bulk_put_folder,
'--no-multipart',
'--overwrite'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
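# Verifies that --object-prefix is prepended to every uploaded object name and that the prefix can be used to
# scope a subsequent bulk-download.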
@util.skip_while_rerecording
def test_bulk_put_with_prefix():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--object-prefix', 'bulk_put_prefix_test/'])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
download_folder = 'tests/temp/verify_files_bulk_put_prefix_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder, '--prefix', 'bulk_put_prefix_test/'])
actual_download_folder = os.path.join(download_folder, 'bulk_put_prefix_test')
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, actual_download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert 'bulk_put_prefix_test/{}'.format(get_object_name_from_path(root_bulk_put_folder, source_file_path)) in parsed_result['uploaded-objects']
shutil.rmtree(download_folder)
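# A bulk-upload from a directory that does not exist should fail fast with a usage error.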
@util.skip_while_rerecording
def test_bulk_put_with_non_existent_folder():
fake_directory = 'tests/folder/not/exist'
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', fake_directory])
assert 'UsageError' in result.output
assert 'The specified --src-dir {} (expanded to: {}) does not exist'.format(fake_directory, fake_directory) in result.output
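# Exercises --include patterns across bulk-upload, bulk-download and bulk-delete: only objects matching at
# least one pattern should be uploaded, downloaded or deleted.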
@util.skip_while_rerecording
def test_bulk_put_get_delete_with_inclusions(object_storage_client):
inclusion_test_folder = os.path.join('tests', 'temp', 'os_bulk_upload_inclusion_test')
if not os.path.exists(inclusion_test_folder):
os.makedirs(inclusion_test_folder)
# Make some files for include/exclude
folders_to_files = {
'': ['test_file1.txt', 'test_file2.png'],
'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
'subfolder/subfolder2': ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
}
for folder, files in six.iteritems(folders_to_files):
folder_path = os.path.join(inclusion_test_folder, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file in files:
file_path = os.path.join(folder_path, file)
with open(file_path, 'w') as f:
# For non-text extension types this won't create a valid file, but for testing is probably OK
f.write(generate_random_string(CONTENT_STRING_LENGTH))
result = invoke([
'os',
'object',
'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--src-dir', inclusion_test_folder,
'--object-prefix', 'inclusion_test/',
'--include', '*.txt', # Matches test_file1.txt, subfolder/hello.txt, subfolder/subfolder2/blag.txt
'--include', 'subfolder/*.png', # Matches subfolder/testfile3.png, subfolder/subfolder2/testfile4.png
'--include', 'subfolder/[b]lah.pdf', # Matches subfolder/blah.pdf
'--include', '*/[ax]yz.jpg' # Matches subfolder/subfolder2/xyz.jpg
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
expected_uploaded_files = [
'{}{}'.format('inclusion_test/', 'test_file1.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/hello.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/blag.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/testfile3.png'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/testfile4.png'),
'{}{}'.format('inclusion_test/', 'subfolder/blah.pdf'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg')
]
# Check that we uploaded what we said we did
assert len(parsed_result['uploaded-objects']) == len(expected_uploaded_files)
for f in expected_uploaded_files:
assert f in parsed_result['uploaded-objects']
download_folder_base = os.path.join('tests', 'temp', 'verify_os_bulk_upload_inclusion_test')
verify_downloaded_folders_for_inclusion_exclusion_tests(
expected_uploaded_files=expected_uploaded_files,
source_folder=inclusion_test_folder,
download_folder=download_folder_base,
download_prefix_no_slash='inclusion_test'
)
# Download objects with inclusions to make sure that works
target_download_folder = os.path.join(download_folder_base, 'get_with_include')
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/*.png',
'--include', 'subfolder/blah.pdf',
])
expected_uploaded_files.remove('{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg')) # This is not in our --include switches
assert not os.path.exists(os.path.join(target_download_folder, 'inclusion_test', 'subfolder', 'subfolder2', 'xyz.jpg'))
for expected_file in expected_uploaded_files:
target_file = os.path.join(target_download_folder, expected_file)
original_file = target_file.replace(os.path.join(target_download_folder, 'inclusion_test'), inclusion_test_folder)
assert os.path.exists(target_file)
assert filecmp.cmp(original_file, target_file, shallow=False)
# Download a specific object with inclusions
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'inclusion_test/',
'--include', 'subfolder/subfolder2/xyz.jpg'
])
assert os.path.exists(os.path.join(target_download_folder, 'inclusion_test', 'subfolder', 'subfolder2', 'xyz.jpg'))
# Delete objects with inclusions
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/blah.pdf',
'--dry-run'
])
parsed_dry_run_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_dry_run_result['deleted-objects']) == 4
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/blah.pdf',
'--force'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert set(parsed_result['deleted-objects']) == set(parsed_dry_run_result['deleted-objects'])
list_objects_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bulk_put_bucket_name,
prefix='inclusion_test/',
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
remaining_objects = []
for response in list_objects_responses:
remaining_objects.extend(map(lambda obj: obj.name, response.data.objects))
assert len(remaining_objects) == 3
assert '{}{}'.format('inclusion_test/', 'subfolder/testfile3.png') in remaining_objects
assert '{}{}'.format('inclusion_test/', 'subfolder/subfolder2/testfile4.png') in remaining_objects
assert '{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg') in remaining_objects
shutil.rmtree(target_download_folder)
shutil.rmtree(inclusion_test_folder)
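# Exercises --exclude patterns across bulk-upload, bulk-download and bulk-delete: objects matching any
# pattern should be left out of the operation.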
@util.skip_while_rerecording
def test_bulk_put_get_delete_with_exclusions(object_storage_client):
exclusion_test_folder = os.path.join('tests', 'temp', 'os_bulk_upload_exclusion_test')
if not os.path.exists(exclusion_test_folder):
os.makedirs(exclusion_test_folder)
# Make some files for include/exclude
folders_to_files = {
'': ['test_file1.txt', 'test_file2.png'],
'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
'subfolder/subfolder2': ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
}
for folder, files in six.iteritems(folders_to_files):
folder_path = os.path.join(exclusion_test_folder, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file in files:
file_path = os.path.join(folder_path, file)
with open(file_path, 'w') as f:
# For non-text extension types this won't create a valid file, but for testing is probably OK
f.write(generate_random_string(CONTENT_STRING_LENGTH))
result = invoke([
'os',
'object',
'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--src-dir', exclusion_test_folder,
'--object-prefix', 'exclusion_test/',
'--exclude', '*.txt',
'--exclude', '*.ps1', # Shouldn't match anything
'--exclude', 'subfolder/subfolder2/xyz.jpg',
'--exclude', 'subfolder/[spqr]lah.pdf' # blah.pdf should still be included because it's not slah.pdf, plah.pdf, qlah.pdf or rlah.pdf
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
expected_uploaded_files = [
'{}{}'.format('exclusion_test/', 'test_file2.png'),
'{}{}'.format('exclusion_test/', 'subfolder/blah.pdf'),
'{}{}'.format('exclusion_test/', 'subfolder/testfile3.png'),
'{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg'),
'{}{}'.format('exclusion_test/', 'subfolder/subfolder2/testfile4.png')
]
# Check that we uploaded what we said we did
assert len(parsed_result['uploaded-objects']) == len(expected_uploaded_files)
for f in expected_uploaded_files:
assert f in parsed_result['uploaded-objects']
download_folder_base = os.path.join('tests', 'temp', 'verify_os_bulk_upload_exclusion_test')
verify_downloaded_folders_for_inclusion_exclusion_tests(
expected_uploaded_files=expected_uploaded_files,
source_folder=exclusion_test_folder,
download_folder=download_folder_base,
download_prefix_no_slash='exclusion_test'
)
# Download objects with exclusions to make sure that works
target_download_folder = os.path.join(download_folder_base, 'get_with_exclude')
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/subfolder2/*.png',
'--exclude', 'subfolder/blah.pdf',
])
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'blah.pdf'))
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'subfolder2', 'byz.jpg'))
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'subfolder2', 'testfile4.png'))
assert get_count_of_files_in_folder_and_subfolders(target_download_folder) == 2
assert os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'test_file2.png'))
assert os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'testfile3.png'))
assert filecmp.cmp(
os.path.join(exclusion_test_folder, 'test_file2.png'),
os.path.join(target_download_folder, 'exclusion_test', 'test_file2.png')
)
assert filecmp.cmp(
os.path.join(exclusion_test_folder, 'subfolder', 'testfile3.png'),
os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'testfile3.png')
)
# Delete objects with exclusions
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/blah.pdf',
'--dry-run'
])
parsed_dry_run_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_dry_run_result['deleted-objects']) == 3
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/blah.pdf',
'--force'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert set(parsed_result['deleted-objects']) == set(parsed_dry_run_result['deleted-objects'])
list_objects_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bulk_put_bucket_name,
prefix='exclusion_test/',
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
remaining_objects = []
for response in list_objects_responses:
remaining_objects.extend(map(lambda obj: obj.name, response.data.objects))
assert len(remaining_objects) == 2
assert '{}{}'.format('exclusion_test/', 'subfolder/blah.pdf') in remaining_objects
assert '{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg') in remaining_objects
shutil.rmtree(target_download_folder)
shutil.rmtree(exclusion_test_folder)
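# Bulk-delete against an empty bucket should report that there is nothing to delete.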
@util.skip_while_rerecording
def test_delete_when_no_objects_in_bucket(vcr_fixture, object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkDelete_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name])
assert 'There are no objects to delete in {}'.format(create_bucket_request.name) in result.output
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
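# --dry-run should list the objects that would be deleted (optionally scoped by --prefix/--delimiter) without
# actually deleting anything.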
@util.skip_while_rerecording
def test_delete_dry_run(vcr_fixture):
# Dry-run against entire bucket
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run'])
parsed_result = json.loads(result.output)
assert set(parsed_result['deleted-objects']) == set(bulk_get_object_to_content.keys())
# Dry-run against a folder and all subfolders
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--prefix', 'a/b/', '--dry-run'])
parsed_result = json.loads(result.output)
expected_objects = set().union(bulk_get_prefix_to_object['a/b'], bulk_get_prefix_to_object['a/b/c'], bulk_get_prefix_to_object['a/b/c/d'])
assert set(parsed_result['deleted-objects']) == expected_objects
# Dry-run against a folder and no subfolders
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--prefix', 'a/b/', '--delimiter', '/', '--dry-run'])
parsed_result = json.loads(result.output)
assert set(parsed_result['deleted-objects']) == set(bulk_get_prefix_to_object['a/b'])
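# End-to-end delete: upload a folder, confirm the warning prompt mentions the object count, then delete with
# --force and verify the bucket is empty afterwards.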
@util.skip_while_rerecording
def test_delete(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkDelete_{}'.format(random.randint(0, 1000000))
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--src-dir', root_bulk_put_folder])
num_objects_to_delete = get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Sanity check that the bucket has things in it
assert get_number_of_objects_in_bucket(object_storage_client, create_bucket_request.name) > 0
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name])
if num_objects_to_delete >= 1000:
confirm_prompt = 'WARNING: This command will delete at least {} objects. Are you sure you wish to continue?'.format(num_objects_to_delete)
else:
confirm_prompt = 'WARNING: This command will delete {} objects. Are you sure you wish to continue?'.format(num_objects_to_delete)
assert confirm_prompt in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--force'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert len(parsed_result['deleted-objects']) == num_objects_to_delete
# Check that the bucket is now empty
assert get_number_of_objects_in_bucket(object_storage_client, create_bucket_request.name) == 0
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
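# Verifies that --output table and --query (JMESPath) are honoured by the bulk upload/download/delete commands.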
@util.skip_while_rerecording
def test_bulk_operation_table_output_query(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageTableOutput_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--src-dir', root_bulk_put_folder, '--output', 'table', '--query', "[?action=='Uploaded'].{file: file, \"opc-content-md5\": \"opc-content-md5\"}"])
assert 'file' in result.output
assert 'opc-content-md5' in result.output
assert 'etag' not in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run', '--output', 'table'])
assert 'action' in result.output
assert 'object' in result.output
assert '/a/Object_1' in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run', '--output', 'table', '--query', "[?object=='Object_0'][object]"])
assert 'action' not in result.output
assert '/a/Object_1' not in result.output
assert 'Object_0' in result.output
target_download_folder = os.path.join('tests', 'temp', create_bucket_request.name)
result = invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--download-dir', target_download_folder,
'--output', 'table',
])
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
shutil.rmtree(target_download_folder)
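# Thin wrapper around the CLI test harness, e.g.
#   invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name])
# Pass debug=True to prepend the global --debug flag.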
def invoke(commands, debug=False, **args):
    if debug is True:
        commands = ['--debug'] + commands
    return util.invoke_command(commands, **args)
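# Counts every file underneath the given directory, including files in nested subdirectories.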
def get_count_of_files_in_folder_and_subfolders(directory):
file_count = 0
for dir_name, subdir_list, file_list in os.walk(directory):
file_count = file_count + len(file_list)
return file_count
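# Returns deterministic content when replaying recorded (VCR) responses so cassettes stay stable, otherwise a
# random lowercase string of the requested length.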
def generate_random_string(length):
if test_config_container.using_vcr_with_mock_responses():
return 'a' * length
else:
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
# Pull JSON data out of output which may have stuff other than JSON in it. Assumes that nothing
# comes after the JSON data
def parse_json_response_from_mixed_output(output):
lines = output.split('\n')
json_str = ''
object_begun = False
for line in lines:
if object_begun or line.startswith('{'):
object_begun = True
json_str += line
return json.loads(json_str)
# For the bulk operations, object names are taken from the file path of the thing we uploaded. Normalize to
# / in the paths (Windows can go both ways) then strip the leading root folder so only the object name remains
def get_object_name_from_path(path_root, full_path):
return full_path.replace(os.sep, '/').replace(path_root + '/', '')
def delete_bucket_and_all_items(object_storage_client, bucket_name):
list_object_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bucket_name,
prefix=None,
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
for response in list_object_responses:
for obj in response.data.objects:
object_storage_client.delete_object(util.NAMESPACE, bucket_name, obj.name)
object_storage_client.delete_bucket(util.NAMESPACE, bucket_name)
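# Counts the objects currently in a bucket by paging through list-objects responses.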
def get_number_of_objects_in_bucket(object_storage_client, bucket_name):
list_object_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bucket_name,
prefix=None,
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
num_objects_in_bucket = 0
for response in list_object_responses:
num_objects_in_bucket = num_objects_in_bucket + len(response.data.objects)
return num_objects_in_bucket
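# Downloads everything under download_prefix_no_slash and checks that exactly the expected files were
# uploaded and that their contents match the source files.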
def verify_downloaded_folders_for_inclusion_exclusion_tests(expected_uploaded_files, source_folder, download_folder, download_prefix_no_slash):
# Download uploaded files and check they are the same
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder, '--prefix', download_prefix_no_slash + '/'])
# The strings in the expected_uploaded_files array have a "/" in them, but this doesn't match with paths on Windows. Using normpath converts these to
# use "\" on Windows and so our matching/comparison works. For Linux/Unix/macOS this doesn't appear to have an impact
normalized_expected_uploaded_files = []
for euf in expected_uploaded_files:
normalized_expected_uploaded_files.append(os.path.normpath(euf))
actual_download_folder = os.path.join(download_folder, download_prefix_no_slash)
files_compared = 0
for dir_name, subdir_list, file_list in os.walk(source_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(source_folder, actual_download_folder)
if downloaded_file_path.replace(actual_download_folder, download_prefix_no_slash) in normalized_expected_uploaded_files:
files_compared += 1
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
assert files_compared == len(expected_uploaded_files)
shutil.rmtree(actual_download_folder)
| [
"pytest.fixture",
"mimetypes.guess_type",
"os.walk",
"os.path.exists",
"tests.util.invoke_command",
"os.path.normpath",
"tests.util.random_number_string",
"random.randint",
"tests.util.create_large_file",
"json.loads",
"random.choice",
"tests.test_config_container.create_vcr",
"oci.object_storage.models.CreateBucketDetails",
"tests.util.clear_test_data",
"os.makedirs",
"tests.test_config_container.using_vcr_with_mock_responses",
"os.path.join",
"services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects",
"shutil.rmtree",
"services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage",
"six.iteritems",
"filecmp.cmp"
]
| [((1582, 1626), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (1596, 1626), False, 'import pytest\n'), ((1882, 1929), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (1927, 1929), False, 'import oci\n'), ((2096, 2209), 'tests.util.clear_test_data', 'util.clear_test_data', (['object_storage_client', 'util.NAMESPACE', 'util.COMPARTMENT_ID', 'create_bucket_request.name'], {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)\n', (2116, 2209), False, 'from tests import util\n'), ((4004, 4051), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (4049, 4051), False, 'import oci\n'), ((4218, 4331), 'tests.util.clear_test_data', 'util.clear_test_data', (['object_storage_client', 'util.NAMESPACE', 'util.COMPARTMENT_ID', 'create_bucket_request.name'], {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)\n', (4238, 4331), False, 'from tests import util\n'), ((5862, 5897), 'shutil.rmtree', 'shutil.rmtree', (['root_bulk_put_folder'], {}), '(root_bulk_put_folder)\n', (5875, 5897), False, 'import shutil\n'), ((7923, 7953), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (7936, 7953), False, 'import shutil\n'), ((9450, 9480), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (9463, 9480), False, 'import shutil\n'), ((10251, 10281), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (10264, 10281), False, 'import shutil\n'), ((12067, 12097), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (12080, 12097), False, 'import shutil\n'), ((12501, 12531), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (12514, 12531), False, 'import shutil\n'), ((12638, 12685), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (12683, 12685), False, 'import oci\n'), ((12862, 12975), 'tests.util.clear_test_data', 'util.clear_test_data', (['object_storage_client', 'util.NAMESPACE', 'util.COMPARTMENT_ID', 'create_bucket_request.name'], {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)\n', (12882, 12975), False, 'from tests import util\n'), ((13077, 13135), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', '"""multipart_get_large_files"""'], {}), "('tests', 'temp', 'multipart_get_large_files')\n", (13089, 13135), False, 'import os\n'), ((14102, 14167), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', '"""multipart_get_large_files_verify"""'], {}), "('tests', 'temp', 'multipart_get_large_files_verify')\n", (14114, 14167), False, 'import os\n'), ((15140, 15174), 'shutil.rmtree', 'shutil.rmtree', (['large_file_root_dir'], {}), '(large_file_root_dir)\n', (15153, 15174), False, 'import shutil\n'), ((15179, 15215), 'shutil.rmtree', 'shutil.rmtree', (['large_file_verify_dir'], {}), '(large_file_verify_dir)\n', (15192, 15215), False, 'import shutil\n'), ((15671, 15696), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (15681, 15696), False, 'import json\n'), ((15996, 16021), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (16006, 16021), False, 'import json\n'), ((16308, 16333), 
'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (16318, 16333), False, 'import json\n'), ((16598, 16623), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (16608, 16623), False, 'import json\n'), ((17808, 17837), 'os.walk', 'os.walk', (['root_bulk_put_folder'], {}), '(root_bulk_put_folder)\n', (17815, 17837), False, 'import os\n'), ((20215, 20245), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (20228, 20245), False, 'import shutil\n'), ((21338, 21367), 'os.walk', 'os.walk', (['root_bulk_put_folder'], {}), '(root_bulk_put_folder)\n', (21345, 21367), False, 'import os\n'), ((22099, 22129), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (22112, 22129), False, 'import shutil\n'), ((22461, 22508), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (22506, 22508), False, 'import oci\n'), ((22685, 22798), 'tests.util.clear_test_data', 'util.clear_test_data', (['object_storage_client', 'util.NAMESPACE', 'util.COMPARTMENT_ID', 'create_bucket_request.name'], {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)\n', (22705, 22798), False, 'from tests import util\n'), ((24953, 25006), 'os.path.join', 'os.path.join', (['download_folder', '"""bulk_put_prefix_test"""'], {}), "(download_folder, 'bulk_put_prefix_test')\n", (24965, 25006), False, 'import os\n'), ((25051, 25080), 'os.walk', 'os.walk', (['root_bulk_put_folder'], {}), '(root_bulk_put_folder)\n', (25058, 25080), False, 'import os\n'), ((25670, 25700), 'shutil.rmtree', 'shutil.rmtree', (['download_folder'], {}), '(download_folder)\n', (25683, 25700), False, 'import shutil\n'), ((26275, 26337), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', '"""os_bulk_upload_inclusion_test"""'], {}), "('tests', 'temp', 'os_bulk_upload_inclusion_test')\n", (26287, 26337), False, 'import os\n'), ((26729, 26760), 'six.iteritems', 'six.iteritems', (['folders_to_files'], {}), '(folders_to_files)\n', (26742, 26760), False, 'import six\n'), ((28799, 28868), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', '"""verify_os_bulk_upload_inclusion_test"""'], {}), "('tests', 'temp', 'verify_os_bulk_upload_inclusion_test')\n", (28811, 28868), False, 'import os\n'), ((29227, 29281), 'os.path.join', 'os.path.join', (['download_folder_base', '"""get_with_include"""'], {}), "(download_folder_base, 'get_with_include')\n", (29239, 29281), False, 'import os\n'), ((31719, 32020), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', ([], {'client': 'object_storage_client', 'request_id': 'None', 'namespace': 'util.NAMESPACE', 'bucket_name': 'bulk_put_bucket_name', 'prefix': '"""inclusion_test/"""', 'start': 'None', 'end': 'None', 'limit': '(1000)', 'delimiter': 'None', 'fields': '"""name"""', 'retrieve_all': '(True)'}), "(client\n =object_storage_client, request_id=None, namespace=util.NAMESPACE,\n bucket_name=bulk_put_bucket_name, prefix='inclusion_test/', start=None,\n end=None, limit=1000, delimiter=None, fields='name', retrieve_all=True)\n", (31790, 32020), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((32592, 32629), 'shutil.rmtree', 'shutil.rmtree', (['target_download_folder'], {}), '(target_download_folder)\n', (32605, 32629), False, 'import 
shutil\n'), ((32634, 32670), 'shutil.rmtree', 'shutil.rmtree', (['inclusion_test_folder'], {}), '(inclusion_test_folder)\n', (32647, 32670), False, 'import shutil\n'), ((32799, 32861), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', '"""os_bulk_upload_exclusion_test"""'], {}), "('tests', 'temp', 'os_bulk_upload_exclusion_test')\n", (32811, 32861), False, 'import os\n'), ((33253, 33284), 'six.iteritems', 'six.iteritems', (['folders_to_files'], {}), '(folders_to_files)\n', (33266, 33284), False, 'import six\n'), ((35096, 35165), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', '"""verify_os_bulk_upload_exclusion_test"""'], {}), "('tests', 'temp', 'verify_os_bulk_upload_exclusion_test')\n", (35108, 35165), False, 'import os\n'), ((35524, 35578), 'os.path.join', 'os.path.join', (['download_folder_base', '"""get_with_exclude"""'], {}), "(download_folder_base, 'get_with_exclude')\n", (35536, 35578), False, 'import os\n'), ((37978, 38279), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', ([], {'client': 'object_storage_client', 'request_id': 'None', 'namespace': 'util.NAMESPACE', 'bucket_name': 'bulk_put_bucket_name', 'prefix': '"""exclusion_test/"""', 'start': 'None', 'end': 'None', 'limit': '(1000)', 'delimiter': 'None', 'fields': '"""name"""', 'retrieve_all': '(True)'}), "(client\n =object_storage_client, request_id=None, namespace=util.NAMESPACE,\n bucket_name=bulk_put_bucket_name, prefix='exclusion_test/', start=None,\n end=None, limit=1000, delimiter=None, fields='name', retrieve_all=True)\n", (38049, 38279), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((38743, 38780), 'shutil.rmtree', 'shutil.rmtree', (['target_download_folder'], {}), '(target_download_folder)\n', (38756, 38780), False, 'import shutil\n'), ((38785, 38821), 'shutil.rmtree', 'shutil.rmtree', (['exclusion_test_folder'], {}), '(exclusion_test_folder)\n', (38798, 38821), False, 'import shutil\n'), ((38960, 39007), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (39005, 39007), False, 'import oci\n'), ((39825, 39850), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (39835, 39850), False, 'import json\n'), ((40169, 40194), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (40179, 40194), False, 'import json\n'), ((40653, 40678), 'json.loads', 'json.loads', (['result.output'], {}), '(result.output)\n', (40663, 40678), False, 'import json\n'), ((40868, 40915), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (40913, 40915), False, 'import oci\n'), ((41080, 41193), 'tests.util.clear_test_data', 'util.clear_test_data', (['object_storage_client', 'util.NAMESPACE', 'util.COMPARTMENT_ID', 'create_bucket_request.name'], {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)\n', (41100, 41193), False, 'from tests import util\n'), ((42864, 42911), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ([], {}), '()\n', (42909, 42911), False, 'import oci\n'), ((43078, 43191), 'tests.util.clear_test_data', 'util.clear_test_data', (['object_storage_client', 'util.NAMESPACE', 'util.COMPARTMENT_ID', 'create_bucket_request.name'], {}), '(object_storage_client, 
util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)\n', (43098, 43191), False, 'from tests import util\n'), ((44295, 44352), 'os.path.join', 'os.path.join', (['"""tests"""', '"""temp"""', 'create_bucket_request.name'], {}), "('tests', 'temp', create_bucket_request.name)\n", (44307, 44352), False, 'import os\n'), ((44683, 44720), 'shutil.rmtree', 'shutil.rmtree', (['target_download_folder'], {}), '(target_download_folder)\n', (44696, 44720), False, 'import shutil\n'), ((44842, 44879), 'tests.util.invoke_command', 'util.invoke_command', (['commands'], {}), '(commands, **args)\n', (44861, 44879), False, 'from tests import util\n'), ((45006, 45024), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (45013, 45024), False, 'import os\n'), ((45143, 45196), 'tests.test_config_container.using_vcr_with_mock_responses', 'test_config_container.using_vcr_with_mock_responses', ([], {}), '()\n', (45194, 45196), False, 'from tests import test_config_container\n'), ((45717, 45737), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (45727, 45737), False, 'import json\n'), ((46143, 46422), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', ([], {'client': 'object_storage_client', 'request_id': 'None', 'namespace': 'util.NAMESPACE', 'bucket_name': 'bucket_name', 'prefix': 'None', 'start': 'None', 'end': 'None', 'limit': '(1000)', 'delimiter': 'None', 'fields': '"""name"""', 'retrieve_all': '(True)'}), "(client\n =object_storage_client, request_id=None, namespace=util.NAMESPACE,\n bucket_name=bucket_name, prefix=None, start=None, end=None, limit=1000,\n delimiter=None, fields='name', retrieve_all=True)\n", (46214, 46422), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((46848, 47127), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', ([], {'client': 'object_storage_client', 'request_id': 'None', 'namespace': 'util.NAMESPACE', 'bucket_name': 'bucket_name', 'prefix': 'None', 'start': 'None', 'end': 'None', 'limit': '(1000)', 'delimiter': 'None', 'fields': '"""name"""', 'retrieve_all': '(True)'}), "(client\n =object_storage_client, request_id=None, namespace=util.NAMESPACE,\n bucket_name=bucket_name, prefix=None, start=None, end=None, limit=1000,\n delimiter=None, fields='name', retrieve_all=True)\n", (46919, 47127), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((48258, 48313), 'os.path.join', 'os.path.join', (['download_folder', 'download_prefix_no_slash'], {}), '(download_folder, download_prefix_no_slash)\n', (48270, 48313), False, 'import os\n'), ((48381, 48403), 'os.walk', 'os.walk', (['source_folder'], {}), '(source_folder)\n', (48388, 48403), False, 'import os\n'), ((48978, 49015), 'shutil.rmtree', 'shutil.rmtree', (['actual_download_folder'], {}), '(actual_download_folder)\n', (48991, 49015), False, 'import shutil\n'), ((2000, 2027), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (2025, 2027), False, 'from tests import util\n'), ((3761, 3788), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (3786, 3788), False, 'from tests import util\n'), ((3895, 3931), 'os.path.exists', 'os.path.exists', (['bulk_put_folder_leaf'], {}), '(bulk_put_folder_leaf)\n', 
(3909, 3931), False, 'import os\n'), ((3941, 3974), 'os.makedirs', 'os.makedirs', (['bulk_put_folder_leaf'], {}), '(bulk_put_folder_leaf)\n', (3952, 3974), False, 'import os\n'), ((4122, 4149), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (4147, 4149), False, 'from tests import util\n'), ((6000, 6119), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""/this/is/a/path"""'], {}), "(\n '/this/is/a/path')\n", (6095, 6119), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((6147, 6271), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""/this/is/a/path"""', '"""/"""'], {}), "(\n '/this/is/a/path', '/')\n", (6242, 6271), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((6299, 6428), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""\\\\this\\\\is\\\\a\\\\path"""', '"""\\\\"""'], {}), "(\n '\\\\this\\\\is\\\\a\\\\path', '\\\\')\n", (6394, 6428), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((6456, 6583), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""\\\\this/is/a\\\\path"""', '"""\\\\"""'], {}), "(\n '\\\\this/is/a\\\\path', '\\\\')\n", (6551, 6583), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((6608, 6723), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""thisisapath"""'], {}), "(\n 'thisisapath')\n", (6703, 6723), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((6747, 6867), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""thisisapath"""', '"""/"""'], {}), "(\n 'thisisapath', '/')\n", (6842, 6867), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((6891, 7012), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', (['"""thisisapath"""', '"""\\\\"""'], {}), "(\n 'thisisapath', '\\\\')\n", (6986, 7012), True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((8465, 8507), 'os.path.join', 'os.path.join', (['download_folder', 'object_name'], {}), '(download_folder, object_name)\n', (8477, 8507), False, 'import os\n'), ((8751, 8793), 'os.path.join', 'os.path.join', (['download_folder', 
'object_name'], {}), '(download_folder, object_name)\n', (8763, 8793), False, 'import os\n'), ((9039, 9081), 'os.path.join', 'os.path.join', (['download_folder', 'object_name'], {}), '(download_folder, object_name)\n', (9051, 9081), False, 'import os\n'), ((9924, 9966), 'os.path.join', 'os.path.join', (['download_folder', 'object_name'], {}), '(download_folder, object_name)\n', (9936, 9966), False, 'import os\n'), ((12766, 12793), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (12791, 12793), False, 'from tests import util\n'), ((13147, 13182), 'os.path.exists', 'os.path.exists', (['large_file_root_dir'], {}), '(large_file_root_dir)\n', (13161, 13182), False, 'import os\n'), ((13192, 13224), 'os.makedirs', 'os.makedirs', (['large_file_root_dir'], {}), '(large_file_root_dir)\n', (13203, 13224), False, 'import os\n'), ((13252, 13294), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""1.bin"""'], {}), "(large_file_root_dir, '1.bin')\n", (13264, 13294), False, 'import os\n'), ((13361, 13403), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""2.bin"""'], {}), "(large_file_root_dir, '2.bin')\n", (13373, 13403), False, 'import os\n'), ((13470, 13512), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""3.bin"""'], {}), "(large_file_root_dir, '3.bin')\n", (13482, 13512), False, 'import os\n'), ((13579, 13621), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""4.bin"""'], {}), "(large_file_root_dir, '4.bin')\n", (13591, 13621), False, 'import os\n'), ((13688, 13730), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""5.bin"""'], {}), "(large_file_root_dir, '5.bin')\n", (13700, 13730), False, 'import os\n'), ((13797, 13839), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""6.bin"""'], {}), "(large_file_root_dir, '6.bin')\n", (13809, 13839), False, 'import os\n'), ((14480, 14522), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""1.bin"""'], {}), "(large_file_root_dir, '1.bin')\n", (14492, 14522), False, 'import os\n'), ((14524, 14568), 'os.path.join', 'os.path.join', (['large_file_verify_dir', '"""1.bin"""'], {}), "(large_file_verify_dir, '1.bin')\n", (14536, 14568), False, 'import os\n'), ((14593, 14635), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""2.bin"""'], {}), "(large_file_root_dir, '2.bin')\n", (14605, 14635), False, 'import os\n'), ((14637, 14681), 'os.path.join', 'os.path.join', (['large_file_verify_dir', '"""2.bin"""'], {}), "(large_file_verify_dir, '2.bin')\n", (14649, 14681), False, 'import os\n'), ((14706, 14748), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""3.bin"""'], {}), "(large_file_root_dir, '3.bin')\n", (14718, 14748), False, 'import os\n'), ((14750, 14794), 'os.path.join', 'os.path.join', (['large_file_verify_dir', '"""3.bin"""'], {}), "(large_file_verify_dir, '3.bin')\n", (14762, 14794), False, 'import os\n'), ((14819, 14861), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""4.bin"""'], {}), "(large_file_root_dir, '4.bin')\n", (14831, 14861), False, 'import os\n'), ((14863, 14907), 'os.path.join', 'os.path.join', (['large_file_verify_dir', '"""4.bin"""'], {}), "(large_file_verify_dir, '4.bin')\n", (14875, 14907), False, 'import os\n'), ((14932, 14974), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""5.bin"""'], {}), "(large_file_root_dir, '5.bin')\n", (14944, 14974), False, 'import os\n'), ((14976, 15020), 'os.path.join', 'os.path.join', (['large_file_verify_dir', '"""5.bin"""'], {}), "(large_file_verify_dir, 
'5.bin')\n", (14988, 15020), False, 'import os\n'), ((15045, 15087), 'os.path.join', 'os.path.join', (['large_file_root_dir', '"""6.bin"""'], {}), "(large_file_root_dir, '6.bin')\n", (15057, 15087), False, 'import os\n'), ((15089, 15133), 'os.path.join', 'os.path.join', (['large_file_verify_dir', '"""6.bin"""'], {}), "(large_file_verify_dir, '6.bin')\n", (15101, 15133), False, 'import os\n'), ((22589, 22616), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (22614, 22616), False, 'from tests import util\n'), ((26349, 26386), 'os.path.exists', 'os.path.exists', (['inclusion_test_folder'], {}), '(inclusion_test_folder)\n', (26363, 26386), False, 'import os\n'), ((26396, 26430), 'os.makedirs', 'os.makedirs', (['inclusion_test_folder'], {}), '(inclusion_test_folder)\n', (26407, 26430), False, 'import os\n'), ((26784, 26827), 'os.path.join', 'os.path.join', (['inclusion_test_folder', 'folder'], {}), '(inclusion_test_folder, folder)\n', (26796, 26827), False, 'import os\n'), ((29969, 30020), 'os.path.join', 'os.path.join', (['target_download_folder', 'expected_file'], {}), '(target_download_folder, expected_file)\n', (29981, 30020), False, 'import os\n'), ((30160, 30187), 'os.path.exists', 'os.path.exists', (['target_file'], {}), '(target_file)\n', (30174, 30187), False, 'import os\n'), ((30203, 30257), 'filecmp.cmp', 'filecmp.cmp', (['original_file', 'target_file'], {'shallow': '(False)'}), '(original_file, target_file, shallow=False)\n', (30214, 30257), False, 'import filecmp\n'), ((30622, 30718), 'os.path.join', 'os.path.join', (['target_download_folder', '"""inclusion_test"""', '"""subfolder"""', '"""subfolder2"""', '"""xyz.jpg"""'], {}), "(target_download_folder, 'inclusion_test', 'subfolder',\n 'subfolder2', 'xyz.jpg')\n", (30634, 30718), False, 'import os\n'), ((32873, 32910), 'os.path.exists', 'os.path.exists', (['exclusion_test_folder'], {}), '(exclusion_test_folder)\n', (32887, 32910), False, 'import os\n'), ((32920, 32954), 'os.makedirs', 'os.makedirs', (['exclusion_test_folder'], {}), '(exclusion_test_folder)\n', (32931, 32954), False, 'import os\n'), ((33308, 33351), 'os.path.join', 'os.path.join', (['exclusion_test_folder', 'folder'], {}), '(exclusion_test_folder, folder)\n', (33320, 33351), False, 'import os\n'), ((36416, 36488), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', '"""test_file2.png"""'], {}), "(target_download_folder, 'exclusion_test', 'test_file2.png')\n", (36428, 36488), False, 'import os\n'), ((36516, 36604), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', '"""subfolder"""', '"""testfile3.png"""'], {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'testfile3.png')\n", (36528, 36604), False, 'import os\n'), ((36635, 36688), 'os.path.join', 'os.path.join', (['exclusion_test_folder', '"""test_file2.png"""'], {}), "(exclusion_test_folder, 'test_file2.png')\n", (36647, 36688), False, 'import os\n'), ((36698, 36770), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', '"""test_file2.png"""'], {}), "(target_download_folder, 'exclusion_test', 'test_file2.png')\n", (36710, 36770), False, 'import os\n'), ((36809, 36874), 'os.path.join', 'os.path.join', (['exclusion_test_folder', '"""subfolder"""', '"""testfile3.png"""'], {}), "(exclusion_test_folder, 'subfolder', 'testfile3.png')\n", (36821, 36874), False, 'import os\n'), ((36884, 36972), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', 
'"""subfolder"""', '"""testfile3.png"""'], {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'testfile3.png')\n", (36896, 36972), False, 'import os\n'), ((39077, 39104), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (39102, 39104), False, 'from tests import util\n'), ((40985, 41011), 'random.randint', 'random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (40999, 41011), False, 'import random\n'), ((42982, 43009), 'tests.util.random_number_string', 'util.random_number_string', ([], {}), '()\n', (43007, 43009), False, 'from tests import util\n'), ((4707, 4752), 'os.path.join', 'os.path.join', (['root_bulk_put_folder', 'subfolder'], {}), '(root_bulk_put_folder, subfolder)\n', (4719, 4752), False, 'import os\n'), ((7518, 7564), 'os.path.join', 'os.path.join', (['download_folder', 'object_name[1:]'], {}), '(download_folder, object_name[1:])\n', (7530, 7564), False, 'import os\n'), ((7603, 7645), 'os.path.join', 'os.path.join', (['download_folder', 'object_name'], {}), '(download_folder, object_name)\n', (7615, 7645), False, 'import os\n'), ((17901, 17929), 'os.path.join', 'os.path.join', (['dir_name', 'file'], {}), '(dir_name, file)\n', (17913, 17929), False, 'import os\n'), ((18049, 18085), 'os.path.exists', 'os.path.exists', (['downloaded_file_path'], {}), '(downloaded_file_path)\n', (18063, 18085), False, 'import os\n'), ((18105, 18171), 'filecmp.cmp', 'filecmp.cmp', (['source_file_path', 'downloaded_file_path'], {'shallow': '(False)'}), '(source_file_path, downloaded_file_path, shallow=False)\n', (18116, 18171), False, 'import filecmp\n'), ((21431, 21459), 'os.path.join', 'os.path.join', (['dir_name', 'file'], {}), '(dir_name, file)\n', (21443, 21459), False, 'import os\n'), ((21579, 21615), 'os.path.exists', 'os.path.exists', (['downloaded_file_path'], {}), '(downloaded_file_path)\n', (21593, 21615), False, 'import os\n'), ((21635, 21701), 'filecmp.cmp', 'filecmp.cmp', (['source_file_path', 'downloaded_file_path'], {'shallow': '(False)'}), '(source_file_path, downloaded_file_path, shallow=False)\n', (21646, 21701), False, 'import filecmp\n'), ((25144, 25172), 'os.path.join', 'os.path.join', (['dir_name', 'file'], {}), '(dir_name, file)\n', (25156, 25172), False, 'import os\n'), ((25299, 25335), 'os.path.exists', 'os.path.exists', (['downloaded_file_path'], {}), '(downloaded_file_path)\n', (25313, 25335), False, 'import os\n'), ((25355, 25421), 'filecmp.cmp', 'filecmp.cmp', (['source_file_path', 'downloaded_file_path'], {'shallow': '(False)'}), '(source_file_path, downloaded_file_path, shallow=False)\n', (25366, 25421), False, 'import filecmp\n'), ((26843, 26870), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (26857, 26870), False, 'import os\n'), ((26884, 26908), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (26895, 26908), False, 'import os\n'), ((26961, 26992), 'os.path.join', 'os.path.join', (['folder_path', 'file'], {}), '(folder_path, file)\n', (26973, 26992), False, 'import os\n'), ((29803, 29899), 'os.path.join', 'os.path.join', (['target_download_folder', '"""inclusion_test"""', '"""subfolder"""', '"""subfolder2"""', '"""xyz.jpg"""'], {}), "(target_download_folder, 'inclusion_test', 'subfolder',\n 'subfolder2', 'xyz.jpg')\n", (29815, 29899), False, 'import os\n'), ((30065, 30119), 'os.path.join', 'os.path.join', (['target_download_folder', '"""inclusion_test"""'], {}), "(target_download_folder, 'inclusion_test')\n", (30077, 30119), False, 'import os\n'), ((33367, 
33394), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (33381, 33394), False, 'import os\n'), ((33408, 33432), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (33419, 33432), False, 'import os\n'), ((33485, 33516), 'os.path.join', 'os.path.join', (['folder_path', 'file'], {}), '(folder_path, file)\n', (33497, 33516), False, 'import os\n'), ((35970, 36049), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', '"""subfolder"""', '"""blah.pdf"""'], {}), "(target_download_folder, 'exclusion_test', 'subfolder', 'blah.pdf')\n", (35982, 36049), False, 'import os\n'), ((36081, 36177), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', '"""subfolder"""', '"""subfolder2"""', '"""byz.jpg"""'], {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'subfolder2', 'byz.jpg')\n", (36093, 36177), False, 'import os\n'), ((36205, 36307), 'os.path.join', 'os.path.join', (['target_download_folder', '"""exclusion_test"""', '"""subfolder"""', '"""subfolder2"""', '"""testfile4.png"""'], {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'subfolder2', 'testfile4.png')\n", (36217, 36307), False, 'import os\n'), ((48205, 48226), 'os.path.normpath', 'os.path.normpath', (['euf'], {}), '(euf)\n', (48221, 48226), False, 'import os\n'), ((48467, 48495), 'os.path.join', 'os.path.join', (['dir_name', 'file'], {}), '(dir_name, file)\n', (48479, 48495), False, 'import os\n'), ((1042, 1143), 'tests.test_config_container.create_vcr', 'test_config_container.create_vcr', ([], {'cassette_library_dir': '"""services/object_storage/tests/cassettes"""'}), "(cassette_library_dir=\n 'services/object_storage/tests/cassettes')\n", (1074, 1143), False, 'from tests import test_config_container\n'), ((5031, 5102), 'tests.util.create_large_file', 'util.create_large_file', (['file_path', 'LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES'], {}), '(file_path, LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)\n', (5053, 5102), False, 'from tests import util\n'), ((21721, 21749), 'mimetypes.guess_type', 'guess_type', (['source_file_path'], {}), '(source_file_path)\n', (21731, 21749), False, 'from mimetypes import guess_type\n'), ((21753, 21785), 'mimetypes.guess_type', 'guess_type', (['downloaded_file_path'], {}), '(downloaded_file_path)\n', (21763, 21785), False, 'from mimetypes import guess_type\n'), ((45259, 45296), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (45272, 45296), False, 'import random\n'), ((48788, 48824), 'os.path.exists', 'os.path.exists', (['downloaded_file_path'], {}), '(downloaded_file_path)\n', (48802, 48824), False, 'import os\n'), ((48848, 48914), 'filecmp.cmp', 'filecmp.cmp', (['source_file_path', 'downloaded_file_path'], {'shallow': '(False)'}), '(source_file_path, downloaded_file_path, shallow=False)\n', (48859, 48914), False, 'import filecmp\n'), ((5311, 5373), 'tests.util.create_large_file', 'util.create_large_file', (['file_path', 'MID_SIZED_FILE_IN_MEBIBTYES'], {}), '(file_path, MID_SIZED_FILE_IN_MEBIBTYES)\n', (5333, 5373), False, 'from tests import util\n')] |
#!/usr/bin/python3
import random
import string
import time
import subprocess
import os
import redis
import threading
def generate_string(string_size, size, dict):
    '''
    Fill `dict` with `size` random lowercase keys of length `string_size` (each key maps to itself).
    Key generation adapted from:
    https://stackoverflow.com/questions/16308989/fastest-method-to-generate-big-random-string-with-lower-latin-letters
    '''
for i in range(size):
min_lc = ord(b'a')
len_lc = 26
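        # draw string_size random bytes in one call; each byte is folded into 'a'..'z' below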
key = bytearray(random.getrandbits(8*string_size).to_bytes(string_size, 'big'))
for i, b in enumerate(key):
key[i] = min_lc + b % len_lc # convert 0..255 to 97..122
key = key.decode()
val = key
dict[key] = val
if __name__ == "__main__":
    size = 1000 # TODO: make this a command-line argument
port = 7000
FNULL = open(os.devnull, 'w')
string_size = 100000
partition = int(size/4)
print("generating test sets")
d1 = {}
d2 = {}
d3 = {}
d4 = {}
t1 = threading.Thread(target=generate_string, args = (string_size, partition, d1))
t2 = threading.Thread(target=generate_string, args = (string_size, partition, d2))
t3 = threading.Thread(target=generate_string, args = (string_size, partition, d3))
t4 = threading.Thread(target=generate_string, args = (string_size, partition, d4))
t1.start()
t2.start()
t3.start()
t4.start()
    t1.join()
    t2.join()
    t3.join()
    t4.join()
test_set = {}
test_set.update(d1)
test_set.update(d2)
test_set.update(d3)
test_set.update(d4)
print(len(test_set))
print("running tests...")
r = redis.StrictRedis(host='localhost', port=port, db=0)
start = time.time()
print("testing set")
for k,v in test_set.items():
r.set(k, v)
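    # redis WAIT: block until 3 replicas acknowledge the preceding writes (timeout 0 = no limit)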
r.wait(3, 0)
print("testing get")
for k,v in test_set.items():
r.get(k)
r.wait(3, 0)
end = time.time()
runtime = end - start
ops = size * 2
throughput = float(ops/runtime)
latency = float(1/throughput)
print("total run time: {runtime}s \n\
number of total operations with 50% Set and 50% Get: {ops} \n\
avg. throughput: {throughput} ops/s \n\
avg. latency: {latency} s".format(
runtime=runtime,
ops=ops,
throughput=throughput,
latency=latency
))
| [
"random.getrandbits",
"threading.Thread",
"time.time",
"redis.StrictRedis"
]
| [((934, 1009), 'threading.Thread', 'threading.Thread', ([], {'target': 'generate_string', 'args': '(string_size, partition, d1)'}), '(target=generate_string, args=(string_size, partition, d1))\n', (950, 1009), False, 'import threading\n'), ((1021, 1096), 'threading.Thread', 'threading.Thread', ([], {'target': 'generate_string', 'args': '(string_size, partition, d2)'}), '(target=generate_string, args=(string_size, partition, d2))\n', (1037, 1096), False, 'import threading\n'), ((1108, 1183), 'threading.Thread', 'threading.Thread', ([], {'target': 'generate_string', 'args': '(string_size, partition, d3)'}), '(target=generate_string, args=(string_size, partition, d3))\n', (1124, 1183), False, 'import threading\n'), ((1195, 1270), 'threading.Thread', 'threading.Thread', ([], {'target': 'generate_string', 'args': '(string_size, partition, d4)'}), '(target=generate_string, args=(string_size, partition, d4))\n', (1211, 1270), False, 'import threading\n'), ((1571, 1623), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""localhost"""', 'port': 'port', 'db': '(0)'}), "(host='localhost', port=port, db=0)\n", (1588, 1623), False, 'import redis\n'), ((1637, 1648), 'time.time', 'time.time', ([], {}), '()\n', (1646, 1648), False, 'import time\n'), ((1857, 1868), 'time.time', 'time.time', ([], {}), '()\n', (1866, 1868), False, 'import time\n'), ((401, 436), 'random.getrandbits', 'random.getrandbits', (['(8 * string_size)'], {}), '(8 * string_size)\n', (419, 436), False, 'import random\n')] |
"""
VAE on the swirl task.
Basically, VAEs don't work. It's probably because the prior isn't very good
and/or because the learning signal is pretty weak when both the encoder and
decoder change quickly. However, I tried also alternating between the two,
and that didn't seem to help.
"""
from torch.distributions import Normal
from torch.optim import Adam
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn as nn
import railrl.torch.pytorch_util as ptu
SWIRL_RATE = 1
T = 10
BS = 128
N_BATCHES = 2000
N_VIS = 1000
HIDDEN_SIZE = 32
VERBOSE = False
def swirl_data(batch_size):
t = np.random.uniform(size=batch_size, low=0, high=T)
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
data = np.array([x, y]).T
noise = np.random.randn(batch_size, 2) / (T * 2)
return data + noise, t.reshape(-1, 1)
def swirl_t_to_data(t):
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def kl_to_prior(means, log_stds, stds):
"""
KL between a Gaussian and a standard Gaussian.
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
"""
return 0.5 * (
- 2 * log_stds # log std_prior = 0
- 1 # d = 1
+ stds ** 2
+ means ** 2
)
class Encoder(nn.Sequential):
def encode(self, x):
return self.get_encoding_and_suff_stats(x)[0]
def get_encoding_and_suff_stats(self, x):
output = self(x)
means, log_stds = (
output[:, 0:1], output[:, 1:2]
)
stds = log_stds.exp()
epsilon = ptu.Variable(torch.randn(*means.size()))
latents = epsilon * stds + means
latents = latents
return latents, means, log_stds, stds
class Decoder(nn.Sequential):
def decode(self, latents):
output = self(latents)
means, log_stds = output[:, 0:2], output[:, 2:4]
distribution = Normal(means, log_stds.exp())
return distribution.sample()
def t_to_xy(t):
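    # Same t -> (x, y) mapping as swirl_t_to_data, but also accepts an (N, 1) column of latents.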
if len(t.shape) == 2:
t = t[:, 0]
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def pretrain_encoder(encoder, opt):
losses = []
for _ in range(1000):
x_np, y_np = swirl_data(BS)
x = ptu.np_to_var(x_np)
y = ptu.np_to_var(y_np)
y_hat = encoder.encode(x)
loss = ((y_hat - y) ** 2).mean()
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.data.numpy())
if VERBOSE:
x_np, y_np = swirl_data(N_VIS)
x = ptu.np_to_var(x_np)
y_hat = encoder.encode(x)
y_hat_np = y_hat.data.numpy()
x_hat_np = t_to_xy(y_hat_np[:, 0])
plt.subplot(2, 1, 1)
plt.plot(np.array(losses))
plt.title("Training Loss")
plt.subplot(2, 1, 2)
plt.plot(x_np[:, 0], x_np[:, 1], '.')
plt.plot(x_hat_np[:, 0], x_hat_np[:, 1], '.')
plt.title("Samples")
plt.legend(["Samples", "Estimates"])
plt.show()
def train_encoder(encoder, decoder, encoder_opt):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
batch
)
kl = kl_to_prior(means, log_stds, stds)
latents = encoder.encode(batch)
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
# elbo = - kl + reconstruction_log_prob
# loss = - elbo.mean()
loss = - reconstruction_log_prob.mean()
# This is the second place where we cheat:
latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
    loss = loss  # + latent_loss
encoder_opt.zero_grad()
loss.backward()
encoder_opt.step()
return loss
def train_decoder(encoder, decoder, decoder_opt):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents = encoder.encode(batch)
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
loss = - reconstruction_log_prob.mean()
decoder_opt.zero_grad()
loss.backward()
decoder_opt.step()
return loss
def train_alternating(*_):
encoder = Encoder(
nn.Linear(2, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 2),
)
encoder_opt = Adam(encoder.parameters())
decoder = Decoder(
nn.Linear(1, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 4),
)
decoder_opt = Adam(decoder.parameters())
encoder_losses = []
decoder_losses = []
for _ in range(100):
for _ in range(N_BATCHES):
encoder_losses.append(
train_encoder(encoder, decoder, encoder_opt).data.numpy()
)
for _ in range(N_BATCHES):
decoder_losses.append(
train_decoder(encoder, decoder, decoder_opt).data.numpy()
)
# Visualize
vis_samples_np, true_latents_np = swirl_data(N_VIS)
vis_samples = ptu.np_to_var(vis_samples_np)
true_xy_mean_np = t_to_xy(true_latents_np)
latents = encoder.encode(vis_samples)
reconstructed_samples = decoder.decode(latents).data.numpy()
generated_samples = decoder.decode(
ptu.Variable(torch.randn(*latents.shape))
).data.numpy()
plt.subplot(2, 2, 1)
plt.plot(np.array(encoder_losses))
plt.title("Encoder Loss")
plt.subplot(2, 2, 2)
plt.plot(np.array(decoder_losses))
plt.title("Decoder Loss")
plt.subplot(2, 3, 4)
plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
plt.title("Generated Samples")
plt.subplot(2, 3, 5)
plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
estimated_means = t_to_xy(latents.data.numpy())
# plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
plt.title("Reconstruction")
# plt.legend(["Samples", "Projected Latents"])
plt.subplot(2, 3, 6)
plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
plt.title("Original Samples")
plt.legend(["Original", "True means"])
plt.show()
def train():
encoder = Encoder(
nn.Linear(2, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 2),
)
encoder_opt = Adam(encoder.parameters())
# This is the first place that we cheat. However, this pretraining isn't
# needed if you just add the loss to the training (see below)
# pretrain_encoder(encoder, encoder_opt)
decoder = Decoder(
nn.Linear(1, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 4),
)
decoder_opt = Adam(decoder.parameters())
print("Done training encoder")
losses = []
kls = []
log_probs = []
for _ in range(N_BATCHES):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
batch
)
kl = kl_to_prior(means, log_stds, stds)
latents = encoder.encode(batch)
# decoder_output = decoder(latents.detach())
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
elbo = - kl + reconstruction_log_prob
loss = - elbo.mean()
# This is the second place where we cheat:
latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
loss = loss + latent_loss
decoder_opt.zero_grad()
encoder_opt.zero_grad()
loss.backward()
decoder_opt.step()
encoder_opt.step()
losses.append(loss.data.numpy())
kls.append(kl.mean().data.numpy())
log_probs.append(reconstruction_log_prob.mean().data.numpy())
# Visualize
vis_samples_np, true_latents_np = swirl_data(N_VIS)
vis_samples = ptu.np_to_var(vis_samples_np)
true_xy_mean_np = t_to_xy(true_latents_np)
latents = encoder.encode(vis_samples)
reconstructed_samples = decoder.decode(latents).data.numpy()
generated_samples = decoder.decode(
ptu.Variable(torch.randn(*latents.shape))
).data.numpy()
plt.subplot(2, 3, 1)
plt.plot(np.array(losses))
plt.title("Training Loss")
plt.subplot(2, 3, 2)
plt.plot(np.array(kls))
plt.title("KLs")
plt.subplot(2, 3, 3)
plt.plot(np.array(log_probs))
plt.title("Log Probs")
plt.subplot(2, 3, 4)
plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
plt.title("Generated Samples")
plt.subplot(2, 3, 5)
plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
estimated_means = t_to_xy(latents.data.numpy())
plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
plt.title("Reconstruction")
plt.subplot(2, 3, 6)
plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
plt.title("Original Samples")
plt.legend(["Original", "True means"])
plt.show()
if __name__ == '__main__':
train_alternating()
# train()
| [
"torch.nn.ReLU",
"matplotlib.pyplot.plot",
"railrl.torch.pytorch_util.np_to_var",
"numpy.array",
"numpy.random.randn",
"numpy.cos",
"torch.nn.Linear",
"numpy.random.uniform",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"torch.randn",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((621, 670), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'batch_size', 'low': '(0)', 'high': 'T'}), '(size=batch_size, low=0, high=T)\n', (638, 670), True, 'import numpy as np\n'), ((3240, 3260), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['batch'], {}), '(batch)\n', (3253, 3260), True, 'import railrl.torch.pytorch_util as ptu\n'), ((4157, 4177), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['batch'], {}), '(batch)\n', (4170, 4177), True, 'import railrl.torch.pytorch_util as ptu\n'), ((5829, 5858), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['vis_samples_np'], {}), '(vis_samples_np)\n', (5842, 5858), True, 'import railrl.torch.pytorch_util as ptu\n'), ((6127, 6147), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (6138, 6147), True, 'import matplotlib.pyplot as plt\n'), ((6191, 6216), 'matplotlib.pyplot.title', 'plt.title', (['"""Encoder Loss"""'], {}), "('Encoder Loss')\n", (6200, 6216), True, 'import matplotlib.pyplot as plt\n'), ((6221, 6241), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (6232, 6241), True, 'import matplotlib.pyplot as plt\n'), ((6285, 6310), 'matplotlib.pyplot.title', 'plt.title', (['"""Decoder Loss"""'], {}), "('Decoder Loss')\n", (6294, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6316, 6336), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (6327, 6336), True, 'import matplotlib.pyplot as plt\n'), ((6341, 6404), 'matplotlib.pyplot.plot', 'plt.plot', (['generated_samples[:, 0]', 'generated_samples[:, 1]', '"""."""'], {}), "(generated_samples[:, 0], generated_samples[:, 1], '.')\n", (6349, 6404), True, 'import matplotlib.pyplot as plt\n'), ((6409, 6439), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Samples"""'], {}), "('Generated Samples')\n", (6418, 6439), True, 'import matplotlib.pyplot as plt\n'), ((6444, 6464), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (6455, 6464), True, 'import matplotlib.pyplot as plt\n'), ((6469, 6540), 'matplotlib.pyplot.plot', 'plt.plot', (['reconstructed_samples[:, 0]', 'reconstructed_samples[:, 1]', '"""."""'], {}), "(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')\n", (6477, 6540), True, 'import matplotlib.pyplot as plt\n'), ((6663, 6690), 'matplotlib.pyplot.title', 'plt.title', (['"""Reconstruction"""'], {}), "('Reconstruction')\n", (6672, 6690), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6766), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (6757, 6766), True, 'import matplotlib.pyplot as plt\n'), ((6771, 6828), 'matplotlib.pyplot.plot', 'plt.plot', (['vis_samples_np[:, 0]', 'vis_samples_np[:, 1]', '"""."""'], {}), "(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')\n", (6779, 6828), True, 'import matplotlib.pyplot as plt\n'), ((6833, 6892), 'matplotlib.pyplot.plot', 'plt.plot', (['true_xy_mean_np[:, 0]', 'true_xy_mean_np[:, 1]', '"""."""'], {}), "(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')\n", (6841, 6892), True, 'import matplotlib.pyplot as plt\n'), ((6897, 6926), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Samples"""'], {}), "('Original Samples')\n", (6906, 6926), True, 'import matplotlib.pyplot as plt\n'), ((6931, 6969), 'matplotlib.pyplot.legend', 'plt.legend', (["['Original', 'True means']"], {}), "(['Original', 'True means'])\n", (6941, 6969), True, 'import matplotlib.pyplot as plt\n'), ((6974, 6984), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6982, 6984), True, 'import matplotlib.pyplot as plt\n'), ((9252, 9281), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['vis_samples_np'], {}), '(vis_samples_np)\n', (9265, 9281), True, 'import railrl.torch.pytorch_util as ptu\n'), ((9550, 9570), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (9561, 9570), True, 'import matplotlib.pyplot as plt\n'), ((9606, 9632), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss"""'], {}), "('Training Loss')\n", (9615, 9632), True, 'import matplotlib.pyplot as plt\n'), ((9637, 9657), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (9648, 9657), True, 'import matplotlib.pyplot as plt\n'), ((9690, 9706), 'matplotlib.pyplot.title', 'plt.title', (['"""KLs"""'], {}), "('KLs')\n", (9699, 9706), True, 'import matplotlib.pyplot as plt\n'), ((9711, 9731), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (9722, 9731), True, 'import matplotlib.pyplot as plt\n'), ((9770, 9792), 'matplotlib.pyplot.title', 'plt.title', (['"""Log Probs"""'], {}), "('Log Probs')\n", (9779, 9792), True, 'import matplotlib.pyplot as plt\n'), ((9798, 9818), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (9809, 9818), True, 'import matplotlib.pyplot as plt\n'), ((9823, 9886), 'matplotlib.pyplot.plot', 'plt.plot', (['generated_samples[:, 0]', 'generated_samples[:, 1]', '"""."""'], {}), "(generated_samples[:, 0], generated_samples[:, 1], '.')\n", (9831, 9886), True, 'import matplotlib.pyplot as plt\n'), ((9891, 9921), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Samples"""'], {}), "('Generated Samples')\n", (9900, 9921), True, 'import matplotlib.pyplot as plt\n'), ((9926, 9946), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (9937, 9946), True, 'import matplotlib.pyplot as plt\n'), ((9951, 10022), 'matplotlib.pyplot.plot', 'plt.plot', (['reconstructed_samples[:, 0]', 'reconstructed_samples[:, 1]', '"""."""'], {}), "(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')\n", (9959, 10022), True, 'import matplotlib.pyplot as plt\n'), ((10079, 10138), 'matplotlib.pyplot.plot', 'plt.plot', (['estimated_means[:, 0]', 'estimated_means[:, 1]', '"""."""'], {}), "(estimated_means[:, 0], estimated_means[:, 1], '.')\n", (10087, 10138), True, 'import matplotlib.pyplot as plt\n'), ((10143, 10170), 'matplotlib.pyplot.title', 'plt.title', (['"""Reconstruction"""'], {}), "('Reconstruction')\n", (10152, 10170), True, 'import matplotlib.pyplot as plt\n'), ((10175, 10195), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (10186, 10195), True, 'import matplotlib.pyplot as plt\n'), ((10200, 10257), 'matplotlib.pyplot.plot', 'plt.plot', (['vis_samples_np[:, 0]', 'vis_samples_np[:, 1]', '"""."""'], {}), "(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')\n", (10208, 10257), True, 'import matplotlib.pyplot as plt\n'), ((10262, 10321), 'matplotlib.pyplot.plot', 'plt.plot', (['true_xy_mean_np[:, 0]', 'true_xy_mean_np[:, 1]', '"""."""'], {}), "(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')\n", (10270, 10321), True, 'import matplotlib.pyplot as plt\n'), ((10326, 10355), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Samples"""'], {}), "('Original Samples')\n", (10335, 10355), True, 'import matplotlib.pyplot as plt\n'), ((10360, 10398), 'matplotlib.pyplot.legend', 'plt.legend', 
(["['Original', 'True means']"], {}), "(['Original', 'True means'])\n", (10370, 10398), True, 'import matplotlib.pyplot as plt\n'), ((10403, 10413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10411, 10413), True, 'import matplotlib.pyplot as plt\n'), ((760, 776), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (768, 776), True, 'import numpy as np\n'), ((791, 821), 'numpy.random.randn', 'np.random.randn', (['batch_size', '(2)'], {}), '(batch_size, 2)\n', (806, 821), True, 'import numpy as np\n'), ((989, 1005), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (997, 1005), True, 'import numpy as np\n'), ((2226, 2242), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2234, 2242), True, 'import numpy as np\n'), ((2373, 2392), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['x_np'], {}), '(x_np)\n', (2386, 2392), True, 'import railrl.torch.pytorch_util as ptu\n'), ((2405, 2424), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['y_np'], {}), '(y_np)\n', (2418, 2424), True, 'import railrl.torch.pytorch_util as ptu\n'), ((2677, 2696), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['x_np'], {}), '(x_np)\n', (2690, 2696), True, 'import railrl.torch.pytorch_util as ptu\n'), ((2821, 2841), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2832, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2885, 2911), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss"""'], {}), "('Training Loss')\n", (2894, 2911), True, 'import matplotlib.pyplot as plt\n'), ((2921, 2941), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2932, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2950, 2987), 'matplotlib.pyplot.plot', 'plt.plot', (['x_np[:, 0]', 'x_np[:, 1]', '"""."""'], {}), "(x_np[:, 0], x_np[:, 1], '.')\n", (2958, 2987), True, 'import matplotlib.pyplot as plt\n'), ((2996, 3041), 'matplotlib.pyplot.plot', 'plt.plot', (['x_hat_np[:, 0]', 'x_hat_np[:, 1]', '"""."""'], {}), "(x_hat_np[:, 0], x_hat_np[:, 1], '.')\n", (3004, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3070), 'matplotlib.pyplot.title', 'plt.title', (['"""Samples"""'], {}), "('Samples')\n", (3059, 3070), True, 'import matplotlib.pyplot as plt\n'), ((3079, 3115), 'matplotlib.pyplot.legend', 'plt.legend', (["['Samples', 'Estimates']"], {}), "(['Samples', 'Estimates'])\n", (3089, 3115), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3132, 3134), True, 'import matplotlib.pyplot as plt\n'), ((4669, 4694), 'torch.nn.Linear', 'nn.Linear', (['(2)', 'HIDDEN_SIZE'], {}), '(2, HIDDEN_SIZE)\n', (4678, 4694), True, 'from torch import nn as nn\n'), ((4704, 4713), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4711, 4713), True, 'from torch import nn as nn\n'), ((4723, 4758), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (4732, 4758), True, 'from torch import nn as nn\n'), ((4768, 4777), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4775, 4777), True, 'from torch import nn as nn\n'), ((4787, 4822), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (4796, 4822), True, 'from torch import nn as nn\n'), ((4832, 4841), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4839, 4841), True, 'from torch import nn as nn\n'), ((4851, 4886), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, 
HIDDEN_SIZE)\n', (4860, 4886), True, 'from torch import nn as nn\n'), ((4896, 4905), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4903, 4905), True, 'from torch import nn as nn\n'), ((4915, 4940), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', '(2)'], {}), '(HIDDEN_SIZE, 2)\n', (4924, 4940), True, 'from torch import nn as nn\n'), ((5024, 5049), 'torch.nn.Linear', 'nn.Linear', (['(1)', 'HIDDEN_SIZE'], {}), '(1, HIDDEN_SIZE)\n', (5033, 5049), True, 'from torch import nn as nn\n'), ((5059, 5068), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5066, 5068), True, 'from torch import nn as nn\n'), ((5078, 5113), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (5087, 5113), True, 'from torch import nn as nn\n'), ((5123, 5132), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5130, 5132), True, 'from torch import nn as nn\n'), ((5142, 5177), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (5151, 5177), True, 'from torch import nn as nn\n'), ((5187, 5196), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5194, 5196), True, 'from torch import nn as nn\n'), ((5206, 5241), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (5215, 5241), True, 'from torch import nn as nn\n'), ((5251, 5260), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5258, 5260), True, 'from torch import nn as nn\n'), ((5270, 5295), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', '(4)'], {}), '(HIDDEN_SIZE, 4)\n', (5279, 5295), True, 'from torch import nn as nn\n'), ((6161, 6185), 'numpy.array', 'np.array', (['encoder_losses'], {}), '(encoder_losses)\n', (6169, 6185), True, 'import numpy as np\n'), ((6255, 6279), 'numpy.array', 'np.array', (['decoder_losses'], {}), '(decoder_losses)\n', (6263, 6279), True, 'import numpy as np\n'), ((7031, 7056), 'torch.nn.Linear', 'nn.Linear', (['(2)', 'HIDDEN_SIZE'], {}), '(2, HIDDEN_SIZE)\n', (7040, 7056), True, 'from torch import nn as nn\n'), ((7066, 7075), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7073, 7075), True, 'from torch import nn as nn\n'), ((7085, 7120), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (7094, 7120), True, 'from torch import nn as nn\n'), ((7130, 7139), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7137, 7139), True, 'from torch import nn as nn\n'), ((7149, 7184), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (7158, 7184), True, 'from torch import nn as nn\n'), ((7194, 7203), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7201, 7203), True, 'from torch import nn as nn\n'), ((7213, 7248), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (7222, 7248), True, 'from torch import nn as nn\n'), ((7258, 7267), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7265, 7267), True, 'from torch import nn as nn\n'), ((7277, 7302), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', '(2)'], {}), '(HIDDEN_SIZE, 2)\n', (7286, 7302), True, 'from torch import nn as nn\n'), ((7574, 7599), 'torch.nn.Linear', 'nn.Linear', (['(1)', 'HIDDEN_SIZE'], {}), '(1, HIDDEN_SIZE)\n', (7583, 7599), True, 'from torch import nn as nn\n'), ((7609, 7618), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7616, 7618), True, 'from torch import nn as nn\n'), ((7628, 7663), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (7637, 7663), True, 'from 
torch import nn as nn\n'), ((7673, 7682), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7680, 7682), True, 'from torch import nn as nn\n'), ((7692, 7727), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (7701, 7727), True, 'from torch import nn as nn\n'), ((7737, 7746), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7744, 7746), True, 'from torch import nn as nn\n'), ((7756, 7791), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', 'HIDDEN_SIZE'], {}), '(HIDDEN_SIZE, HIDDEN_SIZE)\n', (7765, 7791), True, 'from torch import nn as nn\n'), ((7801, 7810), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7808, 7810), True, 'from torch import nn as nn\n'), ((7820, 7845), 'torch.nn.Linear', 'nn.Linear', (['HIDDEN_SIZE', '(4)'], {}), '(HIDDEN_SIZE, 4)\n', (7829, 7845), True, 'from torch import nn as nn\n'), ((8074, 8094), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['batch'], {}), '(batch)\n', (8087, 8094), True, 'import railrl.torch.pytorch_util as ptu\n'), ((9584, 9600), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (9592, 9600), True, 'import numpy as np\n'), ((9671, 9684), 'numpy.array', 'np.array', (['kls'], {}), '(kls)\n', (9679, 9684), True, 'import numpy as np\n'), ((9745, 9764), 'numpy.array', 'np.array', (['log_probs'], {}), '(log_probs)\n', (9753, 9764), True, 'import numpy as np\n'), ((683, 705), 'numpy.cos', 'np.cos', (['(t * SWIRL_RATE)'], {}), '(t * SWIRL_RATE)\n', (689, 705), True, 'import numpy as np\n'), ((722, 744), 'numpy.sin', 'np.sin', (['(t * SWIRL_RATE)'], {}), '(t * SWIRL_RATE)\n', (728, 744), True, 'import numpy as np\n'), ((912, 934), 'numpy.cos', 'np.cos', (['(t * SWIRL_RATE)'], {}), '(t * SWIRL_RATE)\n', (918, 934), True, 'import numpy as np\n'), ((951, 973), 'numpy.sin', 'np.sin', (['(t * SWIRL_RATE)'], {}), '(t * SWIRL_RATE)\n', (957, 973), True, 'import numpy as np\n'), ((2149, 2171), 'numpy.cos', 'np.cos', (['(t * SWIRL_RATE)'], {}), '(t * SWIRL_RATE)\n', (2155, 2171), True, 'import numpy as np\n'), ((2188, 2210), 'numpy.sin', 'np.sin', (['(t * SWIRL_RATE)'], {}), '(t * SWIRL_RATE)\n', (2194, 2210), True, 'import numpy as np\n'), ((2859, 2875), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (2867, 2875), True, 'import numpy as np\n'), ((3882, 3909), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['true_latents'], {}), '(true_latents)\n', (3895, 3909), True, 'import railrl.torch.pytorch_util as ptu\n'), ((6074, 6101), 'torch.randn', 'torch.randn', (['*latents.shape'], {}), '(*latents.shape)\n', (6085, 6101), False, 'import torch\n'), ((8778, 8805), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', (['true_latents'], {}), '(true_latents)\n', (8791, 8805), True, 'import railrl.torch.pytorch_util as ptu\n'), ((9497, 9524), 'torch.randn', 'torch.randn', (['*latents.shape'], {}), '(*latents.shape)\n', (9508, 9524), False, 'import torch\n')] |
class FoodClassifier:
#Class Attributes:
#model - the underlying keras model
#labels - the labels to be associated with the activation of each output neuron.
#Labels must be the same size as the output layer of the neural network.
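    #Example usage (paths are hypothetical):
    #   clf = FoodClassifier('extModel.h5', 'labels.txt')
    #   print(clf.predict('some_food.jpg'))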
def __init__(self, modelpath, labels, min_confidence = 0.6):
from keras.models import load_model
from keras.applications.resnet50 import ResNet50
self.resnet = ResNet50(include_top=False,weights='imagenet',pooling='max',input_shape=(224,224,3))
self.extModel = load_model(modelpath)
if(isinstance(labels,str)):
#its a file path
from os.path import exists
if(exists(labels)):
                with open(labels, 'r') as f:
                    x = f.readlines()
                y = []
                for i in x:
                    y.append(i.split('\n')[0])
                self.labels = y
else:
self.labels = labels
        self.num_classes = len(self.labels)
self.min_confidence=min_confidence
def predict(self,img):
import os
from PIL import Image
from keras.preprocessing.image import img_to_array
import numpy as np
#check if image is a filepath
if(isinstance(img,str)):
if(not os.path.exists(img)):
print("Error: Invalid File Path")
return ""
else:
                #if it's a filepath, convert to a PIL image
img = Image.open(img)
#resize image
#shape from model input
shape = self.resnet.input_shape
imgr = img.resize(shape[1:3])
x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
#predict
features = self.resnet.predict(x)
prediction = self.extModel.predict(features)
#get max of predictions and return label(s)
predIdx = np.argmax(prediction)
if(prediction[0,predIdx]<self.min_confidence):
return ""
else:
return self.labels[predIdx]
def set_extModel(self,model):
self.extModel = model
def get_extModel(self):
return self.extModel
def set_labels(self,labels):
self.labels = labels
def get_labels(self):
return self.labels
def set_min_confidence(self,conf):
self.min_confidence=conf
def get_min_confidence(self):
return self.min_confidence
def generate_features_from_directory(location,target_image_count,model=None):
#generates feature maps from the convolutional layers of ResNet50 using all
#images from the directory
#INPUT:
#directory containing NESTED DIRECTORIES of images. (Very Important)
#the number of feature maps to generate for each image class
#OUTPUT:
#a npy file containing the 2048-dimensional feature vector
#produced by ResNet50's convolutional layers
#data is generated in batches of 32
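    #Expected layout: location/<label>/<subfolder(s)>/<images>; one <label>.npy is written per label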
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import ResNet50
from os import listdir
from os.path import isdir
#create the model, if not defined
    if model is None:
model = ResNet50(weights='imagenet',include_top=False,pooling='max')
#create the data generation
datagen = ImageDataGenerator()
#for each directory in
if(not isdir(location)):
print("could not find location: " + location)
return
for label in listdir(location):
#first check that its a directory
label_path = location+'/'+label
if(not isdir(label_path)):
continue
#create the data generator
        #Output size is 224x224 to fit the ResNet50 input
print("Generating feature maps for " + label + "...")
generator = datagen.flow_from_directory(
label_path,
target_size = (224,224),
batch_size = 32,
class_mode=None)
#use ResNet50 to create the features
features = model.predict_generator(generator,target_image_count/32)
#features = np.reshape(features,(features.shape[0],features.shape[3]))
#save the features in a numpy binary
np.save(location+'/'+label+'.npy', features)
def create_data_set(data_path,output_folder,save_to_file=True):
#combines all npy files into one large file with their respective labels
#INPUTS:
    #a directory containing npy files of all different classes
#Outputs:
#training array and training labels
#label array is returned as a one hot encoding
#label names
from os.path import isdir
from os import listdir
import numpy as np
#find out how many classes
num_classes = 0
label_names = []
if(not isdir(data_path)):
print("Could not find directory: "+ data_path)
return
data_contents = listdir(data_path)
for f in data_contents:
if(f.endswith('.npy')):
num_classes +=1
label_names.append(f.split('.')[0])
if(num_classes==0):
print("Could not find any data files in directory: "+data_path)
return
#generate one-hot label vectors
labels = np.zeros([num_classes,num_classes])
for i in range(0,num_classes):
labels[i][i]=1
#load all arrays into memory.
#In the future, might need to do this on either a high ram machine
#or find another way to concatenate data
arrays = []
sizes = []
for f in data_contents:
if(f.endswith('.npy')):
arr = np.load(data_path+'/'+f)
sizes.append(arr.shape[0])
arrays.append(arr)
X = np.vstack([arr for arr in arrays])
#load the labels into memory
labelcodes = []
for i in range(0,num_classes):
labelcodes.append(np.vstack([labels[i]]*sizes[i]))
y = np.vstack([l for l in labelcodes])
if(save_to_file):
np.save(output_folder+'/data_set.npy',X)
np.save(output_folder+'/label_codes.npy',y)
with open(output_folder+"/labels.txt","w") as output:
output.write("".join([label + '\n' for label in label_names]))
return X,y,label_names
def train_classifier_from_images(train_dir,train_size,val_dir,val_size,output_dir):
#INPUTS:
#train_dir is the directory containig the training images
#test_dir is the directory containing the validation images
#output_dir is the directory to save the trained model
#train_size is the number of images to generate for each training class
#val_size is the number of images to generate for each validation class
#OUTPUTS
#A model that takes as input a 2048-vector of feature maps and outputs
#a prediction of what an image with those features might be.
#The labels file is also placed in this directory
#The model created is an SVM with softmax activation.
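    #Example call (hypothetical paths): train_classifier_from_images('data/train', 3200, 'data/val', 640, 'models')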
from time import time
from keras.applications.resnet50 import ResNet50
from keras.models import Sequential
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.layers import Dense
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping, ModelCheckpoint
#import ResNet50 without top layer
print("Loading the ResNet50 Network...")
resnet = ResNet50(weights='imagenet',include_top=False,pooling='max')
#create the training and validation datasets for each class
print("Generating Training Set...")
generate_features_from_directory(train_dir,train_size,model=resnet)
print("Generating Testing Set...")
generate_features_from_directory(val_dir,val_size,model=resnet)
#create the combined dataset
print("Combining datasets...")
X_train,y_train,labels = create_data_set(train_dir,output_dir+"/train",save_to_file=True)
X_val,y_val,labels = create_data_set(val_dir,output_dir+"/validation",save_to_file=True)
#shuffle the train data
X_train,y_train = shuffle(X_train,y_train)
num_classes = len(labels)
#create the extension model
print("Creating extension model...")
extModel = Sequential()
    extModel.add(Dense(num_classes, input_shape=(2048,), activation='softmax', kernel_regularizer=l2(0.01)))
extModel.compile(loss='hinge',optimizer=SGD(lr=0.01,momentum=0.9),metrics=["accuracy"])
#callbacks
checkpoint = ModelCheckpoint(output_dir + "/extModel"+str(int(time()))+".h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
with open(output_dir+"/labels.txt","w") as output:
output.write("".join([label + '\n' for label in labels]))
#train model
print("Training...")
extModel.fit(X_train,y_train,
batch_size=32,
epochs=50,
validation_data=(X_val,y_val),
callbacks = [checkpoint,early])
return extModel
def add_to_train(train_dir,image,label, resnet):
#INPUTS
#Train_dir - the directory that all npy files are contained
#image - the path to the image being added
#resnet - the resnet model to be used for feature determination
#label - the name of the item
#Appends the features of the new item to the training set data for that label
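    #Example call (hypothetical arguments): add_to_train('models/train', 'new_photo.jpg', 'ramen', resnet_model)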
from PIL import Image
from os.path import exists
from keras.preprocessing.image import img_to_array
    if(isinstance(image,str)):
        if(not exists(image)):
            print("Error: Invalid File Path")
            return ""
        else:
            #if it's a filepath, convert to a PIL image
            img = Image.open(image)
    else:
        #already a PIL image
        img = image
shape = resnet.input_shape
imgr = img.resize(shape[1:3])
x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
#predict
features = resnet.predict(x)
import numpy as np
npyname = train_dir+'/'+label+'.npy'
if(not exists(npyname)):
np.save(npyname,features)
else:
fullset = np.load(npyname)
newset = np.append(fullset,features,axis=0)
np.save(npyname,newset)
| [
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.ImageDataGenerator",
"keras.optimizers.SGD",
"numpy.save",
"os.path.exists",
"os.listdir",
"os.path.isdir",
"numpy.vstack",
"keras.callbacks.EarlyStopping",
"numpy.argmax",
"keras.models.Sequential",
"keras.applications.resnet50.ResNet50",
"keras.regularizers.l2",
"time.time",
"PIL.Image.open",
"keras.models.load_model",
"sklearn.utils.shuffle",
"numpy.append",
"numpy.zeros",
"numpy.load"
]
| [((3487, 3507), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (3505, 3507), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((3656, 3673), 'os.listdir', 'listdir', (['location'], {}), '(location)\n', (3663, 3673), False, 'from os import listdir\n'), ((5158, 5176), 'os.listdir', 'listdir', (['data_path'], {}), '(data_path)\n', (5165, 5176), False, 'from os import listdir\n'), ((5483, 5519), 'numpy.zeros', 'np.zeros', (['[num_classes, num_classes]'], {}), '([num_classes, num_classes])\n', (5491, 5519), True, 'import numpy as np\n'), ((5950, 5984), 'numpy.vstack', 'np.vstack', (['[arr for arr in arrays]'], {}), '([arr for arr in arrays])\n', (5959, 5984), True, 'import numpy as np\n'), ((6145, 6179), 'numpy.vstack', 'np.vstack', (['[l for l in labelcodes]'], {}), '([l for l in labelcodes])\n', (6154, 6179), True, 'import numpy as np\n'), ((7644, 7706), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'pooling': '"""max"""'}), "(weights='imagenet', include_top=False, pooling='max')\n", (7652, 7706), False, 'from keras.applications.resnet50 import ResNet50\n'), ((8308, 8333), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (8315, 8333), False, 'from sklearn.utils import shuffle\n'), ((8461, 8473), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8471, 8473), False, 'from keras.models import Sequential\n'), ((8882, 8969), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode=\n 'auto')\n", (8895, 8969), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((450, 544), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'pooling': '"""max"""', 'input_shape': '(224, 224, 3)'}), "(include_top=False, weights='imagenet', pooling='max', input_shape=\n (224, 224, 3))\n", (458, 544), False, 'from keras.applications.resnet50 import ResNet50\n'), ((559, 580), 'keras.models.load_model', 'load_model', (['modelpath'], {}), '(modelpath)\n', (569, 580), False, 'from keras.models import load_model\n'), ((1961, 1982), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1970, 1982), True, 'import numpy as np\n'), ((3370, 3432), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'pooling': '"""max"""'}), "(weights='imagenet', include_top=False, pooling='max')\n", (3378, 3432), False, 'from keras.applications.resnet50 import ResNet50\n'), ((3552, 3567), 'os.path.isdir', 'isdir', (['location'], {}), '(location)\n', (3557, 3567), False, 'from os.path import isdir\n'), ((4431, 4481), 'numpy.save', 'np.save', (["(location + '/' + label + '.npy')", 'features'], {}), "(location + '/' + label + '.npy', features)\n", (4438, 4481), True, 'import numpy as np\n'), ((5044, 5060), 'os.path.isdir', 'isdir', (['data_path'], {}), '(data_path)\n', (5049, 5060), False, 'from os.path import isdir\n'), ((6215, 6258), 'numpy.save', 'np.save', (["(output_folder + '/data_set.npy')", 'X'], {}), "(output_folder + '/data_set.npy', X)\n", (6222, 6258), True, 'import numpy as np\n'), ((6264, 6310), 'numpy.save', 'np.save', (["(output_folder + '/label_codes.npy')", 'y'], {}), "(output_folder + '/label_codes.npy', y)\n", (6271, 6310), 
True, 'import numpy as np\n'), ((10372, 10387), 'os.path.exists', 'exists', (['npyname'], {}), '(npyname)\n', (10378, 10387), False, 'from os.path import exists\n'), ((10398, 10424), 'numpy.save', 'np.save', (['npyname', 'features'], {}), '(npyname, features)\n', (10405, 10424), True, 'import numpy as np\n'), ((10456, 10472), 'numpy.load', 'np.load', (['npyname'], {}), '(npyname)\n', (10463, 10472), True, 'import numpy as np\n'), ((10490, 10526), 'numpy.append', 'np.append', (['fullset', 'features'], {'axis': '(0)'}), '(fullset, features, axis=0)\n', (10499, 10526), True, 'import numpy as np\n'), ((10533, 10557), 'numpy.save', 'np.save', (['npyname', 'newset'], {}), '(npyname, newset)\n', (10540, 10557), True, 'import numpy as np\n'), ((709, 723), 'os.path.exists', 'exists', (['labels'], {}), '(labels)\n', (715, 723), False, 'from os.path import exists\n'), ((3772, 3789), 'os.path.isdir', 'isdir', (['label_path'], {}), '(label_path)\n', (3777, 3789), False, 'from os.path import isdir\n'), ((5842, 5870), 'numpy.load', 'np.load', (["(data_path + '/' + f)"], {}), "(data_path + '/' + f)\n", (5849, 5870), True, 'import numpy as np\n'), ((6104, 6137), 'numpy.vstack', 'np.vstack', (['([labels[i]] * sizes[i])'], {}), '([labels[i]] * sizes[i])\n', (6113, 6137), True, 'import numpy as np\n'), ((8621, 8647), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'momentum': '(0.9)'}), '(lr=0.01, momentum=0.9)\n', (8624, 8647), False, 'from keras.optimizers import SGD\n'), ((9896, 9909), 'os.path.exists', 'exists', (['image'], {}), '(image)\n', (9902, 9909), False, 'from os.path import exists\n'), ((10065, 10082), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (10075, 10082), False, 'from PIL import Image\n'), ((10166, 10184), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['imgr'], {}), '(imgr)\n', (10178, 10184), False, 'from keras.preprocessing.image import img_to_array\n'), ((1320, 1339), 'os.path.exists', 'os.path.exists', (['img'], {}), '(img)\n', (1334, 1339), False, 'import os\n'), ((1515, 1530), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (1525, 1530), False, 'from PIL import Image\n'), ((1693, 1711), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['imgr'], {}), '(imgr)\n', (1705, 1711), False, 'from keras.preprocessing.image import img_to_array\n'), ((8566, 8574), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (8568, 8574), False, 'from keras.regularizers import l2\n'), ((8755, 8761), 'time.time', 'time', ([], {}), '()\n', (8759, 8761), False, 'from time import time\n')] |
from exchange_sockets.exchange_websocket import ExchangeWebSocket
from singletones.custom_logger import MyLogger
import websocket
import threading
from time import sleep
from time import time
import json
import ssl
logger = MyLogger()
class BitstampWebsocket(ExchangeWebSocket):
def __init__(self, pairs_n_streams):
super().__init__('Bitstamp', pairs_n_streams)
self.possible_streams = ['live_trades', 'diff_order_book']
self.streams = []
def init_streams(self):
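        # Builds one bts:subscribe payload per (pair, stream), e.g.
        # {"event": "bts:subscribe", "data": {"channel": "live_trades_btcusd"}}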
for pair, streams in self.pairs_n_streams.items():
for sub_stream in streams.split(','):
if self.has_stream(sub_stream):
cur = dict()
cur['event'] = 'bts:subscribe'
cur['data'] = {'channel': "{}_{}".format(sub_stream, pair)}
self.streams.append(cur)
def start_multiple_websocket(self, init_streams=True):
super().start_multiple_websocket(init_streams=init_streams)
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://ws.bitstamp.net",
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE}))
self.wst.daemon = True
self.wst.start()
logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 15
        while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
logger.error("%s Couldn't connect to %s! Exiting.",
self.node,
self.exchange)
self.close_socket()
else:
logger.info('{} socket is started:\n{}\n{}'.format(self.exchange,
self.node,
str(self.streams)))
def save_trades(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
append_data = "{},{},{},{}\n".format(data['timestamp'],
data['price'],
data['amount'],
data['type'])
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_data)
def save_level2_orderbook(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
all_data = {}
data_time = data['timestamp']
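        # ask sizes are written as negative numbers so bids and asks share one CSV column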
for side in ['bids', 'asks']:
for cur in data[side]:
if not all_data.get(symbol, None):
all_data[symbol] = []
price = cur[0]
size = cur[1]
all_data[symbol].append("{},{},{}\n".format(
data_time,
price,
size if side == "bids" else "-{}".format(size)))
for symbol, l2_ob_data in all_data.items():
for l2_ob in l2_ob_data:
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
l2_ob)
def __on_message(self, ws, message):
if message is None:
return
try:
self.last_msg_time = int(time())
message = json.loads(message)
channel = message['channel']
if channel.startswith('diff_order_book'):
self.save_level2_orderbook(message)
elif channel.startswith('live_trades'):
self.save_trades(message)
except Exception as e:
logger.debug(str(e))
def __on_error(self, ws, error):
self.on_error = True
logger.error("On error\n{}\n{} {}".format(self.node,
self.exchange,
error))
def __on_close(self, ws):
logger.info("On close\n{}".format(self.exchange))
def __on_open(self, ws):
logger.info("On Open\n{}".format(self.exchange))
if self.streams:
for stream in self.streams:
logger.info('Subscribing to %s', json.dumps(stream))
self.ws.send(json.dumps(stream))
sleep(2)
else:
logger.error('%s. Stream is not initialized', self.exchange)
def close_socket(self):
self.exited = True
if self.ws:
self.ws.close()
| [
"json.loads",
"websocket.enableTrace",
"json.dumps",
"websocket.WebSocketApp",
"time.sleep",
"time.time",
"singletones.custom_logger.MyLogger"
]
| [((225, 235), 'singletones.custom_logger.MyLogger', 'MyLogger', ([], {}), '()\n', (233, 235), False, 'from singletones.custom_logger import MyLogger\n'), ((1003, 1030), 'websocket.enableTrace', 'websocket.enableTrace', (['(True)'], {}), '(True)\n', (1024, 1030), False, 'import websocket\n'), ((1050, 1212), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (['"""wss://ws.bitstamp.net"""'], {'on_open': 'self.__on_open', 'on_message': 'self.__on_message', 'on_error': 'self.__on_error', 'on_close': 'self.__on_close'}), "('wss://ws.bitstamp.net', on_open=self.__on_open,\n on_message=self.__on_message, on_error=self.__on_error, on_close=self.\n __on_close)\n", (1072, 1212), False, 'import websocket\n'), ((1736, 1744), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1741, 1744), False, 'from time import sleep\n'), ((4079, 4098), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (4089, 4098), False, 'import json\n'), ((4049, 4055), 'time.time', 'time', ([], {}), '()\n', (4053, 4055), False, 'from time import time\n'), ((5034, 5042), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (5039, 5042), False, 'from time import sleep\n'), ((4949, 4967), 'json.dumps', 'json.dumps', (['stream'], {}), '(stream)\n', (4959, 4967), False, 'import json\n'), ((4998, 5016), 'json.dumps', 'json.dumps', (['stream'], {}), '(stream)\n', (5008, 5016), False, 'import json\n')] |
from datetime import datetime
from airflow import DAG
from airflow.operators.python import PythonOperator
# v0.0.1
from oss_know.libs.base_dict.variable_key import NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS, GITHUB_TOKENS, \
OPENSEARCH_CONN_DATA, PROXY_CONFS
from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator
from oss_know.libs.util.token import TokenManager
with DAG(
dag_id='github_init_issues_timeline_v1',
schedule_interval=None,
start_date=datetime(2000, 1, 1),
catchup=False,
tags=['github'],
) as dag:
def scheduler_init_github_issues_timeline(ds, **kwargs):
return 'End:scheduler_init_github_issues_timeline'
op_scheduler_init_github_issues_timeline = PythonOperator(
task_id='op_scheduler_init_github_issues_timeline',
python_callable=scheduler_init_github_issues_timeline
)
def do_init_github_issues_timeline(params):
from airflow.models import Variable
from oss_know.libs.github import init_issues_timeline
github_tokens = Variable.get(GITHUB_TOKENS, deserialize_json=True)
opensearch_conn_info = Variable.get(OPENSEARCH_CONN_DATA, deserialize_json=True)
proxy_confs = Variable.get(PROXY_CONFS, deserialize_json=True)
proxies = []
for line in proxy_confs['reserved_proxies']:
proxies.append(f'http://{line}')
proxy_service = KuaiProxyService(proxy_confs['api_url'], proxy_confs['orderid'])
proxy_manager = ProxyManager(proxies, proxy_service)
token_manager = TokenManager(github_tokens)
proxy_accommodator = GithubTokenProxyAccommodator(token_manager, proxy_manager, shuffle=True,
policy=GithubTokenProxyAccommodator.POLICY_FIXED_MAP)
owner = params["owner"]
repo = params["repo"]
# since = params["since"]
since = None
init_issues_timeline.init_sync_github_issues_timeline(opensearch_conn_info, owner, repo,
proxy_accommodator, since)
return params
need_do_init_ops = []
from airflow.models import Variable
need_init_github_issues_timeline_repos = Variable.get(NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS,
deserialize_json=True)
for need_init_github_issues_timeline_repo in need_init_github_issues_timeline_repos:
op_do_init_github_issues_timeline = PythonOperator(
task_id='op_do_init_github_issues_timeline_{owner}_{repo}'.format(
owner=need_init_github_issues_timeline_repo["owner"],
repo=need_init_github_issues_timeline_repo["repo"]),
python_callable=do_init_github_issues_timeline,
op_kwargs={'params': need_init_github_issues_timeline_repo},
)
op_scheduler_init_github_issues_timeline >> op_do_init_github_issues_timeline
| [
"datetime.datetime",
"airflow.models.Variable.get",
"oss_know.libs.util.proxy.ProxyManager",
"oss_know.libs.util.token.TokenManager",
"oss_know.libs.github.init_issues_timeline.init_sync_github_issues_timeline",
"oss_know.libs.util.proxy.GithubTokenProxyAccommodator",
"airflow.operators.python.PythonOperator",
"oss_know.libs.util.proxy.KuaiProxyService"
]
| [((769, 894), 'airflow.operators.python.PythonOperator', 'PythonOperator', ([], {'task_id': '"""op_scheduler_init_github_issues_timeline"""', 'python_callable': 'scheduler_init_github_issues_timeline'}), "(task_id='op_scheduler_init_github_issues_timeline',\n python_callable=scheduler_init_github_issues_timeline)\n", (783, 894), False, 'from airflow.operators.python import PythonOperator\n'), ((2286, 2361), 'airflow.models.Variable.get', 'Variable.get', (['NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS'], {'deserialize_json': '(True)'}), '(NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS, deserialize_json=True)\n', (2298, 2361), False, 'from airflow.models import Variable\n'), ((1094, 1144), 'airflow.models.Variable.get', 'Variable.get', (['GITHUB_TOKENS'], {'deserialize_json': '(True)'}), '(GITHUB_TOKENS, deserialize_json=True)\n', (1106, 1144), False, 'from airflow.models import Variable\n'), ((1176, 1233), 'airflow.models.Variable.get', 'Variable.get', (['OPENSEARCH_CONN_DATA'], {'deserialize_json': '(True)'}), '(OPENSEARCH_CONN_DATA, deserialize_json=True)\n', (1188, 1233), False, 'from airflow.models import Variable\n'), ((1257, 1305), 'airflow.models.Variable.get', 'Variable.get', (['PROXY_CONFS'], {'deserialize_json': '(True)'}), '(PROXY_CONFS, deserialize_json=True)\n', (1269, 1305), False, 'from airflow.models import Variable\n'), ((1450, 1514), 'oss_know.libs.util.proxy.KuaiProxyService', 'KuaiProxyService', (["proxy_confs['api_url']", "proxy_confs['orderid']"], {}), "(proxy_confs['api_url'], proxy_confs['orderid'])\n", (1466, 1514), False, 'from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator\n'), ((1539, 1575), 'oss_know.libs.util.proxy.ProxyManager', 'ProxyManager', (['proxies', 'proxy_service'], {}), '(proxies, proxy_service)\n', (1551, 1575), False, 'from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator\n'), ((1600, 1627), 'oss_know.libs.util.token.TokenManager', 'TokenManager', (['github_tokens'], {}), '(github_tokens)\n', (1612, 1627), False, 'from oss_know.libs.util.token import TokenManager\n'), ((1658, 1788), 'oss_know.libs.util.proxy.GithubTokenProxyAccommodator', 'GithubTokenProxyAccommodator', (['token_manager', 'proxy_manager'], {'shuffle': '(True)', 'policy': 'GithubTokenProxyAccommodator.POLICY_FIXED_MAP'}), '(token_manager, proxy_manager, shuffle=True,\n policy=GithubTokenProxyAccommodator.POLICY_FIXED_MAP)\n', (1686, 1788), False, 'from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator\n'), ((1970, 2089), 'oss_know.libs.github.init_issues_timeline.init_sync_github_issues_timeline', 'init_issues_timeline.init_sync_github_issues_timeline', (['opensearch_conn_info', 'owner', 'repo', 'proxy_accommodator', 'since'], {}), '(opensearch_conn_info,\n owner, repo, proxy_accommodator, since)\n', (2023, 2089), False, 'from oss_know.libs.github import init_issues_timeline\n'), ((520, 540), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (528, 540), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Unit tests for the Person plugin and its model
"""
from django import forms
from django.conf import settings
from django.test import TestCase
from cms.api import add_plugin, create_page
from cmsplugin_plain_text.cms_plugins import PlaintextPlugin
from djangocms_picture.cms_plugins import PicturePlugin
from richie.apps.core.factories import FilerImageFactory, UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.persons.cms_plugins import PersonPlugin
from richie.apps.persons.factories import PersonFactory
from richie.apps.persons.models import PersonPluginModel
class PersonPluginTestCase(TestCase):
"""
Test that PersonPlugin correctly displays a Person's page placeholders content
"""
def test_cms_plugins_person_form_page_choices(self):
"""
The form to create a person plugin should only list person pages in the select box.
"""
class PersonPluginModelForm(forms.ModelForm):
"""A form for testing the choices in the select box"""
class Meta:
model = PersonPluginModel
exclude = ()
person = PersonFactory()
other_page_title = "other page"
create_page(other_page_title, "richie/fullwidth.html", settings.LANGUAGE_CODE)
plugin_form = PersonPluginModelForm()
self.assertIn(person.get_full_name(), plugin_form.as_table())
self.assertNotIn(other_page_title, plugin_form.as_table())
def test_cms_plugins_person_render(self):
"""
Test that a PersonPlugin correctly renders person's page specific information
"""
# Create a filer fake image
staff = UserFactory(is_staff=True, is_superuser=True)
image = FilerImageFactory(owner=staff)
# Create a Person
person = PersonFactory()
person_page = person.extended_object
# Add portrait to related placeholder
portrait_placeholder = person_page.placeholders.get(slot="portrait")
add_plugin(
portrait_placeholder,
PicturePlugin,
"en",
**{"picture": image, "attributes": {"alt": "portrait description"}}
)
add_plugin(
portrait_placeholder,
PicturePlugin,
"fr",
**{"picture": image, "attributes": {"alt": "description du portrait"}}
)
        # Add a resume to the related placeholder
resume_placeholder = person_page.placeholders.get(slot="resume")
add_plugin(
resume_placeholder, PlaintextPlugin, "en", **{"body": "A short resume"}
)
add_plugin(
resume_placeholder, PlaintextPlugin, "fr", **{"body": "Un résumé court"}
)
# Create a page to add the plugin to
page = create_i18n_page({"en": "A page", "fr": "Une page"})
placeholder = page.placeholders.get(slot="maincontent")
add_plugin(placeholder, PersonPlugin, "en", **{"person": person})
add_plugin(placeholder, PersonPlugin, "fr", **{"person": person})
page.publish("en")
page.publish("fr")
# Check the page content in English
url = page.get_absolute_url(language="en")
response = self.client.get(url)
# Person's name should be present as a link to the cms page
# And CMS page title should be in title attribute of the link
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
self.assertContains(response, person.get_full_name(), html=True)
# Person's portrait and its properties should be present
# pylint: disable=no-member
self.assertContains(response, image.file.name)
# Short resume should be present
self.assertContains(
response,
'<div class="person-plugin__content__text">A short resume</div>',
html=True,
)
# The person's full name should be wrapped in a h2
self.assertContains(
response,
'<h2 class="person-plugin__content__title">{:s}</h2>'.format(
person.get_full_name()
),
html=True,
)
# Same checks in French
url = page.get_absolute_url(language="fr")
response = self.client.get(url)
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
# pylint: disable=no-member
self.assertContains(response, image.file.name)
self.assertContains(
response,
'<div class="person-plugin__content__text">Un résumé court</div>',
html=True,
)
| [
"cms.api.create_page",
"richie.apps.core.helpers.create_i18n_page",
"richie.apps.core.factories.FilerImageFactory",
"richie.apps.core.factories.UserFactory",
"richie.apps.persons.factories.PersonFactory",
"cms.api.add_plugin"
]
| [((1176, 1191), 'richie.apps.persons.factories.PersonFactory', 'PersonFactory', ([], {}), '()\n', (1189, 1191), False, 'from richie.apps.persons.factories import PersonFactory\n'), ((1240, 1318), 'cms.api.create_page', 'create_page', (['other_page_title', '"""richie/fullwidth.html"""', 'settings.LANGUAGE_CODE'], {}), "(other_page_title, 'richie/fullwidth.html', settings.LANGUAGE_CODE)\n", (1251, 1318), False, 'from cms.api import add_plugin, create_page\n'), ((1711, 1756), 'richie.apps.core.factories.UserFactory', 'UserFactory', ([], {'is_staff': '(True)', 'is_superuser': '(True)'}), '(is_staff=True, is_superuser=True)\n', (1722, 1756), False, 'from richie.apps.core.factories import FilerImageFactory, UserFactory\n'), ((1773, 1803), 'richie.apps.core.factories.FilerImageFactory', 'FilerImageFactory', ([], {'owner': 'staff'}), '(owner=staff)\n', (1790, 1803), False, 'from richie.apps.core.factories import FilerImageFactory, UserFactory\n'), ((1848, 1863), 'richie.apps.persons.factories.PersonFactory', 'PersonFactory', ([], {}), '()\n', (1861, 1863), False, 'from richie.apps.persons.factories import PersonFactory\n'), ((2041, 2167), 'cms.api.add_plugin', 'add_plugin', (['portrait_placeholder', 'PicturePlugin', '"""en"""'], {}), "(portrait_placeholder, PicturePlugin, 'en', **{'picture': image,\n 'attributes': {'alt': 'portrait description'}})\n", (2051, 2167), False, 'from cms.api import add_plugin, create_page\n'), ((2230, 2359), 'cms.api.add_plugin', 'add_plugin', (['portrait_placeholder', 'PicturePlugin', '"""fr"""'], {}), "(portrait_placeholder, PicturePlugin, 'fr', **{'picture': image,\n 'attributes': {'alt': 'description du portrait'}})\n", (2240, 2359), False, 'from cms.api import add_plugin, create_page\n'), ((2537, 2624), 'cms.api.add_plugin', 'add_plugin', (['resume_placeholder', 'PlaintextPlugin', '"""en"""'], {}), "(resume_placeholder, PlaintextPlugin, 'en', **{'body':\n 'A short resume'})\n", (2547, 2624), False, 'from cms.api import add_plugin, create_page\n'), ((2651, 2739), 'cms.api.add_plugin', 'add_plugin', (['resume_placeholder', 'PlaintextPlugin', '"""fr"""'], {}), "(resume_placeholder, PlaintextPlugin, 'fr', **{'body':\n 'Un résumé court'})\n", (2661, 2739), False, 'from cms.api import add_plugin, create_page\n'), ((2819, 2871), 'richie.apps.core.helpers.create_i18n_page', 'create_i18n_page', (["{'en': 'A page', 'fr': 'Une page'}"], {}), "({'en': 'A page', 'fr': 'Une page'})\n", (2835, 2871), False, 'from richie.apps.core.helpers import create_i18n_page\n'), ((2944, 3009), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'PersonPlugin', '"""en"""'], {}), "(placeholder, PersonPlugin, 'en', **{'person': person})\n", (2954, 3009), False, 'from cms.api import add_plugin, create_page\n'), ((3018, 3083), 'cms.api.add_plugin', 'add_plugin', (['placeholder', 'PersonPlugin', '"""fr"""'], {}), "(placeholder, PersonPlugin, 'fr', **{'person': person})\n", (3028, 3083), False, 'from cms.api import add_plugin, create_page\n')] |
# cython: language_level=3
# -*- coding: utf-8 -*-
from mathics.core.expression import Expression
from mathics.core.symbols import Atom, Symbol
from mathics.core.atoms import Integer
from mathics.builtin.base import MessageException
"""
This module provides some infrastructure to deal with SubExpressions.
"""
def _pspec_span_to_tuple(pspec, expr):
"""
This function takes an expression and a Mathics
`Span` Expression and returns a tuple with the positions
of the leaves.
"""
start = 1
stop = None
step = 1
leaves = pspec.leaves
if len(leaves) > 3:
raise MessageException("Part", "span", leaves)
if len(leaves) > 0:
start = leaves[0].get_int_value()
if len(leaves) > 1:
stop = leaves[1].get_int_value()
if stop is None:
if leaves[1].get_name() == "System`All":
stop = None
else:
raise MessageException("Part", "span", pspec)
else:
stop = stop - 1 if stop > 0 else len(expr.leaves) + stop
if len(pspec.leaves) > 2:
step = leaves[2].get_int_value()
if start is None or step is None:
raise MessageException("Part", "span", pspec)
if start == 0 or stop == 0:
# index 0 is undefined
raise MessageException("Part", "span", Integer(0))
if start < 0:
        start = len(expr.leaves) + start
else:
start = start - 1
if stop is None:
stop = 0 if step < 0 else len(expr.leaves) - 1
stop = stop + 1 if step > 0 else stop - 1
return tuple(k for k in range(start, stop, step))
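# Illustrative sketch (not part of the original module; it assumes that
# ``Expression("System`Span", Integer(1), Integer(3))`` is how a Mathics
# ``1 ;; 3`` span is built in this API version):
#
#   expr = Expression("System`List", Integer(10), Integer(20), Integer(30), Integer(40))
#   pspec = Expression("System`Span", Integer(1), Integer(3))
#   _pspec_span_to_tuple(pspec, expr)   # -> (0, 1, 2), zero-based leaf positions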
class ExpressionPointer(object):
"""
This class represents a reference to a leaf in an expression.
Supports a minimal part of the basic interface of `mathics.core.symbols.BaseElement`.
"""
def __init__(self, expr, pos=None):
"""
        Initializes an ExpressionPointer pointing to the leaf in position `pos`
of `expr`.
expr: can be an Expression, a Symbol, or another ExpressionPointer
pos: int or None
If `pos==0`, then the pointer points to the `head` of the expression.
        If `pos` is `None`, it points to the whole expression.
"""
if pos is None:
if type(expr) is ExpressionPointer:
self.parent = expr.parent
self.position = expr.position
else:
self.parent = expr
self.position = None
else:
self.parent = expr
self.position = pos
def __str__(self) -> str:
return "%s[[%s]]" % (self.parent, self.position)
def __repr__(self) -> str:
return self.__str__()
@property
def original(self):
return None
@original.setter
def original(self, value):
raise ValueError("Expression.original is write protected.")
@property
def head(self):
pos = self.position
if pos is None:
return self.parent.head
elif pos == 0:
return self.parent.head.head
return self.parent.leaves[pos - 1].head
@head.setter
def head(self, value):
raise ValueError("ExpressionPointer.head is write protected.")
@property
def leaves(self):
pos = self.position
if pos is None:
return self.parent.leaves
elif pos == 0:
            return self.parent.head.leaves
return self.parent.leaves[pos - 1].leaves
@leaves.setter
def leaves(self, value):
raise ValueError("ExpressionPointer.leaves is write protected.")
def get_head_name(self):
return self.head.get_name()
def is_atom(self):
pos = self.position
if pos is None:
return self.parent.is_atom()
elif pos == 0:
return self.parent.head.is_atom()
return self.parent.leaves[pos - 1].is_atom()
def to_expression(self):
parent = self.parent
p = self.position
if p == 0:
if isinstance(parent, Symbol):
return parent
else:
return parent.head.copy()
else:
leaf = self.parent.leaves[p - 1]
if isinstance(leaf, Atom):
return leaf
else:
return leaf.copy()
def replace(self, new):
"""
        This method replaces the value pointed to with the `new` value.
"""
# First, look for the ancestor that is not an ExpressionPointer,
# keeping the positions of each step:
parent = self.parent
pos = [self.position]
while type(parent) is ExpressionPointer:
position = parent.position
if position is None:
parent = parent.parent
continue
pos.append(parent.position)
parent = parent.parent
# At this point, we hit the expression, and we have
# the path to reach the position
i = pos.pop()
try:
while pos:
if i == 0:
parent = parent._head
else:
parent = parent.elements[i - 1]
i = pos.pop()
except Exception:
raise MessageException("Part", "span", pos)
# Now, we have a pointer to an element in a true `Expression`.
# Now, set it to the new value.
if i == 0:
parent.set_head(new)
else:
parent.set_element(i - 1, new)
class SubExpression(object):
"""
This class represents a Subexpression of an existing Expression.
Assignment to a subexpression results in the change of the original Expression.
"""
def __new__(cls, expr, pos=None):
"""
`expr` can be an `Expression`, a `ExpressionPointer` or
another `SubExpression`
`pos` can be `None`, an integer value or an `Expression` that
indicates a subset of leaves in the original `Expression`.
        If `pos` points to a single whole leaf of `expr`, then an
        `ExpressionPointer` is returned.
"""
# If pos is a list, take the first element, and
# store the remainder.
if type(pos) in (tuple, list):
pos, rem_pos = pos[0], pos[1:]
if len(rem_pos) == 0:
rem_pos = None
else:
rem_pos = None
# Trivial conversion: if pos is an `Integer`, convert
# to a Python native int
if type(pos) is Integer:
pos = pos.get_int_value()
# pos == `System`All`
elif isinstance(pos, Symbol) and pos.get_name() == "System`All":
pos = None
elif type(pos) is Expression:
if pos.has_form("System`List", None):
tuple_pos = [i.get_int_value() for i in pos.leaves]
if any([i is None for i in tuple_pos]):
raise MessageException("Part", "pspec", pos)
pos = tuple_pos
elif pos.has_form("System`Span", None):
pos = _pspec_span_to_tuple(pos, expr)
else:
raise MessageException("Part", "pspec", pos)
if pos is None or type(pos) is int:
if rem_pos is None:
return ExpressionPointer(expr, pos)
else:
return SubExpression(ExpressionPointer(expr, pos), rem_pos)
elif type(pos) is tuple:
self = super(SubExpression, cls).__new__(cls)
self._headp = ExpressionPointer(expr.head, 0)
self._elementsp = [
SubExpression(ExpressionPointer(expr, k + 1), rem_pos) for k in pos
]
return self
def is_atom(self):
return False
def __str__(self):
return (
self.head.__str__()
+ "[\n"
+ ",\n".join(["\t " + leaf.__str__() for leaf in self.leaves])
+ "\n\t]"
)
def __repr__(self):
return self.__str__()
@property
def head(self):
return self._headp
@head.setter
def head(self, value):
raise ValueError("SubExpression.head is write protected.")
def get_head_name(self):
return self._headp.parent.get_head_name()
@property
def elements(self):
return self._elementsp
@elements.setter
def elements(self, value):
raise ValueError("SubExpression.leaves is write protected.")
@property
def leaves(self):
return self._elementsp
@leaves.setter
def leaves(self, value):
raise ValueError("SubExpression.leaves is write protected.")
def to_expression(self):
return Expression(
self._headp.to_expression(),
*(leaf.to_expression() for leaf in self._elementsp)
)
def replace(self, new):
"""
        Assigns `new` to the subexpression, according to the logic of `mathics.core.walk_parts`
"""
if (new.has_form("List", None) or new.get_head_name() == "System`List") and len(
new.leaves
) == len(self._elementsp):
for leaf, sub_new in zip(self._elementsp, new.leaves):
leaf.replace(sub_new)
else:
for leaf in self._elementsp:
leaf.replace(new)
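# Illustrative sketch (not part of the original module): replace the first
# leaf of an already-built Mathics ``Expression`` in place through an
# ``ExpressionPointer``. ``expr`` and ``new_value`` are caller-supplied
# assumptions; everything else used here is defined above.
def _replace_first_leaf_example(expr, new_value):
    # position 1 is the first leaf; position 0 would be the head
    pointer = ExpressionPointer(expr, 1)
    # mutates ``expr`` in place, via the logic of ExpressionPointer.replace
    pointer.replace(new_value)
    return expr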
| [
"mathics.core.atoms.Integer",
"mathics.builtin.base.MessageException"
]
| [((611, 651), 'mathics.builtin.base.MessageException', 'MessageException', (['"""Part"""', '"""span"""', 'leaves'], {}), "('Part', 'span', leaves)\n", (627, 651), False, 'from mathics.builtin.base import MessageException\n'), ((1177, 1216), 'mathics.builtin.base.MessageException', 'MessageException', (['"""Part"""', '"""span"""', 'pspec'], {}), "('Part', 'span', pspec)\n", (1193, 1216), False, 'from mathics.builtin.base import MessageException\n'), ((1328, 1338), 'mathics.core.atoms.Integer', 'Integer', (['(0)'], {}), '(0)\n', (1335, 1338), False, 'from mathics.core.atoms import Integer\n'), ((929, 968), 'mathics.builtin.base.MessageException', 'MessageException', (['"""Part"""', '"""span"""', 'pspec'], {}), "('Part', 'span', pspec)\n", (945, 968), False, 'from mathics.builtin.base import MessageException\n'), ((5252, 5289), 'mathics.builtin.base.MessageException', 'MessageException', (['"""Part"""', '"""span"""', 'pos'], {}), "('Part', 'span', pos)\n", (5268, 5289), False, 'from mathics.builtin.base import MessageException\n'), ((6920, 6958), 'mathics.builtin.base.MessageException', 'MessageException', (['"""Part"""', '"""pspec"""', 'pos'], {}), "('Part', 'pspec', pos)\n", (6936, 6958), False, 'from mathics.builtin.base import MessageException\n'), ((7137, 7175), 'mathics.builtin.base.MessageException', 'MessageException', (['"""Part"""', '"""pspec"""', 'pos'], {}), "('Part', 'pspec', pos)\n", (7153, 7175), False, 'from mathics.builtin.base import MessageException\n')] |
import zipfile
import random
RAND_INT_RANGE = (1,100)
def wrf(fname):
with open(fname, 'w') as f:
for i in range(100):
f.write(str(random.randint(*RAND_INT_RANGE)))
fnames = []
for i in range(10):
fname = 'file' + str(i) + '.txt'
wrf(fname)
fnames.append(fname)
dirpaths = set()
with zipfile.ZipFile('myzip.zip', 'w', compression=zipfile.ZIP_DEFLATED) as zf:
for fname in fnames:
dirpath = '/dirpath'+str(random.randint(*RAND_INT_RANGE))
# let's not have duplicate dirpaths.
while dirpath in dirpaths:
dirpath = '/dirpath' + str(random.randint(*RAND_INT_RANGE))
zf.write(fname, arcname=dirpath+'/'+fname)
dirpaths.add(dirpath)
print('dirpaths', dirpaths)
print('fnames', fnames)
| [
"random.randint",
"zipfile.ZipFile"
]
| [((323, 390), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""myzip.zip"""', '"""w"""'], {'compression': 'zipfile.ZIP_DEFLATED'}), "('myzip.zip', 'w', compression=zipfile.ZIP_DEFLATED)\n", (338, 390), False, 'import zipfile\n'), ((457, 488), 'random.randint', 'random.randint', (['*RAND_INT_RANGE'], {}), '(*RAND_INT_RANGE)\n', (471, 488), False, 'import random\n'), ((156, 187), 'random.randint', 'random.randint', (['*RAND_INT_RANGE'], {}), '(*RAND_INT_RANGE)\n', (170, 187), False, 'import random\n'), ((609, 640), 'random.randint', 'random.randint', (['*RAND_INT_RANGE'], {}), '(*RAND_INT_RANGE)\n', (623, 640), False, 'import random\n')] |
import unittest
from django.test import Client
from wagtail.core.models import Page
from wagtail_managed404.models import PageNotFoundEntry
class TestMiddleware(unittest.TestCase):
"""Tests for `wagtail_app_pages` package."""
def setUp(self):
self.client = Client()
self.invalid_url = '/definitely_not_an_actual_url/'
self.redirect_to_url = '/much_better_url/'
self.redirect_to_page = Page.objects.get(depth=2)
def test_redirect_to_url(self):
PageNotFoundEntry.objects.all().delete()
entry = self._trigger_404()
entry.redirect_to_url = self.redirect_to_url
entry.save()
self._validate_redirect(self.invalid_url, self.redirect_to_url)
def test_redirect_to_page(self):
PageNotFoundEntry.objects.all().delete()
entry = self._trigger_404()
entry.redirect_to_page = self.redirect_to_page
entry.save()
self._validate_redirect(self.invalid_url, self.redirect_to_page.url)
def _trigger_404(self):
response = self.client.get(self.invalid_url)
        self.assertEqual(response.status_code, 404)
        entries = PageNotFoundEntry.objects.filter(url=self.invalid_url)
        self.assertEqual(entries.count(), 1)
return entries.first()
def _validate_redirect(self, source_url, target_url):
response = self.client.get(source_url)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, target_url)
| [
"wagtail.core.models.Page.objects.get",
"wagtail_managed404.models.PageNotFoundEntry.objects.all",
"wagtail_managed404.models.PageNotFoundEntry.objects.filter",
"django.test.Client"
]
| [((278, 286), 'django.test.Client', 'Client', ([], {}), '()\n', (284, 286), False, 'from django.test import Client\n'), ((430, 455), 'wagtail.core.models.Page.objects.get', 'Page.objects.get', ([], {'depth': '(2)'}), '(depth=2)\n', (446, 455), False, 'from wagtail.core.models import Page\n'), ((1154, 1208), 'wagtail_managed404.models.PageNotFoundEntry.objects.filter', 'PageNotFoundEntry.objects.filter', ([], {'url': 'self.invalid_url'}), '(url=self.invalid_url)\n', (1186, 1208), False, 'from wagtail_managed404.models import PageNotFoundEntry\n'), ((501, 532), 'wagtail_managed404.models.PageNotFoundEntry.objects.all', 'PageNotFoundEntry.objects.all', ([], {}), '()\n', (530, 532), False, 'from wagtail_managed404.models import PageNotFoundEntry\n'), ((770, 801), 'wagtail_managed404.models.PageNotFoundEntry.objects.all', 'PageNotFoundEntry.objects.all', ([], {}), '()\n', (799, 801), False, 'from wagtail_managed404.models import PageNotFoundEntry\n')] |
"""
Kronos: A simple scheduler for graduate training programme
Entities: User, Schedule, Rotation
"""
from operator import itemgetter
from datetime import datetime, timedelta
def getRotationCapacity(rotationId, startDate, endDate, assignments):
""" Calculate number of users assigned to a particular rotation during the specified duration
"""
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
# Weeks involved during the rotation
weeks = [(start + timedelta(weeks=x)).strftime("%W%Y") for x in range(0, duration)]
capacity = sum(itemgetter(*weeks)(assignments[rotationId][0][0]))
return capacity
def score_assignment(
assignments,
solution,
earliestAvailableDate,
core_rotations=["PMO", "PE", "SE", "PM"],
rotation_duration={
"PMO": 12,
"PE": 12,
"SE": 12,
"PM": 12,
"SYS": 12,
"ARC": 12,
"ANA": 12,
},
):
""" Calculate loss function for suggested solution (negative = better)
Parameters:
assignments (dict): global assignment object by rotation
solution (dict): rotation assignment for a user
earliestAvailableDate (date): earliest date where a user can be assigned a rotation
core_rotations (list): rotation that should be completed first
rotation_duration (dict): duration of each rotation
"""
print(solution)
# SOFT CONSTRAINT 1 - Core rotations should be completed in the first 4 rotations if possible
core_first_loss = sum(
[
-3 if x[0] in core_rotations else 0
for x in solution
if int(x[1]) <= len(core_rotations)
]
)
# SOFT CONSTRAINT 2 - External Assignment must be assigned last
external_assignment_loss = (
99 if "EXT" in [x[0] for x in solution] and solution[-1][0] != "EXT" else 0
)
# Calculate timing of each rotation from solution
solution = [
(
x[0],
rotation_duration[x[0]]
+ (sum([rotation_duration[x[0]] for x in solution[:i]]) if i != 0 else 0),
)
for i, x in enumerate(solution)
]
startDate = earliestAvailableDate
schedule = []
for x in solution:
endDate = startDate + timedelta(weeks=x[1]) - timedelta(days=1)
# Make sure the date falls on weekday
if endDate.weekday() >= 5:
endDate -= timedelta(endDate.weekday() - 4)
schedule.append(
(x[0], startDate.strftime("%d%m%Y"), endDate.strftime("%d%m%Y"))
)
startDate += timedelta(weeks=x[1])
spread_first_loss = sum(
[getRotationCapacity(x[0], x[1], x[2], assignments) for x in schedule]
)
loss = core_first_loss + external_assignment_loss + spread_first_loss
return loss
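# Illustrative input shapes for score_assignment (added for clarity, values
# are made up): ``solution`` is a list of (rotationId, rotation_number) pairs
# in the order the rotations are taken, e.g.
#   [("PMO", 1), ("PE", 2), ("SE", 3), ("PM", 4), ("SYS", 5)]
# ``assignments`` follows the per-rotation structure built by
# ``schedule2assignments`` below.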
def schedule2assignments(schedule):
""" Convert schedule object to assignment object
"""
rotations = {}
for userId, userSchedule in schedule.items():
for rotation in userSchedule:
id = rotation["rotationId"]
if id not in rotations:
rotations[id] = [[{}], []]
print(rotations[id][0][0])
startDate, endDate = itemgetter("startDate", "endDate")(rotation)
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
for i in range(duration):
date = (start + timedelta(weeks=i)).strftime("%W%Y")
if date not in rotations[id][0][0]:
rotations[id][0][0][date] = 0
rotations[id][0][0][date] += 1
rotations[id][1].append((userId, startDate, endDate))
sortedDate = sorted(list(rotations[id][0][0].keys()))
if len(rotations[id][0]) < 2:
rotations[id][0].append(sortedDate[0])
rotations[id][0].append(sortedDate[-1])
elif sortedDate[0] < rotations[id][0][1]:
rotations[id][0][1] = sortedDate[0]
elif len(rotations[id][0]) > 2 and sortedDate[-1] > rotations[id][0][2]:
rotations[id][0][2] = sortedDate[-1]
print(rotations)
return rotations
def assignments2schedule(assignments):
""" Convert assignment object to overall schedule
"""
users = {}
for rotationId, rotationInfo in assignments.items():
for userId, userAssignment in rotationInfo[1].items():
if userId not in users:
users[userId] = []
users[userId].append(
{
"rotationId": rotationId,
"startDate": userAssignment[0],
"endDate": userAssignment[1],
}
)
print(users)
return users
def generateUserSchedule(user, assignments, scoring_function):
""" Generate most optimal user schedule
Parameters:
user (object): User
assignments (dict): Time-bounded assignments
scoring_function (function): scoring function to rank possible assignments
Returns:
schedule (list): list of rotations
"""
return [{"rotationId": "PMO", "startDate": "012018"}]
def getOverallSchedule(users):
""" Generate overall schedule from individual user's schedule
Parameters:
users (list): list of Users
Returns:
schedule (dict): overall assignments
"""
return {}
def getConflictingAssignments(schedule):
""" Get list of assignments which exceeded rotation capacity
Parameters:
schedule (dict): overall assignments
Returns:
confictingAssignmentsByRotation (dict): overall schedule with conflicting assignments
"""
return {}
if __name__ == "__main__":
pass
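    # Illustrative conversion (hypothetical data): turn a one-user schedule into
    # the per-rotation assignment structure used by the helpers above.
    # "01012018" -> "26032018" is an exactly 12-week PMO slot.
    demo_schedule = {
        "user1": [
            {"rotationId": "PMO", "startDate": "01012018", "endDate": "26032018"},
        ]
    }
    schedule2assignments(demo_schedule)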
| [
"datetime.datetime.strptime",
"operator.itemgetter",
"datetime.timedelta"
]
| [((367, 405), 'datetime.datetime.strptime', 'datetime.strptime', (['startDate', '"""%d%m%Y"""'], {}), "(startDate, '%d%m%Y')\n", (384, 405), False, 'from datetime import datetime, timedelta\n'), ((416, 452), 'datetime.datetime.strptime', 'datetime.strptime', (['endDate', '"""%d%m%Y"""'], {}), "(endDate, '%d%m%Y')\n", (433, 452), False, 'from datetime import datetime, timedelta\n'), ((2640, 2661), 'datetime.timedelta', 'timedelta', ([], {'weeks': 'x[1]'}), '(weeks=x[1])\n', (2649, 2661), False, 'from datetime import datetime, timedelta\n'), ((646, 664), 'operator.itemgetter', 'itemgetter', (['*weeks'], {}), '(*weeks)\n', (656, 664), False, 'from operator import itemgetter\n'), ((2352, 2369), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2361, 2369), False, 'from datetime import datetime, timedelta\n'), ((3329, 3367), 'datetime.datetime.strptime', 'datetime.strptime', (['startDate', '"""%d%m%Y"""'], {}), "(startDate, '%d%m%Y')\n", (3346, 3367), False, 'from datetime import datetime, timedelta\n'), ((3386, 3422), 'datetime.datetime.strptime', 'datetime.strptime', (['endDate', '"""%d%m%Y"""'], {}), "(endDate, '%d%m%Y')\n", (3403, 3422), False, 'from datetime import datetime, timedelta\n'), ((2328, 2349), 'datetime.timedelta', 'timedelta', ([], {'weeks': 'x[1]'}), '(weeks=x[1])\n', (2337, 2349), False, 'from datetime import datetime, timedelta\n'), ((3264, 3298), 'operator.itemgetter', 'itemgetter', (['"""startDate"""', '"""endDate"""'], {}), "('startDate', 'endDate')\n", (3274, 3298), False, 'from operator import itemgetter\n'), ((561, 579), 'datetime.timedelta', 'timedelta', ([], {'weeks': 'x'}), '(weeks=x)\n', (570, 579), False, 'from datetime import datetime, timedelta\n'), ((3546, 3564), 'datetime.timedelta', 'timedelta', ([], {'weeks': 'i'}), '(weeks=i)\n', (3555, 3564), False, 'from datetime import datetime, timedelta\n')] |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import contextlib
import sys
from pylint.utils import utils
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith("--"):
try:
option, val = arg[2:].split("=", 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
except KeyError:
i += 1
else:
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith("-"):
msg = "Option %s expects a value" % option
raise ArgumentPreprocessingError(msg)
val = args[i]
del args[i]
elif not takearg and val is not None:
msg = "Option %s doesn't expects a value" % option
raise ArgumentPreprocessingError(msg)
cb(option, val)
else:
i += 1
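def _example_preprocess_options():
    """Illustrative sketch (not part of pylint): pull a single rcfile-style
    option out of an argument list before regular parsing. The option name
    and callback below are made up for demonstration.
    """
    found = {}
    def record(option, value):
        found[option] = value
    args = ["--rcfile=custom.rc", "module.py"]
    preprocess_options(args, {"rcfile": (record, True)})
    # now args == ["module.py"] and found == {"rcfile": "custom.rc"}
    return args, found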
def _patch_sys_path(args):
original = list(sys.path)
changes = []
seen = set()
for arg in args:
path = utils.get_python_path(arg)
if path not in seen:
changes.append(path)
seen.add(path)
sys.path[:] = changes + sys.path
return original
@contextlib.contextmanager
def fix_import_path(args):
"""Prepare sys.path for running the linter checks.
Within this context, each of the given arguments is importable.
Paths are added to sys.path in corresponding order to the arguments.
We avoid adding duplicate directories to sys.path.
`sys.path` is reset to its original value upon exiting this context.
"""
original = _patch_sys_path(args)
try:
yield
finally:
sys.path[:] = original
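def _example_fix_import_path(paths):
    """Illustrative sketch (not part of pylint): run a block with the given
    paths importable and restore ``sys.path`` afterwards. ``paths`` is a
    caller-supplied list of file or directory paths.
    """
    with fix_import_path(paths):
        return list(sys.path)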
| [
"pylint.utils.utils.get_python_path"
]
| [((1694, 1720), 'pylint.utils.utils.get_python_path', 'utils.get_python_path', (['arg'], {}), '(arg)\n', (1715, 1720), False, 'from pylint.utils import utils\n')] |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from django.test.utils import override_settings
from django.urls import reverse
from myuw.test.api import MyuwApiTest
@override_settings(
RESTCLIENTS_ADMIN_AUTH_MODULE='rc_django.tests.can_proxy_restclient')
class RestSearchViewTest(MyuwApiTest):
def test_post(self):
self.set_user('javerage')
# hfs
url = reverse("myuw_rest_search", args=["hfs", "accounts"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, "/restclients/view/hfs/myuw/v1/javerage")
# bookstore
url = reverse("myuw_rest_search", args=["book", "index"])
response = self.client.post(url, {
"sln1": "123", "quarter": "spring", "returnlink": "t"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/book/uw/json_utf8_202007.ubs%3F"
"quarter=spring&sln1=123&returnlink=t"))
# myplan
url = reverse("myuw_rest_search", args=["myplan", "index"])
response = self.client.post(url, {
"uwregid": "ABC", "year": "2013", "quarter": "spring"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/myplan/student/api/plan/v1/2013,spring,1,ABC")
# libraries
url = reverse("myuw_rest_search", args=["libraries", "accounts"])
response = self.client.post(url, {"id": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/libraries/mylibinfo/v1/?id=javerage")
# iasystem
url = reverse("myuw_rest_search", args=[
"iasystem_uw", "uw/api/v1/evaluation"])
response = self.client.post(url, {"student_id": "123456"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/iasystem_uw/api/" +
"v1/evaluation?student_id=123456"))
# uwnetid
url = reverse("myuw_rest_search", args=["uwnetid", "password"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/javerage/password")
url = reverse("myuw_rest_search", args=["uwnetid", "subscription"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/" +
"javerage/subscription/60,64,105")
# grad
url = reverse("myuw_rest_search", args=[
"grad", "services/students/v1/api/committee"])
response = self.client.post(url, {
"id": "12345", "csrfmiddlewaretoken": "<PASSWORD>"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/grad/services/" +
"students/v1/api/committee?id=12345"))
# notices
url = reverse("myuw_rest_search", args=["sws", "notices"])
response = self.client.post(url, {
"uwregid": "12345678123456781234567812345678",
"csrfmiddlewaretoken": "<PASSWORD>"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/sws/student/v5/notice/" +
"12345678123456781234567812345678.json"))
# upass
url = reverse("myuw_rest_search", args=["upass", "index"])
response = self.client.post(url, {
"uwnetid": "bill",
"csrfmiddlewaretoken": "<PASSWORD>"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/upass/MyUWUpass/MyUWUpass.aspx%3Fid=bill")
| [
"django.test.utils.override_settings",
"django.urls.reverse"
]
| [((233, 325), 'django.test.utils.override_settings', 'override_settings', ([], {'RESTCLIENTS_ADMIN_AUTH_MODULE': '"""rc_django.tests.can_proxy_restclient"""'}), "(RESTCLIENTS_ADMIN_AUTH_MODULE=\n 'rc_django.tests.can_proxy_restclient')\n", (250, 325), False, 'from django.test.utils import override_settings\n'), ((454, 507), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['hfs', 'accounts']"}), "('myuw_rest_search', args=['hfs', 'accounts'])\n", (461, 507), False, 'from django.urls import reverse\n'), ((755, 806), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['book', 'index']"}), "('myuw_rest_search', args=['book', 'index'])\n", (762, 806), False, 'from django.urls import reverse\n'), ((1160, 1213), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['myplan', 'index']"}), "('myuw_rest_search', args=['myplan', 'index'])\n", (1167, 1213), False, 'from django.urls import reverse\n'), ((1542, 1601), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['libraries', 'accounts']"}), "('myuw_rest_search', args=['libraries', 'accounts'])\n", (1549, 1601), False, 'from django.urls import reverse\n'), ((1870, 1943), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['iasystem_uw', 'uw/api/v1/evaluation']"}), "('myuw_rest_search', args=['iasystem_uw', 'uw/api/v1/evaluation'])\n", (1877, 1943), False, 'from django.urls import reverse\n'), ((2249, 2306), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['uwnetid', 'password']"}), "('myuw_rest_search', args=['uwnetid', 'password'])\n", (2256, 2306), False, 'from django.urls import reverse\n'), ((2566, 2627), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['uwnetid', 'subscription']"}), "('myuw_rest_search', args=['uwnetid', 'subscription'])\n", (2573, 2627), False, 'from django.urls import reverse\n'), ((2933, 3018), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['grad', 'services/students/v1/api/committee']"}), "('myuw_rest_search', args=['grad', 'services/students/v1/api/committee']\n )\n", (2940, 3018), False, 'from django.urls import reverse\n'), ((3361, 3413), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['sws', 'notices']"}), "('myuw_rest_search', args=['sws', 'notices'])\n", (3368, 3413), False, 'from django.urls import reverse\n'), ((3801, 3853), 'django.urls.reverse', 'reverse', (['"""myuw_rest_search"""'], {'args': "['upass', 'index']"}), "('myuw_rest_search', args=['upass', 'index'])\n", (3808, 3853), False, 'from django.urls import reverse\n')] |
from copy import copy, deepcopy
import sqlite3
from hashlib import md5
import time
import os
import os.path as osp
from base64 import b64encode, b64decode
from zlib import compress, decompress
import itertools as it
import logging
# instead of pickle we use dill, so we can save dynamically defined
# classes
import dill
from wepy.sim_manager import Manager
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.snapshot import SimApparatus, SimSnapshot
from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri
class OrchestratorError(Exception):
""" """
pass
class Orchestrator():
""" """
# we freeze the pickle protocol for making hashes, because we care
# more about stability than efficiency of newer versions
HASH_PICKLE_PROTOCOL = 3
DEFAULT_WORKDIR = Configuration.DEFAULT_WORKDIR
DEFAULT_CONFIG_NAME = Configuration.DEFAULT_CONFIG_NAME
DEFAULT_NARRATION = Configuration.DEFAULT_NARRATION
DEFAULT_MODE = Configuration.DEFAULT_MODE
DEFAULT_CHECKPOINT_FILENAME = "checkpoint.orch.sqlite"
ORCH_FILENAME_TEMPLATE = "{config}{narration}.orch.sqlite"
    # the default way to open up the whole parent database
DEFAULT_ORCHESTRATION_MODE = 'x'
# mode to open the individual kv stores on the parent database
KV_MODE = 'r+'
# default timeout for connecting to a database
SQLITE3_DEFAULT_TIMEOUT = 5
# the fields to return (and their order) as a record for a run
# query
RUN_SELECT_FIELDS = ('last_cycle_idx', 'config_hash')
def __init__(self, orch_path=None,
mode='x',
append_only=False,
):
self._mode = mode
self._append_only = append_only
# handle the path and convert to a proper URI for the database
# given the path and the mode
self._db_uri = gen_uri(orch_path, mode)
        # run table: start_hash, end_hash, config_hash, last_cycle_idx
# get a raw connection to the database
self._db = sqlite3.connect(self.db_uri, uri=True,
timeout=self.SQLITE3_DEFAULT_TIMEOUT)
self._closed = False
# set isolation level to autocommit
self._db.isolation_level = None
        # we can use read_uncommitted only in append_only mode (no
        # updates) because you never have to worry about dirty reads
        # since you can't update
        if self.append_only:
            self._db.execute("PRAGMA read_uncommitted=1")
# we make a table for the run data, if it doesn't already
# exist
c = self._db.cursor().execute(self.create_run_table_query)
# initialize or open each of the separate KV-stores (tables in
# the same SQLite3 database)
# change the mode for the KV stores since we already created the database
# metadata: default init walkers, default apparatus, default
# configuration
self.metadata_kv = KV(db_url=self.db_uri,
table='meta',
mode='a',
value_types=None,
append_only=self.append_only)
# snapshots
self.snapshot_kv = KV(db_url=self.db_uri,
table='snapshots',
primary_key='snaphash',
value_name='snapshot',
mode='a',
append_only=self.append_only)
# configurations
self.configuration_kv = KV(db_url=self.db_uri,
table='configurations',
primary_key='config_hash',
value_name='config',
mode='a',
append_only=self.append_only)
@property
def mode(self):
return self._mode
@property
def append_only(self):
return self._append_only
def close(self):
if self._closed == True:
raise IOError("The database connection is already closed")
else:
# close all the connections
self.metadata_kv.close()
self.configuration_kv.close()
self.snapshot_kv.close()
self._db.close()
self._closed = True
@property
def db_uri(self):
return self._db_uri
@property
def orch_path(self):
# if it is not an in-memory database we parse off the path and
# return that
if self.db_uri == SQLITE3_INMEMORY_URI:
return None
else:
# URIs have the following form: protocol:url?query
# destructure the URI
_, tail = self.db_uri.split(':')
if len(tail.split('?')) > 1:
url, _ = tail.split('?')
else:
url = tail
return url
@classmethod
def serialize(cls, snapshot):
"""Serialize a snapshot to a compressed, encoded, pickle string
representation.
Currently uses the dill module for pickling because the base
pickle module is inadequate. However, it is mostly compatible
        and can be read natively with pickle, although this usage is
        not officially supported. Instead use the `deserialize` method.
Also compresses with default zlib compression and is encoded
in base64.
The object will always have a deepcopy performed on it so that
all of the extraneous references to it are avoided since there
is no (AFAIK) way to make sure all references to an object are
deleted.
        NOTE: Perhaps there is a way to avoid the copy; that should be
        done (and tested) to see whether it still provides stable
        pickles (i.e. pickles that always hash to the same value)
        without the overhead of copying large objects.
Parameters
----------
snapshot : SimSnapshot object
The snapshot of the simulation you want to serialize.
Returns
-------
serial_str : str
Serialized string of the snapshot object
"""
serial_str = b64encode(
compress(
dill.dumps(
deepcopy(snapshot),
protocol=cls.HASH_PICKLE_PROTOCOL,
recurse=True)
)
)
return serial_str
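    # Illustrative round trip (added for clarity; ``snapshot`` is any picklable
    # object supplied by the caller):
    #
    #   serial_str = Orchestrator.serialize(snapshot)
    #   restored = Orchestrator.deserialize(serial_str)
    #   # ``restored`` is an equivalent deep copy of the original snapshot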
# core methods for serializing python objects, used for snapshots,
# apparatuses, configurations, and the initial walker list
@classmethod
def deserialize(cls, serial_str):
"""Deserialize an unencoded string snapshot to an object.
Parameters
----------
serial_str : str
Serialized string of the snapshot object
Returns
-------
snapshot : SimSnapshot object
Simulation snapshot object
"""
return dill.loads(decompress(b64decode(serial_str)))
# defaults getters and setters
def set_default_sim_apparatus(self, sim_apparatus):
# serialize the apparatus and then set it
serial_app = self.serialize(sim_apparatus)
self.metadata_kv['default_sim_apparatus'] = serial_app
def set_default_init_walkers(self, init_walkers):
# serialize the apparatus and then set it
serial_walkers = self.serialize(init_walkers)
self.metadata_kv['default_init_walkers'] = serial_walkers
def set_default_configuration(self, configuration):
# serialize the apparatus and then set it
serial_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serial_config)
self.metadata_kv['default_configuration_hash'] = config_hash
self.configuration_kv[config_hash] = serial_config
def set_default_snapshot(self, snapshot):
snaphash = self.add_snapshot(snapshot)
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = snaphash
return snaphash
def gen_default_snapshot(self):
# generate the snapshot
sim_start_hash = self.gen_start_snapshot(self.get_default_init_walkers())
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = sim_start_hash
return sim_start_hash
def get_default_sim_apparatus(self):
return self.deserialize(self.metadata_kv['default_sim_apparatus'])
def get_default_init_walkers(self):
return self.deserialize(self.metadata_kv['default_init_walkers'])
def get_default_configuration(self):
config_hash = self.metadata_kv['default_configuration_hash']
return self.get_configuration(config_hash)
def get_default_configuration_hash(self):
return self.metadata_kv['default_configuration_hash']
def get_default_snapshot(self):
start_hash = self.metadata_kv['default_snapshot_hash']
return self.get_snapshot(start_hash)
def get_default_snapshot_hash(self):
return self.metadata_kv['default_snapshot_hash']
@classmethod
def hash_snapshot(cls, serial_str):
"""
Parameters
----------
serial_str :
Returns
-------
"""
return md5(serial_str).hexdigest()
def get_snapshot(self, snapshot_hash):
"""Returns a copy of a snapshot.
Parameters
----------
snapshot_hash :
Returns
-------
"""
return self.deserialize(self.snapshot_kv[snapshot_hash])
def get_configuration(self, config_hash):
"""Returns a copy of a snapshot.
Parameters
----------
config_hash :
Returns
-------
"""
return self.deserialize(self.configuration_kv[config_hash])
@property
def snapshot_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.snapshot_kv.keys())
@property
def configuration_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.configuration_kv.keys())
def add_snapshot(self, snapshot):
"""
Parameters
----------
snapshot :
Returns
-------
"""
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# check that the hash is not already in the snapshots
        if snaphash in self.snapshot_hashes:
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serialized_snapshot
return snaphash
def add_serial_snapshot(self, serial_snapshot):
# get the hash of the snapshot
snaphash = self.hash_snapshot(serial_snapshot)
# check that the hash is not already in the snapshots
        if snaphash in self.snapshot_hashes:
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serial_snapshot
return snaphash
def gen_start_snapshot(self, init_walkers):
"""
Parameters
----------
init_walkers :
Returns
-------
"""
# make a SimSnapshot object using the initial walkers and
start_snapshot = SimSnapshot(init_walkers, self.get_default_sim_apparatus())
# save the snapshot, and generate its hash
sim_start_md5 = self.add_snapshot(start_snapshot)
return sim_start_md5
@property
def default_snapshot_hash(self):
""" """
return self.metadata_kv['default_snapshot_hash']
@property
def default_snapshot(self):
""" """
return self.get_snapshot(self.default_snapshot_hash)
def snapshot_registered(self, snapshot):
"""Check whether a snapshot is already in the database, based on the
hash of it.
This serializes the snapshot so may be slow.
Parameters
----------
snapshot : SimSnapshot object
The snapshot object you want to query for.
Returns
-------
"""
# serialize and hash the snapshot
snaphash = self.hash_snapshot(self.serialize(snapshot))
# then check it
return self.snapshot_hash_registered(snaphash)
def snapshot_hash_registered(self, snapshot_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if snapshot_hash == h else False for h in self.snapshot_hashes]):
return True
else:
return False
def configuration_hash_registered(self, config_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if config_hash == h else False for h in self.configuration_hashes]):
return True
else:
return False
### run methods
def add_configuration(self, configuration):
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# check that the hash is not already in the snapshots
        if config_hash in self.configuration_hashes:
# just skip the rest of the function and return the hash
return config_hash
# save the snapshot in the KV store
self.configuration_kv[config_hash] = serialized_config
return config_hash
def add_serial_configuration(self, serial_configuration):
# get the hash of the configuration
snaphash = self.hash_snapshot(serial_configuration)
# check that the hash is not already in the configurations
        if snaphash in self.configuration_hashes:
# just skip the rest of the function and return the hash
return snaphash
# save the configuration in the KV store
self.configuration_kv[snaphash] = serial_configuration
return snaphash
@property
def create_run_table_query(self):
create_run_table_query = """
CREATE TABLE IF NOT EXISTS runs
(start_hash TEXT NOT NULL,
end_hash TEXT NOT NULL,
config_hash NOT NULL,
last_cycle_idx INTEGER NOT NULL,
PRIMARY KEY (start_hash, end_hash))
"""
return create_run_table_query
@property
def add_run_record_query(self):
add_run_row_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
return add_run_row_query
@property
def update_run_record_query(self):
q = """
UPDATE runs
SET config_hash = ?,
last_cycle_idx = ?
WHERE start_hash=? AND end_hash=?
"""
return q
@property
def delete_run_record_query(self):
q = """
DELETE FROM runs
WHERE start_hash=? AND end_hash=?
"""
return q
def _add_run_record(self, start_hash, end_hash, configuration_hash, cycle_idx):
params = (start_hash, end_hash, configuration_hash, cycle_idx)
# do it as a transaction
c = self._db.cursor()
# run the insert
c.execute(self.add_run_record_query, params)
def _delete_run_record(self, start_hash, end_hash):
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(self.delete_run_record_query, params)
def _update_run_record(self, start_hash, end_hash, new_config_hash, new_last_cycle_idx):
params = (new_config_hash, new_last_cycle_idx, start_hash, end_hash)
# do it as a transaction
c = self._db.cursor()
# run the update
c.execute(self.update_run_record_query, params)
def register_run(self, start_hash, end_hash, config_hash, cycle_idx):
"""
Parameters
----------
start_hash :
end_hash :
config_hash :
cycle_idx : int
The cycle of the simulation run the checkpoint was generated for.
Returns
-------
"""
# check that the hashes are for snapshots in the orchestrator
# if one is not registered raise an error
if not self.snapshot_hash_registered(start_hash):
raise OrchestratorError(
"snapshot start_hash {} is not registered with the orchestrator".format(
start_hash))
if not self.snapshot_hash_registered(end_hash):
raise OrchestratorError(
"snapshot end_hash {} is not registered with the orchestrator".format(
end_hash))
if not self.configuration_hash_registered(config_hash):
raise OrchestratorError(
"config hash {} is not registered with the orchestrator".format(
config_hash))
# save the configuration and get it's id
self._add_run_record(start_hash, end_hash, config_hash, cycle_idx)
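    # Illustrative call order (sketch with hypothetical objects): both endpoint
    # snapshots and the configuration must already be stored, e.g.
    #
    #   start_hash = orch.add_snapshot(start_snapshot)
    #   end_hash = orch.add_snapshot(end_snapshot)
    #   config_hash = orch.add_configuration(configuration)
    #   orch.register_run(start_hash, end_hash, config_hash, cycle_idx=100)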
def get_run_records(self):
get_run_record_query = """
SELECT *
FROM runs
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
cursor = self._db.cursor()
cursor.execute(get_run_record_query)
records = cursor.fetchall()
return records
def get_run_record(self, start_hash, end_hash):
get_run_record_query = """
SELECT {fields}
FROM runs
WHERE start_hash=? AND end_hash=?
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(get_run_record_query, params)
record = cursor.fetchone()
return record
def run_last_cycle_idx(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
last_cycle_idx = record[self.RUN_SELECT_FIELDS.index('last_cycle_idx')]
return last_cycle_idx
def run_configuration(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
# get the configuration object and deserialize it
return self.deserialize(self.configuration_kv[config_hash])
def run_configuration_hash(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
return config_hash
def run_hashes(self):
return [(rec[0], rec[1]) for rec in self.get_run_records()]
def run_continues(self, start_hash, end_hash):
"""Given a start hash and end hash for a run, find the run that this
continues.
Parameters
----------
start_hash :
end_hash :
Returns
-------
        (run_start_hash, run_end_hash) tuple of the continued run, or
        None if no stored run ends at `start_hash`
"""
# loop through the runs in this orchestrator until we find one
# where the start_hash matches the end hash
runs = self.run_hashes()
run_idx = 0
while True:
run_start_hash, run_end_hash = runs[run_idx]
# if the start hash of the queried run is the same as the
# end hash for this run we have found it
if start_hash == run_end_hash:
return (run_start_hash, run_end_hash)
run_idx += 1
# if the index is over the number of runs we quit and
# return None as no match
if run_idx >= len(runs):
return None
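    # Example (sketch): if a stored run ended at hash ``h1`` and a later run
    # was started from ``h1`` and ended at ``h2``, then
    # ``orch.run_continues(h1, h2)`` returns the (start_hash, end_hash) pair of
    # that earlier run, and None when no stored run ends at ``h1``.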
def _init_checkpoint_db(self, start_hash, configuration, checkpoint_dir, mode='x'):
logging.debug("Initializing checkpoint orch database")
# make the checkpoint with the default filename at the checkpoint directory
checkpoint_path = osp.join(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)
# create a new database in the mode specified
logging.debug("Creating checkpoint database")
checkpoint_orch = Orchestrator(checkpoint_path, mode=mode)
# add the starting snapshot, bypassing the serialization stuff
logging.debug("Setting the starting snapshot")
checkpoint_orch.snapshot_kv[start_hash] = self.snapshot_kv[start_hash]
# if we have a new configuration at runtime serialize and
# hash it
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# save the configuration as well
checkpoint_orch.configuration_kv[config_hash] = serialized_config
checkpoint_orch.close()
logging.debug("closing connection to checkpoint database")
return checkpoint_path, config_hash
def _save_checkpoint(self, checkpoint_snapshot, config_hash,
checkpoint_db_path, cycle_idx,
):
"""
Parameters
----------
checkpoint_snapshot :
config_hash :
checkpoint_db_path :
mode :
(Default value = 'wb')
Returns
-------
"""
# orchestrator wrapper to the db
logging.debug("Opening the checkpoint orch database")
checkpoint_orch = Orchestrator(checkpoint_db_path, mode='r+')
# connection to the db
cursor = checkpoint_orch._db.cursor()
# we replicate the code for adding the snapshot here because
# we want it to occur transactionally the delete and add
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(checkpoint_snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# the queries for deleting and inserting the new run record
delete_query = """
DELETE FROM runs
WHERE start_hash=?
AND end_hash=?
"""
insert_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
# if there are any runs in the checkpoint orch remove the
# final snapshot
delete_params = None
if len(checkpoint_orch.run_hashes()) > 0:
start_hash, old_checkpoint_hash = checkpoint_orch.run_hashes()[0]
delete_params = (start_hash, old_checkpoint_hash)
else:
start_hash = list(checkpoint_orch.snapshot_kv.keys())[0]
# the config should already be in the orchestrator db
insert_params = (start_hash, snaphash, config_hash, cycle_idx)
# start this whole process as a transaction so we don't get
# something weird in between
logging.debug("Starting transaction for updating run table in checkpoint")
cursor.execute("BEGIN TRANSACTION")
# add the new one, using a special method for setting inside
# of a transaction
logging.debug("setting the new checkpoint snapshot into the KV")
cursor = checkpoint_orch.snapshot_kv.set_in_tx(cursor, snaphash, serialized_snapshot)
logging.debug("finished")
# if we need to delete the old end of the run snapshot and the
# run record for it
if delete_params is not None:
logging.debug("Old run record needs to be removed")
# remove the old run from the run table
logging.debug("Deleting the old run record")
cursor.execute(delete_query, delete_params)
logging.debug("finished")
# register the new run in the run table
logging.debug("Inserting the new run record")
cursor.execute(insert_query, insert_params)
logging.debug("finished")
# end the transaction
logging.debug("Finishing transaction")
cursor.execute("COMMIT")
logging.debug("Transaction committed")
# we do the removal of the old snapshot outside of the
# transaction since it is slow and can cause timeouts to
# occur. Furthermore, it is okay if it is in the checkpoint as
# the run record is what matters as long as the new checkpoint
# is there.
# delete the old snapshot if we need to
if delete_params is not None:
# WARN: occasionally and for unknown reasons we have found
# that the final checkpoint hash is the same as the one
# before. (The case where the last snapshot is on the same
# cycle as a backup is already covered). So as a last
# resort, we check that they don't have the same hash. If
# they do we don't delete it!
if snaphash != old_checkpoint_hash:
logging.debug("Deleting the old snapshot")
del checkpoint_orch.snapshot_kv[old_checkpoint_hash]
logging.debug("finished")
else:
logging.warn("Final snapshot has same hash as the previous checkpoint. Not deleting the previous one.")
checkpoint_orch.close()
logging.debug("closed the checkpoint orch connection")
@staticmethod
def gen_sim_manager(start_snapshot, configuration):
"""
Parameters
----------
start_snapshot :
configuration :
Returns
-------
"""
# construct the sim manager, in a wepy specific way
sim_manager = Manager(start_snapshot.walkers,
runner=start_snapshot.apparatus.filters[0],
boundary_conditions=start_snapshot.apparatus.filters[1],
resampler=start_snapshot.apparatus.filters[2],
# configuration options
work_mapper=configuration.work_mapper,
reporters=configuration.reporters,
sim_monitor=configuration.monitor,
)
return sim_manager
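    # Note: the construction above assumes the snapshot's apparatus filters
    # are ordered as (runner, boundary_conditions, resampler), which is how
    # the indices [0], [1] and [2] are interpreted. A minimal call is simply:
    #
    #     sim_manager = Orchestrator.gen_sim_manager(snapshot, configuration)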
def run_snapshot_by_time(self, start_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
configuration=None,
configuration_hash=None,
checkpoint_mode='x'):
"""For a finished run continue it but resetting all the state of the
resampler and boundary conditions
Parameters
----------
start_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
configuration :
(Default value = None)
configuration_hash :
(Default value = None)
checkpoint_mode :
(Default value = None)
Returns
-------
"""
# you must have a checkpoint dir if you ask for a checkpoint
# frequency
if checkpoint_freq is not None and checkpoint_dir is None:
raise ValueError("Must provide a directory for the checkpoint file "
"is a frequency is specified")
if configuration_hash is not None and configuration is not None:
raise ValueError("Cannot specify both a hash of an existing configuration"
"and provide a runtime configuration")
        # if no configuration was specified we use the default one
elif (configuration is None) and (configuration_hash is None):
configuration = self.get_default_configuration()
# if a configuration hash was given only then we retrieve that
# configuration since we must pass configurations to the
# checkpoint DB initialization
elif configuration_hash is not None:
configuration = self.configuration_kv[configuration_hash]
# check that the directory for checkpoints exists, and create
# it if it doesn't and isn't already created
if checkpoint_dir is not None:
checkpoint_dir = osp.realpath(checkpoint_dir)
os.makedirs(checkpoint_dir, exist_ok=True)
# if the checkpoint dir is not specified don't create a
# checkpoint db orch
checkpoint_db_path = None
if checkpoint_dir is not None:
logging.debug("Initialization of checkpoint database is requested")
checkpoint_db_path, configuration_hash = self._init_checkpoint_db(start_hash,
configuration,
checkpoint_dir,
mode=checkpoint_mode)
logging.debug("finished initializing checkpoint database")
# get the snapshot and the configuration to use for the sim_manager
start_snapshot = self.get_snapshot(start_hash)
# generate the simulation manager given the snapshot and the
# configuration
sim_manager = self.gen_sim_manager(start_snapshot, configuration)
# handle and process the optional arguments for running simulation
if 'runner' in configuration.apparatus_opts:
runner_opts = configuration.apparatus_opts['runner']
else:
runner_opts = None
# run the init subroutine for the simulation manager
logging.debug("Running sim_manager.init")
sim_manager.init()
# run each cycle manually creating checkpoints when necessary
logging.debug("Starting run loop")
walkers = sim_manager.init_walkers
cycle_idx = 0
start_time = time.time()
while time.time() - start_time < run_time:
logging.debug("Running cycle {}".format(cycle_idx))
# run the cycle
walkers, filters = sim_manager.run_cycle(
walkers,
n_steps,
cycle_idx,
runner_opts=runner_opts,
)
# check to see if a checkpoint is necessary
if (checkpoint_freq is not None):
if (cycle_idx % checkpoint_freq == 0):
logging.debug("Checkpoint is required for this cycle")
# make the checkpoint snapshot
logging.debug("Generating the simulation snapshot")
checkpoint_snapshot = SimSnapshot(walkers, SimApparatus(filters))
# save the checkpoint (however that is implemented)
logging.debug("saving the checkpoint to the database")
self._save_checkpoint(checkpoint_snapshot,
configuration_hash,
checkpoint_db_path,
cycle_idx)
logging.debug("finished saving the checkpoint to the database")
# increase the cycle index for the next cycle
cycle_idx += 1
logging.debug("Finished the run cycle")
# the cycle index was set for the next cycle which didn't run
# so we decrement it
last_cycle_idx = cycle_idx - 1
logging.debug("Running sim_manager.cleanup")
# run the cleanup subroutine
sim_manager.cleanup()
# run the segment given the sim manager and run parameters
end_snapshot = SimSnapshot(walkers, SimApparatus(filters))
logging.debug("Run finished")
# return the things necessary for saving to the checkpoint if
# that is what is wanted later on
return end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx
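    # Illustrative usage sketch (file names and numbers are assumptions):
    # run a registered snapshot for one hour of wall-clock time, writing a
    # checkpoint every 10 cycles.
    #
    #     orch = Orchestrator("project.orch.sqlite", mode='r+')
    #     start_hash = list(orch.snapshot_hashes)[0]
    #     end_snapshot, config_hash, ckpt_db, last_cycle = \
    #         orch.run_snapshot_by_time(start_hash,
    #                                   run_time=3600,  # seconds
    #                                   n_steps=10000,
    #                                   checkpoint_freq=10,
    #                                   checkpoint_dir="checkpoints")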
def orchestrate_snapshot_run_by_time(self, snapshot_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
orchestrator_path=None,
configuration=None,
# these can reparametrize the paths
# for both the orchestrator produced
# files as well as the configuration
work_dir=None,
config_name=None,
narration=None,
mode=None,
# extra kwargs will be passed to the
# configuration.reparametrize method
**kwargs):
"""
Parameters
----------
snapshot_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
orchestrator_path :
(Default value = None)
configuration :
(Default value = None)
        work_dir :
             (Default value = None) Together with config_name, narration and
             mode this reparametrizes the paths for both the orchestrator
             produced files as well as the configuration.
config_name :
(Default value = None)
narration :
(Default value = None)
mode :
(Default value = None)
        **kwargs :
             Extra kwargs will be passed to the configuration.reparametrize
             method.
Returns
-------
"""
        # the orchestrator mode is used for pickling the orchestrator
        # and so must be in bytes mode; it is needed below regardless of
        # whether a file mode was given, so set it unconditionally
        orch_mode = self.DEFAULT_ORCHESTRATION_MODE
# there are two possible uses for the path reparametrizations:
# the configuration and the orchestrator file paths. If both
# of those are explicitly specified by passing in the whole
# configuration object or both of checkpoint_dir,
# orchestrator_path then those reparametrization kwargs will
# not be used. As this is likely not the intention of the user
# we will raise an error. If there is even one use for them no
# error will be raised.
# first check if any reparametrizations were even requested
parametrizations_requested = (True if work_dir is not None else False,
True if config_name is not None else False,
True if narration is not None else False,
True if mode is not None else False,)
# check if there are any available targets for reparametrization
reparametrization_targets = (True if configuration is None else False,
True if checkpoint_dir is None else False,
True if orchestrator_path is None else False)
# if paramatrizations were requested and there are no targets
# we need to raise an error
if any(parametrizations_requested) and not any(reparametrization_targets):
raise OrchestratorError("Reparametrizations were requested but none are possible,"
" due to all possible targets being already explicitly given")
# if any paths were not given and no defaults for path
# parameters we want to fill in the defaults for them. This
# will also fill in any missing parametrizations with defaults
# we do this by just setting the path parameters if they
# aren't set, then later the parametrization targets will be
# tested for if they have been set or not, and if they haven't
# then these will be used to generate paths for them.
if work_dir is None:
work_dir = self.DEFAULT_WORKDIR
if config_name is None:
config_name = self.DEFAULT_CONFIG_NAME
if narration is None:
narration = self.DEFAULT_NARRATION
if mode is None:
mode = self.DEFAULT_MODE
# if no configuration was specified use the default one
if configuration is None:
configuration = self.get_default_configuration()
# reparametrize the configuration with the given path
# parameters and anything else in kwargs. If they are none
# this will have no effect anyhow
logging.debug("Reparametrizing the configuration")
configuration = configuration.reparametrize(work_dir=work_dir,
config_name=config_name,
narration=narration,
mode=mode,
**kwargs)
# make parametric paths for the checkpoint directory and the
# orchestrator pickle to be made, unless they are explicitly given
if checkpoint_dir is None:
# the checkpoint directory will be in the work dir
logging.debug("checkpoint directory defaulted to the work_dir")
checkpoint_dir = work_dir
logging.debug("In the orchestrate run, calling to run_snapshot by time")
# then actually run the simulation with checkpointing. This
# returns the end snapshot and doesn't write out anything to
# orchestrators other than the checkpointing
(end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx) =\
self.run_snapshot_by_time(snapshot_hash, run_time, n_steps,
checkpoint_freq=checkpoint_freq,
checkpoint_dir=checkpoint_dir,
configuration=configuration,
checkpoint_mode=orch_mode)
logging.debug("Finished running snapshot by time")
# if the last cycle in the run was a checkpoint skip this step
# of saving a checkpoint
do_final_checkpoint = True
# make sure the checkpoint_freq is defined before testing it
if checkpoint_freq is not None:
            if last_cycle_idx % checkpoint_freq == 0:
logging.debug("Last cycle saved a checkpoint, no need to save one")
do_final_checkpoint = False
if do_final_checkpoint:
logging.debug("Saving a final checkpoint for the end of the run")
# now that it is finished we save the final snapshot to the
# checkpoint file. This is done transactionally using the
# SQLite transaction functionality (either succeeds or doesn't
# happen) that way we don't have worry about data integrity
# loss. Here we also don't have to worry about other processes
# interacting with the checkpoint which makes it isolated.
self._save_checkpoint(end_snapshot, configuration_hash,
checkpoint_db_path, last_cycle_idx)
logging.debug("Finished saving the final checkpoint for the run")
# then return the final orchestrator
logging.debug("Getting a connection to that orch to retun")
checkpoint_orch = Orchestrator(checkpoint_db_path,
mode='r+',
append_only=True)
return checkpoint_orch
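    # Illustrative usage sketch (paths are assumptions): the orchestrated
    # variant also reparametrizes the configuration's output paths before
    # running and returns the checkpoint orchestrator.
    #
    #     snapshot_hash = list(orch.snapshot_hashes)[0]
    #     ckpt_orch = orch.orchestrate_snapshot_run_by_time(
    #         snapshot_hash, run_time=3600, n_steps=10000,
    #         checkpoint_freq=10,
    #         work_dir="results/run0", narration="run0")
    #     print(ckpt_orch.run_hashes())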
def reconcile_orchestrators(host_path, *orchestrator_paths):
"""
Parameters
----------
template_orchestrator :
*orchestrators :
Returns
-------
"""
if not osp.exists(host_path):
assert len(orchestrator_paths) > 1, \
"If the host path is a new orchestrator, must give at least 2 orchestrators to merge."
# open the host orchestrator at the location which will have all
# of the new things put into it from the other orchestrators. If
# it doesn't already exist it will be created otherwise open
# read-write.
new_orch = Orchestrator(orch_path=host_path,
mode='a',
append_only=True)
# TODO deprecate, if there is no defaults we can't set them since
# the mode is append only, we don't really care about these so
# don't set them, otherwise do some mode logic to figure this out
# and open in write mode and set defaults, then change to append
# only
# # if this is an existing orchestrator copy the default
# # sim_apparatus and init_walkers
# try:
# default_app = new_orch.get_default_sim_apparatus()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_app)
# # same for the initial walkers
# try:
# default_walkers = new_orch.get_default_init_walkers()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_walkers)
for orch_path in orchestrator_paths:
# open it in read-write fail if doesn't exist
orch = Orchestrator(orch_path=orch_path,
mode='r+',
append_only=True)
# add in all snapshots from each orchestrator, by the hash not the
# snapshots themselves, we trust they are correct
for snaphash in orch.snapshot_hashes:
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in new_orch.snapshot_hashes]):
# skip it and move on
continue
# if it is not copy it over without deserializing
new_orch.snapshot_kv[snaphash] = orch.snapshot_kv[snaphash]
# add in the configurations for the runs from each
# orchestrator, by the hash not the snapshots themselves, we
# trust they are correct
for run_id in orch.run_hashes():
config_hash = orch.run_configuration_hash(*run_id)
# check that the hash is not already in the snapshots
if any([True if config_hash == md5 else False for md5 in new_orch.configuration_hashes]):
# skip it and move on
continue
# if it is not set it
new_orch.configuration_kv[config_hash] = orch.configuration_kv[config_hash]
# concatenate the run table with an SQL union from an attached
# database
attached_table_name = "other"
# query to attach the foreign database
attach_query = """
ATTACH '{}' AS {}
""".format(orch_path, attached_table_name)
# query to update the runs tabel with new unique runs
union_query = """
INSERT INTO runs
SELECT * FROM (
SELECT * FROM {}.runs
EXCEPT
SELECT * FROM runs
)
""".format(attached_table_name)
# query to detach the table
detach_query = """
DETACH {}
""".format(attached_table_name)
# then run the queries
cursor = new_orch._db.cursor()
try:
cursor.execute('BEGIN TRANSACTION')
cursor.execute(attach_query)
cursor.execute(union_query)
cursor.execute('COMMIT')
cursor.execute(detach_query)
except:
cursor.execute('COMMIT')
import pdb; pdb.set_trace()
cursor.execute("SELECT * FROM (SELECT * FROM other.runs EXCEPT SELECT * FROM runs)")
recs = cursor.fetchall()
return new_orch
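# Illustrative usage sketch (file names are assumptions): merging the
# checkpoint databases of several runs into one host orchestrator.
#
#     merged = reconcile_orchestrators("all_runs.orch.sqlite",
#                                      "run0/checkpoint.orch.sqlite",
#                                      "run1/checkpoint.orch.sqlite")
#     print(merged.run_hashes())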
| [
"os.path.exists",
"logging.warn",
"logging.debug",
"sqlite3.connect",
"os.makedirs",
"hashlib.md5",
"wepy.util.kv.KV",
"wepy.sim_manager.Manager",
"wepy.util.kv.gen_uri",
"os.path.join",
"base64.b64decode",
"os.path.realpath",
"wepy.orchestration.snapshot.SimApparatus",
"pdb.set_trace",
"copy.deepcopy",
"time.time"
]
| [((1857, 1881), 'wepy.util.kv.gen_uri', 'gen_uri', (['orch_path', 'mode'], {}), '(orch_path, mode)\n', (1864, 1881), False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((2022, 2098), 'sqlite3.connect', 'sqlite3.connect', (['self.db_uri'], {'uri': '(True)', 'timeout': 'self.SQLITE3_DEFAULT_TIMEOUT'}), '(self.db_uri, uri=True, timeout=self.SQLITE3_DEFAULT_TIMEOUT)\n', (2037, 2098), False, 'import sqlite3\n'), ((2967, 3065), 'wepy.util.kv.KV', 'KV', ([], {'db_url': 'self.db_uri', 'table': '"""meta"""', 'mode': '"""a"""', 'value_types': 'None', 'append_only': 'self.append_only'}), "(db_url=self.db_uri, table='meta', mode='a', value_types=None,\n append_only=self.append_only)\n", (2969, 3065), False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((3232, 3364), 'wepy.util.kv.KV', 'KV', ([], {'db_url': 'self.db_uri', 'table': '"""snapshots"""', 'primary_key': '"""snaphash"""', 'value_name': '"""snapshot"""', 'mode': '"""a"""', 'append_only': 'self.append_only'}), "(db_url=self.db_uri, table='snapshots', primary_key='snaphash',\n value_name='snapshot', mode='a', append_only=self.append_only)\n", (3234, 3364), False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((3569, 3707), 'wepy.util.kv.KV', 'KV', ([], {'db_url': 'self.db_uri', 'table': '"""configurations"""', 'primary_key': '"""config_hash"""', 'value_name': '"""config"""', 'mode': '"""a"""', 'append_only': 'self.append_only'}), "(db_url=self.db_uri, table='configurations', primary_key='config_hash',\n value_name='config', mode='a', append_only=self.append_only)\n", (3571, 3707), False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((20664, 20718), 'logging.debug', 'logging.debug', (['"""Initializing checkpoint orch database"""'], {}), "('Initializing checkpoint orch database')\n", (20677, 20718), False, 'import logging\n'), ((20830, 20888), 'os.path.join', 'osp.join', (['checkpoint_dir', 'self.DEFAULT_CHECKPOINT_FILENAME'], {}), '(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)\n', (20838, 20888), True, 'import os.path as osp\n'), ((20952, 20997), 'logging.debug', 'logging.debug', (['"""Creating checkpoint database"""'], {}), "('Creating checkpoint database')\n", (20965, 20997), False, 'import logging\n'), ((21145, 21191), 'logging.debug', 'logging.debug', (['"""Setting the starting snapshot"""'], {}), "('Setting the starting snapshot')\n", (21158, 21191), False, 'import logging\n'), ((21631, 21689), 'logging.debug', 'logging.debug', (['"""closing connection to checkpoint database"""'], {}), "('closing connection to checkpoint database')\n", (21644, 21689), False, 'import logging\n'), ((22204, 22257), 'logging.debug', 'logging.debug', (['"""Opening the checkpoint orch database"""'], {}), "('Opening the checkpoint orch database')\n", (22217, 22257), False, 'import logging\n'), ((23746, 23820), 'logging.debug', 'logging.debug', (['"""Starting transaction for updating run table in checkpoint"""'], {}), "('Starting transaction for updating run table in checkpoint')\n", (23759, 23820), False, 'import logging\n'), ((23970, 24034), 'logging.debug', 'logging.debug', (['"""setting the new checkpoint snapshot into the KV"""'], {}), "('setting the new checkpoint snapshot into the KV')\n", (23983, 24034), False, 'import logging\n'), ((24137, 24162), 'logging.debug', 'logging.debug', (['"""finished"""'], {}), "('finished')\n", (24150, 24162), False, 'import logging\n'), ((24627, 24672), 'logging.debug', 'logging.debug', (['"""Inserting the new run record"""'], {}), 
"('Inserting the new run record')\n", (24640, 24672), False, 'import logging\n'), ((24733, 24758), 'logging.debug', 'logging.debug', (['"""finished"""'], {}), "('finished')\n", (24746, 24758), False, 'import logging\n'), ((24798, 24836), 'logging.debug', 'logging.debug', (['"""Finishing transaction"""'], {}), "('Finishing transaction')\n", (24811, 24836), False, 'import logging\n'), ((24878, 24916), 'logging.debug', 'logging.debug', (['"""Transaction committed"""'], {}), "('Transaction committed')\n", (24891, 24916), False, 'import logging\n'), ((26082, 26136), 'logging.debug', 'logging.debug', (['"""closed the checkpoint orch connection"""'], {}), "('closed the checkpoint orch connection')\n", (26095, 26136), False, 'import logging\n'), ((26468, 26775), 'wepy.sim_manager.Manager', 'Manager', (['start_snapshot.walkers'], {'runner': 'start_snapshot.apparatus.filters[0]', 'boundary_conditions': 'start_snapshot.apparatus.filters[1]', 'resampler': 'start_snapshot.apparatus.filters[2]', 'work_mapper': 'configuration.work_mapper', 'reporters': 'configuration.reporters', 'sim_monitor': 'configuration.monitor'}), '(start_snapshot.walkers, runner=start_snapshot.apparatus.filters[0],\n boundary_conditions=start_snapshot.apparatus.filters[1], resampler=\n start_snapshot.apparatus.filters[2], work_mapper=configuration.\n work_mapper, reporters=configuration.reporters, sim_monitor=\n configuration.monitor)\n', (26475, 26775), False, 'from wepy.sim_manager import Manager\n'), ((30571, 30612), 'logging.debug', 'logging.debug', (['"""Running sim_manager.init"""'], {}), "('Running sim_manager.init')\n", (30584, 30612), False, 'import logging\n'), ((30719, 30753), 'logging.debug', 'logging.debug', (['"""Starting run loop"""'], {}), "('Starting run loop')\n", (30732, 30753), False, 'import logging\n'), ((30840, 30851), 'time.time', 'time.time', ([], {}), '()\n', (30849, 30851), False, 'import time\n'), ((32192, 32231), 'logging.debug', 'logging.debug', (['"""Finished the run cycle"""'], {}), "('Finished the run cycle')\n", (32205, 32231), False, 'import logging\n'), ((32380, 32424), 'logging.debug', 'logging.debug', (['"""Running sim_manager.cleanup"""'], {}), "('Running sim_manager.cleanup')\n", (32393, 32424), False, 'import logging\n'), ((32636, 32665), 'logging.debug', 'logging.debug', (['"""Run finished"""'], {}), "('Run finished')\n", (32649, 32665), False, 'import logging\n'), ((38541, 38613), 'logging.debug', 'logging.debug', (['"""In the orchestrate run, calling to run_snapshot by time"""'], {}), "('In the orchestrate run, calling to run_snapshot by time')\n", (38554, 38613), False, 'import logging\n'), ((39263, 39313), 'logging.debug', 'logging.debug', (['"""Finished running snapshot by time"""'], {}), "('Finished running snapshot by time')\n", (39276, 39313), False, 'import logging\n'), ((40563, 40622), 'logging.debug', 'logging.debug', (['"""Getting a connection to that orch to retun"""'], {}), "('Getting a connection to that orch to retun')\n", (40576, 40622), False, 'import logging\n'), ((41038, 41059), 'os.path.exists', 'osp.exists', (['host_path'], {}), '(host_path)\n', (41048, 41059), True, 'import os.path as osp\n'), ((24314, 24365), 'logging.debug', 'logging.debug', (['"""Old run record needs to be removed"""'], {}), "('Old run record needs to be removed')\n", (24327, 24365), False, 'import logging\n'), ((24431, 24475), 'logging.debug', 'logging.debug', (['"""Deleting the old run record"""'], {}), "('Deleting the old run record')\n", (24444, 24475), False, 'import logging\n'), ((24544, 
24569), 'logging.debug', 'logging.debug', (['"""finished"""'], {}), "('finished')\n", (24557, 24569), False, 'import logging\n'), ((29181, 29209), 'os.path.realpath', 'osp.realpath', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (29193, 29209), True, 'import os.path as osp\n'), ((29222, 29264), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {'exist_ok': '(True)'}), '(checkpoint_dir, exist_ok=True)\n', (29233, 29264), False, 'import os\n'), ((29445, 29512), 'logging.debug', 'logging.debug', (['"""Initialization of checkpoint database is requested"""'], {}), "('Initialization of checkpoint database is requested')\n", (29458, 29512), False, 'import logging\n'), ((29902, 29960), 'logging.debug', 'logging.debug', (['"""finished initializing checkpoint database"""'], {}), "('finished initializing checkpoint database')\n", (29915, 29960), False, 'import logging\n'), ((32604, 32625), 'wepy.orchestration.snapshot.SimApparatus', 'SimApparatus', (['filters'], {}), '(filters)\n', (32616, 32625), False, 'from wepy.orchestration.snapshot import SimApparatus, SimSnapshot\n'), ((37755, 37805), 'logging.debug', 'logging.debug', (['"""Reparametrizing the configuration"""'], {}), "('Reparametrizing the configuration')\n", (37768, 37805), False, 'import logging\n'), ((38429, 38492), 'logging.debug', 'logging.debug', (['"""checkpoint directory defaulted to the work_dir"""'], {}), "('checkpoint directory defaulted to the work_dir')\n", (38442, 38492), False, 'import logging\n'), ((39792, 39857), 'logging.debug', 'logging.debug', (['"""Saving a final checkpoint for the end of the run"""'], {}), "('Saving a final checkpoint for the end of the run')\n", (39805, 39857), False, 'import logging\n'), ((40443, 40508), 'logging.debug', 'logging.debug', (['"""Finished saving the final checkpoint for the run"""'], {}), "('Finished saving the final checkpoint for the run')\n", (40456, 40508), False, 'import logging\n'), ((7084, 7105), 'base64.b64decode', 'b64decode', (['serial_str'], {}), '(serial_str)\n', (7093, 7105), False, 'from base64 import b64encode, b64decode\n'), ((9428, 9443), 'hashlib.md5', 'md5', (['serial_str'], {}), '(serial_str)\n', (9431, 9443), False, 'from hashlib import md5\n'), ((25748, 25790), 'logging.debug', 'logging.debug', (['"""Deleting the old snapshot"""'], {}), "('Deleting the old snapshot')\n", (25761, 25790), False, 'import logging\n'), ((25876, 25901), 'logging.debug', 'logging.debug', (['"""finished"""'], {}), "('finished')\n", (25889, 25901), False, 'import logging\n'), ((25936, 26049), 'logging.warn', 'logging.warn', (['"""Final snapshot has same hash as the previous checkpoint. Not deleting the previous one."""'], {}), "(\n 'Final snapshot has same hash as the previous checkpoint. 
Not deleting the previous one.'\n )\n", (25948, 26049), False, 'import logging\n'), ((30866, 30877), 'time.time', 'time.time', ([], {}), '()\n', (30875, 30877), False, 'import time\n'), ((39634, 39701), 'logging.debug', 'logging.debug', (['"""Last cycle saved a checkpoint, no need to save one"""'], {}), "('Last cycle saved a checkpoint, no need to save one')\n", (39647, 39701), False, 'import logging\n'), ((44915, 44930), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (44928, 44930), False, 'import pdb\n'), ((6339, 6357), 'copy.deepcopy', 'deepcopy', (['snapshot'], {}), '(snapshot)\n', (6347, 6357), False, 'from copy import copy, deepcopy\n'), ((31360, 31414), 'logging.debug', 'logging.debug', (['"""Checkpoint is required for this cycle"""'], {}), "('Checkpoint is required for this cycle')\n", (31373, 31414), False, 'import logging\n'), ((31487, 31538), 'logging.debug', 'logging.debug', (['"""Generating the simulation snapshot"""'], {}), "('Generating the simulation snapshot')\n", (31500, 31538), False, 'import logging\n'), ((31718, 31772), 'logging.debug', 'logging.debug', (['"""saving the checkpoint to the database"""'], {}), "('saving the checkpoint to the database')\n", (31731, 31772), False, 'import logging\n'), ((32033, 32096), 'logging.debug', 'logging.debug', (['"""finished saving the checkpoint to the database"""'], {}), "('finished saving the checkpoint to the database')\n", (32046, 32096), False, 'import logging\n'), ((31602, 31623), 'wepy.orchestration.snapshot.SimApparatus', 'SimApparatus', (['filters'], {}), '(filters)\n', (31614, 31623), False, 'from wepy.orchestration.snapshot import SimApparatus, SimSnapshot\n')] |
"""
Created on Thu Oct 26 14:19:44 2017
@author: <NAME> - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from misc_functions import preprocess_image, recreate_image, save_image
class ClassSpecificImageGeneration():
"""
Produces an image that maximizes a certain class with gradient ascent
"""
def __init__(self, model, target_class):
self.mean = [-0.485, -0.456, -0.406]
self.std = [1/0.229, 1/0.224, 1/0.225]
self.model = model
self.model.eval()
self.target_class = target_class
# Generate a random image
self.created_image = np.uint8(np.random.uniform(0, 255, (224, 224, 3)))
# Create the folder to export images if not exists
if not os.path.exists('../generated/class_'+str(self.target_class)):
os.makedirs('../generated/class_'+str(self.target_class))
def generate(self, iterations=150):
"""Generates class specific image
Keyword Arguments:
iterations {int} -- Total iterations for gradient ascent (default: {150})
Returns:
            torch.Tensor -- Processed input image that maximally activates the target class
"""
initial_learning_rate = 6
for i in range(1, iterations):
# Process image and return variable
self.processed_image = preprocess_image(self.created_image, False)
# Define optimizer for the image
optimizer = SGD([self.processed_image], lr=initial_learning_rate)
# Forward
output = self.model(self.processed_image)
# Target specific class
class_loss = -output[0, self.target_class]
if i % 10 == 0 or i == iterations-1:
print('Iteration:', str(i), 'Loss',
"{0:.2f}".format(class_loss.data.numpy()))
# Zero grads
self.model.zero_grad()
# Backward
class_loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
if i % 10 == 0 or i == iterations-1:
# Save image
im_path = '../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
save_image(self.created_image, im_path)
return self.processed_image
if __name__ == '__main__':
target_class = 130 # Flamingo
pretrained_model = models.alexnet(pretrained=True)
csig = ClassSpecificImageGeneration(pretrained_model, target_class)
csig.generate()
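# Minimal standalone sketch of the same gradient-ascent idea (assumes only
# torch/torchvision; not part of the original class):
#
#     import torch
#     from torchvision import models
#
#     model = models.alexnet(pretrained=True).eval()
#     x = torch.rand(1, 3, 224, 224, requires_grad=True)
#     optimizer = torch.optim.SGD([x], lr=6)
#     for _ in range(150):
#         optimizer.zero_grad()
#         loss = -model(x)[0, 130]  # 130 = flamingo, as above
#         loss.backward()
#         optimizer.step()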
| [
"torch.optim.SGD",
"misc_functions.recreate_image",
"misc_functions.save_image",
"misc_functions.preprocess_image",
"torchvision.models.alexnet",
"numpy.random.uniform"
]
| [((2551, 2582), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2565, 2582), False, 'from torchvision import models\n'), ((698, 738), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(255)', '(224, 224, 3)'], {}), '(0, 255, (224, 224, 3))\n', (715, 738), True, 'import numpy as np\n'), ((1393, 1436), 'misc_functions.preprocess_image', 'preprocess_image', (['self.created_image', '(False)'], {}), '(self.created_image, False)\n', (1409, 1436), False, 'from misc_functions import preprocess_image, recreate_image, save_image\n'), ((1507, 1560), 'torch.optim.SGD', 'SGD', (['[self.processed_image]'], {'lr': 'initial_learning_rate'}), '([self.processed_image], lr=initial_learning_rate)\n', (1510, 1560), False, 'from torch.optim import SGD\n'), ((2130, 2166), 'misc_functions.recreate_image', 'recreate_image', (['self.processed_image'], {}), '(self.processed_image)\n', (2144, 2166), False, 'from misc_functions import preprocess_image, recreate_image, save_image\n'), ((2387, 2426), 'misc_functions.save_image', 'save_image', (['self.created_image', 'im_path'], {}), '(self.created_image, im_path)\n', (2397, 2426), False, 'from misc_functions import preprocess_image, recreate_image, save_image\n')] |
#!/usr/bin/env python
"""
@file visum_mapDistricts.py
@author <NAME>
@author <NAME>
@date 2007-10-25
@version $Id$
This script reads two networks and two lists of matching nodes,
reprojects the first network onto the second one and maps the districts
of the VISUM network onto it, writing district, node, edge and
connection files.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import math
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
import netshiftadaptor
def computeDistance(n1, n2):
xd = n1._coord[0] - n2._coord[0]
yd = n1._coord[1] - n2._coord[1]
return math.sqrt(xd * xd + yd * yd)
def relAngle(angle1, angle2):
angle2 -= angle1
if angle2 > 180:
angle2 = (360. - angle2) * -1.
while angle2 < -180:
angle2 = 360 + angle2
return angle2
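# Worked examples for relAngle (angles in degrees): relAngle(350., 10.) == 20.
# and relAngle(10., 350.) == -20.; the result is always folded into the
# range [-180, 180].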
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--net1", dest="net1",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
help="The first matching nodes", metavar="NODELIST")
optParser.add_option("-b", "--nodes2", dest="nodes2",
help="The second matching nodes", metavar="NODELIST")
# parse options
(options, args) = optParser.parse_args()
# read networks
if options.verbose:
print("Reading net#1...")
net1 = sumolib.net.readNet(options.net1)
if options.verbose:
print("Reading net#2...")
net2 = sumolib.net.readNet(options.net2)
# reproject the visum net onto the navteq net
adaptor = netshiftadaptor.NetShiftAdaptor(
net1, net2, options.nodes1.split(","), options.nodes2.split(","))
adaptor.reproject(options.verbose)
# build a speed-up grid
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
for n in net1._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
for n in net2._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
xmin = xmin - .1
xmax = xmax + .1
ymin = ymin - .1
ymax = ymax + .1
CELLSIZE = 100
arr1 = []
arr2 = []
for y in range(0, CELLSIZE):
arr1.append([])
arr2.append([])
for x in range(0, CELLSIZE):
arr1[-1].append([])
arr2[-1].append([])
cw = (xmax - xmin) / float(CELLSIZE)
ch = (ymax - ymin) / float(CELLSIZE)
for n in net2._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr1[int(cy)][int(cx)].append(n)
for n in net1._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr2[int(cy)][int(cx)].append(n)
# map
nmap1to2 = {}
nmap2to1 = {}
nodes1 = net2._nodes
nodes2 = net1._nodes
highwayNodes2 = set()
highwaySinks2 = set()
highwaySources2 = set()
urbanNodes2 = set()
for n2 in nodes2:
noIncoming = 0
noOutgoing = 0
for e in n2._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
for e in n2._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if n2 in highwayNodes2:
if noOutgoing == 0:
highwaySinks2.add(n2)
if noIncoming == 0:
highwaySources2.add(n2)
else:
urbanNodes2.add(n2)
print("Found " + str(len(highwaySinks2)) + " highway sinks in net2")
cont = ""
for n in highwaySinks2:
cont = cont + n._id + ", "
print(cont)
cont = ""
print("Found " + str(len(highwaySources2)) + " highway sources in net2")
for n in highwaySources2:
cont = cont + n._id + ", "
print(cont)
fdd = open("dconns.con.xml", "w")
fdd.write("<connections>\n")
highwaySinks1 = set()
highwaySources1 = set()
origDistrictNodes = {}
nnn = {}
for n1 in nodes1:
if n1._id.find('-', 1) < 0:
continue
# if n1._id.find("38208387")<0:
# continue
un1 = None
for e in n1._outgoing:
un1 = e._to
for e in n1._incoming:
un1 = e._from
d = n1._id[:n1._id.find('-', 1)]
if d[0] == '-':
d = d[1:]
if d not in origDistrictNodes:
origDistrictNodes[d] = []
if options.verbose:
print("District: " + d)
isHighwayNode = False
isHighwaySink = False
isHighwaySource = False
noIncoming = 0
noOutgoing = 0
noInConns = 0
noOutConns = 0
for e in un1._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
if e.getSpeed() > 99:
noOutConns = noOutConns + 1
for e in un1._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if e.getSpeed() > 99:
noInConns = noInConns + 1
if options.verbose:
print("Check", un1._id, noOutgoing, noIncoming)
if isHighwayNode:
if noOutgoing == 0:
highwaySinks1.add(n1)
isHighwaySink = True
if noIncoming == 0:
highwaySources1.add(n1)
isHighwaySource = True
# the next is a hack for bad visum-networks
if noIncoming == 1 and noOutgoing == 1 and noInConns == 1 and noOutConns == 1:
highwaySinks1.add(n1)
isHighwaySink = True
highwaySources1.add(n1)
isHighwaySource = True
best = None
bestDist = -1
check = urbanNodes2
if n1 in highwaySinks1:
check = highwaySinks2
elif n1 in highwaySources1:
check = highwaySources2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if bestDist == -1 or bestDist > dist:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
if options.verbose:
print("a: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
preBest = best
best = None
bestDist = -1
check = []
if n1 in highwaySinks1 or preBest in highwaySinks2:
check = highwaySources2
elif n1 in highwaySources1 or preBest in highwaySources2:
check = highwaySinks2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if (bestDist == -1 or bestDist > dist) and n2 != preBest:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
print("b: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
if options.verbose:
print("Found " + str(len(highwaySinks1)) + " highway sinks in net1")
for n in highwaySinks1:
print(n._id)
print("Found " + str(len(highwaySources1)) + " highway sources in net1")
for n in highwaySources1:
print(n._id)
connectedNodesConnections = {}
for d in nmap1to2:
for n2 in nmap1to2[d]:
if n2 in connectedNodesConnections:
continue
n1i = net1.addNode("i" + n2._id, nnn[n2]._coord)
n1o = net1.addNode("o" + n2._id, nnn[n2]._coord)
haveIncoming = False
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
haveIncoming = True
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
haveOutgoing = False
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
haveOutgoing = True
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
if haveIncoming:
e1 = net1.addEdge("o" + n2._id, n2._id, n1o._id, -2)
if haveOutgoing:
net1.addLane(e1, 20, 100.)
else:
for i in range(0, incomingLaneNo):
net1.addLane(e1, 20, 100.)
if len(n2._incoming) == 1:
fdd.write(' <connection from="' + n2._incoming[
0]._id + '" to="' + e1._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
if haveOutgoing:
if options.verbose:
print("has outgoing")
e2 = net1.addEdge("i" + n2._id, n1i._id, n2._id, -2)
if haveIncoming:
net1.addLane(e2, 20, 100.)
else:
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 20, 100.)
if len(n2._outgoing) == 1:
fdd.write(' <connection from="' + e2._id + '" to="' +
n2._outgoing[0]._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
connectedNodesConnections[n2] = [n1i, n1o]
newDistricts = {}
districtSources = {}
districtSinks = {}
mappedDistrictNodes = {}
connNodes = {}
dRemap = {}
for d in nmap1to2:
newDistricts[d] = []
if len(nmap1to2[d]) == 1:
n = nmap1to2[d][0]
if n in dRemap:
districtSources[d] = districtSources[dRemap[n]]
districtSinks[d] = districtSinks[dRemap[n]]
newDistricts[d] = []
newDistricts[d].append(n._id)
continue
else:
dRemap[n] = d
[ni, no] = connectedNodesConnections[n]
if len(ni._outgoing) > 0:
districtSources[d] = ni._outgoing[0]._id
if len(no._incoming) > 0:
districtSinks[d] = no._incoming[0]._id
fdd.write(' <connection from="' + no._incoming[0]._id + '"/>\n')
else:
incomingLaneNoG = 0
outgoingLaneNoG = 0
for n in nmap1to2[d]:
for e in n._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNoG = incomingLaneNoG + e.getLaneNumber()
for e in n._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNoG = outgoingLaneNoG + e.getLaneNumber()
p1 = [0, 0]
p11 = [0, 0]
p12 = [0, 0]
p2 = [0, 0]
for n in nmap1to2[d]:
p1[0] = p1[0] + n._coord[0]
p1[1] = p1[1] + n._coord[1]
p2[0] = p2[0] + nnn[n]._coord[0]
p2[1] = p2[1] + nnn[n]._coord[1]
p2[0] = (p1[0] + p2[0]) / float(len(origDistrictNodes[d]) * 2)
p2[1] = (p1[1] + p2[1]) / float(len(origDistrictNodes[d]) * 2)
dn2i = net1.addNode("cci" + d, p2)
dn2o = net1.addNode("cci" + d, p2)
p11[0] = p1[0] / float(len(origDistrictNodes[d]))
p11[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1o = net1.addNode("co" + d, p11)
e1 = net1.addEdge("co" + d, dn1o._id, dn2o._id, -2)
for i in range(0, incomingLaneNoG):
net1.addLane(e1, 22, 100.)
districtSinks[d] = e1._id
p12[0] = p1[0] / float(len(origDistrictNodes[d]))
p12[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1i = net1.addNode("ci" + d, p12)
e2 = net1.addEdge("ci" + d, dn2i._id, dn1i._id, -2)
for i in range(0, outgoingLaneNoG):
net1.addLane(e2, 21, 100.)
districtSources[d] = e2._id
runningOutLaneNumber = 0
runningInLaneNumber = 0
for n2 in nmap1to2[d]:
[ni, no] = connectedNodesConnections[n2]
print("In: " + ni._id + " " + str(len(ni._incoming)) +
" " + str(len(ni._outgoing)))
print("Out: " + no._id + " " + str(len(no._incoming)) +
" " + str(len(no._outgoing)))
if len(no._incoming) > 0:
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
e1 = net1.addEdge("o" + d + "#" + n2._id, no._id, dn1o._id, -2)
for i in range(0, incomingLaneNo):
net1.addLane(e1, 19, 100.)
fdd.write(' <connection from="' + "o" + d + "#" + n2._id + '" to="' + dn1o._outgoing[
0]._id + '" lane="' + str(i) + ':' + str(runningOutLaneNumber) + '"/>\n')
runningOutLaneNumber = runningOutLaneNumber + 1
fdd.write(
' <connection from="' + dn1o._outgoing[0]._id + '"/>\n')
if incomingLaneNo == 0:
net1.addLane(e1, 19, 100.)
runningOutLaneNumber = runningOutLaneNumber + 1
if len(ni._outgoing) > 0:
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
e2 = net1.addEdge("i" + d + "#" + n2._id, dn1i._id, ni._id, -2)
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 18, 100.)
fdd.write(' <connection from="' + dn1i._incoming[
0]._id + '" to="' + "i" + d + "#" + n2._id + '" lane="' + str(runningInLaneNumber) + ':' + str(i) + '"/>\n')
runningInLaneNumber = runningInLaneNumber + 1
if outgoingLaneNo == 0:
net1.addLane(e2, 18, 100.)
runningInLaneNumber = runningInLaneNumber + 1
fd = open("districts.xml", "w")
fd.write("<tazs>\n")
for d in newDistricts:
fd.write(' <taz id="' + d + '">\n')
if d in districtSources:
fd.write(
' <tazSource id="' + districtSources[d] + '" weight="1"/>\n')
if d in districtSinks:
fd.write(
' <tazSink id="' + districtSinks[d] + '" weight="1"/>\n')
fd.write(' </taz>\n')
fd.write("</tazs>\n")
fd.close()
def writeNode(fd, node):
fd.write(" <node id=\"" + node._id + "\" x=\"" +
str(node._coord[0]) + "\" y=\"" + str(node._coord[1]) + "\"/>\n")
def writeEdge(fd, edge, withGeom=True):
fd.write(" <edge id=\"" + edge._id + "\" fromNode=\"" +
edge._from._id + "\" toNode=\"" + edge._to._id)
fd.write("\" speed=\"" + str(edge._speed))
fd.write("\" priority=\"" + str(edge._priority))
if withGeom:
fd.write("\" spreadType=\"center")
fd.write("\" numLanes=\"" + str(len(edge._lanes)) + "\"")
shape = edge.getShape()
if withGeom:
fd.write(" shape=\"")
for i, c in enumerate(shape):
if i != 0:
fd.write(" ")
fd.write(str(c[0]) + "," + str(c[1]))
fd.write("\"")
fd.write("/>\n")
def writeNodes(net):
fd = open("nodes.xml", "w")
fd.write("<nodes>\n")
for node in net._nodes:
writeNode(fd, node)
fd.write("</nodes>\n")
fd.close()
def writeEdges(net):
fd = open("edges.xml", "w")
fd.write("<edges>\n")
for edge in net._edges:
if edge._id.find("#") > 0 or edge._id.find("c") >= 0 or edge._id.find("i") >= 0:
writeEdge(fd, edge, False)
else:
writeEdge(fd, edge)
fd.write("</edges>\n")
fd.close()
fdd.write("</connections>\n")
writeNodes(net1)
writeEdges(net1)
| [
"os.path.abspath",
"math.sqrt",
"optparse.OptionParser"
]
| [((1262, 1276), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1274, 1276), False, 'from optparse import OptionParser\n'), ((1020, 1048), 'math.sqrt', 'math.sqrt', (['(xd * xd + yd * yd)'], {}), '(xd * xd + yd * yd)\n', (1029, 1048), False, 'import math\n'), ((833, 858), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (848, 858), False, 'import os\n')] |
#!/usr/bin/env python3
######
# General Detector
# 06.12.2018 / Last Update: 20.05.2021
# LRB
######
import numpy as np
import os
import sys
import tensorflow as tf
import hashlib
import cv2
import magic
import PySimpleGUI as sg
import csv
import imagehash
import face_recognition
import subprocess
from itertools import groupby
from distutils.version import StrictVersion
from PIL import Image
from datetime import datetime
from time import strftime
from time import gmtime
from multiprocessing import Pool
from Models.Face import detect_face
from pathlib import Path
from openvino.inference_engine import IENetwork, IECore
from AudioAnalysis import audioAnalysis
######
# Worker function to check the input provided via the GUI
#######
def validateInput(gui_input):
error = False
#Validate input
# for element in gui_input[1][0:7]:
# if element == '' or []:
# error = True
if gui_input[0] == "Cancel" or len(gui_input[1][8]) == 0:
error = True
if bool(gui_input[1][5]) == True and gui_input[1][12] == "":
error = True
if error == True:
sg.Popup('You have not populated all required fields. Aborting!', title='Error', button_color=('black', 'red'), background_color=('grey'))
exit()
######
# Worker function to update the progress bar
######
def updateProgressMeter(step, customText):
if sg.OneLineProgressMeter('BKP Media Detector', step, 12, 'key', customText, orientation='h', size=(50, 25)) == False:
exit()
######
# Worker function to prepare and reshape the input images into a Numpy array
# and to calculate the MD5 hashes of them.
######
def load_image_into_numpy_array(image_path):
try:
image_path = str(image_path)
# Open, measure and convert image to RGB channels
image = Image.open(image_path)
(im_width, im_height) = image.size
if int(im_width) < 34 or int(im_height) < 34:
logfile.write("Insufficient file dimensions: " + str(image_path) + "\n")
return None
if int(im_width) > 4512 or int(im_height) > 3008:
maxheight = int(3008)
maxwidth = int(4512)
resize_ratio = min(maxwidth/im_width, maxheight/im_height)
im_width = int(im_width * resize_ratio)
im_height = int(im_height * resize_ratio)
image = image.resize((im_width, im_height))
image = image.convert('RGB')
np_array = np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
image.close()
# Hash the image in byte-chunks of 4096
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
f.close()
hashvalue = hash_md5.hexdigest()
return image_path, hashvalue, np_array
#Throw errors to stdout
except IOError or OSError:
magictype = str(magic.from_file((image_path), mime=True))
# If image file cannot be read, check if it is a video
if magictype[:5] == 'video': #or magictype[12:17] == 'octet':
# If so, return a video flag instead of numpy array
flag = "VIDEO"
elif magictype[:5] == 'audio':
flag = "AUDIO"
elif magictype[12:17] == 'octet':
flag = "OCTET"
else:
image_path = "Could not open file: " + str(image_path) + " (" + str(magictype) + ")\n"
flag = "ERROR"
return image_path, flag
except:
magictype = str(magic.from_file((image_path), mime=True))
logfile.write("General error with file: " + str(image_path) + " (" + str(magictype) + ")\n")
def check_video_orientation(image_path):
# Function to check video rotation with ffprobe and return corresponding CV2 rotation code
try:
cmnd = ['ffprobe', '-loglevel', 'error', '-select_streams', 'v:0', '-show_entries', 'stream_tags=rotate', '-of',
'default=nw=1:nk=1', image_path]
p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
orientation = out.decode('utf-8')
if orientation == '':
rotation = 3
elif int(orientation) == 180:
rotation = 1
elif int(orientation) == 90:
rotation = 0
else:
rotation = 2
return rotation
except:
logfile.write("Cannot determine video rotation: " + str(image_path) + "\n")
######
# Worker function to prepare and reshape the input videos to a Numpy array
# and to calculate the MD5 hashes of them.
# The function analyzes as much frames as indicated in the variable "frames_per_second" (Default = 0.5)
######
def load_video_into_numpy_array(image_path):
videoframes = []
old_hash = None
# Loading the video via the OpenCV framework
try:
rotation = check_video_orientation(image_path)
vidcap = cv2.VideoCapture(image_path)
im_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
im_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Switch height/width if video is to be rotated 90/270 degrees
if rotation == 0 or rotation == 2:
im_width_new = im_height
im_height_new = im_width
im_width = im_width_new
im_height = im_height_new
# Calculating frames per second, total frame count and analyze rate
fps = int(vidcap.get(cv2.CAP_PROP_FPS))
framecount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
analyze_rate = int(framecount / fps * frames_per_second)
if 0 < analyze_rate < max_frames_per_video:
int(analyze_rate)
elif analyze_rate >= int(max_frames_per_video):
analyze_rate = int(max_frames_per_video) #Limiting maximum frames per video
else:
videoerror = 'Unable to extract frames from video: ' + str(image_path) + '\n'
return videoerror
# Hashing the video once
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
# Extracting the frames from the video
for percentile in range(0, analyze_rate):
vidcap.set(cv2.CAP_PROP_POS_FRAMES, (framecount / analyze_rate) * percentile)
success, extracted_frame = vidcap.read()
if rotation != 3:
extracted_frame = cv2.rotate(extracted_frame, rotation)
extracted_frame = cv2.cvtColor(extracted_frame, cv2.COLOR_BGR2RGB)
timecode = ((framecount / analyze_rate) * percentile) / fps
timecode = str(strftime("%H:%M:%S", gmtime(timecode)))
# And reshape them into a numpy array
np_array = np.array(extracted_frame).reshape(
(im_height, im_width, 3)).astype(np.uint8)
if video_sensitivity > 0:
# Compare the frame with the previous one for similarity, and drop if similar
frame_to_check = Image.fromarray(np_array)
new_hash = imagehash.phash(frame_to_check)
if old_hash is None or (new_hash - old_hash > video_sensitivity):
cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
videoframes.append(cluster)
old_hash = new_hash
else:
cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
videoframes.append(cluster)
vidcap.release()
return videoframes
except cv2.error:
videoerror = 'Could not process video: ' + str(image_path) + '\n'
return videoerror
except:
videoerror = 'General error processing video: ' + str(image_path) + '\n'
return videoerror
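# Worked example of the sampling logic above (numbers are assumptions): a
# 60 s clip at 30 fps gives framecount = 1800, so with frames_per_second = 0.5
# the function extracts analyze_rate = 1800 / 30 * 0.5 = 30 frames, i.e. one
# frame every two seconds, subject to the max_frames_per_video cap.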
######
# Detection within loaded images with Tensorflow framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_for_multiple_images(image_paths, images, hashvalues):
# Open the results file again
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
for y in range(0, len(graphlist)):
# Create TF Session with loaded graph
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with model " + str(y + 1) + " of " + str(len(graphlist)) + "*\n")
# Update progress indicator
updateProgressMeter(7 + y, 'Detecting with model {}'.format(graphlist[y]))
# Load the respective detetion graph from file
with tf.gfile.GFile(graphlist[y], 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Create TF session
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_scores', 'detection_classes'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Setting the detection limit of the different models.
if "ISLogo" not in graphlist[y]:
detectionlimit = 0.5
else:
detectionlimit = 0.90
# Loading the label map of the corresponding graph
category_index = indexlist[y]
# Conduct actual detection within single image
for index, image in enumerate(images):
updateProgressMeter(7 + y, str(graphlist[y]) + '\nFile ' + str(index) + ' of ' + str(len(images)))
try:
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_scores'] = output_dict['detection_scores'][0]
detectionhit = output_dict['num_detections']
output_dict['detection_classes'] = output_dict['detection_classes'][0]
hashvalue = hashvalues[index]
image_path = image_paths[index]
                        # Validate against the detection limit (0.5 by default, 0.90 for the ISLogo model) and write hash/score if above
for j in range(detectionhit):
score = output_dict['detection_scores'][j]
category = category_index[output_dict['detection_classes'][j]]
# Validate against the preconfigured minimum detection assurance and write to result file
if (score >= detectionlimit):
scorestring = str(score)
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([category['name'], "md5:" + hashvalue])
else:
line = ",".join([Path(image_path).name, hashvalue, scorestring, category['name']])
detectionresults.write(line + "\n")
except tf.errors.InvalidArgumentError:
logfile.write("Unable to process file dimensions of file with hash: \t" + str(hashvalue) + "\n")
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with model " + str(y + 1) + "*\n")
detectionresults.flush()
detectionresults.close()
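# Example of a resulting CSV line in the default (non-Nuix) format, with
# made-up values:
#
#     IMG_0001.jpg,9e107d9d372bb6826bd81d3542a419d6,0.9731,<category name>
#
# In the Nuix format the line is reduced to "<category name>,md5:<hash>".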
######
# Detect and count faces in loaded images
# Prepare and call age/gender detection once done
######
def faceDetection(image_paths, images, hashvalues):
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Updating progress bar and logfile
updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector')
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with face/age/gender detection model*\n")
# Applying constants as defined in Facenet
minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
# Creating different TF Session
with tf.Session() as sess:
# read pnet, rnet, onet models from Models/Face directory
facemodel_path = Path('Models/Face')
pnet, rnet, onet = detect_face.create_mtcnn(sess, str(facemodel_path))
        # Helper lists for age/gender detection and a counter for unreadable files
        facelist = []
        imagelist = []
        errorcount = 0
# Inference for all images
for index, image in enumerate(images):
updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector' + '\nFile ' + str(index) + ' of ' + str(len(images)))
try:
bounding_boxes, _ = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
# If a face was detected, go on
if nrof_faces > 0:
detectedFaces = bounding_boxes[:, 0:4]
detectedFacesArray = []
img_size = np.asarray(image.shape)[0:2]
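                    # image height and width, used below to clamp bounding box coordinates to the frame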
if nrof_faces > 1:
for single_face in range(nrof_faces):
detectedFacesArray.append(np.squeeze(detectedFaces[single_face]))
else:
detectedFacesArray.append(np.squeeze(detectedFaces))
# Crop the detected face and add it to the list to conduct age/gender identification
for x, detectedFaces in enumerate(detectedFacesArray):
detectedFaces = np.squeeze(detectedFaces)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(detectedFaces[0], 0)
bb[1] = np.maximum(detectedFaces[1], 0)
bb[2] = np.minimum(detectedFaces[2], img_size[1])
bb[3] = np.minimum(detectedFaces[3], img_size[0])
cropped_Face = image[bb[1]:bb[3], bb[0]:bb[2], :]
facelist.append(cropped_Face)
imagelist.append(index)
# Write the results of the face detection into the resultsfile
if not len(bounding_boxes) == 0:
hashvalue = hashvalues[index]
number_of_faces = len(bounding_boxes)
if REPORT_FORMAT[0] == 'Nuix':
line = "Face,md5:" + hashvalue
else:
line = str(Path(image_paths[index]).name) + "," + str(hashvalue) + ",FACES," + str(
number_of_faces) + "Faces"
detectionresults.write(line + "\n")
            except tf.errors.InvalidArgumentError:
                errorcount += 1
                logfile.write("Unable to detect faces in file with hash: \t" + str(hashvalues[index]) + "\n")
# Conduct age/gender recognition based on the list of detected & cropped faces
if len(facelist) != 0:
age_gender_detection(imagelist, facelist, hashvalues, image_paths)
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with face/age/gender detection model*\n")
detectionresults.flush()
detectionresults.close()
######
# Detection with the OPEN VINO Framework
# Evaluate Age & Gender based on input faces
######
def age_gender_detection(imagelist, facelist, hashvalues, image_paths):
# Acquire the age-gender detection model
model_path = Path('Models/OpenVINO/age-gender')
model_xml = str(model_path / 'model.xml')
model_bin = str(model_path / 'model.bin')
# Reopen the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Plugin initialization for specified device and load extensions library if specified
ie = IECore()
# Read IR
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
net.batch_size = len(facelist)
# Read and pre-process input images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
# Loading model to the plugin
exec_net = ie.load_network(network=net, device_name='CPU')
# Resize and reshape input faces
for i in range(n):
image = facelist[i]
if image.shape[:-1] != (62, 62):
h, w = image.shape[:2]
# interpolation method
if h > 62 or w > 62: # shrinking image
interp = cv2.INTER_AREA
else: # stretching image
interp = cv2.INTER_CUBIC
# aspect ratio of image
aspect = w / h
# compute scaling and pad sizing
if aspect > 1: # horizontal image
new_w = 62
new_h = np.round(new_w / aspect).astype(int)
pad_vert = (62 - new_h) / 2
pad_top, pad_bot = np.floor(pad_vert).astype(int), np.ceil(pad_vert).astype(int)
pad_left, pad_right = 0, 0
elif aspect < 1: # vertical image
new_h = 62
new_w = np.round(new_h * aspect).astype(int)
pad_horz = (62 - new_w) / 2
pad_left, pad_right = np.floor(pad_horz).astype(int), np.ceil(pad_horz).astype(int)
pad_top, pad_bot = 0, 0
else: # square image
new_h, new_w = 62, 62
pad_left, pad_right, pad_top, pad_bot = 0, 0, 0, 0
# set pad color
padColor = 0
            if len(image.shape) == 3 and not isinstance(padColor, (
                    list, tuple, np.ndarray)):  # color image but only one color provided
padColor = [padColor] * 3
# scale and pad
scaled_img = cv2.resize(image, (new_w, new_h), interpolation=interp)
scaled_img = cv2.cvtColor(scaled_img, cv2.COLOR_BGR2RGB)
scaled_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bot, pad_left, pad_right,
borderType=cv2.BORDER_CONSTANT, value=padColor)
image = scaled_img.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image
# Conduct inference
res = exec_net.infer(inputs={input_blob: images})
# Process inference results
for y in range(len(facelist)):
probable_age = int(np.squeeze(res['age_conv3'][y]) * 100)
if np.squeeze(res['prob'][y][0]) > 0.5:
gender = "Female"
else:
gender = "Male"
age_gender_combo = str(probable_age) + str(gender)
# Write inference results to resultsfile
hashvalue = hashvalues[imagelist[y]]
if REPORT_FORMAT[0] == 'Nuix':
line = str(age_gender_combo) + ",md5:" + hashvalue
else:
line = str(Path(image_paths[imagelist[y]]).name) + "," + str(hashvalue) + ",AGE-GENDER," + str(
age_gender_combo)
detectionresults.write(line + "\n")
######
# Detection with the OPEN VINO Framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_openvino(image_paths, images, hashvalue):
# Update progress meter and reopen results file
updateProgressMeter(6, 'Detecting with OpenVINO Object Detector')
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with OpenVINO object detection model*\n")
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Fetch paths for openvino model
model_path = Path('Models/OpenVINO/vgg19')
model_xml = str(model_path / 'model.xml')
model_bin = str(model_path / 'model.bin')
model_labels = str(model_path / 'model.labels')
temp_bilder = images
# Plugin initialization for specified device and load extensions library if specified
ie = IECore()
# Read IR
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
net.batch_size = 4000
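    # upper bound for the batch dimension; the input images are fed in chunks of this size below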
# Read and pre-process input images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
# Loading model to the plugin
exec_net = ie.load_network(network=net, device_name='CPU')
# Create batches to prevent RAM overload
batches = tuple(temp_bilder[x:x + net.batch_size] for x in range(0, len(temp_bilder), net.batch_size))
# Start sync inference
for batch in batches:
for index, temp_pic in enumerate(batch):
temp_pic = cv2.resize(temp_pic, (w, h))
temp_pic = temp_pic.transpose((2, 0, 1))
images[index] = temp_pic
res = exec_net.infer(inputs={input_blob: images})
# Processing output blob
res = res[out_blob]
# Prepare label file
with open(model_labels, 'r') as f:
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
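        # keep the text after the first space of each label line (or the whole line if it has no space)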
# Clean inference results and write them to resultsfile
for i, probs in enumerate(res):
probs = np.squeeze(probs)
top_ind = np.argsort(probs)[-3:][::-1]
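        # indices of the three highest-scoring classes, ordered from best to worst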
for id in top_ind:
if probs[id] >= 0.3:
# det_label = labels_map[id] if labels_map else "{}".format(id)
det_label = labels_map[id].split(sep=' ', maxsplit=1)[1]
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([det_label, "md5:" + hashvalue])
else:
line = ",".join([Path(image_paths[i]).name, hashvalue[i], str(probs[id]), str(det_label)])
detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with OpenVINO object detection model*\n")
######
# Worker function to load and encode known faces and to compare them against
# the provided input material
######
def faceRecognition(known_faces_path, image_paths, images, hashvalues):
# Update progress bar
updateProgressMeter(5, 'Conducting Face Recognition')
known_face_counter = 0
# Open the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
OutputPictureFolder = PATH_TO_RESULTS / 'DetectedFaces'
if not OutputPictureFolder.exists(): os.mkdir(str(OutputPictureFolder))
# Initiate array to store known faces
known_face_encodings = []
known_face_names = []
known_faces = Path.iterdir(Path(known_faces_path))
# Create encodings and store them with names
for known_face in known_faces:
known_person_image = face_recognition.load_image_file(known_face)
known_face_encodings.extend(face_recognition.face_encodings(known_person_image))
known_face_names.append(Path(known_face).stem)
logfile.write("*" + str(datetime.now()) + ": \tStarting face recognition with " + str(len(known_face_names)) + " known faces*\n")
# Load images, detect faces, encode and compare them to the known faces
for index, image_to_detect in enumerate(images):
hashvalue = hashvalues[index]
image_path = image_paths[index]
updateProgressMeter(5, 'Face Reco Image ' + str(index) + ' of ' + str(len(images)))
# Use GPU based model to detect & encode
face_locations = face_recognition.face_locations(image_to_detect, model="cnn")
face_encodings = face_recognition.face_encodings(image_to_detect, face_locations)
# Loop through each face in this frame of video
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=facereq_tolerance)
name = "Unknown"
# Check the face distance and get best match
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# If there is a match, write it to the output file
if name != "Unknown":
known_face_counter += 1
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([name, "md5:" + hashvalue])
else:
line = ",".join([Path(image_path).name, hashvalue, "FACE-Match", name])
detectionresults.write(line + "\n")
if output_detFaces:
# Export detected face with bounding box
cv2.rectangle(image_to_detect, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(image_to_detect, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(image_to_detect, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
savePath = str(OutputPictureFolder / str(Path(image_path).name)) + '.jpg'
detectedFace = Image.fromarray(image_to_detect)
detectedFace.save(savePath)
logfile.write("*" + str(datetime.now()) + ": \tFace Recognition completed.*\n")
detectionresults.flush()
detectionresults.close()
# Return amount of detected known faces
return known_face_counter
######
# Worker function to conduct speech detection in audio files
# for all audio files detected
######
def audioSpeechDetection(audiolist):
logfile.write("*" + str(datetime.now()) + ": \tStarting audio speech detection*\n")
updateProgressMeter(11, 'Processing Audio Files')
audiocounter = 0
# Open the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
pool = Pool(maxtasksperchild=100)
result = pool.map(audioAnalysis.segmentSpeechDetection, audiolist, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
    result = [x for x in result if x is not None]
for processedAudio in result:
speechPercentage, audiopath = processedAudio
# Check for the video flag
if not isinstance(speechPercentage, float):
logfile.write("Unsupported audio file: " + str(audiopath) + "\n")
else:
speechPercentage, audiopath = processedAudio
# Hashing the video once
hash_md5 = hashlib.md5()
with open(audiopath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
audiocounter += 1
if REPORT_FORMAT[0] == 'Nuix':
if speechPercentage != 0.0:
line = ",".join(["AUDIO-SPEECH", "md5:" + hashvalue])
else:
line = ",".join([Path(audiopath).name, hashvalue, str(speechPercentage), "AUDIO-SPEECH"])
detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tAudio speech detection completed.*\n")
detectionresults.flush()
detectionresults.close()
return audiocounter
######
# Split the report file to allow seamless integration into XWays Hash Database per category
######
def createXWaysReport():
detectionresults_path = str(PATH_TO_RESULTS / 'Detection_Results.csv')
xways_folder = PATH_TO_RESULTS / 'XWaysOutput'
if not xways_folder.exists(): os.mkdir(str(xways_folder))
for key, rows in groupby(csv.reader(open(detectionresults_path)),
lambda row: row[3]):
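        # rows are grouped by their category column (index 3); each category gets its own output file below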
# Replace special characters in categories
if str(key) != 'category':
key = str(key).replace("/","-")
key = str(key).replace(".", "")
key = str(key).replace("(", "")
key = str(key).replace(")", "")
key = key + '.txt'
detectionresults_single_path = xways_folder / key
with open(str(detectionresults_single_path), 'a') as rf:
for row in rows:
rf.write(row[1] + "\n")
rf.flush()
# Get a list of all files in results directory
resultsfiles = os.listdir(str(xways_folder))
# Prepend them with MD5 for seamless import into XWays
for file in resultsfiles:
line = "md5"
if file[-3:] == 'txt' and file != 'Logfile.txt':
with open(str(xways_folder / file), 'r+') as ff:
content = ff.read()
ff.seek(0,0)
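                # rewind to the start of the file so the "md5" header is written before the existing rows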
ff.write(line.rstrip('\r\n') + '\n' + content)
######
#
# Main program function
# First initiates required parameters and variables, then loads the GUI
# After which the image and video load functions are triggered based on the input parameters
# Finally, the detection is executed and results written to the place requested
#
######
# Prevent execution when externally called
if __name__ == '__main__':
######
# Collecting parameters via GUI
######
sg.ChangeLookAndFeel('Dark')
layout = [[sg.Text('General Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Please specify the folder holding the media data:')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestBilder', button_color=('black', 'grey'))], #Path.home() = Initial folder
[sg.Text('Where shall I place the results?')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestResults', button_color=('black', 'grey'))], #Path.home()
[sg.Text('TENSORFLOW DETECTORS')],
[sg.Checkbox('Objects/Persons', size=(15, 2)),
sg.Checkbox('Actions'),
sg.Checkbox('IS Logos'),
sg.Checkbox("Face Recognition")],
[sg.Text('OPEN VINO DETECTORS')],
[sg.Checkbox('Objects-fast', size=(15, 2)),
sg.Checkbox('Faces/Age/Gender')],
[sg.Text('Output Format:'), sg.Listbox(values=('Nuix', 'XWays', 'csv'), size=(29, 3))],
[sg.Text('Video Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('# of frames to be analyzed per Minute:', size=(36, 0))],
[sg.Slider(range=(1, 120), orientation='h', size=(29, 20), default_value=30)],
[sg.Text('Max. # of frames to be analyzed per Video:', size=(36, 0))],
[sg.Slider(range=(1, 500), orientation='h', size=(29, 20), default_value=100)],
[sg.Text('Check for & discard similar frames?'),
sg.InputCombo(('Yes', 'No'), default_value='No', size=(10, 2))],
[sg.Text('Face Recognition', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Specify folder with known faces (if FaceReq selected): ')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/known', button_color=('black', 'grey'))],
[sg.Text('Specify face recognition tolerance (Default: 60%):', size=(48, 0))],
[sg.Slider(range=(0, 100), orientation='h', size=(29, 20), default_value=60)],
[sg.Checkbox('Output detected faces as jpg', size=(25, 2))],
[sg.Text('Audio Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('AUDIO PROCESSING')],
[sg.Checkbox('Speech Detection', size=(15, 2))],
[sg.OK(button_color=('black', 'sea green')), sg.Cancel(button_color=('black', 'grey'))]]
layout_progress = [[sg.Text('Detection in progress')],
[sg.ProgressBar(12, orientation='h', size=(20, 20), key='progressbar')],
[sg.Cancel()]]
# Render the GUI
gui_input = sg.Window('BKP Media Detector').Layout(layout).Read()
error = False
# Validate input
validateInput(gui_input)
# Initiating progress meter
updateProgressMeter(1, 'Initializing variables & parameters...')
startTime = datetime.now()
# Variable to determine minimum GPU Processor requirement & to disable TF log output
# os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '5'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Validating TF version
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# Defining multiple needed variables based on GUI input & adding TF/OpenVINO directory to path
PATH_TO_INPUT = Path(gui_input[1][0])
TEST_IMAGE_PATHS = Path.iterdir(PATH_TO_INPUT)
number_of_input = 0
for elements in Path.iterdir(PATH_TO_INPUT):
number_of_input += 1
PATH_TO_RESULTS = Path(gui_input[1][1])
PATH_TO_OBJECT_DETECTION_DIR = '/home/b/Programs/tensorflow/models/research' # PLACEHOLDER-tobereplacedWithPathtoDirectory
sys.path.append(PATH_TO_OBJECT_DETECTION_DIR)
REPORT_FORMAT = gui_input[1][8]
frames_per_second = gui_input[1][9] / 60
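    # the GUI slider is given in frames per minute; convert to frames per second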
max_frames_per_video = gui_input[1][10]
video_sensitivity_text = gui_input[1][11]
KNOWN_FACES_PATH = gui_input[1][12]
facereq_tolerance = int(gui_input[1][13])/100
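    # the tolerance slider is a percentage; face_recognition expects a fraction between 0 and 1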
output_detFaces = gui_input[1][14]
if video_sensitivity_text == "Yes":
video_sensitivity = 20
else:
video_sensitivity = 0
# Check which models to apply and load their corresponding label maps
from object_detection.utils import label_map_util
graphlist = []
indexlist = []
MODEL1 = bool(gui_input[1][2])
if MODEL1:
OPEN_IMAGES_GRAPH = str(Path('Models/OpenImages/openimages.pb'))
OPEN_IMAGES_LABELS = str(OPEN_IMAGES_GRAPH)[:-3] + '.pbtxt'
OPEN_IMAGES_INDEX = label_map_util.create_category_index_from_labelmap(OPEN_IMAGES_LABELS)
graphlist.append(OPEN_IMAGES_GRAPH)
indexlist.append(OPEN_IMAGES_INDEX)
MODEL2 = bool(gui_input[1][3])
if MODEL2:
AVA_GRAPH = str(Path('Models/AVA/ava.pb'))
AVA_LABELS = str(AVA_GRAPH)[:-3] + '.pbtxt'
AVA_INDEX = label_map_util.create_category_index_from_labelmap(AVA_LABELS)
graphlist.append(AVA_GRAPH)
indexlist.append(AVA_INDEX)
MODEL3 = bool(gui_input[1][4])
if MODEL3:
SPECIAL_DETECTOR_GRAPH = str(Path('Models/ISLogos/islogos.pb'))
SPECIAL_DETECTOR_LABELS = str(SPECIAL_DETECTOR_GRAPH)[:-3] + '.pbtxt'
SPECIAL_DETECTOR_INDEX = label_map_util.create_category_index_from_labelmap(SPECIAL_DETECTOR_LABELS)
graphlist.append(SPECIAL_DETECTOR_GRAPH)
indexlist.append(SPECIAL_DETECTOR_INDEX)
FACE_RECOGNITION = bool(gui_input[1][5])
OPEN_VINO_vgg19 = bool(gui_input[1][6])
FACE_MODEL = bool(gui_input[1][7])
AUDIO_SPEECH_DETECTION = bool(gui_input[1][15])
# Update the progress indicator
updateProgressMeter(2, 'Process started. Loading ' + str(number_of_input) + ' media files...')
# Create logfile
logfile = open(str(PATH_TO_RESULTS / 'Logfile.txt'), 'w')
logfile.write('***DETECTION LOG***\n')
logfile.write("*" + str(datetime.now()) + ': \tProcess started. Loading images...*\n')
# Create resultsfile
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'w')
if REPORT_FORMAT[0] == 'Nuix':
detectionresults.write("tag,searchterm\n")
else:
detectionresults.write("name,hash,score,category\n")
detectionresults.flush()
detectionresults.close()
# Initiate needed variables
vidlist = []
audiolist = []
final_images = []
errors = []
# Multiprocess the image load function on all CPU cores available
pool = Pool(maxtasksperchild=100)
processed_images = pool.map(load_image_into_numpy_array, TEST_IMAGE_PATHS, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
# Clean the result for None types (where image conversion failed)
    processed_images = [x for x in processed_images if x is not None]
# Check for the different flags set by mimetype
for processed_image in processed_images:
if str(processed_image[1]) == "VIDEO":
# If present, populate the video list
vidlist.append(processed_image[0])
elif str(processed_image[1]) == "AUDIO":
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "OCTET":
if processed_image[0][-3:] in ["mp4", "mov", "mpg", "avi", "exo", "mkv", "m4v", "ebm"]:
vidlist.append(processed_image[0])
else:
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "ERROR":
errors.append(processed_image[0])
else:
# If not, put it to the final images list
final_images.append(processed_image)
for error in errors:
logfile.write(error)
logfile.flush()
# Count the number of images before adding the videoframes
number_of_images = len(final_images)
# Update the progress indicator
updateProgressMeter(3, 'Loading ' + str(len(vidlist)) + ' Videos...')
# Multiprocess the video load function on all CPU cores available
pool = Pool(maxtasksperchild=10)
videoframes = pool.map(load_video_into_numpy_array, vidlist, chunksize=2)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
number_of_videos = 0
# Clean the result for None types (where video conversion failed)
for video in videoframes:
if type(video) is str:
errors.append(video)
if type(video) is list:
final_images.extend(video)
number_of_videos += 1
for error in errors:
logfile.write(error)
logfile.flush()
# Split the result from the loading function into hashes and image arrays
if len(final_images) != 0:
image_path, hashvalues, image_nps = zip(*final_images)
# Update the progress indicator & logfile
updateProgressMeter(4, 'Starting detection of ' + str(len(final_images)) + ' media files')
logfile.write("*" + str(datetime.now()) + ": \tLoading completed. Detecting...*\n")
# Conduct Face Recognition if needed
if FACE_RECOGNITION:
known_face_counter = faceRecognition(KNOWN_FACES_PATH, image_path, image_nps, hashvalues)
# Conduct OpenVino VGG19 Model if needed
if OPEN_VINO_vgg19:
run_inference_openvino(image_path, image_nps, hashvalues)
# Execute all other detection models
if len(final_images) != 0:
run_inference_for_multiple_images(image_path, image_nps, hashvalues)
# Conduct face/age/gender detection
if FACE_MODEL:
faceDetection(image_path, image_nps, hashvalues)
if AUDIO_SPEECH_DETECTION:
audiofiles_processed = audioSpeechDetection(audiolist)
else:
audiofiles_processed = 0
# Check whether an Xways report needs to be created
if REPORT_FORMAT[0] == 'XWays':
createXWaysReport()
# Write process statistics to logfile
logfile.write("*Results:\t\t\t" + str(PATH_TO_RESULTS / 'Detection_Results.csv*\n'))
logfile.write("*Total Amount of Files:\t\t" + str(number_of_input) + " (of which " + str(number_of_images + number_of_videos + audiofiles_processed) + " were processed.)*\n")
logfile.write("*Processed Images:\t\t" + str(number_of_images) + "*\n")
logfile.write("*Processed Videos: \t\t" + str(number_of_videos) + " (analyzed " + str(frames_per_second * 60) + " frames per minute, up to max. 500) with the check for content-based duplicates set to " + video_sensitivity_text + "\n")
logfile.write("*Processed Audio Files:\t\t" + str(audiofiles_processed) + "*\n")
logfile.write("*Applied models:\n")
for y in range(0, len(graphlist)): logfile.write("\t\t\t\t" + graphlist[y] + "\n")
if OPEN_VINO_vgg19: logfile.write("\t\t\t\tOpenVINO Object Detector\n")
if FACE_MODEL: logfile.write("\t\t\t\tFace-Age-Gender Detector\n")
if FACE_RECOGNITION: logfile.write("\t\t\t\tFace Recognition (Known faces detected: " + str(known_face_counter) + ")\n")
logfile.write("*Processing time:\t\t" + str(datetime.now() - startTime) + "*\n")
logfile.write("*Time per processed file:\t" + str((datetime.now() - startTime) / (number_of_images + number_of_videos + audiofiles_processed)) + "*\n")
logfile.flush()
logfile.close()
# Update progress indicator
sg.OneLineProgressMeter('BKP Media Detector', 12, 12, 'key', 'Detection finished',orientation='h',size=(100, 10))
# Deliver final success pop up to user
sg.Popup('The detection was successful',
'The results are placed here:',
'Path: "{}"'.format(str(PATH_TO_RESULTS)))
| [
"cv2.rectangle",
"PySimpleGUI.OneLineProgressMeter",
"numpy.argsort",
"numpy.array",
"face_recognition.load_image_file",
"tensorflow.gfile.GFile",
"PySimpleGUI.OK",
"sys.path.append",
"tensorflow.Graph",
"PySimpleGUI.Popup",
"PySimpleGUI.Slider",
"pathlib.Path",
"subprocess.Popen",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.GraphDef",
"face_recognition.face_distance",
"openvino.inference_engine.IECore",
"PySimpleGUI.Input",
"numpy.argmin",
"numpy.maximum",
"openvino.inference_engine.IENetwork",
"tensorflow.get_default_graph",
"numpy.round",
"pathlib.Path.iterdir",
"face_recognition.face_locations",
"numpy.ceil",
"hashlib.md5",
"PySimpleGUI.Listbox",
"magic.from_file",
"numpy.floor",
"PySimpleGUI.Text",
"numpy.squeeze",
"cv2.putText",
"PySimpleGUI.InputCombo",
"object_detection.utils.label_map_util.create_category_index_from_labelmap",
"cv2.cvtColor",
"imagehash.phash",
"tensorflow.import_graph_def",
"distutils.version.StrictVersion",
"cv2.resize",
"time.gmtime",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.minimum",
"PySimpleGUI.FolderBrowse",
"PySimpleGUI.Checkbox",
"PySimpleGUI.Cancel",
"cv2.copyMakeBorder",
"PySimpleGUI.ProgressBar",
"cv2.rotate",
"datetime.datetime.now",
"numpy.zeros",
"numpy.ndarray",
"multiprocessing.Pool",
"cv2.VideoCapture",
"face_recognition.face_encodings",
"face_recognition.compare_faces",
"Models.Face.detect_face.detect_face",
"numpy.expand_dims",
"PySimpleGUI.Window",
"PySimpleGUI.ChangeLookAndFeel"
]
| [((16564, 16598), 'pathlib.Path', 'Path', (['"""Models/OpenVINO/age-gender"""'], {}), "('Models/OpenVINO/age-gender')\n", (16568, 16598), False, 'from pathlib import Path\n'), ((16953, 16961), 'openvino.inference_engine.IECore', 'IECore', ([], {}), '()\n', (16959, 16961), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((16987, 17032), 'openvino.inference_engine.IENetwork', 'IENetwork', ([], {'model': 'model_xml', 'weights': 'model_bin'}), '(model=model_xml, weights=model_bin)\n', (16996, 17032), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((17208, 17238), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(n, c, h, w)'}), '(shape=(n, c, h, w))\n', (17218, 17238), True, 'import numpy as np\n'), ((20732, 20761), 'pathlib.Path', 'Path', (['"""Models/OpenVINO/vgg19"""'], {}), "('Models/OpenVINO/vgg19')\n", (20736, 20761), False, 'from pathlib import Path\n'), ((21032, 21040), 'openvino.inference_engine.IECore', 'IECore', ([], {}), '()\n', (21038, 21040), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((21066, 21111), 'openvino.inference_engine.IENetwork', 'IENetwork', ([], {'model': 'model_xml', 'weights': 'model_bin'}), '(model=model_xml, weights=model_bin)\n', (21075, 21111), False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((21318, 21348), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(n, c, h, w)'}), '(shape=(n, c, h, w))\n', (21328, 21348), True, 'import numpy as np\n'), ((27234, 27260), 'multiprocessing.Pool', 'Pool', ([], {'maxtasksperchild': '(100)'}), '(maxtasksperchild=100)\n', (27238, 27260), False, 'from multiprocessing import Pool\n'), ((30475, 30503), 'PySimpleGUI.ChangeLookAndFeel', 'sg.ChangeLookAndFeel', (['"""Dark"""'], {}), "('Dark')\n", (30495, 30503), True, 'import PySimpleGUI as sg\n'), ((33467, 33481), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (33479, 33481), False, 'from datetime import datetime\n'), ((33979, 34000), 'pathlib.Path', 'Path', (['gui_input[1][0]'], {}), '(gui_input[1][0])\n', (33983, 34000), False, 'from pathlib import Path\n'), ((34024, 34051), 'pathlib.Path.iterdir', 'Path.iterdir', (['PATH_TO_INPUT'], {}), '(PATH_TO_INPUT)\n', (34036, 34051), False, 'from pathlib import Path\n'), ((34096, 34123), 'pathlib.Path.iterdir', 'Path.iterdir', (['PATH_TO_INPUT'], {}), '(PATH_TO_INPUT)\n', (34108, 34123), False, 'from pathlib import Path\n'), ((34176, 34197), 'pathlib.Path', 'Path', (['gui_input[1][1]'], {}), '(gui_input[1][1])\n', (34180, 34197), False, 'from pathlib import Path\n'), ((34330, 34375), 'sys.path.append', 'sys.path.append', (['PATH_TO_OBJECT_DETECTION_DIR'], {}), '(PATH_TO_OBJECT_DETECTION_DIR)\n', (34345, 34375), False, 'import sys\n'), ((37150, 37176), 'multiprocessing.Pool', 'Pool', ([], {'maxtasksperchild': '(100)'}), '(maxtasksperchild=100)\n', (37154, 37176), False, 'from multiprocessing import Pool\n'), ((38693, 38718), 'multiprocessing.Pool', 'Pool', ([], {'maxtasksperchild': '(10)'}), '(maxtasksperchild=10)\n', (38697, 38718), False, 'from multiprocessing import Pool\n'), ((41916, 42035), 'PySimpleGUI.OneLineProgressMeter', 'sg.OneLineProgressMeter', (['"""BKP Media Detector"""', '(12)', '(12)', '"""key"""', '"""Detection finished"""'], {'orientation': '"""h"""', 'size': '(100, 10)'}), "('BKP Media Detector', 12, 12, 'key',\n 'Detection finished', orientation='h', size=(100, 10))\n", (41939, 42035), True, 'import PySimpleGUI as sg\n'), ((1114, 1255), 'PySimpleGUI.Popup', 'sg.Popup', (['"""You have not populated all required fields. 
Aborting!"""'], {'title': '"""Error"""', 'button_color': "('black', 'red')", 'background_color': '"""grey"""'}), "('You have not populated all required fields. Aborting!', title=\n 'Error', button_color=('black', 'red'), background_color='grey')\n", (1122, 1255), True, 'import PySimpleGUI as sg\n'), ((1378, 1488), 'PySimpleGUI.OneLineProgressMeter', 'sg.OneLineProgressMeter', (['"""BKP Media Detector"""', 'step', '(12)', '"""key"""', 'customText'], {'orientation': '"""h"""', 'size': '(50, 25)'}), "('BKP Media Detector', step, 12, 'key', customText,\n orientation='h', size=(50, 25))\n", (1401, 1488), True, 'import PySimpleGUI as sg\n'), ((1812, 1834), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1822, 1834), False, 'from PIL import Image\n'), ((2646, 2659), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (2657, 2659), False, 'import hashlib\n'), ((4073, 4143), 'subprocess.Popen', 'subprocess.Popen', (['cmnd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (4089, 4143), False, 'import subprocess\n'), ((5017, 5045), 'cv2.VideoCapture', 'cv2.VideoCapture', (['image_path'], {}), '(image_path)\n', (5033, 5045), False, 'import cv2\n'), ((6100, 6113), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (6111, 6113), False, 'import hashlib\n'), ((8471, 8481), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8479, 8481), True, 'import tensorflow as tf\n'), ((13177, 13189), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13187, 13189), True, 'import tensorflow as tf\n'), ((13291, 13310), 'pathlib.Path', 'Path', (['"""Models/Face"""'], {}), "('Models/Face')\n", (13295, 13310), False, 'from pathlib import Path\n'), ((23711, 23733), 'pathlib.Path', 'Path', (['known_faces_path'], {}), '(known_faces_path)\n', (23715, 23733), False, 'from pathlib import Path\n'), ((23849, 23893), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['known_face'], {}), '(known_face)\n', (23881, 23893), False, 'import face_recognition\n'), ((24547, 24608), 'face_recognition.face_locations', 'face_recognition.face_locations', (['image_to_detect'], {'model': '"""cnn"""'}), "(image_to_detect, model='cnn')\n", (24578, 24608), False, 'import face_recognition\n'), ((24634, 24698), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['image_to_detect', 'face_locations'], {}), '(image_to_detect, face_locations)\n', (24665, 24698), False, 'import face_recognition\n'), ((33710, 33739), 'distutils.version.StrictVersion', 'StrictVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (33723, 33739), False, 'from distutils.version import StrictVersion\n'), ((33742, 33764), 'distutils.version.StrictVersion', 'StrictVersion', (['"""1.9.0"""'], {}), "('1.9.0')\n", (33755, 33764), False, 'from distutils.version import StrictVersion\n'), ((35178, 35248), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['OPEN_IMAGES_LABELS'], {}), '(OPEN_IMAGES_LABELS)\n', (35228, 35248), False, 'from object_detection.utils import label_map_util\n'), ((35511, 35573), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['AVA_LABELS'], {}), '(AVA_LABELS)\n', (35561, 35573), False, 'from object_detection.utils import label_map_util\n'), ((35880, 35955), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 
'label_map_util.create_category_index_from_labelmap', (['SPECIAL_DETECTOR_LABELS'], {}), '(SPECIAL_DETECTOR_LABELS)\n', (35930, 35955), False, 'from object_detection.utils import label_map_util\n'), ((6671, 6719), 'cv2.cvtColor', 'cv2.cvtColor', (['extracted_frame', 'cv2.COLOR_BGR2RGB'], {}), '(extracted_frame, cv2.COLOR_BGR2RGB)\n', (6683, 6719), False, 'import cv2\n'), ((8552, 8565), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (8563, 8565), True, 'import tensorflow as tf\n'), ((18904, 18959), 'cv2.resize', 'cv2.resize', (['image', '(new_w, new_h)'], {'interpolation': 'interp'}), '(image, (new_w, new_h), interpolation=interp)\n', (18914, 18959), False, 'import cv2\n'), ((18985, 19028), 'cv2.cvtColor', 'cv2.cvtColor', (['scaled_img', 'cv2.COLOR_BGR2RGB'], {}), '(scaled_img, cv2.COLOR_BGR2RGB)\n', (18997, 19028), False, 'import cv2\n'), ((19054, 19175), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['scaled_img', 'pad_top', 'pad_bot', 'pad_left', 'pad_right'], {'borderType': 'cv2.BORDER_CONSTANT', 'value': 'padColor'}), '(scaled_img, pad_top, pad_bot, pad_left, pad_right,\n borderType=cv2.BORDER_CONSTANT, value=padColor)\n', (19072, 19175), False, 'import cv2\n'), ((19561, 19590), 'numpy.squeeze', 'np.squeeze', (["res['prob'][y][0]"], {}), "(res['prob'][y][0])\n", (19571, 19590), True, 'import numpy as np\n'), ((21726, 21754), 'cv2.resize', 'cv2.resize', (['temp_pic', '(w, h)'], {}), '(temp_pic, (w, h))\n', (21736, 21754), False, 'import cv2\n'), ((22243, 22260), 'numpy.squeeze', 'np.squeeze', (['probs'], {}), '(probs)\n', (22253, 22260), True, 'import numpy as np\n'), ((23930, 23981), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['known_person_image'], {}), '(known_person_image)\n', (23961, 23981), False, 'import face_recognition\n'), ((24936, 25036), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['known_face_encodings', 'face_encoding'], {'tolerance': 'facereq_tolerance'}), '(known_face_encodings, face_encoding,\n tolerance=facereq_tolerance)\n', (24966, 25036), False, 'import face_recognition\n'), ((25149, 25216), 'face_recognition.face_distance', 'face_recognition.face_distance', (['known_face_encodings', 'face_encoding'], {}), '(known_face_encodings, face_encoding)\n', (25179, 25216), False, 'import face_recognition\n'), ((25248, 25273), 'numpy.argmin', 'np.argmin', (['face_distances'], {}), '(face_distances)\n', (25257, 25273), True, 'import numpy as np\n'), ((27868, 27881), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (27879, 27881), False, 'import hashlib\n'), ((30520, 30595), 'PySimpleGUI.Text', 'sg.Text', (['"""General Settings"""'], {'font': "('Helvetica', 13)", 'text_color': '"""sea green"""'}), "('General Settings', font=('Helvetica', 13), text_color='sea green')\n", (30527, 30595), True, 'import PySimpleGUI as sg\n'), ((30613, 30673), 'PySimpleGUI.Text', 'sg.Text', (['"""Please specify the folder holding the media data:"""'], {}), "('Please specify the folder holding the media data:')\n", (30620, 30673), True, 'import PySimpleGUI as sg\n'), ((30691, 30701), 'PySimpleGUI.Input', 'sg.Input', ([], {}), '()\n', (30699, 30701), True, 'import PySimpleGUI as sg\n'), ((30703, 30809), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', (['"""Browse"""'], {'initial_folder': '"""/home/b/Desktop/TestBilder"""', 'button_color': "('black', 'grey')"}), "('Browse', initial_folder='/home/b/Desktop/TestBilder',\n button_color=('black', 'grey'))\n", (30718, 30809), True, 'import PySimpleGUI as sg\n'), ((30853, 30896), 'PySimpleGUI.Text', 
'sg.Text', (['"""Where shall I place the results?"""'], {}), "('Where shall I place the results?')\n", (30860, 30896), True, 'import PySimpleGUI as sg\n'), ((30914, 30924), 'PySimpleGUI.Input', 'sg.Input', ([], {}), '()\n', (30922, 30924), True, 'import PySimpleGUI as sg\n'), ((30926, 31033), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', (['"""Browse"""'], {'initial_folder': '"""/home/b/Desktop/TestResults"""', 'button_color': "('black', 'grey')"}), "('Browse', initial_folder='/home/b/Desktop/TestResults',\n button_color=('black', 'grey'))\n", (30941, 31033), True, 'import PySimpleGUI as sg\n'), ((31060, 31091), 'PySimpleGUI.Text', 'sg.Text', (['"""TENSORFLOW DETECTORS"""'], {}), "('TENSORFLOW DETECTORS')\n", (31067, 31091), True, 'import PySimpleGUI as sg\n'), ((31109, 31153), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Objects/Persons"""'], {'size': '(15, 2)'}), "('Objects/Persons', size=(15, 2))\n", (31120, 31153), True, 'import PySimpleGUI as sg\n'), ((31170, 31192), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Actions"""'], {}), "('Actions')\n", (31181, 31192), True, 'import PySimpleGUI as sg\n'), ((31209, 31232), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""IS Logos"""'], {}), "('IS Logos')\n", (31220, 31232), True, 'import PySimpleGUI as sg\n'), ((31249, 31280), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Face Recognition"""'], {}), "('Face Recognition')\n", (31260, 31280), True, 'import PySimpleGUI as sg\n'), ((31298, 31328), 'PySimpleGUI.Text', 'sg.Text', (['"""OPEN VINO DETECTORS"""'], {}), "('OPEN VINO DETECTORS')\n", (31305, 31328), True, 'import PySimpleGUI as sg\n'), ((31346, 31387), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Objects-fast"""'], {'size': '(15, 2)'}), "('Objects-fast', size=(15, 2))\n", (31357, 31387), True, 'import PySimpleGUI as sg\n'), ((31404, 31435), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Faces/Age/Gender"""'], {}), "('Faces/Age/Gender')\n", (31415, 31435), True, 'import PySimpleGUI as sg\n'), ((31453, 31478), 'PySimpleGUI.Text', 'sg.Text', (['"""Output Format:"""'], {}), "('Output Format:')\n", (31460, 31478), True, 'import PySimpleGUI as sg\n'), ((31480, 31537), 'PySimpleGUI.Listbox', 'sg.Listbox', ([], {'values': "('Nuix', 'XWays', 'csv')", 'size': '(29, 3)'}), "(values=('Nuix', 'XWays', 'csv'), size=(29, 3))\n", (31490, 31537), True, 'import PySimpleGUI as sg\n'), ((31555, 31628), 'PySimpleGUI.Text', 'sg.Text', (['"""Video Settings"""'], {'font': "('Helvetica', 13)", 'text_color': '"""sea green"""'}), "('Video Settings', font=('Helvetica', 13), text_color='sea green')\n", (31562, 31628), True, 'import PySimpleGUI as sg\n'), ((31646, 31709), 'PySimpleGUI.Text', 'sg.Text', (['"""# of frames to be analyzed per Minute:"""'], {'size': '(36, 0)'}), "('# of frames to be analyzed per Minute:', size=(36, 0))\n", (31653, 31709), True, 'import PySimpleGUI as sg\n'), ((31727, 31802), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(1, 120)', 'orientation': '"""h"""', 'size': '(29, 20)', 'default_value': '(30)'}), "(range=(1, 120), orientation='h', size=(29, 20), default_value=30)\n", (31736, 31802), True, 'import PySimpleGUI as sg\n'), ((31820, 31887), 'PySimpleGUI.Text', 'sg.Text', (['"""Max. # of frames to be analyzed per Video:"""'], {'size': '(36, 0)'}), "('Max. 
# of frames to be analyzed per Video:', size=(36, 0))\n", (31827, 31887), True, 'import PySimpleGUI as sg\n'), ((31905, 31981), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(1, 500)', 'orientation': '"""h"""', 'size': '(29, 20)', 'default_value': '(100)'}), "(range=(1, 500), orientation='h', size=(29, 20), default_value=100)\n", (31914, 31981), True, 'import PySimpleGUI as sg\n'), ((31999, 32045), 'PySimpleGUI.Text', 'sg.Text', (['"""Check for & discard similar frames?"""'], {}), "('Check for & discard similar frames?')\n", (32006, 32045), True, 'import PySimpleGUI as sg\n'), ((32062, 32124), 'PySimpleGUI.InputCombo', 'sg.InputCombo', (["('Yes', 'No')"], {'default_value': '"""No"""', 'size': '(10, 2)'}), "(('Yes', 'No'), default_value='No', size=(10, 2))\n", (32075, 32124), True, 'import PySimpleGUI as sg\n'), ((32142, 32217), 'PySimpleGUI.Text', 'sg.Text', (['"""Face Recognition"""'], {'font': "('Helvetica', 13)", 'text_color': '"""sea green"""'}), "('Face Recognition', font=('Helvetica', 13), text_color='sea green')\n", (32149, 32217), True, 'import PySimpleGUI as sg\n'), ((32235, 32301), 'PySimpleGUI.Text', 'sg.Text', (['"""Specify folder with known faces (if FaceReq selected): """'], {}), "('Specify folder with known faces (if FaceReq selected): ')\n", (32242, 32301), True, 'import PySimpleGUI as sg\n'), ((32319, 32329), 'PySimpleGUI.Input', 'sg.Input', ([], {}), '()\n', (32327, 32329), True, 'import PySimpleGUI as sg\n'), ((32331, 32432), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', (['"""Browse"""'], {'initial_folder': '"""/home/b/Desktop/known"""', 'button_color': "('black', 'grey')"}), "('Browse', initial_folder='/home/b/Desktop/known',\n button_color=('black', 'grey'))\n", (32346, 32432), True, 'import PySimpleGUI as sg\n'), ((32446, 32521), 'PySimpleGUI.Text', 'sg.Text', (['"""Specify face recognition tolerance (Default: 60%):"""'], {'size': '(48, 0)'}), "('Specify face recognition tolerance (Default: 60%):', size=(48, 0))\n", (32453, 32521), True, 'import PySimpleGUI as sg\n'), ((32539, 32614), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 100)', 'orientation': '"""h"""', 'size': '(29, 20)', 'default_value': '(60)'}), "(range=(0, 100), orientation='h', size=(29, 20), default_value=60)\n", (32548, 32614), True, 'import PySimpleGUI as sg\n'), ((32632, 32689), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Output detected faces as jpg"""'], {'size': '(25, 2)'}), "('Output detected faces as jpg', size=(25, 2))\n", (32643, 32689), True, 'import PySimpleGUI as sg\n'), ((32707, 32780), 'PySimpleGUI.Text', 'sg.Text', (['"""Audio Settings"""'], {'font': "('Helvetica', 13)", 'text_color': '"""sea green"""'}), "('Audio Settings', font=('Helvetica', 13), text_color='sea green')\n", (32714, 32780), True, 'import PySimpleGUI as sg\n'), ((32798, 32825), 'PySimpleGUI.Text', 'sg.Text', (['"""AUDIO PROCESSING"""'], {}), "('AUDIO PROCESSING')\n", (32805, 32825), True, 'import PySimpleGUI as sg\n'), ((32843, 32888), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (['"""Speech Detection"""'], {'size': '(15, 2)'}), "('Speech Detection', size=(15, 2))\n", (32854, 32888), True, 'import PySimpleGUI as sg\n'), ((32906, 32948), 'PySimpleGUI.OK', 'sg.OK', ([], {'button_color': "('black', 'sea green')"}), "(button_color=('black', 'sea green'))\n", (32911, 32948), True, 'import PySimpleGUI as sg\n'), ((32950, 32991), 'PySimpleGUI.Cancel', 'sg.Cancel', ([], {'button_color': "('black', 'grey')"}), "(button_color=('black', 'grey'))\n", (32959, 32991), True, 'import PySimpleGUI as sg\n'), ((33019, 33051), 
'PySimpleGUI.Text', 'sg.Text', (['"""Detection in progress"""'], {}), "('Detection in progress')\n", (33026, 33051), True, 'import PySimpleGUI as sg\n'), ((33078, 33147), 'PySimpleGUI.ProgressBar', 'sg.ProgressBar', (['(12)'], {'orientation': '"""h"""', 'size': '(20, 20)', 'key': '"""progressbar"""'}), "(12, orientation='h', size=(20, 20), key='progressbar')\n", (33092, 33147), True, 'import PySimpleGUI as sg\n'), ((33174, 33185), 'PySimpleGUI.Cancel', 'sg.Cancel', ([], {}), '()\n', (33183, 33185), True, 'import PySimpleGUI as sg\n'), ((35041, 35080), 'pathlib.Path', 'Path', (['"""Models/OpenImages/openimages.pb"""'], {}), "('Models/OpenImages/openimages.pb')\n", (35045, 35080), False, 'from pathlib import Path\n'), ((35412, 35437), 'pathlib.Path', 'Path', (['"""Models/AVA/ava.pb"""'], {}), "('Models/AVA/ava.pb')\n", (35416, 35437), False, 'from pathlib import Path\n'), ((35734, 35767), 'pathlib.Path', 'Path', (['"""Models/ISLogos/islogos.pb"""'], {}), "('Models/ISLogos/islogos.pb')\n", (35738, 35767), False, 'from pathlib import Path\n'), ((2990, 3028), 'magic.from_file', 'magic.from_file', (['image_path'], {'mime': '(True)'}), '(image_path, mime=True)\n', (3005, 3028), False, 'import magic\n'), ((3600, 3638), 'magic.from_file', 'magic.from_file', (['image_path'], {'mime': '(True)'}), '(image_path, mime=True)\n', (3615, 3638), False, 'import magic\n'), ((6602, 6639), 'cv2.rotate', 'cv2.rotate', (['extracted_frame', 'rotation'], {}), '(extracted_frame, rotation)\n', (6612, 6639), False, 'import cv2\n'), ((7193, 7218), 'PIL.Image.fromarray', 'Image.fromarray', (['np_array'], {}), '(np_array)\n', (7208, 7218), False, 'from PIL import Image\n'), ((7246, 7277), 'imagehash.phash', 'imagehash.phash', (['frame_to_check'], {}), '(frame_to_check)\n', (7261, 7277), False, 'import imagehash\n'), ((8915, 8949), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['graphlist[y]', '"""rb"""'], {}), "(graphlist[y], 'rb')\n", (8929, 8949), True, 'import tensorflow as tf\n'), ((9083, 9125), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (9102, 9125), True, 'import tensorflow as tf\n'), ((9176, 9188), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9186, 9188), True, 'import tensorflow as tf\n'), ((13751, 13827), 'Models.Face.detect_face.detect_face', 'detect_face.detect_face', (['image', 'minsize', 'pnet', 'rnet', 'onet', 'threshold', 'factor'], {}), '(image, minsize, pnet, rnet, onet, threshold, factor)\n', (13774, 13827), False, 'from Models.Face import detect_face\n'), ((19511, 19542), 'numpy.squeeze', 'np.squeeze', (["res['age_conv3'][y]"], {}), "(res['age_conv3'][y])\n", (19521, 19542), True, 'import numpy as np\n'), ((24015, 24031), 'pathlib.Path', 'Path', (['known_face'], {}), '(known_face)\n', (24019, 24031), False, 'from pathlib import Path\n'), ((6840, 6856), 'time.gmtime', 'gmtime', (['timecode'], {}), '(timecode)\n', (6846, 6856), False, 'from time import gmtime\n'), ((12931, 12945), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12943, 12945), False, 'from datetime import datetime\n'), ((16185, 16199), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16197, 16199), False, 'from datetime import datetime\n'), ((20462, 20476), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20474, 20476), False, 'from datetime import datetime\n'), ((22284, 22301), 'numpy.argsort', 'np.argsort', (['probs'], {}), '(probs)\n', (22294, 22301), True, 'import numpy as np\n'), ((22893, 22907), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (22905, 22907), False, 'from datetime import datetime\n'), ((25907, 25983), 'cv2.rectangle', 'cv2.rectangle', (['image_to_detect', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(image_to_detect, (left, top), (right, bottom), (0, 0, 255), 2)\n', (25920, 25983), False, 'import cv2\n'), ((26067, 26164), 'cv2.rectangle', 'cv2.rectangle', (['image_to_detect', '(left, bottom - 35)', '(right, bottom)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(image_to_detect, (left, bottom - 35), (right, bottom), (0, 0,\n 255), cv2.FILLED)\n', (26080, 26164), False, 'import cv2\n'), ((26232, 26325), 'cv2.putText', 'cv2.putText', (['image_to_detect', 'name', '(left + 6, bottom - 6)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(image_to_detect, name, (left + 6, bottom - 6), font, 1.0, (255,\n 255, 255), 1)\n', (26243, 26325), False, 'import cv2\n'), ((26453, 26485), 'PIL.Image.fromarray', 'Image.fromarray', (['image_to_detect'], {}), '(image_to_detect)\n', (26468, 26485), False, 'from PIL import Image\n'), ((26563, 26577), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26575, 26577), False, 'from datetime import datetime\n'), ((26926, 26940), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26938, 26940), False, 'from datetime import datetime\n'), ((28470, 28484), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28482, 28484), False, 'from datetime import datetime\n'), ((33226, 33257), 'PySimpleGUI.Window', 'sg.Window', (['"""BKP Media Detector"""'], {}), "('BKP Media Detector')\n", (33235, 33257), True, 'import PySimpleGUI as sg\n'), ((36526, 36540), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (36538, 36540), False, 'from datetime import datetime\n'), ((39601, 39615), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (39613, 39615), False, 'from datetime import datetime\n'), ((9278, 9300), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (9298, 9300), True, 'import tensorflow as tf\n'), ((9825, 9847), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (9845, 9847), True, 'import tensorflow as tf\n'), ((14099, 14122), 'numpy.asarray', 'np.asarray', (['image.shape'], {}), '(image.shape)\n', (14109, 14122), True, 'import numpy as np\n'), ((14648, 14673), 'numpy.squeeze', 'np.squeeze', (['detectedFaces'], {}), '(detectedFaces)\n', (14658, 14673), True, 'import numpy as np\n'), ((14703, 14730), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (14711, 14730), True, 'import numpy as np\n'), ((14763, 14794), 'numpy.maximum', 'np.maximum', (['detectedFaces[0]', '(0)'], {}), '(detectedFaces[0], 0)\n', (14773, 14794), True, 'import numpy as np\n'), ((14827, 14858), 'numpy.maximum', 'np.maximum', (['detectedFaces[1]', '(0)'], {}), '(detectedFaces[1], 0)\n', (14837, 14858), True, 'import numpy as np\n'), ((14891, 14932), 'numpy.minimum', 'np.minimum', (['detectedFaces[2]', 'img_size[1]'], {}), '(detectedFaces[2], img_size[1])\n', (14901, 14932), True, 'import numpy as np\n'), ((14965, 15006), 'numpy.minimum', 'np.minimum', (['detectedFaces[3]', 'img_size[0]'], {}), '(detectedFaces[3], img_size[0])\n', (14975, 15006), True, 'import numpy as np\n'), ((17917, 17941), 'numpy.round', 'np.round', (['(new_w / aspect)'], {}), '(new_w / aspect)\n', (17925, 17941), True, 'import numpy as np\n'), ((41646, 41660), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (41658, 41660), False, 'from datetime import 
datetime\n'), ((6933, 6958), 'numpy.array', 'np.array', (['extracted_frame'], {}), '(extracted_frame)\n', (6941, 6958), True, 'import numpy as np\n'), ((14400, 14425), 'numpy.squeeze', 'np.squeeze', (['detectedFaces'], {}), '(detectedFaces)\n', (14410, 14425), True, 'import numpy as np\n'), ((18033, 18051), 'numpy.floor', 'np.floor', (['pad_vert'], {}), '(pad_vert)\n', (18041, 18051), True, 'import numpy as np\n'), ((18065, 18082), 'numpy.ceil', 'np.ceil', (['pad_vert'], {}), '(pad_vert)\n', (18072, 18082), True, 'import numpy as np\n'), ((18236, 18260), 'numpy.round', 'np.round', (['(new_h * aspect)'], {}), '(new_h * aspect)\n', (18244, 18260), True, 'import numpy as np\n'), ((24067, 24081), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24079, 24081), False, 'from datetime import datetime\n'), ((28320, 28335), 'pathlib.Path', 'Path', (['audiopath'], {}), '(audiopath)\n', (28324, 28335), False, 'from pathlib import Path\n'), ((41738, 41752), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (41750, 41752), False, 'from datetime import datetime\n'), ((9709, 9731), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (9729, 9731), True, 'import tensorflow as tf\n'), ((14284, 14322), 'numpy.squeeze', 'np.squeeze', (['detectedFaces[single_face]'], {}), '(detectedFaces[single_face])\n', (14294, 14322), True, 'import numpy as np\n'), ((18355, 18373), 'numpy.floor', 'np.floor', (['pad_horz'], {}), '(pad_horz)\n', (18363, 18373), True, 'import numpy as np\n'), ((18387, 18404), 'numpy.ceil', 'np.ceil', (['pad_horz'], {}), '(pad_horz)\n', (18394, 18404), True, 'import numpy as np\n'), ((25682, 25698), 'pathlib.Path', 'Path', (['image_path'], {}), '(image_path)\n', (25686, 25698), False, 'from pathlib import Path\n'), ((10620, 10644), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (10634, 10644), True, 'import numpy as np\n'), ((19964, 19995), 'pathlib.Path', 'Path', (['image_paths[imagelist[y]]'], {}), '(image_paths[imagelist[y]])\n', (19968, 19995), False, 'from pathlib import Path\n'), ((22734, 22754), 'pathlib.Path', 'Path', (['image_paths[i]'], {}), '(image_paths[i])\n', (22738, 22754), False, 'from pathlib import Path\n'), ((12364, 12378), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12376, 12378), False, 'from datetime import datetime\n'), ((26384, 26400), 'pathlib.Path', 'Path', (['image_path'], {}), '(image_path)\n', (26388, 26400), False, 'from pathlib import Path\n'), ((8602, 8616), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8614, 8616), False, 'from datetime import datetime\n'), ((12007, 12023), 'pathlib.Path', 'Path', (['image_path'], {}), '(image_path)\n', (12011, 12023), False, 'from pathlib import Path\n'), ((15587, 15611), 'pathlib.Path', 'Path', (['image_paths[index]'], {}), '(image_paths[index])\n', (15591, 15611), False, 'from pathlib import Path\n')] |
import io
import numpy as np
import torch.utils.model_zoo as model_zoo
import torch.onnx
import torch.nn as nn
import torch.nn.init as init
# ================================================================ #
# Building the Model #
# ================================================================ #
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor, inplace=False):
super(SuperResolutionNet, self).__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(in_channels=32, out_channels=upscale_factor ** 2, kernel_size=3, padding=1)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
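        # PixelShuffle rearranges (C*r^2, H, W) feature maps into (C, H*r, W*r), performing the upscaling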
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
# Creating an instance from SuperResolutionNet
net = SuperResolutionNet(upscale_factor=3)
# ================================================================ #
# Downloading Pretrained Weights #
# ================================================================ #
model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# Initialize model with the pretrained weights
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.load_state_dict(model_zoo.load_url(model_url, map_location=device))
net.eval() # Changing to eval mode to save it onnx format
# onnx input shape: x.shape : (batch_size=1, channel=1, H, W)
# The model expects the Y component of the YCbCr of an image as an input so it has one channel
x = torch.randn(1, 1, 224, 224, requires_grad=True)
torch_out = net(x)  # reference output from PyTorch, compared against the ONNX Runtime result below
# Export the onnx model
torch.onnx.export(net,                       # model being run
x, # model input (or a tuple for multiple inputs)
"super_resolution.onnx", # where to save the model
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
# ================================================================ #
# Loading ONNX model #
# ================================================================ #
import onnx
import onnxruntime
onnx_model = onnx.load("super_resolution.onnx")
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession("super_resolution.onnx")
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
# ================================================================ #
# Reading Original Image and Feed it to Model #
# ================================================================ #
from PIL import Image
import torchvision.transforms as transforms
img = Image.open("../../../cat_224x224.jpg")
resize = transforms.Resize([224, 224])
img = resize(img)
# The model expects the Y component of the YCbCr of an image as an input
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
img_y.unsqueeze_(0)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
# get the output image follow post-processing step from PyTorch implementation
output = Image.merge(
"YCbCr",
[img_out_y, img_cb.resize(img_out_y.size, Image.BICUBIC), img_cr.resize(img_out_y.size, Image.BICUBIC), ]
).convert("RGB")
# Save the image, we will compare this with the output image from mobile device
output.save("../../../cat_superres_with_ort.jpg")
| [
"torch.nn.ReLU",
"PIL.Image.open",
"torch.nn.PixelShuffle",
"onnxruntime.InferenceSession",
"torch.utils.model_zoo.load_url",
"torch.nn.Conv2d",
"torch.nn.init.orthogonal_",
"onnx.load",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torch.nn.init.calculate_gain",
"onnx.checker.check_model"
]
| [((3603, 3637), 'onnx.load', 'onnx.load', (['"""super_resolution.onnx"""'], {}), "('super_resolution.onnx')\n", (3612, 3637), False, 'import onnx\n'), ((3638, 3674), 'onnx.checker.check_model', 'onnx.checker.check_model', (['onnx_model'], {}), '(onnx_model)\n', (3662, 3674), False, 'import onnx\n'), ((3690, 3743), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['"""super_resolution.onnx"""'], {}), "('super_resolution.onnx')\n", (3718, 3743), False, 'import onnxruntime\n'), ((4505, 4543), 'PIL.Image.open', 'Image.open', (['"""../../../cat_224x224.jpg"""'], {}), "('../../../cat_224x224.jpg')\n", (4515, 4543), False, 'from PIL import Image\n'), ((4554, 4583), 'torchvision.transforms.Resize', 'transforms.Resize', (['[224, 224]'], {}), '([224, 224])\n', (4571, 4583), True, 'import torchvision.transforms as transforms\n'), ((4765, 4786), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4784, 4786), True, 'import torchvision.transforms as transforms\n'), ((2048, 2098), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (['model_url'], {'map_location': 'device'}), '(model_url, map_location=device)\n', (2066, 2098), True, 'import torch.utils.model_zoo as model_zoo\n'), ((516, 540), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': 'inplace'}), '(inplace=inplace)\n', (523, 540), True, 'import torch.nn as nn\n'), ((562, 629), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(64)', 'kernel_size': '(5)', 'padding': '(2)'}), '(in_channels=1, out_channels=64, kernel_size=5, padding=2)\n', (571, 629), True, 'import torch.nn as nn\n'), ((651, 719), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=64, out_channels=64, kernel_size=3, padding=1)\n', (660, 719), True, 'import torch.nn as nn\n'), ((741, 809), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=64, out_channels=32, kernel_size=3, padding=1)\n', (750, 809), True, 'import torch.nn as nn\n'), ((831, 920), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(upscale_factor ** 2)', 'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels=32, out_channels=upscale_factor ** 2, kernel_size=3,\n padding=1)\n', (840, 920), True, 'import torch.nn as nn\n'), ((946, 977), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['upscale_factor'], {}), '(upscale_factor)\n', (961, 977), True, 'import torch.nn as nn\n'), ((1478, 1513), 'torch.nn.init.orthogonal_', 'init.orthogonal_', (['self.conv4.weight'], {}), '(self.conv4.weight)\n', (1494, 1513), True, 'import torch.nn.init as init\n'), ((1295, 1322), 'torch.nn.init.calculate_gain', 'init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (1314, 1322), True, 'import torch.nn.init as init\n'), ((1368, 1395), 'torch.nn.init.calculate_gain', 'init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (1387, 1395), True, 'import torch.nn.init as init\n'), ((1441, 1468), 'torch.nn.init.calculate_gain', 'init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (1460, 1468), True, 'import torch.nn.init as init\n')] |
"""
Laplacian of a compressed-sparse graph
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(np.float)
if isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
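    # For normed=True this computes the symmetric normalized Laplacian
    # L_sym = D^{-1/2} (D - A) D^{-1/2}: each entry is divided by sqrt(degree) of
    # its row and column node, and isolated (degree-0) nodes end up with a zero row.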
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
| [
"scipy.sparse.isspmatrix",
"numpy.sqrt",
"numpy.asarray",
"numpy.issubdtype",
"numpy.concatenate",
"scipy.sparse.coo_matrix"
]
| [((2298, 2317), 'scipy.sparse.isspmatrix', 'isspmatrix', (['csgraph'], {}), '(csgraph)\n', (2308, 2317), False, 'from scipy.sparse import isspmatrix, coo_matrix\n'), ((3169, 3210), 'numpy.concatenate', 'np.concatenate', (['[lap.row, diagonal_holes]'], {}), '([lap.row, diagonal_holes])\n', (3183, 3210), True, 'import numpy as np\n'), ((3229, 3270), 'numpy.concatenate', 'np.concatenate', (['[lap.col, diagonal_holes]'], {}), '([lap.col, diagonal_holes])\n', (3243, 3270), True, 'import numpy as np\n'), ((3285, 3344), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(new_data, (new_row, new_col))'], {'shape': 'lap.shape'}), '((new_data, (new_row, new_col)), shape=lap.shape)\n', (3295, 3344), False, 'from scipy.sparse import isspmatrix, coo_matrix\n'), ((3489, 3499), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (3496, 3499), True, 'import numpy as np\n'), ((3923, 3940), 'numpy.asarray', 'np.asarray', (['graph'], {}), '(graph)\n', (3933, 3940), True, 'import numpy as np\n'), ((4083, 4093), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (4090, 4093), True, 'import numpy as np\n'), ((2148, 2184), 'numpy.issubdtype', 'np.issubdtype', (['csgraph.dtype', 'np.int'], {}), '(csgraph.dtype, np.int)\n', (2161, 2184), True, 'import numpy as np\n'), ((2207, 2244), 'numpy.issubdtype', 'np.issubdtype', (['csgraph.dtype', 'np.uint'], {}), '(csgraph.dtype, np.uint)\n', (2220, 2244), True, 'import numpy as np\n')] |
from collections import defaultdict
f = open("input.txt")
d = f.read()
houses = defaultdict(int,{(0,0):1})
cur = [0,0]
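# follow the arrows: '<'/'>' move left/right, '^'/'v' move up/down; every visited cell is tallied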
for c in d:
if c == "<":
cur[0] -= 1
if c == ">":
cur[0] += 1
if c == "v":
cur[1] += 1
if c == "^":
cur[1] -= 1
houses[tuple(cur)]+=1
print(len(houses.keys()))
| [
"collections.defaultdict"
]
| [((80, 109), 'collections.defaultdict', 'defaultdict', (['int', '{(0, 0): 1}'], {}), '(int, {(0, 0): 1})\n', (91, 109), False, 'from collections import defaultdict\n')] |
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.getLogger(__name__)
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
"""
This is a Basal-Bolus Controller that is typically practiced by a Type-1
Diabetes patient. The performance of this controller can serve as a
baseline when developing a more advanced controller.
"""
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = pd.read_csv(PATIENT_PARA_FILE)
self.target = target
def policy(self, observation, reward, done, **kwargs):
sample_time = kwargs.get('sample_time', 1)
pname = kwargs.get('patient_name')
meal = kwargs.get('meal') # unit: g/min
action = self._bb_policy(pname, meal, observation.CGM, sample_time)
return action
def _bb_policy(self, name, meal, glucose, env_sample_time):
"""
Helper function to compute the basal and bolus amount.
The basal insulin is based on the insulin amount to keep the blood
glucose in the steady state when there is no (meal) disturbance.
basal = u2ss (pmol/(L*kg)) * body_weight (kg) / 6000 (U/min)
The bolus amount is computed based on the current glucose level, the
target glucose level, the patient's correction factor and the patient's
carbohydrate ratio.
bolus = ((carbohydrate / carbohydrate_ratio) +
(current_glucose - target_glucose) / correction_factor)
/ sample_time
NOTE the bolus computed from the above formula is in unit U. The
simulator only accepts insulin rate. Hence the bolus is converted to
insulin rate.
"""
if any(self.quest.Name.str.match(name)):
quest = self.quest[self.quest.Name.str.match(name)]
params = self.patient_params[self.patient_params.Name.str.match(
name)]
u2ss = params.u2ss.values.item() # unit: pmol/(L*kg)
BW = params.BW.values.item() # unit: kg
else:
quest = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
u2ss = 1.43 # unit: pmol/(L*kg)
BW = 57.0 # unit: kg
basal = u2ss * BW / 6000 # unit: U/min
if meal > 0:
logger.info('Calculating bolus ...')
logger.info(f'Meal = {meal} g/min')
logger.info(f'glucose = {glucose}')
bolus = (
(meal * env_sample_time) / quest.CR.values + (glucose > 150) *
(glucose - self.target) / quest.CF.values).item() # unit: U
else:
bolus = 0 # unit: U
# This is to convert bolus in total amount (U) to insulin rate (U/min).
# The simulation environment does not treat basal and bolus
# differently. The unit of Action.basal and Action.bolus are the same
# (U/min).
bolus = bolus / env_sample_time # unit: U/min
return Action(basal=basal, bolus=bolus)
def reset(self):
pass
class ManualBBController(Controller):
def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
corrected=True, use_low_lim=False, low_lim=70):
super().__init__(self)
self.target = target
self.orig_cr = self.cr = cr
self.orig_cf = self.cf = cf
self.orig_basal = self.basal = basal
self.sample_rate = sample_rate
self.use_cf = use_cf
self.use_bol = use_bol
self.cooldown = cooldown
self.last_cf = np.inf
self.corrected = corrected
self.use_low_lim = use_low_lim
self.low_lim = low_lim
def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
self.cr += cr_incr
self.cf += cf_incr
self.basal += basal_incr
def policy(self, observation, reward, done, **kwargs):
carbs = kwargs.get('carbs')
glucose = kwargs.get('glucose')
action = self.manual_bb_policy(carbs, glucose)
return action
def manual_bb_policy(self, carbs, glucose, log=False):
if carbs > 0:
if self.corrected:
carb_correct = carbs / self.cr
else:
# assuming carbs are already multiplied by sampling rate
carb_correct = (carbs/self.sample_rate) / self.cr
hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
bolus = 0
if self.use_low_lim:
bolus -= hypo_correct
if self.use_cf:
if self.last_cf > self.cooldown and hyper_correct > 0:
bolus += hyper_correct
self.last_cf = 0
if self.use_bol:
bolus += carb_correct
bolus = bolus / self.sample_rate
else:
bolus = 0
carb_correct = 0
hyper_correct = 0
hypo_correct = 0
self.last_cf += self.sample_rate
if log:
return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
else:
return Action(basal=self.basal, bolus=bolus)
def get_params(self):
return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
def adjust(self, basal_adj, cr_adj):
        self.basal = self.orig_basal + basal_adj
self.cr = self.orig_cr * cr_adj
def reset(self):
self.cr = self.orig_cr
self.cf = self.orig_cf
self.basal = self.orig_basal
self.last_cf = np.inf
def bb_test(bbc, env, n_days, seed, full_save=False):
env.seeds['sensor'] = seed
env.seeds['scenario'] = seed
env.seeds['patient'] = seed
env.reset()
full_patient_state = []
carb_error_mean = 0
carb_error_std = 0.2
carb_miss_prob = 0.05
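    # Simulate imperfect carb counting: each meal is missed entirely with 5%
    # probability, otherwise a zero-mean multiplicative error (std 0.2) is applied.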
action = bbc.manual_bb_policy(carbs=0, glucose=140)
for _ in tqdm(range(n_days*288)):
obs, reward, done, info = env.step(action=action.basal+action.bolus)
bg = env.env.CGM_hist[-1]
carbs = info['meal']
if np.random.uniform() < carb_miss_prob:
carbs = 0
err = np.random.normal(carb_error_mean, carb_error_std)
carbs = carbs + carbs * err
action = bbc.manual_bb_policy(carbs=carbs, glucose=bg)
full_patient_state.append(info['patient_state'])
full_patient_state = np.stack(full_patient_state)
if full_save:
return env.env.show_history(), full_patient_state
else:
return {'hist': env.env.show_history()[288:]} | [
"logging.getLogger",
"numpy.random.normal",
"collections.namedtuple",
"pandas.read_csv",
"numpy.stack",
"numpy.random.uniform",
"pandas.DataFrame"
]
| [((175, 202), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (192, 202), False, 'import logging\n'), ((320, 365), 'collections.namedtuple', 'namedtuple', (['"""ParamTup"""', "['basal', 'cf', 'cr']"], {}), "('ParamTup', ['basal', 'cf', 'cr'])\n", (330, 365), False, 'from collections import namedtuple\n'), ((6854, 6882), 'numpy.stack', 'np.stack', (['full_patient_state'], {}), '(full_patient_state)\n', (6862, 6882), True, 'import numpy as np\n'), ((679, 705), 'pandas.read_csv', 'pd.read_csv', (['CONTROL_QUEST'], {}), '(CONTROL_QUEST)\n', (690, 705), True, 'import pandas as pd\n'), ((736, 766), 'pandas.read_csv', 'pd.read_csv', (['PATIENT_PARA_FILE'], {}), '(PATIENT_PARA_FILE)\n', (747, 766), True, 'import pandas as pd\n'), ((6623, 6672), 'numpy.random.normal', 'np.random.normal', (['carb_error_mean', 'carb_error_std'], {}), '(carb_error_mean, carb_error_std)\n', (6639, 6672), True, 'import numpy as np\n'), ((2387, 2483), 'pandas.DataFrame', 'pd.DataFrame', (["[['Average', 13.5, 23.52, 50, 30]]"], {'columns': "['Name', 'CR', 'CF', 'TDI', 'Age']"}), "([['Average', 13.5, 23.52, 50, 30]], columns=['Name', 'CR',\n 'CF', 'TDI', 'Age'])\n", (2399, 2483), True, 'import pandas as pd\n'), ((6549, 6568), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6566, 6568), True, 'import numpy as np\n')] |
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
"""Base class for plugins.
It converts notification fields to Trait values.
"""
def __init__(self, **kw):
"""Setup the trait plugin.
For each Trait definition a plugin is used on in a conversion
definition, a new instance of the plugin will be created, and
initialized with the parameters (if any) specified in the
config file.
:param kw: the parameters specified in the event definitions file.
"""
super(TraitPluginBase, self).__init__()
@abc.abstractmethod
def trait_value(self, match_list):
"""Convert a set of fields to a Trait value.
This method is called each time a trait is attempted to be extracted
from a notification. It will be called *even if* no matching fields
are found in the notification (in that case, the match_list will be
empty). If this method returns None, the trait *will not* be added to
the event. Any other value returned by this method will be used as
the value for the trait. Values returned will be coerced to the
appropriate type for the trait.
:param match_list: A list (may be empty if no matches) of *tuples*.
Each tuple is (field_path, value) where field_path is the jsonpath
for that specific field.
Example::
trait's fields definition: ['payload.foobar',
'payload.baz',
'payload.thing.*']
notification body:
{
'message_id': '12345',
'publisher': 'someservice.host',
'payload': {
'foobar': 'test',
'thing': {
'bar': 12,
'boing': 13,
}
}
}
match_list will be: [('payload.foobar','test'),
('payload.thing.bar',12),
('payload.thing.boing',13)]
Here is a plugin that emulates the default (no plugin) behavior:
.. code-block:: python
class DefaultPlugin(TraitPluginBase):
"Plugin that returns the first field value."
def __init__(self, **kw):
super(DefaultPlugin, self).__init__()
def trait_value(self, match_list):
if not match_list:
return None
return match_list[0][1]
"""
class SplitterTraitPlugin(TraitPluginBase):
"""Plugin that splits a piece off of a string value."""
def __init__(self, separator=".", segment=0, max_split=None, **kw):
"""Setup how do split the field.
:param separator: String to split on. default "."
:param segment: Which segment to return. (int) default 0
:param max_split: Limit number of splits. Default: None (no limit)
"""
self.separator = separator
self.segment = segment
self.max_split = max_split
super(SplitterTraitPlugin, self).__init__(**kw)
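    # Example: with the defaults (separator='.', segment=0) a matched value such as
    # 'compute.instance.create.end' yields the trait value 'compute'.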
def trait_value(self, match_list):
if not match_list:
return None
value = six.text_type(match_list[0][1])
if self.max_split is not None:
values = value.split(self.separator, self.max_split)
else:
values = value.split(self.separator)
try:
return values[self.segment]
except IndexError:
return None
class BitfieldTraitPlugin(TraitPluginBase):
"""Plugin to set flags on a bitfield."""
def __init__(self, initial_bitfield=0, flags=None, **kw):
"""Setup bitfield trait.
:param initial_bitfield: (int) initial value for the bitfield
Flags that are set will be OR'ed with this.
:param flags: List of dictionaries defining bitflags to set depending
on data in the notification. Each one has the following
keys:
path: jsonpath of field to match.
bit: (int) number of bit to set (lsb is bit 0)
value: set bit if corresponding field's value
matches this. If value is not provided,
bit will be set if the field exists (and
is non-null), regardless of it's value.
"""
self.initial_bitfield = initial_bitfield
if flags is None:
flags = []
self.flags = flags
super(BitfieldTraitPlugin, self).__init__(**kw)
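    # Example: flags=[{'path': 'payload.state', 'bit': 0, 'value': 'active'}]
    # ORs bit 0 into the bitfield whenever the notification's payload.state is 'active'.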
def trait_value(self, match_list):
matches = dict(match_list)
bitfield = self.initial_bitfield
for flagdef in self.flags:
path = flagdef['path']
bit = 2 ** int(flagdef['bit'])
if path in matches:
if 'value' in flagdef:
if matches[path] == flagdef['value']:
bitfield |= bit
else:
bitfield |= bit
return bitfield
| [
"six.text_type",
"six.add_metaclass"
]
| [((612, 642), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (629, 642), False, 'import six\n'), ((4093, 4124), 'six.text_type', 'six.text_type', (['match_list[0][1]'], {}), '(match_list[0][1])\n', (4106, 4124), False, 'import six\n')] |
import logging
from typing import Match, Any, Dict
import aiohttp
from discord import Message
from MoMMI import comm_event, command, MChannel, always_command
logger = logging.getLogger(__name__)
@comm_event("ss14")
async def ss14_nudge(channel: MChannel, message: Any, meta: str) -> None:
try:
config: Dict[str, Any] = channel.module_config(f"ss14.servers.{meta}")
except ValueError:
return
expect_password = config["password"]
if expect_password != message.get("password"):
return
if "type" not in message or "contents" not in message:
return
contents = message["contents"]
type = message["type"]
if type == "ooc":
final_message = f"\u200B**OOC**: `{contents['sender']}`: {contents['contents']}"
else:
return
await channel.send(final_message)
@always_command("ss14_relay", unsafe=True)
async def ss14_relay(channel: MChannel, match: Match, message: Message) -> None:
if not channel.internal_name:
return
content = message.content
content = content.strip()
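    # Messages that start with a zero-width space (e.g. the OOC lines posted by
    # ss14_nudge above) are skipped so relayed content is not forwarded back again.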
if not content or content[0] == "\u200B":
return
server = None
config: Any
for config in channel.server_config("modules.ss14", []):
if config["discord_channel"] != channel.internal_name:
continue
server = config["server"]
if not server:
return
config = channel.module_config(f"ss14.servers.{server}")
password = config["password"]
url = config["api_url"] + "/ooc"
async with aiohttp.ClientSession() as session:
async with session.post(url, json={"password": password, "sender": message.author.name, "contents": content}) as resp:
r = await resp.text()
logger.error(f"{resp.status}")
| [
"logging.getLogger",
"MoMMI.comm_event",
"MoMMI.always_command",
"aiohttp.ClientSession"
]
| [((168, 195), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (185, 195), False, 'import logging\n'), ((198, 216), 'MoMMI.comm_event', 'comm_event', (['"""ss14"""'], {}), "('ss14')\n", (208, 216), False, 'from MoMMI import comm_event, command, MChannel, always_command\n'), ((841, 882), 'MoMMI.always_command', 'always_command', (['"""ss14_relay"""'], {'unsafe': '(True)'}), "('ss14_relay', unsafe=True)\n", (855, 882), False, 'from MoMMI import comm_event, command, MChannel, always_command\n'), ((1536, 1559), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1557, 1559), False, 'import aiohttp\n')] |
from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
config = {}
config['train_corpus_path'] = './corpus/train_wiki.txt'
config['test_corpus_path'] = './corpus/test_wiki.txt'
config['word2idx_path'] = './corpus/bert_word2idx_extend.json'
config['output_path'] = './output_wiki_bert'
config['batch_size'] = 1
config['max_seq_len'] = 200
config['vocab_size'] = 32162
config['lr'] = 2e-6
config['num_workers'] = 0
class Pretrainer:
def __init__(self, bert_model,
vocab_size, max_seq_len,
batch_size, lr, with_cuda=True):
        # vocabulary size; note the effective character/word vocabulary is vocab_size - 20,
        # because the first 20 tokens are reserved for special purposes such as padding
self.vocab_size = vocab_size
self.batch_size = batch_size
self.lr = lr
cuda_condition = torch.cuda.is_available() and with_cuda
self.device = torch.device('cuda:0' if cuda_condition else 'cpu')
        # maximum length of a single sentence
self.max_seq_len = max_seq_len
        # initialize the hyper-parameter configuration
bertconfig = BertConfig(vocab_size=config['vocab_size'])
        # initialize the BERT model
self.bert_model = bert_model(config=bertconfig)
self.bert_model.to(self.device)
        # initialize the training dataset
train_dataset = BERTDataset(corpus_path=config['train_corpus_path'],
word2idx_path=config['word2idx_path'],
seq_len=self.max_seq_len,
hidden_dim=bertconfig.hidden_size,
on_memory=False)
        # initialize the training dataloader
self.train_dataloader = DataLoader(train_dataset,
batch_size=config['batch_size'],
num_workers=config['num_workers'],
collate_fn=lambda x:x)
        # initialize the test dataset
test_dataset = BERTDataset(corpus_path=config['test_corpus_path'],
word2idx_path=config['word2idx_path'],
seq_len=self.max_seq_len,
hidden_dim=bertconfig.hidden_size,
on_memory=True)
        # initialize the test dataloader
self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size,
num_workers=config['num_workers'],
collate_fn=lambda x: x)
        # initialize positional_encoding, shape [max_seq_len, hidden_size]
self.positional_enc = self.init_positional_encoding(hidden_dim=bertconfig.hidden_size,
max_seq_len=self.max_seq_len)
        # expand positional_encoding to shape [1, max_seq_len, hidden_size]
self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)
        # collect the parameters to optimize and hand them to the optimizer
optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(optim_parameters, lr=self.lr)
print('Total Parameters:', sum(p.nelement() for p in self.bert_model.parameters()))
def init_positional_encoding(self, hidden_dim, max_seq_len):
position_enc = np.array([
[pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
if pos != 0 else np.zeros(hidden_dim) for pos in range(max_seq_len)
])
# dim=2i
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
        # dim=2i+1 (the standard sinusoidal encoding uses cosine here)
        position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
        # todo normalization -- why? divide each row of the positional embedding by its norm
denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True)) # 作为分母
position_enc /= (denominator + 1e-8)
position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
return position_enc
def test(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
self.bert_model.eval()
with torch.no_grad():
return self.iteration(epoch, self.test_dataloader, train=False, df_path=df_path)
def load_model(self, model, dir_path='./output'):
        # load the model
checkpoint_dir = self.find_most_recent_state_dict(dir_path)
checkpoint = torch.load(checkpoint_dir)
        # todo where was this key saved?
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
torch.cuda.empty_cache()
model.to(self.device)
print('{} loaded for training!'.format(checkpoint_dir))
def train(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
self.bert_model.train()
self.iteration(epoch, self.train_dataloader, train=True, df_path=df_path)
def compute_loss(self, preditions, labels, num_class=2, ignore_index=None):
if ignore_index is None:
loss_func = CrossEntropyLoss()
else:
loss_func = CrossEntropyLoss(ignore_index=ignore_index)
return loss_func(preditions.view(-1, num_class), labels.view(-1))
def get_mlm_accuracy(self, predictions, labels):
# predictions [batch_size, seq_len, vocab_size]
predictions = torch.argmax(predictions, dim=-1, keepdim=False) # predictions: [batch_size, seq_len]
# labels: [batch_size, seq_len]
        mask = (labels > 0)  # only consider the MASKed tokens
        # number of correctly predicted tokens
pred_correct = torch.sum((predictions == labels) * mask).float()
# accuracy
mlm_accuracy = pred_correct / (torch.sum(mask).float() + 1e-8)
return mlm_accuracy.item()
def padding(self, output_dic_list):
        # todo document the format of output_dic_list
# [batch_size, seq_len, embed_dim]
bert_input = [i['bert_input'] for i in output_dic_list]
bert_label = [i['bert_label'] for i in output_dic_list]
segment_label = [i['segment_label'] for i in output_dic_list]
# padding
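        # pad_sequence pads with 0 by default, which matches ignore_index=0 in
        # compute_loss so padded positions do not contribute to the MLM loss.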
bert_input = torch.nn.utils.rnn.pad_sequence(bert_input, batch_first=True)
bert_label = torch.nn.utils.rnn.pad_sequence(bert_label, batch_first=True)
segment_label = torch.nn.utils.rnn.pad_sequence(segment_label, batch_first=True)
# [batch_size]
is_next = torch.cat([i['is_next'] for i in output_dic_list])
return {
'bert_input': bert_input,
'bert_label': bert_label,
'segment_label': segment_label,
'is_next': is_next
}
def find_most_recent_state_dict(self, dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
dic_list = [i for i in os.listdir(dir_path)]
if len(dic_list) == 0:
raise FileNotFoundError('can not find any state dict in {}'.format(dir_path))
        # todo when was the model saved?
dic_list = [i for i in dic_list if 'model' in i]
dic_list = sorted(dic_list, key=lambda k: int(k.split('.')[-1]))
return dir_path + '/' + dic_list[-1]
def iteration(self, epoch, data_loader, train=True, df_path='./output_wiki_bert/df_log.pickle'):
if not os.path.isfile(df_path) and epoch != 0:
raise RuntimeError("log DataFrame path not found and can't create a new one because we're not training from scratch!")
if not os.path.isfile(df_path) and epoch == 0:
df = pd.DataFrame(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',
'train_next_sen_acc', 'train_mlm_acc',
'test_next_sen_loss', 'test_mlm_loss',
'test_next_sen_acc', 'test_mlm_acc'])
df.to_pickle(df_path)
print('log DataFrame created!')
str_code = 'train' if train else 'test'
        # set up the progress bar and get an iterator over batches
data_iter = tqdm(enumerate(data_loader),
desc='EP_%s:%d' % (str_code, epoch),
total=len(data_loader),
bar_format='{l_bar}{r_bar}')
total_next_sen_loss = 0
total_mlm_loss = 0
total_next_sen_acc = 0
total_mlm_acc = 0
total_element = 0
for i, data in data_iter:
data = self.padding(data)
# 0. batch_data will be sent into the device
data = {key: value.to(self.device) for key, value in data.items()}
            # todo note the shape of data['bert_input']
positional_enc = self.positional_enc[:, :data['bert_input'].size()[-1], :].to(self.device)
# 1. forward the next_sentence_prediction and masked_lm_model
# mlm_preds: [batch_size, seq_len, vocab_size]
# next_sen_preds: [batch_size, seq_len]
mlm_preds, next_sen_preds = self.bert_model.forward(input_ids=data['bert_input'],
positional_enc=positional_enc,
token_type_ids=data['segment_label'])
mlm_acc = self.get_mlm_accuracy(mlm_preds, data['bert_label'])
next_sen_acc = next_sen_preds.argmax(dim=-1, keepdim=False).eq(data['is_next']).sum().item()
mlm_loss = self.compute_loss(mlm_preds, data['bert_label'], self.vocab_size, ignore_index=0)
next_sen_loss = self.compute_loss(next_sen_preds, data['is_next'])
            # train both tasks jointly
loss = mlm_loss + next_sen_loss
            # 3. backpropagation and gradient update
if train:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_next_sen_loss += next_sen_loss.item()
total_mlm_loss += mlm_loss.item()
total_next_sen_acc += next_sen_acc
total_element += data['is_next'].nelement()
total_mlm_acc += mlm_acc
if train:
log_dict = {
'epoch': epoch,
'train_next_sen_loss': total_next_sen_loss / (i + 1),
'train_mlm_loss': total_mlm_loss / (i + 1),
'train_next_sen_acc': total_next_sen_acc / total_element,
'train_mlm_acc': total_mlm_acc / (i + 1),
'test_next_sen_loss': 0, 'test_mlm_loss':0,
'test_next_sen_acc':0, 'test_mlm_acc':0
}
else:
log_dict = {
'epoch': epoch,
'test_next_sen_loss': total_next_sen_loss / (i + 1),
'test_mlm_loss': total_mlm_loss / (i + 1),
'test_next_sen_acc': total_next_sen_acc / total_element,
'test_mlm_acc': total_mlm_acc / (i + 1),
'train_next_sen_loss': 0, 'train_mlm_loss': 0,
'train_next_sen_acc': 0, 'train_mlm_acc': 0
}
if i % 10 == 0:
data_iter.write(str({k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}))
if train:
df = pd.read_pickle(df_path)
            # append the log record to the DataFrame
df = df.append([log_dict])
            # reset the index
df.reset_index(inplace=True, drop=True)
            # persist the log to disk
df.to_pickle(df_path)
else:
log_dict = {k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}
df = pd.read_pickle(df_path)
df.reset_index(inplace=True, drop=True)
for k, v in log_dict.items():
df.at[epoch, k] = v
df.to_pickle(df_path)
return float(log_dict['test_next_sen_loss']) + float(log_dict['test_mlm_loss'])
def save_state_dict(self, model, epoch, dir_path='./output', file_path='bert.model'):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
save_path = dir_path + '/' + file_path + '.epoch.{}'.format(str(epoch))
model.to('cpu')
torch.save({'model_state_dict': model.state_dict()}, save_path)
print('{} saved!'.format(save_path))
model.to(self.device)
if __name__ == '__main__':
def init_trainer(dynamic_lr, load_model=False):
trainer = Pretrainer(BertForPreTraining,
vocab_size=config['vocab_size'],
max_seq_len=config['max_seq_len'],
batch_size=config['batch_size'],
lr=dynamic_lr,
with_cuda=True)
if load_model:
trainer.load_model(trainer.bert_model, dir_path=config['output_path'])
return trainer
start_epoch = 3
train_epoches = 1
trainer = init_trainer(config['lr'], load_model=True)
all_loss = []
threshold = 0
patient = 10
best_f1 = 0
dynamic_lr = config['lr']
    # todo why does start_epoch begin at 3?
for epoch in range(start_epoch, start_epoch + train_epoches):
print('train with learning rate {}'.format(str(dynamic_lr)))
trainer.train(epoch)
trainer.save_state_dict(trainer.bert_model, epoch, dir_path=config['output_path'],
file_path='bert.model')
trainer.test(epoch)
| [
"pandas.read_pickle",
"os.path.exists",
"os.listdir",
"dataset.wiki_dataset.BERTDataset",
"pandas.DataFrame",
"numpy.power",
"os.path.isfile",
"numpy.sum",
"numpy.zeros",
"os.mkdir",
"torch.utils.data.DataLoader",
"numpy.sin"
]
| [((1318, 1497), 'dataset.wiki_dataset.BERTDataset', 'BERTDataset', ([], {'corpus_path': "config['train_corpus_path']", 'word2idx_path': "config['word2idx_path']", 'seq_len': 'self.max_seq_len', 'hidden_dim': 'bertconfig.hidden_size', 'on_memory': '(False)'}), "(corpus_path=config['train_corpus_path'], word2idx_path=config[\n 'word2idx_path'], seq_len=self.max_seq_len, hidden_dim=bertconfig.\n hidden_size, on_memory=False)\n", (1329, 1497), False, 'from dataset.wiki_dataset import BERTDataset\n'), ((1690, 1812), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': "config['batch_size']", 'num_workers': "config['num_workers']", 'collate_fn': '(lambda x: x)'}), "(train_dataset, batch_size=config['batch_size'], num_workers=\n config['num_workers'], collate_fn=lambda x: x)\n", (1700, 1812), False, 'from torch.utils.data import DataLoader\n'), ((1978, 2155), 'dataset.wiki_dataset.BERTDataset', 'BERTDataset', ([], {'corpus_path': "config['test_corpus_path']", 'word2idx_path': "config['word2idx_path']", 'seq_len': 'self.max_seq_len', 'hidden_dim': 'bertconfig.hidden_size', 'on_memory': '(True)'}), "(corpus_path=config['test_corpus_path'], word2idx_path=config[\n 'word2idx_path'], seq_len=self.max_seq_len, hidden_dim=bertconfig.\n hidden_size, on_memory=True)\n", (1989, 2155), False, 'from dataset.wiki_dataset import BERTDataset\n'), ((2343, 2459), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'self.batch_size', 'num_workers': "config['num_workers']", 'collate_fn': '(lambda x: x)'}), "(test_dataset, batch_size=self.batch_size, num_workers=config[\n 'num_workers'], collate_fn=lambda x: x)\n", (2353, 2459), False, 'from torch.utils.data import DataLoader\n'), ((3503, 3533), 'numpy.sin', 'np.sin', (['position_enc[1:, 0::2]'], {}), '(position_enc[1:, 0::2])\n', (3509, 3533), True, 'import numpy as np\n'), ((3586, 3616), 'numpy.sin', 'np.sin', (['position_enc[1:, 1::2]'], {}), '(position_enc[1:, 1::2])\n', (3592, 3616), True, 'import numpy as np\n'), ((3689, 3737), 'numpy.sum', 'np.sum', (['(position_enc ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(position_enc ** 2, axis=1, keepdims=True)\n', (3695, 3737), True, 'import numpy as np\n'), ((6486, 6510), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (6500, 6510), False, 'import os\n'), ((6524, 6542), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (6532, 6542), False, 'import os\n'), ((7282, 7477), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['epoch', 'train_next_sen_loss', 'train_mlm_loss', 'train_next_sen_acc',\n 'train_mlm_acc', 'test_next_sen_loss', 'test_mlm_loss',\n 'test_next_sen_acc', 'test_mlm_acc']"}), "(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',\n 'train_next_sen_acc', 'train_mlm_acc', 'test_next_sen_loss',\n 'test_mlm_loss', 'test_next_sen_acc', 'test_mlm_acc'])\n", (7294, 7477), True, 'import pandas as pd\n'), ((10945, 10968), 'pandas.read_pickle', 'pd.read_pickle', (['df_path'], {}), '(df_path)\n', (10959, 10968), True, 'import pandas as pd\n'), ((11276, 11299), 'pandas.read_pickle', 'pd.read_pickle', (['df_path'], {}), '(df_path)\n', (11290, 11299), True, 'import pandas as pd\n'), ((11663, 11687), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (11677, 11687), False, 'import os\n'), ((11701, 11719), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (11709, 11719), False, 'import os\n'), ((6574, 6594), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (6584, 6594), 
False, 'import os\n'), ((7038, 7061), 'os.path.isfile', 'os.path.isfile', (['df_path'], {}), '(df_path)\n', (7052, 7061), False, 'import os\n'), ((7225, 7248), 'os.path.isfile', 'os.path.isfile', (['df_path'], {}), '(df_path)\n', (7239, 7248), False, 'import os\n'), ((3390, 3410), 'numpy.zeros', 'np.zeros', (['hidden_dim'], {}), '(hidden_dim)\n', (3398, 3410), True, 'import numpy as np\n'), ((3297, 3332), 'numpy.power', 'np.power', (['(10000)', '(2 * i / hidden_dim)'], {}), '(10000, 2 * i / hidden_dim)\n', (3305, 3332), True, 'import numpy as np\n')] |
import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
class JoystickLinuxBackend(Backend):
"""
This backend intercepts events from joystick devices through the native Linux API implementation.
It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the
`Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with
the devices.
Triggers:
* :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected.
* :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected.
* :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed.
* :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released.
* :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes.
"""
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'throttle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
def __init__(self, device: str = '/dev/input/js0', *args, **kwargs):
"""
:param device: Joystick device to monitor (default: ``/dev/input/js0``).
"""
super().__init__(*args, **kwargs)
self.device = device
self._axis_states = {}
self._button_states = {}
self._axis_map = []
self._button_map = []
def _init_joystick(self, dev: IO):
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(dev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(dev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(dev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(dev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self._axis_map.append(axis_name)
self._axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(dev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self._button_map.append(btn_name)
self._button_states[btn_name] = 0
self.bus.post(JoystickConnectedEvent(device=self.device, name=js_name, axes=self._axis_map,
buttons=self._button_map))
def run(self):
super().run()
self.logger.info(f'Opening {self.device}...')
while not self.should_stop():
# Open the joystick device.
try:
jsdev = open(self.device, 'rb')
self._init_joystick(jsdev)
except Exception as e:
self.logger.debug(f'Joystick device on {self.device} not available: {e}')
time.sleep(5)
continue
# Joystick event loop
while not self.should_stop():
try:
evbuf = jsdev.read(8)
if evbuf:
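                        # Each record is a struct js_event: u32 timestamp (ms),
                        # s16 value, u8 event type, u8 axis/button number.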
_, value, evt_type, number = struct.unpack('IhBB', evbuf)
if evt_type & 0x80: # Initial state notification
continue
if evt_type & 0x01:
button = self._button_map[number]
if button:
self._button_states[button] = value
evt_class = JoystickButtonPressedEvent if value else JoystickButtonReleasedEvent
# noinspection PyTypeChecker
self.bus.post(evt_class(device=self.device, button=button))
if evt_type & 0x02:
axis = self._axis_map[number]
if axis:
fvalue = value / 32767.0
self._axis_states[axis] = fvalue
# noinspection PyTypeChecker
self.bus.post(JoystickAxisEvent(device=self.device, axis=axis, value=fvalue))
except OSError as e:
self.logger.warning(f'Connection to {self.device} lost: {e}')
self.bus.post(JoystickDisconnectedEvent(device=self.device))
break
| [
"platypush.message.event.joystick.JoystickAxisEvent",
"platypush.message.event.joystick.JoystickDisconnectedEvent",
"fcntl.ioctl",
"array.array",
"time.sleep",
"struct.unpack",
"platypush.message.event.joystick.JoystickConnectedEvent"
]
| [((3326, 3352), 'array.array', 'array.array', (['"""B"""', '([0] * 64)'], {}), "('B', [0] * 64)\n", (3337, 3352), False, 'import array\n'), ((3552, 3573), 'array.array', 'array.array', (['"""B"""', '[0]'], {}), "('B', [0])\n", (3563, 3573), False, 'import array\n'), ((3582, 3609), 'fcntl.ioctl', 'ioctl', (['dev', '(2147576337)', 'buf'], {}), '(dev, 2147576337, buf)\n', (3587, 3609), False, 'from fcntl import ioctl\n'), ((3665, 3686), 'array.array', 'array.array', (['"""B"""', '[0]'], {}), "('B', [0])\n", (3676, 3686), False, 'import array\n'), ((3695, 3722), 'fcntl.ioctl', 'ioctl', (['dev', '(2147576338)', 'buf'], {}), '(dev, 2147576338, buf)\n', (3700, 3722), False, 'from fcntl import ioctl\n'), ((3812, 3838), 'array.array', 'array.array', (['"""B"""', '([0] * 64)'], {}), "('B', [0] * 64)\n", (3823, 3838), False, 'import array\n'), ((3849, 3876), 'fcntl.ioctl', 'ioctl', (['dev', '(2151705138)', 'buf'], {}), '(dev, 2151705138, buf)\n', (3854, 3876), False, 'from fcntl import ioctl\n'), ((4142, 4169), 'array.array', 'array.array', (['"""H"""', '([0] * 200)'], {}), "('H', [0] * 200)\n", (4153, 4169), False, 'import array\n'), ((4178, 4205), 'fcntl.ioctl', 'ioctl', (['dev', '(2151705140)', 'buf'], {}), '(dev, 2151705140, buf)\n', (4183, 4205), False, 'from fcntl import ioctl\n'), ((4451, 4559), 'platypush.message.event.joystick.JoystickConnectedEvent', 'JoystickConnectedEvent', ([], {'device': 'self.device', 'name': 'js_name', 'axes': 'self._axis_map', 'buttons': 'self._button_map'}), '(device=self.device, name=js_name, axes=self.\n _axis_map, buttons=self._button_map)\n', (4473, 4559), False, 'from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent\n'), ((5025, 5038), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5035, 5038), False, 'import time\n'), ((5287, 5315), 'struct.unpack', 'struct.unpack', (['"""IhBB"""', 'evbuf'], {}), "('IhBB', evbuf)\n", (5300, 5315), False, 'import struct\n'), ((6494, 6539), 'platypush.message.event.joystick.JoystickDisconnectedEvent', 'JoystickDisconnectedEvent', ([], {'device': 'self.device'}), '(device=self.device)\n', (6519, 6539), False, 'from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent\n'), ((6277, 6339), 'platypush.message.event.joystick.JoystickAxisEvent', 'JoystickAxisEvent', ([], {'device': 'self.device', 'axis': 'axis', 'value': 'fvalue'}), '(device=self.device, axis=axis, value=fvalue)\n', (6294, 6339), False, 'from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent\n')] |
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return 'welcome to my watchlist' | [
"flask.Flask"
]
| [((30, 43), 'flask.Flask', 'Flask', (['_name_'], {}), '(_name_)\n', (35, 43), False, 'from flask import Flask\n')] |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django_tablib.admin import TablibAdmin
from jazzpos.models import Customer, Patient, Store, CustomerType, StoreSettings
from jazzpos.models import UserProfile
class CustomerAdmin(TablibAdmin):
formats = ['xls', 'csv',]
class PatientAdmin(TablibAdmin):
formats = ['xls', 'csv',]
class StoreAdmin(admin.ModelAdmin):
pass
class StoreSettingsAdmin(admin.ModelAdmin):
pass
class CustomerTypeAdmin(admin.ModelAdmin):
pass
class UserProfileInline(admin.StackedInline):
model = UserProfile
UserAdmin.inlines = [UserProfileInline,]
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Patient, PatientAdmin)
admin.site.register(Store, StoreAdmin)
admin.site.register(StoreSettings, StoreSettingsAdmin)
admin.site.register(CustomerType, CustomerTypeAdmin)
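# Re-register the built-in User admin so it picks up the UserProfile inline defined above.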
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| [
"django.contrib.admin.site.unregister",
"django.contrib.admin.site.register"
]
| [((687, 731), 'django.contrib.admin.site.register', 'admin.site.register', (['Customer', 'CustomerAdmin'], {}), '(Customer, CustomerAdmin)\n', (706, 731), False, 'from django.contrib import admin\n'), ((732, 774), 'django.contrib.admin.site.register', 'admin.site.register', (['Patient', 'PatientAdmin'], {}), '(Patient, PatientAdmin)\n', (751, 774), False, 'from django.contrib import admin\n'), ((775, 813), 'django.contrib.admin.site.register', 'admin.site.register', (['Store', 'StoreAdmin'], {}), '(Store, StoreAdmin)\n', (794, 813), False, 'from django.contrib import admin\n'), ((814, 868), 'django.contrib.admin.site.register', 'admin.site.register', (['StoreSettings', 'StoreSettingsAdmin'], {}), '(StoreSettings, StoreSettingsAdmin)\n', (833, 868), False, 'from django.contrib import admin\n'), ((869, 921), 'django.contrib.admin.site.register', 'admin.site.register', (['CustomerType', 'CustomerTypeAdmin'], {}), '(CustomerType, CustomerTypeAdmin)\n', (888, 921), False, 'from django.contrib import admin\n'), ((923, 950), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['User'], {}), '(User)\n', (944, 950), False, 'from django.contrib import admin\n'), ((951, 987), 'django.contrib.admin.site.register', 'admin.site.register', (['User', 'UserAdmin'], {}), '(User, UserAdmin)\n', (970, 987), False, 'from django.contrib import admin\n')] |
# Basic training configuration file
from torch.optim import RMSprop
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.transforms import RandomHorizontalFlip, Compose
from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply
from torchvision.transforms import ColorJitter, ToTensor, Normalize
from common.dataset import FilesFromCsvDataset
from common.data_loaders import get_data_loader
from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3
SEED = 17
DEBUG = True
DEVICE = 'cuda'
OUTPUT_PATH = "output"
size = 350
TRAIN_TRANSFORMS = Compose([
RandomApply(
[RandomAffine(degrees=10, resample=3, fillcolor=(255, 255, 255)), ],
p=0.5
),
RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),
RandomHorizontalFlip(p=0.5),
ColorJitter(hue=0.12, brightness=0.12),
ToTensor(),
Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
VAL_TRANSFORMS = TRAIN_TRANSFORMS
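# Note: VAL_TRANSFORMS reuses the stochastic training augmentations
# (random crop/flip/jitter), so validation batches are randomly augmented as well.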
BATCH_SIZE = 24
NUM_WORKERS = 15
dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
data_transform=TRAIN_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
data_transform=VAL_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
MODEL = FurnitureInceptionResNetV4350SSDLike_v3(num_classes=128, pretrained='imagenet')
N_EPOCHS = 100
OPTIM = RMSprop(
params=[
{"params": MODEL.extractor.stem.parameters(), 'lr': 0.0001},
{"params": MODEL.extractor.low_features_a.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.low_features_b.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.mid_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.top_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.smooth_layers.parameters(), 'lr': 0.045},
{"params": MODEL.cls_layers.parameters(), 'lr': 0.045},
{"params": MODEL.boxes_to_classes.parameters(), 'lr': 0.045},
{"params": MODEL.final_classifier.parameters(), 'lr': 0.045},
],
alpha=0.9,
eps=1.0
)
LR_SCHEDULERS = [
MultiStepLR(OPTIM, milestones=[4, 5, 6, 7, 8, 10, 11, 13, 14, 15], gamma=0.5),
]
EARLY_STOPPING_KWARGS = {
'patience': 25,
# 'score_function': None
}
LOG_INTERVAL = 100
| [
"torchvision.transforms.RandomAffine",
"torch.optim.lr_scheduler.MultiStepLR",
"models.inceptionresnetv2_ssd_like.FurnitureInceptionResNetV4350SSDLike_v3",
"common.data_loaders.get_data_loader",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.ColorJitter",
"torchvision.transforms.Normalize",
"common.dataset.FilesFromCsvDataset",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop"
]
| [((1033, 1096), 'common.dataset.FilesFromCsvDataset', 'FilesFromCsvDataset', (['"""output/unique_filtered_train_dataset.csv"""'], {}), "('output/unique_filtered_train_dataset.csv')\n", (1052, 1096), False, 'from common.dataset import FilesFromCsvDataset\n'), ((1112, 1251), 'common.data_loaders.get_data_loader', 'get_data_loader', (['dataset'], {'data_transform': 'TRAIN_TRANSFORMS', 'batch_size': 'BATCH_SIZE', 'num_workers': 'NUM_WORKERS', 'pin_memory': "('cuda' in DEVICE)"}), "(dataset, data_transform=TRAIN_TRANSFORMS, batch_size=\n BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory='cuda' in DEVICE)\n", (1127, 1251), False, 'from common.data_loaders import get_data_loader\n'), ((1387, 1448), 'common.dataset.FilesFromCsvDataset', 'FilesFromCsvDataset', (['"""output/unique_filtered_val_dataset.csv"""'], {}), "('output/unique_filtered_val_dataset.csv')\n", (1406, 1448), False, 'from common.dataset import FilesFromCsvDataset\n'), ((1462, 1603), 'common.data_loaders.get_data_loader', 'get_data_loader', (['val_dataset'], {'data_transform': 'VAL_TRANSFORMS', 'batch_size': 'BATCH_SIZE', 'num_workers': 'NUM_WORKERS', 'pin_memory': "('cuda' in DEVICE)"}), "(val_dataset, data_transform=VAL_TRANSFORMS, batch_size=\n BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory='cuda' in DEVICE)\n", (1477, 1603), False, 'from common.data_loaders import get_data_loader\n'), ((1725, 1804), 'models.inceptionresnetv2_ssd_like.FurnitureInceptionResNetV4350SSDLike_v3', 'FurnitureInceptionResNetV4350SSDLike_v3', ([], {'num_classes': '(128)', 'pretrained': '"""imagenet"""'}), "(num_classes=128, pretrained='imagenet')\n", (1764, 1804), False, 'from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3\n'), ((2578, 2655), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', (['OPTIM'], {'milestones': '[4, 5, 6, 7, 8, 10, 11, 13, 14, 15]', 'gamma': '(0.5)'}), '(OPTIM, milestones=[4, 5, 6, 7, 8, 10, 11, 13, 14, 15], gamma=0.5)\n', (2589, 2655), False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((737, 795), 'torchvision.transforms.RandomResizedCrop', 'RandomResizedCrop', (['size'], {'scale': '(0.7, 1.0)', 'interpolation': '(3)'}), '(size, scale=(0.7, 1.0), interpolation=3)\n', (754, 795), False, 'from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply\n'), ((801, 828), 'torchvision.transforms.RandomHorizontalFlip', 'RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (821, 828), False, 'from torchvision.transforms import RandomHorizontalFlip, Compose\n'), ((834, 872), 'torchvision.transforms.ColorJitter', 'ColorJitter', ([], {'hue': '(0.12)', 'brightness': '(0.12)'}), '(hue=0.12, brightness=0.12)\n', (845, 872), False, 'from torchvision.transforms import ColorJitter, ToTensor, Normalize\n'), ((878, 888), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (886, 888), False, 'from torchvision.transforms import ColorJitter, ToTensor, Normalize\n'), ((894, 946), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (903, 946), False, 'from torchvision.transforms import ColorJitter, ToTensor, Normalize\n'), ((644, 707), 'torchvision.transforms.RandomAffine', 'RandomAffine', ([], {'degrees': '(10)', 'resample': '(3)', 'fillcolor': '(255, 255, 255)'}), '(degrees=10, resample=3, fillcolor=(255, 255, 255))\n', (656, 707), False, 'from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply\n')] |
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g',
verbose=4)
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to MCSCF method.
# The recommended one is to initialize it in SCF calculation. The MCSCF
# calculation takes the information from SCF objects.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object inplace.
#
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
| [
"pyscf.qmmm.mm_charge",
"pyscf.gto.M",
"numpy.random.random",
"pyscf.mcscf.CASSCF",
"pyscf.mcscf.CASCI",
"numpy.random.seed",
"pyscf.scf.RHF",
"numpy.arange"
]
| [((178, 526), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""\nC 1.1879 -0.3829 0.0000\nC 0.0000 0.5526 0.0000\nO -1.1867 -0.2472 0.0000\nH -1.9237 0.3850 0.0000\nH 2.0985 0.2306 0.0000\nH 1.1184 -1.0093 0.8869\nH 1.1184 -1.0093 -0.8869\nH -0.0227 1.1812 0.8852\nH -0.0227 1.1812 -0.8852\n """', 'basis': '"""3-21g"""', 'verbose': '(4)'}), '(atom=\n """\nC 1.1879 -0.3829 0.0000\nC 0.0000 0.5526 0.0000\nO -1.1867 -0.2472 0.0000\nH -1.9237 0.3850 0.0000\nH 2.0985 0.2306 0.0000\nH 1.1184 -1.0093 0.8869\nH 1.1184 -1.0093 -0.8869\nH -0.0227 1.1812 0.8852\nH -0.0227 1.1812 -0.8852\n """\n , basis=\'3-21g\', verbose=4)\n', (183, 526), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((542, 562), 'numpy.random.seed', 'numpy.random.seed', (['(1)'], {}), '(1)\n', (559, 562), False, 'import numpy\n'), ((901, 923), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(6)', '(6)'], {}), '(mf, 6, 6)\n', (913, 923), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((939, 960), 'pyscf.mcscf.CASCI', 'mcscf.CASCI', (['mf', '(6)', '(6)'], {}), '(mf, 6, 6)\n', (950, 960), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((1134, 1146), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (1141, 1146), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((1152, 1174), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '(6)', '(6)'], {}), '(mf, 6, 6)\n', (1164, 1174), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((1180, 1215), 'pyscf.qmmm.mm_charge', 'qmmm.mm_charge', (['mc', 'coords', 'charges'], {}), '(mc, coords, charges)\n', (1194, 1215), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((1238, 1250), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (1245, 1250), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((1256, 1277), 'pyscf.mcscf.CASCI', 'mcscf.CASCI', (['mf', '(6)', '(6)'], {}), '(mf, 6, 6)\n', (1267, 1277), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((1283, 1318), 'pyscf.qmmm.mm_charge', 'qmmm.mm_charge', (['mc', 'coords', 'charges'], {}), '(mc, coords, charges)\n', (1297, 1318), False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((572, 599), 'numpy.random.random', 'numpy.random.random', (['(5, 3)'], {}), '((5, 3))\n', (591, 599), False, 'import numpy\n'), ((615, 630), 'numpy.arange', 'numpy.arange', (['(5)'], {}), '(5)\n', (627, 630), False, 'import numpy\n'), ((858, 870), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (865, 870), False, 'from pyscf import gto, scf, mcscf, qmmm\n')] |
import datetime
from decimal import Decimal, ROUND_DOWN, ROUND_UP
import logging
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils import formats
from django.utils.cache import patch_cache_control
from django.utils.dateformat import format as format_date
from django.utils.dateparse import parse_date
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from mtp_common.auth import api_client, urljoin
import requests
from requests.exceptions import Timeout
logger = logging.getLogger('mtp')
prisoner_number_re = re.compile(r'^[a-z]\d\d\d\d[a-z]{2}$', re.IGNORECASE)
def get_api_session():
return api_client.get_authenticated_api_session(
settings.SHARED_API_USERNAME,
settings.SHARED_API_PASSWORD,
)
def check_payment_service_available():
# service is deemed unavailable only if status is explicitly false, not if it cannot be determined
try:
response = requests.get(api_url('/service-availability/'), timeout=5)
gov_uk_status = response.json().get('gov_uk_pay', {})
return gov_uk_status.get('status', True), gov_uk_status.get('message_to_users')
except (Timeout, ValueError):
return True, None
def validate_prisoner_number(value):
if not prisoner_number_re.match(value):
raise ValidationError(_('Incorrect prisoner number format'), code='invalid')
class RejectCardNumberValidator(RegexValidator):
regex = r'\d{4}\s*\d{4}\s*\d{4}\s*\d{4}'
inverse_match = True
code = 'card_number'
message = _('Please do not enter your debit card number here')
def format_percentage(number, decimals=1, trim_zeros=True):
if not isinstance(number, Decimal):
number = Decimal(number)
percentage_text = ('{0:.%sf}' % decimals).format(number)
if decimals and trim_zeros and percentage_text.endswith('.' + ('0' * decimals)):
percentage_text = percentage_text[:-decimals - 1]
return percentage_text + '%'
def currency_format(amount, trim_empty_pence=False):
"""
Formats a number into currency format
@param amount: amount in pounds
@param trim_empty_pence: if True, strip off .00
"""
if not isinstance(amount, Decimal):
amount = unserialise_amount(amount)
text_amount = serialise_amount(amount)
if trim_empty_pence and text_amount.endswith('.00'):
text_amount = text_amount[:-3]
return '£' + text_amount
def currency_format_pence(amount, trim_empty_pence=False):
"""
    Formats a number into currency format, displaying amounts under £1 as pence only (e.g. 50p)
@param amount: amount in pounds
@param trim_empty_pence: if True, strip off .00
"""
if not isinstance(amount, Decimal):
amount = unserialise_amount(amount)
if amount.__abs__() < Decimal('1'):
return '%sp' % (amount * Decimal('100')).to_integral_value()
return currency_format(amount, trim_empty_pence=trim_empty_pence)
def clamp_amount(amount):
"""
Round the amount to integer pence,
rounding fractional pence up (away from zero) for any fractional pence value
that is greater than or equal to a tenth of a penny.
@param amount: Decimal amount to round
"""
tenths_of_pennies = (amount * Decimal('1000')).to_integral_value(rounding=ROUND_DOWN)
pounds = tenths_of_pennies / Decimal('1000')
return pounds.quantize(Decimal('1.00'), rounding=ROUND_UP)
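# Worked example (added note; sample amounts are arbitrary):
#   clamp_amount(Decimal('10.0009')) -> Decimal('10.00')   # under a tenth of a penny, dropped
#   clamp_amount(Decimal('10.001'))  -> Decimal('10.01')   # a tenth of a penny or more, rounded up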
def get_service_charge(amount, clamp=True):
if not isinstance(amount, Decimal):
amount = Decimal(amount)
percentage_charge = amount * settings.SERVICE_CHARGE_PERCENTAGE / Decimal('100')
service_charge = percentage_charge + settings.SERVICE_CHARGE_FIXED
if clamp:
return clamp_amount(service_charge)
return service_charge
def get_total_charge(amount, clamp=True):
if not isinstance(amount, Decimal):
amount = Decimal(amount)
charge = get_service_charge(amount, clamp=False)
result = amount + charge
if clamp:
return clamp_amount(result)
return result
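# Worked example (added note; the charge settings below are assumed values, not
# the real configuration): with SERVICE_CHARGE_PERCENTAGE = Decimal('2.4') and
# SERVICE_CHARGE_FIXED = Decimal('0.20'),
#   get_service_charge(Decimal('10')) -> Decimal('0.44')   # 10 * 2.4 / 100 + 0.20
#   get_total_charge(Decimal('10'))   -> Decimal('10.44')  # amount + service charge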
def serialise_amount(amount):
return '{0:.2f}'.format(amount)
def unserialise_amount(amount_text):
amount_text = force_text(amount_text)
return Decimal(amount_text)
def serialise_date(date):
return format_date(date, 'Y-m-d')
def unserialise_date(date_text):
date_text = force_text(date_text)
date = parse_date(date_text)
if not date:
raise ValueError('Invalid date')
return date
def lenient_unserialise_date(date_text):
date_text = force_text(date_text)
date_formats = formats.get_format('DATE_INPUT_FORMATS')
for date_format in date_formats:
try:
return datetime.datetime.strptime(date_text, date_format).date()
except (ValueError, TypeError):
continue
raise ValueError('Invalid date')
def govuk_headers():
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % settings.GOVUK_PAY_AUTH_TOKEN
}
def govuk_url(path):
return urljoin(settings.GOVUK_PAY_URL, path)
def api_url(path):
return urljoin(settings.API_URL, path)
def site_url(path):
return urljoin(settings.SITE_URL, path)
def get_link_by_rel(data, rel):
if rel in data['_links']:
return data['_links'][rel]['href']
def make_response_cacheable(response):
"""
Allow response to be public and cached for an hour
"""
patch_cache_control(response, public=True, max_age=3600)
return response
class CacheableTemplateView(TemplateView):
"""
For simple pages whose content rarely changes so can be cached for an hour
"""
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
return make_response_cacheable(response)
| [
"logging.getLogger",
"django.utils.dateformat.format",
"django.utils.cache.patch_cache_control",
"mtp_common.auth.api_client.get_authenticated_api_session",
"re.compile",
"mtp_common.auth.urljoin",
"datetime.datetime.strptime",
"django.utils.translation.gettext_lazy",
"django.utils.encoding.force_text",
"django.utils.dateparse.parse_date",
"decimal.Decimal",
"django.utils.formats.get_format"
]
| [((674, 698), 'logging.getLogger', 'logging.getLogger', (['"""mtp"""'], {}), "('mtp')\n", (691, 698), False, 'import logging\n'), ((720, 776), 're.compile', 're.compile', (['"""^[a-z]\\\\d\\\\d\\\\d\\\\d[a-z]{2}$"""', 're.IGNORECASE'], {}), "('^[a-z]\\\\d\\\\d\\\\d\\\\d[a-z]{2}$', re.IGNORECASE)\n", (730, 776), False, 'import re\n'), ((810, 914), 'mtp_common.auth.api_client.get_authenticated_api_session', 'api_client.get_authenticated_api_session', (['settings.SHARED_API_USERNAME', 'settings.SHARED_API_PASSWORD'], {}), '(settings.SHARED_API_USERNAME,\n settings.SHARED_API_PASSWORD)\n', (850, 914), False, 'from mtp_common.auth import api_client, urljoin\n'), ((1703, 1755), 'django.utils.translation.gettext_lazy', '_', (['"""Please do not enter your debit card number here"""'], {}), "('Please do not enter your debit card number here')\n", (1704, 1755), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4293, 4316), 'django.utils.encoding.force_text', 'force_text', (['amount_text'], {}), '(amount_text)\n', (4303, 4316), False, 'from django.utils.encoding import force_text\n'), ((4328, 4348), 'decimal.Decimal', 'Decimal', (['amount_text'], {}), '(amount_text)\n', (4335, 4348), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((4388, 4414), 'django.utils.dateformat.format', 'format_date', (['date', '"""Y-m-d"""'], {}), "(date, 'Y-m-d')\n", (4399, 4414), True, 'from django.utils.dateformat import format as format_date\n'), ((4466, 4487), 'django.utils.encoding.force_text', 'force_text', (['date_text'], {}), '(date_text)\n', (4476, 4487), False, 'from django.utils.encoding import force_text\n'), ((4499, 4520), 'django.utils.dateparse.parse_date', 'parse_date', (['date_text'], {}), '(date_text)\n', (4509, 4520), False, 'from django.utils.dateparse import parse_date\n'), ((4654, 4675), 'django.utils.encoding.force_text', 'force_text', (['date_text'], {}), '(date_text)\n', (4664, 4675), False, 'from django.utils.encoding import force_text\n'), ((4695, 4735), 'django.utils.formats.get_format', 'formats.get_format', (['"""DATE_INPUT_FORMATS"""'], {}), "('DATE_INPUT_FORMATS')\n", (4713, 4735), False, 'from django.utils import formats\n'), ((5188, 5225), 'mtp_common.auth.urljoin', 'urljoin', (['settings.GOVUK_PAY_URL', 'path'], {}), '(settings.GOVUK_PAY_URL, path)\n', (5195, 5225), False, 'from mtp_common.auth import api_client, urljoin\n'), ((5258, 5289), 'mtp_common.auth.urljoin', 'urljoin', (['settings.API_URL', 'path'], {}), '(settings.API_URL, path)\n', (5265, 5289), False, 'from mtp_common.auth import api_client, urljoin\n'), ((5323, 5355), 'mtp_common.auth.urljoin', 'urljoin', (['settings.SITE_URL', 'path'], {}), '(settings.SITE_URL, path)\n', (5330, 5355), False, 'from mtp_common.auth import api_client, urljoin\n'), ((5579, 5635), 'django.utils.cache.patch_cache_control', 'patch_cache_control', (['response'], {'public': '(True)', 'max_age': '(3600)'}), '(response, public=True, max_age=3600)\n', (5598, 5635), False, 'from django.utils.cache import patch_cache_control\n'), ((1875, 1890), 'decimal.Decimal', 'Decimal', (['number'], {}), '(number)\n', (1882, 1890), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((2923, 2935), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (2930, 2935), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((3463, 3478), 'decimal.Decimal', 'Decimal', (['"""1000"""'], {}), "('1000')\n", (3470, 3478), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((3506, 3521), 'decimal.Decimal', 
'Decimal', (['"""1.00"""'], {}), "('1.00')\n", (3513, 3521), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((3645, 3660), 'decimal.Decimal', 'Decimal', (['amount'], {}), '(amount)\n', (3652, 3660), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((3731, 3745), 'decimal.Decimal', 'Decimal', (['"""100"""'], {}), "('100')\n", (3738, 3745), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((4002, 4017), 'decimal.Decimal', 'Decimal', (['amount'], {}), '(amount)\n', (4009, 4017), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((1488, 1525), 'django.utils.translation.gettext_lazy', '_', (['"""Incorrect prisoner number format"""'], {}), "('Incorrect prisoner number format')\n", (1489, 1525), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3374, 3389), 'decimal.Decimal', 'Decimal', (['"""1000"""'], {}), "('1000')\n", (3381, 3389), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((4805, 4855), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_text', 'date_format'], {}), '(date_text, date_format)\n', (4831, 4855), False, 'import datetime\n'), ((2970, 2984), 'decimal.Decimal', 'Decimal', (['"""100"""'], {}), "('100')\n", (2977, 2984), False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n')] |
# -*- coding: UTF-8 -*-
import os
this_file_path = os.path.dirname(os.path.realpath(__file__))
MODELS_DIR = os.path.join(this_file_path, "models/")
| [
"os.path.realpath",
"os.path.join"
]
| [((111, 150), 'os.path.join', 'os.path.join', (['this_file_path', '"""models/"""'], {}), "(this_file_path, 'models/')\n", (123, 150), False, 'import os\n'), ((69, 95), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n')] |
from web.api import BaseAPI
from utils import mongo
import json
class DataApi(BaseAPI):
def __init__(self):
BaseAPI.__init__(self)
self._db = mongo.MongoInterface()
self.query = {}
self.fields = {
"donation_count": "$influences.electoral_commission.donation_count",
"donor_count": '$influences.electoral_commission.donor_count',
"donation_total_int": "$influences.electoral_commission.donation_total_int",
"mp_interest_relationships": "$influences.register_of_interests.relationship_count",
"lord_interest_relationships": "$influences.register_of_interests.interest_relationships",
"remuneration_count": "$influences.register_of_interests.remuneration_count",
"remuneration_total_int": "$influences.register_of_interests.remuneration_total_int",
"lobbyists_hired": "$influences.lobby_registers.lobbyist_hired"
}
def request(self, **args):
node_type = args.get("type")
category = args.get("category")
field = args.get("field")
summary = {
"influencers": self._influencers_aggregate(category, field),
#"lobby_agencies": self._influencers_aggregate(),
"political_parties": self._party_aggregate(category, field),
"mps": self._mp_aggregate(category, field),
"lords": self._lord_aggregate(category, field)
}
return {"children": summary[node_type][category]}
def _influencers_aggregate(self, category, field):
_db_table = 'api_influencers'
response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "influencer"),
"donation_count": self._format_top(top_count, "influencer", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = [
"remuneration_total_int",
"mp_interest_relationships",
"remuneration_count"
]
top_total, top_relationships, top_count = self._get_top(_db_table, reg_fields)
reg = {
"remuneration_total": self._format_top(top_total, "influencer"),
"interest_relationships": self._format_top(
top_relationships, "influencer", monetary=False
),
"remuneration_count": self._format_top(
top_count, "influencer", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _party_aggregate(self, category, field):
_db_table = 'api_political_parties'
response = {}
if category == "political_parties":
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
result = {
"donation_total": self._format_top(top_total, "party"),
"donation_count": self._format_top(top_count, "party", monetary=False)
}
response["electoral_commission"] = result[field]
return response
def _mp_aggregate(self, category, field):
_db_table = 'api_mps'
response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donor_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "mp"),
"donor_count": self._format_top(top_count, "mp", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = [
"remuneration_total_int",
"lord_interest_relationships",
"remuneration_count"
]
top_total, top_relationships, top_count = self._get_top(_db_table, reg_fields)
reg = {
"remuneration_total": self._format_top(top_total, "mp"),
"interest_relationships": self._format_top(
top_relationships, "mp", monetary=False
),
"remuneration_count": self._format_top(
top_count, "mp", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _lord_aggregate(self, category, field):
_db_table = 'api_lords'
        response = {}
if category == "electoral_commission":
# get electoral commission data
ec_fields = ["donation_total_int", "donation_count"]
top_total, top_count = self._get_top(_db_table, ec_fields)
ec = {
"donation_total": self._format_top(top_total, "lord"),
"donation_count": self._format_top(top_count, "lord", monetary=False)
}
response["electoral_commission"] = ec[field]
if category == "register_of_interests":
# get register of interests data
reg_fields = ["lord_interest_relationships"]
top_relationships = self._get_top(_db_table, reg_fields)[0]
reg = {
"interest_relationships": self._format_top(
top_relationships, "lord", monetary=False
)
}
response["register_of_interests"] = reg[field]
return response
def _format_top(self, results, label, monetary=True):
updated = []
for entry in results:
new = {
"name": entry["_id"],
"details_url": self.named_entity_resources(
entry["_id"], label
)[0]
}
if monetary:
new["total_int"] = entry["total"]
new["total"] = self._format_number(entry["total"])
else:
new["total"] = entry["total"]
updated.append(new)
return updated
def _get_aggregate(self, table, field_list):
return [self._db.sum(table, field=self.fields[x]) for x in field_list]
def _get_top(self, table, field_list):
return [self._db.top(table, field=self.fields[x]) for x in field_list]
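    # Note (added): _format_top assumes each entry returned by self._db.top()
    # is a mapping with an "_id" key (the entity name) and a "total" key (the
    # aggregated value), e.g. {"_id": "Some Donor", "total": 1000} (illustrative values).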
| [
"web.api.BaseAPI.__init__",
"utils.mongo.MongoInterface"
]
| [((122, 144), 'web.api.BaseAPI.__init__', 'BaseAPI.__init__', (['self'], {}), '(self)\n', (138, 144), False, 'from web.api import BaseAPI\n'), ((164, 186), 'utils.mongo.MongoInterface', 'mongo.MongoInterface', ([], {}), '()\n', (184, 186), False, 'from utils import mongo\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import inspect
import os
import re
import netaddr
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net as n_utils
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import strutils
from ovsdbapp import constants as ovsdbapp_const
from neutron._i18n import _
from neutron.common.ovn import constants
from neutron.common.ovn import exceptions as ovn_exc
from neutron.db import models_v2
from neutron.objects import ports as ports_obj
LOG = log.getLogger(__name__)
CONF = cfg.CONF
DNS_RESOLVER_FILE = "/etc/resolv.conf"
AddrPairsDiff = collections.namedtuple(
'AddrPairsDiff', ['added', 'removed', 'changed'])
PortExtraDHCPValidation = collections.namedtuple(
'PortExtraDHCPValidation', ['failed', 'invalid_ipv4', 'invalid_ipv6'])
def ovn_name(id):
# The name of the OVN entry will be neutron-<UUID>
# This is due to the fact that the OVN application checks if the name
# is a UUID. If so then there will be no matches.
# We prefix the UUID to enable us to use the Neutron UUID when
# updating, deleting etc.
return "%s%s" % (constants.OVN_NAME_PREFIX, id)
def ovn_lrouter_port_name(id):
# The name of the OVN lrouter port entry will be lrp-<UUID>
    # This is to distinguish it from the name of the connected lswitch patch
    # port, which is named with the neutron port uuid, so that OVS patch ports are
# generated properly. The pairing patch port names will be:
# - patch-lrp-<UUID>-to-<UUID>
# - patch-<UUID>-to-lrp-<UUID>
# lrp stands for Logical Router Port
return constants.LRP_PREFIX + '%s' % id
def ovn_cr_lrouter_port_name(_id):
# The name of the OVN chassisredirect lrouter port entry will be
# cr-lrp-<UUID>
return 'cr-lrp-%s' % _id
def ovn_provnet_port_name(network_id):
# The name of OVN lswitch provider network port entry will be
# provnet-<Network-UUID>. The port is created for network having
# provider:physical_network attribute.
return constants.OVN_PROVNET_PORT_NAME_PREFIX + '%s' % network_id
def ovn_vhu_sockpath(sock_dir, port_id):
# Frame the socket path of a virtio socket
return os.path.join(
sock_dir,
# this parameter will become the virtio port name,
# so it should not exceed IFNAMSIZ(16).
(const.VHOST_USER_DEVICE_PREFIX + port_id)[:14])
def ovn_addrset_name(sg_id, ip_version):
# The name of the address set for the given security group id and ip
# version. The format is:
# as-<ip version>-<security group uuid>
# with all '-' replaced with '_'. This replacement is necessary
# because OVN doesn't support '-' in an address set name.
return ('as-%s-%s' % (ip_version, sg_id)).replace('-', '_')
def ovn_pg_addrset_name(sg_id, ip_version):
# The name of the address set for the given security group id modelled as a
# Port Group and ip version. The format is:
# pg-<security group uuid>-<ip version>
# with all '-' replaced with '_'. This replacement is necessary
# because OVN doesn't support '-' in an address set name.
return ('pg-%s-%s' % (sg_id, ip_version)).replace('-', '_')
def ovn_port_group_name(sg_id):
# The name of the port group for the given security group id.
# The format is: pg-<security group uuid>.
return ('pg-%s' % sg_id).replace('-', '_')
def is_network_device_port(port):
return port.get('device_owner', '').startswith(
const.DEVICE_OWNER_PREFIXES)
def _is_dhcp_disabled(dhcp_opt):
return (dhcp_opt['opt_name'] == constants.DHCP_DISABLED_OPT and
dhcp_opt.get('opt_value', '').lower() == 'true')
def validate_port_extra_dhcp_opts(port):
"""Validate port's extra DHCP options.
:param port: A neutron port.
:returns: A PortExtraDHCPValidation object.
"""
invalid = {const.IP_VERSION_4: [], const.IP_VERSION_6: []}
failed = False
for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
ip_version = edo['ip_version']
opt_name = edo['opt_name']
# If DHCP is disabled for this port via this special option,
# always succeed the validation
if _is_dhcp_disabled(edo):
failed = False
break
if opt_name not in constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]:
invalid[ip_version].append(opt_name)
failed = True
return PortExtraDHCPValidation(
failed=failed,
invalid_ipv4=invalid[const.IP_VERSION_4] if failed else [],
invalid_ipv6=invalid[const.IP_VERSION_6] if failed else [])
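# Illustrative sketch (added; assumes edo_ext.EXTRADHCPOPTS is the
# 'extra_dhcp_opts' port key and that 'bogus-option' is not in
# SUPPORTED_DHCP_OPTS_MAPPING): for a port such as
#   {'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'bogus-option', 'opt_value': 'x'}]}
# the helper returns
#   PortExtraDHCPValidation(failed=True, invalid_ipv4=['bogus-option'], invalid_ipv6=[])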
def get_lsp_dhcp_opts(port, ip_version):
# Get dhcp options from Neutron port, for setting DHCP_Options row
# in OVN.
lsp_dhcp_disabled = False
lsp_dhcp_opts = {}
if is_network_device_port(port):
lsp_dhcp_disabled = True
else:
mapping = constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]
for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
if edo['ip_version'] != ip_version:
continue
if _is_dhcp_disabled(edo):
# OVN native DHCP is disabled on this port
lsp_dhcp_disabled = True
# Make sure return value behavior not depends on the order and
# content of the extra DHCP options for the port
lsp_dhcp_opts.clear()
break
if edo['opt_name'] not in mapping:
LOG.warning('The DHCP option %(opt_name)s on port %(port)s '
                            'is not supported by OVN, ignoring it',
{'opt_name': edo['opt_name'], 'port': port['id']})
continue
opt = mapping[edo['opt_name']]
lsp_dhcp_opts[opt] = edo['opt_value']
return (lsp_dhcp_disabled, lsp_dhcp_opts)
def is_lsp_trusted(port):
return n_utils.is_port_trusted(port) if port.get('device_owner') else False
def is_lsp_ignored(port):
# Since the floating IP port is not bound to any chassis, packets from vm
# destined to floating IP will be dropped. To overcome this, we do not
# create/update floating IP port in OVN.
return port.get('device_owner') in [const.DEVICE_OWNER_FLOATINGIP]
def get_lsp_security_groups(port, skip_trusted_port=True):
    # In other agents such as OVS, skipping trusted ports is handled in the
    # security groups RPC. We don't have that step, so we do it here.
return [] if (skip_trusted_port and is_lsp_trusted(port)
) else port.get('security_groups', [])
def is_snat_enabled(router):
return router.get(l3.EXTERNAL_GW_INFO, {}).get('enable_snat', True)
def is_port_security_enabled(port):
return port.get(psec.PORTSECURITY)
def is_security_groups_enabled(port):
return port.get(constants.PORT_SECURITYGROUPS)
def validate_and_get_data_from_binding_profile(port):
if (constants.OVN_PORT_BINDING_PROFILE not in port or
not validators.is_attr_set(
port[constants.OVN_PORT_BINDING_PROFILE])):
return {}
param_set = {}
param_dict = {}
for param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS:
param_keys = param_set.keys()
for param_key in param_keys:
try:
param_dict[param_key] = (port[
constants.OVN_PORT_BINDING_PROFILE][param_key])
except KeyError:
pass
if len(param_dict) == 0:
continue
if len(param_dict) != len(param_keys):
msg = _('Invalid binding:profile. %s are all '
'required.') % param_keys
raise n_exc.InvalidInput(error_message=msg)
if (len(port[constants.OVN_PORT_BINDING_PROFILE]) != len(
param_keys)):
msg = _('Invalid binding:profile. too many parameters')
raise n_exc.InvalidInput(error_message=msg)
break
if not param_dict:
return {}
for param_key, param_type in param_set.items():
if param_type is None:
continue
param_value = param_dict[param_key]
if not isinstance(param_value, param_type):
msg = _('Invalid binding:profile. %(key)s %(value)s '
'value invalid type') % {'key': param_key,
'value': param_value}
raise n_exc.InvalidInput(error_message=msg)
# Make sure we can successfully look up the port indicated by
# parent_name. Just let it raise the right exception if there is a
# problem.
if 'parent_name' in param_set:
plugin = directory.get_plugin()
plugin.get_port(n_context.get_admin_context(),
param_dict['parent_name'])
if 'tag' in param_set:
tag = int(param_dict['tag'])
if tag < 0 or tag > 4095:
msg = _('Invalid binding:profile. tag "%s" must be '
'an integer between 0 and 4095, inclusive') % tag
raise n_exc.InvalidInput(error_message=msg)
return param_dict
def is_dhcp_options_ignored(subnet):
# Don't insert DHCP_Options entry for v6 subnet with 'SLAAC' as
# 'ipv6_address_mode', since DHCPv6 shouldn't work for this mode.
return (subnet['ip_version'] == const.IP_VERSION_6 and
subnet.get('ipv6_address_mode') == const.IPV6_SLAAC)
def get_ovn_ipv6_address_mode(address_mode):
return constants.OVN_IPV6_ADDRESS_MODES[address_mode]
def get_revision_number(resource, resource_type):
"""Get the resource's revision number based on its type."""
if resource_type in (constants.TYPE_NETWORKS,
constants.TYPE_PORTS,
constants.TYPE_SECURITY_GROUP_RULES,
constants.TYPE_ROUTERS,
constants.TYPE_ROUTER_PORTS,
constants.TYPE_SECURITY_GROUPS,
constants.TYPE_FLOATINGIPS, constants.TYPE_SUBNETS):
return resource['revision_number']
else:
raise ovn_exc.UnknownResourceType(resource_type=resource_type)
def remove_macs_from_lsp_addresses(addresses):
"""Remove the mac addreses from the Logical_Switch_Port addresses column.
:param addresses: The list of addresses from the Logical_Switch_Port.
Example: ["80:fa:5b:06:72:b7 172.16.58.3",
"ff:ff:ff:ff:ff:ff 10.0.0.2"]
    :returns: A list of IP addresses (v4 and v6)
"""
ip_list = []
for addr in addresses:
ip_list.extend([x for x in addr.split() if
(netutils.is_valid_ipv4(x) or
netutils.is_valid_ipv6(x))])
return ip_list
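# For the docstring example above this returns ['172.16.58.3', '10.0.0.2']:
# the MAC tokens fail both netutils.is_valid_ipv4() and is_valid_ipv6(), so
# only the IP literals survive. (Added note.)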
def get_allowed_address_pairs_ip_addresses(port):
"""Return a list of IP addresses from port's allowed_address_pairs.
:param port: A neutron port
    :returns: A list of IP addresses (v4 and v6)
"""
return [x['ip_address'] for x in port.get('allowed_address_pairs', [])
if 'ip_address' in x]
def get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port):
"""Return a list of IP addresses from ovn port.
Return a list of IP addresses equivalent of Neutron's port
allowed_address_pairs column using the data in the OVN port.
:param ovn_port: A OVN port
    :returns: A list of IP addresses (v4 and v6)
"""
addresses = remove_macs_from_lsp_addresses(ovn_port.addresses)
port_security = remove_macs_from_lsp_addresses(ovn_port.port_security)
return [x for x in port_security if x not in addresses]
def get_ovn_port_security_groups(ovn_port, skip_trusted_port=True):
info = {'security_groups': ovn_port.external_ids.get(
constants.OVN_SG_IDS_EXT_ID_KEY, '').split(),
'device_owner': ovn_port.external_ids.get(
constants.OVN_DEVICE_OWNER_EXT_ID_KEY, '')}
return get_lsp_security_groups(info, skip_trusted_port=skip_trusted_port)
def get_ovn_port_addresses(ovn_port):
addresses = remove_macs_from_lsp_addresses(ovn_port.addresses)
port_security = remove_macs_from_lsp_addresses(ovn_port.port_security)
return list(set(addresses + port_security))
def sort_ips_by_version(addresses):
ip_map = {'ip4': [], 'ip6': []}
for addr in addresses:
ip_version = netaddr.IPNetwork(addr).version
ip_map['ip%d' % ip_version].append(addr)
return ip_map
def is_lsp_router_port(port):
return port.get('device_owner') in const.ROUTER_PORT_OWNERS
def get_lrouter_ext_gw_static_route(ovn_router):
return [route for route in getattr(ovn_router, 'static_routes', []) if
strutils.bool_from_string(getattr(
route, 'external_ids', {}).get(
constants.OVN_ROUTER_IS_EXT_GW, 'false'))]
def get_lrouter_snats(ovn_router):
return [n for n in getattr(ovn_router, 'nat', []) if n.type == 'snat']
def get_lrouter_non_gw_routes(ovn_router):
routes = []
for route in getattr(ovn_router, 'static_routes', []):
external_ids = getattr(route, 'external_ids', {})
if strutils.bool_from_string(
external_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false')):
continue
routes.append({'destination': route.ip_prefix,
'nexthop': route.nexthop})
return routes
def is_ovn_l3(l3_plugin):
return hasattr(l3_plugin, '_ovn_client_inst')
def get_system_dns_resolvers(resolver_file=DNS_RESOLVER_FILE):
resolvers = []
if not os.path.exists(resolver_file):
return resolvers
with open(resolver_file, 'r') as rconf:
for line in rconf.readlines():
if not line.startswith('nameserver'):
continue
line = line.split('nameserver')[1].strip()
ipv4 = re.search(r'^(?:[0-9]{1,3}\.){3}[0-9]{1,3}', line)
if ipv4:
resolvers.append(ipv4.group(0))
return resolvers
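# Example (added note): with a resolver file containing
#   nameserver 10.0.0.53
#   nameserver 8.8.8.8
# this returns ['10.0.0.53', '8.8.8.8']. The regex only captures IPv4
# resolvers, so IPv6 nameserver entries are skipped.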
def get_port_subnet_ids(port):
fixed_ips = list(port['fixed_ips'])
return [f['subnet_id'] for f in fixed_ips]
def get_method_class(method):
if not inspect.ismethod(method):
return
return method.__self__.__class__
def ovn_metadata_name(id_):
"""Return the OVN metadata name based on an id."""
return 'metadata-%s' % id_
def is_gateway_chassis_invalid(chassis_name, gw_chassis,
physnet, chassis_physnets):
"""Check if gateway chassis is invalid
@param chassis_name: gateway chassis name
@type chassis_name: string
@param gw_chassis: List of gateway chassis in the system
@type gw_chassis: []
@param physnet: physical network associated to chassis_name
@type physnet: string
@param chassis_physnets: Dictionary linking chassis with their physnets
@type chassis_physnets: {}
@return Boolean
"""
if chassis_name == constants.OVN_GATEWAY_INVALID_CHASSIS:
return True
elif chassis_name not in chassis_physnets:
return True
elif physnet and physnet not in chassis_physnets.get(chassis_name):
return True
elif gw_chassis and chassis_name not in gw_chassis:
return True
return False
def is_provider_network(network):
return network.get(external_net.EXTERNAL, False)
def is_neutron_dhcp_agent_port(port):
"""Check if the given DHCP port belongs to Neutron DHCP agents
    DHCP ports with a device_id equal to 'reserved_dhcp_port'
    or starting with the word 'dhcp' belong to the Neutron DHCP agents.
"""
return (port['device_owner'] == const.DEVICE_OWNER_DHCP and
(port['device_id'] == const.DEVICE_ID_RESERVED_DHCP_PORT or
port['device_id'].startswith('dhcp')))
def compute_address_pairs_diff(ovn_port, neutron_port):
"""Compute the differences in the allowed_address_pairs field."""
ovn_ap = get_allowed_address_pairs_ip_addresses_from_ovn_port(
ovn_port)
neutron_ap = get_allowed_address_pairs_ip_addresses(neutron_port)
added = set(neutron_ap) - set(ovn_ap)
removed = set(ovn_ap) - set(neutron_ap)
return AddrPairsDiff(added, removed, changed=any(added or removed))
def get_ovn_cms_options(chassis):
"""Return the list of CMS options in a Chassis."""
return [opt.strip() for opt in chassis.external_ids.get(
constants.OVN_CMS_OPTIONS, '').split(',')]
def is_gateway_chassis(chassis):
"""Check if the given chassis is a gateway chassis"""
return constants.CMS_OPT_CHASSIS_AS_GW in get_ovn_cms_options(chassis)
def get_port_capabilities(port):
"""Return a list of port's capabilities"""
return port.get(portbindings.PROFILE, {}).get('capabilities', [])
def get_port_id_from_gwc_row(row):
"""Return a port_id from gwc row
The Gateway_Chassis row stores router port_id in
the row name attribute:
<prefix>-<port_id>_<chassis_id>
:param row: A Gateway_Chassis table row.
:returns: String containing router port_id.
"""
return constants.RE_PORT_FROM_GWC.search(row.name).group(2)
def get_chassis_availability_zones(chassis):
"""Return a list of availability zones from a given OVN Chassis."""
azs = set()
if not chassis:
return azs
opt_key = constants.CMS_OPT_AVAILABILITY_ZONES + '='
for opt in get_ovn_cms_options(chassis):
if not opt.startswith(opt_key):
continue
values = opt.split('=')[1]
azs = {az.strip() for az in values.split(':') if az.strip()}
break
return azs
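# Example (added note; assumes constants.CMS_OPT_AVAILABILITY_ZONES is
# 'availability-zones'): a chassis whose CMS options read
#   'enable-chassis-as-gw,availability-zones=az-0:az-1'
# yields {'az-0', 'az-1'} here.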
def get_chassis_in_azs(chassis_list, az_list):
"""Return a set of Chassis that belongs to the AZs.
Given a list of Chassis and a list of availability zones (AZs),
return a set of Chassis that belongs to one or more AZs.
:param chassis_list: A list of Chassis objects
:param az_list: A list of availability zones
:returns: A set of Chassis names
"""
chassis = set()
for ch in chassis_list:
chassis_azs = get_chassis_availability_zones(ch)
if chassis_azs.intersection(az_list):
chassis.add(ch.name)
return chassis
def get_gateway_chassis_without_azs(chassis_list):
"""Return a set of Chassis that does not belong to any AZs.
Filter a list of Chassis and return only the Chassis that does not
belong to any availability zones.
:param chassis_list: A list of Chassis objects
:returns: A set of Chassis names
"""
return {ch.name for ch in chassis_list if is_gateway_chassis(ch) and not
get_chassis_availability_zones(ch)}
def parse_ovn_lb_port_forwarding(ovn_rtr_lb_pfs):
"""Return a dictionary compatible with port forwarding from OVN lb."""
result = {}
for ovn_lb in ovn_rtr_lb_pfs:
ext_ids = ovn_lb.external_ids
fip_id = ext_ids.get(constants.OVN_FIP_EXT_ID_KEY)
protocol = (ovn_lb.protocol[0]
if ovn_lb.protocol else ovsdbapp_const.PROTO_TCP)
fip_dict = result.get(fip_id, {})
fip_dict_proto = fip_dict.get(protocol, set())
ovn_vips = ovn_lb.vips
for vip, ips in ovn_vips.items():
for ip in ips.split(','):
fip_dict_proto.add("{} {}".format(vip, ip))
fip_dict[protocol] = fip_dict_proto
result[fip_id] = fip_dict
return result
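# Result shape (added note; the UUID and addresses are invented for
# illustration): a dict keyed by floating IP id, then protocol, holding
# 'vip backend' strings, e.g.
#   {'<fip-uuid>': {'tcp': {'172.24.4.8:2222 10.0.0.10:22'}}}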
def get_network_name_from_datapath(datapath):
return datapath.external_ids['name'].replace('neutron-', '')
def is_port_external(port):
# This port is represented in OVN DB as lsp.type=external
capabilities = []
vnic_type = portbindings.VNIC_NORMAL
if isinstance(port, dict):
capabilities = get_port_capabilities(port)
vnic_type = port.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
else:
if isinstance(port, models_v2.Port):
bindings = port.port_bindings
elif isinstance(port, ports_obj.Port):
bindings = port.bindings
else: # What else could be "port"?
bindings = []
if bindings:
profile = bindings[0].get('profile')
if profile:
# DB object, not OVO, stores the dict in JSON.
profile = (jsonutils.loads(profile) if isinstance(profile, str)
else profile)
capabilities = profile.get('capabilities', [])
vnic_type = bindings[0].get('vnic_type', portbindings.VNIC_NORMAL)
return (vnic_type in constants.EXTERNAL_PORT_TYPES and
constants.PORT_CAP_SWITCHDEV not in capabilities)
| [
"netaddr.IPNetwork",
"os.path.exists",
"neutron_lib.api.validators.is_attr_set",
"collections.namedtuple",
"re.search",
"oslo_utils.netutils.is_valid_ipv6",
"inspect.ismethod",
"neutron.common.ovn.exceptions.UnknownResourceType",
"neutron_lib.utils.net.is_port_trusted",
"os.path.join",
"neutron.common.ovn.constants.RE_PORT_FROM_GWC.search",
"neutron._i18n._",
"oslo_serialization.jsonutils.loads",
"oslo_utils.netutils.is_valid_ipv4",
"neutron_lib.context.get_admin_context",
"neutron_lib.exceptions.InvalidInput",
"neutron_lib.plugins.directory.get_plugin",
"oslo_log.log.getLogger"
]
| [((1595, 1618), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1608, 1618), False, 'from oslo_log import log\n'), ((1693, 1765), 'collections.namedtuple', 'collections.namedtuple', (['"""AddrPairsDiff"""', "['added', 'removed', 'changed']"], {}), "('AddrPairsDiff', ['added', 'removed', 'changed'])\n", (1715, 1765), False, 'import collections\n'), ((1798, 1895), 'collections.namedtuple', 'collections.namedtuple', (['"""PortExtraDHCPValidation"""', "['failed', 'invalid_ipv4', 'invalid_ipv6']"], {}), "('PortExtraDHCPValidation', ['failed', 'invalid_ipv4',\n 'invalid_ipv6'])\n", (1820, 1895), False, 'import collections\n'), ((3267, 3338), 'os.path.join', 'os.path.join', (['sock_dir', '(const.VHOST_USER_DEVICE_PREFIX + port_id)[:14]'], {}), '(sock_dir, (const.VHOST_USER_DEVICE_PREFIX + port_id)[:14])\n', (3279, 3338), False, 'import os\n'), ((6950, 6979), 'neutron_lib.utils.net.is_port_trusted', 'n_utils.is_port_trusted', (['port'], {}), '(port)\n', (6973, 6979), True, 'from neutron_lib.utils import net as n_utils\n'), ((9692, 9714), 'neutron_lib.plugins.directory.get_plugin', 'directory.get_plugin', ([], {}), '()\n', (9712, 9714), False, 'from neutron_lib.plugins import directory\n'), ((11120, 11176), 'neutron.common.ovn.exceptions.UnknownResourceType', 'ovn_exc.UnknownResourceType', ([], {'resource_type': 'resource_type'}), '(resource_type=resource_type)\n', (11147, 11176), True, 'from neutron.common.ovn import exceptions as ovn_exc\n'), ((14546, 14575), 'os.path.exists', 'os.path.exists', (['resolver_file'], {}), '(resolver_file)\n', (14560, 14575), False, 'import os\n'), ((15140, 15164), 'inspect.ismethod', 'inspect.ismethod', (['method'], {}), '(method)\n', (15156, 15164), False, 'import inspect\n'), ((8033, 8097), 'neutron_lib.api.validators.is_attr_set', 'validators.is_attr_set', (['port[constants.OVN_PORT_BINDING_PROFILE]'], {}), '(port[constants.OVN_PORT_BINDING_PROFILE])\n', (8055, 8097), False, 'from neutron_lib.api import validators\n'), ((8719, 8756), 'neutron_lib.exceptions.InvalidInput', 'n_exc.InvalidInput', ([], {'error_message': 'msg'}), '(error_message=msg)\n', (8737, 8756), True, 'from neutron_lib import exceptions as n_exc\n'), ((8871, 8920), 'neutron._i18n._', '_', (['"""Invalid binding:profile. too many parameters"""'], {}), "('Invalid binding:profile. 
too many parameters')\n", (8872, 8920), False, 'from neutron._i18n import _\n'), ((8939, 8976), 'neutron_lib.exceptions.InvalidInput', 'n_exc.InvalidInput', ([], {'error_message': 'msg'}), '(error_message=msg)\n', (8957, 8976), True, 'from neutron_lib import exceptions as n_exc\n'), ((9448, 9485), 'neutron_lib.exceptions.InvalidInput', 'n_exc.InvalidInput', ([], {'error_message': 'msg'}), '(error_message=msg)\n', (9466, 9485), True, 'from neutron_lib import exceptions as n_exc\n'), ((9739, 9768), 'neutron_lib.context.get_admin_context', 'n_context.get_admin_context', ([], {}), '()\n', (9766, 9768), True, 'from neutron_lib import context as n_context\n'), ((10073, 10110), 'neutron_lib.exceptions.InvalidInput', 'n_exc.InvalidInput', ([], {'error_message': 'msg'}), '(error_message=msg)\n', (10091, 10110), True, 'from neutron_lib import exceptions as n_exc\n'), ((13345, 13368), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['addr'], {}), '(addr)\n', (13362, 13368), False, 'import netaddr\n'), ((14836, 14886), 're.search', 're.search', (['"""^(?:[0-9]{1,3}\\\\.){3}[0-9]{1,3}"""', 'line'], {}), "('^(?:[0-9]{1,3}\\\\.){3}[0-9]{1,3}', line)\n", (14845, 14886), False, 'import re\n'), ((18058, 18101), 'neutron.common.ovn.constants.RE_PORT_FROM_GWC.search', 'constants.RE_PORT_FROM_GWC.search', (['row.name'], {}), '(row.name)\n', (18091, 18101), False, 'from neutron.common.ovn import constants\n'), ((8614, 8664), 'neutron._i18n._', '_', (['"""Invalid binding:profile. %s are all required."""'], {}), "('Invalid binding:profile. %s are all required.')\n", (8615, 8664), False, 'from neutron._i18n import _\n'), ((9252, 9318), 'neutron._i18n._', '_', (['"""Invalid binding:profile. %(key)s %(value)s value invalid type"""'], {}), "('Invalid binding:profile. %(key)s %(value)s value invalid type')\n", (9253, 9318), False, 'from neutron._i18n import _\n'), ((9938, 10030), 'neutron._i18n._', '_', (['"""Invalid binding:profile. tag "%s" must be an integer between 0 and 4095, inclusive"""'], {}), '(\'Invalid binding:profile. tag "%s" must be an integer between 0 and 4095, inclusive\'\n )\n', (9939, 10030), False, 'from neutron._i18n import _\n'), ((21257, 21281), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['profile'], {}), '(profile)\n', (21272, 21281), False, 'from oslo_serialization import jsonutils\n'), ((11653, 11678), 'oslo_utils.netutils.is_valid_ipv4', 'netutils.is_valid_ipv4', (['x'], {}), '(x)\n', (11675, 11678), False, 'from oslo_utils import netutils\n'), ((11706, 11731), 'oslo_utils.netutils.is_valid_ipv6', 'netutils.is_valid_ipv6', (['x'], {}), '(x)\n', (11728, 11731), False, 'from oslo_utils import netutils\n')] |
from django.urls import reverse
from consents.models import Consent, Term
from workshops.models import KnowledgeDomain, Person, Qualification
from workshops.tests.base import TestBase
class TestAutoUpdateProfile(TestBase):
def setUp(self):
self._setUpAirports()
self._setUpLessons()
self._setUpLanguages()
self.user = Person.objects.create_user(
username="user",
personal="",
family="",
email="<EMAIL>",
password="<PASSWORD>",
)
self.person_consent_required_terms(self.user)
Qualification.objects.create(person=self.user, lesson=self.git)
Qualification.objects.create(person=self.user, lesson=self.sql)
self.physics = KnowledgeDomain.objects.create(name="physics")
self.chemistry = KnowledgeDomain.objects.create(name="chemistry")
self.user.domains.add(self.physics)
self.user.languages.add(self.english)
self.user.languages.add(self.french)
self.client.login(username="user", password="<PASSWORD>")
def test_load_form(self):
rv = self.client.get(reverse("autoupdate_profile"))
self.assertEqual(rv.status_code, 200)
def test_update_profile(self):
term_slugs = [
"may-contact",
"may-publish-name",
"public-profile",
]
terms_by_term_slug = {
term.slug: term
for term in Term.objects.filter(slug__in=term_slugs)
.active()
.prefetch_active_options()
}
consent_data = {
f"consents-{slug}": terms_by_term_slug[slug].active_options[0].pk
for slug in term_slugs
}
data = {
"personal": "admin",
"middle": "",
"family": "Smith",
"email": "<EMAIL>",
"gender": Person.UNDISCLOSED,
"airport": self.airport_0_0.pk,
"github": "changed",
"twitter": "",
"url": "",
"username": "changed",
"affiliation": "",
"languages": [self.latin.pk, self.french.pk],
"domains": [self.chemistry.pk],
"lessons": [self.git.pk, self.matlab.pk],
"consents-person": self.user.pk,
**consent_data,
}
rv = self.client.post(reverse("autoupdate_profile"), data, follow=True)
self.assertEqual(rv.status_code, 200)
content = rv.content.decode("utf-8")
self.assertNotIn("Fix errors below", content)
self.user.refresh_from_db()
self.assertEqual(self.user.username, "user") # username is read-only
self.assertEqual(self.user.github, None) # github is read-only
self.assertEqual(self.user.family, "Smith")
self.assertEqual(set(self.user.lessons.all()), {self.git, self.matlab})
self.assertEqual(list(self.user.domains.all()), [self.chemistry])
self.assertEqual(set(self.user.languages.all()), {self.french, self.latin})
updated_consents_by_term_slug = {
consent.term.slug: consent
for consent in Consent.objects.filter(
term__slug__in=term_slugs, person=self.user
)
.active()
.select_related("term")
}
for slug in term_slugs:
self.assertEqual(
updated_consents_by_term_slug[slug].term_option.pk,
consent_data[f"consents-{slug}"],
)
| [
"consents.models.Consent.objects.filter",
"workshops.models.Qualification.objects.create",
"consents.models.Term.objects.filter",
"django.urls.reverse",
"workshops.models.Person.objects.create_user",
"workshops.models.KnowledgeDomain.objects.create"
]
| [((358, 470), 'workshops.models.Person.objects.create_user', 'Person.objects.create_user', ([], {'username': '"""user"""', 'personal': '""""""', 'family': '""""""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(username='user', personal='', family='', email=\n '<EMAIL>', password='<PASSWORD>')\n", (384, 470), False, 'from workshops.models import KnowledgeDomain, Person, Qualification\n'), ((601, 664), 'workshops.models.Qualification.objects.create', 'Qualification.objects.create', ([], {'person': 'self.user', 'lesson': 'self.git'}), '(person=self.user, lesson=self.git)\n', (629, 664), False, 'from workshops.models import KnowledgeDomain, Person, Qualification\n'), ((673, 736), 'workshops.models.Qualification.objects.create', 'Qualification.objects.create', ([], {'person': 'self.user', 'lesson': 'self.sql'}), '(person=self.user, lesson=self.sql)\n', (701, 736), False, 'from workshops.models import KnowledgeDomain, Person, Qualification\n'), ((761, 807), 'workshops.models.KnowledgeDomain.objects.create', 'KnowledgeDomain.objects.create', ([], {'name': '"""physics"""'}), "(name='physics')\n", (791, 807), False, 'from workshops.models import KnowledgeDomain, Person, Qualification\n'), ((833, 881), 'workshops.models.KnowledgeDomain.objects.create', 'KnowledgeDomain.objects.create', ([], {'name': '"""chemistry"""'}), "(name='chemistry')\n", (863, 881), False, 'from workshops.models import KnowledgeDomain, Person, Qualification\n'), ((1145, 1174), 'django.urls.reverse', 'reverse', (['"""autoupdate_profile"""'], {}), "('autoupdate_profile')\n", (1152, 1174), False, 'from django.urls import reverse\n'), ((2367, 2396), 'django.urls.reverse', 'reverse', (['"""autoupdate_profile"""'], {}), "('autoupdate_profile')\n", (2374, 2396), False, 'from django.urls import reverse\n'), ((1463, 1503), 'consents.models.Term.objects.filter', 'Term.objects.filter', ([], {'slug__in': 'term_slugs'}), '(slug__in=term_slugs)\n', (1482, 1503), False, 'from consents.models import Consent, Term\n'), ((3148, 3215), 'consents.models.Consent.objects.filter', 'Consent.objects.filter', ([], {'term__slug__in': 'term_slugs', 'person': 'self.user'}), '(term__slug__in=term_slugs, person=self.user)\n', (3170, 3215), False, 'from consents.models import Consent, Term\n')] |
import numpy as np
import time
import cv2
import colorsys
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, ReLU, Multiply
# Custom objects from backbones package https://github.com/david8862/keras-YOLOv3-model-set/tree/master/common/backbones
def mish(x):
return x * K.tanh(K.softplus(x))
def hard_swish(x):
return Multiply()([Activation(hard_sigmoid)(x), x])
def hard_sigmoid(x):
return ReLU(6.)(x + 3.) * (1. / 6.)
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
if K.backend() == 'tensorflow':
try:
# The native TF implementation has a more
# memory-efficient gradient implementation
return K.tf.nn.swish(x)
except AttributeError:
pass
return x * K.sigmoid(x)
def get_custom_objects():
'''
form up a custom_objects dict so that the customized
layer/function call could be correctly parsed when keras
.h5 model is loading or converting
'''
custom_objects_dict = {
'tf': tf,
'swish': swish,
'hard_sigmoid': hard_sigmoid,
'hard_swish': hard_swish,
'mish': mish
}
return custom_objects_dict
def get_multiscale_list():
input_shape_list = [(320, 320), (352, 352), (384, 384), (416, 416),
(448, 448), (480, 480), (512, 512), (544, 544), (576, 576), (608, 608)]
return input_shape_list
def resize_anchors(base_anchors, target_shape, base_shape=(416, 416)):
'''
    The original anchor sizes are clustered from the COCO dataset
    under an input shape of (416, 416). We need to resize them to
    our training input shape for better performance.
'''
return np.around(base_anchors*target_shape[::-1]/base_shape[::-1])
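# Worked example (added note; the (10, 13) anchor is just a sample value):
#   resize_anchors(np.array([[10, 13]]), (608, 608)) -> array([[15., 19.]])
# since each side is scaled by 608/416 and then rounded by np.around.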
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def get_colors(class_names):
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
# Shuffle colors to decorrelate adjacent classes.
np.random.shuffle(colors)
np.random.seed(None) # Reset seed to default.
return colors
def get_dataset(annotation_file, shuffle=True):
with open(annotation_file) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
if shuffle:
np.random.seed(int(time.time()))
np.random.shuffle(lines)
# np.random.seed(None)
return lines
def draw_label(image, text, color, coords):
font = cv2.FONT_HERSHEY_PLAIN
font_scale = 1.
(text_width, text_height) = cv2.getTextSize(
text, font, fontScale=font_scale, thickness=1)[0]
padding = 5
rect_height = text_height + padding * 2
rect_width = text_width + padding * 2
(x, y) = coords
cv2.rectangle(image, (x, y), (x + rect_width,
y - rect_height), color, cv2.FILLED)
cv2.putText(image, text, (x + padding, y - text_height + padding), font,
fontScale=font_scale,
color=(255, 255, 255),
lineType=cv2.LINE_AA)
return image
def draw_boxes(image, boxes, classes, scores, class_names, colors, show_score=True):
if boxes is None or len(boxes) == 0:
return image
if classes is None or len(classes) == 0:
return image
for box, cls, score in zip(boxes, classes, scores):
xmin, ymin, xmax, ymax = map(int, box)
class_name = class_names[cls]
if show_score:
label = '{} {:.2f}'.format(class_name, score)
else:
label = '{}'.format(class_name)
#print(label, (xmin, ymin), (xmax, ymax))
# if no color info, use black(0,0,0)
if colors is None:
color = (0, 0, 0)
else:
color = colors[cls]
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 1, cv2.LINE_AA)
image = draw_label(image, label, color, (xmin, ymin))
return image
| [
"cv2.rectangle",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.backend.backend",
"colorsys.hsv_to_rgb",
"cv2.putText",
"numpy.array",
"tensorflow.keras.backend.sigmoid",
"numpy.random.seed",
"numpy.around",
"tensorflow.keras.backend.tf.nn.swish",
"cv2.getTextSize",
"time.time",
"tensorflow.keras.backend.softplus",
"numpy.random.shuffle"
]
| [((1919, 1982), 'numpy.around', 'np.around', (['(base_anchors * target_shape[::-1] / base_shape[::-1])'], {}), '(base_anchors * target_shape[::-1] / base_shape[::-1])\n', (1928, 1982), True, 'import numpy as np\n'), ((2790, 2811), 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), '(10101)\n', (2804, 2811), True, 'import numpy as np\n'), ((2919, 2944), 'numpy.random.shuffle', 'np.random.shuffle', (['colors'], {}), '(colors)\n', (2936, 2944), True, 'import numpy as np\n'), ((2949, 2969), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (2963, 2969), True, 'import numpy as np\n'), ((3656, 3743), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + rect_width, y - rect_height)', 'color', 'cv2.FILLED'], {}), '(image, (x, y), (x + rect_width, y - rect_height), color, cv2.\n FILLED)\n', (3669, 3743), False, 'import cv2\n'), ((3761, 3904), 'cv2.putText', 'cv2.putText', (['image', 'text', '(x + padding, y - text_height + padding)', 'font'], {'fontScale': 'font_scale', 'color': '(255, 255, 255)', 'lineType': 'cv2.LINE_AA'}), '(image, text, (x + padding, y - text_height + padding), font,\n fontScale=font_scale, color=(255, 255, 255), lineType=cv2.LINE_AA)\n', (3772, 3904), False, 'import cv2\n'), ((393, 403), 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {}), '()\n', (401, 403), False, 'from tensorflow.keras.layers import Activation, ReLU, Multiply\n'), ((765, 776), 'tensorflow.keras.backend.backend', 'K.backend', ([], {}), '()\n', (774, 776), True, 'from tensorflow.keras import backend as K\n'), ((1016, 1028), 'tensorflow.keras.backend.sigmoid', 'K.sigmoid', (['x'], {}), '(x)\n', (1025, 1028), True, 'from tensorflow.keras import backend as K\n'), ((3246, 3270), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (3263, 3270), True, 'import numpy as np\n'), ((3452, 3514), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font'], {'fontScale': 'font_scale', 'thickness': '(1)'}), '(text, font, fontScale=font_scale, thickness=1)\n', (3467, 3514), False, 'import cv2\n'), ((4671, 4742), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xmin, ymin)', '(xmax, ymax)', 'color', '(1)', 'cv2.LINE_AA'], {}), '(image, (xmin, ymin), (xmax, ymax), color, 1, cv2.LINE_AA)\n', (4684, 4742), False, 'import cv2\n'), ((346, 359), 'tensorflow.keras.backend.softplus', 'K.softplus', (['x'], {}), '(x)\n', (356, 359), True, 'from tensorflow.keras import backend as K\n'), ((472, 481), 'tensorflow.keras.layers.ReLU', 'ReLU', (['(6.0)'], {}), '(6.0)\n', (476, 481), False, 'from tensorflow.keras.layers import Activation, ReLU, Multiply\n'), ((935, 951), 'tensorflow.keras.backend.tf.nn.swish', 'K.tf.nn.swish', (['x'], {}), '(x)\n', (948, 951), True, 'from tensorflow.keras import backend as K\n'), ((2386, 2403), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (2394, 2403), True, 'import numpy as np\n'), ((405, 429), 'tensorflow.keras.layers.Activation', 'Activation', (['hard_sigmoid'], {}), '(hard_sigmoid)\n', (415, 429), False, 'from tensorflow.keras.layers import Activation, ReLU, Multiply\n'), ((2633, 2656), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (2652, 2656), False, 'import colorsys\n'), ((3224, 3235), 'time.time', 'time.time', ([], {}), '()\n', (3233, 3235), False, 'import time\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pickle
# import csv
# from collections import namedtuple
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.animation as animation
# import matplotlib.colors as mc
class FEModel:
def __init__(self, name=None, hist_data=None):
self.name = name
self.hist_outs = hist_data
def tuple2dict(self, data):
"""
Used to convert the load-displacement data exported from models to a dictionary
"""
ld_data = []
for specimen in data:
sp_dict = dict()
load = []
disp = []
for action in specimen[0]:
load.append(action[1])
for action in specimen[1]:
disp.append(action[1])
sp_dict["Load"] = np.array(load)
sp_dict["Disp"] = -1 * np.array(disp)
            ld_data.append(sp_dict)
        return ld_data
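    # Illustrative input shape (added note; values are invented): each specimen
    # is a pair of sequences of (x, value) records, e.g.
    #   data = [([(0.0, 0.0), (1.0, 5.2)],     # load history
    #            [(0.0, -0.1), (1.0, -0.3)])]  # displacement history
    # which becomes [{"Load": array([0., 5.2]), "Disp": array([0.1, 0.3])}].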
def plot_history(self, x_axis, y_axis):
"""
        Plot one history output variable (y_axis) against another (x_axis) from the imported history data.
"""
plt.figure()
plt.plot(self.hist_outs[x_axis], self.hist_outs[y_axis])
@classmethod
def from_hist_pkl(cls, filename):
"""
Creates an object and imports history output data.
"""
with open(filename, "rb") as fh:
history_data = pickle.load(fh)
return cls(name=filename, hist_data=history_data)
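    # Example usage (added note; the file name and history keys are hypothetical):
    #   model = FEModel.from_hist_pkl("analysis_hist.pkl")
    #   model.plot_history("U2", "RF2")  # e.g. displacement vs. reaction force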
#
# class ParametricDB:
# def __init__(self, dimensions, responses):
# self.responses = responses
# self.dimensions = dimensions
#
# @classmethod
# def from_file(cls, filename):
# """
# Create from file.
#
# The file should be comma separated, first row titles, subsequent rows only numbers.
#
# Parameters
# ----------
# filename : str
# Relative path/filename.
#
# Return
# ------
# ParametricDB
#
# """
# # with open(filename, 'rU') as infile:
# # reader = csv.reader(infile)
# # n_dim = int(next(reader)[0].split()[0])
# # db = {c[0]: c[1:] for c in zip(*reader)}
#
# with open(filename, 'rU') as infile:
# reader = csv.reader(infile, delimiter=";")
# n_dim = int(next(reader)[0].split()[0])
# db = [c for c in zip(*reader)]
#
# all_responses = {i[0]: i[1:] for i in db[n_dim:]}
#
# dim_ticks = np.array([i[1:] for i in db[:n_dim]]).T
# dim_lengths = [len(set(dim_ticks[:, i])) for i in range(n_dim)]
# dim_names = [db[i][0] for i in range(n_dim)]
#
# # with open(filename, 'r') as infile:
# # all_lines = [[c.split(sep=":")[0]] + c.split(sep=":")[1].split(sep=",") for c in infile]
# # db = {c[0]: c[1:] for c in zip(*all_lines)}
#
# # for key in db.keys():
# # if len(key.split(",")) > 1:
# # n_dim = len(key.split(","))
# # dim_str = key
# # dim_ticks = np.array([c.split(sep=",") for c in db[dim_str]])
# # dim_lengths = [len(set(dim_ticks[:, i])) for i in range(n_dim)]
# # dim_names = dim_str.split(sep=",")
# full_list = {i[0]: i[1:][0] for i in zip(dim_names, dim_ticks.T)}
#
# # del db[dim_str]
#
# #df = pd.DataFrame(full_dict)
#
# Address = namedtuple("map", " ".join(dim_names))
# args = [tuple(sorted(set(dim_ticks[:, i]))) for i, j in enumerate(dim_names)]
# addressbook = Address(*args)
#
# mtx = {i: np.empty(dim_lengths) for i in all_responses.keys()}
# for response in all_responses.keys():
# for i, response_value in enumerate(all_responses[response]):
# current_idx = tuple(addressbook[idx].index(full_list[name][i]) for idx, name in enumerate(dim_names))
# mtx[response][current_idx] = response_value
# mtx[response].flags.writeable = False
#
# return cls(addressbook, mtx)
#
# def get_slice(self, slice_at, response):
# """
# Get a slice of the database.
#
# Parameters
# ----------
# slice_at : dict of int
# A dictionary of the keys to be sliced at the assigned values.
# response : str
# The name of the requested response to be sliced.
#
# """
#
# idx_arr = [0]*len(self.dimensions)
#
# for key in self.dimensions._fields:
# if key not in slice_at.keys():
# idx_arr[self.get_idx(key)] = slice(None, None)
# for name, value in zip(slice_at.keys(), slice_at.values()):
# idx_arr[self.get_idx(name)] = value
#
# return self.responses[response][idx_arr]
#
# def get_idx(self, attrname):
# """
# Get the index number of a parameter (dimension) in the database.
#
# Parameters
# ----------
# attrname : str
#
# """
# return(self.dimensions.index(self.dimensions.__getattribute__(attrname)))
#
# def contour_2d(self, slice_at, response, transpose=False, fig=None, sbplt=None):
# """
# Contour plot.
# :param slice_at:
# :return:
# """
# plt.rc('text', usetex=True)
# if fig is None:
# fig = plt.figure()
# if sbplt is None:
# ax = fig.add_subplot(111)
# else:
# ax = fig.add_subplot(sbplt)
# else:
# if sbplt is None:
# ax = fig.add_subplot(111)
# else:
# ax = fig.add_subplot(sbplt)
#
# axes = [key for key in self.dimensions._fields if key not in slice_at.keys()]
#
# if transpose:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[1])], self.dimensions[self.get_idx(axes[0])])
# Z = self.get_slice(slice_at, response).T
# x_label, y_label = axes[1], axes[0]
# else:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[0])], self.dimensions[self.get_idx(axes[1])])
# Z = self.get_slice(slice_at, response)
# x_label, y_label = axes[0], axes[1]
#
# ttl_values = [self.dimensions[self.get_idx(i)][slice_at[i]] for i in slice_at.keys()]
#
# # levels = np.arange(0, 2., 0.025)
# # sbplt = ax.contour(X.astype(np.float), Y.astype(np.float), Z.T, vmin=0.4, vmax=1., levels=levels, cmap=plt.cm.inferno)
# sbplt = ax.contour(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.gray_r)
# sbplt2 = ax.contourf(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.inferno)
# plt.clabel(sbplt, inline=1, fontsize=10)
# ttl = [i for i in zip(slice_at.keys(), ttl_values)]
# ttl = ", ".join(["=".join(i) for i in ttl])
# ax.set_title("$" + response + "$" + " for : " + "$" + ttl + "$")
# ax.set_xlabel("$"+x_label+"$")
# ax.set_ylabel("$"+y_label+"$")
#
# return fig
#
# def surf_3d(self, slice_at, response, transpose=False, fig=None, sbplt=None):
# """
# Surface plot.
# :param slice_at:
# :return:
# """
# #Convenient window dimensions
# # one subplot:
# # 2 side by side: Bbox(x0=0.0, y0=0.0, x1=6.79, y1=2.57)
# # azim elev = -160 30
# # 3 subplots side by side
# # 4 subplots: Bbox(x0=0.0, y0=0.0, x1=6.43, y1=5.14)
# #azim elev -160 30
# plt.rc('text', usetex=True)
# if fig is None:
# fig = plt.figure()
# if sbplt is None:
# ax = fig.add_subplot(111, projection='3d')
# else:
# ax = fig.add_subplot(sbplt, projection='3d')
# else:
# if sbplt is None:
# ax = fig.add_subplot(111, projection='3d')
# else:
# ax = fig.add_subplot(sbplt, projection='3d')
#
#
# axes = [key for key in self.dimensions._fields if key not in slice_at.keys()]
#
# if transpose:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[1])], self.dimensions[self.get_idx(axes[0])])
# Z = self.get_slice(slice_at, response).T
# x_label, y_label = axes[1], axes[0]
# else:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[0])], self.dimensions[self.get_idx(axes[1])])
# Z = self.get_slice(slice_at, response)
# x_label, y_label = axes[0], axes[1]
#
# ttl_values = [self.dimensions[self.get_idx(i)][slice_at[i]] for i in slice_at.keys()]
#
# sbplt = ax.plot_surface(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.inferno)
# # plt.clabel(sbplt, inline=1, fontsize=10)
# ttl = [i for i in zip(slice_at.keys(), ttl_values)]
# ttl = ", ".join(["=".join(i) for i in ttl])
# ax.set_title("$" + response + "$" + " for : " + "$" + ttl + "$")
# ax.set_xlabel("$"+x_label+"$")
# ax.set_ylabel("$"+y_label+"$")
#
# return fig
#
# def match_viewports(fig=None):
# if fig is None:
# fig = plt.gcf()
# fig.axes[1].view_init(azim=fig.axes[0].azim, elev=fig.axes[0].elev)
def main():
lambda01 = ParametricDB.from_file("data/fem/fem-results_lambda01.dat")
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 2])
lambda02 = ParametricDB.from_file("data/fem/fem-results-lambda02.dat")
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 2])
return
| [
"matplotlib.pyplot.plot",
"pickle.load",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots"
]
| [((9287, 9317), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (9299, 9317), True, 'import matplotlib.pyplot as plt\n'), ((9955, 9985), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (9967, 9985), True, 'import matplotlib.pyplot as plt\n'), ((10623, 10653), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (10635, 10653), True, 'import matplotlib.pyplot as plt\n'), ((11292, 11322), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (11304, 11322), True, 'import matplotlib.pyplot as plt\n'), ((11960, 11990), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (11972, 11990), True, 'import matplotlib.pyplot as plt\n'), ((12628, 12658), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (12640, 12658), True, 'import matplotlib.pyplot as plt\n'), ((13375, 13405), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (13387, 13405), True, 'import matplotlib.pyplot as plt\n'), ((14043, 14073), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (14055, 14073), True, 'import matplotlib.pyplot as plt\n'), ((14711, 14741), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (14723, 14741), True, 'import matplotlib.pyplot as plt\n'), ((15380, 15410), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (15392, 15410), True, 'import matplotlib.pyplot as plt\n'), ((16048, 16078), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (16060, 16078), True, 'import matplotlib.pyplot as plt\n'), ((16716, 16746), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)'}), '(nrows=2, ncols=3)\n', (16728, 16746), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1043), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1041, 1043), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1108), 'matplotlib.pyplot.plot', 'plt.plot', (['self.hist_outs[x_axis]', 'self.hist_outs[y_axis]'], {}), '(self.hist_outs[x_axis], self.hist_outs[y_axis])\n', (1060, 1108), True, 'import matplotlib.pyplot as plt\n'), ((818, 832), 'numpy.array', 'np.array', (['load'], {}), '(load)\n', (826, 832), True, 'import numpy as np\n'), ((1316, 1331), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (1327, 1331), False, 'import pickle\n'), ((868, 882), 'numpy.array', 'np.array', (['disp'], {}), '(disp)\n', (876, 882), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import json
import sys
from gigamonkeys.spreadsheets import spreadsheets
spreadsheet_id = sys.argv[1]
ranges = sys.argv[2:]
data = spreadsheets().get(spreadsheet_id, include_grid_data=bool(ranges), ranges=ranges)
json.dump(data, sys.stdout, indent=2)
| [
"gigamonkeys.spreadsheets.spreadsheets",
"json.dump"
]
| [((240, 277), 'json.dump', 'json.dump', (['data', 'sys.stdout'], {'indent': '(2)'}), '(data, sys.stdout, indent=2)\n', (249, 277), False, 'import json\n'), ((157, 171), 'gigamonkeys.spreadsheets.spreadsheets', 'spreadsheets', ([], {}), '()\n', (169, 171), False, 'from gigamonkeys.spreadsheets import spreadsheets\n')] |
import numpy as np
from sources import BaseSource
from sources.base import BaseSourceWrapper
from sources.preloaded import PreLoadedSource
import json
class WordsSource(BaseSource):
def __init__(self, source):
self._source = source
def __len__(self):
return len(self._source)
def _remove_apostrpohs(self, seq):
        res = ''.join(seq.split("'"))
res = ''.join(res.split('"'))
return res
def _clean(self, seq):
s = ''
for ch in seq.strip():
if ch.isalpha():
s += ch
return s
def get_sequences(self):
for seq_in, transcription in self._source.get_sequences():
transcription = self._remove_apostrpohs(transcription)
words = [self._clean(word) for word in transcription.split(' ')]
yield seq_in, words
class LabelSource(BaseSource):
def __init__(self, source, mapping_table):
self._source = source
self._mapping_table = mapping_table
def __len__(self):
return len(self._source)
def get_sequences(self):
for seq_in, seq_out in self._source.get_sequences():
label_seq = [self._mapping_table.encode(ch) for ch in seq_out]
yield seq_in, label_seq
class CTCAdaptedSource(BaseSource):
def __init__(self, source, padding_value=0):
self._source = source
self._padding = padding_value
def __len__(self):
return len(self._source)
def get_sequences(self):
for seq_in, seq_out in self._source.get_sequences():
seqs_in_pad = list(seq_in)
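            # Added note: CTC alignment needs the input to be at least
            # 2 * len(labels) + 1 frames long (room for a blank around every
            # label), so shorter inputs are padded with dummy frames below.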
while len(seqs_in_pad) <= 2 * len(seq_out) + 1:
n = len(seqs_in_pad[0])
seqs_in_pad.append([self._padding] * n)
yield seqs_in_pad, seq_out
class Normalizer:
def __init__(self):
self._mu = None
self._sd = None
@staticmethod
def from_json(path):
with open(path, 'r') as f:
s = f.read()
d = json.loads(s)
normalizer = Normalizer()
mu = np.array(d['mu'])
sd = np.array(d['sd'])
normalizer.set_mean(mu)
normalizer.set_deviation(sd)
return normalizer
def to_json(self, path):
d = {
'mu': np.array(self.mu).tolist(),
'sd': np.array(self.sd).tolist()
}
with open(path, 'w') as f:
f.write(json.dumps(d))
def set_mean(self, mu):
self._mu = mu
def set_deviation(self, sd):
self._sd = sd
@property
def mu(self):
return self._mu
@property
def sd(self):
return self._sd
def fit(self, X):
sequence = []
for x in X:
sequence.extend(x)
self._mu = np.mean(sequence, axis=0)
self._sd = np.std(sequence, axis=0)
def preprocess(self, X):
res = []
for x in X:
x_norm = (x - self._mu) / self._sd
# we do not want to normalize END-OF-STROKE flag which is last in the tuple
x_norm[:, -1] = np.array(x)[:, -1]
res.append(x_norm.tolist())
return res
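# Illustrative round trip for the Normalizer above (not executed; the file
# name and the sequence variables are placeholders):
#
#   normalizer = Normalizer()
#   normalizer.fit(train_sequences)
#   normalizer.to_json("normalizer.json")
#   ...
#   restored = Normalizer.from_json("normalizer.json")
#   normalized = restored.preprocess(test_sequences)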
class OffsetPointsSource(BaseSource):
def __init__(self, source):
self._source = source
def __len__(self):
return len(self._source)
def get_sequences(self):
for strokes, transcription in self._source.get_sequences():
x0, y0, t0 = strokes[0].points[0]
new_seq = []
for stroke in strokes:
points = []
for x, y, t in stroke.points:
points.append((x - x0, y - y0, t - t0, 0))
points[-1] = points[-1][:-1] + (1,)
new_seq.extend(points)
yield new_seq, transcription
class NormalizedSource(BaseSource):
def __init__(self, source, normalizer):
self._source = source
self._normalizer = normalizer
def __len__(self):
return len(self._source)
def get_sequences(self):
for points, transcription in self._source.get_sequences():
norm = self._normalizer.preprocess([points])[0]
yield norm, transcription
class DenormalizedSource(BaseSource):
def __init__(self, source, normalizer):
self._source = source
self._normalizer = normalizer
def __len__(self):
return len(self._source)
def get_sequences(self):
mu = self._normalizer.mu
sd = self._normalizer.sd
for points, transcription in self._source.get_sequences():
denormalized = [(p * sd + mu).tolist() for p in points]
for i, p in enumerate(denormalized):
p[3] = points[i][3]
yield denormalized, transcription
class H5pySource(BaseSource):
def __init__(self, h5py_ds, random_order=True):
self._h5py = h5py_ds
self._random = random_order
def __len__(self):
return len(self._h5py)
def get_sequences(self):
return self._h5py.get_data(random_order=self._random)
class PreprocessedSource(BaseSourceWrapper):
def __init__(self, source, preprocessor):
super().__init__(source)
self._preprocessor = preprocessor
def get_sequences(self):
for xs, ys in self._source.get_sequences():
yield self._preprocessor.pre_process_example(xs, ys)
class ConstrainedSource(BaseSourceWrapper):
def __init__(self, source, num_lines):
super().__init__(source)
self._num_lines = num_lines
self._use_all = (num_lines == 0)
def get_sequences(self):
for j, (seq_in, seq_out) in enumerate(self._source.get_sequences()):
#print(j, seq_out)
if j % 500 == 0:
print('Fetched {} examples'.format(j))
if j >= self._num_lines and not self._use_all:
break
yield seq_in, seq_out
class PlainListSource(BaseSourceWrapper):
def get_sequences(self):
for strokes, t in self._source.get_sequences():
points = [stroke.points for stroke in strokes]
yield points, t
| [
"numpy.mean",
"json.loads",
"json.dumps",
"numpy.array",
"numpy.std"
]
| [((2030, 2043), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2040, 2043), False, 'import json\n'), ((2091, 2108), 'numpy.array', 'np.array', (["d['mu']"], {}), "(d['mu'])\n", (2099, 2108), True, 'import numpy as np\n'), ((2122, 2139), 'numpy.array', 'np.array', (["d['sd']"], {}), "(d['sd'])\n", (2130, 2139), True, 'import numpy as np\n'), ((2787, 2812), 'numpy.mean', 'np.mean', (['sequence'], {'axis': '(0)'}), '(sequence, axis=0)\n', (2794, 2812), True, 'import numpy as np\n'), ((2832, 2856), 'numpy.std', 'np.std', (['sequence'], {'axis': '(0)'}), '(sequence, axis=0)\n', (2838, 2856), True, 'import numpy as np\n'), ((2435, 2448), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (2445, 2448), False, 'import json\n'), ((3088, 3099), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3096, 3099), True, 'import numpy as np\n'), ((2297, 2314), 'numpy.array', 'np.array', (['self.mu'], {}), '(self.mu)\n', (2305, 2314), True, 'import numpy as np\n'), ((2343, 2360), 'numpy.array', 'np.array', (['self.sd'], {}), '(self.sd)\n', (2351, 2360), True, 'import numpy as np\n')] |
"""
Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode,
PyCoreNet, and PyCoreNetIf.
"""
import os
import shutil
import socket
import threading
from socket import AF_INET
from socket import AF_INET6
from core.data import NodeData, LinkData
from core.enumerations import LinkTypes
from core.misc import ipaddress
class Position(object):
"""
Helper class for Cartesian coordinate position
"""
def __init__(self, x=None, y=None, z=None):
"""
Creates a Position instance.
:param x: x position
:param y: y position
:param z: z position
:return:
"""
self.x = x
self.y = y
self.z = z
def set(self, x=None, y=None, z=None):
"""
Returns True if the position has actually changed.
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
if self.x == x and self.y == y and self.z == z:
return False
self.x = x
self.y = y
self.z = z
return True
def get(self):
"""
Retrieve x,y,z position.
:return: x,y,z position tuple
:rtype: tuple
"""
return self.x, self.y, self.z
class PyCoreObj(object):
"""
Base class for CORE objects (nodes and networks)
"""
apitype = None
# TODO: appears start has no usage, verify and remove
def __init__(self, session, objid=None, name=None, start=True):
"""
Creates a PyCoreObj instance.
:param core.session.Session session: CORE session object
:param int objid: object id
:param str name: object name
:param bool start: start value
:return:
"""
self.session = session
if objid is None:
objid = session.get_object_id()
self.objid = objid
if name is None:
name = "o%s" % self.objid
self.name = name
self.type = None
self.server = None
self.services = None
# ifindex is key, PyCoreNetIf instance is value
self._netif = {}
self.ifindex = 0
self.canvas = None
self.icon = None
self.opaque = None
self.position = Position()
def startup(self):
"""
Each object implements its own startup method.
:return: nothing
"""
raise NotImplementedError
def shutdown(self):
"""
Each object implements its own shutdown method.
:return: nothing
"""
raise NotImplementedError
def setposition(self, x=None, y=None, z=None):
"""
Set the (x,y,z) position of the object.
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
return self.position.set(x=x, y=y, z=z)
def getposition(self):
"""
Return an (x,y,z) tuple representing this object's position.
:return: x,y,z position tuple
:rtype: tuple
"""
return self.position.get()
def ifname(self, ifindex):
"""
Retrieve interface name for index.
:param int ifindex: interface index
:return: interface name
:rtype: str
"""
return self._netif[ifindex].name
def netifs(self, sort=False):
"""
Retrieve network interfaces, sorted if desired.
:param bool sort: boolean used to determine if interfaces should be sorted
:return: network interfaces
:rtype: list
"""
if sort:
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
else:
return self._netif.itervalues()
def numnetif(self):
"""
Return the attached interface count.
:return: number of network interfaces
:rtype: int
"""
return len(self._netif)
def getifindex(self, netif):
"""
Retrieve index for an interface.
:param PyCoreNetIf netif: interface to get index for
:return: interface index if found, -1 otherwise
:rtype: int
"""
for ifindex in self._netif:
if self._netif[ifindex] is netif:
return ifindex
return -1
def newifindex(self):
"""
Create a new interface index.
:return: interface index
:rtype: int
"""
while self.ifindex in self._netif:
self.ifindex += 1
ifindex = self.ifindex
self.ifindex += 1
return ifindex
def data(self, message_type, lat=None, lon=None, alt=None):
"""
Build a data object for this node.
:param message_type: purpose for the data object we are creating
:param str lat: latitude
:param str lon: longitude
:param str alt: altitude
:return: node data object
:rtype: core.data.NodeData
"""
if self.apitype is None:
return None
x, y, _ = self.getposition()
model = self.type
emulation_server = self.server
services = self.services
if services is not None:
services = "|".join([service.name for service in services])
node_data = NodeData(
message_type=message_type,
id=self.objid,
node_type=self.apitype,
name=self.name,
emulation_id=self.objid,
canvas=self.canvas,
icon=self.icon,
opaque=self.opaque,
x_position=x,
y_position=y,
latitude=lat,
longitude=lon,
altitude=alt,
model=model,
emulation_server=emulation_server,
services=services
)
return node_data
def all_link_data(self, flags):
"""
Build CORE Link data for this object. There is no default
method for PyCoreObjs as PyCoreNodes do not implement this but
PyCoreNets do.
:param flags: message flags
:return: list of link data
:rtype: core.data.LinkData
"""
return []
class PyCoreNode(PyCoreObj):
"""
Base class for CORE nodes.
"""
def __init__(self, session, objid=None, name=None, start=True):
"""
Create a PyCoreNode instance.
:param core.session.Session session: CORE session object
:param int objid: object id
:param str name: object name
:param bool start: boolean for starting
"""
super(PyCoreNode, self).__init__(session, objid, name, start=start)
self.services = []
self.nodedir = None
self.tmpnodedir = False
def addservice(self, service):
"""
Add a services to the service list.
:param core.service.CoreService service: service to add
:return: nothing
"""
if service is not None:
self.services.append(service)
def makenodedir(self):
"""
Create the node directory.
:return: nothing
"""
if self.nodedir is None:
self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
os.makedirs(self.nodedir)
self.tmpnodedir = True
else:
self.tmpnodedir = False
def rmnodedir(self):
"""
Remove the node directory, unless preserve directory has been set.
:return: nothing
"""
preserve = self.session.options.get_config("preservedir") == "1"
if preserve:
return
if self.tmpnodedir:
shutil.rmtree(self.nodedir, ignore_errors=True)
def addnetif(self, netif, ifindex):
"""
Add network interface to node and set the network interface index if successful.
:param PyCoreNetIf netif: network interface to add
:param int ifindex: interface index
:return: nothing
"""
if ifindex in self._netif:
raise ValueError("ifindex %s already exists" % ifindex)
self._netif[ifindex] = netif
# TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
netif.netindex = ifindex
def delnetif(self, ifindex):
"""
Delete a network interface
:param int ifindex: interface index to delete
:return: nothing
"""
if ifindex not in self._netif:
raise ValueError("ifindex %s does not exist" % ifindex)
netif = self._netif.pop(ifindex)
netif.shutdown()
del netif
# TODO: net parameter is not used, remove
def netif(self, ifindex, net=None):
"""
Retrieve network interface.
:param int ifindex: index of interface to retrieve
:param PyCoreNetIf net: network node
:return: network interface, or None if not found
:rtype: PyCoreNetIf
"""
if ifindex in self._netif:
return self._netif[ifindex]
else:
return None
def attachnet(self, ifindex, net):
"""
Attach a network.
:param int ifindex: interface of index to attach
:param PyCoreNetIf net: network to attach
:return:
"""
if ifindex not in self._netif:
raise ValueError("ifindex %s does not exist" % ifindex)
self._netif[ifindex].attachnet(net)
def detachnet(self, ifindex):
"""
Detach network interface.
:param int ifindex: interface index to detach
:return: nothing
"""
if ifindex not in self._netif:
raise ValueError("ifindex %s does not exist" % ifindex)
self._netif[ifindex].detachnet()
def setposition(self, x=None, y=None, z=None):
"""
Set position.
:param x: x position
:param y: y position
:param z: z position
:return: nothing
"""
changed = super(PyCoreNode, self).setposition(x, y, z)
if changed:
for netif in self.netifs(sort=True):
netif.setposition(x, y, z)
def commonnets(self, obj, want_ctrl=False):
"""
Given another node or net object, return common networks between
this node and that object. A list of tuples is returned, with each tuple
consisting of (network, interface1, interface2).
:param obj: object to get common network with
:param want_ctrl: flag set to determine if control network are wanted
:return: tuples of common networks
:rtype: list
"""
common = []
for netif1 in self.netifs():
if not want_ctrl and hasattr(netif1, "control"):
continue
for netif2 in obj.netifs():
if netif1.net == netif2.net:
common.append((netif1.net, netif1, netif2))
return common
def check_cmd(self, args):
"""
Runs shell command on node.
:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
raise NotImplementedError
def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.
:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
raise NotImplementedError
def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
raise NotImplementedError
def termcmdstring(self, sh):
"""
Create a terminal command string.
:param str sh: shell to execute command in
:return: str
"""
raise NotImplementedError
class PyCoreNet(PyCoreObj):
"""
Base class for networks
"""
linktype = LinkTypes.WIRED.value
def __init__(self, session, objid, name, start=True):
"""
Create a PyCoreNet instance.
:param core.session.Session session: CORE session object
:param int objid: object id
:param str name: object name
:param bool start: should object start
"""
super(PyCoreNet, self).__init__(session, objid, name, start=start)
self._linked = {}
self._linked_lock = threading.Lock()
def startup(self):
"""
Each object implements its own startup method.
:return: nothing
"""
raise NotImplementedError
def shutdown(self):
"""
Each object implements its own shutdown method.
:return: nothing
"""
raise NotImplementedError
def attach(self, netif):
"""
Attach network interface.
:param PyCoreNetIf netif: network interface to attach
:return: nothing
"""
i = self.newifindex()
self._netif[i] = netif
netif.netifi = i
with self._linked_lock:
self._linked[netif] = {}
def detach(self, netif):
"""
Detach network interface.
:param PyCoreNetIf netif: network interface to detach
:return: nothing
"""
del self._netif[netif.netifi]
netif.netifi = None
with self._linked_lock:
del self._linked[netif]
def all_link_data(self, flags):
"""
Build link data objects for this network. Each link object describes a link
between this network and a node.
"""
all_links = []
# build a link message from this network node to each node having a
# connected interface
for netif in self.netifs(sort=True):
if not hasattr(netif, "node"):
continue
otherobj = netif.node
uni = False
if otherobj is None:
# two layer-2 switches/hubs linked together via linknet()
if not hasattr(netif, "othernet"):
continue
otherobj = netif.othernet
if otherobj.objid == self.objid:
continue
netif.swapparams('_params_up')
upstream_params = netif.getparams()
netif.swapparams('_params_up')
if netif.getparams() != upstream_params:
uni = True
unidirectional = 0
if uni:
unidirectional = 1
interface2_ip4 = None
interface2_ip4_mask = None
interface2_ip6 = None
interface2_ip6_mask = None
for address in netif.addrlist:
ip, _sep, mask = address.partition("/")
mask = int(mask)
if ipaddress.is_ipv4_address(ip):
family = AF_INET
ipl = socket.inet_pton(family, ip)
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
interface2_ip4_mask = mask
else:
family = AF_INET6
ipl = socket.inet_pton(family, ip)
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
interface2_ip6_mask = mask
link_data = LinkData(
message_type=flags,
node1_id=self.objid,
node2_id=otherobj.objid,
link_type=self.linktype,
unidirectional=unidirectional,
interface2_id=otherobj.getifindex(netif),
interface2_mac=netif.hwaddr,
interface2_ip4=interface2_ip4,
interface2_ip4_mask=interface2_ip4_mask,
interface2_ip6=interface2_ip6,
interface2_ip6_mask=interface2_ip6_mask,
delay=netif.getparam("delay"),
bandwidth=netif.getparam("bw"),
dup=netif.getparam("duplicate"),
jitter=netif.getparam("jitter")
)
all_links.append(link_data)
if not uni:
continue
netif.swapparams('_params_up')
link_data = LinkData(
message_type=0,
node1_id=otherobj.objid,
node2_id=self.objid,
unidirectional=1,
delay=netif.getparam("delay"),
bandwidth=netif.getparam("bw"),
dup=netif.getparam("duplicate"),
jitter=netif.getparam("jitter")
)
netif.swapparams('_params_up')
all_links.append(link_data)
return all_links
class PyCoreNetIf(object):
"""
Base class for network interfaces.
"""
def __init__(self, node, name, mtu):
"""
Creates a PyCoreNetIf instance.
:param core.coreobj.PyCoreNode node: node for interface
:param str name: interface name
:param mtu: mtu value
"""
self.node = node
self.name = name
if not isinstance(mtu, (int, long)):
raise ValueError
self.mtu = mtu
self.net = None
self._params = {}
self.addrlist = []
self.hwaddr = None
# placeholder position hook
self.poshook = lambda a, b, c, d: None
# used with EMANE
self.transport_type = None
# interface index on the network
self.netindex = None
# index used to find flow data
self.flow_id = None
def startup(self):
"""
Startup method for the interface.
:return: nothing
"""
pass
def shutdown(self):
"""
Shutdown method for the interface.
:return: nothing
"""
pass
def attachnet(self, net):
"""
Attach network.
:param core.coreobj.PyCoreNet net: network to attach
:return: nothing
"""
if self.net:
self.detachnet()
self.net = None
net.attach(self)
self.net = net
def detachnet(self):
"""
Detach from a network.
:return: nothing
"""
if self.net is not None:
self.net.detach(self)
def addaddr(self, addr):
"""
Add address.
:param str addr: address to add
:return: nothing
"""
self.addrlist.append(addr)
def deladdr(self, addr):
"""
Delete address.
:param str addr: address to delete
:return: nothing
"""
self.addrlist.remove(addr)
def sethwaddr(self, addr):
"""
Set hardware address.
:param core.misc.ipaddress.MacAddress addr: hardware address to set to.
:return: nothing
"""
self.hwaddr = addr
def getparam(self, key):
"""
        Retrieve a parameter value, or None if the parameter does not exist.
:param key: parameter to get value for
:return: parameter value
"""
return self._params.get(key)
def getparams(self):
"""
Return (key, value) pairs for parameters.
"""
parameters = []
for k in sorted(self._params.keys()):
parameters.append((k, self._params[k]))
return parameters
def setparam(self, key, value):
"""
Set a parameter value, returns True if the parameter has changed.
:param key: parameter name to set
:param value: parameter value
:return: True if parameter changed, False otherwise
"""
# treat None and 0 as unchanged values
current_value = self._params.get(key)
if current_value == value or current_value <= 0 and value <= 0:
return False
self._params[key] = value
return True
def swapparams(self, name):
"""
Swap out parameters dict for name. If name does not exist,
        initialize it. This is for supporting separate upstream/downstream
parameters when two layer-2 nodes are linked together.
:param str name: name of parameter to swap
:return: nothing
"""
tmp = self._params
if not hasattr(self, name):
setattr(self, name, {})
self._params = getattr(self, name)
setattr(self, name, tmp)
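    # Illustrative read of the upstream parameter set via swapparams(), the
    # same swap-read-swap pattern used in PyCoreNet.all_link_data above:
    #
    #   netif.swapparams('_params_up')
    #   upstream_params = netif.getparams()
    #   netif.swapparams('_params_up')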
def setposition(self, x, y, z):
"""
Dispatch position hook handler.
:param x: x position
:param y: y position
:param z: z position
:return: nothing
"""
self.poshook(self, x, y, z)
| [
"core.misc.ipaddress.is_ipv4_address",
"os.makedirs",
"core.misc.ipaddress.IpAddress",
"threading.Lock",
"os.path.join",
"socket.inet_pton",
"core.data.NodeData",
"shutil.rmtree"
]
| [((5487, 5814), 'core.data.NodeData', 'NodeData', ([], {'message_type': 'message_type', 'id': 'self.objid', 'node_type': 'self.apitype', 'name': 'self.name', 'emulation_id': 'self.objid', 'canvas': 'self.canvas', 'icon': 'self.icon', 'opaque': 'self.opaque', 'x_position': 'x', 'y_position': 'y', 'latitude': 'lat', 'longitude': 'lon', 'altitude': 'alt', 'model': 'model', 'emulation_server': 'emulation_server', 'services': 'services'}), '(message_type=message_type, id=self.objid, node_type=self.apitype,\n name=self.name, emulation_id=self.objid, canvas=self.canvas, icon=self.\n icon, opaque=self.opaque, x_position=x, y_position=y, latitude=lat,\n longitude=lon, altitude=alt, model=model, emulation_server=\n emulation_server, services=services)\n', (5495, 5814), False, 'from core.data import NodeData, LinkData\n'), ((12861, 12877), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (12875, 12877), False, 'import threading\n'), ((7371, 7430), 'os.path.join', 'os.path.join', (['self.session.session_dir', "(self.name + '.conf')"], {}), "(self.session.session_dir, self.name + '.conf')\n", (7383, 7430), False, 'import os\n'), ((7443, 7468), 'os.makedirs', 'os.makedirs', (['self.nodedir'], {}), '(self.nodedir)\n', (7454, 7468), False, 'import os\n'), ((7859, 7906), 'shutil.rmtree', 'shutil.rmtree', (['self.nodedir'], {'ignore_errors': '(True)'}), '(self.nodedir, ignore_errors=True)\n', (7872, 7906), False, 'import shutil\n'), ((15260, 15289), 'core.misc.ipaddress.is_ipv4_address', 'ipaddress.is_ipv4_address', (['ip'], {}), '(ip)\n', (15285, 15289), False, 'from core.misc import ipaddress\n'), ((15354, 15382), 'socket.inet_pton', 'socket.inet_pton', (['family', 'ip'], {}), '(family, ip)\n', (15370, 15382), False, 'import socket\n'), ((15420, 15463), 'core.misc.ipaddress.IpAddress', 'ipaddress.IpAddress', ([], {'af': 'family', 'address': 'ipl'}), '(af=family, address=ipl)\n', (15439, 15463), False, 'from core.misc import ipaddress\n'), ((15597, 15625), 'socket.inet_pton', 'socket.inet_pton', (['family', 'ip'], {}), '(family, ip)\n', (15613, 15625), False, 'import socket\n'), ((15663, 15706), 'core.misc.ipaddress.IpAddress', 'ipaddress.IpAddress', ([], {'af': 'family', 'address': 'ipl'}), '(af=family, address=ipl)\n', (15682, 15706), False, 'from core.misc import ipaddress\n')] |
# External cracking script, part of https://github.com/mmmds/WirelessDiscoverCrackScan
import datetime
import subprocess
import os
### CONFIGURATION
HASHCAT_DIR = "C:\\hashcat-5.1.0"
HASHCAT_EXE = "hashcat64.exe"
LOG_FILE = "crack_log.txt"
DICT_DIR = "./dicts"
def load_dict_list():
for r,d,f in os.walk(DICT_DIR):
return f
def parse_log():
r = {}
with open(LOG_FILE, "r") as f:
for line in f.readlines():
try:
a = line.split("/")
date = a[0]
dict_file = a[1].strip()
hash_file = a[2].split(".")[0].strip()
r[(hash_file, dict_file)] = date
except:
pass
return r
def append_log(file, dictionary):
text = "{}/{}/{}".format(str(datetime.datetime.now()), dictionary, file)
with open(LOG_FILE, "a") as f:
f.write("\n" + text)
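# A resulting log line looks like "<date>/<dictionary>/<capture file>", e.g.
# (values are illustrative): 2021-03-01 18:25:43.511000/rockyou.txt/capture01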
def read_files():
result = ([],[])
files = os.listdir(".")
for f in files:
if f.endswith(".16800"):
result[0].append(f.split(".")[0])
elif f.endswith(".2500"):
result[1].append(f.split(".")[0])
return result
def process(files, t, logs, dicts):
for f in files:
for d in dicts:
if (f.split(".")[0], d) not in logs:
print("\n\n######## {} {}\n\n".format(f, d))
cwd = os.getcwd()
subprocess.Popen([HASHCAT_DIR+ "\\" + HASHCAT_EXE, "-m", t, "{}\\{}.{}".format(cwd,f, t), "{}\\{}\\{}".format(cwd,DICT_DIR, d)], cwd = HASHCAT_DIR).wait()
append_log(f, d)
else:
print("\n\n-----------{} {} in logs\n\n".format(f, d))
files = read_files()
logs = parse_log()
dicts = load_dict_list()
print(dicts)
print(files)
print(logs)
pmkid = files[0]
hs4 = files[1]
process(pmkid, "16800", logs, dicts)
process(hs4, "2500", logs, dicts)
| [
"datetime.datetime.now",
"os.listdir",
"os.walk",
"os.getcwd"
]
| [((303, 320), 'os.walk', 'os.walk', (['DICT_DIR'], {}), '(DICT_DIR)\n', (310, 320), False, 'import os\n'), ((946, 961), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (956, 961), False, 'import os\n'), ((786, 809), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (807, 809), False, 'import datetime\n'), ((1376, 1387), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1385, 1387), False, 'import os\n')] |
"""
Keep track of which LBRY Files are downloading and store their LBRY File specific metadata
"""
import logging
import os
from twisted.enterprise import adbapi
from twisted.internet import defer, task, reactor
from twisted.python.failure import Failure
from lbrynet.reflector.reupload import reflect_stream
from lbrynet.core.PaymentRateManager import NegotiatedPaymentRateManager
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError
from lbrynet.cryptstream.client.CryptStreamDownloader import CurrentlyStoppingError
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet import conf
log = logging.getLogger(__name__)
def safe_start_looping_call(looping_call, seconds=3600):
if not looping_call.running:
looping_call.start(seconds)
def safe_stop_looping_call(looping_call):
if looping_call.running:
looping_call.stop()
class EncryptedFileManager(object):
"""Keeps track of currently opened LBRY Files, their options, and
their LBRY File specific metadata.
"""
def __init__(self, session, stream_info_manager, sd_identifier, download_directory=None):
self.session = session
self.stream_info_manager = stream_info_manager
# TODO: why is sd_identifier part of the file manager?
self.sd_identifier = sd_identifier
self.lbry_files = []
self.sql_db = None
if download_directory:
self.download_directory = download_directory
else:
self.download_directory = os.getcwd()
self.lbry_file_reflector = task.LoopingCall(self.reflect_lbry_files)
log.debug("Download directory for EncryptedFileManager: %s", str(self.download_directory))
@defer.inlineCallbacks
def setup(self):
yield self._open_db()
yield self._add_to_sd_identifier()
yield self._start_lbry_files()
if conf.settings['reflect_uploads']:
safe_start_looping_call(self.lbry_file_reflector)
def get_lbry_file_status(self, lbry_file):
return self._get_lbry_file_status(lbry_file.rowid)
def set_lbry_file_data_payment_rate(self, lbry_file, new_rate):
return self._set_lbry_file_payment_rate(lbry_file.rowid, new_rate)
def change_lbry_file_status(self, lbry_file, status):
log.debug("Changing status of %s to %s", lbry_file.stream_hash, status)
return self._change_file_status(lbry_file.rowid, status)
def get_lbry_file_status_reports(self):
ds = []
for lbry_file in self.lbry_files:
ds.append(lbry_file.status())
dl = defer.DeferredList(ds)
def filter_failures(status_reports):
return [status_report for success, status_report in status_reports if success is True]
dl.addCallback(filter_failures)
return dl
def save_sd_blob_hash_to_stream(self, stream_hash, sd_hash):
return self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, sd_hash)
def _add_to_sd_identifier(self):
downloader_factory = ManagedEncryptedFileDownloaderFactory(self)
self.sd_identifier.add_stream_downloader_factory(
EncryptedFileStreamType, downloader_factory)
@defer.inlineCallbacks
def _check_stream_is_managed(self, stream_hash):
# check that all the streams in the stream_info_manager are also
# tracked by lbry_file_manager and fix any streams that aren't.
rowid = yield self._get_rowid_for_stream_hash(stream_hash)
if rowid is not None:
defer.returnValue(True)
rate = self.session.base_payment_rate_manager.min_blob_data_payment_rate
key, stream_name, file_name = yield self.stream_info_manager.get_stream_info(stream_hash)
log.warning("Trying to fix missing lbry file for %s", stream_name.decode('hex'))
yield self._save_lbry_file(stream_hash, rate)
@defer.inlineCallbacks
def _check_stream_info_manager(self):
def _iter_streams(stream_hashes):
for stream_hash in stream_hashes:
yield self._check_stream_is_managed(stream_hash)
stream_hashes = yield self.stream_info_manager.get_all_streams()
log.debug("Checking %s streams", len(stream_hashes))
yield defer.DeferredList(list(_iter_streams(stream_hashes)))
@defer.inlineCallbacks
def _start_lbry_files(self):
yield self._check_stream_info_manager()
files_and_options = yield self._get_all_lbry_files()
yield defer.DeferredList([
self._set_options_and_restore(rowid, stream_hash, options)
for rowid, stream_hash, options in files_and_options
])
log.info("Started %i lbry files", len(self.lbry_files))
@defer.inlineCallbacks
def _set_options_and_restore(self, rowid, stream_hash, options):
try:
b_prm = self.session.base_payment_rate_manager
payment_rate_manager = NegotiatedPaymentRateManager(
b_prm, self.session.blob_tracker)
downloader = yield self.start_lbry_file(
rowid, stream_hash, payment_rate_manager, blob_data_rate=options)
yield downloader.restore()
except Exception:
log.error('An error occurred while starting a lbry file (%s, %s, %s)',
rowid, stream_hash, options)
@defer.inlineCallbacks
def start_lbry_file(self, rowid, stream_hash,
payment_rate_manager, blob_data_rate=None,
download_directory=None, file_name=None):
if not download_directory:
download_directory = self.download_directory
payment_rate_manager.min_blob_data_payment_rate = blob_data_rate
lbry_file_downloader = ManagedEncryptedFileDownloader(
rowid,
stream_hash,
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.stream_info_manager,
self,
payment_rate_manager,
self.session.wallet,
download_directory,
file_name=file_name
)
yield lbry_file_downloader.set_stream_info()
self.lbry_files.append(lbry_file_downloader)
defer.returnValue(lbry_file_downloader)
@defer.inlineCallbacks
def _stop_lbry_file(self, lbry_file):
def wait_for_finished(lbry_file, count=2):
if count or lbry_file.saving_status is not False:
return task.deferLater(reactor, 1, self._stop_lbry_file, lbry_file, count=count - 1)
try:
yield lbry_file.stop(change_status=False)
self.lbry_files.remove(lbry_file)
except CurrentlyStoppingError:
yield wait_for_finished(lbry_file)
except AlreadyStoppedError:
pass
finally:
defer.returnValue(None)
def _stop_lbry_files(self):
log.info("Stopping %i lbry files", len(self.lbry_files))
lbry_files = self.lbry_files
for lbry_file in lbry_files:
yield self._stop_lbry_file(lbry_file)
@defer.inlineCallbacks
def add_lbry_file(self, stream_hash, payment_rate_manager, blob_data_rate=None,
download_directory=None, file_name=None):
rowid = yield self._save_lbry_file(stream_hash, blob_data_rate)
lbry_file = yield self.start_lbry_file(rowid, stream_hash, payment_rate_manager,
blob_data_rate, download_directory,
file_name)
defer.returnValue(lbry_file)
@defer.inlineCallbacks
def delete_lbry_file(self, lbry_file, delete_file=False):
if lbry_file not in self.lbry_files:
raise ValueError("Could not find that LBRY file")
def wait_for_finished(count=2):
if count <= 0 or lbry_file.saving_status is False:
return True
else:
return task.deferLater(reactor, 1, wait_for_finished, count=count - 1)
full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
try:
yield lbry_file.stop()
except (AlreadyStoppedError, CurrentlyStoppingError):
yield wait_for_finished()
self.lbry_files.remove(lbry_file)
yield self._delete_lbry_file_options(lbry_file.rowid)
yield lbry_file.delete_data()
# TODO: delete this
# get count for stream hash returns the count of the lbry files with the stream hash
# in the lbry_file_options table, which will soon be removed.
stream_count = yield self.get_count_for_stream_hash(lbry_file.stream_hash)
if stream_count == 0:
yield self.stream_info_manager.delete_stream(lbry_file.stream_hash)
else:
msg = ("Can't delete stream info for %s, count is %i\n"
"The call that resulted in this warning will\n"
"be removed in the database refactor")
log.warning(msg, lbry_file.stream_hash, stream_count)
if delete_file and os.path.isfile(full_path):
os.remove(full_path)
defer.returnValue(True)
def toggle_lbry_file_running(self, lbry_file):
"""Toggle whether a stream reader is currently running"""
for l in self.lbry_files:
if l == lbry_file:
return l.toggle_running()
return defer.fail(Failure(ValueError("Could not find that LBRY file")))
def _reflect_lbry_files(self):
for lbry_file in self.lbry_files:
yield reflect_stream(lbry_file)
@defer.inlineCallbacks
def reflect_lbry_files(self):
yield defer.DeferredList(list(self._reflect_lbry_files()))
@defer.inlineCallbacks
def stop(self):
safe_stop_looping_call(self.lbry_file_reflector)
yield defer.DeferredList(list(self._stop_lbry_files()))
if self.sql_db:
yield self.sql_db.close()
self.sql_db = None
log.info("Stopped %s", self)
defer.returnValue(True)
def get_count_for_stream_hash(self, stream_hash):
return self._get_count_for_stream_hash(stream_hash)
######### database calls #########
def _open_db(self):
# check_same_thread=False is solely to quiet a spurious error that appears to be due
# to a bug in twisted, where the connection is closed by a different thread than the
# one that opened it. The individual connections in the pool are not used in multiple
# threads.
self.sql_db = adbapi.ConnectionPool(
"sqlite3",
os.path.join(self.session.db_dir, "lbryfile_info.db"),
check_same_thread=False
)
return self.sql_db.runQuery(
"create table if not exists lbry_file_options (" +
" blob_data_rate real, " +
" status text," +
" stream_hash text,"
" foreign key(stream_hash) references lbry_files(stream_hash)" +
")"
)
@rerun_if_locked
def _save_lbry_file(self, stream_hash, data_payment_rate):
def do_save(db_transaction):
row = (data_payment_rate, ManagedEncryptedFileDownloader.STATUS_STOPPED, stream_hash)
db_transaction.execute("insert into lbry_file_options values (?, ?, ?)", row)
return db_transaction.lastrowid
return self.sql_db.runInteraction(do_save)
@rerun_if_locked
def _delete_lbry_file_options(self, rowid):
return self.sql_db.runQuery("delete from lbry_file_options where rowid = ?",
(rowid,))
@rerun_if_locked
def _set_lbry_file_payment_rate(self, rowid, new_rate):
return self.sql_db.runQuery(
"update lbry_file_options set blob_data_rate = ? where rowid = ?",
(new_rate, rowid))
@rerun_if_locked
def _get_all_lbry_files(self):
d = self.sql_db.runQuery("select rowid, stream_hash, blob_data_rate from lbry_file_options")
return d
@rerun_if_locked
def _change_file_status(self, rowid, new_status):
return self.sql_db.runQuery("update lbry_file_options set status = ? where rowid = ?",
(new_status, rowid))
@rerun_if_locked
def _get_lbry_file_status(self, rowid):
d = self.sql_db.runQuery("select status from lbry_file_options where rowid = ?",
(rowid,))
d.addCallback(lambda r: (r[0][0] if len(r) else None))
return d
@rerun_if_locked
def _get_count_for_stream_hash(self, stream_hash):
d = self.sql_db.runQuery("select count(*) from lbry_file_options where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda r: (r[0][0] if r else 0))
return d
@rerun_if_locked
def _get_rowid_for_stream_hash(self, stream_hash):
d = self.sql_db.runQuery("select rowid from lbry_file_options where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda r: (r[0][0] if len(r) else None))
return d
| [
"logging.getLogger",
"lbrynet.file_manager.EncryptedFileDownloader.ManagedEncryptedFileDownloaderFactory",
"twisted.internet.task.LoopingCall",
"twisted.internet.defer.returnValue",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"lbrynet.reflector.reupload.reflect_stream",
"lbrynet.file_manager.EncryptedFileDownloader.ManagedEncryptedFileDownloader",
"twisted.internet.defer.DeferredList",
"lbrynet.core.PaymentRateManager.NegotiatedPaymentRateManager",
"twisted.internet.task.deferLater",
"os.remove"
]
| [((893, 920), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (910, 920), False, 'import logging\n'), ((1836, 1877), 'twisted.internet.task.LoopingCall', 'task.LoopingCall', (['self.reflect_lbry_files'], {}), '(self.reflect_lbry_files)\n', (1852, 1877), False, 'from twisted.internet import defer, task, reactor\n'), ((2860, 2882), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['ds'], {}), '(ds)\n', (2878, 2882), False, 'from twisted.internet import defer, task, reactor\n'), ((3310, 3353), 'lbrynet.file_manager.EncryptedFileDownloader.ManagedEncryptedFileDownloaderFactory', 'ManagedEncryptedFileDownloaderFactory', (['self'], {}), '(self)\n', (3347, 3353), False, 'from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory\n'), ((6022, 6281), 'lbrynet.file_manager.EncryptedFileDownloader.ManagedEncryptedFileDownloader', 'ManagedEncryptedFileDownloader', (['rowid', 'stream_hash', 'self.session.peer_finder', 'self.session.rate_limiter', 'self.session.blob_manager', 'self.stream_info_manager', 'self', 'payment_rate_manager', 'self.session.wallet', 'download_directory'], {'file_name': 'file_name'}), '(rowid, stream_hash, self.session.peer_finder,\n self.session.rate_limiter, self.session.blob_manager, self.\n stream_info_manager, self, payment_rate_manager, self.session.wallet,\n download_directory, file_name=file_name)\n', (6052, 6281), False, 'from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader\n'), ((6525, 6564), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['lbry_file_downloader'], {}), '(lbry_file_downloader)\n', (6542, 6564), False, 'from twisted.internet import defer, task, reactor\n'), ((7862, 7890), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['lbry_file'], {}), '(lbry_file)\n', (7879, 7890), False, 'from twisted.internet import defer, task, reactor\n'), ((8346, 8409), 'os.path.join', 'os.path.join', (['lbry_file.download_directory', 'lbry_file.file_name'], {}), '(lbry_file.download_directory, lbry_file.file_name)\n', (8358, 8409), False, 'import os\n'), ((9460, 9483), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(True)'], {}), '(True)\n', (9477, 9483), False, 'from twisted.internet import defer, task, reactor\n'), ((10343, 10366), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(True)'], {}), '(True)\n', (10360, 10366), False, 'from twisted.internet import defer, task, reactor\n'), ((1789, 1800), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1798, 1800), False, 'import os\n'), ((3804, 3827), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(True)'], {}), '(True)\n', (3821, 3827), False, 'from twisted.internet import defer, task, reactor\n'), ((5197, 5259), 'lbrynet.core.PaymentRateManager.NegotiatedPaymentRateManager', 'NegotiatedPaymentRateManager', (['b_prm', 'self.session.blob_tracker'], {}), '(b_prm, self.session.blob_tracker)\n', (5225, 5259), False, 'from lbrynet.core.PaymentRateManager import NegotiatedPaymentRateManager\n'), ((7130, 7153), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['None'], {}), '(None)\n', (7147, 7153), False, 'from twisted.internet import defer, task, reactor\n'), ((9391, 9416), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (9405, 9416), False, 'import os\n'), ((9430, 9450), 'os.remove', 'os.remove', (['full_path'], {}), '(full_path)\n', (9439, 9450), False, 'import os\n'), ((10926, 10979), 'os.path.join', 'os.path.join', 
(['self.session.db_dir', '"""lbryfile_info.db"""'], {}), "(self.session.db_dir, 'lbryfile_info.db')\n", (10938, 10979), False, 'import os\n'), ((6771, 6848), 'twisted.internet.task.deferLater', 'task.deferLater', (['reactor', '(1)', 'self._stop_lbry_file', 'lbry_file'], {'count': '(count - 1)'}), '(reactor, 1, self._stop_lbry_file, lbry_file, count=count - 1)\n', (6786, 6848), False, 'from twisted.internet import defer, task, reactor\n'), ((8261, 8324), 'twisted.internet.task.deferLater', 'task.deferLater', (['reactor', '(1)', 'wait_for_finished'], {'count': '(count - 1)'}), '(reactor, 1, wait_for_finished, count=count - 1)\n', (8276, 8324), False, 'from twisted.internet import defer, task, reactor\n'), ((9885, 9910), 'lbrynet.reflector.reupload.reflect_stream', 'reflect_stream', (['lbry_file'], {}), '(lbry_file)\n', (9899, 9910), False, 'from lbrynet.reflector.reupload import reflect_stream\n')] |
# Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import time
from typing import Dict, List, Optional, Type
from pydantic import validate_arguments
from ...app_logger import AppLogger
from ...testplan import SystemState
from ...testplan.schema import TestPlan
from ...testplan.verification import VerificationConfig, VerificationType
from ...utils.hooks import EventHook
from ...utils.yaml import Dumper
from .data import VerificationData, VerificationStateData
from .plugins.BaseVerificationPlugin import BaseVerificationPlugin
from .plugins.HTTPRequestVerificationPlugin import (
HTTPRequestVerificationPlugin,
)
from .plugins.PythonModuleVerificationPlugin import (
PythonModuleVerificationPlugin,
)
from .plugins.SDv4VerificationPlugin import SDv4VerificationPlugin
# Enum value to corresponding Plugin Map
VERIFICATION_PLUGIN_MAP: Dict[str, Type[BaseVerificationPlugin]] = {
"python_module": PythonModuleVerificationPlugin,
"http_request": HTTPRequestVerificationPlugin,
"sdv4": SDv4VerificationPlugin,
}
class VerificationController(EventHook):
"""
Verification controller is used to run all the verification plugins configured in the testplan
and assert that the system is expected to be in a state expected by the user. Extends the EventHook class,
that defines the following event hooks.
## Valid Hooks
=== "on_start"
Hook that gets called when the verification execution is about to start.
No arguments are passed to the callable.
```python
def callable_hook(): ...
```
=== "on_each_plugin_start"
Hook that gets called when a particular plugin execution is about to start. `index` in the signature refers
to the position in the list
```python
def callable_hook(index: int, config: VerificationConfig): ...
```
References:
1. [VerificationConfig][ychaos.testplan.verification.VerificationConfig]
=== "on_each_plugin_end"
Hook that gets called when a particular plugin execution has ended. `index` in the signature refers to the
position in the list
```python
def callable_hook(index: int, config: VerificationConfig, state_data: VerificationStateData): ...
```
References:
1. [VerificationConfig][ychaos.testplan.verification.VerificationConfig]
2. [VerificationStateData][ychaos.core.verification.data.VerificationStateData]
=== "on_end"
Hook that gets called when the verification execution has ended. Each element in the list
of boolean corresponds to the result of the plugin, where `True` indicates successful verification
and `False` is a failure to verify the state
```python
def callable_hook(verify_list: List[bool]): ...
```
=== "on_plugin_not_found"
Hook that gets called when a plugin available in schema is not ready for usage/not implemented.
This case is possible for the plugins that are in Beta/development phase
```python
def callable_hook(index:int, plugin_type: VerificationType): ...
```
---
Each of the hooks get called on a certain event. The caller can register as many hooks for a particular event,
by calling the `register_hook(event_name, hook_method)` method. All the hooks are executed sequentially. The best example
of this is to register a hook to print information on CLI.
"""
__hook_events__ = {
"on_start": EventHook.CallableType(),
"on_each_plugin_start": EventHook.CallableType(int, VerificationConfig),
"on_each_plugin_end": EventHook.CallableType(
int, VerificationConfig, VerificationStateData
),
"on_plugin_not_found": EventHook.CallableType(int, VerificationType),
"on_end": EventHook.CallableType(List[bool]),
}
@validate_arguments
def __init__(
self,
testplan: TestPlan,
current_state: SystemState,
verification_data: List[Dict[SystemState, Optional[VerificationStateData]]],
):
"""
Initialize a verification controller object.
Args:
testplan: A valid testplan object
current_state: The state in which the system is expected to be in
verification_data (List[VerificationData]): The verification data probably from previous run.
"""
super(VerificationController, self).__init__()
self.logger = AppLogger.get_logger(self.__class__.__name__)
self.logger.bind(event="controller")
self.testplan = testplan
self.current_state = current_state
if not verification_data:
verification_data = [
dict(),
] * len(self.testplan.verification)
elif len(verification_data) != len(self.testplan.verification):
raise ValueError("Data and verification config size mismatch")
self.verification_data = list()
for data in verification_data:
self.verification_data.append(VerificationData.parse_obj(data))
def execute(self) -> bool:
"""
Execute the Verification controller.
Returns:
True if all the verification plugin pass, False otherwise
"""
# Call all the hooks that were registered for `verification_start`
# If there were no hooks registered, this will be no-op
self.execute_hooks("on_start")
_verify_list = list()
for index, (verification_plugin, data) in enumerate(
zip(self.testplan.verification, self.verification_data)
):
# Delay before verifying
time.sleep(verification_plugin.delay_before)
assert isinstance(verification_plugin.states, List) # For mypy
if self.current_state in verification_plugin.states:
self.logger.info(
msg=f"Starting {verification_plugin.type.value} verification"
)
plugin_class = VERIFICATION_PLUGIN_MAP.get(
verification_plugin.type.value, None
)
if plugin_class is None:
# This can happen when a new plugin is not implemented yet, but is
# available in the schema
self.execute_hooks(
"on_plugin_not_found", index, verification_plugin.type
)
continue
plugin = plugin_class(verification_plugin.config, data)
# Call all the hooks that were registered for `verification_plugin_start`.
self.execute_hooks("on_each_plugin_start", index, verification_plugin)
state_data = plugin.run_verification()
self.logger.info(
msg=f"Completed {verification_plugin.type.value} verification"
)
# Call all the hooks that were registered for `verification_plugin_end`.
self.execute_hooks(
"on_each_plugin_end", index, verification_plugin, state_data
)
data.replace_data(self.current_state, state_data)
if verification_plugin.strict:
_verify_list.append(state_data.rc == 0)
else:
data.add_data(self.current_state, None)
# Delay after verifying
time.sleep(verification_plugin.delay_after)
# Call all the hooks that were registered for `verification_end`.
self.execute_hooks("on_end", _verify_list)
return all(_verify_list)
def get_encoded_verification_data(self):
return [data.encoded_dict() for data in self.verification_data]
def dump_verification_json(self, fp):
import json
json.dump(self.get_encoded_verification_data(), fp=fp, indent=4)
def dump_verification_yaml(self, fp):
import yaml
yaml.dump(
self.get_encoded_verification_data(),
fp,
default_flow_style=False,
sort_keys=False,
Dumper=Dumper,
indent=4,
)
| [
"time.sleep"
]
| [((5805, 5849), 'time.sleep', 'time.sleep', (['verification_plugin.delay_before'], {}), '(verification_plugin.delay_before)\n', (5815, 5849), False, 'import time\n'), ((7571, 7614), 'time.sleep', 'time.sleep', (['verification_plugin.delay_after'], {}), '(verification_plugin.delay_after)\n', (7581, 7614), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from vimeodl import __version__
from vimeodl.vimeo import VimeoLinkExtractor, VimeoDownloader
def test_version():
assert __version__ == '0.1.0'
def test_vimeo_link_extractor():
vm = VimeoLinkExtractor()
vm.extract()
| [
"vimeodl.vimeo.VimeoLinkExtractor"
]
| [((239, 259), 'vimeodl.vimeo.VimeoLinkExtractor', 'VimeoLinkExtractor', ([], {}), '()\n', (257, 259), False, 'from vimeodl.vimeo import VimeoLinkExtractor, VimeoDownloader\n')] |
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch import optim, nn
from pytorch.xor.multilayer_perceptron import MultilayerPerceptron
from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations
input_size = 2
output_size = len(set(LABELS))
num_hidden_layers = 0
hidden_size = 2 # isn't ever used but we still set it
seed = 24
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
mlp1 = MultilayerPerceptron(input_size=input_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
output_size=output_size)
print(mlp1)
batch_size = 1000
x_data_static, y_truth_static = get_toy_data(batch_size)
fig, ax = plt.subplots(1, 1, figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static,
ax=ax, title='Initial Perceptron State', levels=[0.5])
plt.axis('off')
plt.savefig('images/perceptron_initial.png')
plt.show()
losses = []
batch_size = 10000
n_batches = 10
max_epochs = 10
loss_change = 1.0
last_loss = 10.0
change_threshold = 1e-3
epoch = 0
all_imagefiles = []
lr = 0.01
optimizer = optim.Adam(params=mlp1.parameters(), lr=lr)
cross_ent_loss = nn.CrossEntropyLoss()
def early_termination(loss_change, change_threshold, epoch, max_epochs):
terminate_for_loss_change = loss_change < change_threshold
terminate_for_epochs = epoch > max_epochs
# return terminate_for_loss_change or
return terminate_for_epochs
while not early_termination(loss_change, change_threshold, epoch, max_epochs):
for _ in range(n_batches):
# step 0: fetch the data
x_data, y_target = get_toy_data(batch_size)
# step 1: zero the gradients
mlp1.zero_grad()
# step 2: run the forward pass
y_pred = mlp1(x_data).squeeze()
# step 3: compute the loss
loss = cross_ent_loss(y_pred, y_target.long())
# step 4: compute the backward pass
loss.backward()
# step 5: have the optimizer take an optimization step
optimizer.step()
# auxillary: bookkeeping
loss_value = loss.item()
losses.append(loss_value)
loss_change = abs(last_loss - loss_value)
last_loss = loss_value
print("epoch: {}: loss_value: {}".format(epoch, loss_value))
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
visualize_results(mlp1, x_data_static, y_truth_static, ax=ax, epoch=epoch,
title=f"{loss_value:0.2f}; {loss_change:0.4f}")
plt.axis('off')
epoch += 1
all_imagefiles.append(f'images/perceptron_epoch{epoch}_toylearning.png')
plt.savefig(all_imagefiles[-1])
_, ax = plt.subplots(1,1,figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static, epoch=None, levels=[0.5], ax=ax)
plt.axis('off');
plt.savefig('images/perceptron_final.png')
plot_intermediate_representations(mlp1,
"The Perceptron's Input and Intermediate Representation",
figsize=(9, 3))
plt.savefig("images/perceptron_intermediate.png")
plt.savefig("images/figure_4_5.pdf") | [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"matplotlib.pyplot.savefig",
"torch.nn.CrossEntropyLoss",
"pytorch.xor.utils.get_toy_data",
"pytorch.xor.utils.plot_intermediate_representations",
"pytorch.xor.multilayer_perceptron.MultilayerPerceptron",
"pytorch.xor.utils.visualize_results",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
| [((401, 424), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (418, 424), False, 'import torch\n'), ((425, 457), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (451, 457), False, 'import torch\n'), ((458, 478), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (472, 478), True, 'import numpy as np\n'), ((487, 621), 'pytorch.xor.multilayer_perceptron.MultilayerPerceptron', 'MultilayerPerceptron', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_hidden_layers': 'num_hidden_layers', 'output_size': 'output_size'}), '(input_size=input_size, hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers, output_size=output_size)\n', (507, 621), False, 'from pytorch.xor.multilayer_perceptron import MultilayerPerceptron\n'), ((765, 789), 'pytorch.xor.utils.get_toy_data', 'get_toy_data', (['batch_size'], {}), '(batch_size)\n', (777, 789), False, 'from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations\n'), ((800, 835), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 5)'}), '(1, 1, figsize=(10, 5))\n', (812, 835), True, 'import matplotlib.pyplot as plt\n'), ((835, 949), 'pytorch.xor.utils.visualize_results', 'visualize_results', (['mlp1', 'x_data_static', 'y_truth_static'], {'ax': 'ax', 'title': '"""Initial Perceptron State"""', 'levels': '[0.5]'}), "(mlp1, x_data_static, y_truth_static, ax=ax, title=\n 'Initial Perceptron State', levels=[0.5])\n", (852, 949), False, 'from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations\n'), ((964, 979), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (972, 979), True, 'import matplotlib.pyplot as plt\n'), ((980, 1024), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/perceptron_initial.png"""'], {}), "('images/perceptron_initial.png')\n", (991, 1024), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1033, 1035), True, 'import matplotlib.pyplot as plt\n'), ((1273, 1294), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1292, 1294), False, 'from torch import optim, nn\n'), ((2748, 2783), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 5)'}), '(1, 1, figsize=(10, 5))\n', (2760, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2781, 2873), 'pytorch.xor.utils.visualize_results', 'visualize_results', (['mlp1', 'x_data_static', 'y_truth_static'], {'epoch': 'None', 'levels': '[0.5]', 'ax': 'ax'}), '(mlp1, x_data_static, y_truth_static, epoch=None, levels=[\n 0.5], ax=ax)\n', (2798, 2873), False, 'from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations\n'), ((2869, 2884), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2877, 2884), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2928), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/perceptron_final.png"""'], {}), "('images/perceptron_final.png')\n", (2897, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2930, 3047), 'pytorch.xor.utils.plot_intermediate_representations', 'plot_intermediate_representations', (['mlp1', '"""The Perceptron\'s Input and Intermediate Representation"""'], {'figsize': '(9, 3)'}), '(mlp1,\n "The Perceptron\'s Input and Intermediate Representation", figsize=(9, 3))\n', (2963, 3047), False, 'from pytorch.xor.utils import 
LABELS, get_toy_data, visualize_results, plot_intermediate_representations\n'), ((3112, 3161), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/perceptron_intermediate.png"""'], {}), "('images/perceptron_intermediate.png')\n", (3123, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3162, 3198), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/figure_4_5.pdf"""'], {}), "('images/figure_4_5.pdf')\n", (3173, 3198), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2441), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 5)'}), '(1, 1, figsize=(10, 5))\n', (2418, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2572), 'pytorch.xor.utils.visualize_results', 'visualize_results', (['mlp1', 'x_data_static', 'y_truth_static'], {'ax': 'ax', 'epoch': 'epoch', 'title': 'f"""{loss_value:0.2f}; {loss_change:0.4f}"""'}), "(mlp1, x_data_static, y_truth_static, ax=ax, epoch=epoch,\n title=f'{loss_value:0.2f}; {loss_change:0.4f}')\n", (2463, 2572), False, 'from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations\n'), ((2595, 2610), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2603, 2610), True, 'import matplotlib.pyplot as plt\n'), ((2707, 2738), 'matplotlib.pyplot.savefig', 'plt.savefig', (['all_imagefiles[-1]'], {}), '(all_imagefiles[-1])\n', (2718, 2738), True, 'import matplotlib.pyplot as plt\n'), ((1726, 1750), 'pytorch.xor.utils.get_toy_data', 'get_toy_data', (['batch_size'], {}), '(batch_size)\n', (1738, 1750), False, 'from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations\n')] |
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture(scope="function")
def browser():
options = webdriver.ChromeOptions()
options.add_argument('ignore-certificate-errors')
options.add_argument("--headless")
options.add_argument('--no-sandbox')
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
yield driver
driver.quit()
| [
"pytest.fixture",
"selenium.webdriver.ChromeOptions",
"webdriver_manager.chrome.ChromeDriverManager"
]
| [((105, 137), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (119, 137), False, 'import pytest\n'), ((167, 192), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (190, 192), False, 'from selenium import webdriver\n'), ((496, 517), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (515, 517), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')] |
import time
from datetime import date,datetime
from astral import LocationInfo
from astral.sun import sun
class CamLocation:
def __init__(self,lat,lon,info,country,timezone):
self.info = LocationInfo(info, country, timezone, lat, lon)
def is_night(self):
s = sun(self.info.observer, date=date.today(),tzinfo=self.info.timezone)
sunrise = s["sunrise"].timestamp()
sunset = s["sunset"].timestamp()
time_now = datetime.now().timestamp()
if time_now > sunrise and time_now < sunset:
return False
else:
return True
| [
"datetime.datetime.now",
"astral.LocationInfo",
"datetime.date.today"
]
| [((200, 247), 'astral.LocationInfo', 'LocationInfo', (['info', 'country', 'timezone', 'lat', 'lon'], {}), '(info, country, timezone, lat, lon)\n', (212, 247), False, 'from astral import LocationInfo\n'), ((314, 326), 'datetime.date.today', 'date.today', ([], {}), '()\n', (324, 326), False, 'from datetime import date, datetime\n'), ((459, 473), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (471, 473), False, 'from datetime import date, datetime\n')] |