code (string, lengths 20–1.05M) | apis (sequence) | extract_api (string, lengths 75–5.24M)
---|---|---|
import Rankings
def create_playoffs(input_text):
player_list = input_text.split('\n')
# delete last empty line
del player_list[-1]
player1 = player_list[0]
player2 = player_list[1]
player3 = player_list[2]
player4 = player_list[3]
matches = [(player1, player4), (player2, player3)]
return matches
def determine_winner(input_text):
red, black, score_list = Rankings.get_players_and_score(input_text)
if score_list[0] > score_list[1]:
return red, black
elif score_list[0] < score_list[1]:
return black, red
| [
"Rankings.get_players_and_score"
] | [((403, 445), 'Rankings.get_players_and_score', 'Rankings.get_players_and_score', (['input_text'], {}), '(input_text)\n', (433, 445), False, 'import Rankings\n')] |
import re
from typing import NamedTuple
from .common import AoCDay
from .utils import collapse_strings, dict_from_string
ECL = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
class ValidationRule(NamedTuple):
regex: str
validation_func: str
def hgt_validation_func(content):
return (content[1] == "cm" and 150 <= int(content[0]) <= 193) or (
content[1] == "in" and 59 <= int(content[0]) <= 76
)
FIELDS = {
"byr": ValidationRule("([\d]{4})$", lambda x: 1920 <= int(x) <= 2002),
"iyr": ValidationRule("([\d]{4})$", lambda x: 2010 <= int(x) <= 2020),
"eyr": ValidationRule("([\d]{4})$", lambda x: 2020 <= int(x) <= 2030),
"hgt": ValidationRule("([\d]+)([\D]*)$", hgt_validation_func),
"hcl": ValidationRule("(#[\d|a-f]{6})$", lambda x: True if x else False),
"ecl": ValidationRule("(.*)", lambda x: x in ECL),
"pid": ValidationRule("(^[\d|\w]{9}$)", lambda x: True if x else False)
# "cid"
}
class Day(AoCDay):
def __init__(self):
super().__init__(4)
def _preprocess_input(self):
p_list = []
for p_string in collapse_strings(self._input_data):
p_dict = {}
for element in p_string.split(" "):
p_dict = {**p_dict, **dict_from_string(element)}
p_list.append(p_dict)
self.__passport_list = p_list
def _calculate_1(self):
passports = self.__passport_list
return sum(
self.__validate(passport, skip_elements_validation=True)
for passport in passports
)
def _calculate_2(self):
passports = self.__passport_list
return sum(self.__validate(passport) for passport in passports)
@staticmethod
def __validate_element(element: str, value: str) -> bool:
if not (parse := re.findall(FIELDS[element].regex, value)):
return False
return FIELDS[element].validation_func(parse[0])
@staticmethod
def __validate(passport: dict, skip_elements_validation: bool = False) -> bool:
return all(
f in passport
and (skip_elements_validation or Day.__validate_element(f, passport[f]))
for f in FIELDS
)
| [
"re.findall"
] | [((1806, 1846), 're.findall', 're.findall', (['FIELDS[element].regex', 'value'], {}), '(FIELDS[element].regex, value)\n', (1816, 1846), False, 'import re\n')] |
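A hedged aside (not part of the record above): the `ValidationRule` pattern in this snippet pairs a regex with a validation callable, so a single passport field can be checked in isolation as sketched below; the sample value "180cm" is made up for illustration.

```python
import re
from typing import Callable, NamedTuple, Sequence


class ValidationRule(NamedTuple):
    regex: str
    validation_func: Callable


def hgt_validation_func(content: Sequence[str]) -> bool:
    # content is the tuple of regex captures: (digits, unit)
    return (content[1] == "cm" and 150 <= int(content[0]) <= 193) or (
        content[1] == "in" and 59 <= int(content[0]) <= 76
    )


hgt_rule = ValidationRule(r"([\d]+)([\D]*)$", hgt_validation_func)

value = "180cm"  # made-up sample value
parse = re.findall(hgt_rule.regex, value)
print(parse)                                               # [('180', 'cm')]
print(bool(parse) and hgt_rule.validation_func(parse[0]))  # True
```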
import numpy
a = numpy.array((input().split(' ')), int)
ar = numpy.reshape(a, (3, 3))
print(ar)
| [
"numpy.reshape"
] | [((62, 86), 'numpy.reshape', 'numpy.reshape', (['a', '(3, 3)'], {}), '(a, (3, 3))\n', (75, 86), False, 'import numpy\n')] |
import os
import random
import sys
from argparse import ArgumentParser
from shutil import copyfile
IMAGE_FILES_EXT = ['jpg', 'jpeg', 'png', 'gif']
def split_train_val(target_dir, val_ratio=0.05):
parent_dir = os.path.dirname(target_dir)
print(parent_dir)
print(target_dir)
curr_dir = target_dir.split(os.sep)[-1]
curr_dir = os.path.join(parent_dir, '{}_split'.format(curr_dir))
if not os.path.exists(curr_dir):
os.mkdir(curr_dir)
train_dir = os.path.join(curr_dir, 'train')
val_dir = os.path.join(curr_dir, 'val')
if not os.path.exists(train_dir):
os.mkdir(train_dir)
if not os.path.exists(val_dir):
os.mkdir(val_dir)
for root, dirs, _ in os.walk(target_dir):
for dir_ in dirs:
print(dir_)
copy_files(target_dir, dir_, train_dir, val_dir, val_ratio)
def copy_files(target_dir, dir_, train_dir, val_dir, val_ratio):
files_dir = os.path.join(target_dir, dir_)
tar_train_dir = os.path.join(train_dir, dir_)
tar_val_dir = os.path.join(val_dir, dir_)
if not os.path.exists(tar_train_dir):
os.mkdir(tar_train_dir)
if not os.path.exists(tar_val_dir):
os.mkdir(tar_val_dir)
for _, _, files in os.walk(files_dir):
print('Split dataset: {}'.format(dir_))
files_len = len(files)
val_len = int(val_ratio * files_len)
print('train_len : {}'.format(files_len - val_len))
print('val_len : {}'.format(val_len))
random.shuffle(files)
val_files = files[0:val_len]
train_files = files[val_len:]
for t in train_files:
f_name = os.path.join(files_dir, t)
t_name = os.path.join(tar_train_dir, t)
copyfile(f_name, t_name)
for v in val_files:
f_name = os.path.join(files_dir, v)
t_name = os.path.join(tar_val_dir, v)
copyfile(f_name, t_name)
def main(dog_vs_cat_images_dir, target_dir, val_ratio):
if target_dir.endswith(os.sep):
target_dir = target_dir[:len(target_dir) - 1]
dog_dir = os.path.join(target_dir, 'dog')
cat_dir = os.path.join(target_dir, 'cat')
if not os.path.exists(dog_dir):
os.mkdir(dog_dir)
if not os.path.exists(cat_dir):
os.mkdir(cat_dir)
dog_count = 0
cat_count = 0
for root, dirs, files in os.walk(dog_vs_cat_images_dir):
for file in files:
print('Processing: {}'.format(file))
ext = file.split(os.extsep)[-1]
if ext.lower() in IMAGE_FILES_EXT:
file_path = os.path.join(dog_vs_cat_images_dir, file)
if file.startswith('dog'):
f_name = '{0:08d}'.format(dog_count)
tar_path = os.path.join(dog_dir, '{}.{}'.format(f_name, ext))
dog_count += 1
elif file.startswith('cat'):
f_name = '{0:08d}'.format(cat_count)
tar_path = os.path.join(cat_dir, '{}.{}'.format(f_name, ext))
cat_count += 1
else:
print('Do not know what this image is of {}'.format(file))
continue
copyfile(file_path, tar_path)
print('Dog images: {}'.format(dog_count))
print('Cat images: {}'.format(cat_count))
split_train_val(target_dir, val_ratio=val_ratio)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('dog_vs_cat_images_dir', type=str)
parser.add_argument('target_dir', type=str)
parser.add_argument('--val_ratio', type=float, default=0.2)
args = parser.parse_args(sys.argv[1:])
main(args.dog_vs_cat_images_dir, args.target_dir, args.val_ratio)
| [
"os.path.exists",
"random.shuffle",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"shutil.copyfile",
"os.mkdir",
"os.walk"
] | [((216, 243), 'os.path.dirname', 'os.path.dirname', (['target_dir'], {}), '(target_dir)\n', (231, 243), False, 'import os\n'), ((481, 512), 'os.path.join', 'os.path.join', (['curr_dir', '"""train"""'], {}), "(curr_dir, 'train')\n", (493, 512), False, 'import os\n'), ((527, 556), 'os.path.join', 'os.path.join', (['curr_dir', '"""val"""'], {}), "(curr_dir, 'val')\n", (539, 556), False, 'import os\n'), ((711, 730), 'os.walk', 'os.walk', (['target_dir'], {}), '(target_dir)\n', (718, 730), False, 'import os\n'), ((937, 967), 'os.path.join', 'os.path.join', (['target_dir', 'dir_'], {}), '(target_dir, dir_)\n', (949, 967), False, 'import os\n'), ((989, 1018), 'os.path.join', 'os.path.join', (['train_dir', 'dir_'], {}), '(train_dir, dir_)\n', (1001, 1018), False, 'import os\n'), ((1037, 1064), 'os.path.join', 'os.path.join', (['val_dir', 'dir_'], {}), '(val_dir, dir_)\n', (1049, 1064), False, 'import os\n'), ((1233, 1251), 'os.walk', 'os.walk', (['files_dir'], {}), '(files_dir)\n', (1240, 1251), False, 'import os\n'), ((2084, 2115), 'os.path.join', 'os.path.join', (['target_dir', '"""dog"""'], {}), "(target_dir, 'dog')\n", (2096, 2115), False, 'import os\n'), ((2130, 2161), 'os.path.join', 'os.path.join', (['target_dir', '"""cat"""'], {}), "(target_dir, 'cat')\n", (2142, 2161), False, 'import os\n'), ((2353, 2383), 'os.walk', 'os.walk', (['dog_vs_cat_images_dir'], {}), '(dog_vs_cat_images_dir)\n', (2360, 2383), False, 'import os\n'), ((3424, 3440), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3438, 3440), False, 'from argparse import ArgumentParser\n'), ((412, 436), 'os.path.exists', 'os.path.exists', (['curr_dir'], {}), '(curr_dir)\n', (426, 436), False, 'import os\n'), ((446, 464), 'os.mkdir', 'os.mkdir', (['curr_dir'], {}), '(curr_dir)\n', (454, 464), False, 'import os\n'), ((568, 593), 'os.path.exists', 'os.path.exists', (['train_dir'], {}), '(train_dir)\n', (582, 593), False, 'import os\n'), ((603, 622), 'os.mkdir', 'os.mkdir', (['train_dir'], {}), '(train_dir)\n', (611, 622), False, 'import os\n'), ((634, 657), 'os.path.exists', 'os.path.exists', (['val_dir'], {}), '(val_dir)\n', (648, 657), False, 'import os\n'), ((667, 684), 'os.mkdir', 'os.mkdir', (['val_dir'], {}), '(val_dir)\n', (675, 684), False, 'import os\n'), ((1076, 1105), 'os.path.exists', 'os.path.exists', (['tar_train_dir'], {}), '(tar_train_dir)\n', (1090, 1105), False, 'import os\n'), ((1115, 1138), 'os.mkdir', 'os.mkdir', (['tar_train_dir'], {}), '(tar_train_dir)\n', (1123, 1138), False, 'import os\n'), ((1150, 1177), 'os.path.exists', 'os.path.exists', (['tar_val_dir'], {}), '(tar_val_dir)\n', (1164, 1177), False, 'import os\n'), ((1187, 1208), 'os.mkdir', 'os.mkdir', (['tar_val_dir'], {}), '(tar_val_dir)\n', (1195, 1208), False, 'import os\n'), ((1493, 1514), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (1507, 1514), False, 'import random\n'), ((2173, 2196), 'os.path.exists', 'os.path.exists', (['dog_dir'], {}), '(dog_dir)\n', (2187, 2196), False, 'import os\n'), ((2206, 2223), 'os.mkdir', 'os.mkdir', (['dog_dir'], {}), '(dog_dir)\n', (2214, 2223), False, 'import os\n'), ((2235, 2258), 'os.path.exists', 'os.path.exists', (['cat_dir'], {}), '(cat_dir)\n', (2249, 2258), False, 'import os\n'), ((2268, 2285), 'os.mkdir', 'os.mkdir', (['cat_dir'], {}), '(cat_dir)\n', (2276, 2285), False, 'import os\n'), ((1642, 1668), 'os.path.join', 'os.path.join', (['files_dir', 't'], {}), '(files_dir, t)\n', (1654, 1668), False, 'import os\n'), ((1690, 1720), 'os.path.join', 'os.path.join', 
(['tar_train_dir', 't'], {}), '(tar_train_dir, t)\n', (1702, 1720), False, 'import os\n'), ((1733, 1757), 'shutil.copyfile', 'copyfile', (['f_name', 't_name'], {}), '(f_name, t_name)\n', (1741, 1757), False, 'from shutil import copyfile\n'), ((1808, 1834), 'os.path.join', 'os.path.join', (['files_dir', 'v'], {}), '(files_dir, v)\n', (1820, 1834), False, 'import os\n'), ((1856, 1884), 'os.path.join', 'os.path.join', (['tar_val_dir', 'v'], {}), '(tar_val_dir, v)\n', (1868, 1884), False, 'import os\n'), ((1897, 1921), 'shutil.copyfile', 'copyfile', (['f_name', 't_name'], {}), '(f_name, t_name)\n', (1905, 1921), False, 'from shutil import copyfile\n'), ((2580, 2621), 'os.path.join', 'os.path.join', (['dog_vs_cat_images_dir', 'file'], {}), '(dog_vs_cat_images_dir, file)\n', (2592, 2621), False, 'import os\n'), ((3205, 3234), 'shutil.copyfile', 'copyfile', (['file_path', 'tar_path'], {}), '(file_path, tar_path)\n', (3213, 3234), False, 'from shutil import copyfile\n')] |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
fampaths = IN[0]
famnames = IN[1]
elementlist = []
booleans = []
TransactionManager.Instance.EnsureInTransaction(doc)
for fampath in fampaths:
try:
doc.LoadFamily(fampath)
booleans.append(True)
except: booleans.append(False)
TransactionManager.Instance.TransactionTaskDone()
collector = FilteredElementCollector(doc)
collector.OfClass(Family)
for item in collector.ToElements():
if item.Name in famnames:
typelist = list()
for famtypeid in item.GetFamilySymbolIds():
typelist.append(doc.GetElement(famtypeid).ToDSType(True))
elementlist.append(typelist)
OUT = (elementlist,booleans) | [
"RevitServices.Transactions.TransactionManager.Instance.EnsureInTransaction",
"RevitServices.Transactions.TransactionManager.Instance.TransactionTaskDone",
"clr.AddReference",
"clr.ImportExtensions"
] | [((11, 39), 'clr.AddReference', 'clr.AddReference', (['"""RevitAPI"""'], {}), "('RevitAPI')\n", (27, 39), False, 'import clr\n'), ((73, 103), 'clr.AddReference', 'clr.AddReference', (['"""RevitNodes"""'], {}), "('RevitNodes')\n", (89, 103), False, 'import clr\n'), ((117, 153), 'clr.ImportExtensions', 'clr.ImportExtensions', (['Revit.Elements'], {}), '(Revit.Elements)\n', (137, 153), False, 'import clr\n'), ((155, 188), 'clr.AddReference', 'clr.AddReference', (['"""RevitServices"""'], {}), "('RevitServices')\n", (171, 188), False, 'import clr\n'), ((438, 490), 'RevitServices.Transactions.TransactionManager.Instance.EnsureInTransaction', 'TransactionManager.Instance.EnsureInTransaction', (['doc'], {}), '(doc)\n', (485, 490), False, 'from RevitServices.Transactions import TransactionManager\n'), ((605, 654), 'RevitServices.Transactions.TransactionManager.Instance.TransactionTaskDone', 'TransactionManager.Instance.TransactionTaskDone', ([], {}), '()\n', (652, 654), False, 'from RevitServices.Transactions import TransactionManager\n')] |
"""VOC Dataset Classes
Original author: <NAME>
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: <NAME>, <NAME>
"""
from .config import HOME
import os.path as osp
import os
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
import shutil
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
VOC_CLASSES = ( # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
# # 'aeroplane', 'bus', 'car', 'motorbike', 'person' #Easy classes
# # 'pottedplant', 'bottle', 'cow', 'chair', 'bird', #Hard classes
# VOC_SUBSET_CLASSES = (
# 'aeroplane', 'bird', 'bottle', 'bus', 'car', 'chair', 'cow',
# 'motorbike', 'person','pottedplant'
# )
# note: if you used our download scripts, this should be right
VOC_ROOT_LOCAL = osp.join(HOME, "data/VOCdevkit/")
VOC_ROOT = "data/VOCdevkit/"
"""
@Maarten: for a custom dataset, this looks promising; https://github.com/amdegroot/ssd.pytorch/issues/72
"""
class VOCAnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initialized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
if class_to_ind != None:
self.skip_non_relevant_classes = True
else:
self.skip_non_relevant_classes = False
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target, width, height):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
# if class_to_ind
name = obj.find('name').text.lower().strip()
# if class_to_ind is passed, we want to skip classes that are set to background (-1)
if self.skip_non_relevant_classes:
if self.class_to_ind[name] == -1:
continue
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class VOCAnnotationTransform2(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initialized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
if class_to_ind != None:
self.skip_non_relevant_classes = True
else:
self.skip_non_relevant_classes = False
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target, width, height):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
# if class_to_ind
name = obj.find('name').text.lower().strip()
# if class_to_ind is passed, we want to skip classes that are set to background (-1)
if self.skip_non_relevant_classes:
if self.class_to_ind[name] == -1:
continue
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
if difficult:
bndbox.append(1)
else:
bndbox.append(0)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class VOCDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root,
image_sets=[('2007', 'trainval'),
('2007', 'train'),
('2007', 'val'),
('2012', 'trainval')],
transform=None, target_transform=VOCAnnotationTransform(keep_difficult=True),
dataset_name='VOC0712', idx = None, num_classes = 20, object_class_number = None):
self.root = root
self.image_set = image_sets
self.transform = transform
self.target_transform = target_transform
self.name = dataset_name
self._annopath = osp.join('%s', 'Annotations', '%s.xml')
self._imgpath = osp.join('%s', 'JPEGImages', '%s.jpg')
self.num_classes = num_classes
self.object_class_number = object_class_number
if not idx:
self.ids = list()
for (year, name) in image_sets:
rootpath = osp.join(self.root, 'VOC' + year)
for line in open(osp.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
self.ids.append((rootpath, line.strip()))
else:
self.ids=list()
for year, name in image_sets:
rootpath = osp.join(self.root,'VOC' + year)
for id in idx:
self.ids.append((rootpath, id))
self.size = len(self.ids)
def __getitem__(self, index):
# if self.num_classes == 20:
im, gt, h, w = self.pull_item(index)
# elif self.num_classes == 1:
# im, gt, h, w = self.pull_item(index)
# if gt[4] != self.object_class_number:
# gt[4] = 0 # turn other classes into background classes if we only use one
# else:
# gt[4] = 1 # turn class of interest into foreground class (always 1)
# else:
# raise NotImplementedError()
return im, gt
def __len__(self):
return len(self.ids)
def pull_item(self, index):
img_id = self.ids[index]
target = ET.parse(self._annopath % img_id).getroot()
img = cv2.imread(self._imgpath % img_id)
height, width, channels = img.shape
if self.target_transform is not None:
target = self.target_transform(target, width, height)
if self.transform is not None and target != []: # target is an empty list if it contains no relevant class
target = np.array(target)
try:
img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
except IndexError:
print()
# to rgb
img = img[:, :, (2, 1, 0)]
# img = img.transpose(2, 0, 1)
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
return torch.from_numpy(img).permute(2, 0, 1), target, height, width
# return torch.from_numpy(img), target, height, width
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
def pull_image_using_imageset_id(self,imageset_id):
return cv2.imread(self._imgpath % imageset_id, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno, 1, 1)
return img_id[1], gt
def pull_anno_using_imageset_id(self,img_id):
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno, 1, 1)
return img_id[1], gt
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def VOC_file_classification_to_detection_file(input_file_name):
# Open
in_path = VOC_ROOT_LOCAL+'/VOC2007/ImageSets/Main/'+input_file_name+'.txt'
# Read file
with open(in_path,'r') as f:
data = f.readlines()
# strip \n and ' '
stripped_data = [l[:6] for l in data if l[-3:-1] != '-1']
stripped_data = [l+'\n' for l in stripped_data]
# save file
out_path = VOC_ROOT_LOCAL+'/VOC2007/ImageSets/Main/'+input_file_name+'_detect.txt'
with open(out_path,'w') as f:
f.writelines(stripped_data)
def coco_to_voc_weights(source_path, destination_path):
"""
# inspired by: https://github.com/pierluigiferrari/ssd_keras/blob/master/weight_sampling_tutorial.ipynb
"""
# 0 -> 0 (background)
# 5 -> 1 (airplane)
# 2 -> 2 (bicycle)
# 15 -> 3 (bird)
# 9 -> 4 (boat)
# 40 -> 5 (bottle)
# 6-> 6(bus)
# 3-> 7(car)
# 16-> 8(cat)
# 57-> 9(chair)
# 20-> 10 (cow)
# 61-> 11 (dining table)
# 17-> 12 (dog)
# 18-> 13 (horse)
# 4-> 14 (motorbike)
# 1-> 15(person)
# 59 -> 16 (pottedplant)
# 19-> 17 (sheep)
# 58-> 18 (couch->sofa)
# 7-> 19 (train)
# 63-> 20 (tvmonitor)
classes_of_interest = [0, 5, 2, 15, 9, 40, 6, 3, 16, 57, 20, 61, 17, 18, 4, 1, 59, 19, 58, 7, 63]
# classes_of_interest = [0, 3, 8, 1, 2, 10, 4, 6, 12]
# torch.load_state_dict('../active_learning_dir/debug/weights/SSD300_train-loss_7.00734196465446__val-loss_7.189980634894848_COCO_train-iter_3000_trained_COCO.pth')
n_classes_source = 81
classifier_names = ['conf.0',
'conf.1',
'conf.2',
'conf.3',
'conf.4',
'conf.5']
if not osp.isfile(destination_path):
# load weights
trained_weights = torch.load(source_path, map_location='cpu')
# Make a copy of the weights file.
shutil.copy(source_path, destination_path)
else:
# load weights
trained_weights = torch.load(source_path, map_location='cpu')
weights_destination_file = torch.load(destination_path, map_location='cpu')
for name in classifier_names:
# get the trained weights for this layer
kernel = trained_weights[name + '.weight']
bias = trained_weights[name + '.bias']
# get the shape of the kernel.
# height, width, in_channels, out_channels = kernel.shape #3 3 512 324
out_channels, in_channels, height, width = kernel.shape
# print(kernel.shape)
# Compute the indices of the elements we want to sub-sample.
# Keep in mind that each classification predictor layer predicts multiple
# bounding boxes for every spatial location, so we want to sub-sample
# the relevant classes for each of these boxes.
if isinstance(classes_of_interest, (list, tuple)):
subsampling_indices = []
for i in range(int(out_channels / n_classes_source)):
indices = np.array(classes_of_interest) + i * n_classes_source
subsampling_indices.append(indices)
subsampling_indices = list(np.concatenate(subsampling_indices))
elif isinstance(classes_of_interest, int):
subsampling_indices = int(classes_of_interest * (out_channels / n_classes_source))
else:
raise ValueError("`classes_of_interest` must be either an integer or a list/tuple.")
# Sub-sample the kernel and bias.
new_kernel, new_bias = sample_tensors(weights_list=[kernel.numpy(), bias.numpy()],
sampling_instructions= [subsampling_indices,in_channels, height, width],
axes=[[0]],
init=['gaussian', 'zeros'],
mean=0.0,
stddev=0.005)
# Delete the old weights from the destination file.
del weights_destination_file[name+'.weight']
del weights_destination_file[name+'.bias']
# Create new datasets for the sub-sampled weights.
weights_destination_file[name+'.weight'] = torch.FloatTensor(new_kernel)
weights_destination_file[name+'.bias'] = torch.FloatTensor(new_bias)
# save state-dict with voc output nodes
torch.save(weights_destination_file, destination_path)
def sample_tensors(weights_list, sampling_instructions, axes=None, init=None, mean=0.0, stddev=0.005):
'''
Adjusted from: https://github.com/pierluigiferrari/ssd_keras/blob/master/misc_utils/tensor_sampling_utils.py
Can sub-sample and/or up-sample individual dimensions of the tensors in the given list
of input tensors.
It is possible to sub-sample some dimensions and up-sample other dimensions at the same time.
The tensors in the list will be sampled consistently, i.e. for any given dimension that
corresponds among all tensors in the list, the same elements will be picked for every tensor
along that dimension.
For dimensions that are being sub-sampled, you can either provide a list of the indices
that should be picked, or you can provide the number of elements to be sub-sampled, in which
case the elements will be chosen at random.
For dimensions that are being up-sampled, "filler" elements will be inserted at random
positions along the respective dimension. These filler elements will be initialized either
with zero or from a normal distribution with selectable mean and standard deviation.
Arguments:
weights_list (list): A list of Numpy arrays. Each array represents one of the tensors
to be sampled. The tensor with the greatest number of dimensions must be the first
element in the list. For example, in the case of the weights of a 2D convolutional
layer, the kernel must be the first element in the list and the bias the second,
not the other way around. For all tensors in the list after the first tensor, the
lengths of each of their axes must be identical to the length of some axis of the
first tensor.
sampling_instructions (list): A list that contains the sampling instructions for each
dimension of the first tensor. If the first tensor has `n` dimensions, then this
must be a list of length `n`. That means, sampling instructions for every dimension
of the first tensor must still be given even if not all dimensions should be changed.
The elements of this list can be either lists of integers or integers. If the sampling
instruction for a given dimension is a list of integers, then these integers represent
the indices of the elements of that dimension that will be sub-sampled. If the sampling
instruction for a given dimension is an integer, then that number of elements will be
sampled along said dimension. If the integer is greater than the number of elements
of the input tensors in that dimension, that dimension will be up-sampled. If the integer
is smaller than the number of elements of the input tensors in that dimension, that
dimension will be sub-sampled. If the integer is equal to the number of elements
of the input tensors in that dimension, that dimension will remain the same.
axes (list, optional): Only relevant if `weights_list` contains more than one tensor.
This list contains a list for each additional tensor in `weights_list` beyond the first.
Each of these lists contains integers that determine to which axes of the first tensor
the axes of the respective tensor correspond. For example, let the first tensor be a
4D tensor and the second tensor in the list be a 2D tensor. If the first element of
`axis` is the list `[2,3]`, then that means that the two axes of the second tensor
correspond to the last two axes of the first tensor, in the same order. The point of
this list is for the program to know, if a given dimension of the first tensor is to
be sub- or up-sampled, which dimensions of the other tensors in the list must be
sub- or up-sampled accordingly.
init (list, optional): Only relevant for up-sampling. Must be `None` or a list of strings
that determines for each tensor in `weights_list` how the newly inserted values should
be initialized. The possible values are 'gaussian' for initialization from a normal
distribution with the selected mean and standard deviation (see the following two arguments),
or 'zeros' for zero-initialization. If `None`, all initializations default to
'gaussian'.
mean (float, optional): Only relevant for up-sampling. The mean of the values that will
be inserted into the tensors at random in the case of up-sampling.
stddev (float, optional): Only relevant for up-sampling. The standard deviation of the
values that will be inserted into the tensors at random in the case of up-sampling.
Returns:
A list containing the sampled tensors in the same order in which they were given.
''''''
Utilities that are useful to sub- or up-sample weights tensors.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
first_tensor = weights_list[0]
# if (not isinstance(sampling_instructions, (list, tuple))) or (len(sampling_instructions) != len(first_tensor.shape)):
if (not isinstance(sampling_instructions, (list, tuple))) or (len(sampling_instructions) != first_tensor.ndim):
raise ValueError(
"The sampling instructions must be a list whose length is the number of dimensions of the first tensor in `weights_list`.")
if (not init is None) and len(init) != len(weights_list):
raise ValueError(
"`init` must either be `None` or a list of strings that has the same length as `weights_list`.")
up_sample = [] # Store the dimensions along which we need to up-sample.
out_shape = [] # Store the shape of the output tensor here.
# Store two stages of the new (sub-sampled and/or up-sampled) weights tensors in the following two lists.
subsampled_weights_list = [] # Tensors after sub-sampling, but before up-sampling (if any).
upsampled_weights_list = [] # Sub-sampled tensors after up-sampling (if any), i.e. final output tensors.
# Create the slicing arrays from the sampling instructions.
sampling_slices = []
for i, sampling_inst in enumerate(sampling_instructions):
if isinstance(sampling_inst, (list, tuple)):
amax = np.amax(np.array(sampling_inst))
if amax > first_tensor.shape[i]:
raise ValueError(
"The sample instructions for dimension {} contain index {}, which is greater than the length of that dimension, which is {}.".format(
i, amax, first_tensor.shape[i]))
sampling_slices.append(np.array(sampling_inst))
out_shape.append(len(sampling_inst))
elif isinstance(sampling_inst, int):
out_shape.append(sampling_inst)
if sampling_inst == first_tensor.shape[i]:
# Nothing to sample here, we're keeping the original number of elements along this axis.
sampling_slice = np.arange(sampling_inst)
sampling_slices.append(sampling_slice)
elif sampling_inst < first_tensor.shape[i]:
# We want to SUB-sample this dimension. Randomly pick `sample_inst` many elements from it.
sampling_slice1 = np.array([0]) # We will always sample class 0, the background class.
# Sample the rest of the classes.
sampling_slice2 = np.sort(
np.random.choice(np.arange(1, first_tensor.shape[i]), sampling_inst - 1, replace=False))
sampling_slice = np.concatenate([sampling_slice1, sampling_slice2])
sampling_slices.append(sampling_slice)
else:
# We want to UP-sample. Pick all elements from this dimension.
sampling_slice = np.arange(first_tensor.shape[i])
sampling_slices.append(sampling_slice)
up_sample.append(i)
else:
raise ValueError(
"Each element of the sampling instructions must be either an integer or a list/tuple of integers, but received `{}`".format(
type(sampling_inst)))
# Process the first tensor.
subsampled_first_tensor = np.copy(first_tensor[np.ix_(*sampling_slices)])
subsampled_weights_list.append(subsampled_first_tensor)
# Process the other tensors.
if len(weights_list) > 1:
for j in range(1, len(weights_list)):
this_sampling_slices = [sampling_slices[i] for i in axes[j - 1]] # Get the sampling slices for this tensor.
subsampled_weights_list.append(np.copy(weights_list[j][np.ix_(*this_sampling_slices)]))
if up_sample:
# Take care of the dimensions that are to be up-sampled.
out_shape = np.array(out_shape)
# Process the first tensor.
if init is None or init[0] == 'gaussian':
upsampled_first_tensor = np.random.normal(loc=mean, scale=stddev, size=out_shape)
elif init[0] == 'zeros':
upsampled_first_tensor = np.zeros(out_shape)
else:
raise ValueError("Valid initializations are 'gaussian' and 'zeros', but received '{}'.".format(init[0]))
# Pick the indices of the elements in `upsampled_first_tensor` that should be occupied by `subsampled_first_tensor`.
up_sample_slices = [np.arange(k) for k in subsampled_first_tensor.shape]
for i in up_sample:
# Randomly select across which indices of this dimension to scatter the elements of `new_weights_tensor` in this dimension.
up_sample_slice1 = np.array([0])
up_sample_slice2 = np.sort(
np.random.choice(np.arange(1, upsampled_first_tensor.shape[i]), subsampled_first_tensor.shape[i] - 1,
replace=False))
up_sample_slices[i] = np.concatenate([up_sample_slice1, up_sample_slice2])
upsampled_first_tensor[np.ix_(*up_sample_slices)] = subsampled_first_tensor
upsampled_weights_list.append(upsampled_first_tensor)
# Process the other tensors
if len(weights_list) > 1:
for j in range(1, len(weights_list)):
if init is None or init[j] == 'gaussian':
upsampled_tensor = np.random.normal(loc=mean, scale=stddev, size=out_shape[axes[j - 1]])
elif init[j] == 'zeros':
upsampled_tensor = np.zeros(out_shape[axes[j - 1]])
else:
raise ValueError(
"Valid initializations are 'gaussian' and 'zeros', but received '{}'.".format(init[j]))
this_up_sample_slices = [up_sample_slices[i] for i in
axes[j - 1]] # Get the up-sampling slices for this tensor.
upsampled_tensor[np.ix_(*this_up_sample_slices)] = subsampled_weights_list[j]
upsampled_weights_list.append(upsampled_tensor)
return upsampled_weights_list
else:
return subsampled_weights_list
| [
"numpy.random.normal",
"xml.etree.ElementTree.parse",
"torch.load",
"os.path.join",
"numpy.ix_",
"torch.from_numpy",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"torch.save",
"shutil.copy",
"numpy.concatenate",
"numpy.expand_dims",
"cv2.imread",
"torch.FloatTensor",
"numpy.arange"
] | [((1037, 1070), 'os.path.join', 'osp.join', (['HOME', '"""data/VOCdevkit/"""'], {}), "(HOME, 'data/VOCdevkit/')\n", (1045, 1070), True, 'import os.path as osp\n'), ((13493, 13541), 'torch.load', 'torch.load', (['destination_path'], {'map_location': '"""cpu"""'}), "(destination_path, map_location='cpu')\n", (13503, 13541), False, 'import torch\n'), ((15782, 15836), 'torch.save', 'torch.save', (['weights_destination_file', 'destination_path'], {}), '(weights_destination_file, destination_path)\n', (15792, 15836), False, 'import torch\n'), ((7259, 7298), 'os.path.join', 'osp.join', (['"""%s"""', '"""Annotations"""', '"""%s.xml"""'], {}), "('%s', 'Annotations', '%s.xml')\n", (7267, 7298), True, 'import os.path as osp\n'), ((7323, 7361), 'os.path.join', 'osp.join', (['"""%s"""', '"""JPEGImages"""', '"""%s.jpg"""'], {}), "('%s', 'JPEGImages', '%s.jpg')\n", (7331, 7361), True, 'import os.path as osp\n'), ((8759, 8793), 'cv2.imread', 'cv2.imread', (['(self._imgpath % img_id)'], {}), '(self._imgpath % img_id)\n', (8769, 8793), False, 'import cv2\n'), ((9959, 10011), 'cv2.imread', 'cv2.imread', (['(self._imgpath % img_id)', 'cv2.IMREAD_COLOR'], {}), '(self._imgpath % img_id, cv2.IMREAD_COLOR)\n', (9969, 10011), False, 'import cv2\n'), ((10084, 10141), 'cv2.imread', 'cv2.imread', (['(self._imgpath % imageset_id)', 'cv2.IMREAD_COLOR'], {}), '(self._imgpath % imageset_id, cv2.IMREAD_COLOR)\n', (10094, 10141), False, 'import cv2\n'), ((13140, 13168), 'os.path.isfile', 'osp.isfile', (['destination_path'], {}), '(destination_path)\n', (13150, 13168), True, 'import os.path as osp\n'), ((13219, 13262), 'torch.load', 'torch.load', (['source_path'], {'map_location': '"""cpu"""'}), "(source_path, map_location='cpu')\n", (13229, 13262), False, 'import torch\n'), ((13314, 13356), 'shutil.copy', 'shutil.copy', (['source_path', 'destination_path'], {}), '(source_path, destination_path)\n', (13325, 13356), False, 'import shutil\n'), ((13417, 13460), 'torch.load', 'torch.load', (['source_path'], {'map_location': '"""cpu"""'}), "(source_path, map_location='cpu')\n", (13427, 13460), False, 'import torch\n'), ((15627, 15656), 'torch.FloatTensor', 'torch.FloatTensor', (['new_kernel'], {}), '(new_kernel)\n', (15644, 15656), False, 'import torch\n'), ((15706, 15733), 'torch.FloatTensor', 'torch.FloatTensor', (['new_bias'], {}), '(new_bias)\n', (15723, 15733), False, 'import torch\n'), ((25195, 25214), 'numpy.array', 'np.array', (['out_shape'], {}), '(out_shape)\n', (25203, 25214), True, 'import numpy as np\n'), ((9088, 9104), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (9096, 9104), True, 'import numpy as np\n'), ((24672, 24696), 'numpy.ix_', 'np.ix_', (['*sampling_slices'], {}), '(*sampling_slices)\n', (24678, 24696), True, 'import numpy as np\n'), ((25339, 25395), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'stddev', 'size': 'out_shape'}), '(loc=mean, scale=stddev, size=out_shape)\n', (25355, 25395), True, 'import numpy as np\n'), ((25770, 25782), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (25779, 25782), True, 'import numpy as np\n'), ((26018, 26031), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (26026, 26031), True, 'import numpy as np\n'), ((26273, 26325), 'numpy.concatenate', 'np.concatenate', (['[up_sample_slice1, up_sample_slice2]'], {}), '([up_sample_slice1, up_sample_slice2])\n', (26287, 26325), True, 'import numpy as np\n'), ((26357, 26382), 'numpy.ix_', 'np.ix_', (['*up_sample_slices'], {}), '(*up_sample_slices)\n', (26363, 26382), True, 'import 
numpy as np\n'), ((7579, 7612), 'os.path.join', 'osp.join', (['self.root', "('VOC' + year)"], {}), "(self.root, 'VOC' + year)\n", (7587, 7612), True, 'import os.path as osp\n'), ((7876, 7909), 'os.path.join', 'osp.join', (['self.root', "('VOC' + year)"], {}), "(self.root, 'VOC' + year)\n", (7884, 7909), True, 'import os.path as osp\n'), ((8701, 8734), 'xml.etree.ElementTree.parse', 'ET.parse', (['(self._annopath % img_id)'], {}), '(self._annopath % img_id)\n', (8709, 8734), True, 'import xml.etree.ElementTree as ET\n'), ((10630, 10663), 'xml.etree.ElementTree.parse', 'ET.parse', (['(self._annopath % img_id)'], {}), '(self._annopath % img_id)\n', (10638, 10663), True, 'import xml.etree.ElementTree as ET\n'), ((10816, 10849), 'xml.etree.ElementTree.parse', 'ET.parse', (['(self._annopath % img_id)'], {}), '(self._annopath % img_id)\n', (10824, 10849), True, 'import xml.etree.ElementTree as ET\n'), ((14556, 14591), 'numpy.concatenate', 'np.concatenate', (['subsampling_indices'], {}), '(subsampling_indices)\n', (14570, 14591), True, 'import numpy as np\n'), ((22713, 22736), 'numpy.array', 'np.array', (['sampling_inst'], {}), '(sampling_inst)\n', (22721, 22736), True, 'import numpy as np\n'), ((23063, 23086), 'numpy.array', 'np.array', (['sampling_inst'], {}), '(sampling_inst)\n', (23071, 23086), True, 'import numpy as np\n'), ((25466, 25485), 'numpy.zeros', 'np.zeros', (['out_shape'], {}), '(out_shape)\n', (25474, 25485), True, 'import numpy as np\n'), ((7646, 7700), 'os.path.join', 'osp.join', (['rootpath', '"""ImageSets"""', '"""Main"""', "(name + '.txt')"], {}), "(rootpath, 'ImageSets', 'Main', name + '.txt')\n", (7654, 7700), True, 'import os.path as osp\n'), ((9405, 9435), 'numpy.expand_dims', 'np.expand_dims', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (9419, 9435), True, 'import numpy as np\n'), ((9454, 9475), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (9470, 9475), False, 'import torch\n'), ((14412, 14441), 'numpy.array', 'np.array', (['classes_of_interest'], {}), '(classes_of_interest)\n', (14420, 14441), True, 'import numpy as np\n'), ((23419, 23443), 'numpy.arange', 'np.arange', (['sampling_inst'], {}), '(sampling_inst)\n', (23428, 23443), True, 'import numpy as np\n'), ((26105, 26150), 'numpy.arange', 'np.arange', (['(1)', 'upsampled_first_tensor.shape[i]'], {}), '(1, upsampled_first_tensor.shape[i])\n', (26114, 26150), True, 'import numpy as np\n'), ((26690, 26759), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'stddev', 'size': 'out_shape[axes[j - 1]]'}), '(loc=mean, scale=stddev, size=out_shape[axes[j - 1]])\n', (26706, 26759), True, 'import numpy as np\n'), ((27249, 27279), 'numpy.ix_', 'np.ix_', (['*this_up_sample_slices'], {}), '(*this_up_sample_slices)\n', (27255, 27279), True, 'import numpy as np\n'), ((23696, 23709), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (23704, 23709), True, 'import numpy as np\n'), ((24001, 24051), 'numpy.concatenate', 'np.concatenate', (['[sampling_slice1, sampling_slice2]'], {}), '([sampling_slice1, sampling_slice2])\n', (24015, 24051), True, 'import numpy as np\n'), ((24237, 24269), 'numpy.arange', 'np.arange', (['first_tensor.shape[i]'], {}), '(first_tensor.shape[i])\n', (24246, 24269), True, 'import numpy as np\n'), ((25057, 25086), 'numpy.ix_', 'np.ix_', (['*this_sampling_slices'], {}), '(*this_sampling_slices)\n', (25063, 25086), True, 'import numpy as np\n'), ((26840, 26872), 'numpy.zeros', 'np.zeros', (['out_shape[axes[j - 1]]'], {}), '(out_shape[axes[j - 1]])\n', (26848, 
26872), True, 'import numpy as np\n'), ((23896, 23931), 'numpy.arange', 'np.arange', (['(1)', 'first_tensor.shape[i]'], {}), '(1, first_tensor.shape[i])\n', (23905, 23931), True, 'import numpy as np\n')] |
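A hedged aside on the weight sub-sampling in the record above: for each classification (`conf`) layer, `coco_to_voc_weights` keeps only the 21 `classes_of_interest` channels within every per-box block of 81 COCO class scores. The sketch below reproduces just that index computation; the 324-channel figure is taken from the kernel-shape comment in the code.

```python
import numpy as np

# Sub-sampling index computation per "conf" layer (illustrative sketch).
# 324 output channels / 81 COCO classes = 4 default boxes per spatial location.
n_classes_source = 81
classes_of_interest = [0, 5, 2, 15, 9, 40, 6, 3, 16, 57, 20, 61, 17, 18, 4, 1,
                       59, 19, 58, 7, 63]
out_channels = 324

subsampling_indices = []
for i in range(out_channels // n_classes_source):
    # keep the 21 VOC(+background) channels inside box i's block of 81 scores
    subsampling_indices.append(np.array(classes_of_interest) + i * n_classes_source)
subsampling_indices = np.concatenate(subsampling_indices)

print(subsampling_indices.shape)  # (84,) -> 4 boxes x 21 retained channels
```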
from typing import Callable, Optional
import tensorflow as tf
from tfrng.core import global_generator_context
from tfrng.generator import GeneratorGenerator
from tfrng.stateless import StatelessGenerator
Transform = Callable[[tf.data.Dataset], tf.data.Dataset]
def with_seed(seed: Optional[int] = None, size: Optional[int] = None) -> Transform:
"""
Get a transform that zips a random seed along with dataset elements.
Note the seeds for each element of the transformed dataset are generated
pseudo-randomly and independently of the dataset.
Example usage:
```python
x = tf.data.Dataset.range(5)
x = x.apply(tfrng.data.with_seed(0, size=3))
print(x.element_spec)
# (TensorSpec(shape=(3,), dtype=tf.int64, name=None),
# TensorSpec(shape=(), dtype=tf.int64, name=None))
```
Args:
seed: seed used in tf.data.experimental.RandomDataset which generates element
seeds.
size: size of each element seed.
Returns:
A transform that, when applied to a dataset with elements `element`, returns
another dataset with elements `(element_seed, element)`, where `element_seed`
has shape `[] if size is None else [size]`.
"""
def transform(dataset: tf.data.Dataset) -> tf.data.Dataset:
random_dataset = tf.data.experimental.RandomDataset(seed)
if size is not None:
random_dataset = random_dataset.batch(size, drop_remainder=True)
return tf.data.Dataset.zip((random_dataset, dataset))
return transform
def stateless_map(
map_func: Callable,
num_parallel_calls: int = tf.data.experimental.AUTOTUNE,
deterministic: Optional[bool] = None,
seed: Optional[int] = None,
) -> Transform:
"""
Similar to `tf.data.Dataset.map` but in a `StatelessGenerator` context.
Note the resulting dataset will be deterministic if `deterministic` is True.
Separate iterations over the dataset will yield the same results from tfrng ops.
Example usage:
```python
def map_func(x):
return x + tfrng.uniform(())
map_kwargs = dict(
num_parallel_calls=tf.data.experimental.AUTOTUNE, deterministic=True)
dataset = tf.data.Dataset.range(8, output_type=tf.float32).apply(
stateless_map(map_func, **map_kwargs))
```
Arguments:
map_func, num_parallel_calls, deterministic: see `tf.data.Dataset.map`
seed: value used for `StatelessGenerator`
Returns:
A transform that can be applied to a dataset using `tf.data.Dataset.apply`.
"""
def actual_map_func(element_seed, element):
with global_generator_context(StatelessGenerator(element_seed)):
if isinstance(element, tuple):
return map_func(*element)
return map_func(element)
def transform(dataset: tf.data.Dataset) -> tf.data.Dataset:
return dataset.apply(with_seed(seed, 2)).map(
actual_map_func,
num_parallel_calls=num_parallel_calls,
deterministic=deterministic,
)
return transform
def generator_map(
map_func, num_parallel_calls: int = 1, rng: Optional[tf.random.Generator] = None
) -> Transform:
"""
Similar to `tf.data.Dataset.map` but in a `GeneratorGenerator` context.
Note the resulting dataset will not be deterministic if `num_parallel_calls > 1`.
Separate iterations over the dataset will yield different results from tfrng ops.
Arguments:
map_func, num_parallel_calls: see `tf.data.Dataset.map`
rng: Generator instance.
Returns:
A transform that can be applied to a dataset using `tf.data.Dataset.apply`.
"""
def actual_map_func(*element):
with global_generator_context(
GeneratorGenerator(tf.random.get_global_generator() if rng is None else rng)
):
return map_func(*element)
def transform(dataset: tf.data.Dataset) -> tf.data.Dataset:
return dataset.map(
actual_map_func,
num_parallel_calls=num_parallel_calls,
deterministic=num_parallel_calls == 1,
)
return transform
| [
"tfrng.stateless.StatelessGenerator",
"tensorflow.data.Dataset.zip",
"tensorflow.random.get_global_generator",
"tensorflow.data.experimental.RandomDataset"
] | [((1319, 1359), 'tensorflow.data.experimental.RandomDataset', 'tf.data.experimental.RandomDataset', (['seed'], {}), '(seed)\n', (1353, 1359), True, 'import tensorflow as tf\n'), ((1481, 1527), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(random_dataset, dataset)'], {}), '((random_dataset, dataset))\n', (1500, 1527), True, 'import tensorflow as tf\n'), ((2657, 2689), 'tfrng.stateless.StatelessGenerator', 'StatelessGenerator', (['element_seed'], {}), '(element_seed)\n', (2675, 2689), False, 'from tfrng.stateless import StatelessGenerator\n'), ((3798, 3830), 'tensorflow.random.get_global_generator', 'tf.random.get_global_generator', ([], {}), '()\n', (3828, 3830), True, 'import tensorflow as tf\n')] |
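Unlike `with_seed` and `stateless_map`, the `generator_map` transform above has no usage example in its docstring. A hedged sketch, mirroring the `stateless_map` example and assuming the module is importable as `tfrng.data` (as the `with_seed` docstring suggests) and that `tfrng.uniform` exists as used there, might look like this.

```python
import tensorflow as tf
import tfrng
from tfrng.data import generator_map  # module path inferred from the with_seed docstring


def map_func(x):
    # tfrng.uniform dispatches to the generator installed by generator_map's context
    return x + tfrng.uniform(())


dataset = tf.data.Dataset.range(8, output_type=tf.float32).apply(
    generator_map(map_func, num_parallel_calls=1)
)
for value in dataset:
    print(float(value))  # jittered values; they differ between iterations of the dataset
```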
from model.contact import Contact
testdata = [
Contact(firstname="", middlename="", secondname="", nickname="", title="", company="", address_1="",
homephone="", mobilephone="", workphone="", mail_1="", mail_2="", mail_3="", address_2="",
secondaryphone="", notes=""),
Contact(firstname="firstname", middlename="middlename", secondname="secondname", nickname="nickname",
title="title", company="company", address_1="address_1", homephone="homephone",
mobilephone="mobilephone", workphone="workphone", mail_1="<EMAIL>", mail_2="<EMAIL>",
mail_3="<EMAIL>", address_2="address_2", secondaryphone="secondaryphone", notes="notes")
]
| [
"model.contact.Contact"
] | [((63, 291), 'model.contact.Contact', 'Contact', ([], {'firstname': '""""""', 'middlename': '""""""', 'secondname': '""""""', 'nickname': '""""""', 'title': '""""""', 'company': '""""""', 'address_1': '""""""', 'homephone': '""""""', 'mobilephone': '""""""', 'workphone': '""""""', 'mail_1': '""""""', 'mail_2': '""""""', 'mail_3': '""""""', 'address_2': '""""""', 'secondaryphone': '""""""', 'notes': '""""""'}), "(firstname='', middlename='', secondname='', nickname='', title='',\n company='', address_1='', homephone='', mobilephone='', workphone='',\n mail_1='', mail_2='', mail_3='', address_2='', secondaryphone='', notes='')\n", (70, 291), False, 'from model.contact import Contact\n'), ((341, 719), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""firstname"""', 'middlename': '"""middlename"""', 'secondname': '"""secondname"""', 'nickname': '"""nickname"""', 'title': '"""title"""', 'company': '"""company"""', 'address_1': '"""address_1"""', 'homephone': '"""homephone"""', 'mobilephone': '"""mobilephone"""', 'workphone': '"""workphone"""', 'mail_1': '"""<EMAIL>"""', 'mail_2': '"""<EMAIL>"""', 'mail_3': '"""<EMAIL>"""', 'address_2': '"""address_2"""', 'secondaryphone': '"""secondaryphone"""', 'notes': '"""notes"""'}), "(firstname='firstname', middlename='middlename', secondname=\n 'secondname', nickname='nickname', title='title', company='company',\n address_1='address_1', homephone='homephone', mobilephone='mobilephone',\n workphone='workphone', mail_1='<EMAIL>', mail_2='<EMAIL>', mail_3=\n '<EMAIL>', address_2='address_2', secondaryphone='secondaryphone',\n notes='notes')\n", (348, 719), False, 'from model.contact import Contact\n')] |
import json
from typing import AnyStr, Tuple
from cli.controller import CommandLineController
from core.service.mock_schema import mock_book_author_publisher
from core.service.output_driver.file_driver.json_file import JsonOutputDriver
class TestCli:
"""Test the CLI functionality."""
def test_json(self,
saved_mock_project_file: Tuple[int, AnyStr],
temp_output_file: Tuple[int, AnyStr]):
"""Test JSON data generation."""
# project file
project_fd, project_file_path = saved_mock_project_file
# output file
output_fd, output_file_path = temp_output_file
# run the generator
controller = CommandLineController()
controller.execute([
JsonOutputDriver.cli_command,
project_file_path,
output_file_path
])
# check the output
with open(output_file_path, 'r') as output_file:
generated_obj = json.load(output_file)
meta = mock_book_author_publisher()
for table_name in meta.tables:
assert table_name in generated_obj
assert generated_obj[table_name]
| [
"json.load",
"cli.controller.CommandLineController",
"core.service.mock_schema.mock_book_author_publisher"
] | [((692, 715), 'cli.controller.CommandLineController', 'CommandLineController', ([], {}), '()\n', (713, 715), False, 'from cli.controller import CommandLineController\n'), ((1009, 1037), 'core.service.mock_schema.mock_book_author_publisher', 'mock_book_author_publisher', ([], {}), '()\n', (1035, 1037), False, 'from core.service.mock_schema import mock_book_author_publisher\n'), ((971, 993), 'json.load', 'json.load', (['output_file'], {}), '(output_file)\n', (980, 993), False, 'import json\n')] |
# Exponential generating function
from sympy import *
from unittest import TestCase, main
from random import random
def genFunction(expr,x,n):
m = symbols('m')
try:
return Sum(
expr.subs(n,m)*(x**m)/factorial(m),(m,0,oo)
).doit()
except:
return nan
class testSuite(TestCase):
def test1(self):
x, n = symbols('x n')
self.assertTrue(
genFunction(n**2,x,n) == x*(x+1)*exp(x)
)
def test2(self):
x, n = symbols('x n')
self.assertTrue(
genFunction((-1)**n,x,n) == exp(-x)
)
def test3(self):
n = symbols('n')
k = random()
self.assertTrue(
abs(genFunction(factorial(n),k,n) - N(1/(1-k))) < 10**(-3)
)
if __name__ == '__main__': main()
| [
"unittest.main",
"random.random"
] | [((801, 807), 'unittest.main', 'main', ([], {}), '()\n', (805, 807), False, 'from unittest import TestCase, main\n'), ((654, 662), 'random.random', 'random', ([], {}), '()\n', (660, 662), False, 'from random import random\n')] |
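For clarity (a restatement, not from the record): `genFunction(a, x, n)` evaluates the exponential generating function of the sequence $a_m$,

$$\hat{A}(x) = \sum_{m=0}^{\infty} a_m \frac{x^m}{m!},$$

and the three tests check the identities $\sum_{m\ge 0} m^2 x^m/m! = x(x+1)e^x$, $\sum_{m\ge 0} (-1)^m x^m/m! = e^{-x}$, and $\sum_{m\ge 0} m!\,x^m/m! = 1/(1-x)$, the last evaluated numerically at a random $x \in [0, 1)$.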
# Copyright (c) 2020 Civic Knowledge. This file is licensed under the terms of the
# MIT license included in this distribution as LICENSE
import numpy as np
def vectors_length(v):
"""Return the lengths of an array, where each row is a vector"""
return np.sqrt(np.sum(np.square(v), axis=1, keepdims=True).astype(float))
def vectors_normalize(v):
"""Normalize an array of vectors, in a 2-d array. """
return v/vectors_length(v)
def vector_length(v):
"""Return the length of a single vector"""
return np.sqrt(np.sum(np.square(v)))
def vector_normalize(v):
"""Normalize a single vector"""
return v/vector_length(v)
def _rand_round(v):
"""Randomly round a value up or down to the nearest integer, with probabilities based on the
value. Generally, np.mean([rand_round(v) for _ in range(N)]) == v, for large enough N
"""
if v == 0:
return v
f, c = np.floor(v), np.ceil(v)
try:
return np.random.choice([f, c], p=[c - v, v - f])
except:
print(v, f, c)
raise
rand_round = np.vectorize(_rand_round) | [
"numpy.ceil",
"numpy.random.choice",
"numpy.floor",
"numpy.square",
"numpy.vectorize"
] | [((1066, 1091), 'numpy.vectorize', 'np.vectorize', (['_rand_round'], {}), '(_rand_round)\n', (1078, 1091), True, 'import numpy as np\n'), ((911, 922), 'numpy.floor', 'np.floor', (['v'], {}), '(v)\n', (919, 922), True, 'import numpy as np\n'), ((924, 934), 'numpy.ceil', 'np.ceil', (['v'], {}), '(v)\n', (931, 934), True, 'import numpy as np\n'), ((960, 1002), 'numpy.random.choice', 'np.random.choice', (['[f, c]'], {'p': '[c - v, v - f]'}), '([f, c], p=[c - v, v - f])\n', (976, 1002), True, 'import numpy as np\n'), ((543, 555), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (552, 555), True, 'import numpy as np\n'), ((278, 290), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (287, 290), True, 'import numpy as np\n')] |
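A brief hedged check of the property stated in `_rand_round`'s docstring (the mean of many randomly rounded copies recovers the original value). The helpers are restated in simplified form so the snippet runs on its own; the value 2.3, the seed, and the sample size are made up.

```python
import numpy as np


def _rand_round(v):
    # same logic as above, without the debugging try/except
    if v == 0:
        return v
    f, c = np.floor(v), np.ceil(v)
    return np.random.choice([f, c], p=[c - v, v - f])


rand_round = np.vectorize(_rand_round)

np.random.seed(0)                         # arbitrary seed, for reproducibility only
samples = rand_round(np.full(10_000, 2.3))  # made-up value
print(np.unique(samples))                 # [2. 3.]
print(round(samples.mean(), 2))            # close to 2.3, as the docstring promises
```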
r"""
Radon/CH4 Forward Model
"""
# Standard Library imports
import argparse
import gzip
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import os
import pandas as pd
import sys
import xarray as xr
# Third party imports
from collections import OrderedDict
from datetime import datetime
from sklearn import linear_model
# Semi-local imports
import name_qch4_couple.io
import name_qch4_couple.name
import name_qch4_couple.plot_h2
# Local imports
import routines
import chem_ch4_validation
import chem_co
'''
# Argument Parser
parser = argparse.ArgumentParser()
parser.add_argument("-date", required=True)
parser.add_argument("-odir", required=True)
args = parser.parse_args()
date = args.date
odir = args.odir
'''
date = '2018-01'
date_nodash = date.replace('-', '')
# Dates
dates_tHour = pd.date_range(
pd.to_datetime(date),
pd.to_datetime(date) + pd.DateOffset(months=1),
closed='left',
freq='1H'
)
# Grid
grid_info = routines.define_grid()
inv_reg_map0 = grid_info['inv_reg_map']
nlat = grid_info['nlat']
nlon = grid_info['nlon']
area = grid_info['area']
grid_centre = grid_info['grid_centre']
grid_vertex = grid_info['grid_vertex']
inv_reg_uniq = grid_info['inv_reg_uniq']
# Standard atmospheric conditions
p_std = 1013.25
T_std = 15
# =============================================================================
# CH4
# =============================================================================
M_CH4 = 16.043 # g mol-1 - IUPAC
M_H2 = 2.016
# Q - CH4
Q = chem_ch4_validation.read_Q(dates_tHour)
# Dilution matrix - CH4 mhd
Dfile_H2 = (
'inputs/footprints_hfd/'
f'HFD-100magl_UKV_EUROPE_{date_nodash}.nc'
)
with xr.open_dataset(Dfile_H2) as ds_read:
with ds_read.load() as Din:
D = Din.fp.transpose('time', 'lat', 'lon').values
# baseline
def read_baseline(timestamps):
date = timestamps[0].strftime('%Y-%m')
year = timestamps[0].strftime('%Y')
chi0file = (
'outputs/validation_ch4/baseline-MHD_10magl-ch4-2018.nc'
)
with xr.open_dataset(chi0file) as ds_read: #put as
with ds_read.load() as ds:
chi0 = ds.chi_CH4.sel(time=date).to_series()
return chi0
chi0 = read_baseline(dates_tHour)
'''
# modelled methane mhd
mod_CH4 = pd.Series(
chi0.values + (D * Q).sum((1, 2)) / M_CH4 * 1e9,
index=dates_tHour
)
'''
# for conversion
mod_CH4 = pd.Series(
(D * Q).sum((1, 2)) / M_CH4 * 1e9,
index=dates_tHour
)
mod_H2 = pd.Series(
(D * Q).sum((1, 2)) / M_H2 * 1e9,
index=dates_tHour
)
# obs mhd
#obs_ch4_mhd, sigma_obs_H2_mhd = chem_ch4_validation.read_obs(dates_tHour, "MHD_10magl") # could add st dev
# to save
#pd.concat([
# pd.Series(mod_CH4, index=dates_tHour, name='chi_CH4'),
# ], axis=1).to_csv(os.path.join(odir, f'hfd_ch4_{date}.csv'))
#'''
# Plots
fig = {}
ax = {}
colours = {
'obs_mhd': '#000000',
'mod_ch4': '#0000FF',
}
fig_param = {
'w': 6, 'h': 3,
'px0': 0.80, 'py0': 0.50,
'pw': 5.15, 'ph': 2.45,
'ylblx': 0.05, 'ylbly': 1.5, # left, centre aligned
'fontsize': 8,
}
plt.close('all')
# Concentration
fig['main'] = plt.figure(figsize=(fig_param['w'], fig_param['h']), dpi=300)
for i in ['CH4']:
fig['main'].clf()
ax['main'] = name_qch4_couple.plot_h2.generic(
fig=fig['main'],
idata={
#'hfd': [
# 'line',
# [obs_ch4_mhd.index, np.array(obs_ch4_mhd), '-'],
# {'c': colours['obs_mhd'], 'lw': 0.5, 'label': 'Measured HFD'}
# ],
'mod_h2': [
'line',
[mod_H2.index, np.array(mod_H2), '-'],
{'c': colours['obs_mhd'], 'lw': 0.5, 'label': u'Modelled H$_{2}$ - $\mu$:317.24'}
],
'mod_ch4': [
'line',
[mod_CH4.index, np.array(mod_CH4), '-'],
{'c': colours['mod_ch4'], 'lw': 0.5, 'label': u'Modelled CH$_{4}$ - $\mu$:39.86'}
],
},
texts=[
{
'x': fig_param['ylblx'] / fig_param['w'],
'y': fig_param['ylbly'] / fig_param['h'],
's': (
u'$\chi$ (nmol mol$^{-1}$)'
),
'ha': 'left', 'va': 'center',
'size': fig_param['fontsize'], 'rotation': 90
}
],
xlim=[
pd.to_datetime(date),
pd.to_datetime(date) + pd.DateOffset(months=1),
],
ylim=(
[0., 2000.]
),
yticks=(
np.arange(0., 2000., 200.)
),
tick_fontsize=fig_param['fontsize'],
loc_plot=[
fig_param['px0'] / fig_param['w'],
fig_param['py0'] / fig_param['h'],
fig_param['pw'] / fig_param['w'],
fig_param['ph'] / fig_param['h']
],
xtick_params=[
True,
mdates.DateFormatter('%Y-%m-%d'),
mdates.WeekdayLocator(byweekday=6),
]
)
for l in ax['main'].get_xticklabels():
l.set_ha("right")
l.set_rotation(30)
ax['main'].legend(
loc='upper right', ncol=4, fontsize=fig_param['fontsize']
)
fig['main'].savefig(f'outputs/validation_ch4/conversion_{i}.png')
#'''
| [
"matplotlib.dates.WeekdayLocator",
"numpy.arange",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.close",
"numpy.array",
"routines.define_grid",
"matplotlib.pyplot.figure",
"pandas.DateOffset",
"xarray.open_dataset",
"chem_ch4_validation.read_Q",
"pandas.to_datetime"
] | [((999, 1021), 'routines.define_grid', 'routines.define_grid', ([], {}), '()\n', (1019, 1021), False, 'import routines\n'), ((1549, 1588), 'chem_ch4_validation.read_Q', 'chem_ch4_validation.read_Q', (['dates_tHour'], {}), '(dates_tHour)\n', (1575, 1588), False, 'import chem_ch4_validation\n'), ((3175, 3191), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3184, 3191), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3284), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': "(fig_param['w'], fig_param['h'])", 'dpi': '(300)'}), "(figsize=(fig_param['w'], fig_param['h']), dpi=300)\n", (3233, 3284), True, 'import matplotlib.pyplot as plt\n'), ((866, 886), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (880, 886), True, 'import pandas as pd\n'), ((1719, 1744), 'xarray.open_dataset', 'xr.open_dataset', (['Dfile_H2'], {}), '(Dfile_H2)\n', (1734, 1744), True, 'import xarray as xr\n'), ((892, 912), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (906, 912), True, 'import pandas as pd\n'), ((915, 938), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (928, 938), True, 'import pandas as pd\n'), ((2090, 2115), 'xarray.open_dataset', 'xr.open_dataset', (['chi0file'], {}), '(chi0file)\n', (2105, 2115), True, 'import xarray as xr\n'), ((4679, 4708), 'numpy.arange', 'np.arange', (['(0.0)', '(2000.0)', '(200.0)'], {}), '(0.0, 2000.0, 200.0)\n', (4688, 4708), True, 'import numpy as np\n'), ((4497, 4517), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (4511, 4517), True, 'import pandas as pd\n'), ((5038, 5070), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (5058, 5070), True, 'import matplotlib.dates as mdates\n'), ((5084, 5118), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'byweekday': '(6)'}), '(byweekday=6)\n', (5105, 5118), True, 'import matplotlib.dates as mdates\n'), ((4532, 4552), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (4546, 4552), True, 'import pandas as pd\n'), ((4555, 4578), 'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (4568, 4578), True, 'import pandas as pd\n'), ((3709, 3725), 'numpy.array', 'np.array', (['mod_H2'], {}), '(mod_H2)\n', (3717, 3725), True, 'import numpy as np\n'), ((3931, 3948), 'numpy.array', 'np.array', (['mod_CH4'], {}), '(mod_CH4)\n', (3939, 3948), True, 'import numpy as np\n')] |
# This is Hydroponic project in MicroPython with the ESP32 board.
# Using devices are SSD1306 OLED, DS18B20, BME280, and Touch Pin.
#
# Copyright (c) 2020 ks-tec
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from machine import I2C, Pin, TouchPad
import os, sys, machine, onewire, ubinascii, ujson, utime, _thread
from lib import ssd1306, bme280, ds18, relay, waterlevel, util
from resource import splashicon
# application setting file
CONFIG_FILE = "hydroponic.json"
# ==================== Main Functions ====================
def main():
"""
Main function for Hydroponic system.
"""
splash_screen()
utime.sleep_ms(DISPLAY_WAITING_SPLASH)
check_platform()
utime.sleep_ms(DISPLAY_WAITING_PLATFORM)
# thread start
_thread.start_new_thread(display_callback, (1, OLED_INTERVAL - ds18.reading_wait))
_thread.start_new_thread(wsupply_callback, (2, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL))
# ==================== Callback Functions ====================
def display_callback(id, interval_ms):
"""
  Callback function that reads values from the BME280, DS18x20 and the water level detector.
  After that, the values are shown on the OLED as listed below.
Args:
id : thread id
interval_ms : interval time to repeat this function
"""
while True:
oled.fill(0)
oled.text("[air]", 0, 0) # [air]
oled.text("T=" + bme.values[0], 0, 10) # - temperature
oled.text("H=" + bme.values[2], 64, 10) # - humidity
oled.text("P=" + bme.values[1], 0, 20) # - pressure
oled.text("[water]", 0, 30) # [water]
oled.text("W=" + ds18.values[0], 0, 40) # - temperature
if wlevel is not None:
oled.text("L=" + get_wlevel(), 64, 40) # - water level
oled.show()
for cnt in range(3600): # max waiting 1hour = 60min = 3600sec
utime.sleep_ms(1000)
oled.text(".", 8*cnt, 55)
oled.show()
waiting = (cnt + 1) * 1000
if interval_ms <= waiting: # waiting limit has exceeded interval_ms
break
cnt += 1
def wsupply_callback(id, interval_on_ms, interval_off_ms):
"""
Callback function for water supply relay control.
  The water supply relay switches to ON when the water level is under the water supply start level.
  The water supply relay switches to OFF when the water level is over the water supply finish level.
  The thread loop does not start and is terminated if the water supply is on and the water level detection is off.
Args:
id : thread id
interval_on_ms : interval time to detect the water level and turn on the relay
interval_off_ms : interval time to detect the water level and turn off the relay
"""
while True:
    # thread loop finishes because water supply is off in the settings
if wsupply is None:
break
    # thread loop finishes because water level detection is off in the settings
if wlevel is None:
print("=" * 20)
print("Warning @{}".format(wsupply_callback.__name__))
print(" The thread for automatic water relay control is terminated because water level dection is off.")
print("=" * 20)
break
# when the detected water level is under the water supply start level
value = get_wlevel(False)
if value < wsupply.supply_start:
print("water supply swith to ON. (L={:3.1f})".format(value))
wsupply.on()
# continue water supply until water supply finish level
while value < wsupply.supply_finish:
utime.sleep_ms(interval_off_ms)
value = get_wlevel(False)
# print("L=({})".format(value))
# when the detected water level is over the water supply finish level
wsupply.off()
print("water supply swith to OFF. (L={:3.1f})".format(value))
utime.sleep_ms(interval_on_ms)
def conv_temperature(value, unit):
"""
"""
if type(unit) is str and unit.upper() in ["C", "F"]:
raise TypeError("the type of paramter unit must be string.")
if unit.upper() == "C":
pass
elif unit.upper() == "F":
value = value * 1.8 + 32
else:
raise ValueError("")
return value
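# Example (illustrative values only): conv_temperature(25, "F") returns 77.0,
# while conv_temperature(25, "C") returns 25 unchanged.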
# ==================== Configuration Functions ====================
def load_settings(filename):
"""
Load application setting values from specified file.
The contents of the file must be in json format, and keywords are fixed.
  The read value is converted once as a string, and then re-converted to the required data type and held in each global variable.
Args:
filename : file name of setting file
Raises:
ValueError : A filename of settings is not specified.
OSError : A setting file is not exists.
"""
global DISPLAY_SPLASH_ICON, DISPLAY_WAITING_SPLASH, DISPLAY_WAITING_PLATFORM, DISPLAY_TEMPERATURE_UNIT
global OLED_PIN_SCL, OLED_PIN_SDA, OLED_ADDRESS, OLED_WIDTH, OLED_HEIGHT, OLED_INTERVAL
global BME280_PIN_SCL, BME280_PIN_SDA, BME280_ADDRESS
global DS18_PIN_DQ, DS18_ADDRESS, DS18_READING_WAIT
global WATER_LEVEL_ENABLE, WATER_LEVEL_PIN, WATER_LEVEL_SENSE_MAX, WATER_LEVEL_SENSE_MIN
global WATER_SUPPLY_ENABLE, WATER_SUPPLY_PIN, WATER_SUPPLY_START, WATER_SUPPLY_FINISH, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL
if filename is None or len(filename) == 0:
raise ValueError("An application setting file is required.")
elif filename not in os.listdir():
raise OSError("An application setting file is NOT exists.")
with open(filename) as f:
settings = ujson.load(f)
# COMMON settings
DISPLAY_SPLASH_ICON = str(settings["COMMON"]["SPLASH_ICON"]).lower()
DISPLAY_WAITING_SPLASH = int(str(settings["COMMON"]["SPLASH_WAITING"]))
DISPLAY_WAITING_PLATFORM = int(str(settings["COMMON"]["PLATFORM_WAITING"]))
DISPLAY_TEMPERATURE_UNIT = str(settings["COMMON"]["TEMPERATURE_UNIT"])
# OLED settings
OLED_PIN_SCL = int(str(settings["OLED"]["PIN_SCL"]))
OLED_PIN_SDA = int(str(settings["OLED"]["PIN_SDA"]))
OLED_ADDRESS = int(str(settings["OLED"]["ADDRESS"]))
OLED_WIDTH = int(str(settings["OLED"]["WIDTH"]))
OLED_HEIGHT = int(str(settings["OLED"]["HEIGHT"]))
OLED_INTERVAL = int(str(settings["OLED"]["DISPLAY_INTERVAL"]))
# BME280 settings
BME280_PIN_SCL = int(str(settings["BME280"]["PIN_SCL"]))
BME280_PIN_SDA = int(str(settings["BME280"]["PIN_SDA"]))
BME280_ADDRESS = int(str(settings["BME280"]["ADDRESS"]))
  # DS18B20 settings
DS18_PIN_DQ = int(str(settings["DS18X20"]["PIN_DQ"]))
DS18_ADDRESS = [int(str(addr)) for addr in settings["DS18X20"]["ADDRESS"]]
DS18_READING_WAIT = int(str(settings["DS18X20"]["READING_WAIT"]))
# WATER LEVEL SENSOR settings
WATER_LEVEL_ENABLE = util.strtobool(str(settings["WATER_LEVEL"]["IS_ENABLE"]))
WATER_LEVEL_PIN = int(str(settings["WATER_LEVEL"]["PIN_DQ"]))
WATER_LEVEL_SENSE_MAX = int(str(settings["WATER_LEVEL"]["SENSE_MAX"]))
WATER_LEVEL_SENSE_MIN = int(str(settings["WATER_LEVEL"]["SENSE_MIN"]))
# WATER SUPPLY RELAY settings
WATER_SUPPLY_ENABLE = util.strtobool(str(settings["WATER_SUPPLY"]["IS_ENABLE"]))
WATER_SUPPLY_PIN = int(str(settings["WATER_SUPPLY"]["PIN_DQ"]))
WATER_SUPPLY_START = float(str(settings["WATER_SUPPLY"]["SUPPLY_START"]))
WATER_SUPPLY_FINISH = float(str(settings["WATER_SUPPLY"]["SUPPLY_FINISH"]))
WATER_SUPPLY_ON_INTERVAL = int(str(settings["WATER_SUPPLY"]["DETECT_INTERVAL_ON"]))
WATER_SUPPLY_OFF_INTERVAL = int(str(settings["WATER_SUPPLY"]["DETECT_INTERVAL_OFF"]))
# ==================== I2C device Functions ====================
def detect_i2c_device(i2c=None, device=None, address=None):
"""
  Scan for the I2C device and show a message indicating whether it was found.
Args:
i2c : machine.I2C object
device : name of I2C device to display
address : address of I2C device
Raises:
ValueError : One of the paramters is not specified.
"""
if i2c is None:
raise ValueError("An I2C object is required.")
if address is None:
raise ValueError("A device address is required.")
if device is None or len(device) == 0:
raise ValueError("A device name is required.")
print("Detecting {} ...".format(device))
i2cDevs = i2c.scan()
for idx, dev in enumerate(i2cDevs):
if dev == address:
print(" Found {} device: ['{}']".format(device, hex(dev)))
break
else:
print(" NOT Found I2C device, check wiring of device !")
# ==================== 1-Wire device Functions ====================
def detect_ow_device(ow=None, device=None, address=None):
"""
  Scan for the 1-Wire device and show a message indicating whether it was found.
Args:
ow : machine.OneWire object
device : name of 1-Wire device to display
address : list of address for 1-Wire deviece address
Raises:
ValueError : One of the paramters is not specified.
"""
if ow is None:
raise ValueError("An ow object is required.")
if address is None:
raise ValueError("A device address is required.")
if device is None or len(device) == 0:
raise ValueError("A device name is required.")
print("Detecting {} ...".format(device))
owDevs = ow.scan()
for idx, dev in enumerate(owDevs):
addr_int = [int(r) for r in dev]
if addr_int == address:
print(" Found {} device: {}".format(device, [hex(r) for r in dev]))
break
else:
print(" NOT Found 1-Wire device, check wiring of device !")
# ==================== Platform Functions ====================
def check_platform():
"""
Check running platform, and show result to OLED.
Raises:
OSError : The running platform is not ESP32 board.
"""
platform = sys.platform
chip_id = str(ubinascii.hexlify(machine.unique_id()))[2:14]
pclk = machine.freq() // (1000 ** 2)
supported = " Supported"
if platform != "esp32":
raise OSError("Platform is esp32 board required.")
oled.fill(0)
oled.show()
oled.text(platform, 0, 0)
oled.text(supported, 0, 10)
oled.text("UID {}".format(chip_id), 0, 20)
oled.text("PCLK {}MHz".format(pclk) , 0, 30)
oled.show()
print("-" * 20)
print("PLATFORM : {}".format(platform))
print("CHIP UID : {}".format(chip_id))
print("PERIPHERAL CLOCK : {} MHz".format(pclk))
print("-" * 20)
# ==================== OLED Functions ====================
def splash_screen():
"""
Splash logo image to OLED from binary array.
Raises:
ValueError : The parameter value is not in "v" "vertical" "h" "horizontal".
"""
icon = None
if DISPLAY_SPLASH_ICON in ["vertical", "v"]:
icon = splashicon.SplashIcon.logo_v()
elif DISPLAY_SPLASH_ICON in ["horizontal", "h"]:
icon = splashicon.SplashIcon.logo_h()
else:
raise ValueError("The value of 'DISPLAY_SPLASH_ICON' can specify 'v' or 'h' only.")
dx = (oled.width - icon.logo_width) // 2
dy = (oled.height - icon.logo_height) // 2
oled.fill(0)
oled.show()
for y, fila in enumerate(icon.logo_icon):
for x, c in enumerate(fila):
oled.pixel(x + dx, y + dy, c)
oled.show()
# ==================== Water Level Functions ====================
def get_wlevel(with_unit=True):
"""
Remove units from the tuple head index value returned by WaterLevelSensor.
And returns it as a float value.
Also, it uses a lock object because it is called from within the thread.
Args:
with_unit : False is remove units, True does nothing. True is default value.
Retun:
The value part of the tuple head index value returned by WaterLevelSensor.
"""
if wlevel is None:
raise OSError("The water level dection setting is off, must be on.")
with lock:
ret_value = wlevel.values[0]
if with_unit == False:
ret_value = float(ret_value[:len(ret_value)-2])
return ret_value
# ==================== Entry Point ====================
if __name__ == "__main__":
"""
Entry point at functional execution.
"""
try:
# load configuration values
load_settings(CONFIG_FILE)
# gobal devices initialization (I2C OLED SSD1306)
i2c = I2C(scl=Pin(OLED_PIN_SCL), sda=Pin(OLED_PIN_SDA))
oled = ssd1306.SSD1306_I2C(width=OLED_WIDTH, height=OLED_HEIGHT, i2c=i2c)
detect_i2c_device(i2c, "SSD1306", OLED_ADDRESS)
# gobal devices initialization (I2C BME280)
i2c = I2C(scl=Pin(BME280_PIN_SCL), sda=Pin(BME280_PIN_SDA))
bme = bme280.BME280(i2c=i2c, unit=DISPLAY_TEMPERATURE_UNIT)
detect_i2c_device(i2c, "BME280", BME280_ADDRESS)
# gobal devices initialization (1-Wire DS18B20)
ow = onewire.OneWire(pin=Pin(DS18_PIN_DQ))
ds18 = ds18.DS18(ow=ow, reading_wait=DS18_READING_WAIT, unit=DISPLAY_TEMPERATURE_UNIT)
detect_ow_device(ds18, "DS18X20", DS18_ADDRESS)
# global devices initialization (Water Level Capacitive Sensor)
wlevel = None
if WATER_LEVEL_ENABLE == True:
tp = TouchPad(Pin(WATER_LEVEL_PIN))
wlevel = waterlevel.WaterLevelSensor(tp=tp, sense_max=WATER_LEVEL_SENSE_MAX, sense_min=WATER_LEVEL_SENSE_MIN)
# global devices initialization (Water Supply Relay)
wsupply = None
if WATER_SUPPLY_ENABLE == True:
wsupply = relay.Relay(pin=Pin(WATER_SUPPLY_PIN, mode=Pin.OUT), supply_start=WATER_SUPPLY_START, supply_finish=WATER_SUPPLY_FINISH)
wsupply.off()
# call main routine
lock = _thread.allocate_lock()
main()
except Exception as e:
print("\nAn error has occured !")
print("-" * 20)
sys.print_exception(e)
print("-" * 20)
| [
"lib.ssd1306.SSD1306_I2C",
"os.listdir",
"resource.splashicon.SplashIcon.logo_v",
"lib.ds18.DS18",
"utime.sleep_ms",
"_thread.allocate_lock",
"machine.unique_id",
"machine.freq",
"lib.bme280.BME280",
"machine.Pin",
"lib.waterlevel.WaterLevelSensor",
"sys.print_exception",
"resource.splashicon.SplashIcon.logo_h",
"_thread.start_new_thread",
"ujson.load"
] | [((1497, 1535), 'utime.sleep_ms', 'utime.sleep_ms', (['DISPLAY_WAITING_SPLASH'], {}), '(DISPLAY_WAITING_SPLASH)\n', (1511, 1535), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((1558, 1598), 'utime.sleep_ms', 'utime.sleep_ms', (['DISPLAY_WAITING_PLATFORM'], {}), '(DISPLAY_WAITING_PLATFORM)\n', (1572, 1598), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((1619, 1706), '_thread.start_new_thread', '_thread.start_new_thread', (['display_callback', '(1, OLED_INTERVAL - ds18.reading_wait)'], {}), '(display_callback, (1, OLED_INTERVAL - ds18.\n reading_wait))\n', (1643, 1706), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((1704, 1808), '_thread.start_new_thread', '_thread.start_new_thread', (['wsupply_callback', '(2, WATER_SUPPLY_ON_INTERVAL, WATER_SUPPLY_OFF_INTERVAL)'], {}), '(wsupply_callback, (2, WATER_SUPPLY_ON_INTERVAL,\n WATER_SUPPLY_OFF_INTERVAL))\n', (1728, 1808), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((4657, 4687), 'utime.sleep_ms', 'utime.sleep_ms', (['interval_on_ms'], {}), '(interval_on_ms)\n', (4671, 4687), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((6331, 6344), 'ujson.load', 'ujson.load', (['f'], {}), '(f)\n', (6341, 6344), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((10508, 10522), 'machine.freq', 'machine.freq', ([], {}), '()\n', (10520, 10522), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((11318, 11348), 'resource.splashicon.SplashIcon.logo_v', 'splashicon.SplashIcon.logo_v', ([], {}), '()\n', (11346, 11348), False, 'from resource import splashicon\n'), ((12838, 12904), 'lib.ssd1306.SSD1306_I2C', 'ssd1306.SSD1306_I2C', ([], {'width': 'OLED_WIDTH', 'height': 'OLED_HEIGHT', 'i2c': 'i2c'}), '(width=OLED_WIDTH, height=OLED_HEIGHT, i2c=i2c)\n', (12857, 12904), False, 'from lib import ssd1306, bme280, ds18, relay, waterlevel, util\n'), ((13080, 13133), 'lib.bme280.BME280', 'bme280.BME280', ([], {'i2c': 'i2c', 'unit': 'DISPLAY_TEMPERATURE_UNIT'}), '(i2c=i2c, unit=DISPLAY_TEMPERATURE_UNIT)\n', (13093, 13133), False, 'from lib import ssd1306, bme280, ds18, relay, waterlevel, util\n'), ((13298, 13377), 'lib.ds18.DS18', 'ds18.DS18', ([], {'ow': 'ow', 'reading_wait': 'DS18_READING_WAIT', 'unit': 'DISPLAY_TEMPERATURE_UNIT'}), '(ow=ow, reading_wait=DS18_READING_WAIT, unit=DISPLAY_TEMPERATURE_UNIT)\n', (13307, 13377), False, 'from lib import ssd1306, bme280, ds18, relay, waterlevel, util\n'), ((14016, 14039), '_thread.allocate_lock', '_thread.allocate_lock', ([], {}), '()\n', (14037, 14039), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((2745, 2765), 'utime.sleep_ms', 'utime.sleep_ms', (['(1000)'], {}), '(1000)\n', (2759, 2765), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((6209, 6221), 'os.listdir', 'os.listdir', ([], {}), '()\n', (6219, 6221), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((11411, 11441), 'resource.splashicon.SplashIcon.logo_h', 'splashicon.SplashIcon.logo_h', ([], {}), '()\n', (11439, 11441), False, 'from resource import splashicon\n'), ((13609, 13713), 'lib.waterlevel.WaterLevelSensor', 'waterlevel.WaterLevelSensor', ([], {'tp': 'tp', 'sense_max': 'WATER_LEVEL_SENSE_MAX', 'sense_min': 'WATER_LEVEL_SENSE_MIN'}), '(tp=tp, sense_max=WATER_LEVEL_SENSE_MAX,\n sense_min=WATER_LEVEL_SENSE_MIN)\n', (13636, 13713), False, 
'from lib import ssd1306, bme280, ds18, relay, waterlevel, util\n'), ((14139, 14161), 'sys.print_exception', 'sys.print_exception', (['e'], {}), '(e)\n', (14158, 14161), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((4381, 4412), 'utime.sleep_ms', 'utime.sleep_ms', (['interval_off_ms'], {}), '(interval_off_ms)\n', (4395, 4412), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((10471, 10490), 'machine.unique_id', 'machine.unique_id', ([], {}), '()\n', (10488, 10490), False, 'import os, sys, machine, onewire, ubinascii, ujson, utime, _thread\n'), ((12785, 12802), 'machine.Pin', 'Pin', (['OLED_PIN_SCL'], {}), '(OLED_PIN_SCL)\n', (12788, 12802), False, 'from machine import I2C, Pin, TouchPad\n'), ((12808, 12825), 'machine.Pin', 'Pin', (['OLED_PIN_SDA'], {}), '(OLED_PIN_SDA)\n', (12811, 12825), False, 'from machine import I2C, Pin, TouchPad\n'), ((13024, 13043), 'machine.Pin', 'Pin', (['BME280_PIN_SCL'], {}), '(BME280_PIN_SCL)\n', (13027, 13043), False, 'from machine import I2C, Pin, TouchPad\n'), ((13049, 13068), 'machine.Pin', 'Pin', (['BME280_PIN_SDA'], {}), '(BME280_PIN_SDA)\n', (13052, 13068), False, 'from machine import I2C, Pin, TouchPad\n'), ((13269, 13285), 'machine.Pin', 'Pin', (['DS18_PIN_DQ'], {}), '(DS18_PIN_DQ)\n', (13272, 13285), False, 'from machine import I2C, Pin, TouchPad\n'), ((13572, 13592), 'machine.Pin', 'Pin', (['WATER_LEVEL_PIN'], {}), '(WATER_LEVEL_PIN)\n', (13575, 13592), False, 'from machine import I2C, Pin, TouchPad\n'), ((13855, 13890), 'machine.Pin', 'Pin', (['WATER_SUPPLY_PIN'], {'mode': 'Pin.OUT'}), '(WATER_SUPPLY_PIN, mode=Pin.OUT)\n', (13858, 13890), False, 'from machine import I2C, Pin, TouchPad\n')] |
from typing import Any, List, Literal, TypedDict
from .FHIR_Annotation import FHIR_Annotation
from .FHIR_code import FHIR_code
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_ContactPoint import FHIR_ContactPoint
from .FHIR_DeviceDefinition_Capability import FHIR_DeviceDefinition_Capability
from .FHIR_DeviceDefinition_DeviceName import FHIR_DeviceDefinition_DeviceName
from .FHIR_DeviceDefinition_Material import FHIR_DeviceDefinition_Material
from .FHIR_DeviceDefinition_Property import FHIR_DeviceDefinition_Property
from .FHIR_DeviceDefinition_Specialization import FHIR_DeviceDefinition_Specialization
from .FHIR_DeviceDefinition_UdiDeviceIdentifier import (
FHIR_DeviceDefinition_UdiDeviceIdentifier,
)
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_Identifier import FHIR_Identifier
from .FHIR_Meta import FHIR_Meta
from .FHIR_Narrative import FHIR_Narrative
from .FHIR_ProdCharacteristic import FHIR_ProdCharacteristic
from .FHIR_ProductShelfLife import FHIR_ProductShelfLife
from .FHIR_Quantity import FHIR_Quantity
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
from .FHIR_uri import FHIR_uri
# The characteristics, operational status and capabilities of a medical-related component of a medical device.
FHIR_DeviceDefinition = TypedDict(
"FHIR_DeviceDefinition",
{
# This is a DeviceDefinition resource
"resourceType": Literal["DeviceDefinition"],
# The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes.
"id": FHIR_id,
# The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource.
"meta": FHIR_Meta,
# A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc.
"implicitRules": FHIR_uri,
# Extensions for implicitRules
"_implicitRules": FHIR_Element,
# The base language in which the resource is written.
"language": FHIR_code,
# Extensions for language
"_language": FHIR_Element,
# A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety.
"text": FHIR_Narrative,
# These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope.
"contained": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# Unique instance identifiers assigned to a device by the software, manufacturers, other organizations or owners. For example: handle ID.
"identifier": List[FHIR_Identifier],
# Unique device identifier (UDI) assigned to device label or package. Note that the Device may include multiple udiCarriers as it either may include just the udiCarrier for the jurisdiction it is sold, or for multiple jurisdictions it could have been sold.
"udiDeviceIdentifier": List[FHIR_DeviceDefinition_UdiDeviceIdentifier],
# A name of the manufacturer.
"manufacturerString": str,
# Extensions for manufacturerString
"_manufacturerString": FHIR_Element,
# A name of the manufacturer.
"manufacturerReference": FHIR_Reference,
# A name given to the device to identify it.
"deviceName": List[FHIR_DeviceDefinition_DeviceName],
# The model number for the device.
"modelNumber": FHIR_string,
# Extensions for modelNumber
"_modelNumber": FHIR_Element,
# What kind of device or device system this is.
"type": FHIR_CodeableConcept,
# The capabilities supported on a device, the standards to which the device conforms for a particular purpose, and used for the communication.
"specialization": List[FHIR_DeviceDefinition_Specialization],
# The available versions of the device, e.g., software versions.
"version": List[FHIR_string],
# Extensions for version
"_version": List[FHIR_Element],
# Safety characteristics of the device.
"safety": List[FHIR_CodeableConcept],
# Shelf Life and storage information.
"shelfLifeStorage": List[FHIR_ProductShelfLife],
# Dimensions, color etc.
"physicalCharacteristics": FHIR_ProdCharacteristic,
# Language code for the human-readable text strings produced by the device (all supported).
"languageCode": List[FHIR_CodeableConcept],
# Device capabilities.
"capability": List[FHIR_DeviceDefinition_Capability],
# The actual configuration settings of a device as it actually operates, e.g., regulation status, time properties.
"property": List[FHIR_DeviceDefinition_Property],
# An organization that is responsible for the provision and ongoing maintenance of the device.
"owner": FHIR_Reference,
# Contact details for an organization or a particular human that is responsible for the device.
"contact": List[FHIR_ContactPoint],
# A network address on which the device may be contacted directly.
"url": FHIR_uri,
# Extensions for url
"_url": FHIR_Element,
# Access to on-line information about the device.
"onlineInformation": FHIR_uri,
# Extensions for onlineInformation
"_onlineInformation": FHIR_Element,
# Descriptive information, usage information or implantation information that is not captured in an existing element.
"note": List[FHIR_Annotation],
# The quantity of the device present in the packaging (e.g. the number of devices present in a pack, or the number of devices in the same package of the medicinal product).
"quantity": FHIR_Quantity,
# The parent device it can be part of.
"parentDevice": FHIR_Reference,
# A substance used to create the material(s) of which the device is made.
"material": List[FHIR_DeviceDefinition_Material],
},
total=False,
)
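# Illustrative usage sketch (field values are invented for demonstration only):
# because the TypedDict is declared with total=False, a partial dict is valid.
#
# device_def: FHIR_DeviceDefinition = {
#     "resourceType": "DeviceDefinition",
#     "modelNumber": "ACME-100",
#     "version": ["1.0.0"],
# }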
| [
"typing.TypedDict"
] | [((1328, 2838), 'typing.TypedDict', 'TypedDict', (['"""FHIR_DeviceDefinition"""', "{'resourceType': Literal['DeviceDefinition'], 'id': FHIR_id, 'meta':\n FHIR_Meta, 'implicitRules': FHIR_uri, '_implicitRules': FHIR_Element,\n 'language': FHIR_code, '_language': FHIR_Element, 'text':\n FHIR_Narrative, 'contained': List[Any], 'extension': List[Any],\n 'modifierExtension': List[Any], 'identifier': List[FHIR_Identifier],\n 'udiDeviceIdentifier': List[FHIR_DeviceDefinition_UdiDeviceIdentifier],\n 'manufacturerString': str, '_manufacturerString': FHIR_Element,\n 'manufacturerReference': FHIR_Reference, 'deviceName': List[\n FHIR_DeviceDefinition_DeviceName], 'modelNumber': FHIR_string,\n '_modelNumber': FHIR_Element, 'type': FHIR_CodeableConcept,\n 'specialization': List[FHIR_DeviceDefinition_Specialization], 'version':\n List[FHIR_string], '_version': List[FHIR_Element], 'safety': List[\n FHIR_CodeableConcept], 'shelfLifeStorage': List[FHIR_ProductShelfLife],\n 'physicalCharacteristics': FHIR_ProdCharacteristic, 'languageCode':\n List[FHIR_CodeableConcept], 'capability': List[\n FHIR_DeviceDefinition_Capability], 'property': List[\n FHIR_DeviceDefinition_Property], 'owner': FHIR_Reference, 'contact':\n List[FHIR_ContactPoint], 'url': FHIR_uri, '_url': FHIR_Element,\n 'onlineInformation': FHIR_uri, '_onlineInformation': FHIR_Element,\n 'note': List[FHIR_Annotation], 'quantity': FHIR_Quantity,\n 'parentDevice': FHIR_Reference, 'material': List[\n FHIR_DeviceDefinition_Material]}"], {'total': '(False)'}), "('FHIR_DeviceDefinition', {'resourceType': Literal[\n 'DeviceDefinition'], 'id': FHIR_id, 'meta': FHIR_Meta, 'implicitRules':\n FHIR_uri, '_implicitRules': FHIR_Element, 'language': FHIR_code,\n '_language': FHIR_Element, 'text': FHIR_Narrative, 'contained': List[\n Any], 'extension': List[Any], 'modifierExtension': List[Any],\n 'identifier': List[FHIR_Identifier], 'udiDeviceIdentifier': List[\n FHIR_DeviceDefinition_UdiDeviceIdentifier], 'manufacturerString': str,\n '_manufacturerString': FHIR_Element, 'manufacturerReference':\n FHIR_Reference, 'deviceName': List[FHIR_DeviceDefinition_DeviceName],\n 'modelNumber': FHIR_string, '_modelNumber': FHIR_Element, 'type':\n FHIR_CodeableConcept, 'specialization': List[\n FHIR_DeviceDefinition_Specialization], 'version': List[FHIR_string],\n '_version': List[FHIR_Element], 'safety': List[FHIR_CodeableConcept],\n 'shelfLifeStorage': List[FHIR_ProductShelfLife],\n 'physicalCharacteristics': FHIR_ProdCharacteristic, 'languageCode':\n List[FHIR_CodeableConcept], 'capability': List[\n FHIR_DeviceDefinition_Capability], 'property': List[\n FHIR_DeviceDefinition_Property], 'owner': FHIR_Reference, 'contact':\n List[FHIR_ContactPoint], 'url': FHIR_uri, '_url': FHIR_Element,\n 'onlineInformation': FHIR_uri, '_onlineInformation': FHIR_Element,\n 'note': List[FHIR_Annotation], 'quantity': FHIR_Quantity,\n 'parentDevice': FHIR_Reference, 'material': List[\n FHIR_DeviceDefinition_Material]}, total=False)\n", (1337, 2838), False, 'from typing import Any, List, Literal, TypedDict\n')] |
import os
import argparse
from bilm.training import test, load_options_latest_checkpoint, load_vocab
from bilm.data import LMDataset, BidirectionalLMDataset
def top_level(args):
options, ckpt_file = load_options_latest_checkpoint(args.save_dir)
vocab_file = os.path.join(args.save_dir, 'vocabs.txt')
# load the vocab
if 'char_cnn' in options:
max_word_length = options['char_cnn']['max_characters_per_token']
else:
max_word_length = None
vocab = load_vocab(vocab_file, max_word_length)
test_prefix = args.test_prefix
kwargs = {
'test': True,
'shuffle_on_load': False,
}
if options.get('bidirectional'):
data = BidirectionalLMDataset(test_prefix, vocab, **kwargs)
else:
data = LMDataset(test_prefix, vocab, **kwargs)
test(options, ckpt_file, data, batch_size=args.batch_size)
def main():
parser = argparse.ArgumentParser(description='Compute test perplexity')
parser.add_argument('--save_dir', help='Location of checkpoint files')
parser.add_argument('--test_prefix', help='Prefix for test files')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size')
args = parser.parse_args()
    top_level(args)


if __name__ == '__main__':
    main()
| [
"bilm.training.test",
"argparse.ArgumentParser",
"os.path.join",
"bilm.data.BidirectionalLMDataset",
"bilm.training.load_options_latest_checkpoint",
"bilm.data.LMDataset",
"bilm.training.load_vocab"
] | [((205, 250), 'bilm.training.load_options_latest_checkpoint', 'load_options_latest_checkpoint', (['args.save_dir'], {}), '(args.save_dir)\n', (235, 250), False, 'from bilm.training import test, load_options_latest_checkpoint, load_vocab\n'), ((268, 309), 'os.path.join', 'os.path.join', (['args.save_dir', '"""vocabs.txt"""'], {}), "(args.save_dir, 'vocabs.txt')\n", (280, 309), False, 'import os\n'), ((489, 528), 'bilm.training.load_vocab', 'load_vocab', (['vocab_file', 'max_word_length'], {}), '(vocab_file, max_word_length)\n', (499, 528), False, 'from bilm.training import test, load_options_latest_checkpoint, load_vocab\n'), ((819, 877), 'bilm.training.test', 'test', (['options', 'ckpt_file', 'data'], {'batch_size': 'args.batch_size'}), '(options, ckpt_file, data, batch_size=args.batch_size)\n', (823, 877), False, 'from bilm.training import test, load_options_latest_checkpoint, load_vocab\n'), ((905, 967), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute test perplexity"""'}), "(description='Compute test perplexity')\n", (928, 967), False, 'import argparse\n'), ((696, 748), 'bilm.data.BidirectionalLMDataset', 'BidirectionalLMDataset', (['test_prefix', 'vocab'], {}), '(test_prefix, vocab, **kwargs)\n', (718, 748), False, 'from bilm.data import LMDataset, BidirectionalLMDataset\n'), ((774, 813), 'bilm.data.LMDataset', 'LMDataset', (['test_prefix', 'vocab'], {}), '(test_prefix, vocab, **kwargs)\n', (783, 813), False, 'from bilm.data import LMDataset, BidirectionalLMDataset\n')] |
"""ProcessFS:
deal with Process
mocked in Test
"""
# pylint: disable=C0301,W0621,C0116,R0903,E0401,W0703,W1201,missing-function-docstring,E0401,C0114,W0511,W1203,C0200,C0103,W1203
import logging
import shlex
import subprocess
from configs.config import ConfigMap
class ProcessFS:
"""Process_fs."""
DEBUG_ME = "echo"
DEBUG_ME_NOT = ""
DEBUG_Y_N = False
@classmethod
def debug_y_n(cls):
return cls.DEBUG_ME if cls.DEBUG_Y_N else cls.DEBUG_ME_NOT
@classmethod
def write_img(cls, dir_img, http_url_img):
logging.info(f"write_img {dir_img} {http_url_img}")
cmd = f"{cls.debug_y_n()} curl -o {dir_img} {http_url_img}"
subprocess.call(shlex.split(cmd))
@classmethod
def write_epub(cls, config_map: ConfigMap, dir_epub, isbn):
logging.info(f"write_epub {dir_epub} {isbn}")
cls.download_epub(config_map, isbn)
@classmethod
def download_epub(cls, config_map, isbn):
logging.info(f"download_epub {isbn}")
cmd = f"{cls.debug_y_n()} python {config_map.get_download_engine_path} --cred {config_map.get_oreilly_username}:{config_map.get_oreilly_userpassword} {isbn}"
proc = subprocess.run(cmd.split(), check=True)
logging.info(proc.stdout)
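# Illustrative usage sketch (path and URL are placeholders, not from the project):
#
#   ProcessFS.DEBUG_Y_N = True   # prefix shell commands with "echo" instead of executing them
#   ProcessFS.write_img("/tmp/cover.jpg", "http://example.com/cover.jpg")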
| [
"shlex.split",
"logging.info"
] | [((554, 606), 'logging.info', 'logging.info', (['f"""write_img {dir_img} {http_url_img}"""'], {}), "(f'write_img {dir_img} {http_url_img}')\n", (566, 606), False, 'import logging\n'), ((809, 854), 'logging.info', 'logging.info', (['f"""write_epub {dir_epub} {isbn}"""'], {}), "(f'write_epub {dir_epub} {isbn}')\n", (821, 854), False, 'import logging\n'), ((971, 1008), 'logging.info', 'logging.info', (['f"""download_epub {isbn}"""'], {}), "(f'download_epub {isbn}')\n", (983, 1008), False, 'import logging\n'), ((1238, 1263), 'logging.info', 'logging.info', (['proc.stdout'], {}), '(proc.stdout)\n', (1250, 1263), False, 'import logging\n'), ((701, 717), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (712, 717), False, 'import shlex\n')] |
""""
Created by <NAME>, based on the Master Thesis:
"A proposed method for unsupervised anomaly detection for a multivariate building dataset "
University of Bern/Neutchatel/Fribourg - 2017
Any copy of this code should be notified at <EMAIL>; you can redistribute it
and/or modify it under the terms of the MIT License.
The F.R.E.D.A project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
MIT license terms for more details.
"My work is well done to honor God at any time" <NAME>.
Mateo 6:33
The following adapts a restful API for the Historian functionality
"""
import logging
import traceback
from flask_restplus import Api
from settings.initial_settings import FLASK_DEBUG
from sqlalchemy.orm.exc import NoResultFound
log = logging.getLogger(__name__)
api = Api(version='1.0', title='Historian API',
description='API for saving data in the Historian')
@api.errorhandler
def default_error_handler(e):
message = 'An unhandled exception occurred.'
log.exception(message)
if not FLASK_DEBUG:
return {'message': message}, 500
@api.errorhandler(NoResultFound)
def database_not_found_error_handler(e):
log.warning(traceback.format_exc())
return {'message': 'A database result was required but none was found.'}, 404
| [
"logging.getLogger",
"traceback.format_exc",
"flask_restplus.Api"
] | [((907, 934), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (924, 934), False, 'import logging\n'), ((942, 1040), 'flask_restplus.Api', 'Api', ([], {'version': '"""1.0"""', 'title': '"""Historian API"""', 'description': '"""API for saving data in the Historian"""'}), "(version='1.0', title='Historian API', description=\n 'API for saving data in the Historian')\n", (945, 1040), False, 'from flask_restplus import Api\n'), ((1330, 1352), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1350, 1352), False, 'import traceback\n')] |
import sqlite3
import pandas as pd

conn = sqlite3.connect('../db.sqlite3')
pd.read_sql_query('select * from members where age >= 25', conn)
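# Hedged variant of the query above (assumes the same 'members' table): bind the
# age threshold as a parameter instead of inlining it into the SQL string.
df = pd.read_sql_query('select * from members where age >= ?', conn, params=(25,))
print(df.head())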
conn.close() | [
"pandas.read_sql_query",
"sqlite3.connect"
] | [((23, 55), 'sqlite3.connect', 'sqlite3.connect', (['"""../db.sqlite3"""'], {}), "('../db.sqlite3')\n", (38, 55), False, 'import sqlite3\n'), ((77, 141), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""select * from members where age >= 25"""', 'conn'], {}), "('select * from members where age >= 25', conn)\n", (94, 141), True, 'import pandas as pd\n')] |
# Generated by Django 3.0.8 on 2020-07-14 09:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('artist_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('title', models.CharField(max_length=100)),
('song_id', models.AutoField(primary_key=True, serialize=False)),
('year', models.DateField()),
('duration', models.DurationField()),
('artists', models.ManyToManyField(to='backend.Artist')),
],
),
migrations.CreateModel(
name='PlayList',
fields=[
('playlist_name', models.CharField(max_length=50)),
('playlist_id', models.AutoField(primary_key=True, serialize=False)),
('duration', models.DurationField()),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('last_modified', models.DateTimeField(auto_now=True)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('playlist_songs', models.ManyToManyField(to='backend.Song')),
],
),
]
| [
"django.db.models.DateField",
"django.db.models.ForeignKey",
"django.db.models.DurationField",
"django.db.models.ManyToManyField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((276, 333), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (307, 333), False, 'from django.db import migrations, models\n'), ((471, 522), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (487, 522), False, 'from django.db import migrations, models\n'), ((550, 581), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (566, 581), False, 'from django.db import migrations, models\n'), ((714, 746), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (730, 746), False, 'from django.db import migrations, models\n'), ((777, 828), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (793, 828), False, 'from django.db import migrations, models\n'), ((856, 874), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (872, 874), False, 'from django.db import migrations, models\n'), ((906, 928), 'django.db.models.DurationField', 'models.DurationField', ([], {}), '()\n', (926, 928), False, 'from django.db import migrations, models\n'), ((959, 1002), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""backend.Artist"""'}), "(to='backend.Artist')\n", (981, 1002), False, 'from django.db import migrations, models\n'), ((1147, 1178), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1163, 1178), False, 'from django.db import migrations, models\n'), ((1213, 1264), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (1229, 1264), False, 'from django.db import migrations, models\n'), ((1296, 1318), 'django.db.models.DurationField', 'models.DurationField', ([], {}), '()\n', (1316, 1318), False, 'from django.db import migrations, models\n'), ((1354, 1409), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (1374, 1409), False, 'from django.db import migrations, models\n'), ((1446, 1481), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1466, 1481), False, 'from django.db import migrations, models\n'), ((1512, 1608), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1529, 1608), False, 'from django.db import migrations, models\n'), ((1641, 1682), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""backend.Song"""'}), "(to='backend.Song')\n", (1663, 1682), False, 'from django.db import migrations, models\n')] |
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from typing import Optional
from aws_ddk.sh import run
from aws_ddk.utils import is_in_git_repository
from click import echo, secho
from cookiecutter.main import cookiecutter
_logger: logging.Logger = logging.getLogger(__name__)
def python_executable() -> str:
if sys.platform == "win32":
return "python"
return "python3"
def init_project(name: str, environment: str, template: Optional[str], generate_only: Optional[bool]) -> None:
_logger.debug(f"name: {name}")
_logger.debug(f"environment: {environment}")
_logger.debug(f"template: {template}")
_logger.debug(f"generate_only: {generate_only}")
python_exec: str = python_executable()
# Initialize from a project template
echo("Initializing AWS DDK project...")
path: str = cookiecutter(
template,
no_input=True,
extra_context={
"directory_name": name,
"environment_id": environment,
"python_executable": python_exec,
},
)
if not generate_only:
# Create git repository
if not is_in_git_repository(path):
echo("Initializing a new git repository...")
cmds = [
"git init",
"git checkout -b main",
"git add .",
"git commit --message='Initial commit' --no-gpg-sign",
]
try:
for cmd in cmds:
run(cmd, path)
except Exception:
secho(f"Failed to run `{cmd}`", blink=True, bold=True, fg="red")
# Create virtual environment (.venv)
echo(f"Creating virtual environment in `{path}`...")
cmd = f"{python_exec} -m venv '{path}/.venv'"
try:
run(cmd)
except Exception:
secho(f"Failed to run `{cmd}`", blink=True, bold=True, fg="red")
echo("Done.")
| [
"logging.getLogger",
"aws_ddk.sh.run",
"click.secho",
"cookiecutter.main.cookiecutter",
"click.echo",
"aws_ddk.utils.is_in_git_repository"
] | [((850, 877), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (867, 877), False, 'import logging\n'), ((1372, 1411), 'click.echo', 'echo', (['"""Initializing AWS DDK project..."""'], {}), "('Initializing AWS DDK project...')\n", (1376, 1411), False, 'from click import echo, secho\n'), ((1428, 1574), 'cookiecutter.main.cookiecutter', 'cookiecutter', (['template'], {'no_input': '(True)', 'extra_context': "{'directory_name': name, 'environment_id': environment, 'python_executable':\n python_exec}"}), "(template, no_input=True, extra_context={'directory_name': name,\n 'environment_id': environment, 'python_executable': python_exec})\n", (1440, 1574), False, 'from cookiecutter.main import cookiecutter\n'), ((2510, 2523), 'click.echo', 'echo', (['"""Done."""'], {}), "('Done.')\n", (2514, 2523), False, 'from click import echo, secho\n'), ((2261, 2313), 'click.echo', 'echo', (['f"""Creating virtual environment in `{path}`..."""'], {}), "(f'Creating virtual environment in `{path}`...')\n", (2265, 2313), False, 'from click import echo, secho\n'), ((1723, 1749), 'aws_ddk.utils.is_in_git_repository', 'is_in_git_repository', (['path'], {}), '(path)\n', (1743, 1749), False, 'from aws_ddk.utils import is_in_git_repository\n'), ((1763, 1807), 'click.echo', 'echo', (['"""Initializing a new git repository..."""'], {}), "('Initializing a new git repository...')\n", (1767, 1807), False, 'from click import echo, secho\n'), ((2393, 2401), 'aws_ddk.sh.run', 'run', (['cmd'], {}), '(cmd)\n', (2396, 2401), False, 'from aws_ddk.sh import run\n'), ((2440, 2504), 'click.secho', 'secho', (['f"""Failed to run `{cmd}`"""'], {'blink': '(True)', 'bold': '(True)', 'fg': '"""red"""'}), "(f'Failed to run `{cmd}`', blink=True, bold=True, fg='red')\n", (2445, 2504), False, 'from click import echo, secho\n'), ((2081, 2095), 'aws_ddk.sh.run', 'run', (['cmd', 'path'], {}), '(cmd, path)\n', (2084, 2095), False, 'from aws_ddk.sh import run\n'), ((2142, 2206), 'click.secho', 'secho', (['f"""Failed to run `{cmd}`"""'], {'blink': '(True)', 'bold': '(True)', 'fg': '"""red"""'}), "(f'Failed to run `{cmd}`', blink=True, bold=True, fg='red')\n", (2147, 2206), False, 'from click import echo, secho\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Tuple
from mmcls.models.builder import HEADS, build_loss
from mmcls.models.losses import Accuracy
from mmcv.runner import BaseModule
from torch import Tensor
@HEADS.register_module()
class BaseFewShotHead(BaseModule, metaclass=ABCMeta):
"""Base head for few shot classifier.
Args:
loss (dict): Training loss.
topk (tuple[int]): Topk metric for computing the accuracy.
cal_acc (bool): Whether to compute the accuracy during training.
Default: False.
"""
def __init__(self,
loss: Dict = dict(type='CrossEntropyLoss', loss_weight=1.0),
topk: Tuple[int] = (1, ),
cal_acc: bool = False) -> None:
super().__init__()
assert isinstance(loss, dict)
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
for _topk in topk:
assert _topk > 0, 'Top-k should be larger than 0'
self.topk = topk
self.compute_loss = build_loss(loss)
self.compute_accuracy = Accuracy(topk=self.topk)
self.cal_acc = cal_acc
def loss(self, cls_score: Tensor, gt_label: Tensor) -> Dict:
"""Calculate loss.
Args:
cls_score (Tensor): The prediction.
gt_label (Tensor): The learning target of the prediction.
Returns:
Dict: The calculated loss.
"""
num_samples = len(cls_score)
losses = dict()
# compute loss
loss = self.compute_loss(cls_score, gt_label, avg_factor=num_samples)
if self.cal_acc:
# compute accuracy
acc = self.compute_accuracy(cls_score, gt_label)
assert len(acc) == len(self.topk)
losses['accuracy'] = {
f'top-{k}': a
for k, a in zip(self.topk, acc)
}
losses['loss'] = loss
return losses
@abstractmethod
def forward_train(self, **kwargs):
"""Forward training data."""
@abstractmethod
def forward_support(self, x, gt_label, **kwargs):
"""Forward support data in meta testing."""
@abstractmethod
def forward_query(self, x, **kwargs):
"""Forward query data in meta testing."""
@abstractmethod
def before_forward_support(self):
"""Used in meta testing.
This function will be called before model forward support data during
meta testing.
"""
@abstractmethod
def before_forward_query(self):
"""Used in meta testing.
This function will be called before model forward query data during
meta testing.
"""
| [
"mmcls.models.losses.Accuracy",
"mmcls.models.builder.HEADS.register_module",
"mmcls.models.builder.build_loss"
] | [((275, 298), 'mmcls.models.builder.HEADS.register_module', 'HEADS.register_module', ([], {}), '()\n', (296, 298), False, 'from mmcls.models.builder import HEADS, build_loss\n'), ((1128, 1144), 'mmcls.models.builder.build_loss', 'build_loss', (['loss'], {}), '(loss)\n', (1138, 1144), False, 'from mmcls.models.builder import HEADS, build_loss\n'), ((1177, 1201), 'mmcls.models.losses.Accuracy', 'Accuracy', ([], {'topk': 'self.topk'}), '(topk=self.topk)\n', (1185, 1201), False, 'from mmcls.models.losses import Accuracy\n')] |
import os
import shutil
import sys
import pandas as pd
from timeit import default_timer as timer
import environ
from environ import *
from deeplens.constants import *
from deeplens.error import CorruptedOrMissingVideo
from deeplens.full_manager.condition import Condition
from deeplens.full_manager.full_manager import FullStorageManager
from deeplens.full_manager.full_video_processing import NullSplitter
from deeplens.utils.utils import get_local_ip
def runFullPut(src):
local_folder = '/var/www/html/videos'
ip_addr = get_local_ip()
remote_folder = 'http://' + ip_addr + '/videos'
manager = FullStorageManager(None, NullSplitter(), local_folder, remote_folder,
dsn='dbname=header user=postgres password=<PASSWORD> host=10.0.0.5')
def put():
now = timer()
manager.put(src, os.path.basename(src),
args={'encoding': XVID, 'size': -1, 'sample': 1.0, 'offset': 0, 'limit': -1, 'background_scale': 1})
put_time = timer() - now
logrecord('full', ({'file': src}), 'put', str({'elapsed': put_time}), 's')
def get():
clips = manager.get('test', Condition(label='foreground', custom_filter=None))
now = timer()
frame_count = 0
for c in clips:
for frame in c:
frame_count += 1
result = timer() - now
logrecord('full', ({'file': src, 'frames': frame_count}), 'get', str(result), 's')
put()
df = pd.read_csv('http://10.0.0.5/train/' + get_local_ip() + '.csv', sep=',',
dtype={'youtube_id': str})
youtube_ids=df['youtube_id']
youtube_ids2=list(dict.fromkeys(youtube_ids))
total_start = timer()
for item in youtube_ids2:
try:
video_path="http://10.0.0.5/train/"+item+".mp4"
runFullPut(video_path)
except CorruptedOrMissingVideo:
print("missing file for full", item)
print("Total put time on worker %s):" % get_local_ip(), timer() - total_start)
| [
"timeit.default_timer",
"deeplens.utils.utils.get_local_ip",
"deeplens.full_manager.condition.Condition",
"os.path.basename",
"deeplens.full_manager.full_video_processing.NullSplitter"
] | [((1690, 1697), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1695, 1697), True, 'from timeit import default_timer as timer\n'), ((534, 548), 'deeplens.utils.utils.get_local_ip', 'get_local_ip', ([], {}), '()\n', (546, 548), False, 'from deeplens.utils.utils import get_local_ip\n'), ((640, 654), 'deeplens.full_manager.full_video_processing.NullSplitter', 'NullSplitter', ([], {}), '()\n', (652, 654), False, 'from deeplens.full_manager.full_video_processing import NullSplitter\n'), ((817, 824), 'timeit.default_timer', 'timer', ([], {}), '()\n', (822, 824), True, 'from timeit import default_timer as timer\n'), ((1227, 1234), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1232, 1234), True, 'from timeit import default_timer as timer\n'), ((1941, 1955), 'deeplens.utils.utils.get_local_ip', 'get_local_ip', ([], {}), '()\n', (1953, 1955), False, 'from deeplens.utils.utils import get_local_ip\n'), ((1957, 1964), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1962, 1964), True, 'from timeit import default_timer as timer\n'), ((850, 871), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (866, 871), False, 'import os\n'), ((1013, 1020), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1018, 1020), True, 'from timeit import default_timer as timer\n'), ((1162, 1211), 'deeplens.full_manager.condition.Condition', 'Condition', ([], {'label': '"""foreground"""', 'custom_filter': 'None'}), "(label='foreground', custom_filter=None)\n", (1171, 1211), False, 'from deeplens.full_manager.condition import Condition\n'), ((1361, 1368), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1366, 1368), True, 'from timeit import default_timer as timer\n'), ((1522, 1536), 'deeplens.utils.utils.get_local_ip', 'get_local_ip', ([], {}), '()\n', (1534, 1536), False, 'from deeplens.utils.utils import get_local_ip\n')] |
from django.utils.translation import gettext as _
from enums_test import EnumsTest
class TestPublisher(EnumsTest):
entity = 'publisher'
title = _("Éditeur")
data = [
{"id":"1", "name":"editeur 1", "notes":"Notes éditeur 1"}
]
new_data = {"name":"editeur 2", "notes":"Notes éditeur 2"}
| [
"django.utils.translation.gettext"
] | [((154, 166), 'django.utils.translation.gettext', '_', (['"""Éditeur"""'], {}), "('Éditeur')\n", (155, 166), True, 'from django.utils.translation import gettext as _\n')] |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP Server
This server uses asyncore to accept connections and do initial
processing but threads to do work.
"""
from zope.server.serverbase import ServerBase
from zope.server.http.httpserverchannel import HTTPServerChannel
class HTTPServer(ServerBase):
"""This is a generic HTTP Server."""
channel_class = HTTPServerChannel
SERVER_IDENT = 'zope.server.http'
def executeRequest(self, task):
"""Execute an HTTP request."""
# This is a default implementation, meant to be overridden.
body = b"The HTTP server is running!\r\n" * 10
task.response_headers['Content-Type'] = 'text/plain'
task.response_headers['Content-Length'] = str(len(body))
task.write(body)
def getExtraLogMessage(self):
return '\n\tURL: http://%s:%d/' % (self.server_name, self.port)
if __name__ == '__main__':
from zope.server.taskthreads import ThreadedTaskDispatcher
td = ThreadedTaskDispatcher()
td.setThreadCount(4)
HTTPServer('', 8080, task_dispatcher=td)
try:
import asyncore
while 1:
asyncore.poll(5)
except KeyboardInterrupt:
print('shutting down...')
td.shutdown()
| [
"asyncore.poll",
"zope.server.taskthreads.ThreadedTaskDispatcher"
] | [((1581, 1605), 'zope.server.taskthreads.ThreadedTaskDispatcher', 'ThreadedTaskDispatcher', ([], {}), '()\n', (1603, 1605), False, 'from zope.server.taskthreads import ThreadedTaskDispatcher\n'), ((1739, 1755), 'asyncore.poll', 'asyncore.poll', (['(5)'], {}), '(5)\n', (1752, 1755), False, 'import asyncore\n')] |
from flask import Blueprint
bp = Blueprint('patpass_comercio', __name__)
from patpass_comercio import routes
| [
"flask.Blueprint"
] | [((34, 73), 'flask.Blueprint', 'Blueprint', (['"""patpass_comercio"""', '__name__'], {}), "('patpass_comercio', __name__)\n", (43, 73), False, 'from flask import Blueprint\n')] |
import os
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from config import config
from utils import utils
class Dataset_OCR(Dataset):
def __init__(self, image_root, label_path, alphabet, resize_shape, transform=None):
        super(Dataset_OCR, self).__init__()
self.image_root = image_root
self.labels = self.get_labels(label_path)
self.alphabet = alphabet
self.height, self.width = resize_shape
self.transform = transform
@staticmethod
def get_labels(label_path_):
with open(label_path_) as f:
            labels = [{a.split(' ', 1)[0]: a.strip().split(' ', 1)[1]} for a in f.readlines()]
return labels
def __len__(self):
return len(self.labels)
def __getitem__(self, item):
image_name = list(self.labels[item].keys())[0]
image = cv2.imread(self.image_root + os.sep + image_name)
if image is None:
print('{} not exit!'.format(image_name))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
h, w = image.shape
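        # Resize so the height matches the target (32 px) while preserving the aspect ratio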
image = cv2.resize(image, (0, 0), fx=self.height / h, fy=self.height / h, interpolation=cv2.INTER_CUBIC)
        # Pad the remaining width with white so every sample has the same size
image = self.padding_image(image)
# cv2.imshow('image {}'.format(image_name), image)
# cv2.waitKey(0)
image = (np.reshape(image, (32, self.width, 1))).transpose(2, 0, 1)
        # Preprocess and convert to a torch Tensor
image = self.preprocess(image)
return image, item
def padding_image(self, image_):
h, w = image_.shape
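        # Start from an all-white canvas; narrower images are copied in on the left, wider ones squeezed to fit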
img = 255. * np.ones((self.height, self.width))
if w < self.width:
img[:, :w] = image_
else:
img = cv2.resize(image_, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
img = np.uint8(img)
return img
def preprocess(self, image_):
image = image_.astype(np.float32) / 255.
image = torch.from_numpy(image).type(torch.FloatTensor)
image.sub_(config.mean).div_(config.std)
return image
if __name__ == '__main__':
image_root = 'images_sentences/images'
label_path = 'images_sentences/labels/sentences_label.txt'
# image_root = 'Synthetic_Chinese_3_6M/train_tiny_images'
# label_path = 'Synthetic_Chinese_3_6M/label/train_tiny.txt'
alphabet_path = './alphabets.txt'
alphabet = utils.generate_alphabets(alphabet_path)
resize_shape = (32, 560)
dataset = Dataset_OCR(image_root, label_path, alphabet, resize_shape)
for i in range(len(dataset)):
dataset[i]
# datasetLoader = DataLoader(dataset, batch_size=config.batch_size, shuffle=True, num_workers=config.num_works)
#
# for i_batch, (img, index) in enumerate(datasetLoader):
# print(i_batch)
# print(img.shape)
# print(index)
# images = os.listdir(dataset.image_root)
# heights = []
# widths = []
# scales = []
# for image_name in images:
# image = cv2.imread(image_root+'/'+image_name)
# h, w, c = image.shape
# heights.append(h)
# widths.append(w)
# scales.append(w/h)
# print(images)
| [
"utils.utils.generate_alphabets",
"numpy.uint8",
"numpy.reshape",
"numpy.ones",
"torch.from_numpy",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread"
] | [((2390, 2429), 'utils.utils.generate_alphabets', 'utils.generate_alphabets', (['alphabet_path'], {}), '(alphabet_path)\n', (2414, 2429), False, 'from utils import utils\n'), ((876, 925), 'cv2.imread', 'cv2.imread', (['(self.image_root + os.sep + image_name)'], {}), '(self.image_root + os.sep + image_name)\n', (886, 925), False, 'import cv2\n'), ((1021, 1060), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1033, 1060), False, 'import cv2\n'), ((1104, 1204), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': '(self.height / h)', 'fy': '(self.height / h)', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, (0, 0), fx=self.height / h, fy=self.height / h,\n interpolation=cv2.INTER_CUBIC)\n', (1114, 1204), False, 'import cv2\n'), ((1823, 1836), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (1831, 1836), True, 'import numpy as np\n'), ((1606, 1640), 'numpy.ones', 'np.ones', (['(self.height, self.width)'], {}), '((self.height, self.width))\n', (1613, 1640), True, 'import numpy as np\n'), ((1732, 1808), 'cv2.resize', 'cv2.resize', (['image_', '(self.width, self.height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image_, (self.width, self.height), interpolation=cv2.INTER_CUBIC)\n', (1742, 1808), False, 'import cv2\n'), ((1365, 1403), 'numpy.reshape', 'np.reshape', (['image', '(32, self.width, 1)'], {}), '(image, (32, self.width, 1))\n', (1375, 1403), True, 'import numpy as np\n'), ((1956, 1979), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1972, 1979), False, 'import torch\n')] |
from datetime import datetime
from starlette.responses import JSONResponse
from starlette.requests import Request
from .dataaccess import toggleda
from .dataaccess import releasesda
from . import auditing
from . import permissions
async def get_release_notes_for_env(request: Request) -> JSONResponse:
params = request.query_params
env = request.path_params.get('name').lower()
enrollment_id = params.get('enrollment_id')
num_of_days = int(params.get('num_of_days', '90'))
release_notes_data = await releasesda.get_release_notes()
release_notes = {}
for r in release_notes_data:
if r['feature'] not in release_notes:
release_notes[r['feature']] = [r]
else:
release_notes[r['feature']].append(r)
features = release_notes.keys()
# Get sort order
results = await toggleda.get_real_toggle_states(env, features, _with_results=True)
feature_order = [(row['feature'], row['date_on'])
for row in results
if row['state'] == 'ON' and
(datetime.now() - row['date_on']).days <= num_of_days]
# add in releases without features
for _, releases in release_notes.items():
for release in releases:
if 'feature' not in release or release['feature'] is None:
feature_order.append((release['feature'], release['date']))
feature_order = sorted(feature_order, key=lambda x: x[1], reverse=True)
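    # keep only the feature names, most recently switched on first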
feature_order = [a for a, _ in feature_order]
# now match with toggles this person has on
toggles = await toggleda.get_toggle_states_for_env(env, features, user_id=enrollment_id)
toggles = [k for k, v in toggles.items() if v] # only get ones that are on
# build the list
results = []
for t in toggles:
if t not in feature_order:
# get the ones on for this user, but not on globally, put these at the top
# (not worth sorting by date)
for r in release_notes.get(t):
results.append(r)
for f in feature_order:
# now go based on sort order
for r in release_notes.get(f):
if r not in results:
results.append(r)
# finally, make the dates json-able
for r in results:
r['date'] = f"{r['date'].year}-{r['date'].month}-{r['date'].day}"
return JSONResponse({"release_notes": results},
headers={'Access-Control-Allow-Origin': '*'})
async def get_all_release_notes(request: Request) -> JSONResponse:
# dont currently support number of days
# num_of_days = int(request.query_params.get('num_of_days', '-1'))
release_note_list = await releasesda.get_release_notes()
for r in release_note_list:
r['date'] = f"{r['date'].year}-{r['date'].month}-{r['date'].day}"
return JSONResponse({'release_notes': release_note_list})
async def create_release_note(request: Request) -> JSONResponse:
user = request.user.display_name
request_body = await request.json()
release_title = request_body.get('title', '').strip()
feature = request_body.get('feature', '').strip()
body = request_body.get('body', '').strip()
await permissions.check_permissions(user, permissions.Action.manage_release_notes)
if not release_title:
return JSONResponse({'Message': 'No valid title'}, status_code=400)
release_note_id = await releasesda.create_release_note(
release_title, body=body, feature=feature)
await auditing.audit_event('release_note.create', user,
{'id': release_note_id, 'title': release_title,
'body': body, 'feature': feature})
request_body['id'] = release_note_id
return JSONResponse(request_body)
async def delete_release_note(request: Request) -> JSONResponse:
user = request.user.display_name
release_note_id = int(request.path_params.get('id'))
await permissions.check_permissions(user, permissions.Action.manage_release_notes)
results = await releasesda.delete_release_note(release_note_id)
await auditing.audit_event('release_note.delete', user,
{'id': release_note_id, 'title': results['title'],
'body': results['body'], 'feature': results['feature']})
return JSONResponse(None, status_code=204)
async def edit_release_note(request: Request) -> JSONResponse:
user = request.user.display_name
release_note_id = int(request.path_params.get('id'))
request_body = await request.json()
release_title = request_body.get('title', '').strip()
feature = request_body.get('feature', '').strip()
body = request_body.get('body', '').strip()
await permissions.check_permissions(user, permissions.Action.manage_release_notes)
await releasesda.update_release_note(release_note_id,
title=release_title,
body=body,
feature=feature)
await auditing.audit_event('release_note.edit', user,
{'id': release_note_id, 'title': release_title,
'body': body, 'feature': feature})
return JSONResponse(request_body)
| [
"starlette.responses.JSONResponse",
"datetime.datetime.now"
] | [((2372, 2463), 'starlette.responses.JSONResponse', 'JSONResponse', (["{'release_notes': results}"], {'headers': "{'Access-Control-Allow-Origin': '*'}"}), "({'release_notes': results}, headers={\n 'Access-Control-Allow-Origin': '*'})\n", (2384, 2463), False, 'from starlette.responses import JSONResponse\n'), ((2846, 2896), 'starlette.responses.JSONResponse', 'JSONResponse', (["{'release_notes': release_note_list}"], {}), "({'release_notes': release_note_list})\n", (2858, 2896), False, 'from starlette.responses import JSONResponse\n'), ((3764, 3790), 'starlette.responses.JSONResponse', 'JSONResponse', (['request_body'], {}), '(request_body)\n', (3776, 3790), False, 'from starlette.responses import JSONResponse\n'), ((4352, 4387), 'starlette.responses.JSONResponse', 'JSONResponse', (['None'], {'status_code': '(204)'}), '(None, status_code=204)\n', (4364, 4387), False, 'from starlette.responses import JSONResponse\n'), ((5284, 5310), 'starlette.responses.JSONResponse', 'JSONResponse', (['request_body'], {}), '(request_body)\n', (5296, 5310), False, 'from starlette.responses import JSONResponse\n'), ((3331, 3391), 'starlette.responses.JSONResponse', 'JSONResponse', (["{'Message': 'No valid title'}"], {'status_code': '(400)'}), "({'Message': 'No valid title'}, status_code=400)\n", (3343, 3391), False, 'from starlette.responses import JSONResponse\n'), ((1080, 1094), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1092, 1094), False, 'from datetime import datetime\n')] |
from project.user import User
from project.library import Library
import unittest
class TestsUser(unittest.TestCase):
def setUp(self):
self.user = User(12, 'Valentina')
self.library = Library()
def test_init(self):
self.assertEqual(self.user.user_id, 12)
self.assertEqual(self.user.username, 'Valentina')
self.assertEqual(self.user.books, [])
def test_get_book_method_with_book_available_in_the_library_should_add_it_in_the_books_list(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
result = self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 17, self.library)
self.assertEqual(result, 'Harry Potter and the Deathly Hallows successfully rented for the next 17 days!')
self.assertEqual(self.user.books, ["Harry Potter and the Deathly Hallows"])
self.assertEqual(self.library.rented_books, {'Valentina': {'Harry Potter and the Deathly Hallows': 17}})
self.assertEqual(self.library.books_available, {'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Order of the Phoenix']})
def test_get_book_method_with_book_already_rented_should_return_a_message(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 17, self.library)
second_user = User(13, 'Peter')
result = second_user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 17, self.library)
self.assertEqual(result,
'The book "Harry Potter and the Deathly Hallows" is already rented and will be available in 17 days!')
self.assertEqual(self.user.books, ["Harry Potter and the Deathly Hallows"])
self.assertEqual(second_user.books, [])
self.assertEqual(self.library.rented_books, {'Valentina': {'Harry Potter and the Deathly Hallows': 17}})
self.assertEqual(self.library.books_available, {'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Order of the Phoenix']})
def test_return_book_method_with_rented_book_should_remove_from_user_records_and_add_it_back_to_library_records(
self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 3, self.library)
self.user.get_book('J.K.Rowling', 'Harry Potter and the Order of the Phoenix', 12, self.library)
self.assertEqual(self.user.books,
['Harry Potter and the Deathly Hallows', 'Harry Potter and the Order of the Phoenix'])
self.user.return_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', self.library)
self.assertEqual(self.user.books, ['Harry Potter and the Order of the Phoenix'])
self.assertEqual(self.library.books_available, {'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows']})
self.assertEqual(self.library.rented_books, {'Valentina': {'Harry Potter and the Order of the Phoenix': 12}})
def test_return_book_method_with_book_NOT_rented_by_the_user_should_return_message(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 3, self.library)
result = self.user.return_book('J.K.Rowling', 'Harry Potter and the Order of the Phoenix', self.library)
self.assertEqual(result, f'Valentina doesn\'t have this book in his/her records!')
def test_info_method_should_return_sorted_books_list(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosophers Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Order of the Phoenix', 3, self.library)
self.user.get_book('J.K.Rowling', 'Harry Potter and the Philosophers Stone', 3, self.library)
        self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 3, self.library)
result = self.user.info()
self.assertEqual(result,
"Harry Potter and the Deathly Hallows, Harry Potter and the Order of the Phoenix, Harry Potter and the Philosophers Stone")
def test_init(self):
self.assertEqual(self.library.user_records, [])
self.assertEqual(self.library.books_available, {})
self.assertEqual(self.library.rented_books, {})
def test_add_user_already_registered_in_the_library_should_return_message(self):
user = User(12, 'Valentina')
library = Library()
library.add_user(user)
result = library.add_user(user)
self.assertEqual(result, 'User with id = 12 already registered in the library!')
def test_add_user_method_with_valid_data_should_update_records_properly(self):
user = User(12, 'Valentina')
library = Library()
library.add_user(user)
library.add_user(User(13, 'Peter'))
self.assertEqual(library.user_records[0].__str__(), '12, Valentina, []')
self.assertEqual(library.user_records[1].__str__(), '13, Peter, []')
def test_remove_user_method_with_valid_data_should_update_library_records_properly(self):
user = User(12, 'Valentina')
library = Library()
library.add_user(user)
library.add_user(User(13, 'Peter'))
library.remove_user(user)
self.assertEqual(library.user_records[0].__str__(), '13, Peter, []')
def test_remove_user_method_with_user_not_registered_should_return_message(self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.remove_user(p)
self.assertEqual(result, 'We could not find such user to remove!')
def test_change_username_method_with_user_id_not_included_in_library_records_should_return_message(self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.change_username(13, 'George')
self.assertEqual(result, 'There is no user with id = 13!')
def test_change_username_method_with_user_id_included_in_library_records_but_provided_new_username_is_the_same_should_return_message(
self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.change_username(12, 'Valentina')
self.assertEqual(result,
'Please check again the provided username - it should be different than the username used so far!')
def test_change_username_method_with_valid_data_should_return_message_and_update_library_records(self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.change_username(12, 'Violeta')
self.assertEqual(result, 'Username successfully changed to: Violeta for userid: 12')
self.assertEqual(library.user_records[0].__str__(), '12, Violeta, []')
if __name__ == "__main__":
unittest.main()
class TestsUser(unittest.TestCase):
def setUp(self):
self.user = User(12, 'Valentina')
self.library = Library()
def test_init(self):
self.assertEqual(self.user.user_id, 12)
self.assertEqual(self.user.username, 'Valentina')
self.assertEqual(self.user.books, [])
def test_get_book_method_with_book_available_in_the_library_should_add_it_in_the_books_list(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
result = self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 17, self.library)
self.assertEqual(result, 'Harry Potter and the Deathly Hallows successfully rented for the next 17 days!')
self.assertEqual(self.user.books, ["Harry Potter and the Deathly Hallows"])
self.assertEqual(self.library.rented_books, {'Valentina': {'Harry Potter and the Deathly Hallows': 17}})
self.assertEqual(self.library.books_available, {'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Order of the Phoenix']})
def test_get_book_method_with_book_already_rented_should_return_a_message(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 17, self.library)
second_user = User(13, 'Peter')
result = second_user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 17, self.library)
self.assertEqual(result,
'The book "Harry Potter and the Deathly Hallows" is already rented and will be available in 17 days!')
self.assertEqual(self.user.books, ["Harry Potter and the Deathly Hallows"])
self.assertEqual(second_user.books, [])
self.assertEqual(self.library.rented_books, {'Valentina': {'Harry Potter and the Deathly Hallows': 17}})
self.assertEqual(self.library.books_available, {'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Order of the Phoenix']})
def test_return_book_method_with_rented_book_should_remove_from_user_records_and_add_it_back_to_library_records(
self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 3, self.library)
self.user.get_book('J.K.Rowling', 'Harry Potter and the Order of the Phoenix', 12, self.library)
self.assertEqual(self.user.books,
['Harry Potter and the Deathly Hallows', 'Harry Potter and the Order of the Phoenix'])
self.user.return_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', self.library)
self.assertEqual(self.user.books, ['Harry Potter and the Order of the Phoenix'])
self.assertEqual(self.library.books_available, {'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows']})
self.assertEqual(self.library.rented_books, {'Valentina': {'Harry Potter and the Order of the Phoenix': 12}})
def test_return_book_method_with_book_NOT_rented_by_the_user_should_return_message(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosopher\'s Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 3, self.library)
result = self.user.return_book('J.K.Rowling', 'Harry Potter and the Order of the Phoenix', self.library)
self.assertEqual(result, f'Valentina doesn\'t have this book in his/her records!')
def test_info_method_should_return_sorted_books_list(self):
self.library.books_available.update({'J.K.Rowling': ['Harry Potter and the Philosophers Stone',
'Harry Potter and the Deathly Hallows',
'Harry Potter and the Order of the Phoenix']})
self.user.get_book('J.K.Rowling', 'Harry Potter and the Order of the Phoenix', 3, self.library)
self.user.get_book('J.K.Rowling', 'Harry Potter and the Philosophers Stone', 3, self.library)
self.user.get_book('J.K.Rowling', 'Harry Potter and the Deathly Hallows', 3, self.library)
result = self.user.info()
self.assertEqual(result,
"Harry Potter and the Deathly Hallows, Harry Potter and the Order of the Phoenix, Harry Potter and the Philosophers Stone")
def test_init(self):
self.assertEqual(self.library.user_records, [])
self.assertEqual(self.library.books_available, {})
self.assertEqual(self.library.rented_books, {})
def test_add_user_already_registered_in_the_library_should_return_message(self):
user = User(12, 'Valentina')
library = Library()
library.add_user(user)
result = library.add_user(user)
self.assertEqual(result, 'User with id = 12 already registered in the library!')
def test_add_user_method_with_valid_data_should_update_records_properly(self):
user = User(12, 'Valentina')
library = Library()
library.add_user(user)
library.add_user(User(13, 'Peter'))
self.assertEqual(library.user_records[0].__str__(), '12, Valentina, []')
self.assertEqual(library.user_records[1].__str__(), '13, Peter, []')
def test_remove_user_method_with_valid_data_should_update_library_records_properly(self):
user = User(12, 'Valentina')
library = Library()
library.add_user(user)
library.add_user(User(13, 'Peter'))
library.remove_user(user)
self.assertEqual(library.user_records[0].__str__(), '13, Peter, []')
def test_remove_user_method_with_user_not_registered_should_return_message(self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.remove_user(p)
self.assertEqual(result, 'We could not find such user to remove!')
def test_change_username_method_with_user_id_not_included_in_library_records_should_return_message(self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.change_username(13, 'George')
self.assertEqual(result, 'There is no user with id = 13!')
def test_change_username_method_with_user_id_included_in_library_records_but_provided_new_username_is_the_same_should_return_message(
self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.change_username(12, 'Valentina')
self.assertEqual(result,
'Please check again the provided username - it should be different than the username used so far!')
def test_change_username_method_with_valid_data_should_return_message_and_update_library_records(self):
v = User(12, 'Valentina')
p = User(13, 'Peter')
library = Library()
library.add_user(v)
result = library.change_username(12, 'Violeta')
self.assertEqual(result, 'Username successfully changed to: Violeta for userid: 12')
self.assertEqual(library.user_records[0].__str__(), '12, Violeta, []')
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"project.library.Library",
"project.user.User"
] | [((9059, 9074), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9072, 9074), False, 'import unittest\n'), ((18058, 18073), 'unittest.main', 'unittest.main', ([], {}), '()\n', (18071, 18073), False, 'import unittest\n'), ((161, 182), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (165, 182), False, 'from project.user import User\n'), ((206, 215), 'project.library.Library', 'Library', ([], {}), '()\n', (213, 215), False, 'from project.library import Library\n'), ((2322, 2339), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (2326, 2339), False, 'from project.user import User\n'), ((6462, 6483), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (6466, 6483), False, 'from project.user import User\n'), ((6502, 6511), 'project.library.Library', 'Library', ([], {}), '()\n', (6509, 6511), False, 'from project.library import Library\n'), ((6771, 6792), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (6775, 6792), False, 'from project.user import User\n'), ((6811, 6820), 'project.library.Library', 'Library', ([], {}), '()\n', (6818, 6820), False, 'from project.library import Library\n'), ((7164, 7185), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (7168, 7185), False, 'from project.user import User\n'), ((7204, 7213), 'project.library.Library', 'Library', ([], {}), '()\n', (7211, 7213), False, 'from project.library import Library\n'), ((7499, 7520), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (7503, 7520), False, 'from project.user import User\n'), ((7533, 7550), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (7537, 7550), False, 'from project.user import User\n'), ((7569, 7578), 'project.library.Library', 'Library', ([], {}), '()\n', (7576, 7578), False, 'from project.library import Library\n'), ((7845, 7866), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (7849, 7866), False, 'from project.user import User\n'), ((7879, 7896), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (7883, 7896), False, 'from project.user import User\n'), ((7915, 7924), 'project.library.Library', 'Library', ([], {}), '()\n', (7922, 7924), False, 'from project.library import Library\n'), ((8245, 8266), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (8249, 8266), False, 'from project.user import User\n'), ((8279, 8296), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (8283, 8296), False, 'from project.user import User\n'), ((8315, 8324), 'project.library.Library', 'Library', ([], {}), '()\n', (8322, 8324), False, 'from project.library import Library\n'), ((8690, 8711), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (8694, 8711), False, 'from project.user import User\n'), ((8724, 8741), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (8728, 8741), False, 'from project.user import User\n'), ((8760, 8769), 'project.library.Library', 'Library', ([], {}), '()\n', (8767, 8769), False, 'from project.library import Library\n'), ((9154, 9175), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (9158, 9175), False, 'from project.user import User\n'), ((9199, 9208), 'project.library.Library', 'Library', ([], {}), '()\n', 
(9206, 9208), False, 'from project.library import Library\n'), ((11315, 11332), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (11319, 11332), False, 'from project.user import User\n'), ((15461, 15482), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (15465, 15482), False, 'from project.user import User\n'), ((15501, 15510), 'project.library.Library', 'Library', ([], {}), '()\n', (15508, 15510), False, 'from project.library import Library\n'), ((15770, 15791), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (15774, 15791), False, 'from project.user import User\n'), ((15810, 15819), 'project.library.Library', 'Library', ([], {}), '()\n', (15817, 15819), False, 'from project.library import Library\n'), ((16163, 16184), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (16167, 16184), False, 'from project.user import User\n'), ((16203, 16212), 'project.library.Library', 'Library', ([], {}), '()\n', (16210, 16212), False, 'from project.library import Library\n'), ((16498, 16519), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (16502, 16519), False, 'from project.user import User\n'), ((16532, 16549), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (16536, 16549), False, 'from project.user import User\n'), ((16568, 16577), 'project.library.Library', 'Library', ([], {}), '()\n', (16575, 16577), False, 'from project.library import Library\n'), ((16844, 16865), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (16848, 16865), False, 'from project.user import User\n'), ((16878, 16895), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (16882, 16895), False, 'from project.user import User\n'), ((16914, 16923), 'project.library.Library', 'Library', ([], {}), '()\n', (16921, 16923), False, 'from project.library import Library\n'), ((17244, 17265), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (17248, 17265), False, 'from project.user import User\n'), ((17278, 17295), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (17282, 17295), False, 'from project.user import User\n'), ((17314, 17323), 'project.library.Library', 'Library', ([], {}), '()\n', (17321, 17323), False, 'from project.library import Library\n'), ((17689, 17710), 'project.user.User', 'User', (['(12)', '"""Valentina"""'], {}), "(12, 'Valentina')\n", (17693, 17710), False, 'from project.user import User\n'), ((17723, 17740), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (17727, 17740), False, 'from project.user import User\n'), ((17759, 17768), 'project.library.Library', 'Library', ([], {}), '()\n', (17766, 17768), False, 'from project.library import Library\n'), ((6877, 6894), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (6881, 6894), False, 'from project.user import User\n'), ((7270, 7287), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (7274, 7287), False, 'from project.user import User\n'), ((15876, 15893), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (15880, 15893), False, 'from project.user import User\n'), ((16269, 16286), 'project.user.User', 'User', (['(13)', '"""Peter"""'], {}), "(13, 'Peter')\n", (16273, 16286), False, 'from project.user import 
User\n')] |
"""
Python code for testing the performance of the latent embedding model described in
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Latent Embeddings for Zero-shot Classification. IEEE CVPR 2016.
Author: <NAME>
E-mail: <EMAIL>
University of Beira Interior, Portugal
"""
import os
import numpy as np
from scipy import stats
from sklearn import preprocessing
import time
import pickle
#######################################################################
# AUXILIARY FUNCTIONS
#######################################################################
def l2_normalization(X):
norm = np.sqrt(np.sum(X**2, axis=1))
l2norm = X / norm[:,None]
return l2norm
def zscore_normalization(X):
"""
Compute the z-score over image features X
:param X: image embedding matrix, each row is an instance
:return: z-score
"""
z_score = stats.zscore(X, axis=1)
return z_score
def w_init(X, Y, K):
"""
Initialization of matrix W
:param X: images embedding matrix, each row is an image instance
:param Y: class embedding matrix, each row is a class
:param K: number of embeddings to learn
:return: a matrix with K embeddings
"""
dim_X = X.shape[1]
dim_Y = Y.shape[0]
W = []
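    # Each of the K latent matrices starts as Gaussian noise scaled by 1/sqrt(dim_X)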
for i in range(K):
W.append(np.random.randn(dim_X, dim_Y) * 1.0 / np.sqrt(dim_X))
W = np.array(W)
return W
def argmax_over_matrices(x, y, W):
"""
Calculates the maximum score over matrices
:param x: an image embedding instance
:param y: a class embedding
:param W: a cell array of embeddings
:return best_score: best bilinear score among all the embeddings
:return best_idx: index of the embedding with the best score
"""
K = len(W)
best_score = -1e12
best_idx = -1
score = np.zeros((K, 1))
for i in range(K):
projected_x = np.dot(x, W[i])
projected_x = projected_x.astype(float)
y = y.astype(float)
score[i] = np.dot(projected_x, y)
if score[i] > best_score:
best_score = score[i]
best_idx = i
return best_score, best_idx
def latem_train(X, labels, Y, learning_rate, n_epochs, K):
"""
SGD optimization for LatEm
:param X: images embedding matrix, each row is an image instance
:param labels: ground truth labels of all image instances
:param Y: class embedding matrix, each row is for a class
:param learning_rate: learning rate for SGD algorithm
:param n_epochs: number of epochs for SGD algorithm
:param K: number of embeddings to learn
:return W: a cell array with K embeddings
"""
n_train = X.shape[0]
n_class = len(np.unique(labels))
W = w_init(X, Y, K)
tic = time.time()
for i in range(n_epochs):
print("[INFO]: Epoch %d / %d" % (i+1, n_epochs))
perm = np.random.permutation(n_train)
for j in range(n_train):
n_i = perm[j] # Choose a training instance
best_j = -1
picked_y = labels[n_i] # Correspondent class label for the chosen training instance
            # Keep sampling until the picked class differs from this instance's own label
            while picked_y == labels[n_i]:
                picked_y = np.random.randint(n_class)
            # Best bilinear score over all W_i for x paired with the randomly picked (wrong) class
            max_score, best_j = argmax_over_matrices(X[n_i, :], Y[:, picked_y], W)
            # Best bilinear score over all W_i for x paired with its ground-truth class
            best_score_yi, best_j_yi = argmax_over_matrices(X[n_i, :], Y[:, labels[n_i] - 1], W)
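            # Margin ranking update: only adjust W when the wrong class scores within a margin of 1 of the true class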
if max_score + 1 > best_score_yi:
if best_j == best_j_yi:
# print(W[best_j].shape)
X = X.astype(float)
Y = Y.astype(float)
W[best_j] = W[best_j] - np.dot(np.multiply(learning_rate, X[n_i, :].reshape((X.shape[1], 1))),
(Y[:, picked_y] - Y[:, labels[n_i] - 1]).reshape((1, Y.shape[0])))
else:
X = X.astype(float)
Y = Y.astype(float)
W[best_j] = W[best_j] - np.dot(np.multiply(learning_rate, X[n_i, :].reshape((X.shape[1], 1))),
Y[:, picked_y].reshape((1, Y.shape[0])))
W[best_j_yi] = W[best_j_yi] + np.dot(np.multiply(learning_rate, X[n_i, :].reshape((X.shape[1], 1))),
Y[:, labels[n_i] - 1].reshape((1, Y.shape[0])))
toc = time.time()
training_time = (toc-tic)/60.0
return W, training_time
def latem_test(W, X, Y, labels):
"""
    Perform the classification task and return the mean accuracy
:param W: latent embeddings
:param X: images embedding matrix, each row is an image instance
:param Y: class embedding matrix, each row is for a class
:param labels: ground truth labels of all image instances
:return: the classification accuracy averaged over all classes
"""
n_samples = X.shape[0]
preds = []
K = len(W)
scores = {}
max_scores = {}
idx = {}
X = X.astype(float)
Y = Y.astype(float)
print("[INFO]: Testing...")
for i in range(K):
projected_X = np.dot(X, W[i])
scores[i] = np.dot(projected_X, Y)
max_scores[i], idx[i] = np.sum(scores[i], axis=1), np.argmax(scores[i], axis=1)
# Convert dict into matrix
dataMatrix = np.array([max_scores[i] for i in range(K)])
# Get list with maximum_scores
maximum_scores = np.amax(dataMatrix, axis=0)
# Get index of chosen latent embedding
idxs = np.argwhere(dataMatrix == maximum_scores)
final_idx = idxs[np.argsort(idxs[:, 1]), 0]
for i, index in enumerate(final_idx):
# Get value of preds
preds.append(idx[index][i])
preds = np.array(preds)
diff = preds - labels
n_incorrect = len(np.nonzero(diff)[0])
mean_accuracy = (n_samples - n_incorrect) / n_samples
return preds, mean_accuracy
def get_emb_vectors(stage="train"):
"""
Get embedding vectors of classes using GloVe
:return: vectors
"""
if stage == "train":
classes = ['antelope', 'grizzly', 'killer', 'beaver', 'dalmatian', 'horse', 'shepherd', 'whale', 'siamese', 'skunk',
'mole', 'tiger', 'moose', 'monkey', 'elephant', 'gorilla', 'ox', 'fox', 'sheep',
'hamster', 'squirrel', 'rhinoceros', 'rabbit', 'bat', 'giraffe', 'wolf', 'chihuahua', 'weasel',
'otter', 'buffalo', 'zebra', 'deer', 'bobcat', 'lion', 'mouse', 'bear', 'collie', 'walrus',
'cow', 'dolphin']
else:
classes = ['chimpanzee', 'panda', 'leopard', 'cat', 'pig', 'hippopotamus', 'whale', 'raccoon', 'rat', 'seal']
vectors = []
f = open("glove.6B.300d.txt")
for i in f:
word = i.split()[0]
if word in classes:
vectors.append(i.split()[1:])
vectors = np.array(vectors)
f.close()
return vectors
#######################################################################
# PREPROCESSING DATA
#######################################################################
# Loading the AwA dataset
print('[INFO]: Loading dataset...')
labels = np.loadtxt(
'../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt')
if not os.path.exists('../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.pkl'):
X = np.loadtxt(
'../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt')
pickle.dump(X, open('../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.pkl', "wb"))
else:
X = pickle.load(open('../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.pkl', "rb"))
# Get all classes
classes = {}
with open('../Datasets/Animals_with_Attributes2/classes.txt') as f_classes:
lines = f_classes.readlines()
for l in lines:
classes[l.strip().split("\t")[1]] = l.strip().split("\t")[0]
# Get training classes
train_classes = []
with open("Data/trainclasses.txt") as f_tclasses:
lines = f_tclasses.readlines()
for l in lines:
classname = l.strip()
train_classes.append(int(classes[classname]))
train_classes = np.array(train_classes)
# Get test classes
test_classes = []
with open("Data/testclasses.txt") as f:
lines = f.readlines()
for l in lines:
classname = l.strip()
test_classes.append(int(classes[classname]))
test_classes = np.array(test_classes)
# Split into train and test sets (40 classes for training and 10 classe for test)
lbl = preprocessing.LabelEncoder()
y_train = lbl.fit_transform(labels[np.where([labels == i for i in train_classes])[1]])
X_train = X[np.where([labels == i for i in train_classes])[1]]
X_train = zscore_normalization(X_train)
S = np.loadtxt('../Datasets/Animals_with_Attributes2/predicate-matrix-continuous.txt')
# l2-normalize the samples (rows).
S_normalized = preprocessing.normalize(S, norm='l2', axis=1)
#S_normalized = l2_normalization(S)
#Y = get_emb_vectors(stage="train").T
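# Class ids in classes.txt are 1-based, so shift by one when indexing the attribute matrix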
Y = S_normalized[[(i-1) for i in train_classes]].T
#######################################################################
# TRAINING
#######################################################################
# X = np.random.randn(20, 1024)
# labels = np.random.randint(3, size=20) + 1
# Y = np.random.randint(3, size=(85, 3))
learning_rate = 1e-3
n_epochs = 10
K = 6
print("########### TRAINING INFO ###########")
print("X shape --- ", X_train.shape)
print("Y shape --- ", Y.shape)
print("Labels --- ", y_train.shape)
print("Learning rate --- ", learning_rate)
print("No. epochs --- ", n_epochs)
print("No. latent embeddings --- ", K)
W, training_time = latem_train(X_train, y_train, Y, learning_rate, n_epochs, K)
print("W shape --- ", W.shape)
print("[INFO]: Training time: %.2f minutes" % (training_time))
#######################################################################
# TEST
#######################################################################
lbl = preprocessing.LabelEncoder()
y_test = lbl.fit_transform(labels[np.where([labels == i for i in test_classes])[1]])
X_test = X[np.where([labels == i for i in test_classes])[1]]
X_test = zscore_normalization(X_test)
#Y = get_emb_vectors(stage="test").T
Y = S_normalized[[(i-1) for i in test_classes]].T
#X = np.random.randn(10, 1024)
#X = normalization(X)
#labels = np.random.randint(3, size=10) + 1
#Y = np.random.randint(3, size=(85, 3))
print("########## TEST INFO ##########")
print("X shape --- ", X_test.shape)
print("Y shape --- ", Y.shape)
print("Labels --- ", y_test)
preds, acc = latem_test(W, X_test, Y, y_test)
print("Preds --- ", preds)
print("Accuracy --- %.2f %%" % (acc * 100))
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.sqrt",
"numpy.argsort",
"numpy.array",
"sklearn.preprocessing.normalize",
"os.path.exists",
"numpy.where",
"numpy.dot",
"numpy.random.permutation",
"numpy.argmax",
"scipy.stats.zscore",
"numpy.nonzero",
"time.time",
"numpy.random.randn",
"numpy.unique",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"numpy.random.randint",
"numpy.loadtxt",
"numpy.amax"
] | [((7422, 7512), 'numpy.loadtxt', 'np.loadtxt', (['"""../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt"""'], {}), "(\n '../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt')\n", (7432, 7512), True, 'import numpy as np\n'), ((8421, 8444), 'numpy.array', 'np.array', (['train_classes'], {}), '(train_classes)\n', (8429, 8444), True, 'import numpy as np\n'), ((8667, 8689), 'numpy.array', 'np.array', (['test_classes'], {}), '(test_classes)\n', (8675, 8689), True, 'import numpy as np\n'), ((8779, 8807), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (8805, 8807), False, 'from sklearn import preprocessing\n'), ((9003, 9090), 'numpy.loadtxt', 'np.loadtxt', (['"""../Datasets/Animals_with_Attributes2/predicate-matrix-continuous.txt"""'], {}), "(\n '../Datasets/Animals_with_Attributes2/predicate-matrix-continuous.txt')\n", (9013, 9090), True, 'import numpy as np\n'), ((9136, 9181), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['S'], {'norm': '"""l2"""', 'axis': '(1)'}), "(S, norm='l2', axis=1)\n", (9159, 9181), False, 'from sklearn import preprocessing\n'), ((10230, 10258), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (10256, 10258), False, 'from sklearn import preprocessing\n'), ((865, 888), 'scipy.stats.zscore', 'stats.zscore', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (877, 888), False, 'from scipy import stats\n'), ((1352, 1363), 'numpy.array', 'np.array', (['W'], {}), '(W)\n', (1360, 1363), True, 'import numpy as np\n'), ((1796, 1812), 'numpy.zeros', 'np.zeros', (['(K, 1)'], {}), '((K, 1))\n', (1804, 1812), True, 'import numpy as np\n'), ((2723, 2734), 'time.time', 'time.time', ([], {}), '()\n', (2732, 2734), False, 'import time\n'), ((4695, 4706), 'time.time', 'time.time', ([], {}), '()\n', (4704, 4706), False, 'import time\n'), ((5711, 5738), 'numpy.amax', 'np.amax', (['dataMatrix'], {'axis': '(0)'}), '(dataMatrix, axis=0)\n', (5718, 5738), True, 'import numpy as np\n'), ((5793, 5834), 'numpy.argwhere', 'np.argwhere', (['(dataMatrix == maximum_scores)'], {}), '(dataMatrix == maximum_scores)\n', (5804, 5834), True, 'import numpy as np\n'), ((6003, 6018), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (6011, 6018), True, 'import numpy as np\n'), ((7128, 7145), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (7136, 7145), True, 'import numpy as np\n'), ((7521, 7622), 'os.path.exists', 'os.path.exists', (['"""../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.pkl"""'], {}), "(\n '../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.pkl'\n )\n", (7535, 7622), False, 'import os\n'), ((7622, 7719), 'numpy.loadtxt', 'np.loadtxt', (['"""../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt"""'], {}), "(\n '../Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt'\n )\n", (7632, 7719), True, 'import numpy as np\n'), ((603, 625), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (609, 625), True, 'import numpy as np\n'), ((1859, 1874), 'numpy.dot', 'np.dot', (['x', 'W[i]'], {}), '(x, W[i])\n', (1865, 1874), True, 'import numpy as np\n'), ((1970, 1992), 'numpy.dot', 'np.dot', (['projected_x', 'y'], {}), '(projected_x, y)\n', (1976, 1992), True, 'import numpy as np\n'), ((2668, 2685), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2677, 2685), True, 'import numpy as np\n'), ((2837, 2867), 'numpy.random.permutation', 
'np.random.permutation', (['n_train'], {}), '(n_train)\n', (2858, 2867), True, 'import numpy as np\n'), ((5415, 5430), 'numpy.dot', 'np.dot', (['X', 'W[i]'], {}), '(X, W[i])\n', (5421, 5430), True, 'import numpy as np\n'), ((5451, 5473), 'numpy.dot', 'np.dot', (['projected_X', 'Y'], {}), '(projected_X, Y)\n', (5457, 5473), True, 'import numpy as np\n'), ((8907, 8955), 'numpy.where', 'np.where', (['[(labels == i) for i in train_classes]'], {}), '([(labels == i) for i in train_classes])\n', (8915, 8955), True, 'import numpy as np\n'), ((10355, 10402), 'numpy.where', 'np.where', (['[(labels == i) for i in test_classes]'], {}), '([(labels == i) for i in test_classes])\n', (10363, 10402), True, 'import numpy as np\n'), ((5506, 5531), 'numpy.sum', 'np.sum', (['scores[i]'], {'axis': '(1)'}), '(scores[i], axis=1)\n', (5512, 5531), True, 'import numpy as np\n'), ((5533, 5561), 'numpy.argmax', 'np.argmax', (['scores[i]'], {'axis': '(1)'}), '(scores[i], axis=1)\n', (5542, 5561), True, 'import numpy as np\n'), ((5856, 5878), 'numpy.argsort', 'np.argsort', (['idxs[:, 1]'], {}), '(idxs[:, 1])\n', (5866, 5878), True, 'import numpy as np\n'), ((6068, 6084), 'numpy.nonzero', 'np.nonzero', (['diff'], {}), '(diff)\n', (6078, 6084), True, 'import numpy as np\n'), ((8843, 8891), 'numpy.where', 'np.where', (['[(labels == i) for i in train_classes]'], {}), '([(labels == i) for i in train_classes])\n', (8851, 8891), True, 'import numpy as np\n'), ((10293, 10340), 'numpy.where', 'np.where', (['[(labels == i) for i in test_classes]'], {}), '([(labels == i) for i in test_classes])\n', (10301, 10340), True, 'import numpy as np\n'), ((1327, 1341), 'numpy.sqrt', 'np.sqrt', (['dim_X'], {}), '(dim_X)\n', (1334, 1341), True, 'import numpy as np\n'), ((3253, 3279), 'numpy.random.randint', 'np.random.randint', (['n_class'], {}), '(n_class)\n', (3270, 3279), True, 'import numpy as np\n'), ((1289, 1318), 'numpy.random.randn', 'np.random.randn', (['dim_X', 'dim_Y'], {}), '(dim_X, dim_Y)\n', (1304, 1318), True, 'import numpy as np\n')] |
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 <NAME> r.c.i.m<EMAIL>enzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package i18n
# localization back end.
#
import os
import locale
import gettext
from cal_path import get_lang_path
from gpvdm_local import gpvdm_local
a=gpvdm_local()
if a.international.lang=="auto":
current_locale, encoding = locale.getdefaultlocale()
if current_locale==None:
print("No local language set assuming en_US")
current_locale="en_US"
else:
current_locale=a.international.lang
language = gettext.translation ('gpvdm', get_lang_path(), [current_locale] , fallback=True)
language.install()
def get_language():
lang=current_locale.split("_")[1].lower()
return lang
def get_full_language():
return current_locale
def get_full_desired_lang_path():
return os.path.join(get_lang_path(),get_full_language(),"LC_MESSAGES")
def get_languages():
langs=[]
langs.append("en_US")
path=get_lang_path()
if os.path.isdir(path)==False:
return False
print(os.listdir(path))
for my_dir in os.listdir(path):
if os.path.isdir(os.path.join(path,my_dir))==True:
langs.append(my_dir)
return langs
| [
"os.listdir",
"os.path.join",
"os.path.isdir",
"cal_path.get_lang_path",
"gpvdm_local.gpvdm_local",
"locale.getdefaultlocale"
] | [((1084, 1097), 'gpvdm_local.gpvdm_local', 'gpvdm_local', ([], {}), '()\n', (1095, 1097), False, 'from gpvdm_local import gpvdm_local\n'), ((1160, 1185), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (1183, 1185), False, 'import locale\n'), ((1370, 1385), 'cal_path.get_lang_path', 'get_lang_path', ([], {}), '()\n', (1383, 1385), False, 'from cal_path import get_lang_path\n'), ((1736, 1751), 'cal_path.get_lang_path', 'get_lang_path', ([], {}), '()\n', (1749, 1751), False, 'from cal_path import get_lang_path\n'), ((1841, 1857), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1851, 1857), False, 'import os\n'), ((1624, 1639), 'cal_path.get_lang_path', 'get_lang_path', ([], {}), '()\n', (1637, 1639), False, 'from cal_path import get_lang_path\n'), ((1756, 1775), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1769, 1775), False, 'import os\n'), ((1808, 1824), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1818, 1824), False, 'import os\n'), ((1878, 1904), 'os.path.join', 'os.path.join', (['path', 'my_dir'], {}), '(path, my_dir)\n', (1890, 1904), False, 'import os\n')] |
from dataclasses import dataclass
from typing import Dict, List, Tuple, Any
import warnings
import numpy as np
import pandas as pd
from floodlight.core.definitions import essential_events_columns, protected_columns
@dataclass
class Events:
"""Event data fragment. Core class of floodlight.
Event data is stored in `pandas` ``DataFrame``, where each row stores one event
with its different properties organized in columns. You may put whatever
information you like in these columns. Yet, the columns `"eID"` and `"gameclock"`
are mandatory to identify and time-locate events. Some special column names are
reserved for properties that follow conventions. These may be necessary and their
existence is checked for running particular analyses.
Attributes
----------
events: pd.DataFrame
DataFrame containing rows of events and columns of respective event properties.
direction: str, optional
Playing direction of players in data fragment, should be either
'lr' (left-to-right) or 'rl' (right-to-left).
essential: list
List of essential columns available for stored events.
protected: list
List of protected columns available for stored events.
custom: list
List of custom (i.e. non-essential and non-protected) columns available for
stored events.
essential_missing: list or None
List of missing essential columns or None if no columns are missing.
essential_invalid: list or None
List of essential columns that violate the definitions or None if all columns
match the definitions.
protected_missing: list or None
List of missing protected columns or None if no columns are missing.
protected_invalid: list or None
List of protected columns that violate the definitions or None if all columns
match the definitions.
"""
events: pd.DataFrame
direction: str = None
def __post_init__(self):
# check for missing essential columns
missing_columns = self.essential_missing
if missing_columns is not None:
raise ValueError(
f"Floodlight Events object is missing the essential "
f"column(s) {missing_columns}!"
)
# warn if value ranges are violated
incorrect_columns = self.essential_invalid
if incorrect_columns is not None:
for col in incorrect_columns:
warnings.warn(
f"Floodlight Events column {col} does not match the defined value"
f"range (from floodlight.core.definitions). You can pursue at this "
f"point, however, be aware that this may lead to unexpected "
f"behavior in the future."
)
def __str__(self):
return f"Floodlight Events object of shape {self.events.shape}"
def __len__(self):
return len(self.events)
def __getitem__(self, key):
return self.events[key]
def __setitem__(self, key, value):
self.events[key] = value
@property
def essential(self):
essential = [
col for col in self.events.columns if col in essential_events_columns
]
return essential
@property
def protected(self):
protected = [col for col in self.events.columns if col in protected_columns]
return protected
@property
def custom(self):
custom = [
col
for col in self.events.columns
if col not in essential_events_columns and col not in protected_columns
]
return custom
@property
def essential_missing(self):
missing_columns = [
col for col in essential_events_columns if col not in self.essential
]
if not missing_columns:
return None
else:
return missing_columns
@property
def essential_invalid(self):
invalid_columns = [
col
for col in self.essential
if not self.column_values_in_range(col, essential_events_columns)
]
if not invalid_columns:
invalid_columns = None
return invalid_columns
@property
def protected_missing(self):
missing_columns = [
col for col in protected_columns if col not in self.protected
]
if not missing_columns:
missing_columns = None
return missing_columns
@property
def protected_invalid(self):
invalid_columns = [
col
for col in self.protected
if not self.column_values_in_range(col, protected_columns)
]
if not invalid_columns:
invalid_columns = None
return invalid_columns
def column_values_in_range(self, col: str, definitions: Dict[str, Dict]) -> bool:
"""Check if values for a single column of the inner event DataFrame are in
        correct range using the specifications from floodlight.core.definitions.
Parameters
----------
col: str
Column name of the inner event DataFrame to be checked
definitions: Dict
Dictionary (from floodlight.core.definitions) containing specifications for
the columns to be checked.
The definitions need to contain an entry for the column to be checked and
this entry needs to contain information about the value range in the form:
``definitions[col][value_range] = (min, max)``.
Returns
-------
bool
True if the checks for value range pass and False otherwise
Notes
-----
Non-integer results of this computation will always be rounded to the next
smaller integer.
"""
# skip if value range is not defined
if definitions[col]["value_range"] is None:
return True
# skip values that are None or NaN
col_nan_free = self.events[col].dropna()
# retrieve value range from definitions
min_val, max_val = definitions[col]["value_range"]
# check value range for remaining values
if not (min_val <= col_nan_free).all() & (col_nan_free <= max_val).all():
return False
# all checks passed
return True
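    # Illustrative sketch (added for clarity; not part of the original class). A
    # definitions entry has the shape expected by column_values_in_range; the
    # column name and value range below are invented for illustration only.
    #
    #   definitions = {"minute": {"value_range": (0, 120)}}
    #   events_object.column_values_in_range("minute", definitions)  # -> bool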
def add_frameclock(self, framerate: int):
"""Add the column "frameclock", computed as the rounded multiplication of
gameclock and framerate, to the inner events DataFrame.
Parameters
----------
framerate: int
Temporal resolution of data in frames per second/Hertz.
"""
frameclock = np.full((len(self.events)), -1, dtype=int)
frameclock[:] = np.floor(self.events["gameclock"].values * framerate)
self.events["frameclock"] = frameclock
def select(
self, conditions: Tuple[str, Any] or List[Tuple[str, Any]]
) -> pd.DataFrame:
"""Returns a DataFrame containing all entries from the inner events DataFrame
that satisfy all given conditions.
Parameters
----------
conditions: Tuple or List of Tuples
A single or a list of conditions used for filtering. Each condition should
follow the form ``(column, value)``. If ``value`` is given as a variable
(can also be None), it is used to filter for an exact value. If given as a
tuple ``value = (min, max)`` that specifies a minimum and maximum value, it
is filtered for a value range.
For example, to filter all events that have the ``eID`` of ``"Pass"`` and
that happened within the first 1000 seconds of the segment, conditions
should look like:
``conditions = [("eID", "Pass"), ("gameclock", (0, 1000))]``
Returns
-------
filtered_events: pd.DataFrame
A view of the inner events DataFrame with rows fulfilling all criteria
specified in conditions. The DataFrame can be empty if no row fulfills all
specified criteria.
"""
filtered_events = self.events
# convert single non-list condition to list
if not isinstance(conditions, list):
conditions = [conditions]
# loop through and filter by conditions
for column, value in conditions:
# if the value is None filter for all entries with None, NaN or NA
if value is None:
filtered_events = filtered_events[filtered_events[column].isna()]
# check if a single value or a value range is given
else:
# value range: filter by minimum and maximum value
if isinstance(value, (list, tuple)):
min_val, max_val = value
filtered_events = filtered_events[
filtered_events[column] >= min_val
]
filtered_events = filtered_events[
filtered_events[column] <= max_val
]
# single value: filter by that value
else:
filtered_events = filtered_events[filtered_events[column] == value]
return filtered_events
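# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes the enclosing class is floodlight's Events and that pandas is available
# as pd, as in the rest of this module. The event values below are invented.
#
#   df = pd.DataFrame({"eID": ["Pass", "Shot", "Pass"], "gameclock": [12.4, 63.0, 71.2]})
#   events = Events(events=df, direction="lr")
#   events.add_frameclock(framerate=25)
#   early_passes = events.select([("eID", "Pass"), ("gameclock", (0, 60))])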
| [
"warnings.warn",
"numpy.floor"
] | [((6815, 6868), 'numpy.floor', 'np.floor', (["(self.events['gameclock'].values * framerate)"], {}), "(self.events['gameclock'].values * framerate)\n", (6823, 6868), True, 'import numpy as np\n'), ((2478, 2715), 'warnings.warn', 'warnings.warn', (['f"""Floodlight Events column {col} does not match the defined valuerange (from floodlight.core.definitions). You can pursue at this point, however, be aware that this may lead to unexpected behavior in the future."""'], {}), "(\n f'Floodlight Events column {col} does not match the defined valuerange (from floodlight.core.definitions). You can pursue at this point, however, be aware that this may lead to unexpected behavior in the future.'\n )\n", (2491, 2715), False, 'import warnings\n')] |
from dagu_local.dagu.dagu_helper import DaguHelper
class DaguZero:
    def __init__(self):
self.__record = None
self.__date_record = None
self.__timestamp_record = None
self.__divider = None
self.__alpha_check = 0
def set_record(self, record):
self.__record = record
def get_record(self):
return self.__record
def set_divider(self, divider):
self.__divider = divider
def get_divider(self):
return self.__divider
def get_date_record(self):
return self.__date_record
def get_timestamp_record(self):
return self.__timestamp_record
def set_alpha_check(self):
date_record = self.get_date_record()
alpha_check = DaguHelper.alpha_check(record=date_record)
self.__alpha_check = alpha_check
def get_alpha_check(self):
return self.__alpha_check
def set_leading_zero_time(self):
timestamp_record = self.__timestamp_record
if len(timestamp_record[0:timestamp_record.find(":")]) < 2:
timestamp_record = '0' + timestamp_record
self.__timestamp_record = timestamp_record
def set_leading_zero_date_without_divider(self):
date_record = self.get_date_record()
if len(date_record) == 4:
if int(date_record[0:2]) > 12:
date_record = '0' + date_record[0] + '0' + date_record[1] + date_record[2:len(date_record)]
self.__date_record = date_record
elif len(date_record) in [5, 7]:
result = None
if len(date_record) == 5:
result = DaguHelper.try_month_year_wd_len_5(date_record)
elif len(date_record) == 7:
result = DaguHelper.try_month_year_wd_len_7(date_record)
if result is None:
if int(date_record[0]) == 0:
date_record = date_record[0:2] + '0' + date_record[2:len(date_record)]
self.__date_record = date_record
else:
date_record = '0' + date_record
self.__date_record = date_record
elif len(date_record) == 6:
result = DaguHelper.try_day_month_year(date_record)
if result is None:
date_record = '0' + date_record[0] + '0' + date_record[1] + date_record[2:len(date_record)]
self.__date_record = date_record
def set_leading_zero_date_with_divider(self):
date_record = self.get_date_record()
divider = self.get_divider()
if len(date_record[0:date_record.find(divider)]) < 2:
date_record = '0' + date_record
self.__date_record = date_record
position = DaguHelper.findnth(record=date_record, n=2, divider=divider)
if len(date_record[3:position]) < 2:
            date_record = date_record[0:3] + "0" + date_record[3:len(date_record)]
self.__date_record = date_record
# date_record = self.get_date_record()
# if date_record[2:6].count(divider) == 2:
# date_record = date_record[0:3] + '0' + date_record[3:len(date_record)]
# self.__date_record = date_record
def set_leading_zero_alpha(self):
date_record = self.get_date_record()
date_record = DaguHelper.numeric_in_alpha(record=date_record)
self.__date_record = date_record
def split_record(self):
record = self.get_record()
timestamp_record, date_record = DaguHelper.split_record(record=record)
self.__timestamp_record = timestamp_record
self.__date_record = date_record
def assemble_record(self):
date_record = self.get_date_record()
timestamp_record = self.get_timestamp_record()
if timestamp_record is not None:
record = date_record + " " + timestamp_record
self.__record = record
else:
self.__record = date_record
def execute(self, record, divider):
self.set_record(record=record)
self.set_divider(divider=divider)
self.split_record()
self.set_alpha_check()
divider = self.get_divider()
alpha_check = self.get_alpha_check()
if alpha_check == 1:
self.set_leading_zero_alpha()
elif alpha_check == 0 and divider not in ["/", "-", " "]:
self.set_leading_zero_date_without_divider()
else:
self.set_leading_zero_date_with_divider()
timestamp_record = self.get_timestamp_record()
if timestamp_record is not None:
self.set_leading_zero_time()
self.assemble_record()
| [
"dagu_local.dagu.dagu_helper.DaguHelper.try_month_year_wd_len_7",
"dagu_local.dagu.dagu_helper.DaguHelper.numeric_in_alpha",
"dagu_local.dagu.dagu_helper.DaguHelper.findnth",
"dagu_local.dagu.dagu_helper.DaguHelper.try_month_year_wd_len_5",
"dagu_local.dagu.dagu_helper.DaguHelper.try_day_month_year",
"dagu_local.dagu.dagu_helper.DaguHelper.alpha_check",
"dagu_local.dagu.dagu_helper.DaguHelper.split_record"
] | [((749, 791), 'dagu_local.dagu.dagu_helper.DaguHelper.alpha_check', 'DaguHelper.alpha_check', ([], {'record': 'date_record'}), '(record=date_record)\n', (771, 791), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n'), ((2730, 2790), 'dagu_local.dagu.dagu_helper.DaguHelper.findnth', 'DaguHelper.findnth', ([], {'record': 'date_record', 'n': '(2)', 'divider': 'divider'}), '(record=date_record, n=2, divider=divider)\n', (2748, 2790), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n'), ((3302, 3349), 'dagu_local.dagu.dagu_helper.DaguHelper.numeric_in_alpha', 'DaguHelper.numeric_in_alpha', ([], {'record': 'date_record'}), '(record=date_record)\n', (3329, 3349), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n'), ((3495, 3533), 'dagu_local.dagu.dagu_helper.DaguHelper.split_record', 'DaguHelper.split_record', ([], {'record': 'record'}), '(record=record)\n', (3518, 3533), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n'), ((1626, 1673), 'dagu_local.dagu.dagu_helper.DaguHelper.try_month_year_wd_len_5', 'DaguHelper.try_month_year_wd_len_5', (['date_record'], {}), '(date_record)\n', (1660, 1673), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n'), ((2194, 2236), 'dagu_local.dagu.dagu_helper.DaguHelper.try_day_month_year', 'DaguHelper.try_day_month_year', (['date_record'], {}), '(date_record)\n', (2223, 2236), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n'), ((1739, 1786), 'dagu_local.dagu.dagu_helper.DaguHelper.try_month_year_wd_len_7', 'DaguHelper.try_month_year_wd_len_7', (['date_record'], {}), '(date_record)\n', (1773, 1786), False, 'from dagu_local.dagu.dagu_helper import DaguHelper\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# pycairo/cairocffi-based FreeType example - Copyright 2017 <NAME>
# Distributed under the terms of the new BSD license.
#
#  rewrite of the numpy/matplotlib-based example from <NAME>
#
# -----------------------------------------------------------------------------
#
# Direct translation of example 1 from the freetype tutorial:
# http://www.freetype.org/freetype2/docs/tutorial/step1.html
#
#  Except we use FreeType's own trigonometric functions instead of those
# from the system/python's math library.
from cairo import Context, ImageSurface, FORMAT_A8
from bitmap_to_surface import make_image_surface
from freetype.raw import *
from PIL import Image
WIDTH, HEIGHT = 640, 480
image = ImageSurface(FORMAT_A8, WIDTH, HEIGHT)
ctx = Context(image)
def to_c_str(text):
''' Convert python strings to null terminated c strings. '''
cStr = create_string_buffer(text.encode(encoding='UTF-8'))
return cast(pointer(cStr), POINTER(c_char))
def draw_bitmap( bitmap, x, y):
global image, ctx
# cairo does not like zero-width surface
if (bitmap.width > 0):
glyph_surface = make_image_surface(bitmap)
ctx.set_source_surface(glyph_surface, x, y)
ctx.paint()
def main():
library = FT_Library()
matrix = FT_Matrix()
face = FT_Face()
pen = FT_Vector()
filename= 'Vera.ttf'
text = 'Hello World !'
num_chars = len(text)
# FT_Angle is a 16.16 fixed-point value expressed in degrees.
angle = FT_Angle(25 * 65536)
# initialize library, error handling omitted
error = FT_Init_FreeType( byref(library) )
# create face object, error handling omitted
error = FT_New_Face( library, to_c_str(filename), 0, byref(face) )
# set character size: 50pt at 100dpi, error handling omitted
error = FT_Set_Char_Size( face, 50 * 64, 0, 100, 0 )
slot = face.contents.glyph
# set up matrix
matrix.xx = FT_Cos( angle )
matrix.xy = - FT_Sin( angle )
matrix.yx = FT_Sin( angle )
matrix.yy = FT_Cos( angle )
    # the pen position in 26.6 cartesian space coordinates
    # start at (300,200) relative to the upper left corner
    pen.x = 200 * 64
pen.y = ( HEIGHT - 300 ) * 64
for n in range(num_chars):
# set transformation
FT_Set_Transform( face, byref(matrix), byref(pen) )
# load glyph image into the slot (erase previous one)
charcode = ord(text[n])
index = FT_Get_Char_Index( face, charcode )
FT_Load_Glyph( face, index, FT_LOAD_RENDER )
# now, draw to our target surface (convert position)
draw_bitmap( slot.contents.bitmap,
slot.contents.bitmap_left,
HEIGHT - slot.contents.bitmap_top )
# increment pen position
pen.x += slot.contents.advance.x
pen.y += slot.contents.advance.y
FT_Done_Face(face)
FT_Done_FreeType(library)
image.flush()
image.write_to_png("example_1-cairo.png")
Image.open("example_1-cairo.png").show()
if __name__ == '__main__':
main()
| [
"cairo.Context",
"cairo.ImageSurface",
"bitmap_to_surface.make_image_surface",
"PIL.Image.open"
] | [((824, 862), 'cairo.ImageSurface', 'ImageSurface', (['FORMAT_A8', 'WIDTH', 'HEIGHT'], {}), '(FORMAT_A8, WIDTH, HEIGHT)\n', (836, 862), False, 'from cairo import Context, ImageSurface, FORMAT_A8\n'), ((869, 883), 'cairo.Context', 'Context', (['image'], {}), '(image)\n', (876, 883), False, 'from cairo import Context, ImageSurface, FORMAT_A8\n'), ((1232, 1258), 'bitmap_to_surface.make_image_surface', 'make_image_surface', (['bitmap'], {}), '(bitmap)\n', (1250, 1258), False, 'from bitmap_to_surface import make_image_surface\n'), ((3107, 3140), 'PIL.Image.open', 'Image.open', (['"""example_1-cairo.png"""'], {}), "('example_1-cairo.png')\n", (3117, 3140), False, 'from PIL import Image\n')] |
import os
import mojang
import mojang.exceptions
from dotenv import load_dotenv
from flask import Flask, jsonify, redirect, request
load_dotenv()
app = Flask(__name__)
microsoft_app = mojang.microsoft_app(
os.getenv("CLIENT_ID"), os.getenv("CLIENT_SECRET"), "http://localhost:3000"
)
def _sess_to_json(sess):
return {
"name": sess.name,
"uuid": sess.uuid,
"is_legacy": sess.is_legacy,
"is_demo": sess.is_demo,
"names": [
{"name": name[0], "changed_to_at": name[1]} for name in sess.names
],
"skin": {"url": sess.skin.source, "variant": sess.skin.variant},
"cape": {"url": sess.cape.source},
"created_at": sess.created_at,
"can_change_name": sess.name_change_allowed,
}
@app.route("/")
def index():
if request.args.get("code", False):
try:
sess = microsoft_app.authenticate(request.args["code"])
return jsonify(_sess_to_json(sess))
except mojang.exceptions.MicrosoftInvalidGrant:
pass
return redirect(microsoft_app.authorization_url())
if __name__ == "__main__":
app.run(debug=True, port=3000)
| [
"flask.request.args.get",
"flask.Flask",
"os.getenv",
"dotenv.load_dotenv"
] | [((134, 147), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (145, 147), False, 'from dotenv import load_dotenv\n'), ((155, 170), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (160, 170), False, 'from flask import Flask, jsonify, redirect, request\n'), ((213, 235), 'os.getenv', 'os.getenv', (['"""CLIENT_ID"""'], {}), "('CLIENT_ID')\n", (222, 235), False, 'import os\n'), ((237, 263), 'os.getenv', 'os.getenv', (['"""CLIENT_SECRET"""'], {}), "('CLIENT_SECRET')\n", (246, 263), False, 'import os\n'), ((816, 847), 'flask.request.args.get', 'request.args.get', (['"""code"""', '(False)'], {}), "('code', False)\n", (832, 847), False, 'from flask import Flask, jsonify, redirect, request\n')] |
import os
from collections import namedtuple, OrderedDict
from distutils.dir_util import copy_tree
import pytest
from pip._vendor.pkg_resources import Distribution
from pipm import operations
@pytest.fixture
def chdir(tmpdir_factory):
tmpdir = tmpdir_factory.mktemp("test")
os.chdir(tmpdir.strpath)
return tmpdir
DIRNAME = os.path.dirname(__file__)
DATA_DIR = os.path.join(DIRNAME, "data")
DIST_PKG_COUNT = 10
@pytest.fixture
def data_dir(chdir):
copy_tree(DATA_DIR, str(chdir))
Req = namedtuple("Req", ["name"])
def distribution_factory(proj):
return Distribution(
project_name=proj, location=".venv/lib/python/{}".format(proj), version="1.0.0"
)
@pytest.fixture
def patch_dists(mocker):
def _patch_dist(remove=0):
dists = OrderedDict()
cnt = DIST_PKG_COUNT - remove
for i in range(cnt):
proj = "proj-{}".format(i)
dists[proj] = distribution_factory(proj) # type: Distribution
# update requires method
prev_dist = None
for name, dist in dists.items():
if prev_dist is None:
prev_dist = dist
else:
mocker.patch.object(
prev_dist, "requires", return_value=[dist.as_requirement()]
)
prev_dist = dist
m = mocker.patch.object(operations, "get_distributions", return_value=dists)
m.cnt = cnt
return m
return _patch_dist
@pytest.fixture
def patched_dists(patch_dists):
return patch_dists()
@pytest.fixture
def config(chdir):
setup_cfg_str = """\
[options]
install_requires =
six~=1.11.0
[options.extras_require]
dev =
pytest~=3.7.2
"""
from pipm import setup_cfg
with open(setup_cfg.SETUP_FILE_NAME, "w") as f:
f.write(setup_cfg_str)
@pytest.fixture
def install_requirement_factory():
def _factory(r):
from pip._internal.req.constructors import install_req_from_line
req = install_req_from_line(r)
req.is_direct = True
return req
return _factory
@pytest.fixture
def pkg_ir_py(install_requirement_factory):
return install_requirement_factory("py==1.0.0")
@pytest.fixture
def pkg_ir_six(install_requirement_factory):
return install_requirement_factory("six~=1.11.0")
| [
"collections.OrderedDict",
"collections.namedtuple",
"pip._internal.req.constructors.install_req_from_line",
"os.path.join",
"os.chdir",
"os.path.dirname"
] | [((341, 366), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (356, 366), False, 'import os\n'), ((378, 407), 'os.path.join', 'os.path.join', (['DIRNAME', '"""data"""'], {}), "(DIRNAME, 'data')\n", (390, 407), False, 'import os\n'), ((511, 538), 'collections.namedtuple', 'namedtuple', (['"""Req"""', "['name']"], {}), "('Req', ['name'])\n", (521, 538), False, 'from collections import namedtuple, OrderedDict\n'), ((286, 310), 'os.chdir', 'os.chdir', (['tmpdir.strpath'], {}), '(tmpdir.strpath)\n', (294, 310), False, 'import os\n'), ((782, 795), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (793, 795), False, 'from collections import namedtuple, OrderedDict\n'), ((1985, 2009), 'pip._internal.req.constructors.install_req_from_line', 'install_req_from_line', (['r'], {}), '(r)\n', (2006, 2009), False, 'from pip._internal.req.constructors import install_req_from_line\n')] |
import numpy as np
def dummy_jagged_eta_pt():
np.random.seed(42)
counts = np.random.exponential(2, size=50).astype(int)
entries = np.sum(counts)
test_eta = np.random.uniform(-3., 3., size=entries)
test_pt = np.random.exponential(10., size=entries)+np.random.exponential(10, size=entries)
return (counts, test_eta, test_pt)
def dummy_four_momenta():
np.random.seed(12345)
nrows = 1000
counts = np.minimum(np.random.exponential(0.5, size=nrows).astype(int), 20)
px = np.random.normal(loc=20.0,scale=5.0,size=np.sum(counts))
py = np.random.normal(loc=20.0,scale=5.0,size=np.sum(counts))
pz = np.random.normal(loc=0, scale=55, size=np.sum(counts))
m_pi = np.full_like(px,fill_value=0.135)
energy = np.sqrt(px*px + py*py + pz*pz + m_pi*m_pi)
return (counts,px,py,pz,energy)
def dummy_events():
counts, px, py, pz, energy = dummy_four_momenta()
thep4 = np.stack((px,py,pz,energy)).T
class obj(object):
def __init__(self):
self.p4 = thep4
self.blah = energy*px
self.count = counts
class events(object):
def __init__(self):
self.thing = obj()
return events()
| [
"numpy.sqrt",
"numpy.full_like",
"numpy.random.exponential",
"numpy.sum",
"numpy.stack",
"numpy.random.seed",
"numpy.random.uniform"
] | [((51, 69), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (65, 69), True, 'import numpy as np\n'), ((143, 157), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (149, 157), True, 'import numpy as np\n'), ((173, 215), 'numpy.random.uniform', 'np.random.uniform', (['(-3.0)', '(3.0)'], {'size': 'entries'}), '(-3.0, 3.0, size=entries)\n', (190, 215), True, 'import numpy as np\n'), ((379, 400), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (393, 400), True, 'import numpy as np\n'), ((710, 744), 'numpy.full_like', 'np.full_like', (['px'], {'fill_value': '(0.135)'}), '(px, fill_value=0.135)\n', (722, 744), True, 'import numpy as np\n'), ((757, 807), 'numpy.sqrt', 'np.sqrt', (['(px * px + py * py + pz * pz + m_pi * m_pi)'], {}), '(px * px + py * py + pz * pz + m_pi * m_pi)\n', (764, 807), True, 'import numpy as np\n'), ((228, 269), 'numpy.random.exponential', 'np.random.exponential', (['(10.0)'], {'size': 'entries'}), '(10.0, size=entries)\n', (249, 269), True, 'import numpy as np\n'), ((269, 308), 'numpy.random.exponential', 'np.random.exponential', (['(10)'], {'size': 'entries'}), '(10, size=entries)\n', (290, 308), True, 'import numpy as np\n'), ((923, 953), 'numpy.stack', 'np.stack', (['(px, py, pz, energy)'], {}), '((px, py, pz, energy))\n', (931, 953), True, 'import numpy as np\n'), ((83, 116), 'numpy.random.exponential', 'np.random.exponential', (['(2)'], {'size': '(50)'}), '(2, size=50)\n', (104, 116), True, 'import numpy as np\n'), ((553, 567), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (559, 567), True, 'import numpy as np\n'), ((619, 633), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (625, 633), True, 'import numpy as np\n'), ((683, 697), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (689, 697), True, 'import numpy as np\n'), ((442, 480), 'numpy.random.exponential', 'np.random.exponential', (['(0.5)'], {'size': 'nrows'}), '(0.5, size=nrows)\n', (463, 480), True, 'import numpy as np\n')] |
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from random import randint
from qnarre.neura import tf
def dset(ps, _):
ps.update(PAD=0, UNK=1, BEG=2, END=3, num_toks=20)
ps.update(len_tgt=ps.len_src)
t, sh = tf.int32, tf.TensorShape((ps.len_src, ))
return tf.Dataset.from_generator(
lambda: _generator(ps),
((t, ) * 4, t),
((sh, ) * 4, sh),
)
def _generator(ps):
sl = ps.len_src
for _ in range(10000):
n = randint(1, sl - 2)
c = randint(0, 9) + 10
s = [ps.BEG] + [c] * n + [ps.END] + [ps.PAD] * (sl - n - 2)
h = [ps.BEG] + [ps.UNK] * (sl - 1)
t = s[:n + 2] + h[n + 2:]
yield (s, [0] * sl, h, t), t
| [
"qnarre.neura.tf.TensorShape",
"random.randint"
] | [((870, 899), 'qnarre.neura.tf.TensorShape', 'tf.TensorShape', (['(ps.len_src,)'], {}), '((ps.len_src,))\n', (884, 899), False, 'from qnarre.neura import tf\n'), ((1108, 1126), 'random.randint', 'randint', (['(1)', '(sl - 2)'], {}), '(1, sl - 2)\n', (1115, 1126), False, 'from random import randint\n'), ((1139, 1152), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1146, 1152), False, 'from random import randint\n')] |
"""
File: Pira.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/jplehr/pira/LICENSE.txt
Description: Module implementing the main workflow of PIRA.
"""
from lib.ConfigurationLoader import SimplifiedConfigurationLoader as SCLoader
from lib.ConfigurationLoader import ConfigurationLoader as CLoader
from lib.Configuration import TargetConfiguration, PiraConfiguration, ExtrapConfiguration, InvocationConfiguration, PiraConfigurationErrorException
from lib.Runner import Runner, LocalRunner, LocalScalingRunner
from lib.Builder import Builder as B
from lib.Analyzer import Analyzer as A
from lib.Checker import Checker as checker
import lib.Logging as log
import lib.Utility as util
import lib.BatchSystemHelper as bat_sys
import lib.FunctorManagement as fm
import lib.Measurement as ms
import lib.TimeTracking as tt
import lib.Database as d
import lib.ProfileSink as sinks
from lib.RunnerFactory import PiraRunnerFactory
import typing
import sys
def execute_with_config(runner: Runner, analyzer: A, pira_iters: int, target_config: TargetConfiguration) -> None:
try:
log.get_logger().log('run_setup phase.', level='debug')
instrument = False
pira_iterations = pira_iters
# Build without any instrumentation
vanilla_builder = B(target_config, instrument)
tracker = tt.TimeTracker()
tracker.m_track('Vanilla Build', vanilla_builder, 'build')
# Run without instrumentation for baseline
log.get_logger().log('Running baseline measurements', level='info')
vanilla_rr = runner.do_baseline_run(target_config)
log.get_logger().log(
'Pira::execute_with_config: RunResult: ' + str(vanilla_rr) + ' | avg: ' + str(vanilla_rr.get_average()),
level='debug')
instr_file = ''
for x in range(0, pira_iterations):
log.get_logger().log('Running instrumentation iteration ' + str(x), level='info')
# Only run the pgoe to get the functions name
iteration_tracker = tt.TimeTracker()
# Analysis Phase
instr_file = analyzer.analyze(target_config, x)
log.get_logger().log('[WHITELIST] $' + str(x) + '$ ' + str(util.lines_in_file(instr_file)), level='perf')
util.shell('stat ' + instr_file)
# After baseline measurement is complete, do the instrumented build/run
# This is only necessary in every iteration when run in compile-time mode.
      if x == 0 or target_config.is_compile_time_filtering():
instrument = True
instr_builder = B(target_config, instrument, instr_file)
tracker.m_track('Instrument Build', instr_builder, 'build')
#Run Phase
log.get_logger().log('Running profiling measurements', level='info')
instr_rr = runner.do_profile_run(target_config, x)
# Compute overhead of instrumentation
ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
log.get_logger().log('[RUNTIME] $' + str(x) + '$ ' + str(instr_rr.get_average()), level='perf')
log.get_logger().log('[OVERHEAD] $' + str(x) + '$ ' + str(ovh_percentage), level='perf')
iteration_tracker.stop()
user_time, system_time = iteration_tracker.get_time()
log.get_logger().log('[ITERTIME] $' + str(x) + '$ ' + str(user_time) + ', ' + str(system_time), level='perf')
except Exception as e:
log.get_logger().log(
'Pira::execute_with_config: Problem during preparation of run.\nMessage:\n' + str(e), level='error')
raise RuntimeError(str(e))
def process_args_for_extrap(cmdline_args) -> typing.Tuple[bool, str]:
use_extra_p = False
extrap_config = ExtrapConfiguration('', '', '')
  if cmdline_args.extrap_dir != '':
use_extra_p = True
extrap_config = ExtrapConfiguration(cmdline_args.extrap_dir, cmdline_args.extrap_prefix, '')
num_reps = cmdline_args.repetitions
if num_reps < 5:
log.get_logger().log('At least 5 repetitions are recommended for Extra-P modelling.', level='warn')
if num_reps < 0:
log.get_logger().log('REMEMBER TO REMOVE IN PIRA::process_args_for_extrap', level='warn')
log.get_logger().log('At least 3 repetitions are required for Extra-P modelling.', level='error')
raise RuntimeError('At least 5 repetitions are needed for Extra-P modelling.')
return use_extra_p, extrap_config
def show_pira_invoc_info(cmdline_args) -> None:
invoc_cfg = process_args_for_invoc(cmdline_args)
cf_str = 'compile-time filtering'
if not invoc_cfg.is_compile_time_filtering():
cf_str = 'runtime filtering'
log.get_logger().log(
'Pira::main: Running PIRA in ' + cf_str + ' with configuration\n ' + str(invoc_cfg.get_path_to_cfg()),
level='info')
def process_args_for_invoc(cmdline_args) -> None:
path_to_config = cmdline_args.config
compile_time_filter = not cmdline_args.runtime_filter
pira_iters = cmdline_args.iterations
num_reps = cmdline_args.repetitions
invoc_cfg = InvocationConfiguration(path_to_config, compile_time_filter, pira_iters, num_reps)
return invoc_cfg
def main(arguments) -> None:
""" Main function for pira framework. Used to invoke the various components. """
show_pira_invoc_info(arguments)
invoc_cfg = process_args_for_invoc(arguments)
use_extra_p, extrap_config = process_args_for_extrap(arguments)
home_dir = util.get_cwd()
util.set_home_dir(home_dir)
try:
    if arguments.version == 1:
config_loader = CLoader()
configuration = config_loader.load_conf(invoc_cfg.get_path_to_cfg())
checker.check_configfile_v1(configuration)
else:
config_loader = SCLoader()
configuration = config_loader.load_conf(invoc_cfg.get_path_to_cfg())
checker.check_configfile_v2(configuration)
if bat_sys.check_queued_job():
# FIXME: Implement
log.get_logger().log('In this version of PIRA it is not yet implemented', level='error')
assert (False)
else:
'''
This branch is running PIRA actively on the local machine.
It is blocking, and the user can track the progress in the terminal.
'''
log.get_logger().log('Running the local case')
# The FunctorManager manages loaded functors and generates the respective names
fm.FunctorManager(configuration)
dbm = d.DBManager(d.DBManager.db_name + '.' + d.DBManager.db_ext)
dbm.create_cursor()
analyzer = A(configuration)
runner_factory = PiraRunnerFactory(invoc_cfg, configuration)
runner = runner_factory.get_simple_local_runner()
if use_extra_p:
log.get_logger().log('Running with Extra-P runner')
runner = runner_factory.get_scalability_runner(extrap_config)
if runner.has_sink():
analyzer.set_profile_sink(runner.get_sink())
# A build/place is a top-level directory
for build in configuration.get_builds():
log.get_logger().log('Build: ' + str(build))
app_tuple = (util.generate_random_string(), build, '', '')
dbm.insert_data_application(app_tuple)
# An item is a target/software in that directory
for item in configuration.get_items(build):
log.get_logger().log('Running for item ' + str(item))
# A flavor is a specific version to build
if configuration.has_local_flavors(build, item):
for flavor in configuration.get_flavors(build, item):
log.get_logger().log('Running for local flavor ' + flavor, level='debug')
# prepare database, and get a unique handle for current item.
db_item_id = dbm.prep_db_for_build_item_in_flavor(configuration, build, item, flavor)
# Create configuration object for the item currently processed.
place = configuration.get_place(build)
t_config = TargetConfiguration(place, build, item, flavor, db_item_id, invoc_cfg.is_compile_time_filtering())
# Execute using a local runner, given the generated target description
execute_with_config(runner, analyzer, invoc_cfg.get_pira_iters(), t_config)
# If global flavor
else:
# TODO: Implement
log.get_logger().log('In this version of PIRA it is not yet implemented', level='error')
assert (False)
util.change_cwd(home_dir)
except RuntimeError as rt_err:
util.change_cwd(home_dir)
log.get_logger().log('Runner.run caught exception. Message: ' + str(rt_err), level='error')
log.get_logger().dump_tape()
sys.exit(-1)
| [
"lib.Utility.generate_random_string",
"lib.Utility.change_cwd",
"lib.Analyzer.Analyzer",
"sys.exit",
"lib.Utility.set_home_dir",
"lib.Checker.Checker.check_configfile_v1",
"lib.TimeTracking.TimeTracker",
"lib.Utility.get_cwd",
"lib.Configuration.InvocationConfiguration",
"lib.Utility.lines_in_file",
"lib.Database.DBManager",
"lib.FunctorManagement.FunctorManager",
"lib.Configuration.ExtrapConfiguration",
"lib.Checker.Checker.check_configfile_v2",
"lib.Builder.Builder",
"lib.RunnerFactory.PiraRunnerFactory",
"lib.ConfigurationLoader.SimplifiedConfigurationLoader",
"lib.Utility.shell",
"lib.BatchSystemHelper.check_queued_job",
"lib.ConfigurationLoader.ConfigurationLoader",
"lib.Logging.get_logger"
] | [((3599, 3630), 'lib.Configuration.ExtrapConfiguration', 'ExtrapConfiguration', (['""""""', '""""""', '""""""'], {}), "('', '', '')\n", (3618, 3630), False, 'from lib.Configuration import TargetConfiguration, PiraConfiguration, ExtrapConfiguration, InvocationConfiguration, PiraConfigurationErrorException\n'), ((4920, 5006), 'lib.Configuration.InvocationConfiguration', 'InvocationConfiguration', (['path_to_config', 'compile_time_filter', 'pira_iters', 'num_reps'], {}), '(path_to_config, compile_time_filter, pira_iters,\n num_reps)\n', (4943, 5006), False, 'from lib.Configuration import TargetConfiguration, PiraConfiguration, ExtrapConfiguration, InvocationConfiguration, PiraConfigurationErrorException\n'), ((5300, 5314), 'lib.Utility.get_cwd', 'util.get_cwd', ([], {}), '()\n', (5312, 5314), True, 'import lib.Utility as util\n'), ((5317, 5344), 'lib.Utility.set_home_dir', 'util.set_home_dir', (['home_dir'], {}), '(home_dir)\n', (5334, 5344), True, 'import lib.Utility as util\n'), ((1319, 1347), 'lib.Builder.Builder', 'B', (['target_config', 'instrument'], {}), '(target_config, instrument)\n', (1320, 1347), True, 'from lib.Builder import Builder as B\n'), ((1362, 1378), 'lib.TimeTracking.TimeTracker', 'tt.TimeTracker', ([], {}), '()\n', (1376, 1378), True, 'import lib.TimeTracking as tt\n'), ((3714, 3790), 'lib.Configuration.ExtrapConfiguration', 'ExtrapConfiguration', (['cmdline_args.extrap_dir', 'cmdline_args.extrap_prefix', '""""""'], {}), "(cmdline_args.extrap_dir, cmdline_args.extrap_prefix, '')\n", (3733, 3790), False, 'from lib.Configuration import TargetConfiguration, PiraConfiguration, ExtrapConfiguration, InvocationConfiguration, PiraConfigurationErrorException\n'), ((5715, 5741), 'lib.BatchSystemHelper.check_queued_job', 'bat_sys.check_queued_job', ([], {}), '()\n', (5739, 5741), True, 'import lib.BatchSystemHelper as bat_sys\n'), ((8243, 8268), 'lib.Utility.change_cwd', 'util.change_cwd', (['home_dir'], {}), '(home_dir)\n', (8258, 8268), True, 'import lib.Utility as util\n'), ((2007, 2023), 'lib.TimeTracking.TimeTracker', 'tt.TimeTracker', ([], {}), '()\n', (2021, 2023), True, 'import lib.TimeTracking as tt\n'), ((2220, 2252), 'lib.Utility.shell', 'util.shell', (["('stat ' + instr_file)"], {}), "('stat ' + instr_file)\n", (2230, 2252), True, 'import lib.Utility as util\n'), ((4530, 4546), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (4544, 4546), True, 'import lib.Logging as log\n'), ((5406, 5415), 'lib.ConfigurationLoader.ConfigurationLoader', 'CLoader', ([], {}), '()\n', (5413, 5415), True, 'from lib.ConfigurationLoader import ConfigurationLoader as CLoader\n'), ((5497, 5539), 'lib.Checker.Checker.check_configfile_v1', 'checker.check_configfile_v1', (['configuration'], {}), '(configuration)\n', (5524, 5539), True, 'from lib.Checker import Checker as checker\n'), ((5572, 5582), 'lib.ConfigurationLoader.SimplifiedConfigurationLoader', 'SCLoader', ([], {}), '()\n', (5580, 5582), True, 'from lib.ConfigurationLoader import SimplifiedConfigurationLoader as SCLoader\n'), ((5664, 5706), 'lib.Checker.Checker.check_configfile_v2', 'checker.check_configfile_v2', (['configuration'], {}), '(configuration)\n', (5691, 5706), True, 'from lib.Checker import Checker as checker\n'), ((6201, 6233), 'lib.FunctorManagement.FunctorManager', 'fm.FunctorManager', (['configuration'], {}), '(configuration)\n', (6218, 6233), True, 'import lib.FunctorManagement as fm\n'), ((6246, 6305), 'lib.Database.DBManager', 'd.DBManager', (["(d.DBManager.db_name + '.' 
+ d.DBManager.db_ext)"], {}), "(d.DBManager.db_name + '.' + d.DBManager.db_ext)\n", (6257, 6305), True, 'import lib.Database as d\n'), ((6349, 6365), 'lib.Analyzer.Analyzer', 'A', (['configuration'], {}), '(configuration)\n', (6350, 6365), True, 'from lib.Analyzer import Analyzer as A\n'), ((6390, 6433), 'lib.RunnerFactory.PiraRunnerFactory', 'PiraRunnerFactory', (['invoc_cfg', 'configuration'], {}), '(invoc_cfg, configuration)\n', (6407, 6433), False, 'from lib.RunnerFactory import PiraRunnerFactory\n'), ((8307, 8332), 'lib.Utility.change_cwd', 'util.change_cwd', (['home_dir'], {}), '(home_dir)\n', (8322, 8332), True, 'import lib.Utility as util\n'), ((8466, 8478), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (8474, 8478), False, 'import sys\n'), ((1143, 1159), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (1157, 1159), True, 'import lib.Logging as log\n'), ((1494, 1510), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (1508, 1510), True, 'import lib.Logging as log\n'), ((1621, 1637), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (1635, 1637), True, 'import lib.Logging as log\n'), ((2525, 2565), 'lib.Builder.Builder', 'B', (['target_config', 'instrument', 'instr_file'], {}), '(target_config, instrument, instr_file)\n', (2526, 2565), True, 'from lib.Builder import Builder as B\n'), ((1846, 1862), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (1860, 1862), True, 'import lib.Logging as log\n'), ((2108, 2124), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (2122, 2124), True, 'import lib.Logging as log\n'), ((2658, 2674), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (2672, 2674), True, 'import lib.Logging as log\n'), ((2896, 2912), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (2910, 2912), True, 'import lib.Logging as log\n'), ((2998, 3014), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (3012, 3014), True, 'import lib.Logging as log\n'), ((3185, 3201), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (3199, 3201), True, 'import lib.Logging as log\n'), ((3325, 3341), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (3339, 3341), True, 'import lib.Logging as log\n'), ((3859, 3875), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (3873, 3875), True, 'import lib.Logging as log\n'), ((5774, 5790), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (5788, 5790), True, 'import lib.Logging as log\n'), ((6061, 6077), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (6075, 6077), True, 'import lib.Logging as log\n'), ((6893, 6922), 'lib.Utility.generate_random_string', 'util.generate_random_string', ([], {}), '()\n', (6920, 6922), True, 'import lib.Utility as util\n'), ((8337, 8353), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (8351, 8353), True, 'import lib.Logging as log\n'), ((8433, 8449), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (8447, 8449), True, 'import lib.Logging as log\n'), ((2167, 2197), 'lib.Utility.lines_in_file', 'util.lines_in_file', (['instr_file'], {}), '(instr_file)\n', (2185, 2197), True, 'import lib.Utility as util\n'), ((3990, 4006), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (4004, 4006), True, 'import lib.Logging as log\n'), ((4088, 4104), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (4102, 4104), True, 'import lib.Logging as log\n'), ((6520, 6536), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', 
(6534, 6536), True, 'import lib.Logging as log\n'), ((6827, 6843), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (6841, 6843), True, 'import lib.Logging as log\n'), ((7106, 7122), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (7120, 7122), True, 'import lib.Logging as log\n'), ((8122, 8138), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (8136, 8138), True, 'import lib.Logging as log\n'), ((7352, 7368), 'lib.Logging.get_logger', 'log.get_logger', ([], {}), '()\n', (7366, 7368), True, 'import lib.Logging as log\n')] |
try:
from openpyxl import load_workbook
except ImportError:
print('openpyxl must be installed to run the script. [ pip install openpyxl ]')
exit(-1)
from os import path, makedirs
import argparse
import json
import io
import copy
directory = 'export/'
prologue = None
epilogue = None
file_format = 'json'
force_insertion = False
flatten_keys = False
# http://stackoverflow.com/a/18809656/2304450
from collections import OrderedDict
class NestedOrderedDict(OrderedDict):
def __missing__(self, key):
val = self[key] = NestedOrderedDict()
return val
# two super useful utils to get/set a key recursively
# http://stackoverflow.com/a/14692747/2304450
from functools import reduce
import operator
def getFromDict(dataDict, mapList):
try:
return reduce(operator.getitem, mapList, dataDict)
except KeyError:
return None
except TypeError:
return None
def setInDict(dataDict, mapList, value):
getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value
def createNestedKeysFile(column, keys, jsonSchema, immutableJsonSchema):
langCode = column[0].value
langName = column[1].value
if langName is None or langCode is None:
return
langCode = langCode.strip()
langName = langName.strip().title()
print(' Processing ' + langName)
translatedDict = (copy.deepcopy(jsonSchema), NestedOrderedDict())[jsonSchema is None]
for index, cell in enumerate(column):
        if 1 < index < len(keys):
if cell.value is None:
cell.value = ''
key = keys[index].strip()
value = cell.value.replace('"', '\\"')
if jsonSchema:
if getFromDict(immutableJsonSchema, key.split('.')) is not None:
setInDict(translatedDict, key.split('.'), value)
else:
if force_insertion:
setInDict(translatedDict, key.split('.'), value)
print(' WARN: key not found in given schema: ' + key + ' [forcing insert]')
else:
print(' WARN: key not found in given schema: ' + key + ' [skipping]')
elif flatten_keys:
translatedDict[key] = value
else:
setInDict(translatedDict, key.split('.'), value)
filename = directory + '/' + langCode + '.' + file_format
print(' Saving file ' + filename)
with io.open(filename, 'w', encoding='utf-8') as outfile:
if prologue:
outfile.write(unicode(prologue))
outfile.write(json.dumps(translatedDict, ensure_ascii=False, indent=2, sort_keys=flatten_keys))
if epilogue:
outfile.write(unicode(epilogue))
def process(sheet, schema):
keys = []
jsonSchema = None
immutableJsonSchema = None
if schema is not None:
with open(schema, 'r') as schema_file:
jsonSchema = json.load(schema_file, object_pairs_hook=NestedOrderedDict)
with open(schema, 'r') as schema_file:
immutableJsonSchema = json.load(schema_file, object_pairs_hook=OrderedDict)
for column in sheet.columns:
# generate the keys list
        if not keys:
for cell in column:
if cell.value is not None:
keys.append(cell.value)
else:
createNestedKeysFile(column, keys, jsonSchema, immutableJsonSchema)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Transform an excel file containing translations in the separate JSON files required by polyglot.js',
epilog='For more info, see https://github.com/BelkaLab/translationpy'
)
parser.add_argument('input', help='the input excel file')
parser.add_argument('--out-dir', help='specify the output directory (default: export/)', default='export')
parser.add_argument('--sheets', help='process only the specified sheets (default: active sheet, * for all)')
parser.add_argument('--prologue', help='a string to prepend to the dumped JSON variable')
parser.add_argument('--epilogue', help='a string to append to the dumped JSON variable')
parser.add_argument('--schema', help='honour the given schema in output files')
parser.add_argument('--format', help='output files format', choices={'js', 'json', 'jsonp'}, default='json')
parser.add_argument('--force', help='force insertion of keys not found in given schema', action='store_true')
parser.add_argument('--flatten-keys', help='make the output a flat dictionary', action='store_true')
args = parser.parse_args()
    prologue = args.prologue.replace('\\n', '\n') if args.prologue else None  # fix newlines!
    epilogue = args.epilogue.replace('\\n', '\n') if args.epilogue else None  # fix newlines!
file_format = args.format
force_insertion = args.force
flatten_keys = args.flatten_keys
workbook = load_workbook(args.input)
if args.sheets:
# process all sheets matching given list, or all sheets if '*' was given
for sheetName in workbook.get_sheet_names():
if args.sheets == '*' or sheetName in args.sheets:
print('\nProcessing worksheet "%s"' % sheetName)
directory = args.out_dir + '/' + sheetName
if not path.exists(directory):
makedirs(directory)
process(workbook.get_sheet_by_name(sheetName), args.schema)
else:
# process only active worksheet
directory = args.out_dir
if not path.exists(directory):
makedirs(directory)
print('\nProcessing active worksheet "%s"' % workbook.active.title)
process(workbook.active, args.schema)
print('\nEverything done.\n')
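# Illustrative invocation sketch (added; not part of the original script). The
# script file name, workbook name and schema path below are hypothetical:
#
#   python translate_excel.py translations.xlsx --sheets '*' --out-dir export \
#       --format json --schema schema/en.json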
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"openpyxl.load_workbook",
"functools.reduce",
"json.dumps",
"io.open",
"copy.deepcopy",
"json.load"
] | [((3070, 3288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Transform an excel file containing translations in the separate JSON files required by polyglot.js"""', 'epilog': '"""For more info, see https://github.com/BelkaLab/translationpy"""'}), "(description=\n 'Transform an excel file containing translations in the separate JSON files required by polyglot.js'\n , epilog='For more info, see https://github.com/BelkaLab/translationpy')\n", (3093, 3288), False, 'import argparse\n'), ((4408, 4433), 'openpyxl.load_workbook', 'load_workbook', (['args.input'], {}), '(args.input)\n', (4421, 4433), False, 'from openpyxl import load_workbook\n'), ((770, 813), 'functools.reduce', 'reduce', (['operator.getitem', 'mapList', 'dataDict'], {}), '(operator.getitem, mapList, dataDict)\n', (776, 813), False, 'from functools import reduce\n'), ((2180, 2220), 'io.open', 'io.open', (['filename', '"""w"""'], {'encoding': '"""utf-8"""'}), "(filename, 'w', encoding='utf-8')\n", (2187, 2220), False, 'import io\n'), ((1283, 1308), 'copy.deepcopy', 'copy.deepcopy', (['jsonSchema'], {}), '(jsonSchema)\n', (1296, 1308), False, 'import copy\n'), ((2301, 2386), 'json.dumps', 'json.dumps', (['translatedDict'], {'ensure_ascii': '(False)', 'indent': '(2)', 'sort_keys': 'flatten_keys'}), '(translatedDict, ensure_ascii=False, indent=2, sort_keys=flatten_keys\n )\n', (2311, 2386), False, 'import json\n'), ((2606, 2665), 'json.load', 'json.load', (['schema_file'], {'object_pairs_hook': 'NestedOrderedDict'}), '(schema_file, object_pairs_hook=NestedOrderedDict)\n', (2615, 2665), False, 'import json\n'), ((2732, 2785), 'json.load', 'json.load', (['schema_file'], {'object_pairs_hook': 'OrderedDict'}), '(schema_file, object_pairs_hook=OrderedDict)\n', (2741, 2785), False, 'import json\n'), ((4932, 4954), 'os.path.exists', 'path.exists', (['directory'], {}), '(directory)\n', (4943, 4954), False, 'from os import path, makedirs\n'), ((4959, 4978), 'os.makedirs', 'makedirs', (['directory'], {}), '(directory)\n', (4967, 4978), False, 'from os import path, makedirs\n'), ((4740, 4762), 'os.path.exists', 'path.exists', (['directory'], {}), '(directory)\n', (4751, 4762), False, 'from os import path, makedirs\n'), ((4769, 4788), 'os.makedirs', 'makedirs', (['directory'], {}), '(directory)\n', (4777, 4788), False, 'from os import path, makedirs\n')] |
import pytest
from digeiz_api.app import create_app
from digeiz_api.models.db import db as _db
from digeiz_api.models import Account, Mall, Unit
@pytest.fixture
def app():
app = create_app(testing=True)
return app
@pytest.fixture
def client(app):
yield app.test_client()
@pytest.fixture
def db(app):
_db.app = app
_db.session.expire_on_commit = False
with app.app_context():
_db.create_all()
yield _db
_db.session.close()
_db.drop_all()
@pytest.fixture
def account(db):
account = Account(name="test account")
db.session.add(account)
db.session.commit()
return account
@pytest.fixture
def mall(db, account):
mall = Mall(name="test mall", account_id=account.id)
db.session.add(mall)
db.session.commit()
return mall
@pytest.fixture
def unit(db, mall):
unit = Unit(name="test unit", mall_id=mall.id)
db.session.add(unit)
db.session.commit()
    return unit
| [
"digeiz_api.models.Mall",
"digeiz_api.models.Unit",
"digeiz_api.models.db.db.drop_all",
"digeiz_api.models.Account",
"digeiz_api.models.db.db.create_all",
"digeiz_api.app.create_app",
"digeiz_api.models.db.db.session.close"
] | [((184, 208), 'digeiz_api.app.create_app', 'create_app', ([], {'testing': '(True)'}), '(testing=True)\n', (194, 208), False, 'from digeiz_api.app import create_app\n'), ((451, 470), 'digeiz_api.models.db.db.session.close', '_db.session.close', ([], {}), '()\n', (468, 470), True, 'from digeiz_api.models.db import db as _db\n'), ((475, 489), 'digeiz_api.models.db.db.drop_all', '_db.drop_all', ([], {}), '()\n', (487, 489), True, 'from digeiz_api.models.db import db as _db\n'), ((539, 567), 'digeiz_api.models.Account', 'Account', ([], {'name': '"""test account"""'}), "(name='test account')\n", (546, 567), False, 'from digeiz_api.models import Account, Mall, Unit\n'), ((691, 736), 'digeiz_api.models.Mall', 'Mall', ([], {'name': '"""test mall"""', 'account_id': 'account.id'}), "(name='test mall', account_id=account.id)\n", (695, 736), False, 'from digeiz_api.models import Account, Mall, Unit\n'), ((851, 890), 'digeiz_api.models.Unit', 'Unit', ([], {'name': '"""test unit"""', 'mall_id': 'mall.id'}), "(name='test unit', mall_id=mall.id)\n", (855, 890), False, 'from digeiz_api.models import Account, Mall, Unit\n'), ((414, 430), 'digeiz_api.models.db.db.create_all', '_db.create_all', ([], {}), '()\n', (428, 430), True, 'from digeiz_api.models.db import db as _db\n')] |
import torch.nn as nn
class MLP(nn.Module):
"""
Creates sequential fully-connected layers FC_1->FC_2->...->FC_N.
Parameters
----------
fc_sizes : int
Fully connected sequential layer sizes.
"""
def __init__(self, *fc_sizes: int):
super().__init__()
fc_sizes = list(fc_sizes)
n_classes = fc_sizes.pop()
classifier = []
for in_features, out_features in zip(fc_sizes[:-1], fc_sizes[1:]):
classifier.append(nn.Linear(in_features, out_features))
classifier.append(nn.ReLU(inplace=True))
classifier.append(nn.Linear(in_features=fc_sizes[-1],
out_features=n_classes))
self.mlp = nn.Sequential(*classifier)
def forward(self, x):
x = x.flatten(start_dim=1)
x = self.mlp(x)
return x
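# Minimal usage sketch (added for illustration; not part of the original module).
# The layer sizes and batch shape below are arbitrary:
#
#   import torch
#   model = MLP(784, 256, 128, 10)               # three Linear layers, 10 classes
#   logits = model(torch.randn(32, 1, 28, 28))   # input flattened to (32, 784) -> (32, 10)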
| [
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Linear"
] | [((728, 754), 'torch.nn.Sequential', 'nn.Sequential', (['*classifier'], {}), '(*classifier)\n', (741, 754), True, 'import torch.nn as nn\n'), ((612, 671), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'fc_sizes[-1]', 'out_features': 'n_classes'}), '(in_features=fc_sizes[-1], out_features=n_classes)\n', (621, 671), True, 'import torch.nn as nn\n'), ((495, 531), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (504, 531), True, 'import torch.nn as nn\n'), ((563, 584), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (570, 584), True, 'import torch.nn as nn\n')] |
import os
import numpy as np
import lbfcs
import lbfcs.simulate as simulate
### Define system constants
reps = 1000
M = [9000]*1
CycleTime = 0.2
N = 1
koff = 2.66e-1
kon = 6.5e6
cs = [5000e-12]
box = 7
e_tot = 350
sigma = 0.9
use_weight = False
savedir = r'C:\Data\p17.lbFCS2\21-07-13_sim_Pm2_exp200'
#%%
for i,c in enumerate(cs):
### Path and naming
N_str=('%i'%(N)).zfill(2)
c_str=('%i'%(c*1e12)).zfill(4)
savename='N%s_c%s_picked.hdf5'%(N_str,c_str)
savepath=os.path.join(savedir,savename)
### Generate simulation
locs = simulate.generate_locs(savepath,
reps,
M[i],
CycleTime,
N,
koff,
kon,
c,
box,
e_tot,
lbfcs.snr_from_conc(c),
sigma,
use_weight)
| [
"lbfcs.snr_from_conc",
"os.path.join"
] | [((493, 524), 'os.path.join', 'os.path.join', (['savedir', 'savename'], {}), '(savedir, savename)\n', (505, 524), False, 'import os\n'), ((998, 1020), 'lbfcs.snr_from_conc', 'lbfcs.snr_from_conc', (['c'], {}), '(c)\n', (1017, 1020), False, 'import lbfcs\n')] |
# Generated by Django 3.2.9 on 2021-11-07 13:18
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0008_alter_profile_language"),
]
operations = [
migrations.AddField(
model_name="profile",
name="received_count",
field=models.IntegerField(
default=0, help_text="Number of alerts sent to this user."
),
),
migrations.AddField(
model_name="profile",
name="updated_at",
field=models.DateTimeField(
default=django.utils.timezone.now,
help_text="Timestamp when user was last updated.",
),
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.IntegerField"
] | [((373, 452), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'help_text': '"""Number of alerts sent to this user."""'}), "(default=0, help_text='Number of alerts sent to this user.')\n", (392, 452), False, 'from django.db import migrations, models\n'), ((607, 718), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'help_text': '"""Timestamp when user was last updated."""'}), "(default=django.utils.timezone.now, help_text=\n 'Timestamp when user was last updated.')\n", (627, 718), False, 'from django.db import migrations, models\n')] |
from io import BytesIO
import os
import selectors
from threading import Lock, Thread
from typing import Dict, Iterator, Optional, Tuple, Union, cast
import evdev
import rospy
from .shape import DeviceShape
from .util import evdev_util
from .util.evdev_const import DeviceAxis, DeviceEventType, DeviceKey, SyncEvent
class AxisBuf:
"""A buffer storing the state of a single absolute axis on an input device.
Axis values are remapped from a source interval [a, b] to a used-defined target
interval [a', b'], which allows for the normalization of joystick values.
"""
def __init__(self, init_value: float, from_min: float, from_max: float, to_min: float, to_max: float, deadband: float):
"""Creates a new buffer for an absolute axis with the given properties.
Parameters
----------
init_value : float
The initial axis value.
from_min : float
The minimum value for the source interval.
from_max : float
The maximum value for the source interval.
to_min : float
The minimum value for the target interval.
to_max : float
The maximum value for the target interval.
deadband : float
            The fraction of the input range on either side of the input center that is collapsed to the output center (the dead zone).
"""
# Set up slopes and output offsets, as well as the bounds for the 3 regions introduced by the deadband
self._scale = (to_max - to_min) / ((1 - deadband) * (from_max - from_min))
self._offset = (to_max + to_min) / 2
input_center = (from_max + from_min) / 2
self._deadband_high = deadband * (from_max - input_center) + input_center
self._deadband_low = deadband * (from_min - input_center) + input_center
# Sort the regions in the event of inversion maps
self._low_region_lower_bound = min(self._deadband_low, from_min)
self._low_region_upper_bound = max(self._deadband_low, from_min)
self._high_region_lower_bound = min(self._deadband_high, from_max)
self._high_region_upper_bound = max(self._deadband_high, from_max)
self._center_region_lower_bound = min(self._low_region_upper_bound, self._high_region_lower_bound)
self._center_region_upper_bound = max(self._low_region_upper_bound, self._high_region_lower_bound)
# Update the output value
self._value = self._remap(init_value)
@property
def value(self) -> float:
"""The current remapped value of the axis.
Returns
-------
float
The remapped axis value.
"""
return self._value
def update(self, unmapped_value: float):
"""Writes a new value to the buffer.
Parameters
----------
unmapped_value : float
The unmapped axis value.
"""
self._value = self._remap(unmapped_value)
def _remap(self, unmapped_value: float) -> float:
"""Remaps an axis value to the target interval.
Parameters
----------
unmapped_value : float
The unmapped axis value.
Returns
-------
float
The remapped axis value.
"""
# TODO: This is an opportunity to improve code in the future if we migrate the wrover and base station to Python 3.10
if self._low_region_lower_bound <= unmapped_value <= self._low_region_upper_bound:
return self._scale * (unmapped_value - self._deadband_low) + self._offset
elif self._center_region_lower_bound <= unmapped_value <= self._center_region_upper_bound:
return self._offset
elif self._high_region_lower_bound <= unmapped_value <= self._high_region_upper_bound:
return self._scale * (unmapped_value - self._deadband_high) + self._offset
else:
raise ValueError(f"Value {unmapped_value} is not in the desginated input region of {(self._low_region_lower_bound, self._high_region_upper_bound)}")
class KeyBuf:
"""A buffer storing the state of a single button on an input device.
Considerably simpler than the axis buffer.
"""
def __init__(self, init_value: bool):
"""Creates a new buffer for a button.
Parameters
----------
init_value : bool
The initial state of the button.
"""
self.value = init_value
class InputDevice:
"""Represents a single input device and all of its state.
Instances of this class maintain a polling thread that consumes evdev events.
To ensure that the thread is cleaned up and to prevent deadlocks, users of this
class should make sure to call `kill` on an instance when it is no longer needed.
"""
def __init__(self, device: Union[str, evdev.InputDevice], shape: DeviceShape):
"""Constructs a new `InputDevice` instance for the given device.
The device will be polled for capabilities, which will allow for the creation
of state buffers for each axis and button on the device. To start the evdev
polling thread, call `start`; the `InputDevice` will not be able to track the
device's state until then.
Parameters
----------
device : Union[str, evdev.InputDevice]
The device, given either as a path to a device file or as an instance of
`evdev.InputDevice`.
shape : DeviceShape
The shape of the device.
See Also
--------
start : Initializes evdev polling.
"""
self.shape = shape
self._dev = device if isinstance(device, evdev.InputDevice) else evdev.InputDevice(device)
self._poll_thread_ctx: Optional[Tuple[Thread, int]] = None # thread and notify pipe
self._thread_lock = Lock()
self._axis_cache: Dict[DeviceAxis, AxisBuf] = {}
self._key_cache: Dict[DeviceKey, KeyBuf] = {}
self._data_lock = Lock()
# construct axis and key buffers based on the device's advertised capabilities
for ev_type, ev_caps in self._dev.capabilities().items():
ev_codes = evdev_util.get_capability_codes(ev_caps)
if ev_type == DeviceEventType.EV_ABS:
for code in ev_codes:
axis: DeviceAxis
try:
axis = DeviceAxis(code)
except ValueError:
continue
axis_def = shape.axes.get(axis)
if axis_def is None:
continue
axis_info = self._dev.absinfo(code)
self._axis_cache[axis] = AxisBuf(
axis_info.value, axis_info.min, axis_info.max, axis_def.min_val, axis_def.max_val, axis_def.deadband)
elif ev_type == DeviceEventType.EV_KEY:
init_key_states = set(self._dev.active_keys())
for code in ev_codes:
try:
self._key_cache[DeviceKey(code)] = KeyBuf(code in init_key_states)
except ValueError:
pass
def start(self):
"""Initializes the evdev polling thread.
This is what allows for the tracking of the device's state. Once this device
is no longer needed, the `kill` method should be called to ensure that the
polling thread is cleaned up properly in order to prevent resource leaks and
deadlocks.
Raises
------
ValueError
If the polling thread has already been started, or if the device has
already been shut down.
Notes
-----
Evdev events are organized into "frames", each of which is separated by an
EV_SYN event of code SYN_REPORT. Inputs should only be considered committed
when a whole frame has been sent. In the case where the event buffer overflows,
events will be lost, in which case the frame may be incomplete. This is
indicated by an EV_SYN event of code SYN_DROPPED, which signals to us that we
need to resynchronize with the frames. See [1]_ for more details.
References
----------
.. [1] https://www.freedesktop.org/software/libevdev/doc/latest/syn_dropped.html
"""
with self._thread_lock:
if self._dev.fd == -1:
raise ValueError('Device is already closed!')
elif self._poll_thread_ctx is not None:
raise ValueError('Poll thread already exists!')
# may deadlock if the device is lost, since the selector will never receive an event
# so we add a virtual pipe for the selector to read from that we can use to "break out"
notify_pipe_r, notify_pipe_w = os.pipe2(os.O_NONBLOCK)
def poll():
rospy.loginfo('Initializing evdev thread state...')
axis_temp: Dict[int, int] = dict() # temp buffers for the current incomplete frame
key_temp: Dict[int, int] = dict()
syn_okay = True # if SYN_DROPPED, this becomes false to indicate that the frame is fragmented
def consume_event(event: evdev.InputEvent):
nonlocal syn_okay
if event.type == DeviceEventType.EV_ABS: # axis state event
if syn_okay:
try:
axis_temp[DeviceAxis(event.code)] = event.value
except ValueError:
pass
elif event.type == DeviceEventType.EV_KEY: # key state event
if syn_okay:
try:
key_temp[DeviceKey(event.code)] = event.value
except ValueError:
pass
elif event.type == DeviceEventType.EV_SYN: # synchronization event
if event.code == SyncEvent.SYN_REPORT: # end of a sync frame
if syn_okay: # sync frame was okay; copy data for frame to state caches
for axis_code, state in axis_temp.items(): # copy axis state
try:
axis_buf = self._axis_cache.get(DeviceAxis(axis_code))
if axis_buf is not None:
axis_buf.update(state)
except ValueError:
pass
axis_temp.clear()
for key_code, state in key_temp.items(): # copy key state
try:
key_buf = self._key_cache.get(DeviceKey(key_code))
if key_buf is not None:
key_buf.value = state != 0 # 0 => release; 1 => press; 2 => hold
except ValueError:
pass
key_temp.clear()
else: # sync frame was bad; retrieve actual state using ioctl, then return to normal
syn_okay = True
for abs_code, abs_buf in self._axis_cache.items(): # resync axis states
abs_buf.update(self._dev.absinfo(abs_code.value).value) # shouldn't need to update other axis props... probably
for key_buf in self._key_cache.values(): # resync key states
key_buf.value = False
for key_code in self._dev.active_keys():
try:
key_buf = self._key_cache.get(DeviceKey(key_code))
if key_buf is not None:
key_buf.value = True
except ValueError:
pass
elif event.code == SyncEvent.SYN_DROPPED: # sync was lost; drop the sync frame and wait for the next one
axis_temp.clear()
key_temp.clear()
syn_okay = False
# check to ensure that the device is still there; better safe than sorry
with self._thread_lock:
if self._dev.fd == -1:
rospy.loginfo('Device was dead before the evdev thread was ready!')
return
with open(notify_pipe_r, 'rb') as notify_pipe_file:
# use selector to conjoin the device and the virtual "break-out" pipe
sel = selectors.DefaultSelector()
sel.register(self._dev, selectors.EVENT_READ)
sel.register(notify_pipe_file, selectors.EVENT_READ)
rospy.loginfo('Entering evdev polling loop...')
while True:
# read events
for key, _ in sel.select():
if key.fileobj == self._dev: # it's from evdev
for event in cast(Iterator[evdev.InputEvent], self._dev.read()):
consume_event(event)
else: # must be the virtual pipe
rospy.loginfo('Received notification from virtual pipe!')
cast(BytesIO, key.fileobj).read()
# terminate if the device is closed
with self._thread_lock:
if self._dev.fd == -1:
rospy.loginfo('The device was closed! Terminating the evdev thread...')
break
poll_thread = Thread(target=poll)
poll_thread.start()
self._poll_thread_ctx = poll_thread, notify_pipe_w
def get_axis(self, axis: DeviceAxis) -> Optional[float]:
"""Retrieves the state of an absolute axis.
The axis value will be normalized. See the `AxisBuf` class for more details.
Parameters
----------
axis : DeviceAxis
The axis whose state should be queried.
Returns
-------
Optional[float]
The axis' state, or `None` if there is no data available for it.
"""
with self._data_lock:
axis_buf = self._axis_cache.get(axis)
return axis_buf.value if axis_buf is not None else None
def get_key(self, key: DeviceKey) -> Optional[bool]:
"""Retrieves the state of a button.
Parameters
----------
key : DeviceKey
The button whose state should be queried.
Returns
-------
Optional[bool]
The button's state, or `None` if there is no data available for it.
"""
with self._data_lock:
key_buf = self._key_cache.get(key)
return key_buf.value if key_buf is not None else None
def kill(self):
"""Shuts down the device.
This closes any relevant file handles and terminates the polling thread.
The `InputDevice` instance can no longer be used once this is done.
"""
with self._thread_lock:
self._dev.close()
if self._poll_thread_ctx is not None:
os.write(self._poll_thread_ctx[1], b'\0') # write some random byte to break out of the selector read
os.close(self._poll_thread_ctx[1])
self._poll_thread_ctx[0].join()
self._poll_thread_ctx = None
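# --- Added usage sketch (not part of the original module) ---
# A rough outline of the intended lifecycle. The device path, shape, axis and
# key arguments must be supplied by the caller (they are placeholders here);
# only the InputDevice API defined above is real.
def _input_device_usage_example(device_path, shape, axis, key):
    dev = InputDevice(device_path, shape)
    dev.start()                    # begin consuming evdev frames
    throttle = dev.get_axis(axis)  # normalized float, or None if unavailable
    pressed = dev.get_key(key)     # bool, or None if unavailable
    dev.kill()                     # always shut down the polling thread
    return throttle, pressed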
| [
"os.close",
"threading.Lock",
"os.write",
"evdev.InputDevice",
"typing.cast",
"os.pipe2",
"selectors.DefaultSelector",
"threading.Thread",
"rospy.loginfo"
] | [((5864, 5870), 'threading.Lock', 'Lock', ([], {}), '()\n', (5868, 5870), False, 'from threading import Lock, Thread\n'), ((6017, 6023), 'threading.Lock', 'Lock', ([], {}), '()\n', (6021, 6023), False, 'from threading import Lock, Thread\n'), ((8876, 8899), 'os.pipe2', 'os.pipe2', (['os.O_NONBLOCK'], {}), '(os.O_NONBLOCK)\n', (8884, 8899), False, 'import os\n'), ((13907, 13926), 'threading.Thread', 'Thread', ([], {'target': 'poll'}), '(target=poll)\n', (13913, 13926), False, 'from threading import Lock, Thread\n'), ((5717, 5742), 'evdev.InputDevice', 'evdev.InputDevice', (['device'], {}), '(device)\n', (5734, 5742), False, 'import evdev\n'), ((8933, 8984), 'rospy.loginfo', 'rospy.loginfo', (['"""Initializing evdev thread state..."""'], {}), "('Initializing evdev thread state...')\n", (8946, 8984), False, 'import rospy\n'), ((15488, 15531), 'os.write', 'os.write', (['self._poll_thread_ctx[1]', "b'\\x00'"], {}), "(self._poll_thread_ctx[1], b'\\x00')\n", (15496, 15531), False, 'import os\n'), ((15601, 15635), 'os.close', 'os.close', (['self._poll_thread_ctx[1]'], {}), '(self._poll_thread_ctx[1])\n', (15609, 15635), False, 'import os\n'), ((12822, 12849), 'selectors.DefaultSelector', 'selectors.DefaultSelector', ([], {}), '()\n', (12847, 12849), False, 'import selectors\n'), ((13014, 13061), 'rospy.loginfo', 'rospy.loginfo', (['"""Entering evdev polling loop..."""'], {}), "('Entering evdev polling loop...')\n", (13027, 13061), False, 'import rospy\n'), ((12554, 12621), 'rospy.loginfo', 'rospy.loginfo', (['"""Device was dead before the evdev thread was ready!"""'], {}), "('Device was dead before the evdev thread was ready!')\n", (12567, 12621), False, 'import rospy\n'), ((13474, 13531), 'rospy.loginfo', 'rospy.loginfo', (['"""Received notification from virtual pipe!"""'], {}), "('Received notification from virtual pipe!')\n", (13487, 13531), False, 'import rospy\n'), ((13770, 13841), 'rospy.loginfo', 'rospy.loginfo', (['"""The device was closed! Terminating the evdev thread..."""'], {}), "('The device was closed! Terminating the evdev thread...')\n", (13783, 13841), False, 'import rospy\n'), ((13560, 13586), 'typing.cast', 'cast', (['BytesIO', 'key.fileobj'], {}), '(BytesIO, key.fileobj)\n', (13564, 13586), False, 'from typing import Dict, Iterator, Optional, Tuple, Union, cast\n')] |
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.shortcuts import reverse
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from subscribers.tokens import account_activation_token
def create_secure_link(request=None, user=None, viewname='', external=True):
"""Generates a one-time URL that identifies a user.
The resulting URL can then be used as confirmation links or
unsubscribe links.
Args:
request (Request object): needed to obtain the current scheme,
domain, and port for building the link. If no request obj
is passed, the values for these URL components are
obtained from the settings file.
user (Subscriber object): the subscriber whose primary key
will be encoded as a base64 UID in the returned URL
viewname (str): the name of the view which the URL points to
external (bool): if True, pre-appends the scheme and netloc to
the path. Otherwise, only the path component is returned.
"""
url_path = reverse(
viewname,
kwargs = {
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user)
}
)
if external:
# pre-append protocol and domain
url_scheme = ''
url_host = ''
if request is not None:
url_scheme = request.scheme
url_host = request.get_host()
else:
url_scheme = settings.EXTERNAL_URL_SCHEME
url_host = settings.EXTERNAL_URL_HOST
url_path = f'{url_scheme}://{url_host}{url_path}'
return url_path
def get_uid(uidb64):
"""Converts the base64-encoded UID into a string."""
return force_text(urlsafe_base64_decode(uidb64))
def get_external_link_for_static_file(fpath=''):
"""Generates the absolute URL to a static asset.
Args:
fpath (str): the path to the file, relative to static directory
"""
url_path = static(fpath)
url_scheme = settings.EXTERNAL_URL_SCHEME
url_host = settings.EXTERNAL_URL_HOST
return f'{url_scheme}://{url_host}{url_path}'
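# --- Added usage sketch (not part of the original module) ---
# A hedged example of how a view might combine the two helpers above when
# building a confirmation email. The URL name 'activate' and the static path
# 'img/logo.png' are assumptions, not taken from this project.
def _example_build_confirmation_context(request, subscriber):
    link = create_secure_link(request=request, user=subscriber,
                              viewname='activate', external=True)
    logo_url = get_external_link_for_static_file('img/logo.png')
    return {'confirmation_link': link, 'logo_url': logo_url}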
| [
"django.utils.encoding.force_bytes",
"django.contrib.staticfiles.templatetags.staticfiles.static",
"subscribers.tokens.account_activation_token.make_token",
"django.utils.http.urlsafe_base64_decode"
] | [((2192, 2205), 'django.contrib.staticfiles.templatetags.staticfiles.static', 'static', (['fpath'], {}), '(fpath)\n', (2198, 2205), False, 'from django.contrib.staticfiles.templatetags.staticfiles import static\n'), ((1942, 1971), 'django.utils.http.urlsafe_base64_decode', 'urlsafe_base64_decode', (['uidb64'], {}), '(uidb64)\n', (1963, 1971), False, 'from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\n'), ((1345, 1386), 'subscribers.tokens.account_activation_token.make_token', 'account_activation_token.make_token', (['user'], {}), '(user)\n', (1380, 1386), False, 'from subscribers.tokens import account_activation_token\n'), ((1300, 1320), 'django.utils.encoding.force_bytes', 'force_bytes', (['user.pk'], {}), '(user.pk)\n', (1311, 1320), False, 'from django.utils.encoding import force_bytes, force_text\n')] |
from allauth.socialaccount.providers.oauth.urls import default_urlpatterns
from .provider import SpotifyOAuth2Provider
urlpatterns = default_urlpatterns(SpotifyOAuth2Provider)
| [
"allauth.socialaccount.providers.oauth.urls.default_urlpatterns"
] | [((135, 177), 'allauth.socialaccount.providers.oauth.urls.default_urlpatterns', 'default_urlpatterns', (['SpotifyOAuth2Provider'], {}), '(SpotifyOAuth2Provider)\n', (154, 177), False, 'from allauth.socialaccount.providers.oauth.urls import default_urlpatterns\n')] |
import json
import requests
from requests_oauthlib import OAuth1
from rich import print
import exposurebot
__copyright__ = "Copyright 2022 <NAME>"
__license__ = """
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class TweetError(Exception):
pass
class Twitter:
tweet_url = "https://api.twitter.com/2/tweets"
@staticmethod
def load_credentials(file_name: str) -> tuple[str, str, str, str]:
"""Returns the client key, client secret, api key, and api secret as a tuple."""
with open(file_name, "r") as file_:
json_content = json.load(file_)
client_key = json_content["client_key"]
client_secret = json_content["client_secret"]
api_key = json_content["api_key"]
api_secret = json_content["api_secret"]
return client_key, client_secret, api_key, api_secret
def __init__(self):
client_key, client_secret, api_key, api_secret = Twitter.load_credentials("twitter_credentials.json")
self.header = OAuth1(api_key, api_secret, client_key, client_secret, signature_type="auth_header",
callback_uri="https://github.com/Denyol")
def tweet(self, text: str) -> json:
"""Returns twitter JSON response"""
data = {"text": text}
r = requests.post(Twitter.tweet_url, json=data, auth=self.header)
print("[bold blue]Tweet posted:", r.content)
if r.json().get("status") == 403:
raise TweetError("Duplicate tweet: " + text)
return r.json()
def delete(self, *tweet_ids):
"""Deletes the specified tweet ids from Twitter and the post history JSON file."""
url = "https://api.twitter.com/2/tweets/"
for t_id in tweet_ids:
print("Deleting: ", t_id)
r = requests.delete(url + t_id, auth=self.header)
print(r.text, r.status_code)
if r.status_code == 200:
exposurebot.ExposureSiteBot.remove_saved_post(t_id)
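# --- Added usage sketch (not part of the original module) ---
# Assumes a valid twitter_credentials.json next to the script and the
# documented v2 create-tweet response layout ({"data": {"id": ...}});
# the status text below is made up.
def _example_post_and_delete():
    bot = Twitter()
    response = bot.tweet("Example status text")
    bot.delete(response["data"]["id"])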
| [
"requests.post",
"requests.delete",
"rich.print",
"exposurebot.ExposureSiteBot.remove_saved_post",
"json.load",
"requests_oauthlib.OAuth1"
] | [((1560, 1691), 'requests_oauthlib.OAuth1', 'OAuth1', (['api_key', 'api_secret', 'client_key', 'client_secret'], {'signature_type': '"""auth_header"""', 'callback_uri': '"""https://github.com/Denyol"""'}), "(api_key, api_secret, client_key, client_secret, signature_type=\n 'auth_header', callback_uri='https://github.com/Denyol')\n", (1566, 1691), False, 'from requests_oauthlib import OAuth1\n'), ((1844, 1905), 'requests.post', 'requests.post', (['Twitter.tweet_url'], {'json': 'data', 'auth': 'self.header'}), '(Twitter.tweet_url, json=data, auth=self.header)\n', (1857, 1905), False, 'import requests\n'), ((1914, 1958), 'rich.print', 'print', (['"""[bold blue]Tweet posted:"""', 'r.content'], {}), "('[bold blue]Tweet posted:', r.content)\n", (1919, 1958), False, 'from rich import print\n'), ((1113, 1129), 'json.load', 'json.load', (['file_'], {}), '(file_)\n', (1122, 1129), False, 'import json\n'), ((2304, 2329), 'rich.print', 'print', (['"""Deleting: """', 't_id'], {}), "('Deleting: ', t_id)\n", (2309, 2329), False, 'from rich import print\n'), ((2346, 2391), 'requests.delete', 'requests.delete', (['(url + t_id)'], {'auth': 'self.header'}), '(url + t_id, auth=self.header)\n', (2361, 2391), False, 'import requests\n'), ((2404, 2432), 'rich.print', 'print', (['r.text', 'r.status_code'], {}), '(r.text, r.status_code)\n', (2409, 2432), False, 'from rich import print\n'), ((2486, 2537), 'exposurebot.ExposureSiteBot.remove_saved_post', 'exposurebot.ExposureSiteBot.remove_saved_post', (['t_id'], {}), '(t_id)\n', (2531, 2537), False, 'import exposurebot\n')] |
# -*-coding: cp1252 -*-
from models import Encuesta, Opcion
from django.contrib import admin
class OpcionesEnLinea(admin.TabularInline):
model = Opcion
extra = 1
class EncuestaAdmin(admin.ModelAdmin):
list_display = ['pregunta', 'fecha_pub', 'es_reciente']
fieldsets = [
(None, {'fields': ['pregunta']}),
('Publicacion', {'fields': ['fecha_pub'],
'classes': ['collapse']}),
]
inlines = [OpcionesEnLinea]
list_filter = ['fecha_pub']
search_fields = ['pregunta']
admin.site.register(Encuesta, EncuestaAdmin)
admin.site.register(Opcion)
| [
"django.contrib.admin.site.register"
] | [((526, 570), 'django.contrib.admin.site.register', 'admin.site.register', (['Encuesta', 'EncuestaAdmin'], {}), '(Encuesta, EncuestaAdmin)\n', (545, 570), False, 'from django.contrib import admin\n'), ((571, 598), 'django.contrib.admin.site.register', 'admin.site.register', (['Opcion'], {}), '(Opcion)\n', (590, 598), False, 'from django.contrib import admin\n')] |
# Authors: <NAME> <<EMAIL>>
# Date: 15th December 2019
# License: See LICENSE file
import pandas as pd
from PSPConstants import *
PROGRAMMINGLANGUAGE_COLNAME = 'ProgrammingLanguage'
ACTMINCODE_COLNAME = 'ActMinCode'
def getPSPDF():
PSP_MAIN_DF = pd.read_csv('C:/Users/ncshr/PycharmProjects/SE_Beliefs/data/filtered_data.csv')
# # PSP_MAIN_DF[PROGRAMMINGLANGUAGE_COLNAME] = PSP_MAIN_DF[PROGRAMMINGLANGUAGE_COLNAME].map({'POWERBUILDER': 'PB'})
# PSP_MAIN_DF[PROGRAMMINGLANGUAGE_COLNAME] = PSP_MAIN_DF[PROGRAMMINGLANGUAGE_COLNAME].replace('POWERBUILDER','PB')
# PSP_MAIN_DF[PROGRAMMINGLANGUAGE_COLNAME] = PSP_MAIN_DF[PROGRAMMINGLANGUAGE_COLNAME].replace('PROGRESS', 'PRGS')
# PSP_MAIN_DF[ACTMINCODE_COLNAME] = PSP_MAIN_DF[ACTMINCODE_COLNAME].replace(1735, 221)
return PSP_MAIN_DF
#
# if __name__ == '__main__':
#
# df = getPSPDF()
#
# df = df[ df[PROGRAMMINGLANGUAGE_COLNAME].isin(PROGRAMMING_LANGUAGES) ]
#
# df.to_csv('filtered_data.csv', index=False)
| [
"pandas.read_csv"
] | [((256, 335), 'pandas.read_csv', 'pd.read_csv', (['"""C:/Users/ncshr/PycharmProjects/SE_Beliefs/data/filtered_data.csv"""'], {}), "('C:/Users/ncshr/PycharmProjects/SE_Beliefs/data/filtered_data.csv')\n", (267, 335), True, 'import pandas as pd\n')] |
import os
from pynestml.frontend.pynestml_frontend import to_nest, install_nest
# folder
home = os.path.expanduser("~")
folder = os.path.dirname(__file__)
root = os.path.abspath(folder if folder else ".")
target = root + "/build"
# build
to_nest(logging_level='ERROR', input_path=root, target_path=target,
module_name="energy_module")
# install
import nest
nest_file = nest.__file__
pos = nest_file.find("/lib")
install_nest(target, nest_file[:pos])
| [
"os.path.dirname",
"pynestml.frontend.pynestml_frontend.install_nest",
"os.path.abspath",
"pynestml.frontend.pynestml_frontend.to_nest",
"os.path.expanduser"
] | [((101, 124), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (119, 124), False, 'import os\n'), ((134, 159), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (149, 159), False, 'import os\n'), ((169, 211), 'os.path.abspath', 'os.path.abspath', (["(folder if folder else '.')"], {}), "(folder if folder else '.')\n", (184, 211), False, 'import os\n'), ((247, 347), 'pynestml.frontend.pynestml_frontend.to_nest', 'to_nest', ([], {'logging_level': '"""ERROR"""', 'input_path': 'root', 'target_path': 'target', 'module_name': '"""energy_module"""'}), "(logging_level='ERROR', input_path=root, target_path=target,\n module_name='energy_module')\n", (254, 347), False, 'from pynestml.frontend.pynestml_frontend import to_nest, install_nest\n'), ((434, 471), 'pynestml.frontend.pynestml_frontend.install_nest', 'install_nest', (['target', 'nest_file[:pos]'], {}), '(target, nest_file[:pos])\n', (446, 471), False, 'from pynestml.frontend.pynestml_frontend import to_nest, install_nest\n')] |
#---------------------------------------------------------#
# #
# WINTERM #
# A Terminal For Windows #
# Author: <NAME> #
# #
#---------------------------------------------------------#
from __future__ import print_function
from colorama import init, Fore, Style
import os
import shutil
import threading
import sys
init()
#Below is a class to use threading to find files.
class myThread(threading.Thread):
def __init__(self, pathToSearch):
threading.Thread.__init__(self)
self.pathToSearch = pathToSearch
def run(self):
List(self.pathToSearch)
#Below is a class to run functions used in grep in multiple threads
class grepThread(threading.Thread):
def __init__(self, keyword, extension, path, Option):
threading.Thread.__init__(self)
self.keyword = keyword
self.extension = extension
self.path = path
self.Option = Option
def run(self):
find_in_all_readable(self.keyword, self.extension, self.path, self.Option)
def showPath():
#This one just shows the current path
print()
print(Fore.LIGHTBLUE_EX + ' ' + os.getcwd(), end='\n >')
print(Style.RESET_ALL, end = '')
#---List of functions to be used by other functions.
working_Functions = ['ls', 'man', 'cd', 'touch', 'rm', 'rmdir', 'mkdir', 'clear', 'mv', 'locate', 'cp', 'cat', 'grep']
wip_Functions = []
#-------Show on launch---------
def show_about():
#This will show the available commands at launch.
print(Fore.GREEN + '2017-18 : developed by <NAME>' + Style.RESET_ALL)
print('Working functions are : ', end='')
for func in working_Functions:
print(Fore.CYAN + func, end=' | ')
print(Style.RESET_ALL + '\n\nWork in progress functions are : ', end='')
for func in wip_Functions:
print(Fore.RED + func, end=' | ')
print(Style.RESET_ALL + '\n\nTo know more about them use : man "command name"')
input("press any Key when you are done.")
os.system('cls')
#-------ERROR-------
#Below are definitions of errors to be shown.
#This is a dict that maps the param passed by a function when it throws an unknown error to the function's name. Makes it easier to debug.
unknown_error_dict = {1 : 'is_available', 2 : 'mv', 3 : 'grep', 4 : 'rm', 5 : 'cat_singleFile', 6 : 'cp', 7 : 'showman'}
def noFile_error(Filename=''):
#This function shows that Filename was not found.
#End the exec of the function after calling this function
print(Fore.RED + Filename+" : Not Found\a" + Style.RESET_ALL)
def unknown_error(param):
#param is used to know which function called this error
    print(Fore.RED + "\aSome unknown error occurred. Err no : "+str(param)+"\nPlease take a look at the command syntax using 'man [COMMAND]'" + Style.RESET_ALL, end='')
def option_not_available(option, command):
#This will show if the provided option is not found available for the command
#End the exec of the function after calling this function
print(Fore.RED + option+"\a : No such option available in "+command+" command" + Style.RESET_ALL)
def unknown_command(cmd):
#This will be called when an unknown command is passed by the user
    print('\a' + cmd + Fore.RED + ' : No such command available' + Style.RESET_ALL, end=' ')
#----------Error def end here------------
def give_rootPath():
#This one returns the rootpath of the current working directory
currentPath = os.getcwd()
rootPath = currentPath[:currentPath.index("\\")] + "\\"
return rootPath
def is_available(pathToFile):
#This function checks if file or folder is available.
#Return : True or False
#Note : pathToFile can be a path to a file or just a file also.
#Here we need to check three things.
#If pathToFile is a folder or it is a path to a file within folders or just a file in the working directory.
#Try to see if it is a folder.
try:
if os.path.isdir(pathToFile) :
#It is a folder
return True
except:
pass
#If not a folder then try to see if it is a file within folders.
try:
file_Path = os.path.dirname(pathToFile)
#file_Path is the directory name where the file should be.
file_Name = os.path.basename(pathToFile)
#file_Name is the files name
#If pathToFile is just a filename without directories than file_Path is equal to ""
if file_Path != "":
if file_Name in os.listdir(file_Path) :
#It is a file within folders
return True
else:
if file_Name in os.listdir(os.getcwd()) :
#It is a file in the working directory
return True
else:
noFile_error(pathToFile)
return False
except:
unknown_error(1)
def showman(command):
#This shows the available functions in this terminal
#The syntax is man [COMMAND NAME]
#The command is passed to see which functions manual the user wants
fun = {'cd':'Usage : cd [DIR NAME] \nUsed to change directory.',
'ls':'Usage : ls [DIR NAME] -[OPTION] \nUsed to list a directory.',
'touch':'Usage : touch [FILE NAME] \nUsed to create a new file.',
'rm':'Usage : rm [FILE NAME] \nUsed to remove a file or folder.',
'mkdir':'Usage : mkdir [DIR NAME]\nUsed to create a new directory.',
'clear':'Usage : clear \nUsed to clear the screen.',
'mv':'Usage : mv [SOURCE] [DESTINATION] \nUsed to move a file from SOURCE to DESTINATION',
           'cat':'Usage : cat [OPTION] [FILENAME] \nUsed to display file contents and redirect them to other files.',
'locate' : 'Usage : locate [FILENAME] \nUsed to locate a file in the working Drive.\nNOTE : Make sure not to try on the drive where Windows is installed since there is lack of permission.',
'grep' : 'Usage : grep [OPTION] "KEYWORD" [FILENAME]. \nUsed to find keyword in the given file.',
'rmdir' : 'Usage : rmdir [DIRECTORY NAME]\n Used to remove directories.',
'cp' : 'Usage : cp [SOURCE] [DESTINATION]\n Used to Copy files or folders',
}
    #fun is a dictionary. [Command Name] : [Usage/manual text]
#It should be updated after adding a working function
try:
whichFun = command[4:]
if whichFun in working_Functions:
print(Fore.GREEN + fun[whichFun] + Style.RESET_ALL)
elif whichFun in wip_Functions:
print(Fore.RED + 'This command is still being worked on!\a' + Style.RESET_ALL)
print(Fore.CYAN + fun[whichFun] + Style.RESET_ALL)
else:
print("\nPlease enter a valid Command\a. The Syntax is man [COMMAND NAME]", end='')
except:
unknown_error(7)
#------COMMANDS--------
#Below is the definition of all the commands.
#-----locate-------
#locate actually makes a database of the files which is updated with the updatedb command
folders_to_skip = ["System Volume Information"] #These folders will be skipped while creating the db
db_path = "C:\\Database\\locate.db"
drive_path = "E:"
def updatedb():
#This will just erase all the data from the database and call the list function.
print("Updating database...")
erase_db = open(db_path, "w")
erase_db.close()
List()
def writeUpdate(file_TO_WRITE):
# The whole drives file details will be stored in a file in C:
write_update = open(db_path, "a")
write_update.write(file_TO_WRITE + "\n")
write_update.close()
def Show(pathtoLaunch):
exitFlag = True
input("Found in " + pathtoLaunch)
def List(pathToSearch = "E:"):
pathToSearch += "\\"
for files in os.listdir(pathToSearch):
if os.path.isdir(pathToSearch + "\\" + files) and files not in folders_to_skip:
newThread = myThread(pathToSearch + "\\" + files)
newThread.start()
newThread.join()
else:
writeUpdate(pathToSearch + "\\" + files)
def locate(keyword):
#This will check and display all the lines in test.db where the keyword is present
print("\a" + Fore.RED + "If this is the first time you're running this command, make sure you run 'updatedb' command!" + Style.RESET_ALL)
check_db = open(db_path, "r")
while True:
line = check_db.readline()
if not line:
break
if keyword in line:
print(line, end='')
#--------------End------------
def cd(command):
#The change directory command.
if command[:2] == "..":
#Find dirname of currentPath
os.chdir(os.path.dirname(os.getcwd()))
elif command[:2] == "--":
#If -- is present then we want to move to the root of the working directory path
os.chdir(give_rootPath())
else:
newPath = command
if os.path.isdir(newPath):
os.chdir(newPath)
else:
print(newPath+": No such directory found\a", end='')
#----------ls command-----------
#This list contains the executable file extensions in Windows, used to show such files with a green accent in the terminal
executables = ['py', 'exe', 'msi', 'bat']
#This is the list of available options in ls, used to check if the option is valid or not
options_in_ls = ['l', 'r', 't']
def grab(cmd):
#This should grab all the extra options.
#The ls command should be like this ls -[OPTIONS][OPTIONS]
#So it will search for all the valid options after the '-' sign
folder = os.getcwd()
if len(cmd) > 2:
try:
flag = False
posSpace_ls = cmd.index(' ')
try:
posSign_ls = cmd.index('-')
if posSign_ls - posSpace_ls > 1:
flag = True
except:
#If the execution comes here then prob there was no '-' in the command
tempFolder = cmd[posSpace_ls+1:len(cmd)]
if flag:
#If flag happens to be true then probably we need to name tempFolder
tempFolder = cmd[posSpace_ls+1:posSign_ls-1]
#Since we have tempFolder now, just check if the folder is available or not
if os.path.isdir(tempFolder):
folder = tempFolder
else:
print(tempFolder+" : No such directory found\a")
return False
except:
pass
#Above checks if any directory is passed as argue then it exists or not.
try:
opt = cmd[cmd.index('-')+1:]
try:
for single_options in opt:
if single_options in options_in_ls:
disp(folder, single_options)
return True
except:
option_not_available(opt, 'ls')
except:
disp(folder)
def disp(folder, option = ' '):
#This will display.
#It will do the final task of displaying.
end_option = ' '
#files in folder will be the list of files in the folder
files_inFolder = os.listdir(folder)
if option == 'l':
end_option = '\n'
if option == 'r':
files_inFolder = files_inFolder[::-1]
for files in files_inFolder:
if os.path.isdir(folder + '\\' + files):
#If its a directory then print in blue
print(Fore.BLUE + files + Style.RESET_ALL, end=end_option)
else :
#Check if its an exec file
flag = False
for ext in executables:
if files.endswith(ext):
flag = True
print(Fore.GREEN + files + Style.RESET_ALL, end= end_option)
break
if not flag :
print(files, end= end_option)
#-----ls ends here-------
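# Example invocations handled by grab()/disp() above (added illustration, not part of the original script):
#   ls              -> list the working directory on one line
#   ls -l           -> one entry per line
#   ls -r           -> reversed listing order
#   ls somedir -l   -> list 'somedir', one entry per line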
def touch(nameOfFile):
#Makes a new file.
make = open(nameOfFile, "w")
make.close()
#------rm-------
def rm(fileName):
#Remove command.
try:
if fileName[1:4] == "-rf":
if is_available(fileName[5:]):
shutil.rmtree(fileName[5:])
else:
noFile_error(fileName[5:])
elif fileName[:3] == 'dir':
if is_available(fileName[4:]):
shutil.rmtree(fileName[4:])
elif fileName[1:3] == '-i':
if is_available(fileName[4:]):
if not os.path.isdir(fileName[4:]):
ask = input("Are you sure you want to delete? [y/n]")
if ask == 'y':
os.remove(fileName[4:])
else:
for files in os.listdir(fileName[4:]):
ask = input(files + ": Are you sure you want to delete? [y/n]")
if ask == 'y':
if os.path.isdir(fileName[4:] + "\\" +files):
shutil.rmtree(fileName[4:] + "\\" +files)
else:
os.remove(fileName[4:] + "\\" +files)
elif is_available(fileName[1:]):
if os.path.isdir(fileName[1:]):
counter = 0
for files in os.listdir(fileName[1:]):
if counter > 0 or not files:
break
counter += 1
if counter > 0:
print("\a Folder is not empty!", end='')
else:
os.rmdir(fileName[1:])
else:
os.remove(fileName[1:])
else:
noFile_error()
except:
unknown_error(4)
def MakeDir(name):
#Makes a new folder.
    #Before making it we need to check whether a directory with the same name already exists.
    #We do that by catching the exception raised by os.mkdir.
try:
os.mkdir(name)
except:
print("\aERROR : Directory with same name exists.", end='')
def clear():
#Clears the screen.
os.system("cls")
#---------mv--------
available_Options_mv = ['-i', '-b']
def mv(names):
#Moves the file to the said directory.
    #Here's a catch. Damn it!
    #We need to know whether the destination is a directory name or just a filename.
    #Because if it's just a file name, we need not check that it exists.
option = ''
try:
#check if theres an option.
try:
if names[0] == '-':
#This means an option is added.
option = names[:2]
if option not in available_Options_mv:
option_not_available(option, 'mv')
return False
names = names[3:]
except:
pass
except:
pass
posSpace = names.index(" ")
try:
fileToMove = os.getcwd()+"\\"+names[:posSpace] #The path to the file to be moved.
except:
print("\aPlease follow the syntax of command.", end='')
return False
if not is_available(fileToMove):
return False
whereToMove = names[posSpace+1:] #The path where it is to be moved.
#whereToMove can be just a file name too. So lets just try to check if its a dir.
#If not a dir, then probably a filename.
try:
#This will try to see if where to move has a dir name in it.
#If it does then it will check if its a dir by calling is_available()
if os.path.dirname(whereToMove) != "" :
if not is_available(os.path.dirname(whereToMove)):
return False
except:
pass
#If it got past above then probably the source and destination are available
try:
if option == '-i':
if os.path.isfile(whereToMove):
                ask = input("\aFile already exists. Do you want to overwrite? [no/yes]")
if ask != 'yes':
return False
elif option == '-b':
shutil.copyfile(fileToMove, fileToMove+".bak")
shutil.move(fileToMove, whereToMove)
except:
unknown_error(2)
#--------cat-------------
#Definition of all functions used for cat start here.
available_Options_cat = ['-n', '-e', '-T', ]
def checkCat(name):
#The cat commnad.
#This function just checks the options. The main function would be done by the following cat_exec() function.
#The syntax is cat -[OPTION] [FILE]...
#We can make the OPTION default to read which will read and display the contents of the file
#We need to make a list of options available to check if the passed option is valid.
#If its not a valid option then pass the option to file name and see if the file exists in the working directory
#This will be like the executor.
#It will send the name to be checked if it is for single file or multiple files
Option = cat_singleFile(name)
if Option == 0:
return False
if not Option:
Option = cat_doubleFiles(name)
if not Option:
option_not_available('','cat')
return False
return True
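# Example invocations handled by checkCat() and the helpers below (added illustration, not part of the original script):
#   cat file.txt         -> print the file's contents
#   cat -n file.txt      -> print with line numbers
#   cat >newfile.txt     -> create an empty file
#   cat a.txt > b.txt    -> overwrite b.txt with the contents of a.txt
#   cat a.txt >> b.txt   -> append a.txt to b.txt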
def cat_singleFile(name):
#This will check if the command is for single files
#If it is then it will be sent to cat_exec() to be executed
#RETURNS : TRUE OR FALSE OR 0 if the option is -[] type but not available in the list
Option = ''
File = ''
try:
#Try to find the - in the passed argue which is name
if name[0] == '-' :
Option = name[:2]
if Option not in available_Options_cat:
option_not_available(Option, 'cat')
return 0
File = name[3:]
cat_exec(Option, File)
return True
else:
#If Options not in available Options list then theres a possibility that it is just a command to read the file
#Or something like cat >name : we need to create name in this case
#First lets check if its a > cat command
if name[0] == '>':
cat_exec('m', name[1:])
return True
elif '>' not in name and '<' not in name:
#print("Entered")
if is_available(name):
cat_exec('', name)
return True
except:
unknown_error(5)
def cat_doubleFiles(name):
#This will check name for stuff like file1 > file2 or file1 >> file2 or file1 < file2
File = ''
File2 = ''
try:
if '>' in name or '<' in name:
#There's a possibility its a two file operation
try:
pos = name.index('>')
#If > is not there then it will give error so it is in try
if name[pos+1] == '>':
#We need to append file1 to file2
File = name[:pos-1]
File2 = name[pos+3:]
cat_exec('>>', File, File2)
return True
else:
#We need to overwrite file2 and put file1 stuff in there
File = name[:pos-1]
File2 = name[pos+2:]
cat_exec('>', File, File2)
return True
except:
#Here we should try to see if it is the < command
pass
except:
pass
def cat_exec(Option, File1, File2 = ''):
if Option != 'm':
open_the_File = open(File1, "r")
if File2 != '':
if Option == '>>':
open_File2 = open(File2, 'a')
elif Option == '>':
open_File2 = open(File2, 'w')
countLine = 0
while True:
if Option != 'm':
readLine = open_the_File.readline()
if not readLine:
break
if Option == '-n':
countLine += 1
print(str(countLine)+" "+readLine, end='')
elif Option == "-e":
if readLine == " ":
print("$", end='')
else:
print(readLine[:len(readLine)-1]+"$", end='\n')
elif Option == "-T":
'^I'.join(readLine.split())
print(readLine, end='')
elif Option == '':
print(readLine, end='')
elif Option == 'm':
touch(File1)
return True
elif Option == '>' or Option == '>>':
open_File2.write(readLine)
open_the_File.close()
#Definition ends here.
def openFile(name):
#This one opens a file after checking if the file is available in the working directory and is not a folder
fileAvailable = False
for files in os.listdir(os.getcwd()):
if name == files and not(os.path.isdir(files)):
fileAvailable = True
break
if not fileAvailable:
noFile_error(name)
else:
os.startfile(name)
#-------cp---------
def cp(command):
#This function will work similar to cp.
    #We need to extract the source and destination from the command
option = ''
try:
#check if theres an option.
try:
if command[0] == '-':
#This means an option is added.
option = command[:2]
if option != '-i':
option_not_available(option, 'cp')
return False
command = command[3:]
except:
pass
except:
pass
posSpace = command.index(" ")
#The source and destination should be seperated by a spcace
try:
src = command[:posSpace]
#Now check if src exists or not.
if is_available(src):
#So it exists.Check if the destination exists.
dst = command[posSpace+1:]
if not is_available(os.path.dirname(dst)):
#If the dir of dest is not available then create it.
MakeDir(os.path.dirname(dst))
shutil.copytree(src, dst)
else:
#If the dir is already available then check if dst already exists.
if os.path.isfile(dst):
print("File with same name already exists!\a")
ask = 'yes'
if option == '-i':
ask = input("Do you want to overwrite?[yes/no]")
if ask == 'yes':
shutil.copyfile(src, dst)
return True
shutil.copyfile(src, dst)
else:
noFile_error(src)
return False
except:
unknown_error(6)
#-------grep---------
available_Options_grep = ['', '-n', '-v', '^', '$', '-c', '-h', '-s']
extension_of_Files_tosearch = ['txt', 'html', 'py',]
def grep(command):
#This function will work like the grep command.
#The syntax is grep [OPTION] "[string to find]" [FILENAME]
#If no FILENAME is provided, it will search the root directory of the working path.
Option = ''
try:
pos = command.index('"')
if pos != 0:
Option = command[:pos-1]
if Option not in available_Options_grep:
option_not_available(Option, "grep")
return False
file_name = command[-command[::-1].index('"')+1:]
if file_name != 'file*.*':
if not is_available(file_name):
noFile_error(file_name)
return False
#If file_name == file*.* then we need to check all files with extensions in extension_of_Files_tosearch
#Now that we have the file we want to search and the option
keyword = command[command.index('"')+1:-1-command[::-1].index('"')]
#We need to check if start of keyword has ^ or end has $
if keyword[0] == '^':
#We need to show only the ones that begin with keyword.
Option = '^'
keyword = keyword[1:]
elif keyword[len(keyword)-1] == '$':
#We need to show only the ones that end with keyword.
Option = '$'
keyword = keyword[:-1]
grep_exec(Option, file_name, keyword)
#Now we have the kewyword too.
except:
unknown_error(3)
return False
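# Example invocations handled by grep() above (added illustration, not part of the original script):
#   grep "word" notes.txt      -> print every line containing the keyword
#   grep -n "word" notes.txt   -> prefix matching lines with their line number
#   grep -v "word" notes.txt   -> print lines that do NOT contain the keyword
#   grep "^start" notes.txt    -> only lines beginning with the keyword
#   grep "end$" notes.txt      -> only lines ending with the keyword
#   grep "word" file*.*        -> search all readable files from the drive root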
def grep_exec(Option, fileName, keyword):
#This will execute the grep command
if fileName != 'file*.*':
find_in_File(fileName, keyword, Option)
else:
for i in range(len(extension_of_Files_tosearch)):
find_in_all_readable(keyword, extension_of_Files_tosearch[i], give_rootPath(), Option)
def find_in_File(file, keyword, conditions = ''):
#This will find keyword in file
open_File = open(file, 'r')
if(conditions != '-h'):
print("Finding in "+file, end=':\n')
countLine = 0
countmatch = 0
while True:
read_word = open_File.readline()
if not read_word:
return True
if conditions == '-n':
countLine += 1
if keyword in read_word:
print(str(countLine)+" "+read_word, end='')
elif conditions == '-v':
if keyword not in read_word:
print(read_word, end='')
elif conditions == '^':
if read_word[:len(keyword)] == keyword:
print(read_word, end='')
elif conditions == '$':
if read_word[-len(keyword):] == keyword:
print(read_word, end='')
elif conditions == '-c':
if keyword in read_word:
countmatch += 1
print("Match : "+str(countmatch)+" "+read_word, end='')
elif conditions == '-s':
if keyword in read_word:
print("Found!", end=' ')
else:
if keyword in read_word:
print(read_word, end='')
def find_in_all_readable(keyword, extension, path, Option = ''):
#This will find the keyword in all files with given extension
for stuff in os.listdir(path):
if os.path.isdir(stuff) and stuff != "System Volume Information":
thread = grepThread(keyword, extension, path+"\\"+stuff, Option)
thread.start()
thread.join()
else:
if stuff.endswith(extension):
find_in_File(path+"\\"+stuff, keyword, Option)
#------COMMANDS/----------
#The function list ends here.
def runCommand(cmd):
#This function checks the command and sends it to be executed.
if cmd == "exit":
return 0
elif cmd[:2] == "ls":
grab(cmd)
elif cmd[:6] == "locate":
locate(cmd[7:])
elif cmd[:3] == "man":
showman(cmd)
elif cmd[:2] == "cd":
cd(cmd[3:])
elif cmd[:5] == "touch":
touch(cmd[6:])
elif cmd[:2] == "rm":
rm(cmd[2:])
elif cmd[:5] == "mkdir":
MakeDir(cmd[6:])
elif cmd[:5] == "clear":
clear()
elif cmd[:2] == "mv":
mv(cmd[3:])
elif cmd[:3] == "cat":
checkCat(cmd[4:])
elif cmd[:4] == 'grep':
grep(cmd[5:])
elif cmd[:2] == 'cp':
cp(cmd[3:])
elif cmd == 'about':
show_about()
elif cmd == 'updatedb':
updatedb()
else:
openFile(cmd)
show_about()
def main():
while True:
showPath()
prompt = input()
ret = runCommand(prompt)
print("")
if ret == 0:
break
main() | [
"threading.Thread.__init__",
"os.listdir",
"shutil.copytree",
"shutil.move",
"os.getcwd",
"os.path.isfile",
"os.path.dirname",
"os.chdir",
"os.path.isdir",
"shutil.copyfile",
"os.path.basename",
"os.mkdir",
"shutil.rmtree",
"os.startfile",
"os.system",
"os.rmdir",
"colorama.init",
"os.remove"
] | [((540, 546), 'colorama.init', 'init', ([], {}), '()\n', (544, 546), False, 'from colorama import init, Fore, Style\n'), ((2177, 2193), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (2186, 2193), False, 'import os\n'), ((3602, 3613), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3611, 3613), False, 'import os\n'), ((7747, 7771), 'os.listdir', 'os.listdir', (['pathToSearch'], {}), '(pathToSearch)\n', (7757, 7771), False, 'import os\n'), ((9530, 9541), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9539, 9541), False, 'import os\n'), ((11034, 11052), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (11044, 11052), False, 'import os\n'), ((13880, 13896), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (13889, 13896), False, 'import os\n'), ((25779, 25795), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (25789, 25795), False, 'import os\n'), ((677, 708), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (702, 708), False, 'import threading\n'), ((972, 1003), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (997, 1003), False, 'import threading\n'), ((4089, 4114), 'os.path.isdir', 'os.path.isdir', (['pathToFile'], {}), '(pathToFile)\n', (4102, 4114), False, 'import os\n'), ((4297, 4324), 'os.path.dirname', 'os.path.dirname', (['pathToFile'], {}), '(pathToFile)\n', (4312, 4324), False, 'import os\n'), ((4413, 4441), 'os.path.basename', 'os.path.basename', (['pathToFile'], {}), '(pathToFile)\n', (4429, 4441), False, 'import os\n'), ((11214, 11250), 'os.path.isdir', 'os.path.isdir', (["(folder + '\\\\' + files)"], {}), "(folder + '\\\\' + files)\n", (11227, 11250), False, 'import os\n'), ((13743, 13757), 'os.mkdir', 'os.mkdir', (['name'], {}), '(name)\n', (13751, 13757), False, 'import os\n'), ((15852, 15888), 'shutil.move', 'shutil.move', (['fileToMove', 'whereToMove'], {}), '(fileToMove, whereToMove)\n', (15863, 15888), False, 'import shutil\n'), ((20545, 20556), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20554, 20556), False, 'import os\n'), ((20737, 20755), 'os.startfile', 'os.startfile', (['name'], {}), '(name)\n', (20749, 20755), False, 'import os\n'), ((1333, 1344), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1342, 1344), False, 'import os\n'), ((7784, 7826), 'os.path.isdir', 'os.path.isdir', (["(pathToSearch + '\\\\' + files)"], {}), "(pathToSearch + '\\\\' + files)\n", (7797, 7826), False, 'import os\n'), ((8882, 8904), 'os.path.isdir', 'os.path.isdir', (['newPath'], {}), '(newPath)\n', (8895, 8904), False, 'import os\n'), ((10218, 10243), 'os.path.isdir', 'os.path.isdir', (['tempFolder'], {}), '(tempFolder)\n', (10231, 10243), False, 'import os\n'), ((15287, 15315), 'os.path.dirname', 'os.path.dirname', (['whereToMove'], {}), '(whereToMove)\n', (15302, 15315), False, 'import os\n'), ((15573, 15600), 'os.path.isfile', 'os.path.isfile', (['whereToMove'], {}), '(whereToMove)\n', (15587, 15600), False, 'import os\n'), ((25808, 25828), 'os.path.isdir', 'os.path.isdir', (['stuff'], {}), '(stuff)\n', (25821, 25828), False, 'import os\n'), ((4628, 4649), 'os.listdir', 'os.listdir', (['file_Path'], {}), '(file_Path)\n', (4638, 4649), False, 'import os\n'), ((8668, 8679), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8677, 8679), False, 'import os\n'), ((8918, 8935), 'os.chdir', 'os.chdir', (['newPath'], {}), '(newPath)\n', (8926, 8935), False, 'import os\n'), ((12026, 12053), 'shutil.rmtree', 'shutil.rmtree', (['fileName[5:]'], {}), '(fileName[5:])\n', (12039, 12053), False, 
'import shutil\n'), ((14676, 14687), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14685, 14687), False, 'import os\n'), ((15797, 15845), 'shutil.copyfile', 'shutil.copyfile', (['fileToMove', "(fileToMove + '.bak')"], {}), "(fileToMove, fileToMove + '.bak')\n", (15812, 15845), False, 'import shutil\n'), ((20592, 20612), 'os.path.isdir', 'os.path.isdir', (['files'], {}), '(files)\n', (20605, 20612), False, 'import os\n'), ((21811, 21836), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (21826, 21836), False, 'import shutil\n'), ((21957, 21976), 'os.path.isfile', 'os.path.isfile', (['dst'], {}), '(dst)\n', (21971, 21976), False, 'import os\n'), ((22325, 22350), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dst'], {}), '(src, dst)\n', (22340, 22350), False, 'import shutil\n'), ((4778, 4789), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4787, 4789), False, 'import os\n'), ((12210, 12237), 'shutil.rmtree', 'shutil.rmtree', (['fileName[4:]'], {}), '(fileName[4:])\n', (12223, 12237), False, 'import shutil\n'), ((15356, 15384), 'os.path.dirname', 'os.path.dirname', (['whereToMove'], {}), '(whereToMove)\n', (15371, 15384), False, 'import os\n'), ((21657, 21677), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (21672, 21677), False, 'import os\n'), ((21773, 21793), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (21788, 21793), False, 'import os\n'), ((13042, 13069), 'os.path.isdir', 'os.path.isdir', (['fileName[1:]'], {}), '(fileName[1:])\n', (13055, 13069), False, 'import os\n'), ((22250, 22275), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dst'], {}), '(src, dst)\n', (22265, 22275), False, 'import shutil\n'), ((12340, 12367), 'os.path.isdir', 'os.path.isdir', (['fileName[4:]'], {}), '(fileName[4:])\n', (12353, 12367), False, 'import os\n'), ((12581, 12605), 'os.listdir', 'os.listdir', (['fileName[4:]'], {}), '(fileName[4:])\n', (12591, 12605), False, 'import os\n'), ((13129, 13153), 'os.listdir', 'os.listdir', (['fileName[1:]'], {}), '(fileName[1:])\n', (13139, 13153), False, 'import os\n'), ((13459, 13482), 'os.remove', 'os.remove', (['fileName[1:]'], {}), '(fileName[1:])\n', (13468, 13482), False, 'import os\n'), ((12502, 12525), 'os.remove', 'os.remove', (['fileName[4:]'], {}), '(fileName[4:])\n', (12511, 12525), False, 'import os\n'), ((13402, 13424), 'os.rmdir', 'os.rmdir', (['fileName[1:]'], {}), '(fileName[1:])\n', (13410, 13424), False, 'import os\n'), ((12765, 12807), 'os.path.isdir', 'os.path.isdir', (["(fileName[4:] + '\\\\' + files)"], {}), "(fileName[4:] + '\\\\' + files)\n", (12778, 12807), False, 'import os\n'), ((12840, 12882), 'shutil.rmtree', 'shutil.rmtree', (["(fileName[4:] + '\\\\' + files)"], {}), "(fileName[4:] + '\\\\' + files)\n", (12853, 12882), False, 'import shutil\n'), ((12948, 12986), 'os.remove', 'os.remove', (["(fileName[4:] + '\\\\' + files)"], {}), "(fileName[4:] + '\\\\' + files)\n", (12957, 12986), False, 'import os\n')] |
import uuid
from copy import deepcopy
from typing import Callable
import pytest
from stac_pydantic import Item
from tests.conftest import MockStarletteRequest
from stac_fastapi.api.app import StacApi
from stac_fastapi.mongo.core import CoreCrudClient
from stac_fastapi.mongo.transactions import BulkTransactionsClient, TransactionsClient
from stac_fastapi.types.errors import ConflictError, NotFoundError
def test_create_collection(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
data = load_test_data("test_collection.json")
mongo_transactions.create_collection(data, request=MockStarletteRequest)
coll = mongo_core.get_collection(data["id"], request=MockStarletteRequest)
assert coll["id"] == data["id"]
def test_create_collection_already_exists(
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
data = load_test_data("test_collection.json")
mongo_transactions.create_collection(data, request=MockStarletteRequest)
# change id to avoid mongo duplicate key error
data["_id"] = str(uuid.uuid4())
with pytest.raises(ConflictError):
mongo_transactions.create_collection(data, request=MockStarletteRequest)
def test_update_collection(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
data = load_test_data("test_collection.json")
mongo_transactions.create_collection(data, request=MockStarletteRequest)
data["keywords"].append("new keyword")
mongo_transactions.update_collection(data, request=MockStarletteRequest)
coll = mongo_core.get_collection(data["id"], request=MockStarletteRequest)
assert "new keyword" in coll["keywords"]
def test_delete_collection(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
data = load_test_data("test_collection.json")
mongo_transactions.create_collection(data, request=MockStarletteRequest)
mongo_transactions.delete_collection(data["id"], request=MockStarletteRequest)
with pytest.raises(NotFoundError):
mongo_core.get_collection(data["id"], request=MockStarletteRequest)
def test_get_collection(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
data = load_test_data("test_collection.json")
mongo_transactions.create_collection(data, request=MockStarletteRequest)
coll = mongo_core.get_collection(data["id"], request=MockStarletteRequest)
assert coll["id"] == data["id"]
def test_get_item(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
collection_data = load_test_data("test_collection.json")
item_data = load_test_data("test_item.json")
mongo_transactions.create_collection(collection_data, request=MockStarletteRequest)
mongo_transactions.create_item(item_data, request=MockStarletteRequest)
coll = mongo_core.get_item(
item_id=item_data["id"],
collection_id=item_data["collection"],
request=MockStarletteRequest,
)
assert coll["id"] == item_data["id"]
assert coll["collection"] == item_data["collection"]
def test_get_collection_items(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
for _ in range(5):
item["_id"] = str(uuid.uuid4())
item["id"] = str(uuid.uuid4())
mongo_transactions.create_item(item, request=MockStarletteRequest)
fc = mongo_core.item_collection(coll["id"], request=MockStarletteRequest)
assert len(fc["features"]) == 5
for item in fc["features"]:
assert item["collection"] == coll["id"]
def test_create_item(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
mongo_transactions.create_item(item, request=MockStarletteRequest)
resp = mongo_core.get_item(
item["id"], item["collection"], request=MockStarletteRequest
)
assert Item(**item).dict(
exclude={"links": ..., "properties": {"created", "updated"}}
) == Item(**resp).dict(exclude={"links": ..., "properties": {"created", "updated"}})
def test_create_item_already_exists(
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
mongo_transactions.create_item(item, request=MockStarletteRequest)
with pytest.raises(ConflictError):
mongo_transactions.create_item(item, request=MockStarletteRequest)
def test_update_item(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
mongo_transactions.create_item(item, request=MockStarletteRequest)
item["properties"]["foo"] = "bar"
mongo_transactions.update_item(item, request=MockStarletteRequest)
updated_item = mongo_core.get_item(
item["id"], item["collection"], request=MockStarletteRequest
)
assert updated_item["properties"]["foo"] == "bar"
def test_update_geometry(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
mongo_transactions.create_item(item, request=MockStarletteRequest)
new_coordinates = [
[
[142.15052873427666, -33.82243006904891],
[140.1000346138806, -34.257132625788756],
[139.5776607193635, -32.514709769700254],
[141.6262528041627, -32.08081674221862],
[142.15052873427666, -33.82243006904891],
]
]
item["geometry"]["coordinates"] = new_coordinates
mongo_transactions.update_item(item, request=MockStarletteRequest)
updated_item = mongo_core.get_item(
item["id"], item["collection"], request=MockStarletteRequest
)
assert updated_item["geometry"]["coordinates"] == new_coordinates
def test_delete_item(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
mongo_transactions.create_item(item, request=MockStarletteRequest)
mongo_transactions.delete_item(
item["id"], item["collection"], request=MockStarletteRequest
)
with pytest.raises(NotFoundError):
mongo_core.get_item(
item["id"], item["collection"], request=MockStarletteRequest
)
def test_bulk_item_insert(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
mongo_bulk_transactions: BulkTransactionsClient,
load_test_data: Callable,
):
coll = load_test_data("test_collection.json")
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
item = load_test_data("test_item.json")
items = []
for _ in range(10):
_item = deepcopy(item)
_item["id"] = str(uuid.uuid4())
items.append(_item)
fc = mongo_core.item_collection(coll["id"], request=MockStarletteRequest)
assert len(fc["features"]) == 0
mongo_bulk_transactions.bulk_item_insert(items=items)
fc = mongo_core.item_collection(coll["id"], request=MockStarletteRequest)
assert len(fc["features"]) == 10
for item in items:
mongo_transactions.delete_item(
item["id"], item["collection"], request=MockStarletteRequest
)
@pytest.mark.skip(reason="Not working")
def test_landing_page_no_collection_title(
mongo_core: CoreCrudClient,
mongo_transactions: TransactionsClient,
load_test_data: Callable,
api_client: StacApi,
):
class MockStarletteRequestWithApp(MockStarletteRequest):
app = api_client.app
coll = load_test_data("test_collection.json")
del coll["title"]
mongo_transactions.create_collection(coll, request=MockStarletteRequest)
landing_page = mongo_core.landing_page(request=MockStarletteRequestWithApp)
for link in landing_page["links"]:
if link["href"].split("/")[-1] == coll["id"]:
assert link["title"]
| [
"pytest.mark.skip",
"uuid.uuid4",
"pytest.raises",
"stac_pydantic.Item",
"copy.deepcopy"
] | [((8342, 8380), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Not working"""'}), "(reason='Not working')\n", (8358, 8380), False, 'import pytest\n'), ((1111, 1123), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1121, 1123), False, 'import uuid\n'), ((1135, 1163), 'pytest.raises', 'pytest.raises', (['ConflictError'], {}), '(ConflictError)\n', (1148, 1163), False, 'import pytest\n'), ((2118, 2146), 'pytest.raises', 'pytest.raises', (['NotFoundError'], {}), '(NotFoundError)\n', (2131, 2146), False, 'import pytest\n'), ((4987, 5015), 'pytest.raises', 'pytest.raises', (['ConflictError'], {}), '(ConflictError)\n', (5000, 5015), False, 'import pytest\n'), ((7258, 7286), 'pytest.raises', 'pytest.raises', (['NotFoundError'], {}), '(NotFoundError)\n', (7271, 7286), False, 'import pytest\n'), ((7819, 7833), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (7827, 7833), False, 'from copy import deepcopy\n'), ((3624, 3636), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3634, 3636), False, 'import uuid\n'), ((3663, 3675), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3673, 3675), False, 'import uuid\n'), ((7860, 7872), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7870, 7872), False, 'import uuid\n'), ((4441, 4453), 'stac_pydantic.Item', 'Item', ([], {}), '(**item)\n', (4445, 4453), False, 'from stac_pydantic import Item\n'), ((4538, 4550), 'stac_pydantic.Item', 'Item', ([], {}), '(**resp)\n', (4542, 4550), False, 'from stac_pydantic import Item\n')] |
# The MIT License (MIT)
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`opensign.canvas`
================================================================================
A library to facilitate easy RGB Matrix Sign Animations.
* Author(s): <NAME>
Implementation Notes
--------------------
**Software and Dependencies:**
* Henner Zeller RGB Matrix Library:
https://github.com/hzeller/rpi-rgb-led-matrix
* Python Imaging Library (Pillow)
"""
from PIL import Image, ImageDraw, ImageFont
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/makermelissa/OpenSign.git"
class OpenSignCanvas:
"""The Canvas is an empty image that you add text and graphics to. It will automatically
expand as you add content. You can then display the canvas on the sign and use the animation
functions to convey it."""
def __init__(self):
self._fonts = {}
self._current_font = None
self._current_color = (255, 0, 0, 255)
self._image = Image.new("RGBA", (0, 0), (0, 0, 0, 0))
self._draw = ImageDraw.Draw(self._image)
self._cursor = [0, 0]
self._stroke_width = 0
self._stroke_color = None
self._shadow_intensity = 0
self._shadow_offset = 0
self._opacity = 1.0
def add_font(self, name, file, size=None, use=False):
"""Add a font to the font pool. If there is no current font set,
then the new font will automatically become the current font
:param string name: The name of the font. This is used when setting the font.
:param string file: The filename of the font. This should be the full path.
        :param float size: (optional) The font-size to use if it is a TrueType font.
Set to None for bitmap fonts. (default=None)
:param bool use: (optional) Whether or not the font should immediately be used.
(default=False)
"""
if size is not None:
self._fonts[name] = ImageFont.truetype(file, size)
else:
self._fonts[name] = ImageFont.load(file)
if use or self._current_font is None:
self._current_font = self._fonts[name]
def set_font(self, fontname):
"""Set the current font
:param string fontname: The name of the font to use. This should match the name parameter
used when adding the font.
"""
if self._fonts.get(fontname) is None:
raise ValueError("Font name not found.")
self._current_font = self._fonts[fontname]
def set_stroke(self, width, color=None):
"""Set the text stroke width and color
:param int width: The stroke width to use. This is how wide the outline of
the text is in pixels.
:param color: (optional) The color of the stroke. (default=None)
:type color: tuple or list or int
"""
self._stroke_width = width
if color is not None:
self._stroke_color = self._convert_color(color)
else:
self._stroke_color = None
# pylint: disable=no-self-use
def _convert_color(self, color):
if isinstance(color, (tuple, list)):
if len(color) == 3:
return (color[0], color[1], color[2], 255)
if len(color) == 4:
return tuple(color)
if isinstance(color, int):
return ((color >> 16) & 0xFF, (color >> 8) & 0xFF, color & 0xFF, 255)
raise ValueError("Color should be an integer or 3 or 4 value tuple or list.")
# pylint: enable=no-self-use
def _enlarge_canvas(self, width, height):
if self._cursor[0] + width >= self._image.width:
new_width = self._cursor[0] + width
else:
new_width = self._image.width
if self._cursor[1] + height >= self._image.height:
new_height = self._cursor[1] + height
else:
new_height = self._image.height
new_image = Image.new("RGBA", (new_width, new_height))
new_image.alpha_composite(self._image)
self._image = new_image
self._draw = ImageDraw.Draw(self._image)
def set_color(self, color):
"""Set the current text color.
:param color: The color of the text.
:type color: tuple or list or int
"""
self._current_color = self._convert_color(color)
def set_shadow(self, intensity=0.5, offset=1):
"""Set the canvas to display a shadow of the content. To turn shadow off, set
the intensity to 0. The shadow is global for the entire canvas.
        :param float intensity: (optional) The opaqueness of the shadow (default=0.5).
:param int offset: (optional) The offset in pixels towards the lower right (default=1).
"""
intensity = max(0, min(1.0, intensity))
self._shadow_intensity = intensity
self._shadow_offset = offset
# pylint: disable=too-many-arguments
def add_text(
self,
text,
color=None,
font=None,
stroke_width=None,
stroke_color=None,
x_offset=0,
y_offset=0,
):
"""Add text to the canvas.
:param string text: The text to add.
:param color: (optional) The color of the text to override the current setting.
(default=Current Setting)
:param string fontname: (optional) The name of the font to override the current setting.
(default=Current Setting)
:param int stroke_width: (optional) The stroke width to override the current setting.
(default=Current Setting)
:param stroke_color: (optional) The color of the stroke to override the current setting.
(default=Current Setting)
:param int x_offset: (optional) The amount of x-offset to nudge the text. (default=0)
:param int y_offset: (optional) The amount of y-offset to nudge the text. (default=0)
:type color: tuple or list or int
:type stroke_color: tuple or list or int
"""
if font is not None:
font = self._fonts[font]
else:
font = self._current_font
if font is None:
font = ImageFont.load_default()
x, y = self._cursor
if color is None:
color = self._current_color
else:
color = self._convert_color(color)
if stroke_color is None:
stroke_color = self._stroke_color
else:
stroke_color = self._convert_color(stroke_color)
if stroke_width is None:
stroke_width = self._stroke_width
lines = text.split("\n")
for index, line in enumerate(lines):
(text_width, text_height) = font.getsize(line, stroke_width=stroke_width)
self._enlarge_canvas(text_width, text_height)
# Draw the text
self._draw.text(
(x + x_offset, y + y_offset),
line,
font=font,
fill=color,
stroke_width=stroke_width,
stroke_fill=stroke_color,
)
# Get size and add to cursor
self._cursor[0] += text_width
if index < len(lines) - 1:
y += text_height
self._cursor[0] = 0
self._cursor[1] += text_height
# pylint: enable=too-many-arguments
def add_image(self, file):
"""Add an image to the canvas.
:param string file: The filename of the image. This should be the full path.
"""
x, y = self._cursor
new_image = Image.open(file).convert("RGBA")
self._enlarge_canvas(new_image.width, new_image.height)
self._image.alpha_composite(new_image, dest=(x, y))
self._cursor[0] += new_image.width
def clear(self):
"""Clear the canvas content, but retain all of the style settings"""
self._image = Image.new("RGBA", (0, 0), (0, 0, 0, 0))
self._cursor = [0, 0]
def get_image(self):
"""Get the canvas content as an image"""
return self._image
@property
def width(self):
"""Get the current canvas width in pixels"""
return self._image.width
@property
def height(self):
"""Get the current canvas height in pixels"""
return self._image.height
@property
def shadow_offset(self):
"""Get or set the current shadow offset in pixels"""
return self._shadow_offset
@shadow_offset.setter
def shadow_offset(self, value):
if not isinstance(value, int):
raise TypeError("Shadow offset must be an integer")
value = max(value, 0)
self._shadow_offset = value
@property
def shadow_intensity(self):
"""Get or set the current shadow intensity where 0 is
no shadow and 1 is a fully opaque shadow."""
return self._shadow_intensity
@shadow_intensity.setter
def shadow_intensity(self, value):
if not isinstance(value, (int, float)):
raise TypeError("Shadow intensity must be an integer or float")
value = max(0, min(1.0, value))
self._shadow_intensity = value
@property
def opacity(self):
"""Get or set the maximum opacity of the canvas where 0 is
transparent and 1 is opaque."""
return self._opacity
@opacity.setter
def opacity(self, value):
if not isinstance(value, (int, float)):
raise TypeError("Opacity must be an integer or float")
value = max(0, min(1.0, value))
self._opacity = value
@property
def cursor(self):
"""Get or set the current cursor position in pixels with the top left
being (0, 0)."""
return self._cursor
@cursor.setter
def cursor(self, value):
if isinstance(value, (tuple, list)) and len(value) >= 2:
self._cursor = [value[0], value[1]]
else:
raise TypeError("Value must be a tuple or list")
| [
"PIL.Image.open",
"PIL.ImageFont.load_default",
"PIL.Image.new",
"PIL.ImageFont.truetype",
"PIL.ImageFont.load",
"PIL.ImageDraw.Draw"
] | [((2024, 2063), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(0, 0)', '(0, 0, 0, 0)'], {}), "('RGBA', (0, 0), (0, 0, 0, 0))\n", (2033, 2063), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2085, 2112), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self._image'], {}), '(self._image)\n', (2099, 2112), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5055, 5097), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(new_width, new_height)'], {}), "('RGBA', (new_width, new_height))\n", (5064, 5097), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5198, 5225), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self._image'], {}), '(self._image)\n', (5212, 5225), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((9081, 9120), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(0, 0)', '(0, 0, 0, 0)'], {}), "('RGBA', (0, 0), (0, 0, 0, 0))\n", (9090, 9120), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3035, 3065), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['file', 'size'], {}), '(file, size)\n', (3053, 3065), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3112, 3132), 'PIL.ImageFont.load', 'ImageFont.load', (['file'], {}), '(file)\n', (3126, 3132), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((7346, 7370), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (7368, 7370), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((8760, 8776), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (8770, 8776), False, 'from PIL import Image, ImageDraw, ImageFont\n')] |
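A minimal usage sketch (not part of the original file) for the OpenSignCanvas class above; the font path is an assumption and any TrueType font on disk will do.

from opensign.canvas import OpenSignCanvas

canvas = OpenSignCanvas()
canvas.add_font("deja", "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", size=10, use=True)
canvas.set_color((0, 255, 0))                 # green text
canvas.set_shadow(intensity=0.5, offset=1)
canvas.add_text("Hello, sign!")
image = canvas.get_image()                    # PIL RGBA image sized to fit the content
print(image.width, image.height)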
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from rgbhistogram import RGBHistogram
# flower classes: [sunflowers, crocus, daisies, pansies]
from sklearn.preprocessing import LabelEncoder # distinguish between flower species
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split # training set, testing set
from sklearn.metrics import classification_report
import numpy as np
import argparse
import glob # glob the paths of images off disk
import cv2
import imutils
ap = argparse.ArgumentParser()
# The images used in this example are a sample of the Flowers 17 dataset
# http://www.robots.ox.ac.uk/~vgg/data/flowers/17/index.html
ap.add_argument("-i", "--images", required = True,
help = "path to the image dataset")
ap.add_argument("-m", "--masks", required = True,
help = "path to the image masks")
args = vars(ap.parse_args())
imagePaths = sorted(glob.glob(args["images"] + "/*.jpg"))
maskPaths = sorted(glob.glob(args["masks"] + "/*.png"))
data = []
target = []
desc = RGBHistogram([8, 8, 8])
for (imagePath, maskPath) in zip(imagePaths, maskPaths):
image = cv2.imread(imagePath)
mask = cv2.imread(maskPath)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
features = desc.describe(image, mask)
data.append(features)
target.append(imagePath.split("_")[-2])
targetNames = np.unique(target)
le = LabelEncoder()
target = le.fit_transform(target)
(trainData, testData, trainTarget, testTarget) = train_test_split(data, target,
test_size = 0.3, random_state = 42)
# using 25 decision trees in the forest
model = RandomForestClassifier(n_estimators = 25, random_state = 84)
model.fit(trainData, trainTarget)
# classification_report function print out the accuracy of his model
print(classification_report(testTarget, model.predict(testData),
target_names = targetNames))
# randomly picks 10 different images to investigate
for i in np.random.choice(np.arange(0, len(imagePaths)), 10):
imagePath = imagePaths[i]
maskPath = maskPaths[i]
image = cv2.imread(imagePath)
mask = cv2.imread(maskPath)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
features = desc.describe(image, mask)
flower = le.inverse_transform(model.predict([features]))[0]
print(imagePath)
print("I think this flower is a {}".format(flower.upper()))
cv2.imshow("image", image)
cv2.waitKey(0)
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"argparse.ArgumentParser",
"rgbhistogram.RGBHistogram",
"sklearn.ensemble.RandomForestClassifier",
"cv2.imshow",
"cv2.waitKey",
"sklearn.cross_validation.train_test_split",
"cv2.cvtColor",
"cv2.imread",
"glob.glob"
] | [((521, 546), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (544, 546), False, 'import argparse\n'), ((1035, 1058), 'rgbhistogram.RGBHistogram', 'RGBHistogram', (['[8, 8, 8]'], {}), '([8, 8, 8])\n', (1047, 1058), False, 'from rgbhistogram import RGBHistogram\n'), ((1362, 1379), 'numpy.unique', 'np.unique', (['target'], {}), '(target)\n', (1371, 1379), True, 'import numpy as np\n'), ((1385, 1399), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1397, 1399), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1484, 1546), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(data, target, test_size=0.3, random_state=42)\n', (1500, 1546), False, 'from sklearn.cross_validation import train_test_split\n'), ((1648, 1704), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(25)', 'random_state': '(84)'}), '(n_estimators=25, random_state=84)\n', (1670, 1704), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((910, 946), 'glob.glob', 'glob.glob', (["(args['images'] + '/*.jpg')"], {}), "(args['images'] + '/*.jpg')\n", (919, 946), False, 'import glob\n'), ((967, 1002), 'glob.glob', 'glob.glob', (["(args['masks'] + '/*.png')"], {}), "(args['masks'] + '/*.png')\n", (976, 1002), False, 'import glob\n'), ((1129, 1150), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (1139, 1150), False, 'import cv2\n'), ((1162, 1182), 'cv2.imread', 'cv2.imread', (['maskPath'], {}), '(maskPath)\n', (1172, 1182), False, 'import cv2\n'), ((1194, 1232), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_BGR2GRAY'], {}), '(mask, cv2.COLOR_BGR2GRAY)\n', (1206, 1232), False, 'import cv2\n'), ((2098, 2119), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (2108, 2119), False, 'import cv2\n'), ((2131, 2151), 'cv2.imread', 'cv2.imread', (['maskPath'], {}), '(maskPath)\n', (2141, 2151), False, 'import cv2\n'), ((2163, 2201), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_BGR2GRAY'], {}), '(mask, cv2.COLOR_BGR2GRAY)\n', (2175, 2201), False, 'import cv2\n'), ((2399, 2425), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (2409, 2425), False, 'import cv2\n'), ((2430, 2444), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2441, 2444), False, 'import cv2\n')] |
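A portability note on the script above: the sklearn.cross_validation module was removed in scikit-learn 0.20 and later, where the same function lives in sklearn.model_selection. A sketch of the equivalent import and call, assuming a current scikit-learn install:

from sklearn.model_selection import train_test_split  # drop-in replacement for sklearn.cross_validation
(trainData, testData, trainTarget, testTarget) = train_test_split(data, target,
	test_size = 0.3, random_state = 42)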
from manim_express import EagerModeScene, Size, SceneArgs
from manimlib import *
from manim_express.plot import m_line, m_scatter
# SceneArgs.color = rgb_to_hex([0., 0, 0])
SceneArgs.color = "#222222"
SceneArgs.full_screen = True
# SceneArgs.frame_rate = 30
# SceneArgs.gif = True
scene = EagerModeScene(screen_size=Size.big)
axes = ThreeDAxes()
tri = Triangle().scale(2)
scene.play(ShowCreation(tri))
dots = []
for i in tri.get_all_points():
dot = Dot(i)
dot.shift([0, 0, np.random.random()])
dot.set_color(rgb_to_hex(np.random.randint(100, 254, size=(3))))
dots.append(dot)
dots = VGroup(*dots)
scene.play(Write(dots))
scene.play(dots.move_to, UL*2)
scene.play(FadeOut(tri))
#
circle = Circle(stroke=0.1)
circle.move_to(LEFT*3)
# circle.set_stroke(color=BLUE_D, width=0.1)
# scene.camera.background_rgba =[0.3, 0.4, 0.5, 1]
scene.play(ShowCreation(circle))
scene.play(
circle.scale, 2,
circle.shift, RIGHT*5,
run_time=2
)
print(circle.point_at_angle(PI))
#
square = Square()
scene.play(ReplacementTransform(circle, square))
scene.play(square.shift, DOWN*2)
scene.play(FadeOut(square))
theta = np.linspace(0, 2 * PI, 200)
x = np.cos(theta)
y = np.sin(theta)
z = theta * 3
line = m_line(x, y, z, axes=axes)
scene.add(axes)
scene.play(ShowCreation(line, run_time=1))
c = ParametricCurve(
lambda theta: [np.cos(theta), np.sin(theta), 0], [0, 2 * PI])
c2 = ParametricCurve(lambda x: [x, np.sin(x), 0], [-2, 5])
scene.play(*map(ShowCreation, (c, c2)))
now = scene.time
c.add_updater(lambda c: c.set_y(math.sin(3 * (scene.time - now))))
scene.wait(1)
scene.play(Indicate(line))
dot = Dot()
dot.move_to(c.get_start())
scene.add(dot)
scene.play(MoveAlongPath(dot, c))
scene.play(ShowCreation(Circle()))
# scene.play(WiggleOutThenIn(line))
# image = scene.get_image()
# import matplotlib.pyplot as plt
# image.show()
# plt.imshow(np.array((image))
# plt.show()
scene.hold_on() | [
"manim_express.EagerModeScene",
"manim_express.plot.m_line"
] | [((292, 328), 'manim_express.EagerModeScene', 'EagerModeScene', ([], {'screen_size': 'Size.big'}), '(screen_size=Size.big)\n', (306, 328), False, 'from manim_express import EagerModeScene, Size, SceneArgs\n'), ((1219, 1245), 'manim_express.plot.m_line', 'm_line', (['x', 'y', 'z'], {'axes': 'axes'}), '(x, y, z, axes=axes)\n', (1225, 1245), False, 'from manim_express.plot import m_line, m_scatter\n')] |
"""
Channel
The channel class represents the out-of-character chat-room usable by
Players in-game. It is mostly overloaded to change its appearance, but
channels can be used to implement many different forms of message
distribution systems.
Note that sending data to channels is handled via the CMD_CHANNEL
syscommand (see evennia.syscmds). The sending should normally not need
to be modified.
"""
from evennia.comms.models import TempMsg
from evennia.comms.comms import DefaultChannel
from evennia.utils.utils import make_iter
from muddery.server.utils.localized_strings_handler import _
from muddery.server.utils.defines import ConversationType
class MudderyChannel(DefaultChannel):
"""
Working methods:
at_channel_creation() - called once, when the channel is created
has_connection(player) - check if the given player listens to this channel
connect(player) - connect player to this channel
disconnect(player) - disconnect player from channel
access(access_obj, access_type='listen', default=False) - check the
access on this channel (default access_type is listen)
delete() - delete this channel
message_transform(msg, emit=False, prefix=True,
sender_strings=None, external=False) - called by
the comm system and triggers the hooks below
msg(msgobj, header=None, senders=None, sender_strings=None,
persistent=None, online=False, emit=False, external=False) - main
send method, builds and sends a new message to channel.
tempmsg(msg, header=None, senders=None) - wrapper for sending non-persistent
messages.
distribute_message(msg, online=False) - send a message to all
connected players on channel, optionally sending only
to players that are currently online (optimized for very large sends)
Useful hooks:
channel_prefix(msg, emit=False) - how the channel should be
prefixed when returning to user. Returns a string
format_senders(senders) - should return how to display multiple
senders to a channel
pose_transform(msg, sender_string) - should detect if the
sender is posing, and if so, modify the string
format_external(msg, senders, emit=False) - format messages sent
from outside the game, like from IRC
format_message(msg, emit=False) - format the message body before
displaying it to the user. 'emit' generally means that the
message should not be displayed with the sender's name.
pre_join_channel(joiner) - if returning False, abort join
post_join_channel(joiner) - called right after successful join
pre_leave_channel(leaver) - if returning False, abort leave
post_leave_channel(leaver) - called right after successful leave
pre_send_message(msg) - runs just before a message is sent to channel
post_send_message(msg) - called just after message was sent to channel
"""
def channel_prefix(self, msg=None, emit=False, **kwargs):
"""
Hook method. How the channel should prefix itself for users.
Args:
msg (str, optional): Prefix text
emit (bool, optional): Switches to emit mode, which usually
means to not prefix the channel's info.
Returns:
prefix (str): The created channel prefix.
"""
return '' if emit else '[%s] ' % _(self.key, category="channels")
def get_message(self, caller, message):
"""
Receive a message from a character.
:param caller: talker.
:param message: content.
"""
if not self.access(caller, "send"):
caller.msg(_("You can not talk in this channel."))
return
output = {
"conversation": {
"type": ConversationType.CHANNEL.value,
"channel": _(self.key, category="channels"),
"from_id": caller.get_id(),
"from_name": caller.get_name(),
"msg": message,
}
}
msgobj = TempMsg(message=output, channels=[self])
self.msg(msgobj, emit=True)
def msg(
self,
msgobj,
header=None,
senders=None,
sender_strings=None,
keep_log=None,
online=False,
emit=False,
external=False,
):
"""
Send the given message to all accounts connected to channel. Note that
no permission-checking is done here; it is assumed to have been
done before calling this method. The optional keywords are not used if
persistent is False.
Args:
msgobj (Msg, TempMsg or str): If a Msg/TempMsg, the remaining
keywords will be ignored (since the Msg/TempMsg object already
has all the data). If a string, this will either be sent as-is
(if persistent=False) or it will be used together with `header`
and `senders` keywords to create a Msg instance on the fly.
header (str, optional): A header for building the message.
senders (Object, Account or list, optional): Optional if persistent=False, used
to build senders for the message.
sender_strings (list, optional): Name strings of senders. Used for external
connections where the sender is not an account or object.
When this is defined, external will be assumed.
keep_log (bool or None, optional): This allows to temporarily change the logging status of
this channel message. If `None`, the Channel's `keep_log` Attribute will
be used. If `True` or `False`, that logging status will be used for this
message only (note that for unlogged channels, a `True` value here will
create a new log file only for this message).
online (bool, optional) - If this is set true, only messages people who are
online. Otherwise, messages all accounts connected. This can
make things faster, but may not trigger listeners on accounts
that are offline.
emit (bool, optional) - Signals to the message formatter that this message is
not to be directly associated with a name.
external (bool, optional): Treat this message as being
agnostic of its sender.
Returns:
success (bool): Returns `True` if message sending was
successful, `False` otherwise.
"""
senders = make_iter(senders) if senders else []
if isinstance(msgobj, str):
# given msgobj is a string - convert to msgobject (always TempMsg)
msgobj = TempMsg(senders=senders, header=header, message=msgobj, channels=[self])
# we store the logging setting for use in distribute_message()
msgobj.keep_log = keep_log if keep_log is not None else self.db.keep_log
# start the sending
msgobj = self.pre_send_message(msgobj)
if not msgobj:
return False
msgobj = self.message_transform(
msgobj, emit=emit, prefix=False, sender_strings=sender_strings, external=external
)
self.distribute_message(msgobj, online=online)
self.post_send_message(msgobj)
return True
| [
"evennia.utils.utils.make_iter",
"evennia.comms.models.TempMsg",
"muddery.server.utils.localized_strings_handler._"
] | [((4252, 4292), 'evennia.comms.models.TempMsg', 'TempMsg', ([], {'message': 'output', 'channels': '[self]'}), '(message=output, channels=[self])\n', (4259, 4292), False, 'from evennia.comms.models import TempMsg\n'), ((6780, 6798), 'evennia.utils.utils.make_iter', 'make_iter', (['senders'], {}), '(senders)\n', (6789, 6798), False, 'from evennia.utils.utils import make_iter\n'), ((6954, 7026), 'evennia.comms.models.TempMsg', 'TempMsg', ([], {'senders': 'senders', 'header': 'header', 'message': 'msgobj', 'channels': '[self]'}), '(senders=senders, header=header, message=msgobj, channels=[self])\n', (6961, 7026), False, 'from evennia.comms.models import TempMsg\n'), ((3583, 3615), 'muddery.server.utils.localized_strings_handler._', '_', (['self.key'], {'category': '"""channels"""'}), "(self.key, category='channels')\n", (3584, 3615), False, 'from muddery.server.utils.localized_strings_handler import _\n'), ((3861, 3899), 'muddery.server.utils.localized_strings_handler._', '_', (['"""You can not talk in this channel."""'], {}), "('You can not talk in this channel.')\n", (3862, 3899), False, 'from muddery.server.utils.localized_strings_handler import _\n'), ((4053, 4085), 'muddery.server.utils.localized_strings_handler._', '_', (['self.key'], {'category': '"""channels"""'}), "(self.key, category='channels')\n", (4054, 4085), False, 'from muddery.server.utils.localized_strings_handler import _\n')] |
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import ndimage as ndi
import scipy.misc
from PIL import Image
import tensorflow as tf
import cv2
from skimage.color import rgb2lab, lab2rgb, xyz2lab, rgb2gray, gray2rgb
from skimage.io import imsave
from skimage.transform import resize
import pickle
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
from skimage import feature
CNNCLASSIFIER='classifier.h5'
CIFAR100='../datasets/cifar-100-python'
CIFAR10='../datasets/cifar-10-batches-py'
LABEL_MAP={0:'airplane',1:'automobile',2:'bird',3:'cat',4:'deer',5:'dog',6:'frog',7:'horse',8:'ship',9:'truck'} | [
"numpy.dot"
] | [((388, 431), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.299, 0.587, 0.114])\n', (394, 431), True, 'import numpy as np\n')] |
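A quick numeric check of the luminance weights used in rgb2gray above: a pure-red pixel maps to 0.299 (pure green and pure blue would map to 0.587 and 0.114 respectively).

red_pixel = np.array([[1.0, 0.0, 0.0]])   # single pure-red pixel
print(rgb2gray(red_pixel))            # -> [0.299]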
from dracoon import core
import typer
def password_flow(client_id: str, target_url: str,
username: str, password: str, client_secret: str = None):
if client_secret:
my_dracoon = core.Dracoon(clientID=client_id, clientSecret=client_secret)
else:
my_dracoon = core.Dracoon(clientID=client_id)
my_dracoon.set_URLs(target_url)
# try to authenticate - exit if request fails (timeout, connection error..)
try:
login_response = my_dracoon.basic_auth(username, password)
except core.requests.exceptions.RequestException as e:
typer.echo(f'Connection error: {e}')
raise SystemExit(e)
# get access token or quit if not successful
if login_response.status_code == 200:
success_txt = typer.style('Authenticated: 200', fg=typer.colors.BRIGHT_GREEN)
typer.echo(f'{success_txt}')
auth_header = my_dracoon.api_call_headers
else:
error_txt = typer.style('Authentication error: ', bg=typer.colors.RED, fg=typer.colors.WHITE)
typer.echo(f'{error_txt} {login_response.json()["error"]}')
raise SystemExit()
return auth_header
def auth_code_flow(client_id: str, client_secret: str, target_url: str, code: str):
my_dracoon = core.Dracoon(clientID=client_id, clientSecret=client_secret)
my_dracoon.set_URLs(target_url)
# try to authenticate - exit if request fails (timeout, connection error..)
try:
login_response = my_dracoon.oauth_code_auth(code)
except core.requests.exceptions.RequestException as e:
typer.echo(f'Connection error: {e}')
raise SystemExit(e)
# get access token or quit if not successful
if login_response.status_code == 200:
success_txt = typer.style('Authenticated: 200', fg=typer.colors.BRIGHT_GREEN)
typer.echo(f'{success_txt}')
auth_header = my_dracoon.api_call_headers
else:
error_txt = typer.style('Authentication error: ', bg=typer.colors.RED, fg=typer.colors.WHITE)
typer.echo(f'{error_txt} {login_response.json()["error"]}')
raise SystemExit()
return auth_header
| [
"typer.echo",
"typer.style",
"dracoon.core.Dracoon"
] | [((1274, 1334), 'dracoon.core.Dracoon', 'core.Dracoon', ([], {'clientID': 'client_id', 'clientSecret': 'client_secret'}), '(clientID=client_id, clientSecret=client_secret)\n', (1286, 1334), False, 'from dracoon import core\n'), ((212, 272), 'dracoon.core.Dracoon', 'core.Dracoon', ([], {'clientID': 'client_id', 'clientSecret': 'client_secret'}), '(clientID=client_id, clientSecret=client_secret)\n', (224, 272), False, 'from dracoon import core\n'), ((306, 338), 'dracoon.core.Dracoon', 'core.Dracoon', ([], {'clientID': 'client_id'}), '(clientID=client_id)\n', (318, 338), False, 'from dracoon import core\n'), ((780, 843), 'typer.style', 'typer.style', (['"""Authenticated: 200"""'], {'fg': 'typer.colors.BRIGHT_GREEN'}), "('Authenticated: 200', fg=typer.colors.BRIGHT_GREEN)\n", (791, 843), False, 'import typer\n'), ((852, 880), 'typer.echo', 'typer.echo', (['f"""{success_txt}"""'], {}), "(f'{success_txt}')\n", (862, 880), False, 'import typer\n'), ((963, 1049), 'typer.style', 'typer.style', (['"""Authentication error: """'], {'bg': 'typer.colors.RED', 'fg': 'typer.colors.WHITE'}), "('Authentication error: ', bg=typer.colors.RED, fg=typer.colors.\n WHITE)\n", (974, 1049), False, 'import typer\n'), ((1767, 1830), 'typer.style', 'typer.style', (['"""Authenticated: 200"""'], {'fg': 'typer.colors.BRIGHT_GREEN'}), "('Authenticated: 200', fg=typer.colors.BRIGHT_GREEN)\n", (1778, 1830), False, 'import typer\n'), ((1839, 1867), 'typer.echo', 'typer.echo', (['f"""{success_txt}"""'], {}), "(f'{success_txt}')\n", (1849, 1867), False, 'import typer\n'), ((1950, 2036), 'typer.style', 'typer.style', (['"""Authentication error: """'], {'bg': 'typer.colors.RED', 'fg': 'typer.colors.WHITE'}), "('Authentication error: ', bg=typer.colors.RED, fg=typer.colors.\n WHITE)\n", (1961, 2036), False, 'import typer\n'), ((599, 635), 'typer.echo', 'typer.echo', (['f"""Connection error: {e}"""'], {}), "(f'Connection error: {e}')\n", (609, 635), False, 'import typer\n'), ((1586, 1622), 'typer.echo', 'typer.echo', (['f"""Connection error: {e}"""'], {}), "(f'Connection error: {e}')\n", (1596, 1622), False, 'import typer\n')] |
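A minimal call sketch for the helpers above; every value below is a hypothetical placeholder, not something taken from the original file.

auth_header = password_flow(
    client_id="my_client_id",                   # hypothetical placeholder
    target_url="https://dracoon.example.com",   # hypothetical placeholder
    username="user@example.com",                # hypothetical placeholder
    password="not-a-real-password",             # hypothetical placeholder
)
# on success, auth_header holds the api_call_headers used for authenticated API calls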
import json
import logging
import os
from random import shuffle
import requests
from typing import Optional, Tuple
from tomd import Tomd
from urllib.parse import urlencode, parse_qs
from src.constants import NO_RESULTS_MESSAGE, RESPONSE_MESSAGE, FOLLOW_UP_URL, DDG_QUERY_URL
from src.utils import get_soup
logging.basicConfig()
def get_follow_up(query: str) -> Tuple[Optional[str], Optional[str]]:
logger = logging.getLogger('follow-up')
logger.setLevel('DEBUG')
url = DDG_QUERY_URL.format(urlencode({'q': query + ' site:answers.yahoo.com/question'}))
soup = get_soup(url)
# Get top 3 links
ddg_links = [a.get('href') for a in soup.find_all('a', {'class': 'result__a'})][:5]
# Parse out redirect link
qss = [link.split('?')[1] for link in ddg_links]
ya_links = [parse_qs(qs)['uddg'][0] for qs in qss]
logger.debug(f"{len(ya_links)} links found")
if not len(ya_links):
logger.debug('No answers found')
return (None, None)
shuffle(ya_links)
# For each link, see if it's answered (has 1+ answer). Return the first one
# found. If none are found in any links, return that no results are found
for link in ya_links:
soup = get_soup(link)
top_answer = soup.find('div', {'class': lambda c: c is not None and c.startswith('Answer__answer___')})
if top_answer is None:
logger.debug('No top answer found')
continue
top_answer_body = top_answer.find('div', {'class': lambda c: c is not None and c.startswith('ExpandableContent__content___')})
if top_answer_body is None:
logger.debug('No top answer body found')
continue
ya_question = soup.find('h1', {'class': lambda c: c is not None and c.startswith('Question__title___')}).text
answer = top_answer_body.encode_contents(formatter=None).decode("utf-8").strip()
logger.debug('Found answer')
return (ya_question, answer)
logger.debug('No answers found')
return (None, None)
def response_handler(event: dict, context: dict) -> dict:
logger = logging.getLogger('howdoi-processor')
logger.setLevel(logging.DEBUG)
logger.debug(f"event {event}")
query = event['query']
question, answer = get_follow_up(query)
if question is None:
response = NO_RESULTS_MESSAGE.format(query)
else:
md_answer = Tomd(answer).markdown.replace('\n\n', '\n').strip()
response = RESPONSE_MESSAGE.format(question, md_answer).strip()
updated_answer = {
'content': response
}
r = requests.post(
FOLLOW_UP_URL.format(event['application_id'], event['token']),
headers={'Content-Type': 'application/json'},
data=json.dumps(updated_answer)
)
if r.status_code == 200:
logger.debug('Sent message successfully')
else:
logger.debug(f"Discord responded with status code {r.status_code}: {r.text}")
return {} | [
"logging.basicConfig",
"logging.getLogger",
"src.constants.RESPONSE_MESSAGE.format",
"random.shuffle",
"src.utils.get_soup",
"tomd.Tomd",
"json.dumps",
"urllib.parse.parse_qs",
"urllib.parse.urlencode",
"src.constants.NO_RESULTS_MESSAGE.format",
"src.constants.FOLLOW_UP_URL.format"
] | [((310, 331), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (329, 331), False, 'import logging\n'), ((417, 447), 'logging.getLogger', 'logging.getLogger', (['"""follow-up"""'], {}), "('follow-up')\n", (434, 447), False, 'import logging\n'), ((582, 595), 'src.utils.get_soup', 'get_soup', (['url'], {}), '(url)\n', (590, 595), False, 'from src.utils import get_soup\n'), ((997, 1014), 'random.shuffle', 'shuffle', (['ya_links'], {}), '(ya_links)\n', (1004, 1014), False, 'from random import shuffle\n'), ((2106, 2143), 'logging.getLogger', 'logging.getLogger', (['"""howdoi-processor"""'], {}), "('howdoi-processor')\n", (2123, 2143), False, 'import logging\n'), ((509, 569), 'urllib.parse.urlencode', 'urlencode', (["{'q': query + ' site:answers.yahoo.com/question'}"], {}), "({'q': query + ' site:answers.yahoo.com/question'})\n", (518, 569), False, 'from urllib.parse import urlencode, parse_qs\n'), ((1215, 1229), 'src.utils.get_soup', 'get_soup', (['link'], {}), '(link)\n', (1223, 1229), False, 'from src.utils import get_soup\n'), ((2332, 2364), 'src.constants.NO_RESULTS_MESSAGE.format', 'NO_RESULTS_MESSAGE.format', (['query'], {}), '(query)\n', (2357, 2364), False, 'from src.constants import NO_RESULTS_MESSAGE, RESPONSE_MESSAGE, FOLLOW_UP_URL, DDG_QUERY_URL\n'), ((2609, 2670), 'src.constants.FOLLOW_UP_URL.format', 'FOLLOW_UP_URL.format', (["event['application_id']", "event['token']"], {}), "(event['application_id'], event['token'])\n", (2629, 2670), False, 'from src.constants import NO_RESULTS_MESSAGE, RESPONSE_MESSAGE, FOLLOW_UP_URL, DDG_QUERY_URL\n'), ((2739, 2765), 'json.dumps', 'json.dumps', (['updated_answer'], {}), '(updated_answer)\n', (2749, 2765), False, 'import json\n'), ((806, 818), 'urllib.parse.parse_qs', 'parse_qs', (['qs'], {}), '(qs)\n', (814, 818), False, 'from urllib.parse import urlencode, parse_qs\n'), ((2466, 2510), 'src.constants.RESPONSE_MESSAGE.format', 'RESPONSE_MESSAGE.format', (['question', 'md_answer'], {}), '(question, md_answer)\n', (2489, 2510), False, 'from src.constants import NO_RESULTS_MESSAGE, RESPONSE_MESSAGE, FOLLOW_UP_URL, DDG_QUERY_URL\n'), ((2395, 2407), 'tomd.Tomd', 'Tomd', (['answer'], {}), '(answer)\n', (2399, 2407), False, 'from tomd import Tomd\n')] |
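A local-use sketch of get_follow_up above (requires network access; the query string is illustrative only).

question, answer = get_follow_up("how do I boil an egg")
if question is None:
    print(NO_RESULTS_MESSAGE.format("how do I boil an egg"))
else:
    print(question)
    print(Tomd(answer).markdown.strip())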
import argparse
from pathlib import Path
import re
header = [
'w4 0x40004c4c 0x00000180',
'w4 0x40004610 0x00000007',
'w4 0x40004088 0x0000003f',
'w4 0x40004044 0x00000007',
'w4 0x4000404c 0x00000006',
'w4 0x40004064 0x00000001',
'w4 0x40004070 0x00000001',
'w4 0x4000411c 0x00000006',
'w4 0x40005310 0x1acce551',
'w4 0x40004054 0x00000001',
'sleep 100',
'w4 0x40014000 0x0000bdff',
'sleep 100',
]
apbconfigon = [
'sleep 100',
'w4 0x40014000 0x00000000',
'sleep 100',
'w4 0x40000300 0x00000001',
'sleep 100',
]
apbconfigoff = [
'sleep 100',
'w4 0x40000300 0x00000000',
'sleep 100',
]
footer = [
'sleep 100',
'w4 0x40014000 0x00000000',
'w4 0x400047f0 0x00000000',
'sleep 100',
'w4 0x400047f4 0x00000000',
'w4 0x40004088 0x00000000',
'w4 0x40004094 0x00000000',
'w4 0x400047f8 0x00000090',
'w4 0x40004040 0x00000295',
'w4 0x40004048 0x00000001',
'w4 0x4000404c 0x0000003f',
'sleep 100',
'w4 0x40004c4c 0x000009a0',
'sleep 100',
]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Converts QuickLogic bitstream to JLINK script"
)
parser.add_argument(
"infile",
type=Path,
help="The input file (bitstream)",
)
parser.add_argument(
"outfile",
type=Path,
help="The output file (JLink script)",
)
args = parser.parse_args()
##################### JLINK HEADER #######################
jlinkscript = header
##########################################################
############# BITSTREAM JLINK SCRIPT #####################
with open(args.infile, 'rb') as bitstream:
while True:
data = bitstream.read(4)
if not data:
break
bitword = int.from_bytes(data, 'little')
line = 'w4 0x40014ffc, 0x{:08x}'.format(bitword)
jlinkscript.append(line)
##########################################################
######################### MEMINIT JLINK SCRIPT ###########################
line_parser = re.compile(r'(?P<addr>[xX0-9a-f]+).*:(?P<data>[xX0-9a-f]+).*')
fp = open(Path(args.infile.parent).joinpath("ram.mem"), 'r')
file_data = fp.readlines()
counter = 0
headerdata = header
for line in file_data:
linematch = line_parser.match(line)
if linematch:
if (counter == 0):
jlinkscript.extend(apbconfigon)
curr_data = linematch.group('data')
curr_addr = linematch.group('addr')
line = 'w4 {}, {}'.format(curr_addr, curr_data)
jlinkscript.append(line)
counter += 1
else:
continue
if (counter != 0):
jlinkscript.extend(apbconfigoff)
##############################################################################
##################### JLINK FOOTER #######################
jlinkscript.extend(footer)
##########################################################
############# IOMUX JLINK SCRIPT ###################
# if bitstream file == NAME.bit, then the iomux jlink script will be generated as:
# NAME_iomux.jlink, use this to locate the iomux binary
iomuxjlink_file = Path(args.infile.parent).joinpath(args.infile.stem + "_iomux.jlink")
with open(iomuxjlink_file, 'r') as iomuxjlink:
iomux_lines = iomuxjlink.readlines()
for line in iomux_lines:
jlinkscript.append(line.rstrip())
####################################################
################ FINAL JLINK FILE #################
with open(args.outfile, 'w') as jlink:
jlink.write('\n'.join(jlinkscript))
###################################################
| [
"pathlib.Path",
"argparse.ArgumentParser",
"re.compile"
] | [((1115, 1204), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts QuickLogic bitstream to JLINK script"""'}), "(description=\n 'Converts QuickLogic bitstream to JLINK script')\n", (1138, 1204), False, 'import argparse\n'), ((2155, 2216), 're.compile', 're.compile', (['"""(?P<addr>[xX0-9a-f]+).*:(?P<data>[xX0-9a-f]+).*"""'], {}), "('(?P<addr>[xX0-9a-f]+).*:(?P<data>[xX0-9a-f]+).*')\n", (2165, 2216), False, 'import re\n'), ((3319, 3343), 'pathlib.Path', 'Path', (['args.infile.parent'], {}), '(args.infile.parent)\n', (3323, 3343), False, 'from pathlib import Path\n'), ((2233, 2257), 'pathlib.Path', 'Path', (['args.infile.parent'], {}), '(args.infile.parent)\n', (2237, 2257), False, 'from pathlib import Path\n')] |
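A small check of the word formatting used in the bitstream loop above: the four bytes ff bd 00 00, read little-endian, give the same 0x0000bdff value that appears in the header block.

word = int.from_bytes(b'\xff\xbd\x00\x00', 'little')
print('w4 0x40014ffc, 0x{:08x}'.format(word))  # -> w4 0x40014ffc, 0x0000bdff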
from flask import request
from flask_restful import Resource
from app import mysql
import json
class postalClass(Resource):
def get(self, cp):
cur = mysql.get_db().cursor()
#query insert record
cur.execute("INSERT INTO records (url, cp, host) VALUES ('/postal',%s,%s)",(cp,request.remote_addr) )
# query consult código postal
query=cur.execute("SELECT cp,colonia,municipio,estado,ciudad FROM `location_postal` WHERE cp=%s ",(cp))
cur.connection.commit()
if query>0:
#result=cur.fetchone()
#col_names = [i[0] for i in cur.description]
col_names = cur.fetchone()
col_colonias = [i[1] for i in cur]
res={"status": "success","data": {"cp":col_names[0],"colonia": col_colonias,"municipio":col_names[2],"estado":col_names[3],"ciudad":col_names[4]}, "error": ""}
return res
else:
return {"status": "error","data": "", "error": "No se encontraron coincidencias"} | [
"app.mysql.get_db"
] | [((154, 168), 'app.mysql.get_db', 'mysql.get_db', ([], {}), '()\n', (166, 168), False, 'from app import mysql\n')] |
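The routing for this resource is not shown in the file; a typical Flask-RESTful wiring sketch (the URL rule and the app import are assumptions) would look like:

from flask_restful import Api
from app import app  # assumption: the same `app` package also exposes the Flask application

api = Api(app)
api.add_resource(postalClass, '/postal/<string:cp>')  # GET /postal/01000 -> postalClass.get(cp='01000')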
#!/usr/bin/env python3
"""
<NAME>, 20181003
"""
import matplotlib.dates as mdates
from netCDF4 import Dataset, num2date
from testrestool import *
def main():
# variable
var = 'PE'
# loop over all cases
# nc = len(case_list)
# nm = len(turbmethod_list)
if len(sys.argv) == 1:
args_list = np.arange(nc)
else:
args_list = sys.argv[1:]
for i in args_list:
i = int(i)
case = case_list[i]
depth = depth_list[i]
print(case)
plot_ts_cmp_dz_dt(case, var, depth)
def plot_ts_cmp_dz_dt(case, var, depth):
dz = np.zeros(nzt)
dt = np.zeros(nzt)
dz_str, dt_str = dzdt_list[0].split('_')
dz[0] = float(dz_str.replace('VR','').replace('m',''))
dt[0] = float(dt_str.replace('DT','').replace('s',''))
# input data directory
dataroot = dir_in+'/'+case
# paths of files
paths = [dataroot+'/'+turbmethod_list[i]+'_VR1m_DT60s/gotm_out.nc' for i in range(nm)]
# initialize dataset
data = GOTMOutputDataSet(paths=paths, keys=turbmethod_list)
# output figure name
figdir = dir_out+'/'+case
os.makedirs(figdir, exist_ok=True)
figname = figdir+'/IPE_cmp_dzdt_'+var+'.png'
# figure
nrow = (nm+2)//2
fig_width = 12
fig_height = 3+2*(nrow-1)
# plot figure
height_ratios = [1]*nrow
height_ratios.append(0.15)
width_ratios = [1, 1, 0.05]
f, axarr = plt.subplots(nrow, 2)
f.set_size_inches(fig_width, fig_height)
# panel a
gotmdata0 = data.cases['KPP-CVMix']
ts = gotmdata0.read_timeseries(var, depth=depth)
ts0 = ts.data
dttime0 = ts.time
par1 = axarr[0, 0].twinx()
par1.plot(dttime0, ts0, color='lightgray', linewidth=2.5)
par1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
par1.set_ylabel('$\Delta PE$ (J)', fontsize=12)
for i in np.arange(nm):
gotmdata1 = data.cases[turbmethod_list[i]]
ts = gotmdata1.read_timeseries(var, depth=depth)
ts1 = ts.data
dttime1 = ts.time
axarr[0, 0].plot(dttime1, ts1-ts0, color=tm_color[i], linewidth=1)
axarr[0, 0].set_ylabel('$\Delta PE -\Delta PE_r$ (J)', fontsize=12)
axarr[0, 0].ticklabel_format(style='sci', axis='y', scilimits=(0,0))
axarr[0, 0].autoscale(enable=True, axis='x', tight=True)
axarr[0, 0].set_zorder(par1.get_zorder()+1)
axarr[0, 0].patch.set_visible(False)
axarr[0, 0].text(0.04, 0.92, '(a)', transform=axarr[0, 0].transAxes, fontsize=16,
fontweight='bold', va='top')
axarr[0, 0].xaxis.set_major_formatter(mdates.DateFormatter('%y-%m'))
# panel b-l
# loop over other turbmethods
for i in np.arange(nm):
n = icol_2col[i]
m = irow_2col[i]
# paths of files
tm_paths = [dataroot+'/'+turbmethod_list[i]+'_'+dzdt_list[k]+'/gotm_out.nc'
for k in range(nzt)]
# initialize dataset
tm_data = GOTMOutputDataSet(paths=tm_paths, keys=dzdt_list)
# base case
gotmdata0 = tm_data.cases['VR1m_DT60s']
fld0 = gotmdata0.read_timeseries(var, depth=depth).data
dfld0 = fld0[-1] - fld0[0]
error_dzdt = np.zeros(nzt)
# loop over other cases
for ii in np.arange(nzt-1):
j = ii+1
gotmdata1 = tm_data.cases[dzdt_list[j]]
fld1 = gotmdata1.read_timeseries(var, depth=depth).data
# compute percentage error
error_dzdt[j] = np.sqrt(((fld1-fld0)**2).mean())/abs(dfld0)*100
# get coordinate
dz_str, dt_str = dzdt_list[j].split('_')
dz[j] = float(dz_str.replace('VR','').replace('m',''))
dt[j] = float(dt_str.replace('DT','').replace('s',''))
# plt.plot(dz[0], error3_dzdt[0], 'ko')
axarr[m, n].plot(dz[0:9:4], error_dzdt[0:9:4], '--kx', linewidth=1.5)
axarr[m, n].plot(dz[1:10:4], error_dzdt[1:10:4], '--rx', linewidth=1.5)
axarr[m, n].plot(dz[2:11:4], error_dzdt[2:11:4], '--bx', linewidth=1.5)
axarr[m, n].plot(dz[3:12:4], error_dzdt[3:12:4], '--gx', linewidth=1.5)
axarr[m, n].axhline(0, color='black', linewidth=0.75)
axarr[m, n].set_xlabel('$\Delta z$ (m)', fontsize=12)
axarr[m, n].set_ylabel('% error in $PE$', fontsize=12)
axarr[m, n].set_xlim(0,11)
axarr[m, n].text(0.04, 0.92, labels_2col[i], transform=axarr[m, n].transAxes,
fontsize=16, fontweight='bold', va='top')
axarr[m, n].text(0.98, 1.15, legend_list[i], color=tm_color[i],
transform=axarr[m, n].transAxes, fontsize=13, fontweight='bold',
va='top', ha='right')
axarr[m, n].ticklabel_format(style='sci', axis='y', scilimits=(0,0))
axarr[m, n].set_ylim(0, 4)
# reduce margin
plt.tight_layout()
# save figure
plt.savefig(figname, dpi = 300)
# close figure
plt.close()
if __name__ == "__main__":
main()
| [
"matplotlib.dates.DateFormatter"
] | [((2571, 2600), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%y-%m"""'], {}), "('%y-%m')\n", (2591, 2600), True, 'import matplotlib.dates as mdates\n')] |
import argparse
import matplotlib.pyplot as plt
import numpy as np
import h5py
def plot_data(ax, data, title=None, args=None):
if args.start_idx:
data = data[args.start_idx:, ...]
heatmap = ax.pcolor(np.swapaxes(data, 0, 1), vmin=-1., vmax=1.)
if not args.plot_both:
plt.colorbar(heatmap)
# Configure y axis.
yticks = np.arange(1, data.shape[1] + 1).tolist()
ylabels = [str(tick) if (tick - 1) % 5 == 0 else '' for tick in yticks]
ax.set_yticks(yticks, ylabels)
#ax.set_ylim([0, data.shape[1]])
ax.set_ylabel('feature dimension')
# Configure x axis.
ax.set_xlabel('timestep')
ax.set_xlim([0, data.shape[0]])
def decode_tokens(tokens, vocabulary):
return ' '.join([vocabulary[token] for token in tokens])
def insert_newlines(string, every=64):
lines = []
    for i in range(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
def main(args):
f = h5py.File(args.input, 'r')
if args.idx is None:
# Select a random element.
nb_motions = len(f['motion_inputs'])
sample_idx = np.random.randint(nb_motions)
else:
sample_idx = args.idx
# Find associated annotations.
annotation_indexes = []
mat_id = None
for motion_idx, annotation_idx, id_idx in f['mapping']:
if motion_idx == sample_idx:
mat_id = f['ids'][id_idx]
annotation_indexes.append(annotation_idx)
assert id_idx is not None
# Get data.
motion_input = f['motion_inputs'][sample_idx]
motion_target = f['motion_targets'][sample_idx]
annotation_inputs = f['annotation_inputs'][annotation_indexes]
annotation_targets = f['annotation_targets'][annotation_indexes]
vocabulary = f['vocabulary']
print('Visualizing index {} from "{}" with ID {}:'.format(sample_idx, args.input, mat_id))
print('')
print('All {} annotation inputs:'.format(len(annotation_inputs)))
for idx, annotation_input in enumerate(annotation_inputs):
print(' {} decoded: {}'.format(idx + 1, decode_tokens(annotation_input, vocabulary)))
print(' {} indexes: {}'.format(idx + 1, ','.join([str(x) for x in annotation_input])))
print('')
print('')
print('All {} annotation targets:'.format(len(annotation_targets)))
for idx, annotation_target in enumerate(annotation_targets):
print(' {} decoded: {}'.format(idx + 1, decode_tokens(annotation_target, vocabulary)))
print(' {} indexes: {}'.format(idx + 1, ','.join([str(x) for x in annotation_target])))
print('')
print('')
# Visualize motion data.
print('Visualizing motion input and motion target ...')
if args.plot_both:
fig, subplots = plt.subplots(nrows=2, figsize=args.figsize)
else:
fig, subplots = plt.subplots(nrows=1, figsize=args.figsize)
subplots = [subplots]
for data, plot in zip([motion_input, motion_target], subplots):
print(data[0, :], data[-1, :])
plot_data(plot, data, args=args)
plt.tight_layout(pad=1.)
if args.output:
plt.savefig(args.output)
else:
plt.show()
print('done')
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('input', type=str)
parser.add_argument('--idx', type=int, default=None)
parser.add_argument('--plot-both', type=int, default=1)
parser.add_argument('--start-idx', type=int, default=None)
parser.add_argument('--figsize', nargs=2, type=float, default=(20, 10))
parser.add_argument('--output', type=str, default=None)
main(parser.parse_args())
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.colorbar",
"h5py.File",
"numpy.swapaxes",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((974, 1000), 'h5py.File', 'h5py.File', (['args.input', '"""r"""'], {}), "(args.input, 'r')\n", (983, 1000), False, 'import h5py\n'), ((3066, 3091), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1.0)'}), '(pad=1.0)\n', (3082, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3248, 3287), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (3271, 3287), False, 'import argparse\n'), ((220, 243), 'numpy.swapaxes', 'np.swapaxes', (['data', '(0)', '(1)'], {}), '(data, 0, 1)\n', (231, 243), True, 'import numpy as np\n'), ((299, 320), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {}), '(heatmap)\n', (311, 320), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1157), 'numpy.random.randint', 'np.random.randint', (['nb_motions'], {}), '(nb_motions)\n', (1145, 1157), True, 'import numpy as np\n'), ((2762, 2805), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': 'args.figsize'}), '(nrows=2, figsize=args.figsize)\n', (2774, 2805), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2883), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'figsize': 'args.figsize'}), '(nrows=1, figsize=args.figsize)\n', (2852, 2883), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3143), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {}), '(args.output)\n', (3130, 3143), True, 'import matplotlib.pyplot as plt\n'), ((3162, 3172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3170, 3172), True, 'import matplotlib.pyplot as plt\n'), ((359, 390), 'numpy.arange', 'np.arange', (['(1)', '(data.shape[1] + 1)'], {}), '(1, data.shape[1] + 1)\n', (368, 390), True, 'import numpy as np\n')] |
'''This is the repo which contains the original code to the WACV 2021 paper
"Same Same But DifferNet: Semi-Supervised Defect Detection with Normalizing Flows"
by <NAME>, <NAME> and <NAME>.
For further information contact <NAME> (<EMAIL>)'''
import config as c
from train import *
from utils import load_datasets, make_dataloaders
import time
import gc
import json
_, _, test_set = load_datasets(c.dataset_path, c.class_name, test=True)
_, _, test_loader = make_dataloaders(None, None, test_set, test=True)
model = torch.load("models/" + c.modelname + "", map_location=torch.device('cpu'))
with open('models/' + c.modelname + '.json') as jsonfile:
model_parameters = json.load(jsonfile)
time_start = time.time()
test(model, model_parameters, test_loader)
time_end = time.time()
time_c = time_end - time_start  # elapsed run time
print("test time cost: {:f} s".format(time_c)) | [
"json.load",
"utils.make_dataloaders",
"time.time",
"utils.load_datasets"
] | [((383, 437), 'utils.load_datasets', 'load_datasets', (['c.dataset_path', 'c.class_name'], {'test': '(True)'}), '(c.dataset_path, c.class_name, test=True)\n', (396, 437), False, 'from utils import load_datasets, make_dataloaders\n'), ((458, 507), 'utils.make_dataloaders', 'make_dataloaders', (['None', 'None', 'test_set'], {'test': '(True)'}), '(None, None, test_set, test=True)\n', (474, 507), False, 'from utils import load_datasets, make_dataloaders\n'), ((708, 719), 'time.time', 'time.time', ([], {}), '()\n', (717, 719), False, 'import time\n'), ((774, 785), 'time.time', 'time.time', ([], {}), '()\n', (783, 785), False, 'import time\n'), ((674, 693), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (683, 693), False, 'import json\n')] |
from os import path
from setuptools import setup
name = 'sam_anomaly_detector'
here = path.abspath(path.dirname(__file__))
module_path = path.join(here, '{}/__init__.py'.format(name))
version_line = [line for line in open(module_path) if line.startswith('__version__')][0]
version = version_line.split('__version__ = ')[-1][1:][:-2]
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=name,
packages=[name],
description='Sam media anomaly detector library',
long_description=long_description,
version=version,
url='https://pypi.python.org/pypi/sam-anomaly-detector',
author='<NAME>',
author_email='<EMAIL>',
keywords=['forecast', 'fbprophet', 'anomaly-detection'],
license='Apache-2.0',
classifiers=[
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=[
'numpy',
'pandas',
'pystan',
'fbprophet',
],
python_requires='>=3',
)
| [
"os.path.join",
"os.path.dirname",
"setuptools.setup"
] | [((435, 1064), 'setuptools.setup', 'setup', ([], {'name': 'name', 'packages': '[name]', 'description': '"""Sam media anomaly detector library"""', 'long_description': 'long_description', 'version': 'version', 'url': '"""https://pypi.python.org/pypi/sam-anomaly-detector"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'keywords': "['forecast', 'fbprophet', 'anomaly-detection']", 'license': '"""Apache-2.0"""', 'classifiers': "['Programming Language :: Python :: 3', 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules']", 'install_requires': "['numpy', 'pandas', 'pystan', 'fbprophet']", 'python_requires': '""">=3"""'}), "(name=name, packages=[name], description=\n 'Sam media anomaly detector library', long_description=long_description,\n version=version, url=\n 'https://pypi.python.org/pypi/sam-anomaly-detector', author='<NAME>',\n author_email='<EMAIL>', keywords=['forecast', 'fbprophet',\n 'anomaly-detection'], license='Apache-2.0', classifiers=[\n 'Programming Language :: Python :: 3',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules'],\n install_requires=['numpy', 'pandas', 'pystan', 'fbprophet'],\n python_requires='>=3')\n", (440, 1064), False, 'from setuptools import setup\n'), ((102, 124), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (114, 124), False, 'from os import path\n'), ((347, 376), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (356, 376), False, 'from os import path\n')] |
import unittest
from lib.solutions.checkout import checkout
class TestSum(unittest.TestCase):
def test_a_offer(self):
self.assertEqual(130, checkout('AAA'))
    def test_blank(self):
        self.assertEqual(0, checkout(""))
    def test_a_single(self):
        self.assertEqual(50, checkout("A"))
def test_b_offer(self):
self.assertEqual(45, checkout('BB'))
def test_all(self):
self.assertEqual(160, checkout('BBABCD'))
def test_error(self):
self.assertEqual(-1, checkout('-'))
def test_other_error(self):
self.assertEqual(-1, checkout(2))
def test_unicode(self):
self.assertEqual(130, checkout(u'AAA'))
def test_big_a_offer(self):
self.assertEqual(200, checkout('AAAAA'))
def test_be_offer(self):
self.assertEqual(130, checkout('EEBA'))
def test_be_double_offer(self):
self.assertEqual(160, checkout('EEEEBB'))
def test_both_a_offer(self):
self.assertEqual(410, checkout('AAAAAAAAAB'))
def test_f_offer(self):
self.assertEqual(440, checkout('AAAAAAAAABFFFF'))
def test_group_offer(self):
self.assertEqual(45 + 20 + 17, checkout('STXYZ'))
def test_group_part(self):
self.assertEqual(45 + 17, checkout('SXYZ'))
def test_both(self):
self.assertEqual(440 + 45 + 20 + 17, checkout('AAAAAAAAABFFFFSTXYZ'))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"lib.solutions.checkout.checkout"
] | [((1421, 1436), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1434, 1436), False, 'import unittest\n'), ((155, 170), 'lib.solutions.checkout.checkout', 'checkout', (['"""AAA"""'], {}), "('AAA')\n", (163, 170), False, 'from lib.solutions.checkout import checkout\n'), ((230, 242), 'lib.solutions.checkout.checkout', 'checkout', (['""""""'], {}), "('')\n", (238, 242), False, 'from lib.solutions.checkout import checkout\n'), ((300, 313), 'lib.solutions.checkout.checkout', 'checkout', (['"""A"""'], {}), "('A')\n", (308, 313), False, 'from lib.solutions.checkout import checkout\n'), ((373, 387), 'lib.solutions.checkout.checkout', 'checkout', (['"""BB"""'], {}), "('BB')\n", (381, 387), False, 'from lib.solutions.checkout import checkout\n'), ((444, 462), 'lib.solutions.checkout.checkout', 'checkout', (['"""BBABCD"""'], {}), "('BBABCD')\n", (452, 462), False, 'from lib.solutions.checkout import checkout\n'), ((520, 533), 'lib.solutions.checkout.checkout', 'checkout', (['"""-"""'], {}), "('-')\n", (528, 533), False, 'from lib.solutions.checkout import checkout\n'), ((597, 608), 'lib.solutions.checkout.checkout', 'checkout', (['(2)'], {}), '(2)\n', (605, 608), False, 'from lib.solutions.checkout import checkout\n'), ((669, 685), 'lib.solutions.checkout.checkout', 'checkout', (['u"""AAA"""'], {}), "(u'AAA')\n", (677, 685), False, 'from lib.solutions.checkout import checkout\n'), ((750, 767), 'lib.solutions.checkout.checkout', 'checkout', (['"""AAAAA"""'], {}), "('AAAAA')\n", (758, 767), False, 'from lib.solutions.checkout import checkout\n'), ((829, 845), 'lib.solutions.checkout.checkout', 'checkout', (['"""EEBA"""'], {}), "('EEBA')\n", (837, 845), False, 'from lib.solutions.checkout import checkout\n'), ((914, 932), 'lib.solutions.checkout.checkout', 'checkout', (['"""EEEEBB"""'], {}), "('EEEEBB')\n", (922, 932), False, 'from lib.solutions.checkout import checkout\n'), ((998, 1020), 'lib.solutions.checkout.checkout', 'checkout', (['"""AAAAAAAAAB"""'], {}), "('AAAAAAAAAB')\n", (1006, 1020), False, 'from lib.solutions.checkout import checkout\n'), ((1081, 1107), 'lib.solutions.checkout.checkout', 'checkout', (['"""AAAAAAAAABFFFF"""'], {}), "('AAAAAAAAABFFFF')\n", (1089, 1107), False, 'from lib.solutions.checkout import checkout\n'), ((1181, 1198), 'lib.solutions.checkout.checkout', 'checkout', (['"""STXYZ"""'], {}), "('STXYZ')\n", (1189, 1198), False, 'from lib.solutions.checkout import checkout\n'), ((1266, 1282), 'lib.solutions.checkout.checkout', 'checkout', (['"""SXYZ"""'], {}), "('SXYZ')\n", (1274, 1282), False, 'from lib.solutions.checkout import checkout\n'), ((1355, 1386), 'lib.solutions.checkout.checkout', 'checkout', (['"""AAAAAAAAABFFFFSTXYZ"""'], {}), "('AAAAAAAAABFFFFSTXYZ')\n", (1363, 1386), False, 'from lib.solutions.checkout import checkout\n')] |
"""
ColorHelper.
Copyright (c) 2015 - 2017 <NAME> <<EMAIL>>
License: MIT
"""
import sublime
import sublime_plugin
from ColorHelper.lib.rgba import RGBA
from ColorHelper.lib import csscolors
import threading
from time import time, sleep
import re
import os
import mdpopups
import ColorHelper.color_helper_util as util
from ColorHelper.color_helper_insert import InsertCalc, PickerInsertCalc
from ColorHelper.multiconf import get as qualify_settings
import traceback
from html.parser import HTMLParser
__pc_name__ = "ColorHelper"
PREVIEW_SCALE_Y = 2
PALETTE_SCALE_X = 8
PALETTE_SCALE_Y = 2
BORDER_SIZE = 2
PREVIEW_BORDER_SIZE = 1
reload_flag = False
ch_last_updated = None
ch_settings = None
unloading = False
if 'ch_thread' not in globals():
ch_thread = None
if 'ch_file_thread' not in globals():
ch_file_thread = None
if 'ch_preview_thread' not in globals():
ch_preview_thread = None
###########################
# Helper Classes/Functions
###########################
def start_file_index(view):
"""Kick off current file color index."""
global ch_file_thread
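    # Only kick off a new index when no file-index thread is already running.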
if view is not None and (ch_file_thread is None or not ch_file_thread.is_alive()):
rules = util.get_rules(view)
if rules:
scope = util.get_scope(view, rules, skip_sel_check=True)
if scope:
source = []
for r in view.find_by_selector(scope):
source.append(view.substr(r))
util.debug('Regions to search:\n', source)
if len(source):
ch_file_thread = ChFileIndexThread(
view, ' '.join(source),
rules.get('allowed_colors', []),
rules.get('use_hex_argb', False)
)
ch_file_thread.start()
s = sublime.load_settings('color_helper.sublime-settings')
if s.get('show_index_status', True):
sublime.status_message('File color indexer started...')
def preview_is_on_left():
"""Return boolean for positioning preview on left/right."""
return ch_settings.get('inline_preview_position') != 'right'
###########################
# Main Code
###########################
class ColorHelperCommand(sublime_plugin.TextCommand):
"""Color Helper command object."""
html_parser = HTMLParser()
def on_hide(self):
"""Hide popup event."""
self.view.settings().set('color_helper.popup_active', False)
self.view.settings().set('color_helper.popup_auto', self.auto)
def unescape(self, value):
"""Unescape url."""
return self.html_parser.unescape(value)
def on_navigate(self, href):
"""Handle link clicks."""
if href.startswith('__insert__'):
parts = href.split(':', 3)
self.show_insert(parts[1], parts[2], self.unescape(parts[3]))
elif href.startswith('__colors__'):
parts = href.split(':', 2)
self.show_colors(parts[1], self.unescape(parts[2]), update=True)
elif href == '__close__':
self.view.hide_popup()
elif href == '__palettes__':
self.show_palettes(update=True)
elif href == '__info__':
self.show_color_info(update=True)
elif href.startswith('__color_picker__'):
self.color_picker(color=href.split(':', 1)[1])
elif href.startswith('__add_fav__'):
self.add_fav(href.split(':', 1)[1])
elif href.startswith('__remove_fav__'):
self.remove_fav(href.split(':', 1)[1])
elif href.startswith('__delete_colors__'):
parts = href.split(':', 2)
self.show_colors(parts[1], self.unescape(parts[2]), delete=True, update=True)
elif href.startswith('__delete_color__'):
parts = href.split(':', 3)
self.delete_color(parts[1], parts[2], self.unescape(parts[3]))
elif href == '__delete__palettes__':
self.show_palettes(delete=True, update=True)
elif href.startswith('__delete__palette__'):
parts = href.split(':', 2)
self.delete_palette(parts[1], self.unescape(parts[2]))
elif href.startswith('__add_color__'):
self.show_palettes(color=href.split(':', 1)[1], update=True)
elif href.startswith('__add_palette_color__'):
parts = href.split(':', 3)
self.add_palette(parts[1], parts[2], self.unescape(parts[3]))
elif href.startswith('__create_palette__'):
parts = href.split(':', 2)
self.prompt_palette_name(parts[1], parts[2])
elif href.startswith('__convert_alpha__'):
parts = href.split(':', 2)
self.insert_color(parts[1], parts[2], alpha=True)
elif href.startswith('__convert__'):
parts = href.split(':', 2)
self.insert_color(parts[1], parts[2])
def repop(self):
"""Setup thread to repopup tooltip."""
        # Re-popup of the tooltip is currently disabled: this early return skips
        # the thread-nudging code below.
        return
if ch_thread.ignore_all:
return
now = time()
ch_thread.modified = True
ch_thread.time = now
def prompt_palette_name(self, palette_type, color):
"""Prompt user for new palette name."""
win = self.view.window()
if win is not None:
self.view.hide_popup()
win.show_input_panel(
"Palette Name:", '',
on_done=lambda name, t=palette_type, c=color: self.create_palette(name, t, color),
on_change=None,
on_cancel=self.repop
)
def create_palette(self, palette_name, palette_type, color):
"""Add color to new color palette."""
if palette_type == '__global__':
color_palettes = util.get_palettes()
for palette in color_palettes:
if palette_name == palette['name']:
sublime.error_message('The name of "%s" is already in use!')
return
color_palettes.append({"name": palette_name, 'colors': [color]})
util.save_palettes(color_palettes)
elif palette_type == '__project__':
color_palettes = util.get_project_palettes(self.view.window())
for palette in color_palettes:
if palette_name == palette['name']:
sublime.error_message('The name of "%s" is already in use!')
return
color_palettes.append({"name": palette_name, 'colors': [color]})
util.save_project_palettes(self.view.window(), color_palettes)
self.repop()
def add_palette(self, color, palette_type, palette_name):
"""Add pallete."""
if palette_type == "__special__":
if palette_name == 'Favorites':
favs = util.get_favs()['colors']
if color not in favs:
favs.append(color)
util.save_palettes(favs, favs=True)
self.show_color_info(update=True)
elif palette_type in ('__global__', '__project__'):
if palette_type == '__global__':
color_palettes = util.get_palettes()
else:
color_palettes = util.get_project_palettes(self.view.window())
for palette in color_palettes:
if palette_name == palette['name']:
if color not in palette['colors']:
palette['colors'].append(color)
if palette_type == '__global__':
util.save_palettes(color_palettes)
else:
util.save_project_palettes(self.view.window(), color_palettes)
self.show_color_info(update=True)
break
def delete_palette(self, palette_type, palette_name):
"""Delete palette."""
if palette_type == "__special__":
if palette_name == 'Favorites':
util.save_palettes([], favs=True)
self.show_palettes(delete=True, update=False)
elif palette_type in ('__global__', '__project__'):
if palette_type == '__global__':
color_palettes = util.get_palettes()
else:
color_palettes = util.get_project_palettes(self.view.window())
count = -1
index = None
for palette in color_palettes:
count += 1
if palette_name == palette['name']:
index = count
break
if index is not None:
del color_palettes[index]
if palette_type == '__global__':
util.save_palettes(color_palettes)
else:
util.save_project_palettes(self.view.window(), color_palettes)
self.show_palettes(delete=True, update=False)
def delete_color(self, color, palette_type, palette_name):
"""Delete color."""
if palette_type == '__special__':
if palette_name == "Favorites":
favs = util.get_favs()['colors']
if color in favs:
favs.remove(color)
util.save_palettes(favs, favs=True)
self.show_colors(palette_type, palette_name, delete=True, update=False)
elif palette_type in ('__global__', '__project__'):
if palette_type == '__global__':
color_palettes = util.get_palettes()
else:
color_palettes = util.get_project_palettes(self.view.window())
for palette in color_palettes:
if palette_name == palette['name']:
if color in palette['colors']:
palette['colors'].remove(color)
if palette_type == '__global__':
util.save_palettes(color_palettes)
else:
util.save_project_palettes(self.view.window(), color_palettes)
self.show_colors(palette_type, palette_name, delete=True, update=False)
break
def add_fav(self, color):
"""Add favorite."""
favs = util.get_favs()['colors']
favs.append(color)
util.save_palettes(favs, favs=True)
        # For some reason, if we use update here,
        # the convert divider ends up too wide.
self.show_color_info(update=False)
def remove_fav(self, color):
"""Remove favorite."""
favs = util.get_favs()['colors']
favs.remove(color)
util.save_palettes(favs, favs=True)
        # For some reason, if we use update here,
        # the convert divider ends up too wide.
self.show_color_info(update=False)
def color_picker(self, color):
"""Get color with color picker."""
if self.color_picker_package:
s = sublime.load_settings('color_helper_share.sublime-settings')
s.set('color_pick_return', None)
self.view.window().run_command(
'color_pick_api_get_color',
{'settings': 'color_helper_share.sublime-settings', "default_color": color[1:]}
)
new_color = s.get('color_pick_return', None)
if new_color is not None and new_color != color:
self.insert_color(new_color)
else:
sublime.set_timeout(self.show_color_info, 0)
else:
if not self.no_info:
on_cancel = {'command': 'color_helper', 'args': {'mode': "info", "auto": self.auto}}
elif not self.no_palette:
on_cancel = {'command': 'color_helper', 'args': {'mode': "palette", "auto": self.auto}}
else:
on_cancel = None
rules = util.get_rules(self.view)
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
use_hex_argb = rules.get("use_hex_argb", False) if rules else False
compress_hex = rules.get("compress_hex_output", False) if rules else False
self.view.run_command(
'color_helper_picker', {
'color': color,
'allowed_colors': allowed_colors,
'use_hex_argb': use_hex_argb,
'compress_hex': compress_hex,
'on_done': {'command': 'color_helper', 'args': {'mode': "color_picker_result"}},
'on_cancel': on_cancel
}
)
def insert_color(self, target_color, convert=None, picker=False, alpha=False):
"""Insert colors."""
sels = self.view.sel()
if (len(sels) == 1 and sels[0].size() == 0):
point = sels[0].begin()
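            # A trailing '@<n>' on the color encodes how many decimal places to keep
            # when formatting the alpha channel (defaults to 3).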
parts = target_color.split('@')
target_color = parts[0]
dlevel = len(parts[1]) if len(parts) > 1 else 3
if not picker:
rules = util.get_rules(self.view)
use_hex_argb = rules.get("use_hex_argb", False) if rules else False
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
compress_hex = rules.get('compress_hex_output', False) if rules else False
calc = InsertCalc(self.view, point, target_color, convert, allowed_colors, use_hex_argb)
calc.calc()
if alpha:
calc.alpha_hex = target_color[-2:]
calc.alpha = util.fmt_float(float(int(calc.alpha_hex, 16)) / 255.0, dlevel)
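                # Emit the replacement text in whichever notation the conversion
                # target uses (webcolor, rgb/rgba, gray, hsl/hsla, hwb, or hex).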
if calc.web_color and not calc.alpha:
value = calc.web_color
elif calc.convert_rgb:
value = "%d, %d, %d" % (
int(calc.color[1:3], 16),
int(calc.color[3:5], 16),
int(calc.color[5:7], 16)
)
if calc.alpha:
value += ', %s' % calc.alpha
value = ("rgba(%s)" if calc.alpha else "rgb(%s)") % value
elif calc.convert_gray:
value = "%d" % int(calc.color[1:3], 16)
if calc.alpha:
value += ', %s' % calc.alpha
value = "gray(%s)" % value
elif calc.convert_hsl:
hsl = RGBA(calc.color)
h, l, s = hsl.tohls()
value = "%s, %s%%, %s%%" % (
util.fmt_float(h * 360.0),
util.fmt_float(s * 100.0),
util.fmt_float(l * 100.0)
)
if calc.alpha:
value += ', %s' % calc.alpha
value = ("hsla(%s)" if calc.alpha else "hsl(%s)") % value
elif calc.convert_hwb:
hwb = RGBA(calc.color)
h, w, b = hwb.tohwb()
value = "%s, %s%%, %s%%" % (
util.fmt_float(h * 360.0),
util.fmt_float(w * 100.0),
util.fmt_float(b * 100.0)
)
if calc.alpha:
value += ', %s' % calc.alpha
value = "hwb(%s)" % value
else:
use_upper = ch_settings.get("upper_case_hex", False)
color = calc.color
if calc.alpha_hex:
if convert == 'ahex':
color = '#' + calc.alpha_hex + calc.color[1:]
else:
color = calc.color + calc.alpha_hex
if compress_hex:
color = util.compress_hex(color)
value = color.upper() if use_upper else color.lower()
else:
rules = util.get_rules(self.view)
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
calc = PickerInsertCalc(self.view, point, allowed_colors)
calc.calc()
value = target_color
self.view.sel().subtract(sels[0])
self.view.sel().add(calc.region)
self.view.run_command("insert", {"characters": value})
self.view.hide_popup()
def format_palettes(self, color_list, label, palette_type, caption=None, color=None, delete=False):
"""Format color palette previews."""
colors = ['\n## %s\n' % label]
if caption:
colors.append('%s\n' % caption)
if delete:
label = '__delete__palette__:%s:%s' % (palette_type, label)
elif color:
label = '__add_palette_color__:%s:%s:%s' % (color, palette_type, label)
else:
label = '__colors__:%s:%s' % (palette_type, label)
colors.append(
'[%s](%s)' % (
mdpopups.color_box(
color_list, '#cccccc', '#333333',
height=self.color_h, width=self.palette_w * PALETTE_SCALE_X,
border_size=BORDER_SIZE, check_size=self.check_size(self.color_h)
),
label
)
)
return ''.join(colors)
def format_colors(self, color_list, label, palette_type, delete=None):
"""Format colors under palette."""
colors = ['\n## %s\n' % label]
count = 0
check_size = self.check_size(self.color_h)
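        # Render each color as a clickable box, eight boxes per row; entries may carry
        # an '@<n>' alpha-precision suffix which is stripped for display.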
for f in color_list:
parts = f.split('@')
if len(parts) > 1:
color = parts[0]
else:
color = f
no_alpha_color = color[:-2] if len(f) > 7 else color
if count != 0 and (count % 8 == 0):
colors.append('\n\n')
elif count != 0:
if sublime.platform() == 'windows':
colors.append(' ')
else:
colors.append(' ')
if delete:
colors.append(
'[%s](__delete_color__:%s:%s:%s)' % (
mdpopups.color_box(
[no_alpha_color, color], '#cccccc', '#333333',
height=self.color_h, width=self.color_w, border_size=BORDER_SIZE,
check_size=check_size
),
f, palette_type, label,
)
)
else:
colors.append(
'[%s](__insert__:%s:%s:%s)' % (
mdpopups.color_box(
[no_alpha_color, color], '#cccccc', '#333333',
height=self.color_h, width=self.color_w, border_size=BORDER_SIZE,
check_size=check_size
), f, palette_type, label
)
)
count += 1
return ''.join(colors)
def format_info(self, color, template_vars, alpha=None):
"""Format the selected color info."""
rgba = RGBA(color)
rules = util.get_rules(self.view)
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
use_hex_argb = rules.get("use_hex_argb", False) if rules else None
if alpha is not None:
parts = alpha.split('.')
dlevel = len(parts[1]) if len(parts) > 1 else None
alpha_hex = alpha_hex_display = "%02x" % (util.round_int(float(alpha) * 255.0) & 0xFF)
if dlevel is not None:
alpha_hex += '@%d' % dlevel
else:
alpha_hex = ''
try:
web_color = csscolors.hex2name(rgba.get_rgb())
except Exception:
web_color = None
h1, l, s = rgba.tohls()
h2, w, b = rgba.tohwb()
use_upper = ch_settings.get("upper_case_hex", False)
template_vars['color'] = color
template_vars['color_dlevel'] = rgba.get_rgb().lower() + alpha_hex
template_vars['web_color'] = web_color
if use_upper:
template_vars['hex_color'] = rgba.get_rgb().upper() if use_upper else rgba.get_rgb().lower()
template_vars['hex_alpha'] = 'FF' if not alpha else alpha_hex_display.upper()
template_vars['ahex_color'] = rgba.get_rgb().upper()[1:]
else:
template_vars['hex_color'] = rgba.get_rgb().lower()
template_vars['hex_alpha'] = 'ff' if not alpha else alpha_hex_display.lower()
template_vars['ahex_color'] = rgba.get_rgb().lower()[1:]
template_vars['alpha'] = alpha if alpha else '1'
template_vars['rgb_r'] = str(rgba.r)
template_vars['rgb_g'] = str(rgba.g)
template_vars['rgb_b'] = str(rgba.b)
template_vars['hsl_h'] = util.fmt_float(h1 * 360.0)
template_vars['hsl_s'] = util.fmt_float(s * 100.0)
template_vars['hsl_l'] = util.fmt_float(l * 100.0)
template_vars['hwb_h'] = util.fmt_float(h2 * 360.0)
template_vars['hwb_s'] = util.fmt_float(w * 100.0)
template_vars['hwb_l'] = util.fmt_float(b * 100.0)
s = sublime.load_settings('color_helper.sublime-settings')
show_global_palettes = s.get('enable_global_user_palettes', True)
show_project_palettes = s.get('enable_project_user_palettes', True)
show_favorite_palette = s.get('enable_favorite_palette', True)
show_current_palette = s.get('enable_current_file_palette', True)
show_conversions = s.get('enable_color_conversions', True)
show_picker = s.get('enable_color_picker', True)
palettes_enabled = (
show_global_palettes or show_project_palettes or
show_favorite_palette or show_current_palette
)
click_color_box_to_pick = s.get('click_color_box_to_pick', 'none')
if click_color_box_to_pick == 'color_picker' and show_picker:
template_vars['click_color_picker'] = True
elif click_color_box_to_pick == 'palette_picker' and palettes_enabled:
template_vars['click_palette_picker'] = True
if click_color_box_to_pick != 'palette_picker' and palettes_enabled:
template_vars['show_palette_menu'] = True
if click_color_box_to_pick != 'color_picker' and show_picker:
template_vars['show_picker_menu'] = True
if show_global_palettes or show_project_palettes:
template_vars['show_global_palette_menu'] = True
if show_favorite_palette:
template_vars['show_favorite_menu'] = True
template_vars['is_marked'] = (rgba.get_rgb().lower() + alpha_hex) in util.get_favs()['colors']
no_alpha_color = color[:-2] if len(color) > 7 else color
template_vars['color_preview'] = (
mdpopups.color_box(
[no_alpha_color, color], '#cccccc', '#333333',
height=self.color_h * PREVIEW_SCALE_Y, width=self.palette_w * PALETTE_SCALE_X,
border_size=BORDER_SIZE, check_size=self.check_size(self.color_h)
)
)
if show_conversions:
template_vars['show_conversions'] = True
template_vars['show_web_color'] = web_color and 'webcolors' in allowed_colors
template_vars['show_hex_color'] = "hex" in allowed_colors
if "hexa" in allowed_colors:
template_vars['show_hexa_color'] = not use_hex_argb
template_vars['show_ahex_color'] = bool(use_hex_argb)
template_vars['show_rgb_color'] = "rgb" in allowed_colors
template_vars['show_rgba_color'] = "rgba" in allowed_colors
template_vars['show_gray_color'] = "gray" in allowed_colors and util.is_gray(rgba.get_rgb())
template_vars['show_graya_color'] = "graya" in allowed_colors and util.is_gray(rgba.get_rgb())
template_vars['show_hsl_color'] = "hsl" in allowed_colors
template_vars['show_hsla_color'] = "hsla" in allowed_colors
template_vars['show_hwb_color'] = "hwb" in allowed_colors
template_vars['show_hwba_color'] = "hwba" in allowed_colors
def show_insert(self, color, palette_type, palette_name, update=False):
"""Show insert panel."""
sels = self.view.sel()
if (len(sels) == 1 and sels[0].size() == 0):
parts = color.split('@')
dlevel = len(parts[1]) if len(parts) > 1 else 3
point = sels[0].begin()
rules = util.get_rules(self.view)
use_hex_argb = rules.get("use_hex_argb", False) if rules else None
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
calc = InsertCalc(self.view, point, parts[0], 'rgba', allowed_colors, bool(use_hex_argb))
found = calc.calc()
rules = util.get_rules(self.view)
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
secondary_alpha = found and calc.alpha is not None and calc.alpha != '1'
rgba = RGBA(parts[0])
alpha = util.fmt_float(float(rgba.a) / 255.0, dlevel)
try:
web_color = csscolors.hex2name(rgba.get_rgb())
except Exception:
web_color = None
h1, l, s = rgba.tohls()
h2, w, b = rgba.tohwb()
use_upper = ch_settings.get("upper_case_hex", False)
template_vars = {
"palette_type": palette_type,
"palette_name": palette_name,
"color": rgba.get_rgb().upper() if use_upper else rgba.get_rgb().lower(),
"alpha_hex": rgba.get_rgba().upper()[-2:] if use_upper else rgba.get_rgba().lower()[-2:],
"color_alpha": rgba.get_rgba(),
"color_ahex": rgba.get_rgba().upper()[1:] if use_upper else rgba.get_rgb().lower()[1:],
"dlevel": ("@%d" % dlevel),
"alpha": alpha,
"current_alpha_hex": calc.alpha_hex if secondary_alpha else 'FF',
"current_alpha": calc.alpha if secondary_alpha else '1',
"rgb_r": rgba.r,
"rgb_g": rgba.g,
"rgb_b": rgba.b,
"hsl_h": util.fmt_float(h1 * 360.0),
"hsl_s": util.fmt_float(s * 100.0),
"hsl_l": util.fmt_float(l * 100.0),
"hwb_h": util.fmt_float(h2 * 360.0),
"hwb_w": util.fmt_float(w * 100.0),
"hwb_b": util.fmt_float(b * 100.0),
"web_color": web_color,
"secondary_alpha": secondary_alpha
}
template_vars['show_web_color'] = web_color and "webcolors" in allowed_colors
template_vars['show_hex_color'] = "hex" in allowed_colors
template_vars['show_hexa_color'] = "hexa" in allowed_colors and not bool(use_hex_argb)
template_vars['show_ahex_color'] = "hexa" in allowed_colors and bool(use_hex_argb)
template_vars['show_rgb_color'] = "rgb" in allowed_colors
template_vars['show_rgba_color'] = "rgba" in allowed_colors
template_vars['show_gray_color'] = "gray" in allowed_colors and util.is_gray(rgba.get_rgb())
template_vars['show_graya_color'] = "graya" in allowed_colors and util.is_gray(rgba.get_rgb())
template_vars['show_hsl_color'] = "hsl" in allowed_colors
template_vars['show_hsla_color'] = "hsla" in allowed_colors
template_vars['show_hwb_color'] = "hwb" in allowed_colors
template_vars['show_hwba_color'] = "hwba" in allowed_colors
if update:
mdpopups.update_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/insert.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS,
template_vars=template_vars,
nl2br=False
)
else:
self.view.settings().set('color_helper.popup_active', True)
self.view.settings().set('color_helper.popup_auto', self.auto)
mdpopups.show_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/insert.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS, location=-1, max_width=1024, max_height=512,
on_navigate=self.on_navigate,
on_hide=self.on_hide,
flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,
template_vars=template_vars,
nl2br=False
)
def show_palettes(self, delete=False, color=None, update=False):
"""Show preview of all palettes."""
show_div = False
s = sublime.load_settings('color_helper.sublime-settings')
show_global_palettes = s.get('enable_global_user_palettes', True)
show_project_palettes = s.get('enable_project_user_palettes', True)
show_favorite_palette = s.get('enable_favorite_palette', True)
show_current_palette = s.get('enable_current_file_palette', True)
        show_picker = s.get('enable_color_picker', True) and self.no_info
palettes = util.get_palettes()
project_palettes = util.get_project_palettes(self.view.window())
template_vars = {
"color": (color if color else '#ffffffff'),
"show_picker_menu": show_picker,
"show_delete_menu": (
not delete and not color and (show_global_palettes or show_project_palettes or show_favorite_palette)
),
"back_target": "__info__" if (not self.no_info and not delete) or color else "__palettes__",
"show_delete_ui": delete,
"show_new_ui": bool(color),
"show_favorite_palette": show_favorite_palette,
"show_current_palette": show_current_palette,
"show_global_palettes": show_global_palettes and len(palettes),
"show_project_palettes": show_project_palettes and len(project_palettes)
}
if show_favorite_palette:
favs = util.get_favs()
if len(favs['colors']) or color:
show_div = True
template_vars['favorite_palette'] = (
self.format_palettes(favs['colors'], favs['name'], '__special__', delete=delete, color=color)
)
if show_current_palette:
current_colors = self.view.settings().get('color_helper.file_palette', [])
if not delete and not color and len(current_colors):
show_div = True
template_vars['current_palette'] = (
self.format_palettes(current_colors, "Current Colors", '__special__', delete=delete, color=color)
)
if show_global_palettes and len(palettes):
if show_div:
template_vars['show_separator'] = True
show_div = False
global_palettes = []
for palette in palettes:
show_div = True
name = palette.get("name")
global_palettes.append(
self.format_palettes(
palette.get('colors', []), name, '__global__', palette.get('caption'),
delete=delete,
color=color
)
)
template_vars['global_palettes'] = global_palettes
if show_project_palettes and len(project_palettes):
if show_div:
show_div = False
template_vars['show_project_separator'] = True
            # Accumulate formatted entries separately so we don't clobber the list
            # of project palettes we are iterating over.
            formatted_project_palettes = []
            for palette in project_palettes:
                name = palette.get("name")
                formatted_project_palettes.append(
                    self.format_palettes(
                        palette.get('colors', []), name, '__project__', palette.get('caption'),
                        delete=delete,
                        color=color
                    )
                )
            template_vars['project_palettes'] = formatted_project_palettes
if update:
mdpopups.update_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/palettes.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS,
template_vars=template_vars,
nl2br=False
)
else:
self.view.settings().set('color_helper.popup_active', True)
self.view.settings().set('color_helper.popup_auto', self.auto)
mdpopups.show_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/palettes.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS, location=-1, max_width=1024, max_height=512,
on_navigate=self.on_navigate,
on_hide=self.on_hide,
flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,
template_vars=template_vars,
nl2br=False
)
def show_colors(self, palette_type, palette_name, delete=False, update=False):
"""Show colors under the given palette."""
target = None
current = False
if palette_type == "__special__":
if palette_name == "Current Colors":
current = True
target = {
"name": palette_name,
"colors": self.view.settings().get('color_helper.file_palette', [])
}
elif palette_name == "Favorites":
target = util.get_favs()
elif palette_type == "__global__":
for palette in util.get_palettes():
if palette_name == palette['name']:
target = palette
elif palette_type == "__project__":
for palette in util.get_project_palettes(self.view.window()):
if palette_name == palette['name']:
target = palette
if target is not None:
template_vars = {
"delete": delete,
'show_delete_menu': not delete and not current,
"back": '__colors__' if delete else '__palettes__',
"palette_type": palette_type,
"palette_name": target["name"],
"colors": self.format_colors(target['colors'], target['name'], palette_type, delete)
}
if update:
mdpopups.update_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/colors.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS,
template_vars=template_vars,
nl2br=False
)
else:
self.view.settings().set('color_helper.popup_active', True)
self.view.settings().set('color_helper.popup_auto', self.auto)
mdpopups.show_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/colors.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS, location=-1, max_width=1024, max_height=512,
on_navigate=self.on_navigate,
on_hide=self.on_hide,
flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,
template_vars=template_vars,
nl2br=False
)
def get_cursor_color(self):
"""Get cursor color."""
color = None
alpha = None
alpha_dec = None
sels = self.view.sel()
if (len(sels) == 1 and sels[0].size() == 0):
point = sels[0].begin()
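            # Scan only a small window of text around the caret, clamped to the
            # visible region, for a color that is allowed by the current rules.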
visible = self.view.visible_region()
start = point - 50
end = point + 50
if start < visible.begin():
start = visible.begin()
if end > visible.end():
end = visible.end()
bfr = self.view.substr(sublime.Region(start, end))
ref = point - start
rules = util.get_rules(self.view)
use_hex_argb = rules.get("use_hex_argb", False) if rules else False
allowed_colors = rules.get('allowed_colors', []) if rules else util.ALL
for m in util.COLOR_RE.finditer(bfr):
if ref >= m.start(0) and ref < m.end(0):
if m.group('hex_compressed') and 'hex_compressed' not in allowed_colors:
continue
elif m.group('hexa_compressed') and 'hexa_compressed' not in allowed_colors:
continue
elif m.group('hex') and 'hex' not in allowed_colors:
continue
elif m.group('hexa') and 'hexa' not in allowed_colors:
continue
elif m.group('rgb') and 'rgb' not in allowed_colors:
continue
elif m.group('rgba') and 'rgba' not in allowed_colors:
continue
elif m.group('gray') and 'gray' not in allowed_colors:
continue
elif m.group('graya') and 'graya' not in allowed_colors:
continue
elif m.group('hsl') and 'hsl' not in allowed_colors:
continue
elif m.group('hsla') and 'hsla' not in allowed_colors:
continue
elif m.group('hwb') and 'hwb' not in allowed_colors:
continue
elif m.group('hwba') and 'hwba' not in allowed_colors:
continue
elif m.group('webcolors') and 'webcolors' not in allowed_colors:
continue
color, alpha, alpha_dec = util.translate_color(m, bool(use_hex_argb))
break
return color, alpha, alpha_dec
def show_color_info(self, update=False):
"""Show the color under the cursor."""
color, alpha, alpha_dec = self.get_cursor_color()
template_vars = {}
if color is not None:
if alpha is not None:
color += alpha
            # format_info() fills template_vars in place; its return value is not used.
            self.format_info(color.lower(), template_vars, alpha_dec)
if update:
mdpopups.update_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/info.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS,
template_vars=template_vars,
nl2br=False
)
else:
self.view.settings().set('color_helper.popup_active', True)
self.view.settings().set('color_helper.popup_auto', self.auto)
mdpopups.show_popup(
self.view,
sublime.load_resource('Packages/ColorHelper/panels/info.html'),
wrapper_class="color-helper content",
css=util.ADD_CSS,
location=-1,
max_width=1024,
max_height=512,
on_navigate=self.on_navigate,
on_hide=self.on_hide,
flags=sublime.COOPERATE_WITH_AUTO_COMPLETE,
template_vars=template_vars,
nl2br=False
)
elif update:
self.view.hide_popup()
def set_sizes(self):
"""Get sizes."""
self.graphic_size = qualify_settings(ch_settings, 'graphic_size', 'medium')
top_pad = self.view.settings().get('line_padding_top', 0)
bottom_pad = self.view.settings().get('line_padding_bottom', 0)
# Sometimes we strangely get None
if top_pad is None:
top_pad = 0
if bottom_pad is None:
bottom_pad = 0
box_height = util.get_line_height(self.view) - int(top_pad + bottom_pad) - 6
sizes = {
"small": (box_height, box_height, box_height * 2),
"medium": (int(box_height * 1.5), int(box_height * 1.5), box_height * 2),
"large": (int(box_height * 2), int(box_height * 2), box_height * 2)
}
self.color_h, self.color_w, self.palette_w = sizes.get(
self.graphic_size,
sizes["medium"]
)
def check_size(self, height):
"""Create checkered size based on height."""
check_size = int((height - (BORDER_SIZE * 2)) / 4)
if check_size < 2:
check_size = 2
return check_size
def run(self, edit, mode, palette_name=None, color=None, auto=False):
"""Run the specified tooltip."""
self.set_sizes()
s = sublime.load_settings('color_helper.sublime-settings')
use_color_picker_package = s.get('use_color_picker_package', False)
self.color_picker_package = use_color_picker_package and util.color_picker_available()
self.no_info = True
self.no_palette = True
self.auto = auto
if mode == "palette":
self.no_palette = False
if palette_name is not None:
self.show_colors(palette_name)
else:
self.show_palettes()
elif mode == "color_picker":
self.no_info = True
color, alpha = self.get_cursor_color()[:-1]
if color is not None:
if alpha is not None:
color += alpha
else:
color = '#ffffffff'
self.color_picker(color)
elif mode == "color_picker_result":
self.insert_color(color, picker=True)
elif mode == "info":
self.no_info = False
self.no_palette = False
self.show_color_info()
def is_enabled(self, mode, palette_name=None, color=None, auto=False):
"""Check if command is enabled."""
s = sublime.load_settings('color_helper.sublime-settings')
return bool(
(mode == "info" and self.get_cursor_color()[0]) or
(
mode == "palette" and (
s.get('enable_global_user_palettes', True) or
s.get('enable_project_user_palettes', True) or
s.get('enable_favorite_palette', True) or
s.get('enable_current_file_palette', True) or
s.get('enable_project_palette', True)
)
) or
mode not in ("info", "palette")
)
class ColorHelperFileIndexCommand(sublime_plugin.TextCommand):
"""Color Helper file index command."""
def run(self, edit):
"""Run the command."""
rules = util.get_rules(self.view)
if rules and util.get_scope(self.view, rules, skip_sel_check=True):
if ch_file_thread is None or not ch_file_thread.is_alive():
start_file_index(self.view)
else:
sublime.error_message("File indexer is already running!")
else:
sublime.error_message('Cannot index colors in this file!')
def is_enabled(self):
"""Check if command is enabled."""
s = sublime.load_settings('color_helper.sublime-settings')
return s.get('enable_current_file_palette', True)
###########################
# Threading
###########################
class ChPreview(object):
"""Color Helper preview with phantoms."""
def __init__(self):
"""Setup."""
self.previous_region = sublime.Region(0, 0)
def on_navigate(self, href, view):
"""Handle color box click."""
view.sel().clear()
previews = view.settings().get('color_helper.preview_meta', {})
for k, v in previews.items():
if href == v[5]:
phantoms = view.query_phantom(v[4])
if phantoms:
pt = phantoms[0].begin()
view.sel().add(sublime.Region(int(pt) if preview_is_on_left() else int(pt) - int(v[1])))
view.settings().set('color_helper.no_auto', True)
view.run_command('color_helper', {"mode": "info"})
break
def do_search(self, view, force=False):
"""Perform the search for the highlighted word."""
# Since the plugin has been reloaded, force update.
global reload_flag
if reload_flag:
reload_flag = False
force = True
# Calculate size of preview boxes
settings = view.settings()
size_offset = int(qualify_settings(ch_settings, 'inline_preview_offset', 0))
top_pad = view.settings().get('line_padding_top', 0)
bottom_pad = view.settings().get('line_padding_bottom', 0)
# Sometimes we strangely get None
if top_pad is None:
top_pad = 0
if bottom_pad is None:
bottom_pad = 0
old_box_height = int(settings.get('color_helper.box_height', 0))
box_height = util.get_line_height(view) - int(top_pad + bottom_pad) + size_offset
check_size = int((box_height - 4) / 4)
current_color_scheme = settings.get('color_scheme')
if check_size < 2:
check_size = 2
        # If the desired preview box size or color scheme differs from the current one,
        # we need to rebuild the preview boxes.
if old_box_height != box_height or current_color_scheme != settings.get('color_helper.color_scheme', ''):
self.erase_phantoms(view)
settings.set('color_helper.color_scheme', current_color_scheme)
settings.set('color_helper.box_height', box_height)
settings.set('color_helper.preview_meta', {})
force = True
# If we don't need to force previews,
# quit if visible region is the same as last time
visible_region = view.visible_region()
if not force and self.previous_region == visible_region:
return
self.previous_region = visible_region
# Get the current preview positions so we don't insert doubles
preview = settings.get('color_helper.preview_meta', {})
# Get the rules and use them to get the needed scopes.
# The scopes will be used to get the searchable regions.
rules = util.get_rules(view)
scope = util.get_scope(view, rules, skip_sel_check=True)
source = []
if scope:
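            # Clip each scoped region to the visible region (plus a small margin)
            # so that only on-screen text is scanned for colors.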
for r in view.find_by_selector(scope):
if r.end() < visible_region.begin():
continue
if r.begin() > visible_region.end():
continue
if r.begin() < visible_region.begin():
start = max(visible_region.begin() - 20, 0)
if r.end() > visible_region.end():
end = min(visible_region.end() + 20, view.size())
else:
end = r.end()
r = sublime.Region(start, end)
elif r.end() > visible_region.end():
r = sublime.Region(r.begin(), min(visible_region.end() + 20, view.size()))
source.append(r)
else:
# Nothing to search for
self.erase_phantoms(view)
if source:
# See what colors are allowed
self.allowed_colors = set(rules.get('allowed_colors', []))
use_hex_argb = rules.get('use_hex_argb', False)
# Find the colors
colors = []
for src in source:
text = view.substr(src)
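                # For each allowed color match, build a clickable preview box and record
                # enough metadata to validate the phantom incrementally on later edits.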
for m in util.COLOR_RE.finditer(text):
src_start = src.begin() + m.start(0)
src_end = src.begin() + m.end(0)
position_on_left = preview_is_on_left()
pt = src_start if position_on_left else src_end
if str(pt) in preview:
continue
elif not visible_region.contains(sublime.Region(src.begin() + m.start(0), src.begin() + m.end(0))):
continue
elif m.group('hex_compressed'):
if not self.color_okay('hex_compressed'):
continue
color_type = 'hex_compressed'
elif m.group('hexa_compressed'):
if not self.color_okay('hexa_compressed'):
continue
color_type = 'hexa_compressed'
elif m.group('hex'):
if not self.color_okay('hex'):
continue
color_type = 'hex'
elif m.group('hexa'):
if not self.color_okay('hexa'):
continue
color_type = 'hexa'
elif m.group('rgb'):
if not self.color_okay('rgb'):
continue
color_type = 'rgb'
elif m.group('rgba'):
if not self.color_okay('rgba'):
continue
color_type = 'rgba'
elif m.group('gray'):
if not self.color_okay('gray'):
continue
color_type = 'gray'
elif m.group('graya'):
if not self.color_okay('graya'):
continue
color_type = 'graya'
elif m.group('hsl'):
if not self.color_okay('hsl'):
continue
color_type = 'hsl'
elif m.group('hsla'):
if not self.color_okay('hsla'):
continue
color_type = 'hsla'
elif m.group('hwb'):
if not self.color_okay('hwb'):
continue
color_type = 'hwb'
elif m.group('hwba'):
if not self.color_okay('hwba'):
continue
color_type = 'hwba'
elif m.group('webcolors'):
if not self.color_okay('webcolors'):
continue
color_type = 'webcolors'
else:
continue
color, alpha, alpha_dec = util.translate_color(m, use_hex_argb)
color += alpha if alpha is not None else 'ff'
no_alpha_color = color[:-2] if len(color) > 7 else color
scope = view.scope_name(pt)
start_scope = view.scope_name(src_start)
end_scope = view.scope_name(src_end - 1)
rgba = RGBA(mdpopups.scope2style(view, scope)['background'])
rgba.brightness(1.1 if rgba.get_luminance() <= 127 else .9)
preview_id = str(time())
color = '<style>html, body {margin: 0; padding:0;}</style><a href="%s">%s</a>' % (
preview_id,
mdpopups.color_box(
[no_alpha_color, color], rgba.get_rgb(),
height=box_height, width=box_height,
border_size=PREVIEW_BORDER_SIZE, check_size=check_size
)
)
colors.append(
(
color, pt, hash(m.group(0)), len(m.group(0)),
color_type, hash(start_scope + ':' + end_scope),
preview_id
)
)
self.add_phantoms(view, colors, preview)
settings.set('color_helper.preview_meta', preview)
# The phantoms may have altered the viewable region,
# so set previous region to the current viewable region
self.previous_region = sublime.Region(self.previous_region.begin(), view.visible_region().end())
def add_phantoms(self, view, colors, preview):
"""Add phantoms."""
for color in colors:
pid = view.add_phantom(
'color_helper',
sublime.Region(color[1]),
color[0],
0,
on_navigate=lambda href, view=view: self.on_navigate(href, view)
)
preview[str(color[1])] = [color[2], color[3], color[4], color[5], pid, color[6]]
def reset_previous(self):
"""Reset previous region."""
self.previous_region = sublime.Region(0)
def erase_phantoms(self, view, incremental=False):
"""Erase phantoms."""
altered = False
if incremental:
            # Edits can potentially move the position of all the previews.
            # We need to grab each phantom by its id and then apply the color regex
            # on the phantom range +/- some extra characters so we can catch word boundaries.
            # Clear the phantom if any of the following:
# - Phantom can't be found
# - regex doesn't match
# - regex group doesn't match color type
# - match doesn't start at the same point
# - hash result is wrong
# Update preview meta data with new results
old_preview = view.settings().get('color_helper.preview_meta', {})
position_on_left = preview_is_on_left()
preview = {}
for k, v in old_preview.items():
phantoms = view.query_phantom(v[4])
pt = phantoms[0].begin() if phantoms else None
if pt is None:
view.erase_phantom_by_id(v[4])
altered = True
else:
color_start = pt if position_on_left else pt - v[1]
color_end = pt + v[1] if position_on_left else pt
approx_color_start = color_start - 5
if approx_color_start < 0:
approx_color_start = 0
approx_color_end = color_end + 5
if approx_color_end > view.size():
approx_color_end = view.size()
text = view.substr(sublime.Region(approx_color_start, approx_color_end))
m = util.COLOR_RE.search(text)
if (
not m or
not m.group(v[2]) or
approx_color_start + m.start(0) != color_start or
hash(m.group(0)) != v[0] or
v[3] != hash(view.scope_name(color_start) + ':' + view.scope_name(color_end - 1)) or
str(pt) in preview
):
view.erase_phantom_by_id(v[4])
altered = True
else:
preview[str(pt)] = v
view.settings().set('color_helper.preview_meta', preview)
else:
# Obliterate!
view.erase_phantoms('color_helper')
view.settings().set('color_helper.preview_meta', {})
altered = True
if altered:
self.reset_previous()
def color_okay(self, color_type):
"""Check if color is allowed."""
return color_type in self.allowed_colors
class ChPreviewThread(threading.Thread):
"""Load up defaults."""
def __init__(self):
"""Setup the thread."""
self.reset()
threading.Thread.__init__(self)
def reset(self):
"""Reset the thread variables."""
self.wait_time = 0.12
self.time = time()
self.modified = False
self.ignore_all = False
self.clear = False
self.abort = False
def payload(self, clear=False, force=False):
"""Code to run."""
if clear:
self.modified = False
# Ignore selection and edit events inside the routine
self.ignore_all = True
if ch_preview is not None:
try:
view = sublime.active_window().active_view()
if view:
if not clear:
ch_preview.do_search(view, force)
else:
ch_preview.erase_phantoms(view, incremental=True)
except Exception:
print('ColorHelper: \n' + str(traceback.format_exc()))
self.ignore_all = False
self.time = time()
def kill(self):
"""Kill thread."""
self.abort = True
while self.is_alive():
pass
self.reset()
def run(self):
"""Thread loop."""
while not self.abort:
if not self.ignore_all:
if self.modified is True and (time() - self.time) > self.wait_time:
sublime.set_timeout_async(lambda: self.payload(clear=True), 0)
elif not self.modified:
sublime.set_timeout_async(self.payload, 0)
sleep(0.5)
class ColorHelperListener(sublime_plugin.EventListener):
"""Color Helper listener."""
def on_modified(self, view):
"""Flag that we need to show a tooltip or that we need to add phantoms."""
if self.ignore_event(view):
return
if ch_preview_thread is not None:
now = time()
ch_preview_thread.modified = True
ch_preview_thread.time = now
self.on_selection_modified(view)
def on_selection_modified(self, view):
"""Flag that we need to show a tooltip."""
if self.ignore_event(view):
return
if not ch_thread.ignore_all:
now = time()
ch_thread.modified = True
ch_thread.time = now
def set_file_scan_rules(self, view):
"""Set the scan rules for the current view."""
file_name = view.file_name()
ext = os.path.splitext(file_name)[1].lower() if file_name is not None else None
s = sublime.load_settings('color_helper.sublime-settings')
rules = s.get("color_scanning", [])
syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0]
scan_scopes = []
incomplete_scopes = []
allowed_colors = set()
use_hex_argb = False
compress_hex = False
for rule in rules:
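            # A rule only contributes its scopes and colors when its base-scope,
            # syntax, and extension filters all pass for this view.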
results = []
base_scopes = rule.get("base_scopes", [])
if not base_scopes:
results.append(True)
else:
results.append(False)
for base in rule.get("base_scopes", []):
if view.score_selector(0, base):
results[-1] = True
break
syntax_files = rule.get("syntax_files", [])
syntax_filter = rule.get("syntax_filter", "whitelist")
syntax_okay = bool(
not syntax_files or (
(syntax_filter == "whitelist" and syntax in syntax_files) or
(syntax_filter == "blacklist" and syntax not in syntax_files)
)
)
results.append(syntax_okay)
extensions = [e.lower() for e in rule.get("extensions", [])]
results.append(True if not extensions or (ext is not None and ext in extensions) else False)
if False not in results:
scan_scopes += rule.get("scan_scopes", [])
incomplete_scopes += rule.get("scan_completion_scopes", [])
for color in rule.get("allowed_colors", []):
if color == "css3":
for c in util.CSS3:
allowed_colors.add(c)
elif color == "css4":
for c in util.CSS4:
allowed_colors.add(c)
elif color == "all":
for c in util.ALL:
allowed_colors.add(c)
else:
allowed_colors.add(color)
if not use_hex_argb and rule.get("use_hex_argb", False):
use_hex_argb = True
if not compress_hex and rule.get("compress_hex_output", False):
compress_hex = True
if scan_scopes or incomplete_scopes:
view.settings().set(
'color_helper.scan',
{
"enabled": True,
"scan_scopes": scan_scopes,
"scan_completion_scopes": incomplete_scopes,
"allowed_colors": list(allowed_colors),
"use_hex_argb": use_hex_argb,
"compress_hex_output": compress_hex,
"current_ext": ext,
"current_syntax": syntax,
"last_updated": ch_last_updated
}
)
else:
view.settings().set(
'color_helper.scan',
{
"enabled": False,
"current_ext": ext,
"current_syntax": syntax,
"last_updated": ch_last_updated
}
)
view.settings().set('color_helper.file_palette', [])
if not unloading and ch_preview_thread is not None:
view.settings().add_on_change(
'color_helper.reload', lambda view=view: self.on_view_settings_change(view)
)
def should_update(self, view):
"""Check if an update should be performed."""
force_update = False
color_palette_initialized = view.settings().get('color_helper.file_palette', None) is not None
rules = view.settings().get('color_helper.scan', None)
if not color_palette_initialized:
force_update = True
elif rules:
last_updated = rules.get('last_updated', None)
if last_updated is None or last_updated < ch_last_updated:
force_update = True
file_name = view.file_name()
ext = os.path.splitext(file_name)[1].lower() if file_name is not None else None
old_ext = rules.get('current_ext')
if ext is None or ext != old_ext:
force_update = True
syntax = os.path.splitext(view.settings().get('syntax').replace('Packages/', '', 1))[0]
old_syntax = rules.get("current_syntax")
if old_syntax is None or old_syntax != syntax:
force_update = True
else:
force_update = True
return force_update
def on_activated(self, view):
"""Run current file scan and/or project scan if not run before."""
if self.ignore_event(view):
if view.settings().get('color_helper.preview_meta', {}):
view.settings().erase('color_helper.preview_meta')
return
if self.should_update(view):
self.set_file_scan_rules(view)
s = sublime.load_settings('color_helper.sublime-settings')
show_current_palette = s.get('enable_current_file_palette', True)
view.settings().set('color_helper.file_palette', [])
if show_current_palette:
start_file_index(view)
def on_view_settings_change(self, view):
"""Post text command event to catch syntax setting."""
if not unloading:
settings = view.settings()
rules = settings.get('color_helper.scan', None)
if rules:
syntax = os.path.splitext(settings.get('syntax').replace('Packages/', '', 1))[0]
old_syntax = rules.get("current_syntax")
if old_syntax is None or old_syntax != syntax:
self.on_activated(view)
if settings.get('color_scheme') != settings.get('color_helper.color_scheme', ''):
settings.erase('color_helper.preview_meta')
view.erase_phantoms('color_helper')
def on_post_save(self, view):
"""Run current file scan and/or project scan on save."""
if self.ignore_event(view):
if view.settings().get('color_helper.preview_meta', {}):
view.settings().erase('color_helper.preview_meta')
return
s = sublime.load_settings('color_helper.sublime-settings')
show_current_palette = s.get('enable_current_file_palette', True)
if self.should_update(view):
view.settings().erase('color_helper.preview_meta')
view.erase_phantoms('color_helper')
self.set_file_scan_rules(view)
if show_current_palette:
start_file_index(view)
def on_clone(self, view):
"""Run current file scan on clone."""
if self.ignore_event(view):
return
s = sublime.load_settings('color_helper.sublime-settings')
show_current_palette = s.get('enable_current_file_palette', True)
if show_current_palette:
start_file_index(view)
def ignore_event(self, view):
"""Check if event should be ignored."""
return view.settings().get('is_widget', False) or ch_thread is None
class ChFileIndexThread(threading.Thread):
"""Load up defaults."""
def __init__(self, view, source, allowed_colors, use_hex_argb):
"""Setup the thread."""
self.abort = False
self.view = view
self.use_hex_argb = use_hex_argb
self.allowed_colors = set(allowed_colors) if not isinstance(allowed_colors, set) else allowed_colors
self.webcolor_names = re.compile(
r'\b(%s)\b' % '|'.join(
[name for name in csscolors.name2hex_map.keys()]
)
)
self.source = source
threading.Thread.__init__(self)
def update_index(self, view, colors):
"""Code to run."""
try:
colors.sort()
view.settings().set('color_helper.file_palette', colors)
util.debug('Colors:\n', colors)
s = sublime.load_settings('color_helper.sublime-settings')
if s.get('show_index_status', True):
sublime.status_message('File color index complete...')
except Exception:
pass
def kill(self):
"""Kill thread."""
self.abort = True
while self.is_alive():
pass
def run(self):
"""Thread loop."""
if self.source:
self.index_colors()
def color_okay(self, color_type):
"""Check if color is allowed."""
return color_type in self.allowed_colors
def index_colors(self):
"""Index colors in file."""
colors = set()
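        # Collect every allowed color found in the gathered source; colors with a
        # non-opaque alpha may keep an '@<n>' suffix recording the alpha precision.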
for m in util.COLOR_RE.finditer(self.source):
if self.abort:
break
if m.group('hex_compressed') and not self.color_okay('hex_compressed'):
continue
elif m.group('hexa_compressed') and not self.color_okay('hexa_compressed'):
continue
elif m.group('hex') and not self.color_okay('hex'):
continue
elif m.group('hexa') and not self.color_okay('hexa'):
continue
elif m.group('rgb') and not self.color_okay('rgb'):
continue
elif m.group('rgba') and not self.color_okay('rgba'):
continue
elif m.group('gray') and not self.color_okay('gray'):
continue
elif m.group('graya') and not self.color_okay('graya'):
continue
elif m.group('hsl') and not self.color_okay('hsl'):
continue
elif m.group('hsla') and not self.color_okay('hsla'):
continue
elif m.group('hwb') and not self.color_okay('hwb'):
continue
elif m.group('hwba') and not self.color_okay('hwba'):
continue
elif m.group('webcolors') and not self.color_okay('webcolors'):
continue
color, alpha, alpha_dec = util.translate_color(m, self.use_hex_argb)
color += alpha if alpha is not None else 'ff'
if not color.lower().endswith('ff'):
parts = alpha_dec.split('.')
dlevel = len(parts[1]) if len(parts) > 1 else None
if dlevel is not None:
color += '@%d' % dlevel
colors.add(color)
if not self.abort:
sublime.set_timeout(
lambda view=self.view, colors=list(colors): self.update_index(view, colors), 0
)
class ChThread(threading.Thread):
"""Load up defaults."""
def __init__(self):
"""Setup the thread."""
self.reset()
threading.Thread.__init__(self)
def reset(self):
"""Reset the thread variables."""
self.wait_time = 0.12
self.time = time()
self.modified = False
self.ignore_all = False
self.abort = False
self.save_palettes = False
def color_okay(self, allowed_colors, color_type):
"""Check if color is allowed."""
return color_type in allowed_colors
def payload(self):
"""Code to run."""
self.modified = False
self.ignore_all = True
window = sublime.active_window()
view = window.active_view()
if view.settings().get('color_helper.no_auto', False):
view.settings().set('color_helper.no_auto', False)
self.ignore_all = False
self.time = time()
return
s = sublime.load_settings('color_helper.sublime-settings')
auto_popup = s.get('auto_popup', True)
if view is not None and auto_popup:
info = False
execute = False
sels = view.sel()
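            # Auto-popup only applies when the caret is a single empty selection inside
            # a scanned scope (or a completion scope for partially typed colors).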
rules = util.get_rules(view)
scope = util.get_scope(view, rules)
insert_scope = util.get_scope_completion(view, rules)
scope_okay = (
scope and
len(sels) == 1 and sels[0].size() == 0 and
view.score_selector(sels[0].begin(), scope)
)
insert_scope_okay = (
scope_okay or (
insert_scope and
len(sels) == 1 and sels[0].size() == 0 and
view.score_selector(sels[0].begin(), insert_scope)
)
)
if scope_okay or insert_scope_okay:
allowed_colors = rules.get('allowed_colors', [])
point = sels[0].begin()
visible = view.visible_region()
start = point - 50
end = point + 50
if start < visible.begin():
start = visible.begin()
if end > visible.end():
end = visible.end()
bfr = view.substr(sublime.Region(start, end))
ref = point - start
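                # Caret inside a complete color shows the info popup; caret right after
                # a color prefix such as '#' or 'rgb(' shows the palette popup instead.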
for m in util.COLOR_ALL_RE.finditer(bfr):
if ref >= m.start(0) and ref < m.end(0):
if (
(m.group('hexa_compressed') and self.color_okay(allowed_colors, 'hexa_compressed')) or
(m.group('hex_compressed') and self.color_okay(allowed_colors, 'hex_compressed')) or
(m.group('hexa') and self.color_okay(allowed_colors, 'hexa')) or
(m.group('hex') and self.color_okay(allowed_colors, 'hex')) or
(m.group('rgb') and self.color_okay(allowed_colors, 'rgb')) or
(m.group('rgba') and self.color_okay(allowed_colors, 'rgba')) or
(m.group('gray') and self.color_okay(allowed_colors, 'gray')) or
(m.group('graya') and self.color_okay(allowed_colors, 'graya')) or
(m.group('hsl') and self.color_okay(allowed_colors, 'hsl')) or
(m.group('hsla') and self.color_okay(allowed_colors, 'hsla')) or
(m.group('hwb') and self.color_okay(allowed_colors, 'hwb')) or
(m.group('hwba') and self.color_okay(allowed_colors, 'hwba')) or
(m.group('webcolors') and self.color_okay(allowed_colors, 'webcolors'))
):
info = True
execute = True
break
elif ref == m.end(0):
if (
(
m.group('hash') and (
self.color_okay(allowed_colors, 'hex') or
self.color_okay(allowed_colors, 'hexa') or
self.color_okay(allowed_colors, 'hex_compressed') or
self.color_okay(allowed_colors, 'hexa_compressed')
)
) or
(m.group('rgb_open') and self.color_okay(allowed_colors, 'rgb')) or
(m.group('rgba_open') and self.color_okay(allowed_colors, 'rgba')) or
(m.group('hsl_open') and self.color_okay(allowed_colors, 'hsl')) or
(m.group('hsla_open') and self.color_okay(allowed_colors, 'hsla')) or
(
m.group('hwb_open') and
(
self.color_okay(allowed_colors, 'hwb') or
self.color_okay(allowed_colors, 'hwba')
)
) or
(
m.group('gray_open') and (
self.color_okay(allowed_colors, 'gray') or
self.color_okay(allowed_colors, 'graya')
)
)
):
execute = True
break
if execute:
view.run_command('color_helper', {"mode": "palette" if not info else "info", "auto": True})
if (
not execute and
view.settings().get('color_helper.popup_active', False) and
view.settings().get('color_helper.popup_auto', False)
):
mdpopups.hide_popup(view)
self.ignore_all = False
self.time = time()
def kill(self):
"""Kill thread."""
self.abort = True
while self.is_alive():
pass
self.reset()
def run(self):
"""Thread loop."""
while not self.abort:
if self.modified is True and time() - self.time > self.wait_time:
sublime.set_timeout(lambda: self.payload(), 0)
sleep(0.5)
###########################
# Plugin Initialization
###########################
def settings_reload():
"""Handle settings reload event."""
global ch_last_updated
global reload_flag
reload_flag = True
ch_last_updated = time()
setup_previews()
def setup_previews():
"""Setup previews."""
global ch_preview_thread
global ch_preview
global unloading
unloading = True
if ch_preview_thread is not None:
ch_preview_thread.kill()
for w in sublime.windows():
for v in w.views():
v.settings().clear_on_change('color_helper.reload')
v.settings().erase('color_helper.preview_meta')
v.erase_phantoms('color_helper')
unloading = False
if ch_settings.get('inline_previews', False):
ch_preview = ChPreview()
ch_preview_thread = ChPreviewThread()
ch_preview_thread.start()
def plugin_loaded():
"""Setup plugin."""
global ch_settings
global ch_thread
global ch_file_thread
global ch_last_updated
# Setup settings
ch_settings = sublime.load_settings('color_helper.sublime-settings')
# Setup reload events
ch_settings.clear_on_change('reload')
ch_settings.add_on_change('reload', settings_reload)
settings_reload()
# Start event thread
if ch_thread is not None:
ch_thread.kill()
ch_thread = ChThread()
ch_thread.start()
setup_previews()
def plugin_unloaded():
"""Kill threads."""
global unloading
unloading = True
if ch_thread is not None:
ch_thread.kill()
if ch_file_thread is not None:
ch_file_thread.kill()
if ch_preview_thread is not None:
ch_preview_thread.kill()
# Clear view events
ch_settings.clear_on_change('reload')
for w in sublime.windows():
for v in w.views():
v.settings().clear_on_change('color_helper.reload')
unloading = False
| [
"ColorHelper.color_helper_util.get_scope_completion",
"ColorHelper.color_helper_util.compress_hex",
"ColorHelper.color_helper_util.get_palettes",
"ColorHelper.color_helper_util.get_scope",
"ColorHelper.color_helper_util.color_picker_available",
"time.sleep",
"sublime.Region",
"mdpopups.hide_popup",
"ColorHelper.color_helper_util.get_favs",
"sublime.windows",
"threading.Thread.__init__",
"sublime.active_window",
"ColorHelper.color_helper_util.get_line_height",
"ColorHelper.color_helper_util.COLOR_ALL_RE.finditer",
"ColorHelper.lib.csscolors.name2hex_map.keys",
"sublime.error_message",
"ColorHelper.color_helper_util.COLOR_RE.finditer",
"os.path.splitext",
"ColorHelper.color_helper_util.debug",
"sublime.set_timeout",
"html.parser.HTMLParser",
"sublime.load_resource",
"mdpopups.color_box",
"ColorHelper.color_helper_insert.InsertCalc",
"ColorHelper.color_helper_util.translate_color",
"mdpopups.scope2style",
"time.time",
"ColorHelper.lib.rgba.RGBA",
"ColorHelper.color_helper_insert.PickerInsertCalc",
"traceback.format_exc",
"ColorHelper.color_helper_util.save_palettes",
"ColorHelper.multiconf.get",
"sublime.status_message",
"sublime.set_timeout_async",
"ColorHelper.color_helper_util.get_rules",
"ColorHelper.color_helper_util.COLOR_RE.search",
"ColorHelper.color_helper_util.fmt_float",
"sublime.platform",
"sublime.load_settings"
] | [((2382, 2394), 'html.parser.HTMLParser', 'HTMLParser', ([], {}), '()\n', (2392, 2394), False, 'from html.parser import HTMLParser\n'), ((76299, 76305), 'time.time', 'time', ([], {}), '()\n', (76303, 76305), False, 'from time import time, sleep\n'), ((76556, 76573), 'sublime.windows', 'sublime.windows', ([], {}), '()\n', (76571, 76573), False, 'import sublime\n'), ((77143, 77197), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (77164, 77197), False, 'import sublime\n'), ((77861, 77878), 'sublime.windows', 'sublime.windows', ([], {}), '()\n', (77876, 77878), False, 'import sublime\n'), ((1190, 1210), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['view'], {}), '(view)\n', (1204, 1210), True, 'import ColorHelper.color_helper_util as util\n'), ((5093, 5099), 'time.time', 'time', ([], {}), '()\n', (5097, 5099), False, 'from time import time, sleep\n'), ((10367, 10402), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['favs'], {'favs': '(True)'}), '(favs, favs=True)\n', (10385, 10402), True, 'import ColorHelper.color_helper_util as util\n'), ((10679, 10714), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['favs'], {'favs': '(True)'}), '(favs, favs=True)\n', (10697, 10714), True, 'import ColorHelper.color_helper_util as util\n'), ((19212, 19223), 'ColorHelper.lib.rgba.RGBA', 'RGBA', (['color'], {}), '(color)\n', (19216, 19223), False, 'from ColorHelper.lib.rgba import RGBA\n'), ((19241, 19266), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (19255, 19266), True, 'import ColorHelper.color_helper_util as util\n'), ((20936, 20962), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(h1 * 360.0)'], {}), '(h1 * 360.0)\n', (20950, 20962), True, 'import ColorHelper.color_helper_util as util\n'), ((20996, 21021), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(s * 100.0)'], {}), '(s * 100.0)\n', (21010, 21021), True, 'import ColorHelper.color_helper_util as util\n'), ((21055, 21080), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(l * 100.0)'], {}), '(l * 100.0)\n', (21069, 21080), True, 'import ColorHelper.color_helper_util as util\n'), ((21114, 21140), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(h2 * 360.0)'], {}), '(h2 * 360.0)\n', (21128, 21140), True, 'import ColorHelper.color_helper_util as util\n'), ((21174, 21199), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(w * 100.0)'], {}), '(w * 100.0)\n', (21188, 21199), True, 'import ColorHelper.color_helper_util as util\n'), ((21233, 21258), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(b * 100.0)'], {}), '(b * 100.0)\n', (21247, 21258), True, 'import ColorHelper.color_helper_util as util\n'), ((21272, 21326), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (21293, 21326), False, 'import sublime\n'), ((29011, 29065), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (29032, 29065), False, 'import sublime\n'), ((29373, 29427), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (29394, 29427), False, 'import sublime\n'), ((29521, 29540), 'ColorHelper.color_helper_util.get_palettes', 
'util.get_palettes', ([], {}), '()\n', (29538, 29540), True, 'import ColorHelper.color_helper_util as util\n'), ((40182, 40237), 'ColorHelper.multiconf.get', 'qualify_settings', (['ch_settings', '"""graphic_size"""', '"""medium"""'], {}), "(ch_settings, 'graphic_size', 'medium')\n", (40198, 40237), True, 'from ColorHelper.multiconf import get as qualify_settings\n'), ((41385, 41439), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (41406, 41439), False, 'import sublime\n'), ((42586, 42640), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (42607, 42640), False, 'import sublime\n'), ((43368, 43393), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (43382, 43393), True, 'import ColorHelper.color_helper_util as util\n'), ((43846, 43900), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (43867, 43900), False, 'import sublime\n'), ((44178, 44198), 'sublime.Region', 'sublime.Region', (['(0)', '(0)'], {}), '(0, 0)\n', (44192, 44198), False, 'import sublime\n'), ((46924, 46944), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['view'], {}), '(view)\n', (46938, 46944), True, 'import ColorHelper.color_helper_util as util\n'), ((46961, 47009), 'ColorHelper.color_helper_util.get_scope', 'util.get_scope', (['view', 'rules'], {'skip_sel_check': '(True)'}), '(view, rules, skip_sel_check=True)\n', (46975, 47009), True, 'import ColorHelper.color_helper_util as util\n'), ((53453, 53470), 'sublime.Region', 'sublime.Region', (['(0)'], {}), '(0)\n', (53467, 53470), False, 'import sublime\n'), ((56422, 56453), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (56447, 56453), False, 'import threading\n'), ((56568, 56574), 'time.time', 'time', ([], {}), '()\n', (56572, 56574), False, 'from time import time, sleep\n'), ((57397, 57403), 'time.time', 'time', ([], {}), '()\n', (57401, 57403), False, 'from time import time, sleep\n'), ((58937, 58991), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (58958, 58991), False, 'import sublime\n'), ((65312, 65366), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (65333, 65366), False, 'import sublime\n'), ((65845, 65899), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (65866, 65899), False, 'import sublime\n'), ((66783, 66814), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (66808, 66814), False, 'import threading\n'), ((67735, 67770), 'ColorHelper.color_helper_util.COLOR_RE.finditer', 'util.COLOR_RE.finditer', (['self.source'], {}), '(self.source)\n', (67757, 67770), True, 'import ColorHelper.color_helper_util as util\n'), ((69781, 69812), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (69806, 69812), False, 'import threading\n'), ((69928, 69934), 'time.time', 'time', ([], {}), '()\n', (69932, 69934), False, 'from time import time, sleep\n'), ((70330, 70353), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (70351, 70353), False, 'import sublime\n'), ((70614, 
70668), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (70635, 70668), False, 'import sublime\n'), ((75666, 75672), 'time.time', 'time', ([], {}), '()\n', (75670, 75672), False, 'from time import time, sleep\n'), ((1249, 1297), 'ColorHelper.color_helper_util.get_scope', 'util.get_scope', (['view', 'rules'], {'skip_sel_check': '(True)'}), '(view, rules, skip_sel_check=True)\n', (1263, 1297), True, 'import ColorHelper.color_helper_util as util\n'), ((5801, 5820), 'ColorHelper.color_helper_util.get_palettes', 'util.get_palettes', ([], {}), '()\n', (5818, 5820), True, 'import ColorHelper.color_helper_util as util\n'), ((6113, 6147), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['color_palettes'], {}), '(color_palettes)\n', (6131, 6147), True, 'import ColorHelper.color_helper_util as util\n'), ((10306, 10321), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (10319, 10321), True, 'import ColorHelper.color_helper_util as util\n'), ((10618, 10633), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (10631, 10633), True, 'import ColorHelper.color_helper_util as util\n'), ((10983, 11043), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper_share.sublime-settings"""'], {}), "('color_helper_share.sublime-settings')\n", (11004, 11043), False, 'import sublime\n'), ((11890, 11915), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (11904, 11915), True, 'import ColorHelper.color_helper_util as util\n'), ((24624, 24649), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (24638, 24649), True, 'import ColorHelper.color_helper_util as util\n'), ((24968, 24993), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (24982, 24993), True, 'import ColorHelper.color_helper_util as util\n'), ((25184, 25198), 'ColorHelper.lib.rgba.RGBA', 'RGBA', (['parts[0]'], {}), '(parts[0])\n', (25188, 25198), False, 'from ColorHelper.lib.rgba import RGBA\n'), ((30435, 30450), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (30448, 30450), True, 'import ColorHelper.color_helper_util as util\n'), ((36580, 36605), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (36594, 36605), True, 'import ColorHelper.color_helper_util as util\n'), ((36791, 36818), 'ColorHelper.color_helper_util.COLOR_RE.finditer', 'util.COLOR_RE.finditer', (['bfr'], {}), '(bfr)\n', (36813, 36818), True, 'import ColorHelper.color_helper_util as util\n'), ((41581, 41610), 'ColorHelper.color_helper_util.color_picker_available', 'util.color_picker_available', ([], {}), '()\n', (41608, 41610), True, 'import ColorHelper.color_helper_util as util\n'), ((43415, 43468), 'ColorHelper.color_helper_util.get_scope', 'util.get_scope', (['self.view', 'rules'], {'skip_sel_check': '(True)'}), '(self.view, rules, skip_sel_check=True)\n', (43429, 43468), True, 'import ColorHelper.color_helper_util as util\n'), ((43704, 43762), 'sublime.error_message', 'sublime.error_message', (['"""Cannot index colors in this file!"""'], {}), "('Cannot index colors in this file!')\n", (43725, 43762), False, 'import sublime\n'), ((45219, 45276), 'ColorHelper.multiconf.get', 'qualify_settings', (['ch_settings', '"""inline_preview_offset"""', '(0)'], {}), "(ch_settings, 'inline_preview_offset', 
0)\n", (45235, 45276), True, 'from ColorHelper.multiconf import get as qualify_settings\n'), ((57944, 57954), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (57949, 57954), False, 'from time import time, sleep\n'), ((58281, 58287), 'time.time', 'time', ([], {}), '()\n', (58285, 58287), False, 'from time import time, sleep\n'), ((58624, 58630), 'time.time', 'time', ([], {}), '()\n', (58628, 58630), False, 'from time import time, sleep\n'), ((63997, 64051), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (64018, 64051), False, 'import sublime\n'), ((67006, 67037), 'ColorHelper.color_helper_util.debug', 'util.debug', (['"""Colors:\n"""', 'colors'], {}), "('Colors:\\n', colors)\n", (67016, 67037), True, 'import ColorHelper.color_helper_util as util\n'), ((67054, 67108), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (67075, 67108), False, 'import sublime\n'), ((69086, 69128), 'ColorHelper.color_helper_util.translate_color', 'util.translate_color', (['m', 'self.use_hex_argb'], {}), '(m, self.use_hex_argb)\n', (69106, 69128), True, 'import ColorHelper.color_helper_util as util\n'), ((70576, 70582), 'time.time', 'time', ([], {}), '()\n', (70580, 70582), False, 'from time import time, sleep\n'), ((70863, 70883), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['view'], {}), '(view)\n', (70877, 70883), True, 'import ColorHelper.color_helper_util as util\n'), ((70904, 70931), 'ColorHelper.color_helper_util.get_scope', 'util.get_scope', (['view', 'rules'], {}), '(view, rules)\n', (70918, 70931), True, 'import ColorHelper.color_helper_util as util\n'), ((70959, 70997), 'ColorHelper.color_helper_util.get_scope_completion', 'util.get_scope_completion', (['view', 'rules'], {}), '(view, rules)\n', (70984, 70997), True, 'import ColorHelper.color_helper_util as util\n'), ((76048, 76058), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (76053, 76058), False, 'from time import time, sleep\n'), ((1469, 1511), 'ColorHelper.color_helper_util.debug', 'util.debug', (['"""Regions to search:\n"""', 'source'], {}), "('Regions to search:\\n', source)\n", (1479, 1511), True, 'import ColorHelper.color_helper_util as util\n'), ((6962, 6997), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['favs'], {'favs': '(True)'}), '(favs, favs=True)\n', (6980, 6997), True, 'import ColorHelper.color_helper_util as util\n'), ((8030, 8063), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['[]'], {'favs': '(True)'}), '([], favs=True)\n', (8048, 8063), True, 'import ColorHelper.color_helper_util as util\n'), ((11484, 11528), 'sublime.set_timeout', 'sublime.set_timeout', (['self.show_color_info', '(0)'], {}), '(self.show_color_info, 0)\n', (11503, 11528), False, 'import sublime\n'), ((13034, 13059), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', (['self.view'], {}), '(self.view)\n', (13048, 13059), True, 'import ColorHelper.color_helper_util as util\n'), ((13346, 13431), 'ColorHelper.color_helper_insert.InsertCalc', 'InsertCalc', (['self.view', 'point', 'target_color', 'convert', 'allowed_colors', 'use_hex_argb'], {}), '(self.view, point, target_color, convert, allowed_colors,\n use_hex_argb)\n', (13356, 13431), False, 'from ColorHelper.color_helper_insert import InsertCalc, PickerInsertCalc\n'), ((15977, 16002), 'ColorHelper.color_helper_util.get_rules', 'util.get_rules', 
(['self.view'], {}), '(self.view)\n', (15991, 16002), True, 'import ColorHelper.color_helper_util as util\n'), ((16114, 16164), 'ColorHelper.color_helper_insert.PickerInsertCalc', 'PickerInsertCalc', (['self.view', 'point', 'allowed_colors'], {}), '(self.view, point, allowed_colors)\n', (16130, 16164), False, 'from ColorHelper.color_helper_insert import InsertCalc, PickerInsertCalc\n'), ((26374, 26400), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(h1 * 360.0)'], {}), '(h1 * 360.0)\n', (26388, 26400), True, 'import ColorHelper.color_helper_util as util\n'), ((26427, 26452), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(s * 100.0)'], {}), '(s * 100.0)\n', (26441, 26452), True, 'import ColorHelper.color_helper_util as util\n'), ((26479, 26504), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(l * 100.0)'], {}), '(l * 100.0)\n', (26493, 26504), True, 'import ColorHelper.color_helper_util as util\n'), ((26531, 26557), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(h2 * 360.0)'], {}), '(h2 * 360.0)\n', (26545, 26557), True, 'import ColorHelper.color_helper_util as util\n'), ((26584, 26609), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(w * 100.0)'], {}), '(w * 100.0)\n', (26598, 26609), True, 'import ColorHelper.color_helper_util as util\n'), ((26636, 26661), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(b * 100.0)'], {}), '(b * 100.0)\n', (26650, 26661), True, 'import ColorHelper.color_helper_util as util\n'), ((32551, 32617), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/palettes.html"""'], {}), "('Packages/ColorHelper/panels/palettes.html')\n", (32572, 32617), False, 'import sublime\n'), ((33031, 33097), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/palettes.html"""'], {}), "('Packages/ColorHelper/panels/palettes.html')\n", (33052, 33097), False, 'import sublime\n'), ((34099, 34118), 'ColorHelper.color_helper_util.get_palettes', 'util.get_palettes', ([], {}), '()\n', (34116, 34118), True, 'import ColorHelper.color_helper_util as util\n'), ((36500, 36526), 'sublime.Region', 'sublime.Region', (['start', 'end'], {}), '(start, end)\n', (36514, 36526), False, 'import sublime\n'), ((40549, 40580), 'ColorHelper.color_helper_util.get_line_height', 'util.get_line_height', (['self.view'], {}), '(self.view)\n', (40569, 40580), True, 'import ColorHelper.color_helper_util as util\n'), ((43620, 43677), 'sublime.error_message', 'sublime.error_message', (['"""File indexer is already running!"""'], {}), "('File indexer is already running!')\n", (43641, 43677), False, 'import sublime\n'), ((45652, 45678), 'ColorHelper.color_helper_util.get_line_height', 'util.get_line_height', (['view'], {}), '(view)\n', (45672, 45678), True, 'import ColorHelper.color_helper_util as util\n'), ((48239, 48267), 'ColorHelper.color_helper_util.COLOR_RE.finditer', 'util.COLOR_RE.finditer', (['text'], {}), '(text)\n', (48261, 48267), True, 'import ColorHelper.color_helper_util as util\n'), ((53095, 53119), 'sublime.Region', 'sublime.Region', (['color[1]'], {}), '(color[1])\n', (53109, 53119), False, 'import sublime\n'), ((67174, 67228), 'sublime.status_message', 'sublime.status_message', (['"""File color index complete..."""'], {}), "('File color index complete...')\n", (67196, 67228), False, 'import sublime\n'), ((72014, 72045), 'ColorHelper.color_helper_util.COLOR_ALL_RE.finditer', 'util.COLOR_ALL_RE.finditer', (['bfr'], {}), '(bfr)\n', (72040, 
72045), True, 'import ColorHelper.color_helper_util as util\n'), ((75588, 75613), 'mdpopups.hide_popup', 'mdpopups.hide_popup', (['view'], {}), '(view)\n', (75607, 75613), False, 'import mdpopups\n'), ((1851, 1905), 'sublime.load_settings', 'sublime.load_settings', (['"""color_helper.sublime-settings"""'], {}), "('color_helper.sublime-settings')\n", (1872, 1905), False, 'import sublime\n'), ((5936, 5996), 'sublime.error_message', 'sublime.error_message', (['"""The name of "%s" is already in use!"""'], {}), '(\'The name of "%s" is already in use!\')\n', (5957, 5996), False, 'import sublime\n'), ((6843, 6858), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (6856, 6858), True, 'import ColorHelper.color_helper_util as util\n'), ((7186, 7205), 'ColorHelper.color_helper_util.get_palettes', 'util.get_palettes', ([], {}), '()\n', (7203, 7205), True, 'import ColorHelper.color_helper_util as util\n'), ((8264, 8283), 'ColorHelper.color_helper_util.get_palettes', 'util.get_palettes', ([], {}), '()\n', (8281, 8283), True, 'import ColorHelper.color_helper_util as util\n'), ((9160, 9175), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (9173, 9175), True, 'import ColorHelper.color_helper_util as util\n'), ((9279, 9314), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['favs'], {'favs': '(True)'}), '(favs, favs=True)\n', (9297, 9314), True, 'import ColorHelper.color_helper_util as util\n'), ((9545, 9564), 'ColorHelper.color_helper_util.get_palettes', 'util.get_palettes', ([], {}), '()\n', (9562, 9564), True, 'import ColorHelper.color_helper_util as util\n'), ((22785, 22800), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (22798, 22800), True, 'import ColorHelper.color_helper_util as util\n'), ((27875, 27939), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/insert.html"""'], {}), "('Packages/ColorHelper/panels/insert.html')\n", (27896, 27939), False, 'import sublime\n'), ((28397, 28461), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/insert.html"""'], {}), "('Packages/ColorHelper/panels/insert.html')\n", (28418, 28461), False, 'import sublime\n'), ((34013, 34028), 'ColorHelper.color_helper_util.get_favs', 'util.get_favs', ([], {}), '()\n', (34026, 34028), True, 'import ColorHelper.color_helper_util as util\n'), ((34967, 35031), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/colors.html"""'], {}), "('Packages/ColorHelper/panels/colors.html')\n", (34988, 35031), False, 'import sublime\n'), ((35489, 35553), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/colors.html"""'], {}), "('Packages/ColorHelper/panels/colors.html')\n", (35510, 35553), False, 'import sublime\n'), ((39006, 39068), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/info.html"""'], {}), "('Packages/ColorHelper/panels/info.html')\n", (39027, 39068), False, 'import sublime\n'), ((39526, 39588), 'sublime.load_resource', 'sublime.load_resource', (['"""Packages/ColorHelper/panels/info.html"""'], {}), "('Packages/ColorHelper/panels/info.html')\n", (39547, 39588), False, 'import sublime\n'), ((47599, 47625), 'sublime.Region', 'sublime.Region', (['start', 'end'], {}), '(start, end)\n', (47613, 47625), False, 'import sublime\n'), ((51237, 51274), 'ColorHelper.color_helper_util.translate_color', 'util.translate_color', (['m', 'use_hex_argb'], {}), '(m, 
use_hex_argb)\n', (51257, 51274), True, 'import ColorHelper.color_helper_util as util\n'), ((55235, 55261), 'ColorHelper.color_helper_util.COLOR_RE.search', 'util.COLOR_RE.search', (['text'], {}), '(text)\n', (55255, 55261), True, 'import ColorHelper.color_helper_util as util\n'), ((71925, 71951), 'sublime.Region', 'sublime.Region', (['start', 'end'], {}), '(start, end)\n', (71939, 71951), False, 'import sublime\n'), ((1987, 2042), 'sublime.status_message', 'sublime.status_message', (['"""File color indexer started..."""'], {}), "('File color indexer started...')\n", (2009, 2042), False, 'import sublime\n'), ((6382, 6442), 'sublime.error_message', 'sublime.error_message', (['"""The name of "%s" is already in use!"""'], {}), '(\'The name of "%s" is already in use!\')\n', (6403, 6442), False, 'import sublime\n'), ((8756, 8790), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['color_palettes'], {}), '(color_palettes)\n', (8774, 8790), True, 'import ColorHelper.color_helper_util as util\n'), ((17947, 17965), 'sublime.platform', 'sublime.platform', ([], {}), '()\n', (17963, 17965), False, 'import sublime\n'), ((51786, 51792), 'time.time', 'time', ([], {}), '()\n', (51790, 51792), False, 'from time import time, sleep\n'), ((55157, 55209), 'sublime.Region', 'sublime.Region', (['approx_color_start', 'approx_color_end'], {}), '(approx_color_start, approx_color_end)\n', (55171, 55209), False, 'import sublime\n'), ((56989, 57012), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (57010, 57012), False, 'import sublime\n'), ((57889, 57931), 'sublime.set_timeout_async', 'sublime.set_timeout_async', (['self.payload', '(0)'], {}), '(self.payload, 0)\n', (57914, 57931), False, 'import sublime\n'), ((58851, 58878), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (58867, 58878), False, 'import os\n'), ((75936, 75942), 'time.time', 'time', ([], {}), '()\n', (75940, 75942), False, 'from time import time, sleep\n'), ((18227, 18391), 'mdpopups.color_box', 'mdpopups.color_box', (['[no_alpha_color, color]', '"""#cccccc"""', '"""#333333"""'], {'height': 'self.color_h', 'width': 'self.color_w', 'border_size': 'BORDER_SIZE', 'check_size': 'check_size'}), "([no_alpha_color, color], '#cccccc', '#333333', height=\n self.color_h, width=self.color_w, border_size=BORDER_SIZE, check_size=\n check_size)\n", (18245, 18391), False, 'import mdpopups\n'), ((18706, 18870), 'mdpopups.color_box', 'mdpopups.color_box', (['[no_alpha_color, color]', '"""#cccccc"""', '"""#333333"""'], {'height': 'self.color_h', 'width': 'self.color_w', 'border_size': 'BORDER_SIZE', 'check_size': 'check_size'}), "([no_alpha_color, color], '#cccccc', '#333333', height=\n self.color_h, width=self.color_w, border_size=BORDER_SIZE, check_size=\n check_size)\n", (18724, 18870), False, 'import mdpopups\n'), ((51620, 51653), 'mdpopups.scope2style', 'mdpopups.scope2style', (['view', 'scope'], {}), '(view, scope)\n', (51640, 51653), False, 'import mdpopups\n'), ((57708, 57714), 'time.time', 'time', ([], {}), '()\n', (57712, 57714), False, 'from time import time, sleep\n'), ((66691, 66720), 'ColorHelper.lib.csscolors.name2hex_map.keys', 'csscolors.name2hex_map.keys', ([], {}), '()\n', (66718, 66720), False, 'from ColorHelper.lib import csscolors\n'), ((7594, 7628), 'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['color_palettes'], {}), '(color_palettes)\n', (7612, 7628), True, 'import ColorHelper.color_helper_util as util\n'), ((9949, 9983), 
'ColorHelper.color_helper_util.save_palettes', 'util.save_palettes', (['color_palettes'], {}), '(color_palettes)\n', (9967, 9983), True, 'import ColorHelper.color_helper_util as util\n'), ((14451, 14467), 'ColorHelper.lib.rgba.RGBA', 'RGBA', (['calc.color'], {}), '(calc.color)\n', (14455, 14467), False, 'from ColorHelper.lib.rgba import RGBA\n'), ((57320, 57342), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (57340, 57342), False, 'import traceback\n'), ((63073, 63100), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (63089, 63100), False, 'import os\n'), ((14964, 14980), 'ColorHelper.lib.rgba.RGBA', 'RGBA', (['calc.color'], {}), '(calc.color)\n', (14968, 14980), False, 'from ColorHelper.lib.rgba import RGBA\n'), ((14583, 14608), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(h * 360.0)'], {}), '(h * 360.0)\n', (14597, 14608), True, 'import ColorHelper.color_helper_util as util\n'), ((14634, 14659), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(s * 100.0)'], {}), '(s * 100.0)\n', (14648, 14659), True, 'import ColorHelper.color_helper_util as util\n'), ((14685, 14710), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(l * 100.0)'], {}), '(l * 100.0)\n', (14699, 14710), True, 'import ColorHelper.color_helper_util as util\n'), ((15836, 15860), 'ColorHelper.color_helper_util.compress_hex', 'util.compress_hex', (['color'], {}), '(color)\n', (15853, 15860), True, 'import ColorHelper.color_helper_util as util\n'), ((15096, 15121), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(h * 360.0)'], {}), '(h * 360.0)\n', (15110, 15121), True, 'import ColorHelper.color_helper_util as util\n'), ((15147, 15172), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(w * 100.0)'], {}), '(w * 100.0)\n', (15161, 15172), True, 'import ColorHelper.color_helper_util as util\n'), ((15198, 15223), 'ColorHelper.color_helper_util.fmt_float', 'util.fmt_float', (['(b * 100.0)'], {}), '(b * 100.0)\n', (15212, 15223), True, 'import ColorHelper.color_helper_util as util\n')] |
import time
import sys
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
DIRNAME = os.path.dirname(__file__)
sys.path.append(os.path.join(DIRNAME, '..', '..'))
import json
import numpy as np
from collections import OrderedDict
import pandas as pd
import pathos.multiprocessing as mp
import itertools as it
from src.constrainedChasingEscapingEnv.envMADDPG import *
from src.constrainedChasingEscapingEnv.envNoPhysics import UnpackCenterControlAction
from src.constrainedChasingEscapingEnv.reward import RewardFunctionCompete
from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, \
GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel
from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, \
ApproximatePolicy, restoreVariables
from src.constrainedChasingEscapingEnv.state import GetAgentPosFromState
from src.neuralNetwork.trainTools import CoefficientCotroller, TrainTerminalController, TrainReporter, LearningRateModifier
from src.replayBuffer import SampleBatchFromBuffer, SaveToBuffer
from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, \
ActionToOneHot, ProcessTrajectoryForPolicyValueNet, ProcessTrajectoryForPolicyValueNetMultiAgentReward
from exec.parallelComputing import GenerateTrajectoriesParallel
class PreprocessTrajectoriesForBuffer:
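    """Attach per-agent value targets to each trajectory and drop the terminal tuple before buffering."""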
def __init__(self, addMultiAgentValuesToTrajectory, removeTerminalTupleFromTrajectory):
self.addMultiAgentValuesToTrajectory = addMultiAgentValuesToTrajectory
self.removeTerminalTupleFromTrajectory = removeTerminalTupleFromTrajectory
def __call__(self, trajectories):
trajectoriesWithValues = [self.addMultiAgentValuesToTrajectory(trajectory) for trajectory in trajectories]
filteredTrajectories = [self.removeTerminalTupleFromTrajectory(trajectory) for trajectory in trajectoriesWithValues]
return filteredTrajectories
class TrainOneAgent:
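    """Run a fixed number of training steps on one agent's network once the replay buffer is large enough."""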
def __init__(self, numTrainStepEachIteration, numTrajectoriesToStartTrain, processTrajectoryForPolicyValueNets,
sampleBatchFromBuffer, trainNN):
self.numTrainStepEachIteration = numTrainStepEachIteration
self.numTrajectoriesToStartTrain = numTrajectoriesToStartTrain
self.sampleBatchFromBuffer = sampleBatchFromBuffer
self.processTrajectoryForPolicyValueNets = processTrajectoryForPolicyValueNets
self.trainNN = trainNN
def __call__(self, agentId, multiAgentNNmodel, updatedReplayBuffer):
NNModel = multiAgentNNmodel[agentId]
if len(updatedReplayBuffer) >= self.numTrajectoriesToStartTrain:
for _ in range(self.numTrainStepEachIteration):
sampledBatch = self.sampleBatchFromBuffer(updatedReplayBuffer)
processedBatch = self.processTrajectoryForPolicyValueNets[agentId](sampledBatch)
trainData = [list(varBatch) for varBatch in zip(*processedBatch)]
updatedNNModel = self.trainNN(NNModel, trainData)
NNModel = updatedNNModel
return NNModel
def iterateTrainOneCondition(parameterOneCondition):
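    """Iteratively sample trajectories and train the sheep and wolves networks for one parameter condition."""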
numTrainStepEachIteration = int(parameterOneCondition['numTrainStepEachIteration'])
numTrajectoriesPerIteration = int(parameterOneCondition['numTrajectoriesPerIteration'])
dirName = os.path.dirname(__file__)
numOfAgent = 2
agentIds = list(range(numOfAgent))
maxRunningSteps = 50
numSimulations = 250
killzoneRadius = 50
fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius}
# env MDP
sheepsID = [0]
wolvesID = [1, 2]
blocksID = []
numSheeps = len(sheepsID)
numWolves = len(wolvesID)
numBlocks = len(blocksID)
numAgents = numWolves + numSheeps
numEntities = numAgents + numBlocks
sheepSize = 0.05
wolfSize = 0.075
blockSize = 0.2
sheepMaxSpeed = 1.3 * 1
wolfMaxSpeed = 1.0 * 1
blockMaxSpeed = None
entitiesSizeList = [sheepSize] * numSheeps + [wolfSize] * numWolves + [blockSize] * numBlocks
entityMaxSpeedList = [sheepMaxSpeed] * numSheeps + [wolfMaxSpeed] * numWolves + [blockMaxSpeed] * numBlocks
entitiesMovableList = [True] * numAgents + [False] * numBlocks
massList = [1.0] * numEntities
centralControlId = 1
centerControlIndexList = [centralControlId]
reshapeAction = UnpackCenterControlAction(centerControlIndexList)
getCollisionForce = GetCollisionForce()
applyActionForce = ApplyActionForce(wolvesID, sheepsID, entitiesMovableList)
applyEnvironForce = ApplyEnvironForce(numEntities, entitiesMovableList, entitiesSizeList,
getCollisionForce, getPosFromAgentState)
integrateState = IntegrateState(numEntities, entitiesMovableList, massList,
entityMaxSpeedList, getVelFromAgentState, getPosFromAgentState)
interpolateState = TransitMultiAgentChasing(numEntities, reshapeAction, applyActionForce, applyEnvironForce, integrateState)
numFramesToInterpolate = 1
def transit(state, action):
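        # advance the physics numFramesToInterpolate frames; only the first frame uses the given action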
for frameIndex in range(numFramesToInterpolate):
nextState = interpolateState(state, action)
action = np.array([(0, 0)] * numAgents)
state = nextState
return nextState
isTerminal = lambda state: False
isCollision = IsCollision(getPosFromAgentState)
collisonRewardWolf = 1
punishForOutOfBound = PunishForOutOfBound()
rewardWolf = RewardCentralControlPunishBond(wolvesID, sheepsID, entitiesSizeList, getPosFromAgentState, isCollision, punishForOutOfBound, collisonRewardWolf)
collisonRewardSheep = -1
rewardSheep = RewardCentralControlPunishBond(sheepsID, wolvesID, entitiesSizeList, getPosFromAgentState, isCollision, punishForOutOfBound, collisonRewardSheep)
resetState = ResetMultiAgentChasing(numAgents, numBlocks)
observeOneAgent = lambda agentID: Observe(agentID, wolvesID, sheepsID, blocksID, getPosFromAgentState, getVelFromAgentState)
observe = lambda state: [observeOneAgent(agentID)(state) for agentID in range(numAgents)]
# policy
actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
wolfActionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7), (0, 0)]
preyPowerRatio = 0.5
sheepActionSpace = list(map(tuple, np.array(actionSpace) * preyPowerRatio))
predatorPowerRatio = 0.5
wolfActionOneSpace = list(map(tuple, np.array(wolfActionSpace) * predatorPowerRatio))
wolfActionTwoSpace = list(map(tuple, np.array(wolfActionSpace) * predatorPowerRatio))
wolvesActionSpace = list(it.product(wolfActionOneSpace, wolfActionTwoSpace))
actionSpaceList = [sheepActionSpace, wolvesActionSpace]
# neural network init
numStateSpace = 4 * numEntities
numSheepActionSpace = len(sheepActionSpace)
numWolvesActionSpace = len(wolvesActionSpace)
regularizationFactor = 1e-4
sharedWidths = [128]
actionLayerWidths = [128]
valueLayerWidths = [128]
generateSheepModel = GenerateModel(numStateSpace, numSheepActionSpace, regularizationFactor)
generateWolvesModel = GenerateModel(numStateSpace, numWolvesActionSpace, regularizationFactor)
generateModelList = [generateSheepModel, generateWolvesModel]
sheepDepth = 9
wolfDepth = 9
depthList = [sheepDepth, wolfDepth]
resBlockSize = 2
dropoutRate = 0.0
initializationMethod = 'uniform'
multiAgentNNmodel = [generateModel(sharedWidths * depth, actionLayerWidths, valueLayerWidths, resBlockSize, initializationMethod, dropoutRate) for depth, generateModel in zip(depthList, generateModelList)]
# replay buffer
bufferSize = 20000
saveToBuffer = SaveToBuffer(bufferSize)
def getUniformSamplingProbabilities(buffer): return [(1 / len(buffer)) for _ in buffer]
miniBatchSize = 512
sampleBatchFromBuffer = SampleBatchFromBuffer(miniBatchSize, getUniformSamplingProbabilities)
# pre-process the trajectory for replayBuffer
rewardMultiAgents = [rewardSheep, rewardWolf]
decay = 1
accumulateMultiAgentRewards = AccumulateMultiAgentRewards(decay)
addMultiAgentValuesToTrajectory = AddValuesToTrajectory(accumulateMultiAgentRewards)
actionIndex = 1
def getTerminalActionFromTrajectory(trajectory): return trajectory[-1][actionIndex]
removeTerminalTupleFromTrajectory = RemoveTerminalTupleFromTrajectory(getTerminalActionFromTrajectory)
# pre-process the trajectory for NNTraining
sheepActionToOneHot = ActionToOneHot(sheepActionSpace)
wolvesActionToOneHot = ActionToOneHot(wolvesActionSpace)
actionToOneHotList = [sheepActionToOneHot, wolvesActionToOneHot]
processTrajectoryForPolicyValueNets = [ProcessTrajectoryForPolicyValueNetMultiAgentReward(actionToOneHotList[agentId], agentId) for agentId in agentIds]
# function to train NN model
terminalThreshold = 1e-6
lossHistorySize = 10
initActionCoeff = 1
initValueCoeff = 1
initCoeff = (initActionCoeff, initValueCoeff)
afterActionCoeff = 1
afterValueCoeff = 1
afterCoeff = (afterActionCoeff, afterValueCoeff)
terminalController = TrainTerminalController(lossHistorySize, terminalThreshold)
coefficientController = CoefficientCotroller(initCoeff, afterCoeff)
reportInterval = 10000
    trainStepsInterval = 1  # 10000
trainReporter = TrainReporter(numTrainStepEachIteration, reportInterval)
learningRateDecay = 1
learningRateDecayStep = 1
learningRate = 0.0001
learningRateModifier = LearningRateModifier(learningRate, learningRateDecay, learningRateDecayStep)
trainNN = Train(numTrainStepEachIteration, miniBatchSize, sampleData, learningRateModifier, terminalController, coefficientController, trainReporter)
# load save dir
trajectorySaveExtension = '.pickle'
NNModelSaveExtension = ''
trajectoriesSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'iterTrain2wolves1sheepMADDPGEnv', 'trajectories')
if not os.path.exists(trajectoriesSaveDirectory):
os.makedirs(trajectoriesSaveDirectory)
NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'iterTrain2wolves1sheepMADDPGEnv', 'NNModelRes')
if not os.path.exists(NNModelSaveDirectory):
os.makedirs(NNModelSaveDirectory)
generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters)
generateNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters)
startTime = time.time()
sheepDepth = 9
wolfDepth = 9
depthList = [sheepDepth, wolfDepth]
resBlockSize = 2
dropoutRate = 0.0
initializationMethod = 'uniform'
multiAgentNNmodel = [generateModel(sharedWidths * depth, actionLayerWidths, valueLayerWidths, resBlockSize, initializationMethod, dropoutRate) for depth, generateModel in zip(depthList, generateModelList)]
preprocessMultiAgentTrajectories = PreprocessTrajectoriesForBuffer(addMultiAgentValuesToTrajectory, removeTerminalTupleFromTrajectory)
numTrajectoriesToStartTrain = 1024
trainOneAgent = TrainOneAgent(numTrainStepEachIteration, numTrajectoriesToStartTrain, processTrajectoryForPolicyValueNets, sampleBatchFromBuffer, trainNN)
# restorePretrainModel
sheepPreTrainModelPath = os.path.join(dirName, '..', '..', 'data', 'MADDPG2wolves1sheep', 'trainSheepWithPretrrainWolves', 'trainedResNNModels', 'agentId=0_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000')
wolvesPreTrainModelPath = os.path.join(dirName, '..', '..', 'data', 'MADDPG2wolves1sheep', 'trainWolvesTwoCenterControlAction', 'trainedResNNModels', 'agentId=1_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000')
pretrainModelPathList = [sheepPreTrainModelPath, wolvesPreTrainModelPath]
sheepId, wolvesId = [0,1]
trainableAgentIds = [sheepId, wolvesId]
for agentId in trainableAgentIds:
restoredNNModel = restoreVariables(multiAgentNNmodel[agentId], pretrainModelPathList[agentId])
multiAgentNNmodel[agentId] = restoredNNModel
NNModelPathParameters = {'iterationIndex': 0, 'agentId': agentId, 'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration}
NNModelSavePath = generateNNModelSavePath(NNModelPathParameters)
saveVariables(multiAgentNNmodel[agentId], NNModelSavePath)
fuzzySearchParameterNames = ['sampleIndex']
loadTrajectoriesForParallel = LoadTrajectories(generateTrajectorySavePath, loadFromPickle, fuzzySearchParameterNames)
loadTrajectoriesForTrainBreak = LoadTrajectories(generateTrajectorySavePath, loadFromPickle)
# initRreplayBuffer
replayBuffer = []
trajectoryBeforeTrainIndex = 0
trajectoryBeforeTrainPathParamters = {'iterationIndex': trajectoryBeforeTrainIndex}
trajectoriesBeforeTrain = loadTrajectoriesForParallel(trajectoryBeforeTrainPathParamters)
preProcessedTrajectoriesBeforeTrain = preprocessMultiAgentTrajectories(trajectoriesBeforeTrain)
replayBuffer = saveToBuffer(replayBuffer, preProcessedTrajectoriesBeforeTrain)
# delete used model for disk space
fixedParametersForDelete = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius, 'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration}
toDeleteNNModelExtensionList = ['.meta', '.index', '.data-00000-of-00001']
generatetoDeleteNNModelPathList = [GetSavePath(NNModelSaveDirectory, toDeleteNNModelExtension, fixedParametersForDelete) for toDeleteNNModelExtension in toDeleteNNModelExtensionList]
# restore model
restoredIteration = 0
for agentId in trainableAgentIds:
modelPathForRestore = generateNNModelSavePath({'iterationIndex': restoredIteration, 'agentId': agentId, 'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration})
restoredNNModel = restoreVariables(multiAgentNNmodel[agentId], modelPathForRestore)
multiAgentNNmodel[agentId] = restoredNNModel
# restore buffer
bufferTrajectoryPathParameters = {'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration}
restoredIterationIndexRange = range(restoredIteration)
restoredTrajectories = loadTrajectoriesForTrainBreak(parameters=bufferTrajectoryPathParameters, parametersWithSpecificValues={'iterationIndex': list(restoredIterationIndexRange)})
preProcessedRestoredTrajectories = preprocessMultiAgentTrajectories(restoredTrajectories)
print(len(preProcessedRestoredTrajectories))
replayBuffer = saveToBuffer(replayBuffer, preProcessedRestoredTrajectories)
modelMemorySize = 5
modelSaveFrequency = 50
deleteUsedModel = DeleteUsedModel(modelMemorySize, modelSaveFrequency, generatetoDeleteNNModelPathList)
numIterations = 10000
for iterationIndex in range(restoredIteration + 1, numIterations):
print('iterationIndex: ', iterationIndex)
        numCpuToUseWhileTrain = 16
numCmdList = min(numTrajectoriesPerIteration, numCpuToUseWhileTrain)
sampleTrajectoryFileName = 'sampleMultiMCTSAgentCenterControlResNetTrajCondtion.py'
generateTrajectoriesParallelWhileTrain = GenerateTrajectoriesParallel(sampleTrajectoryFileName, numTrajectoriesPerIteration, numCmdList)
trajectoryPathParameters = {'iterationIndex': iterationIndex, 'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration}
cmdList = generateTrajectoriesParallelWhileTrain(trajectoryPathParameters)
trajectories = loadTrajectoriesForParallel(trajectoryPathParameters)
trajectorySavePath = generateTrajectorySavePath(trajectoryPathParameters)
saveToPickle(trajectories, trajectorySavePath)
preProcessedTrajectories = preprocessMultiAgentTrajectories(trajectories)
updatedReplayBuffer = saveToBuffer(replayBuffer, preProcessedTrajectories)
for agentId in trainableAgentIds:
updatedAgentNNModel = trainOneAgent(agentId, multiAgentNNmodel, updatedReplayBuffer)
NNModelPathParameters = {'iterationIndex': iterationIndex, 'agentId': agentId, 'numTrajectoriesPerIteration': numTrajectoriesPerIteration, 'numTrainStepEachIteration': numTrainStepEachIteration}
NNModelSavePath = generateNNModelSavePath(NNModelPathParameters)
saveVariables(updatedAgentNNModel, NNModelSavePath)
multiAgentNNmodel[agentId] = updatedAgentNNModel
replayBuffer = updatedReplayBuffer
deleteUsedModel(iterationIndex, agentId)
endTime = time.time()
print("Time taken for {} iterations: {} seconds".format(
numIterations, (endTime - startTime)))
def main():
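    """Sample the initial buffer-filling trajectories in parallel, then train every parameter condition."""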
manipulatedVariables = OrderedDict()
manipulatedVariables['numTrainStepEachIteration'] = [1]
manipulatedVariables['numTrajectoriesPerIteration'] = [1]
productedValues = it.product(*[[(key, value) for value in values] for key, values in manipulatedVariables.items()])
parametersAllCondtion = [dict(list(specificValueParameter)) for specificValueParameter in productedValues]
# Sample Trajectory Before Train to fill Buffer
miniBatchSize = 256
numTrajectoriesToStartTrain = 2 * miniBatchSize
sampleTrajectoryFileName = 'preparePretrainedNNMCTSAgentCenterControlResNetTraj.py'
numCpuCores = os.cpu_count()
numCpuToUse = int(0.8 * numCpuCores)
numCmdList = min(numTrajectoriesToStartTrain, numCpuToUse)
generateTrajectoriesParallel = GenerateTrajectoriesParallel(sampleTrajectoryFileName, numTrajectoriesToStartTrain, numCmdList)
iterationBeforeTrainIndex = 0
trajectoryBeforeTrainPathParamters = {'iterationIndex': iterationBeforeTrainIndex}
    prepareBeforeTrainData = True
    if prepareBeforeTrainData:
cmdList = generateTrajectoriesParallel(trajectoryBeforeTrainPathParamters)
trainPool = mp.Pool(numCpuToUse)
trainPool.map(iterateTrainOneCondition, parametersAllCondtion)
if __name__ == '__main__':
main()
| [
"src.neuralNetwork.trainTools.LearningRateModifier",
"src.neuralNetwork.policyValueResNet.restoreVariables",
"src.neuralNetwork.trainTools.TrainTerminalController",
"exec.trajectoriesSaveLoad.LoadTrajectories",
"numpy.array",
"src.neuralNetwork.trainTools.TrainReporter",
"os.cpu_count",
"src.neuralNetwork.trainTools.CoefficientCotroller",
"os.path.exists",
"exec.trajectoriesSaveLoad.GetSavePath",
"src.constrainedChasingEscapingEnv.envNoPhysics.UnpackCenterControlAction",
"itertools.product",
"exec.parallelComputing.GenerateTrajectoriesParallel",
"src.neuralNetwork.policyValueResNet.saveVariables",
"src.neuralNetwork.policyValueResNet.GenerateModel",
"collections.OrderedDict",
"exec.preProcessing.AccumulateMultiAgentRewards",
"pathos.multiprocessing.Pool",
"exec.preProcessing.RemoveTerminalTupleFromTrajectory",
"exec.preProcessing.ProcessTrajectoryForPolicyValueNetMultiAgentReward",
"os.path.dirname",
"exec.trajectoriesSaveLoad.DeleteUsedModel",
"exec.preProcessing.AddValuesToTrajectory",
"time.time",
"exec.trajectoriesSaveLoad.saveToPickle",
"os.makedirs",
"exec.preProcessing.ActionToOneHot",
"src.neuralNetwork.policyValueResNet.Train",
"src.replayBuffer.SaveToBuffer",
"os.path.join",
"src.replayBuffer.SampleBatchFromBuffer"
] | [((85, 110), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (100, 110), False, 'import os\n'), ((127, 160), 'os.path.join', 'os.path.join', (['DIRNAME', '""".."""', '""".."""'], {}), "(DIRNAME, '..', '..')\n", (139, 160), False, 'import os\n'), ((3471, 3496), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3486, 3496), False, 'import os\n'), ((4548, 4597), 'src.constrainedChasingEscapingEnv.envNoPhysics.UnpackCenterControlAction', 'UnpackCenterControlAction', (['centerControlIndexList'], {}), '(centerControlIndexList)\n', (4573, 4597), False, 'from src.constrainedChasingEscapingEnv.envNoPhysics import UnpackCenterControlAction\n'), ((7281, 7352), 'src.neuralNetwork.policyValueResNet.GenerateModel', 'GenerateModel', (['numStateSpace', 'numSheepActionSpace', 'regularizationFactor'], {}), '(numStateSpace, numSheepActionSpace, regularizationFactor)\n', (7294, 7352), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((7379, 7451), 'src.neuralNetwork.policyValueResNet.GenerateModel', 'GenerateModel', (['numStateSpace', 'numWolvesActionSpace', 'regularizationFactor'], {}), '(numStateSpace, numWolvesActionSpace, regularizationFactor)\n', (7392, 7451), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((7949, 7973), 'src.replayBuffer.SaveToBuffer', 'SaveToBuffer', (['bufferSize'], {}), '(bufferSize)\n', (7961, 7973), False, 'from src.replayBuffer import SampleBatchFromBuffer, SaveToBuffer\n'), ((8119, 8188), 'src.replayBuffer.SampleBatchFromBuffer', 'SampleBatchFromBuffer', (['miniBatchSize', 'getUniformSamplingProbabilities'], {}), '(miniBatchSize, getUniformSamplingProbabilities)\n', (8140, 8188), False, 'from src.replayBuffer import SampleBatchFromBuffer, SaveToBuffer\n'), ((8338, 8372), 'exec.preProcessing.AccumulateMultiAgentRewards', 'AccumulateMultiAgentRewards', (['decay'], {}), '(decay)\n', (8365, 8372), False, 'from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, ProcessTrajectoryForPolicyValueNetMultiAgentReward\n'), ((8412, 8462), 'exec.preProcessing.AddValuesToTrajectory', 'AddValuesToTrajectory', (['accumulateMultiAgentRewards'], {}), '(accumulateMultiAgentRewards)\n', (8433, 8462), False, 'from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, ProcessTrajectoryForPolicyValueNetMultiAgentReward\n'), ((8612, 8678), 'exec.preProcessing.RemoveTerminalTupleFromTrajectory', 'RemoveTerminalTupleFromTrajectory', (['getTerminalActionFromTrajectory'], {}), '(getTerminalActionFromTrajectory)\n', (8645, 8678), False, 'from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, ProcessTrajectoryForPolicyValueNetMultiAgentReward\n'), ((8754, 8786), 'exec.preProcessing.ActionToOneHot', 'ActionToOneHot', (['sheepActionSpace'], {}), '(sheepActionSpace)\n', (8768, 8786), False, 'from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, 
ProcessTrajectoryForPolicyValueNetMultiAgentReward\n'), ((8814, 8847), 'exec.preProcessing.ActionToOneHot', 'ActionToOneHot', (['wolvesActionSpace'], {}), '(wolvesActionSpace)\n', (8828, 8847), False, 'from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, ProcessTrajectoryForPolicyValueNetMultiAgentReward\n'), ((9387, 9446), 'src.neuralNetwork.trainTools.TrainTerminalController', 'TrainTerminalController', (['lossHistorySize', 'terminalThreshold'], {}), '(lossHistorySize, terminalThreshold)\n', (9410, 9446), False, 'from src.neuralNetwork.trainTools import CoefficientCotroller, TrainTerminalController, TrainReporter, LearningRateModifier\n'), ((9475, 9518), 'src.neuralNetwork.trainTools.CoefficientCotroller', 'CoefficientCotroller', (['initCoeff', 'afterCoeff'], {}), '(initCoeff, afterCoeff)\n', (9495, 9518), False, 'from src.neuralNetwork.trainTools import CoefficientCotroller, TrainTerminalController, TrainReporter, LearningRateModifier\n'), ((9604, 9660), 'src.neuralNetwork.trainTools.TrainReporter', 'TrainReporter', (['numTrainStepEachIteration', 'reportInterval'], {}), '(numTrainStepEachIteration, reportInterval)\n', (9617, 9660), False, 'from src.neuralNetwork.trainTools import CoefficientCotroller, TrainTerminalController, TrainReporter, LearningRateModifier\n'), ((9770, 9846), 'src.neuralNetwork.trainTools.LearningRateModifier', 'LearningRateModifier', (['learningRate', 'learningRateDecay', 'learningRateDecayStep'], {}), '(learningRate, learningRateDecay, learningRateDecayStep)\n', (9790, 9846), False, 'from src.neuralNetwork.trainTools import CoefficientCotroller, TrainTerminalController, TrainReporter, LearningRateModifier\n'), ((9862, 10009), 'src.neuralNetwork.policyValueResNet.Train', 'Train', (['numTrainStepEachIteration', 'miniBatchSize', 'sampleData', 'learningRateModifier', 'terminalController', 'coefficientController', 'trainReporter'], {}), '(numTrainStepEachIteration, miniBatchSize, sampleData,\n learningRateModifier, terminalController, coefficientController,\n trainReporter)\n', (9867, 10009), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((10126, 10222), 'os.path.join', 'os.path.join', (['dirName', '""".."""', '""".."""', '"""data"""', '"""iterTrain2wolves1sheepMADDPGEnv"""', '"""trajectories"""'], {}), "(dirName, '..', '..', 'data', 'iterTrain2wolves1sheepMADDPGEnv',\n 'trajectories')\n", (10138, 10222), False, 'import os\n'), ((10348, 10442), 'os.path.join', 'os.path.join', (['dirName', '""".."""', '""".."""', '"""data"""', '"""iterTrain2wolves1sheepMADDPGEnv"""', '"""NNModelRes"""'], {}), "(dirName, '..', '..', 'data', 'iterTrain2wolves1sheepMADDPGEnv',\n 'NNModelRes')\n", (10360, 10442), False, 'import os\n'), ((10564, 10649), 'exec.trajectoriesSaveLoad.GetSavePath', 'GetSavePath', (['trajectoriesSaveDirectory', 'trajectorySaveExtension', 'fixedParameters'], {}), '(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters\n )\n', (10575, 10649), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((10675, 10747), 'exec.trajectoriesSaveLoad.GetSavePath', 'GetSavePath', (['NNModelSaveDirectory', 'NNModelSaveExtension', 'fixedParameters'], {}), 
'(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters)\n', (10686, 10747), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((10765, 10776), 'time.time', 'time.time', ([], {}), '()\n', (10774, 10776), False, 'import time\n'), ((11541, 11787), 'os.path.join', 'os.path.join', (['dirName', '""".."""', '""".."""', '"""data"""', '"""MADDPG2wolves1sheep"""', '"""trainSheepWithPretrrainWolves"""', '"""trainedResNNModels"""', '"""agentId=0_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000"""'], {}), "(dirName, '..', '..', 'data', 'MADDPG2wolves1sheep',\n 'trainSheepWithPretrrainWolves', 'trainedResNNModels',\n 'agentId=0_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000'\n )\n", (11553, 11787), False, 'import os\n'), ((11807, 12057), 'os.path.join', 'os.path.join', (['dirName', '""".."""', '""".."""', '"""data"""', '"""MADDPG2wolves1sheep"""', '"""trainWolvesTwoCenterControlAction"""', '"""trainedResNNModels"""', '"""agentId=1_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000"""'], {}), "(dirName, '..', '..', 'data', 'MADDPG2wolves1sheep',\n 'trainWolvesTwoCenterControlAction', 'trainedResNNModels',\n 'agentId=1_depth=9_learningRate=0.0001_maxRunningSteps=50_miniBatchSize=256_numSimulations=250_trainSteps=50000'\n )\n", (11819, 12057), False, 'import os\n'), ((12810, 12901), 'exec.trajectoriesSaveLoad.LoadTrajectories', 'LoadTrajectories', (['generateTrajectorySavePath', 'loadFromPickle', 'fuzzySearchParameterNames'], {}), '(generateTrajectorySavePath, loadFromPickle,\n fuzzySearchParameterNames)\n', (12826, 12901), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((12934, 12994), 'exec.trajectoriesSaveLoad.LoadTrajectories', 'LoadTrajectories', (['generateTrajectorySavePath', 'loadFromPickle'], {}), '(generateTrajectorySavePath, loadFromPickle)\n', (12950, 12994), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((15168, 15257), 'exec.trajectoriesSaveLoad.DeleteUsedModel', 'DeleteUsedModel', (['modelMemorySize', 'modelSaveFrequency', 'generatetoDeleteNNModelPathList'], {}), '(modelMemorySize, modelSaveFrequency,\n generatetoDeleteNNModelPathList)\n', (15183, 15257), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((17074, 17085), 'time.time', 'time.time', ([], {}), '()\n', (17083, 17085), False, 'import time\n'), ((17235, 17248), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17246, 17248), False, 'from collections import OrderedDict\n'), ((17837, 17851), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (17849, 17851), False, 'import os\n'), ((17991, 18090), 'exec.parallelComputing.GenerateTrajectoriesParallel', 'GenerateTrajectoriesParallel', (['sampleTrajectoryFileName', 
'numTrajectoriesToStartTrain', 'numCmdList'], {}), '(sampleTrajectoryFileName,\n numTrajectoriesToStartTrain, numCmdList)\n', (18019, 18090), False, 'from exec.parallelComputing import GenerateTrajectoriesParallel\n'), ((18368, 18388), 'pathos.multiprocessing.Pool', 'mp.Pool', (['numCpuToUse'], {}), '(numCpuToUse)\n', (18375, 18388), True, 'import pathos.multiprocessing as mp\n'), ((6864, 6914), 'itertools.product', 'it.product', (['wolfActionOneSpace', 'wolfActionTwoSpace'], {}), '(wolfActionOneSpace, wolfActionTwoSpace)\n', (6874, 6914), True, 'import itertools as it\n'), ((8960, 9053), 'exec.preProcessing.ProcessTrajectoryForPolicyValueNetMultiAgentReward', 'ProcessTrajectoryForPolicyValueNetMultiAgentReward', (['actionToOneHotList[agentId]', 'agentId'], {}), '(actionToOneHotList[\n agentId], agentId)\n', (9010, 9053), False, 'from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, ActionToOneHot, ProcessTrajectoryForPolicyValueNet, ProcessTrajectoryForPolicyValueNetMultiAgentReward\n'), ((10230, 10271), 'os.path.exists', 'os.path.exists', (['trajectoriesSaveDirectory'], {}), '(trajectoriesSaveDirectory)\n', (10244, 10271), False, 'import os\n'), ((10281, 10319), 'os.makedirs', 'os.makedirs', (['trajectoriesSaveDirectory'], {}), '(trajectoriesSaveDirectory)\n', (10292, 10319), False, 'import os\n'), ((10450, 10486), 'os.path.exists', 'os.path.exists', (['NNModelSaveDirectory'], {}), '(NNModelSaveDirectory)\n', (10464, 10486), False, 'import os\n'), ((10496, 10529), 'os.makedirs', 'os.makedirs', (['NNModelSaveDirectory'], {}), '(NNModelSaveDirectory)\n', (10507, 10529), False, 'import os\n'), ((12266, 12342), 'src.neuralNetwork.policyValueResNet.restoreVariables', 'restoreVariables', (['multiAgentNNmodel[agentId]', 'pretrainModelPathList[agentId]'], {}), '(multiAgentNNmodel[agentId], pretrainModelPathList[agentId])\n', (12282, 12342), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((12668, 12726), 'src.neuralNetwork.policyValueResNet.saveVariables', 'saveVariables', (['multiAgentNNmodel[agentId]', 'NNModelSavePath'], {}), '(multiAgentNNmodel[agentId], NNModelSavePath)\n', (12681, 12726), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((13852, 13941), 'exec.trajectoriesSaveLoad.GetSavePath', 'GetSavePath', (['NNModelSaveDirectory', 'toDeleteNNModelExtension', 'fixedParametersForDelete'], {}), '(NNModelSaveDirectory, toDeleteNNModelExtension,\n fixedParametersForDelete)\n', (13863, 13941), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((14336, 14401), 'src.neuralNetwork.policyValueResNet.restoreVariables', 'restoreVariables', (['multiAgentNNmodel[agentId]', 'modelPathForRestore'], {}), '(multiAgentNNmodel[agentId], modelPathForRestore)\n', (14352, 14401), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((15661, 15760), 'exec.parallelComputing.GenerateTrajectoriesParallel', 'GenerateTrajectoriesParallel', (['sampleTrajectoryFileName', 'numTrajectoriesPerIteration', 'numCmdList'], {}), 
'(sampleTrajectoryFileName,\n numTrajectoriesPerIteration, numCmdList)\n', (15689, 15760), False, 'from exec.parallelComputing import GenerateTrajectoriesParallel\n'), ((16194, 16240), 'exec.trajectoriesSaveLoad.saveToPickle', 'saveToPickle', (['trajectories', 'trajectorySavePath'], {}), '(trajectories, trajectorySavePath)\n', (16206, 16240), False, 'from exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, conditionDfFromParametersDict, LoadTrajectories, SaveAllTrajectories, GenerateAllSampleIndexSavePaths, saveToPickle, loadFromPickle, DeleteUsedModel\n'), ((5408, 5438), 'numpy.array', 'np.array', (['([(0, 0)] * numAgents)'], {}), '([(0, 0)] * numAgents)\n', (5416, 5438), True, 'import numpy as np\n'), ((16845, 16896), 'src.neuralNetwork.policyValueResNet.saveVariables', 'saveVariables', (['updatedAgentNNModel', 'NNModelSavePath'], {}), '(updatedAgentNNModel, NNModelSavePath)\n', (16858, 16896), False, 'from src.neuralNetwork.policyValueResNet import GenerateModel, Train, saveVariables, sampleData, ApproximateValue, ApproximatePolicy, restoreVariables\n'), ((6583, 6604), 'numpy.array', 'np.array', (['actionSpace'], {}), '(actionSpace)\n', (6591, 6604), True, 'import numpy as np\n'), ((6695, 6720), 'numpy.array', 'np.array', (['wolfActionSpace'], {}), '(wolfActionSpace)\n', (6703, 6720), True, 'import numpy as np\n'), ((6785, 6810), 'numpy.array', 'np.array', (['wolfActionSpace'], {}), '(wolfActionSpace)\n', (6793, 6810), True, 'import numpy as np\n')] |
# Copyright <NAME> 2016
# weakref-enabled ordered deque, convenient for UI window stack organization
import weakref
class OrderedDeque():
"""
Ordered deque with fast order comparison operation (simply by index).
    Most operations are defined on its elements, not on the deque object itself.
"""
class DequeElement():
def __init__(self, owner, obj, index, prev = None, next = None):
self.obj = obj
self.owner = owner
self.index = index
self.prev = prev
self.next = next
def moveToHead(self):
self.owner.moveToHead(self)
def __init__(self):
self.root = None
self.head = None
self.head_index = -1
def push(self, obj):
"""Create new element and push it to the tail of deque"""
self.head_index += 1
elem = OrderedDeque.DequeElement(self, obj, self.head_index, self.head)
if self.head is not None:
self.head.next = elem
self.head = elem
if self.root is None:
self.root = elem
return elem
def pushElem(self, elem):
"""Push element to the tail of deque"""
self.head_index += 1
elem.index = self.head_index
elem.prev = self.head
elem.next = None
if self.head is not None:
self.head.next = elem
self.head = elem
if self.root is None:
self.root = elem
def removeElem(self, elem):
"""Remove element from deque"""
if self.head is elem:
self.head = elem.prev
if self.root is elem:
self.root = elem.next
if elem.prev is not None:
elem.prev.next = elem.next
if elem.next is not None:
elem.next.prev = elem.prev
def removeFirst(self, obj):
"""
        Remove the first element that holds the passed object.
        Returns the deleted element; does not throw.
"""
elem = self.root
while elem is not None:
if elem.obj is obj:
self.removeElem(elem)
return elem
elem = elem.next
return None
def moveToHead(self, elem):
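        """Move an existing element to the head (most recently pushed end) of the deque."""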
self.removeElem(elem)
self.pushElem(elem)
class WeakOrderedDeque(OrderedDeque):
def __init__(self):
super().__init__()
def push(self, obj):
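        """Wrap obj in a weakref so its deque entry is removed automatically when obj is garbage-collected."""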
weak = weakref.ref(obj, self.onWeakCollect)
return super().push(weak)
def onWeakCollect(self, weak):
self.removeFirst(weak) | [
"weakref.ref"
] | [((2403, 2439), 'weakref.ref', 'weakref.ref', (['obj', 'self.onWeakCollect'], {}), '(obj, self.onWeakCollect)\n', (2414, 2439), False, 'import weakref\n')] |
from datetime import datetime
from onegov.core.crypto import hash_password, verify_password
from onegov.core.orm import Base
from onegov.core.orm.mixins import data_property, TimestampMixin
from onegov.core.orm.types import JSON, UUID, LowercaseText
from onegov.core.security import forget, remembered
from onegov.core.utils import is_valid_yubikey_format
from onegov.core.utils import remove_repeated_spaces
from onegov.core.utils import yubikey_otp_to_serial
from onegov.search import ORMSearchable
from onegov.user.models.group import UserGroup
from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref, deferred, relationship
from uuid import uuid4
class User(Base, TimestampMixin, ORMSearchable):
""" Defines a generic user. """
__tablename__ = 'users'
#: the type of the item, this can be used to create custom polymorphic
#: subclasses of this class. See
#: `<http://docs.sqlalchemy.org/en/improve_toc/\
#: orm/extensions/declarative/inheritance.html>`_.
type = Column(Text, nullable=True)
__mapper_args__ = {
'polymorphic_on': type
}
es_properties = {
'username': {'type': 'text'},
'realname': {'type': 'text'},
'userprofile': {'type': 'text'}
}
es_public = False
@property
def es_suggestion(self):
return (self.realname or self.username, self.username)
@property
def userprofile(self):
data = []
if self.data:
for value in self.data.values():
if value and isinstance(value, str):
data.append(value)
return data
#: the user id is a uuid because that's more secure (no id guessing)
id = Column(UUID, nullable=False, primary_key=True, default=uuid4)
#: the username may be any string, but will usually be an email address
username = Column(LowercaseText, unique=True, nullable=False)
#: the password is stored with the hashing algorithm defined by onegov.core
password_hash = Column(Text, nullable=False)
#: the role is relevant for security in onegov.core
role = Column(Text, nullable=False)
#: the group this user belongs to
group_id = Column(UUID, ForeignKey(UserGroup.id), nullable=True)
group = relationship(
UserGroup, backref=backref('users', lazy='dynamic')
)
#: the real name of the user for display (use the :attr:`name` property
#: to automatically get the name or the username)
realname = Column(Text, nullable=True)
#: extra data that may be stored with this user, the format and content
#: of this data is defined by the consumer of onegov.user
#: by default, this data is only loaded by request, so if you need to
#: load a lot of data columns, do something like this::
#:
#: session.query(User).options(undefer("data"))
#:
data = deferred(Column(JSON, nullable=True))
#: two-factor authentication schemes are enabled with this property
#: if no two-factor auth is used, the value is NULL, if one *is* used,
#: there should be a dictionary with the type of the two-factor
#: authentication as well as custom values required by the two-factor
#: implementation.
#:
#: e.g.::
#:
#: {
#: 'type': 'yubikey',
#: 'data': 'ccccccbcgujh'
#: }
#:
#: Note that 'data' could also be a nested dictionary!
#:
second_factor = Column(JSON, nullable=True)
#: A string describing where the user came from, None if internal.
#
#: Internal users may login using a password, which they may also change.
#
#: External users may not login using a password, nor can they ask for one.
#
#: A user can technically come from changing providers - the source refers
#: to the last provider he used.
source = Column(Text, nullable=True, default=None)
#: A string describing the user id on the source, which is an id that is
    #: supposed to never change (unlike the username, which may change).
#:
#: If set, the source_id is unique per source.
source_id = Column(Text, nullable=True, default=None)
#: true if the user is active
active = Column(Boolean, nullable=False, default=True)
#: the signup token used by the user
signup_token = Column(Text, nullable=True, default=None)
__table_args__ = (
Index('lowercase_username', func.lower(username), unique=True),
UniqueConstraint('source', 'source_id', name='unique_source_id'),
)
@hybrid_property
def title(self):
""" Returns the realname or the username of the user, depending on
what's available first. """
if self.realname is None:
return self.username
if self.realname.strip():
return self.realname
return self.username
@title.expression
def title(cls):
return func.coalesce(
func.nullif(func.trim(cls.realname), ''), cls.username
)
@hybrid_property
def password(self):
""" An alias for :attr:`password_hash`. """
return self.password_hash
@password.setter
def password(self, value):
""" When set, the given password in cleartext is hashed using
onegov.core's default hashing algorithm.
"""
self.password_hash = hash_password(value)
def is_matching_password(self, password):
""" Returns True if the given password (cleartext) matches the
stored password hash.
"""
return verify_password(password, self.password_hash)
@classmethod
def get_initials(cls, username, realname=None):
""" Takes the name and returns initials which are at most two
characters wide.
Examples:
admin => A
<EMAIL> => ND
<NAME> => VS
<NAME> => CB
"""
# for e-mail addresses assume the dot splits the name and use
# the first two parts of said split (probably won't have a middle
# name in the e-mail address)
if realname is None or not realname.strip():
username = username.split('@')[0]
parts = username.split('.')[:2]
# for real names split by space and assume that with more than one
# part that the first and last part are the most important to get rid
# of middlenames
else:
parts = remove_repeated_spaces(realname).split(' ')
if len(parts) > 2:
parts = (parts[0], parts[-1])
return ''.join(p[0] for p in parts).upper()
@property
def initials(self):
return self.get_initials(self.username, self.realname)
@property
def has_yubikey(self):
if not self.second_factor:
return False
return self.second_factor.get('type') == 'yubikey'
@property
def yubikey(self):
if not self.has_yubikey:
return None
return self.second_factor.get('data')
@yubikey.setter
def yubikey(self, yubikey):
if not yubikey:
self.second_factor = None
else:
assert is_valid_yubikey_format(yubikey)
self.second_factor = {
'type': 'yubikey',
'data': yubikey[:12]
}
@property
def yubikey_serial(self):
""" Returns the yubikey serial of the yubikey associated with this
user (if any).
"""
yubikey = self.yubikey
return yubikey and yubikey_otp_to_serial(yubikey) or None
#: sessions of this user
sessions = data_property()
#: tags of this user
tags = data_property()
#: the phone number of this user
phone_number = data_property()
def cleanup_sessions(self, request):
""" Removes stored sessions not valid anymore. """
self.sessions = self.sessions or {}
for session_id in list(self.sessions.keys()):
if not remembered(request.app, session_id):
del self.sessions[session_id]
def save_current_session(self, request):
""" Stores the current browser session. """
self.sessions = self.sessions or {}
self.sessions[request.browser_session._token] = {
'address': request.client_addr,
'timestamp': datetime.utcnow().isoformat(),
'agent': request.user_agent
}
self.cleanup_sessions(request)
def remove_current_session(self, request):
""" Removes the current browser session. """
token = request.browser_session._token
if self.sessions and token and token in self.sessions:
del self.sessions[token]
self.cleanup_sessions(request)
def logout_all_sessions(self, request):
""" Terminates all open browser sessions. """
self.sessions = self.sessions or {}
for session_id in self.sessions:
forget(request.app, session_id)
self.cleanup_sessions(request)
| [
"onegov.core.crypto.verify_password",
"onegov.core.crypto.hash_password",
"onegov.core.utils.remove_repeated_spaces",
"sqlalchemy.orm.backref",
"onegov.core.utils.is_valid_yubikey_format",
"onegov.core.security.remembered",
"datetime.datetime.utcnow",
"sqlalchemy.ForeignKey",
"onegov.core.utils.yubikey_otp_to_serial",
"sqlalchemy.UniqueConstraint",
"onegov.core.orm.mixins.data_property",
"sqlalchemy.func.trim",
"sqlalchemy.func.lower",
"sqlalchemy.Column",
"onegov.core.security.forget"
] | [((1138, 1165), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(True)'}), '(Text, nullable=True)\n', (1144, 1165), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((1826, 1887), 'sqlalchemy.Column', 'Column', (['UUID'], {'nullable': '(False)', 'primary_key': '(True)', 'default': 'uuid4'}), '(UUID, nullable=False, primary_key=True, default=uuid4)\n', (1832, 1887), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((1980, 2030), 'sqlalchemy.Column', 'Column', (['LowercaseText'], {'unique': '(True)', 'nullable': '(False)'}), '(LowercaseText, unique=True, nullable=False)\n', (1986, 2030), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((2132, 2160), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (2138, 2160), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((2229, 2257), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (2235, 2257), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((2604, 2631), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(True)'}), '(Text, nullable=True)\n', (2610, 2631), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((3560, 3587), 'sqlalchemy.Column', 'Column', (['JSON'], {'nullable': '(True)'}), '(JSON, nullable=True)\n', (3566, 3587), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((3965, 4006), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(True)', 'default': 'None'}), '(Text, nullable=True, default=None)\n', (3971, 4006), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((4229, 4270), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(True)', 'default': 'None'}), '(Text, nullable=True, default=None)\n', (4235, 4270), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((4319, 4364), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(True)'}), '(Boolean, nullable=False, default=True)\n', (4325, 4364), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((4426, 4467), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(True)', 'default': 'None'}), '(Text, nullable=True, default=None)\n', (4432, 4467), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((7706, 7721), 'onegov.core.orm.mixins.data_property', 'data_property', ([], {}), '()\n', (7719, 7721), False, 'from onegov.core.orm.mixins import data_property, TimestampMixin\n'), ((7759, 7774), 'onegov.core.orm.mixins.data_property', 'data_property', ([], {}), '()\n', (7772, 7774), False, 'from onegov.core.orm.mixins import data_property, TimestampMixin\n'), ((7832, 7847), 'onegov.core.orm.mixins.data_property', 'data_property', ([], {}), '()\n', (7845, 7847), False, 'from onegov.core.orm.mixins import data_property, TimestampMixin\n'), ((2325, 2349), 'sqlalchemy.ForeignKey', 'ForeignKey', (['UserGroup.id'], {}), '(UserGroup.id)\n', (2335, 2349), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((2995, 3022), 'sqlalchemy.Column', 'Column', (['JSON'], {'nullable': '(True)'}), '(JSON, nullable=True)\n', (3001, 3022), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((4572, 4636), 'sqlalchemy.UniqueConstraint', 
'UniqueConstraint', (['"""source"""', '"""source_id"""'], {'name': '"""unique_source_id"""'}), "('source', 'source_id', name='unique_source_id')\n", (4588, 4636), False, 'from sqlalchemy import UniqueConstraint\n'), ((5459, 5479), 'onegov.core.crypto.hash_password', 'hash_password', (['value'], {}), '(value)\n', (5472, 5479), False, 'from onegov.core.crypto import hash_password, verify_password\n'), ((5656, 5701), 'onegov.core.crypto.verify_password', 'verify_password', (['password', 'self.password_hash'], {}), '(password, self.password_hash)\n', (5671, 5701), False, 'from onegov.core.crypto import hash_password, verify_password\n'), ((2419, 2451), 'sqlalchemy.orm.backref', 'backref', (['"""users"""'], {'lazy': '"""dynamic"""'}), "('users', lazy='dynamic')\n", (2426, 2451), False, 'from sqlalchemy.orm import backref, deferred, relationship\n'), ((4528, 4548), 'sqlalchemy.func.lower', 'func.lower', (['username'], {}), '(username)\n', (4538, 4548), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((7251, 7283), 'onegov.core.utils.is_valid_yubikey_format', 'is_valid_yubikey_format', (['yubikey'], {}), '(yubikey)\n', (7274, 7283), False, 'from onegov.core.utils import is_valid_yubikey_format\n'), ((9027, 9058), 'onegov.core.security.forget', 'forget', (['request.app', 'session_id'], {}), '(request.app, session_id)\n', (9033, 9058), False, 'from onegov.core.security import forget, remembered\n'), ((5060, 5083), 'sqlalchemy.func.trim', 'func.trim', (['cls.realname'], {}), '(cls.realname)\n', (5069, 5083), False, 'from sqlalchemy import Boolean, Column, Index, Text, func, ForeignKey\n'), ((7622, 7652), 'onegov.core.utils.yubikey_otp_to_serial', 'yubikey_otp_to_serial', (['yubikey'], {}), '(yubikey)\n', (7643, 7652), False, 'from onegov.core.utils import yubikey_otp_to_serial\n'), ((8067, 8102), 'onegov.core.security.remembered', 'remembered', (['request.app', 'session_id'], {}), '(request.app, session_id)\n', (8077, 8102), False, 'from onegov.core.security import forget, remembered\n'), ((6522, 6554), 'onegov.core.utils.remove_repeated_spaces', 'remove_repeated_spaces', (['realname'], {}), '(realname)\n', (6544, 6554), False, 'from onegov.core.utils import remove_repeated_spaces\n'), ((8420, 8437), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (8435, 8437), False, 'from datetime import datetime\n')] |
from django.test import TestCase
from game.game import Game
class GameTest(TestCase):
def play_game(self, moves, result, msg):
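        """Play the given moves on a fresh game and assert the expected result."""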
g = Game()
for m in moves:
g.move(*m)
self.assertEqual(g.get_winner_or_draw(), result, msg)
def test_initial_board(self):
g = Game()
self.assertEqual(g.board, [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']])
def test_print_board(self):
pass
def test_move(self):
g = Game()
g.move('o', 0, 0)
self.assertEqual(g.board, [['o', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']])
g.move('x', 1, 0)
self.assertEqual(g.board, [['o', ' ', ' '], ['x', ' ', ' '], [' ', ' ', ' ']])
g.move('o', 2, 2)
self.assertEqual(g.board, [['o', ' ', ' '], ['x', ' ', ' '], [' ', ' ', 'o']])
g.move('x', 1, 2)
self.assertEqual(g.board, [['o', ' ', ' '], ['x', ' ', 'x'], [' ', ' ', 'o']])
def test_move_coordinates_not_integers(self):
pass
def test_illegal_moves(self):
g = Game()
with self.assertRaises(AssertionError):
g.move('o', 5, 0)
with self.assertRaises(AssertionError):
g.move('o', -1, 0)
with self.assertRaises(AssertionError):
g.move('o', 0, -1)
with self.assertRaises(AssertionError):
g.move('o', 0, 5)
def test_already_played_move_cant_be_played_again(self):
g = Game()
g.move('o', 1, 1)
with self.assertRaises(AssertionError):
g.move('x', 1, 1)
def test_consecutive_moves_not_the_same_player(self):
g = Game()
g.move('x', 0, 0)
with self.assertRaises(AssertionError):
g.move('x', 1, 0)
def test_illegal_players(self):
g = Game()
with self.assertRaises(AssertionError):
g.move('d', 0, 0)
with self.assertRaises(AssertionError):
g.move(' ', 0, 0)
with self.assertRaises(AssertionError):
g.move('-', 0, 0)
def test_moves(self):
g = Game()
m1 = ('o', 1, 1)
m2 = ('x', 2, 2)
m3 = ('o', 0, 1)
g.move(*m1)
g.move(*m2)
g.move(*m3)
self.assertListEqual([m1, m2, m3], g.moves)
def test_get_winner_or_draw(self):
# game 1 - diagonal win
self.play_game(
[('x', 0, 0), ('o', 0, 1), ('x', 1, 1), ('o', 1, 2), ('x', 2, 2)],
'x',
'Game should ended - diagonal win.',
)
# game 2 - horizontal win
self.play_game(
[('x', 0, 0), ('o', 1, 1), ('x', 0, 1), ('o', 1, 2), ('x', 0, 2)],
'x',
'Game should ended - horizontal win.',
)
# game 3 - vertical win
self.play_game(
[('x', 0, 0), ('o', 0, 1), ('x', 1, 0), ('o', 1, 1), ('x', 2, 0)],
'x',
'Game should ended - vertical win.',
)
# game 4 - more than 5 moves
self.play_game(
[('x', 0, 0), ('o', 0, 1), ('x', 1, 1), ('o', 1, 2), ('x', 2, 1), ('o', 0, 2), ('x', 2, 2)],
'x',
'Game should ended (more than 5 moves) - vertical win.',
)
# game 5 - draw
self.play_game(
[('x', 1, 1), ('o', 0, 0), ('x', 2, 2), ('o', 0, 2), ('x', 0, 1), ('o', 2, 1), ('x', 1, 0), ('o', 1, 2),
('x', 2, 0)],
'draw',
'Game should ended - draw',
)
# game 6 - the other diagonal win
self.play_game(
[('x', 2, 0), ('o', 1, 0), ('x', 1, 1), ('o', 0, 1), ('x', 0, 2)],
'x',
'Game should ended - diagonal win.',
)
def test_get_empty_field(self):
g = Game()
g.get_empty_fields()
self.assertEqual(g.get_empty_fields(), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)])
g.move('x', 1, 1)
g.move('o', 0, 0)
g.move('x', 2, 2)
g.move('o', 1, 0)
self.assertEqual(g.get_empty_fields(), [(0, 1), (0, 2), (1, 2), (2, 0), (2, 1)])
def test_next_random_move(self):
g = Game()
g.move('x', 1, 1)
g.move('o', 0, 0)
g.move('x', 2, 2)
g.move('o', 1, 0)
self.assertIn(g.next_random_move(), [(0, 1), (0, 2), (1, 2), (2, 0), (2, 1)])
| [
"game.game.Game"
] | [((147, 153), 'game.game.Game', 'Game', ([], {}), '()\n', (151, 153), False, 'from game.game import Game\n'), ((310, 316), 'game.game.Game', 'Game', ([], {}), '()\n', (314, 316), False, 'from game.game import Game\n'), ((488, 494), 'game.game.Game', 'Game', ([], {}), '()\n', (492, 494), False, 'from game.game import Game\n'), ((1062, 1068), 'game.game.Game', 'Game', ([], {}), '()\n', (1066, 1068), False, 'from game.game import Game\n'), ((1461, 1467), 'game.game.Game', 'Game', ([], {}), '()\n', (1465, 1467), False, 'from game.game import Game\n'), ((1644, 1650), 'game.game.Game', 'Game', ([], {}), '()\n', (1648, 1650), False, 'from game.game import Game\n'), ((1805, 1811), 'game.game.Game', 'Game', ([], {}), '()\n', (1809, 1811), False, 'from game.game import Game\n'), ((2088, 2094), 'game.game.Game', 'Game', ([], {}), '()\n', (2092, 2094), False, 'from game.game import Game\n'), ((3759, 3765), 'game.game.Game', 'Game', ([], {}), '()\n', (3763, 3765), False, 'from game.game import Game\n'), ((4159, 4165), 'game.game.Game', 'Game', ([], {}), '()\n', (4163, 4165), False, 'from game.game import Game\n')] |
import time
import cv2
import os
import pytesseract
from PIL import Image, ImageEnhance, ImageFilter
tessdata_dir_config = '--psm 6 --tessdata-dir "C:\\Program Files (x86)\\Tesseract-OCR\\tessdata"'
# Example config: '--tessdata-dir "C:\\Program Files (x86)\\Tesseract-OCR\\tessdata"'
# It's important to include double quotes around the dir path.
camera_port = input("Enter your cam port (default = 0): ")
camera = cv2.VideoCapture(int(camera_port))
time.sleep(0.1)
index=0
while camera.isOpened():
ret,frame=camera.read()
index=index+1
if ((index%5)==0):
imgH, imgW, _ = frame.shape
x1,y1,w1,h1=0,0,imgH,imgW
imgGrey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
_, thrash = cv2.threshold(imgGrey, 80, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thrash, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
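        # classify each contour by the vertex count of its polygon approximation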
for contour in contours:
approx = cv2.approxPolyDP(contour, 0.03* cv2.arcLength(contour, True), True)
x = approx.ravel()[0]
y = approx.ravel()[1] - 5
x1 ,y1, w, h = cv2.boundingRect(approx)
if w > 20 and h> 20 and w<500:
cv2.drawContours(frame, [approx], 0, (0, 255, 0), 5)
if len(approx) == 3:
cv2.putText(frame, "Triangle", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
elif len(approx) == 4:
aspectRatio = float(w)/h
#print(w)
if aspectRatio >= 0.95 and aspectRatio <= 1.05:
cv2.putText(frame, "square", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
else:
cv2.putText(frame, "rectangle", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
elif len(approx) == 5:
cv2.putText(frame, "Pentagon", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
elif len(approx) == 6:
cv2.putText(frame, "Hexagon", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
elif len(approx) == 10:
cv2.putText(frame, "Star", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
else:
cv2.putText(frame, "Circle", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
cv2.imshow('Shapes', frame)
#cv2.imshow('Thrash', thrash)
if cv2.waitKey(2) & 0xFF == ord('q'):
break
#name = './Images/image_frames' + str(index) + '.png'
#print ('frames')
# cv2.imwrite(name,frame)
#if cv2.waitkey(10)& 0xff == ord('q'):
# break
camera.release()
cv2.destroyAllWindows() | [
"cv2.drawContours",
"cv2.threshold",
"cv2.arcLength",
"time.sleep",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.findContours",
"cv2.waitKey",
"cv2.boundingRect"
] | [((463, 478), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (473, 478), False, 'import time\n'), ((2320, 2343), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2341, 2343), False, 'import cv2\n'), ((656, 695), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (668, 695), False, 'import cv2\n'), ((711, 761), 'cv2.threshold', 'cv2.threshold', (['imgGrey', '(80)', '(255)', 'cv2.THRESH_BINARY'], {}), '(imgGrey, 80, 255, cv2.THRESH_BINARY)\n', (724, 761), False, 'import cv2\n'), ((779, 841), 'cv2.findContours', 'cv2.findContours', (['thrash', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(thrash, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (795, 841), False, 'import cv2\n'), ((2029, 2056), 'cv2.imshow', 'cv2.imshow', (['"""Shapes"""', 'frame'], {}), "('Shapes', frame)\n", (2039, 2056), False, 'import cv2\n'), ((1026, 1050), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (1042, 1050), False, 'import cv2\n'), ((1091, 1143), 'cv2.drawContours', 'cv2.drawContours', (['frame', '[approx]', '(0)', '(0, 255, 0)', '(5)'], {}), '(frame, [approx], 0, (0, 255, 0), 5)\n', (1107, 1143), False, 'import cv2\n'), ((2096, 2110), 'cv2.waitKey', 'cv2.waitKey', (['(2)'], {}), '(2)\n', (2107, 2110), False, 'import cv2\n'), ((915, 943), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (928, 943), False, 'import cv2\n'), ((1176, 1265), 'cv2.putText', 'cv2.putText', (['frame', '"""Triangle"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'Triangle', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0,\n 255), 2)\n", (1187, 1265), False, 'import cv2\n'), ((1396, 1484), 'cv2.putText', 'cv2.putText', (['frame', '"""square"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'square', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, \n 255), 2)\n", (1407, 1484), False, 'import cv2\n'), ((1497, 1588), 'cv2.putText', 'cv2.putText', (['frame', '"""rectangle"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'rectangle', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, \n 0, 255), 2)\n", (1508, 1588), False, 'import cv2\n'), ((1615, 1704), 'cv2.putText', 'cv2.putText', (['frame', '"""Pentagon"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'Pentagon', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0,\n 255), 2)\n", (1626, 1704), False, 'import cv2\n'), ((1732, 1820), 'cv2.putText', 'cv2.putText', (['frame', '"""Hexagon"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'Hexagon', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0,\n 255), 2)\n", (1743, 1820), False, 'import cv2\n'), ((1849, 1935), 'cv2.putText', 'cv2.putText', (['frame', '"""Star"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'Star', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, \n 255), 2)\n", (1860, 1935), False, 'import cv2\n'), ((1945, 2033), 'cv2.putText', 'cv2.putText', (['frame', '"""Circle"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), "(frame, 'Circle', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, \n 255), 2)\n", (1956, 2033), False, 'import cv2\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time : 2018/10/16 19:39
# Author : gaojiewen
# Version : 1.0
# Desc : Test adb.py
import unittest
from eadb import android_cmd
class AndroidCMDTest(unittest.TestCase):
def setUp(self):
self.adb = android_cmd.AndroidAdb()
def test_get_devices(self):
print(self.adb.devices())
self.assertIsNotNone(self.adb.devices())
| [
"eadb.android_cmd.AndroidAdb"
] | [((267, 291), 'eadb.android_cmd.AndroidAdb', 'android_cmd.AndroidAdb', ([], {}), '()\n', (289, 291), False, 'from eadb import android_cmd\n')] |
#!/usr/bin/env python3.6
from collections import OrderedDict
import numpy as np
# from scipy import ndimage
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
import rospy
# import matplotlib.pyplot as plt
# import time
class reinforcement_module(nn.Module):
def __init__(self, args, model=None):
"""
Args:
args - dictionary:
device: torch device, cuda or cpu
lr: learning rate
weight_decay: weight decay
"""
super(reinforcement_module, self).__init__()
self.model_name = model
if model == 'densenet': # Densenet
# for the laptop used it is better to save memory when running on gpu
if args['device'] == torch.device('cuda'):
memory_efficient = True
else:
memory_efficient = False
# use pre-trained densenet121 to estimate the features
self.color_features = torchvision.models.densenet.densenet121(pretrained = True, memory_efficient = memory_efficient).to(args['device']).features
self.depth_features = torchvision.models.densenet.densenet121(pretrained = True, memory_efficient = memory_efficient).to(args['device']).features
n_features = 2 * 1024
elif model == 'resnext':
            # use ResNeXt-50-32x4d to estimate the features (the classifier head is stripped below)
color_features = torchvision.models.resnext50_32x4d(pretrained=True).to(args['device'])
depth_features = torchvision.models.resnext50_32x4d(pretrained=True).to(args['device'])
self.color_features = nn.Sequential(*list(color_features.children())[:-2])
self.depth_features = nn.Sequential(*list(depth_features.children())[:-2])
n_features = 2 * 2048
elif model == 'mobilenet':
            # use MobileNetV2 to estimate the features
self.color_features = torchvision.models.mobilenet_v2(pretrained = True).to(args['device']).features
self.depth_features = torchvision.models.mobilenet_v2(pretrained = True).to(args['device']).features
n_features = 2 * 1280
elif model == 'mnasnet':
            # use MNASNet 1.0 to estimate the features
self.color_features = torchvision.models.mnasnet1_0(pretrained = True).to(args['device']).layers
self.depth_features = torchvision.models.mnasnet1_0(pretrained = True).to(args['device']).layers
n_features = 2 * 1280
else:
raise Exception('Model name not recognized.')
# each feature net gives n features that will be concatenated in channels
self.net = nn.Sequential(OrderedDict([
('neural-norm0', nn.BatchNorm2d(n_features)),
('neural-relu0', nn.ReLU(inplace=True)),
('neural-conv0', nn.Conv2d(n_features, 64, kernel_size=1, stride=1, bias=False)),
('neural-norm1', nn.BatchNorm2d(64)),
('neural-relu1', nn.ReLU(inplace=True)),
('neural-conv1', nn.Conv2d(64, 1, kernel_size=1, stride=1, bias=False)),
('neural-upsam', nn.Upsample(scale_factor=16, mode='bilinear', align_corners=True))
])).to(args['device'])
# multiple learning rate
ph = 0.1 # factor to reduce the learning rate of the pre-trained net
self.optimizer = optim.Adam([
{'params': self.color_features.parameters(), 'lr':args['lr']*ph, 'weight_decay': args['weight_decay']*ph},
{'params': self.depth_features.parameters(), 'lr':args['lr']*ph, 'weight_decay': args['weight_decay']*ph},
{'params': self.net.parameters(), 'lr': args['lr'], 'weight_decay': args['weight_decay']}], lr=0)
self.criterion = torch.nn.SmoothL1Loss(reduction = 'sum') # Huber loss
# Initialize network weights
# https://arxiv.org/pdf/1502.01852.pdf
for m in self.named_modules():
if 'neural-' in m[0]:
if isinstance(m[1], nn.Conv2d):
# print(m[1])
nn.init.kaiming_normal_(m[1].weight.data)
elif isinstance(m[1], nn.BatchNorm2d):
m[1].weight.data.fill_(1)
m[1].bias.data.zero_()
def forward(self, input_color_data, input_depth_data):
# Compute features
color_features = self.color_features(input_color_data)
depth_features = self.depth_features(input_depth_data)
# each feature net gives 1024 features with 7x7 that will be concatenated in channels
features = torch.cat((color_features, depth_features), dim=1)
# print(color_features.size()) # torch.Size([1, 1024, 7, 7])
# print(depth_features.size()) # torch.Size([1, 1024, 7, 7])
# print(features.size()) # torch.Size([1, 2048, 7, 7])
# Pass through the net to estimate the Q-values
q_values = self.net(features)
return q_values
class kinematic_reinforcement_module(nn.Module):
def __init__(self, args):
"""
Args:
args - dictionary:
device: torch device, cuda or cpu
lr: learning rate
weight_decay: weight decay
"""
super(kinematic_reinforcement_module, self).__init__()
self.args = args
# for the laptop used it is better to save memory when running on gpu
if args['device'] == torch.device('cuda'):
memory_efficient = True
else:
memory_efficient = False
# use pre-trained densenet121 to estimate the features
self.color_features = torchvision.models.densenet.densenet121(pretrained = True, memory_efficient = memory_efficient).to(args['device']).features
self.depth_features = torchvision.models.densenet.densenet121(pretrained = True, memory_efficient = memory_efficient).to(args['device']).features
# each feature net gives 1024 features that will be concatenated in channels
# the output of this layer is 112 x 112 = 12544
self.net = nn.Sequential(OrderedDict([
('neural-norm0', nn.BatchNorm2d(2048)),
('neural-relu0', nn.ReLU(inplace=True)),
('neural-conv0', nn.Conv2d(2048, 64, kernel_size=1, stride=1, bias=False)),
('neural-norm1', nn.BatchNorm2d(64)),
('neural-relu1', nn.ReLU(inplace=True)),
('neural-conv1', nn.Conv2d(64, 1, kernel_size=1, stride=1, bias=False)),
('neural-upsam', nn.Upsample(scale_factor=16, mode='bilinear', align_corners=True))
])).to(args['device'])
self.kinnet = nn.Sequential(OrderedDict([
('kinnet-linear0', nn.Linear(12551, 4096)), # input = 12544 + 7 = 12551
('kinnet-relu0', nn.ReLU(inplace=True)),
('kinnet-linear1', nn.Linear(4096, 1024)),
('kinnet-relu1', nn.ReLU(inplace=True)),
('kinnet-linear2', nn.Linear(1024, 14))
])).to(args['device'])
# multiple learning rate
ph = 0.1 # factor to reduce the learning rate of the pre-trained net
self.optimizer = optim.Adam([
{'params': self.color_features.parameters(), 'lr':args['lr']*ph, 'weight_decay': args['weight_decay']*ph},
{'params': self.depth_features.parameters(), 'lr':args['lr']*ph, 'weight_decay': args['weight_decay']*ph},
{'params': self.net.parameters(), 'lr': args['lr'], 'weight_decay': args['weight_decay']},
{'params': self.kinnet.parameters(), 'lr': args['lr'], 'weight_decay': args['weight_decay']}], lr=0)
self.criterion = torch.nn.SmoothL1Loss(reduction = 'sum') # Huber loss
# Initialize network weights
for m in self.named_modules():
if 'neural-' in m[0]:
if isinstance(m[1], nn.Conv2d):
# print(m[1])
nn.init.kaiming_normal_(m[1].weight.data)
elif isinstance(m[1], nn.BatchNorm2d):
m[1].weight.data.fill_(1)
m[1].bias.data.zero_()
def forward(self, input_color_data, input_depth_data, pose):
"""
Args:
input_color_data: tensor with color image
input_depth_data: tensor with depth image
pose: object Pose() of the current pose of the tool
"""
# Compute features
color_features = self.color_features(input_color_data)
depth_features = self.depth_features(input_depth_data)
# each feature net gives 1024 features with 7x7 that will be concatenated in channels
# print(color_features.size())
# print(depth_features.size())
image_features = torch.cat((color_features, depth_features), dim=1)
# print(image_features.size())
# Pass through the net to estimate the Q-values
q_values = self.net(image_features)
q_values = torch.flatten(q_values, start_dim=1).squeeze()
# print(q_values.size()[0])
# print(pose.size())
if q_values.size()[0] == 12544:
features = torch.cat([q_values, pose])
else:
features = torch.cat([q_values, pose], dim=1)
q_values_kin = self.kinnet(features.float())
return q_values_kin | [
"torchvision.models.resnext50_32x4d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_",
"torch.nn.Conv2d",
"torch.cat",
"torchvision.models.mnasnet1_0",
"torch.nn.Upsample",
"torch.nn.Linear",
"torchvision.models.mobilenet_v2",
"torch.nn.SmoothL1Loss",
"torchvision.models.densenet.densenet121",
"torch.flatten",
"torch.device"
] | [((3912, 3950), 'torch.nn.SmoothL1Loss', 'torch.nn.SmoothL1Loss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (3933, 3950), False, 'import torch\n'), ((4740, 4790), 'torch.cat', 'torch.cat', (['(color_features, depth_features)'], {'dim': '(1)'}), '((color_features, depth_features), dim=1)\n', (4749, 4790), False, 'import torch\n'), ((7782, 7820), 'torch.nn.SmoothL1Loss', 'torch.nn.SmoothL1Loss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (7803, 7820), False, 'import torch\n'), ((8863, 8913), 'torch.cat', 'torch.cat', (['(color_features, depth_features)'], {'dim': '(1)'}), '((color_features, depth_features), dim=1)\n', (8872, 8913), False, 'import torch\n'), ((5596, 5616), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5608, 5616), False, 'import torch\n'), ((9250, 9277), 'torch.cat', 'torch.cat', (['[q_values, pose]'], {}), '([q_values, pose])\n', (9259, 9277), False, 'import torch\n'), ((9315, 9349), 'torch.cat', 'torch.cat', (['[q_values, pose]'], {'dim': '(1)'}), '([q_values, pose], dim=1)\n', (9324, 9349), False, 'import torch\n'), ((847, 867), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (859, 867), False, 'import torch\n'), ((9074, 9110), 'torch.flatten', 'torch.flatten', (['q_values'], {'start_dim': '(1)'}), '(q_values, start_dim=1)\n', (9087, 9110), False, 'import torch\n'), ((4227, 4268), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m[1].weight.data'], {}), '(m[1].weight.data)\n', (4250, 4268), True, 'import torch.nn as nn\n'), ((5799, 5895), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': '(True)', 'memory_efficient': 'memory_efficient'}), '(pretrained=True, memory_efficient=\n memory_efficient)\n', (5838, 5895), False, 'import torchvision\n'), ((5953, 6049), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': '(True)', 'memory_efficient': 'memory_efficient'}), '(pretrained=True, memory_efficient=\n memory_efficient)\n', (5992, 6049), False, 'import torchvision\n'), ((8050, 8091), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m[1].weight.data'], {}), '(m[1].weight.data)\n', (8073, 8091), True, 'import torch.nn as nn\n'), ((1069, 1165), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': '(True)', 'memory_efficient': 'memory_efficient'}), '(pretrained=True, memory_efficient=\n memory_efficient)\n', (1108, 1165), False, 'import torchvision\n'), ((1227, 1323), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': '(True)', 'memory_efficient': 'memory_efficient'}), '(pretrained=True, memory_efficient=\n memory_efficient)\n', (1266, 1323), False, 'import torchvision\n'), ((1585, 1636), 'torchvision.models.resnext50_32x4d', 'torchvision.models.resnext50_32x4d', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1619, 1636), False, 'import torchvision\n'), ((1685, 1736), 'torchvision.models.resnext50_32x4d', 'torchvision.models.resnext50_32x4d', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1719, 1736), False, 'import torchvision\n'), ((2094, 2142), 'torchvision.models.mobilenet_v2', 'torchvision.models.mobilenet_v2', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2125, 2142), False, 'import torchvision\n'), ((2207, 2255), 'torchvision.models.mobilenet_v2', 'torchvision.models.mobilenet_v2', ([], {'pretrained': '(True)'}), 
'(pretrained=True)\n', (2238, 2255), False, 'import torchvision\n'), ((2897, 2923), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_features'], {}), '(n_features)\n', (2911, 2923), True, 'import torch.nn as nn\n'), ((2955, 2976), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2962, 2976), True, 'import torch.nn as nn\n'), ((3008, 3070), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_features', '(64)'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(n_features, 64, kernel_size=1, stride=1, bias=False)\n', (3017, 3070), True, 'import torch.nn as nn\n'), ((3102, 3120), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (3116, 3120), True, 'import torch.nn as nn\n'), ((3152, 3173), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3159, 3173), True, 'import torch.nn as nn\n'), ((3205, 3258), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(64, 1, kernel_size=1, stride=1, bias=False)\n', (3214, 3258), True, 'import torch.nn as nn\n'), ((3290, 3355), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(16)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=16, mode='bilinear', align_corners=True)\n", (3301, 3355), True, 'import torch.nn as nn\n'), ((6295, 6315), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2048)'], {}), '(2048)\n', (6309, 6315), True, 'import torch.nn as nn\n'), ((6347, 6368), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6354, 6368), True, 'import torch.nn as nn\n'), ((6400, 6456), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(64)'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(2048, 64, kernel_size=1, stride=1, bias=False)\n', (6409, 6456), True, 'import torch.nn as nn\n'), ((6488, 6506), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6502, 6506), True, 'import torch.nn as nn\n'), ((6538, 6559), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6545, 6559), True, 'import torch.nn as nn\n'), ((6591, 6644), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(64, 1, kernel_size=1, stride=1, bias=False)\n', (6600, 6644), True, 'import torch.nn as nn\n'), ((6676, 6741), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(16)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=16, mode='bilinear', align_corners=True)\n", (6687, 6741), True, 'import torch.nn as nn\n'), ((6856, 6878), 'torch.nn.Linear', 'nn.Linear', (['(12551)', '(4096)'], {}), '(12551, 4096)\n', (6865, 6878), True, 'import torch.nn as nn\n'), ((6938, 6959), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6945, 6959), True, 'import torch.nn as nn\n'), ((6993, 7014), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(1024)'], {}), '(4096, 1024)\n', (7002, 7014), True, 'import torch.nn as nn\n'), ((7046, 7067), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7053, 7067), True, 'import torch.nn as nn\n'), ((7101, 7120), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(14)'], {}), '(1024, 14)\n', (7110, 7120), True, 'import torch.nn as nn\n'), ((2448, 2494), 'torchvision.models.mnasnet1_0', 'torchvision.models.mnasnet1_0', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2477, 2494), False, 'import torchvision\n'), ((2557, 2603), 'torchvision.models.mnasnet1_0', 'torchvision.models.mnasnet1_0', ([], 
{'pretrained': '(True)'}), '(pretrained=True)\n', (2586, 2603), False, 'import torchvision\n')] |
from django.db import models
class Player(models.Model):
name = models.CharField(max_length=64, unique=True)
position = models.CharField(max_length=64)
goals = models.PositiveIntegerField(default=0)
class Meta:
verbose_name = "player"
verbose_name_plural = "players"
def __str__(self):
return f"{self.name} {self.position} {self.goals}"
| [
"django.db.models.PositiveIntegerField",
"django.db.models.CharField"
] | [((70, 114), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)'}), '(max_length=64, unique=True)\n', (86, 114), False, 'from django.db import models\n'), ((130, 161), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (146, 161), False, 'from django.db import models\n'), ((174, 212), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (201, 212), False, 'from django.db import models\n')] |
import matplotlib.pyplot as plt
import colorspacious
import numpy as np
N = 1200
a, b = np.meshgrid(np.linspace(-100, 100, N), np.linspace(-100, 100, N))
for L in (20, 50, 80):
lab = np.full((N, N, 3), L)
lab[:, :, 1] = a
lab[:, :, 2] = b
rgb = colorspacious.cspace_convert(lab, 'CIELab', 'sRGB1')
# rgb = np.clip(rgb, 0, 1)
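    # mask colors that fall outside the sRGB gamut to black instead of clipping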
rgb[np.any((rgb < 0) | (rgb > 1), axis=-1)] = 0
w, h = plt.rcParams['figure.figsize']
fig = plt.figure(figsize=(h, h))
ax = fig.add_subplot(1, 1, 1)
img = plt.imshow(rgb[::-1], extent=[-100, 100, -100, 100])
img.set_rasterized(True)
ax.set_title(f'$L* = {L}$')
ax.set_xlabel('a*')
ax.set_ylabel('b*')
fig.tight_layout()
fig.savefig(f'build/plots/lab_{L}.pdf')
| [
"matplotlib.pyplot.imshow",
"colorspacious.cspace_convert",
"numpy.any",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.full"
] | [((102, 127), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', 'N'], {}), '(-100, 100, N)\n', (113, 127), True, 'import numpy as np\n'), ((129, 154), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', 'N'], {}), '(-100, 100, N)\n', (140, 154), True, 'import numpy as np\n'), ((190, 211), 'numpy.full', 'np.full', (['(N, N, 3)', 'L'], {}), '((N, N, 3), L)\n', (197, 211), True, 'import numpy as np\n'), ((265, 317), 'colorspacious.cspace_convert', 'colorspacious.cspace_convert', (['lab', '"""CIELab"""', '"""sRGB1"""'], {}), "(lab, 'CIELab', 'sRGB1')\n", (293, 317), False, 'import colorspacious\n'), ((454, 480), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(h, h)'}), '(figsize=(h, h))\n', (464, 480), True, 'import matplotlib.pyplot as plt\n'), ((526, 578), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb[::-1]'], {'extent': '[-100, 100, -100, 100]'}), '(rgb[::-1], extent=[-100, 100, -100, 100])\n', (536, 578), True, 'import matplotlib.pyplot as plt\n'), ((357, 395), 'numpy.any', 'np.any', (['((rgb < 0) | (rgb > 1))'], {'axis': '(-1)'}), '((rgb < 0) | (rgb > 1), axis=-1)\n', (363, 395), True, 'import numpy as np\n')] |
from dsmpy.modelparameters import ModelParameters, ParameterType
from dsmpy.seismicmodel import SeismicModel
from dsmpy.station import Station
from dsmpy.event import Event
from dsmpy.utils.cmtcatalog import read_catalog
from dsmpy.dataset import Dataset
from dsmpy.dsm import PyDSMInput, compute
from pytomo.utilities import white_noise
import numpy as np
import matplotlib.pyplot as plt
def get_model(
n_upper_mantle=20, n_mtz=10, n_lower_mantle=12,
types=[ParameterType.VSH], verbose=0):
'''Boxcar mesh using ak135 as reference model for the structure of
the upper mantle and transition zone down to 1000 km depth.
'''
ak135 = SeismicModel.ak135()
# model parameters
depth_moho = 6371. - 6336.6
depth_410 = 410.
depth_660 = 660.
depth_max = 1000.
rs_upper_mantle = np.linspace(depth_410, depth_moho, n_upper_mantle+1)
rs_mtz = np.linspace(depth_660, depth_410, n_mtz, endpoint=False)
rs_lower_mantle = np.linspace(
depth_max, depth_660, n_lower_mantle, endpoint=False)
radii = 6371. - np.round(
np.hstack((rs_lower_mantle, rs_mtz, rs_upper_mantle)), 4)
if verbose > 1:
print('dr_um={}, dr_mtz={}, dr_lm={}'.format(
rs_upper_mantle[1] - rs_upper_mantle[0],
rs_mtz[1] - rs_mtz[0],
rs_lower_mantle[1] - rs_lower_mantle[0]))
model_params = ModelParameters(types, radii, mesh_type='boxcar')
# mesh
# model, mesh = ak135.boxcar_mesh(model_params)
return ak135, model_params
def get_model_lininterp(
n_mtz=10, n_lower_mantle=12,
types=[ParameterType.VSH], discontinuous=True, verbose=0):
"""Boxcar mesh using ak135 as reference model for the structure of
the upper mantle and transition zone down to 1000 km depth.
"""
ak135 = SeismicModel.ak135()
radii = np.array(
[5371, 5611, 5711, 5836, 5961, 6161, 6251, 6336.6])
model_params = ModelParameters(types, radii, mesh_type='lininterp')
# mesh
model = ak135.lininterp_mesh(model_params, discontinuous=discontinuous)
return model, model_params
def get_ref_event():
catalog = read_catalog()
event = Event.event_from_catalog(
catalog, '200707211534A')
event.source_time_function.half_duration = 2.
return event
def get_dataset(
model, tlen=1638.4, nspc=64, sampling_hz=20, mode=0,
add_noise=False, noise_normalized_std=1.):
#TODO fix outputs.us=NaN when event.latitude==station.latitude
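    # build a line of 61 synthetic stations at increasing distance north of the event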
event = get_ref_event()
events = [event]
stations = [
Station(
'{:03d}'.format(i), 'DSM',
event.latitude+5+0.5*i, event.longitude+0.1)
for i in range(61)]
dataset = Dataset.dataset_from_arrays(
events, [stations], sampling_hz=sampling_hz)
pydsm_input = PyDSMInput.input_from_arrays(
event, stations, model, tlen, nspc, sampling_hz)
pydsm_output = compute(pydsm_input, mode=mode)
pydsm_output.to_time_domain()
dataset.data = np.zeros((1,)+pydsm_output.us.shape, dtype=np.float64)
dataset.data[0] = pydsm_output.us
if add_noise:
noise_arr = white_noise(
noise_normalized_std, dataset.data.shape)
npts_cut = int(dataset.data.shape[3]*0.9)
norm = np.abs(
dataset.data[:,:,:npts_cut]).max(axis=3, keepdims=True)
noise_arr *= norm
dataset.data += noise_arr
return dataset, pydsm_output
def get_model_syntest1():
model_ref = SeismicModel.ak135()
types = [ParameterType.VSH]
radii = 6371. - np.array([493.33, 410.])
model_params = ModelParameters(types, radii, mesh_type='boxcar')
model, mesh = model_ref.boxcar_mesh(model_params)
values = np.array(
[.2 * (-1)**i for i in range(model_params._n_grd_params)])
values_dict = {param_type: values for param_type in types}
values_mat = model_params.get_values_matrix(values_dict)
mesh_mul = mesh.multiply(model_params.get_nodes(), values_mat)
model_mul = model + mesh_mul
return model_mul
def get_model_syntest2():
types = [ParameterType.VSH]
model_ref, model_params = get_model(
20, 10, 12, types=types)
model, mesh = model_ref.boxcar_mesh(model_params)
values = np.array(
[.2 * (-1)**i for i in range(model_params._n_grd_params)])
values_dict = {param_type: values for param_type in types}
values_mat = model_params.get_values_matrix(values_dict)
mesh_mul = mesh.multiply(model_params.get_nodes(), values_mat)
model_mul = model + mesh_mul
return model_mul
def get_model_syntest3():
types = [ParameterType.VSH, ParameterType.RADIUS]
model, model_params = get_model_lininterp(
types=types, verbose=0, discontinuous=True)
values_vsh = np.zeros(model_params._n_grd_params)
values_r = np.array(
[0, 0, 0, 0, 30., 30., 0, 0, -30, -30, 0, 0, 0, 0, 0, 0])
values_dict = {
ParameterType.VSH: values_vsh,
ParameterType.RADIUS: values_r}
model_mul = model.build_model(
model, model_params, values_dict)
return model_mul
def get_dataset_syntest1(
tlen=1638.4, nspc=256, sampling_hz=20, mode=0,
add_noise=False, noise_normalized_std=1.):
return get_dataset(
get_model_syntest1(), tlen, nspc, sampling_hz, mode,
add_noise, noise_normalized_std)
def get_dataset_syntest2(
tlen=1638.4, nspc=256, sampling_hz=20, mode=0,
add_noise=False, noise_normalized_std=1.):
return get_dataset(
get_model_syntest2(), tlen, nspc, sampling_hz, mode,
add_noise, noise_normalized_std)
def get_dataset_syntest3(
tlen=1638.4, nspc=256, sampling_hz=20, mode=0,
add_noise=False, noise_normalized_std=1.):
return get_dataset(
get_model_syntest3(), tlen, nspc, sampling_hz, mode,
add_noise, noise_normalized_std)
if __name__ == '__main__':
model, model_params = get_model_lininterp(
types=[ParameterType.VSH, ParameterType.RADIUS])
fig, ax = get_model_syntest3().plot()
SeismicModel.ak135().plot(ax=ax)
plt.show()
dataset, _ = get_dataset_syntest3(nspc=64, mode=2)
dataset.plot_event(0)
plt.show()
fig, ax = SeismicModel.ak135().plot(
types=[ParameterType.VSH], dr=.5, label='')
mask_dict = dict()
mask_dict[ParameterType.VSH] = np.ones(
model_params._n_grd_params//2, dtype='bool')
mask_dict[ParameterType.RADIUS] = np.zeros(
model_params._n_grd_params//2, dtype='bool')
mask_dict[ParameterType.VSH][0] = False
mask_dict[ParameterType.VSH][-1] = False
for i in range(2,5):
mask_dict[ParameterType.RADIUS][i] = True
discon_arr = np.zeros(
model_params._n_grd_params//2, dtype='bool')
discon_arr[2] = True
discon_arr[4] = True
model_params.set_constraints(
mask_dict=mask_dict,
discon_arr=discon_arr)
values_vsh = np.array(
[0.2*(-1)**(i//2) for i in range(model_params._n_grd_params)])
values_r = np.array(
[0. for i in range(model_params._n_grd_params)])
values_dict = {
ParameterType.VSH: values_vsh,
ParameterType.RADIUS: values_r}
model_mul = model.build_model(
model, model_params, values_dict)
print(model_params.get_n_params())
print(model_params.get_free_indices())
model_mul.plot(
types=[ParameterType.VSH], dr=1., label='model', ax=ax
)
plt.show()
plt.close(fig)
| [
"pytomo.utilities.white_noise",
"numpy.abs",
"numpy.ones",
"numpy.hstack",
"dsmpy.modelparameters.ModelParameters",
"dsmpy.event.Event.event_from_catalog",
"dsmpy.dsm.compute",
"dsmpy.dsm.PyDSMInput.input_from_arrays",
"dsmpy.utils.cmtcatalog.read_catalog",
"dsmpy.seismicmodel.SeismicModel.ak135",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.close",
"dsmpy.dataset.Dataset.dataset_from_arrays",
"matplotlib.pyplot.show"
] | [((667, 687), 'dsmpy.seismicmodel.SeismicModel.ak135', 'SeismicModel.ak135', ([], {}), '()\n', (685, 687), False, 'from dsmpy.seismicmodel import SeismicModel\n'), ((829, 883), 'numpy.linspace', 'np.linspace', (['depth_410', 'depth_moho', '(n_upper_mantle + 1)'], {}), '(depth_410, depth_moho, n_upper_mantle + 1)\n', (840, 883), True, 'import numpy as np\n'), ((895, 951), 'numpy.linspace', 'np.linspace', (['depth_660', 'depth_410', 'n_mtz'], {'endpoint': '(False)'}), '(depth_660, depth_410, n_mtz, endpoint=False)\n', (906, 951), True, 'import numpy as np\n'), ((974, 1039), 'numpy.linspace', 'np.linspace', (['depth_max', 'depth_660', 'n_lower_mantle'], {'endpoint': '(False)'}), '(depth_max, depth_660, n_lower_mantle, endpoint=False)\n', (985, 1039), True, 'import numpy as np\n'), ((1381, 1430), 'dsmpy.modelparameters.ModelParameters', 'ModelParameters', (['types', 'radii'], {'mesh_type': '"""boxcar"""'}), "(types, radii, mesh_type='boxcar')\n", (1396, 1430), False, 'from dsmpy.modelparameters import ModelParameters, ParameterType\n'), ((1819, 1839), 'dsmpy.seismicmodel.SeismicModel.ak135', 'SeismicModel.ak135', ([], {}), '()\n', (1837, 1839), False, 'from dsmpy.seismicmodel import SeismicModel\n'), ((1852, 1912), 'numpy.array', 'np.array', (['[5371, 5611, 5711, 5836, 5961, 6161, 6251, 6336.6]'], {}), '([5371, 5611, 5711, 5836, 5961, 6161, 6251, 6336.6])\n', (1860, 1912), True, 'import numpy as np\n'), ((1942, 1994), 'dsmpy.modelparameters.ModelParameters', 'ModelParameters', (['types', 'radii'], {'mesh_type': '"""lininterp"""'}), "(types, radii, mesh_type='lininterp')\n", (1957, 1994), False, 'from dsmpy.modelparameters import ModelParameters, ParameterType\n'), ((2154, 2168), 'dsmpy.utils.cmtcatalog.read_catalog', 'read_catalog', ([], {}), '()\n', (2166, 2168), False, 'from dsmpy.utils.cmtcatalog import read_catalog\n'), ((2181, 2231), 'dsmpy.event.Event.event_from_catalog', 'Event.event_from_catalog', (['catalog', '"""200707211534A"""'], {}), "(catalog, '200707211534A')\n", (2205, 2231), False, 'from dsmpy.event import Event\n'), ((2726, 2798), 'dsmpy.dataset.Dataset.dataset_from_arrays', 'Dataset.dataset_from_arrays', (['events', '[stations]'], {'sampling_hz': 'sampling_hz'}), '(events, [stations], sampling_hz=sampling_hz)\n', (2753, 2798), False, 'from dsmpy.dataset import Dataset\n'), ((2831, 2908), 'dsmpy.dsm.PyDSMInput.input_from_arrays', 'PyDSMInput.input_from_arrays', (['event', 'stations', 'model', 'tlen', 'nspc', 'sampling_hz'], {}), '(event, stations, model, tlen, nspc, sampling_hz)\n', (2859, 2908), False, 'from dsmpy.dsm import PyDSMInput, compute\n'), ((2937, 2968), 'dsmpy.dsm.compute', 'compute', (['pydsm_input'], {'mode': 'mode'}), '(pydsm_input, mode=mode)\n', (2944, 2968), False, 'from dsmpy.dsm import PyDSMInput, compute\n'), ((3022, 3078), 'numpy.zeros', 'np.zeros', (['((1,) + pydsm_output.us.shape)'], {'dtype': 'np.float64'}), '((1,) + pydsm_output.us.shape, dtype=np.float64)\n', (3030, 3078), True, 'import numpy as np\n'), ((3499, 3519), 'dsmpy.seismicmodel.SeismicModel.ak135', 'SeismicModel.ak135', ([], {}), '()\n', (3517, 3519), False, 'from dsmpy.seismicmodel import SeismicModel\n'), ((3616, 3665), 'dsmpy.modelparameters.ModelParameters', 'ModelParameters', (['types', 'radii'], {'mesh_type': '"""boxcar"""'}), "(types, radii, mesh_type='boxcar')\n", (3631, 3665), False, 'from dsmpy.modelparameters import ModelParameters, ParameterType\n'), ((4777, 4813), 'numpy.zeros', 'np.zeros', (['model_params._n_grd_params'], {}), '(model_params._n_grd_params)\n', (4785, 
4813), True, 'import numpy as np\n'), ((4830, 4898), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 30.0, 30.0, 0, 0, -30, -30, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 30.0, 30.0, 0, 0, -30, -30, 0, 0, 0, 0, 0, 0])\n', (4838, 4898), True, 'import numpy as np\n'), ((6098, 6108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6106, 6108), True, 'import matplotlib.pyplot as plt\n'), ((6195, 6205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6203, 6205), True, 'import matplotlib.pyplot as plt\n'), ((6363, 6417), 'numpy.ones', 'np.ones', (['(model_params._n_grd_params // 2)'], {'dtype': '"""bool"""'}), "(model_params._n_grd_params // 2, dtype='bool')\n", (6370, 6417), True, 'import numpy as np\n'), ((6463, 6518), 'numpy.zeros', 'np.zeros', (['(model_params._n_grd_params // 2)'], {'dtype': '"""bool"""'}), "(model_params._n_grd_params // 2, dtype='bool')\n", (6471, 6518), True, 'import numpy as np\n'), ((6708, 6763), 'numpy.zeros', 'np.zeros', (['(model_params._n_grd_params // 2)'], {'dtype': '"""bool"""'}), "(model_params._n_grd_params // 2, dtype='bool')\n", (6716, 6763), True, 'import numpy as np\n'), ((7453, 7463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7461, 7463), True, 'import matplotlib.pyplot as plt\n'), ((7468, 7482), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7477, 7482), True, 'import matplotlib.pyplot as plt\n'), ((3154, 3207), 'pytomo.utilities.white_noise', 'white_noise', (['noise_normalized_std', 'dataset.data.shape'], {}), '(noise_normalized_std, dataset.data.shape)\n', (3165, 3207), False, 'from pytomo.utilities import white_noise\n'), ((3572, 3597), 'numpy.array', 'np.array', (['[493.33, 410.0]'], {}), '([493.33, 410.0])\n', (3580, 3597), True, 'import numpy as np\n'), ((1087, 1140), 'numpy.hstack', 'np.hstack', (['(rs_lower_mantle, rs_mtz, rs_upper_mantle)'], {}), '((rs_lower_mantle, rs_mtz, rs_upper_mantle))\n', (1096, 1140), True, 'import numpy as np\n'), ((6061, 6081), 'dsmpy.seismicmodel.SeismicModel.ak135', 'SeismicModel.ak135', ([], {}), '()\n', (6079, 6081), False, 'from dsmpy.seismicmodel import SeismicModel\n'), ((6225, 6245), 'dsmpy.seismicmodel.SeismicModel.ak135', 'SeismicModel.ak135', ([], {}), '()\n', (6243, 6245), False, 'from dsmpy.seismicmodel import SeismicModel\n'), ((3286, 3323), 'numpy.abs', 'np.abs', (['dataset.data[:, :, :npts_cut]'], {}), '(dataset.data[:, :, :npts_cut])\n', (3292, 3323), True, 'import numpy as np\n')] |
"""Unit tests for servicediscovery-supported APIs."""
import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_servicediscovery
# See our Development Tips on writing tests for hints on how to write good tests:
# http://docs.getmoto.org/en/latest/docs/contributing/development_tips/tests.html
@mock_servicediscovery
def test_create_http_namespace_with_tags():
client = boto3.client("servicediscovery", region_name="eu-west-1")
client.create_http_namespace(
Name="mynamespace", Tags=[{"Key": "key1", "Value": "val1"}]
)
ns_arn = client.list_namespaces()["Namespaces"][0]["Arn"]
resp = client.list_tags_for_resource(ResourceARN=ns_arn)
resp.should.have.key("Tags")
resp["Tags"].should.equal([{"Key": "key1", "Value": "val1"}])
@mock_servicediscovery
def test_create_public_dns_namespace_with_tags():
client = boto3.client("servicediscovery", region_name="eu-west-1")
client.create_public_dns_namespace(
Name="mynamespace", Tags=[{"Key": "key1", "Value": "val1"}]
)
ns_arn = client.list_namespaces()["Namespaces"][0]["Arn"]
resp = client.list_tags_for_resource(ResourceARN=ns_arn)
resp.should.have.key("Tags")
resp["Tags"].should.equal([{"Key": "key1", "Value": "val1"}])
@mock_servicediscovery
def test_create_private_dns_namespace_with_tags():
client = boto3.client("servicediscovery", region_name="eu-west-1")
client.create_private_dns_namespace(
Name="mynamespace", Vpc="vpc", Tags=[{"Key": "key1", "Value": "val1"}]
)
ns_arn = client.list_namespaces()["Namespaces"][0]["Arn"]
resp = client.list_tags_for_resource(ResourceARN=ns_arn)
resp.should.have.key("Tags")
resp["Tags"].should.equal([{"Key": "key1", "Value": "val1"}])
@mock_servicediscovery
def test_create_service_with_tags():
client = boto3.client("servicediscovery", region_name="eu-west-1")
client.create_service(Name="myservice", Tags=[{"Key": "key1", "Value": "val1"}])
ns_arn = client.list_services()["Services"][0]["Arn"]
resp = client.list_tags_for_resource(ResourceARN=ns_arn)
resp.should.have.key("Tags")
resp["Tags"].should.equal([{"Key": "key1", "Value": "val1"}])
@mock_servicediscovery
def test_tag_resource():
client = boto3.client("servicediscovery", region_name="ap-southeast-1")
client.create_http_namespace(
Name="mynamespace", Tags=[{"Key": "key1", "Value": "val1"}]
)
ns_arn = client.list_namespaces()["Namespaces"][0]["Arn"]
client.tag_resource(ResourceARN=ns_arn, Tags=[{"Key": "key2", "Value": "val2"}])
resp = client.list_tags_for_resource(ResourceARN=ns_arn)
resp.should.have.key("Tags")
resp["Tags"].should.equal(
[{"Key": "key1", "Value": "val1"}, {"Key": "key2", "Value": "val2"}]
)
@mock_servicediscovery
def test_untag_resource():
client = boto3.client("servicediscovery", region_name="us-east-2")
client.create_http_namespace(Name="mynamespace")
ns_arn = client.list_namespaces()["Namespaces"][0]["Arn"]
client.tag_resource(
ResourceARN=ns_arn,
Tags=[{"Key": "key1", "Value": "val1"}, {"Key": "key2", "Value": "val2"}],
)
client.untag_resource(ResourceARN=ns_arn, TagKeys=["key1"])
resp = client.list_tags_for_resource(ResourceARN=ns_arn)
resp.should.have.key("Tags")
resp["Tags"].should.equal([{"Key": "key2", "Value": "val2"}])
| [
"boto3.client"
] | [((406, 463), 'boto3.client', 'boto3.client', (['"""servicediscovery"""'], {'region_name': '"""eu-west-1"""'}), "('servicediscovery', region_name='eu-west-1')\n", (418, 463), False, 'import boto3\n'), ((885, 942), 'boto3.client', 'boto3.client', (['"""servicediscovery"""'], {'region_name': '"""eu-west-1"""'}), "('servicediscovery', region_name='eu-west-1')\n", (897, 942), False, 'import boto3\n'), ((1371, 1428), 'boto3.client', 'boto3.client', (['"""servicediscovery"""'], {'region_name': '"""eu-west-1"""'}), "('servicediscovery', region_name='eu-west-1')\n", (1383, 1428), False, 'import boto3\n'), ((1855, 1912), 'boto3.client', 'boto3.client', (['"""servicediscovery"""'], {'region_name': '"""eu-west-1"""'}), "('servicediscovery', region_name='eu-west-1')\n", (1867, 1912), False, 'import boto3\n'), ((2282, 2344), 'boto3.client', 'boto3.client', (['"""servicediscovery"""'], {'region_name': '"""ap-southeast-1"""'}), "('servicediscovery', region_name='ap-southeast-1')\n", (2294, 2344), False, 'import boto3\n'), ((2876, 2933), 'boto3.client', 'boto3.client', (['"""servicediscovery"""'], {'region_name': '"""us-east-2"""'}), "('servicediscovery', region_name='us-east-2')\n", (2888, 2933), False, 'import boto3\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.apps import apps
from django.forms import fields
from django.forms.models import ModelForm
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from cms.models import Page
from cmsplugin_cascade.models import CascadePage
from cmsplugin_cascade.utils import validate_link
if 'django_select2' in settings.INSTALLED_APPS:
Select2Widget = import_string('django_select2.forms.Select2Widget')
else:
Select2Widget = import_string('django.forms.widgets.Select')
class SelectWidget(Select2Widget):
@property
def media(self):
parent_media = super(SelectWidget, self).media
# prepend JS snippet to re-add 'jQuery' to the global namespace
parent_media._js.insert(0, 'cascade/js/admin/jquery.restore.js')
return parent_media
class LinkSearchField(fields.ChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('widget', SelectWidget)
super(LinkSearchField, self).__init__(*args, **kwargs)
def clean(self, value):
try:
return int(value)
except (TypeError, ValueError):
pass
class LinkForm(ModelForm):
"""
Form class to add fake fields for rendering the ModelAdmin's form, which later are used to
populate the glossary of the model.
"""
LINK_TYPE_CHOICES = (('cmspage', _("CMS Page")), ('exturl', _("External URL")), ('email', _("Mail To")),)
link_type = fields.ChoiceField(label=_("Link"), help_text=_("Type of link"))
cms_page = LinkSearchField(required=False, label='',
help_text=_("An internal link onto CMS pages of this site"))
section = fields.ChoiceField(required=False, label='',
help_text=_("Page bookmark"))
ext_url = fields.URLField(required=False, label='', help_text=_("Link onto external page"))
mail_to = fields.EmailField(required=False, label='', help_text=_("Open Email program with this address"))
class Meta:
fields = ('glossary',)
def __init__(self, data=None, *args, **kwargs):
instance = kwargs.get('instance')
default_link_type = {'type': self.LINK_TYPE_CHOICES[0][0]}
initial = dict(instance.glossary) if instance else {'link': default_link_type}
initial.update(kwargs.pop('initial', {}))
initial.setdefault('link', {'type': default_link_type})
link_type = initial['link']['type']
self.base_fields['link_type'].choices = self.LINK_TYPE_CHOICES
self.base_fields['link_type'].initial = link_type
if data and data.get('shared_glossary'):
# convert this into an optional field since it is disabled with ``shared_glossary`` set
self.base_fields['link_type'].required = False
set_initial_linktype = getattr(self, 'set_initial_{}'.format(link_type), None)
# populate Select field for choosing a CMS page
try:
site = instance.placeholder.page.site
except AttributeError:
site = Site.objects.get_current()
choices = [(p.pk, '{0} ({1})'.format(p.get_page_title(), p.get_absolute_url()))
for p in Page.objects.drafts().on_site(site)]
self.base_fields['cms_page'].choices = choices
if callable(set_initial_linktype):
set_initial_linktype(initial)
self._preset_section(data, initial)
super(LinkForm, self).__init__(data, initial=initial, *args, **kwargs)
def _preset_section(self, data, initial):
choices = [(None, _("Page root"))]
try:
if data:
cms_page = Page.objects.get(pk=data['cms_page'])
else:
cms_page = Page.objects.get(pk=initial['link']['pk'])
except (KeyError, ValueError, ObjectDoesNotExist):
pass
else:
CascadePage.assure_relation(cms_page)
for key, val in cms_page.cascadepage.glossary.get('element_ids', {}).items():
choices.append((key, val))
self.base_fields['section'].initial = initial['link'].get('section')
self.base_fields['section'].choices = choices
def clean_glossary(self):
"""
This method rectifies the behavior of JSONFormFieldBase.clean which converts
the value of empty fields to None, although it shall be an empty dict.
"""
glossary = self.cleaned_data['glossary']
if glossary is None:
glossary = {}
return glossary
def clean(self):
cleaned_data = super(LinkForm, self).clean()
if self.is_valid():
if 'link_data' in cleaned_data:
cleaned_data['glossary'].update(link=cleaned_data['link_data'])
del self.cleaned_data['link_data']
elif 'link_type' in cleaned_data:
cleaned_data['glossary'].update(link={'type': cleaned_data['link_type']})
else:
cleaned_data['glossary'].update(link={'type': 'none'})
return cleaned_data
def clean_cms_page(self):
if self.cleaned_data.get('link_type') == 'cmspage':
self.cleaned_data['link_data'] = {
'type': 'cmspage',
'model': 'cms.Page',
'pk': self.cleaned_data['cms_page'],
}
validate_link(self.cleaned_data['link_data'])
return self.cleaned_data['cms_page']
def clean_section(self):
if self.cleaned_data.get('link_type') == 'cmspage':
self.cleaned_data['link_data']['section'] = self.cleaned_data['section']
return self.cleaned_data['section']
def clean_ext_url(self):
if self.cleaned_data.get('link_type') == 'exturl':
self.cleaned_data['link_data'] = {'type': 'exturl', 'url': self.cleaned_data['ext_url']}
return self.cleaned_data['ext_url']
def clean_mail_to(self):
if self.cleaned_data.get('link_type') == 'email':
self.cleaned_data['link_data'] = {'type': 'email', 'email': self.cleaned_data['mail_to']}
return self.cleaned_data['mail_to']
def set_initial_none(self, initial):
pass
def set_initial_cmspage(self, initial):
try:
# check if that page still exists, otherwise return nothing
Model = apps.get_model(*initial['link']['model'].split('.'))
initial['cms_page'] = Model.objects.get(pk=initial['link']['pk']).pk
except (KeyError, ObjectDoesNotExist):
pass
def set_initial_exturl(self, initial):
try:
initial['ext_url'] = initial['link']['url']
except KeyError:
pass
def set_initial_email(self, initial):
try:
initial['mail_to'] = initial['link']['email']
except KeyError:
pass
@classmethod
def get_form_class(cls):
"""
Hook to return a form class for editing a CMSPlugin inheriting from ``LinkPluginBase``.
"""
return cls
@classmethod
def unset_required_for(cls, sharable_fields):
"""
        Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form are only
        required if they are declared in `sharable_fields`. Otherwise they are deactivated.
"""
if 'link_content' in cls.base_fields and 'link_content' not in sharable_fields:
cls.base_fields['link_content'].required = False
if 'link_type' in cls.base_fields and 'link' not in sharable_fields:
cls.base_fields['link_type'].required = False
class TextLinkFormMixin(object):
"""
To be used in combination with `LinkForm` for easily accessing the field `link_content`.
"""
def clean(self):
cleaned_data = super(TextLinkFormMixin, self).clean()
if self.is_valid():
cleaned_data['glossary'].update(link_content=cleaned_data['link_content'])
return cleaned_data
| [
"django.utils.translation.ugettext_lazy",
"cms.models.Page.objects.get",
"cmsplugin_cascade.models.CascadePage.assure_relation",
"cmsplugin_cascade.utils.validate_link",
"cms.models.Page.objects.drafts",
"django.utils.module_loading.import_string",
"django.contrib.sites.models.Site.objects.get_current"
] | [((607, 658), 'django.utils.module_loading.import_string', 'import_string', (['"""django_select2.forms.Select2Widget"""'], {}), "('django_select2.forms.Select2Widget')\n", (620, 658), False, 'from django.utils.module_loading import import_string\n'), ((685, 729), 'django.utils.module_loading.import_string', 'import_string', (['"""django.forms.widgets.Select"""'], {}), "('django.forms.widgets.Select')\n", (698, 729), False, 'from django.utils.module_loading import import_string\n'), ((1575, 1588), 'django.utils.translation.ugettext_lazy', '_', (['"""CMS Page"""'], {}), "('CMS Page')\n", (1576, 1588), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1602, 1619), 'django.utils.translation.ugettext_lazy', '_', (['"""External URL"""'], {}), "('External URL')\n", (1603, 1619), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1632, 1644), 'django.utils.translation.ugettext_lazy', '_', (['"""Mail To"""'], {}), "('Mail To')\n", (1633, 1644), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1689, 1698), 'django.utils.translation.ugettext_lazy', '_', (['"""Link"""'], {}), "('Link')\n", (1690, 1698), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1710, 1727), 'django.utils.translation.ugettext_lazy', '_', (['"""Type of link"""'], {}), "('Type of link')\n", (1711, 1727), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1804, 1853), 'django.utils.translation.ugettext_lazy', '_', (['"""An internal link onto CMS pages of this site"""'], {}), "('An internal link onto CMS pages of this site')\n", (1805, 1853), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1932, 1950), 'django.utils.translation.ugettext_lazy', '_', (['"""Page bookmark"""'], {}), "('Page bookmark')\n", (1933, 1950), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2018, 2046), 'django.utils.translation.ugettext_lazy', '_', (['"""Link onto external page"""'], {}), "('Link onto external page')\n", (2019, 2046), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2116, 2157), 'django.utils.translation.ugettext_lazy', '_', (['"""Open Email program with this address"""'], {}), "('Open Email program with this address')\n", (2117, 2157), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4031, 4068), 'cmsplugin_cascade.models.CascadePage.assure_relation', 'CascadePage.assure_relation', (['cms_page'], {}), '(cms_page)\n', (4058, 4068), False, 'from cmsplugin_cascade.models import CascadePage\n'), ((5501, 5546), 'cmsplugin_cascade.utils.validate_link', 'validate_link', (["self.cleaned_data['link_data']"], {}), "(self.cleaned_data['link_data'])\n", (5514, 5546), False, 'from cmsplugin_cascade.utils import validate_link\n'), ((3208, 3234), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (3232, 3234), False, 'from django.contrib.sites.models import Site\n'), ((3725, 3739), 'django.utils.translation.ugettext_lazy', '_', (['"""Page root"""'], {}), "('Page root')\n", (3726, 3739), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3803, 3840), 'cms.models.Page.objects.get', 'Page.objects.get', ([], {'pk': "data['cms_page']"}), "(pk=data['cms_page'])\n", (3819, 3840), False, 'from cms.models import Page\n'), ((3886, 3928), 'cms.models.Page.objects.get', 'Page.objects.get', ([], {'pk': "initial['link']['pk']"}), "(pk=initial['link']['pk'])\n", (3902, 3928), False, 'from cms.models import Page\n'), 
((3351, 3372), 'cms.models.Page.objects.drafts', 'Page.objects.drafts', ([], {}), '()\n', (3370, 3372), False, 'from cms.models import Page\n')] |
import time
import pytest
from oidcendpoint.authn_event import create_authn_event
from oidcendpoint.session import SessionDB
from oidcendpoint import token_handler
from oidcendpoint.sso_db import SSODb
from oidcendpoint.in_memory_db import InMemoryDataBase
from oidcmsg.oidc import AuthorizationRequest
from oidcmsg.oidc import OpenIDRequest
from oidcendpoint.token_handler import AccessCodeUsed, WrongTokenType, \
ExpiredToken
__author__ = 'rohe0002'
AREQ = AuthorizationRequest(response_type="code", client_id="client1",
redirect_uri="http://example.com/authz",
scope=["openid"], state="state000")
AREQN = AuthorizationRequest(response_type="code", client_id="client1",
redirect_uri="http://example.com/authz",
scope=["openid"], state="state000",
nonce="something")
AREQO = AuthorizationRequest(response_type="code", client_id="client1",
redirect_uri="http://example.com/authz",
scope=["openid", "offlien_access"],
prompt="consent", state="state000")
OIDR = OpenIDRequest(response_type="code", client_id="client1",
redirect_uri="http://example.com/authz", scope=["openid"],
state="state000")
class TestSessionDB(object):
@pytest.fixture(autouse=True)
def create_sdb(self):
_sso_db = SSODb()
_token_handler = token_handler.factory('losenord')
self.sdb = SessionDB(InMemoryDataBase(), _token_handler, _sso_db)
def test_create_authz_session(self):
ae = create_authn_event("uid", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
self.sdb.do_sub(sid, "client_salt")
info = self.sdb[sid]
assert info["client_id"] == "client_id"
assert set(info.keys()) == {'client_id', 'authn_req', 'authn_event',
'sub', 'oauth_state', 'code'}
def test_create_authz_session_without_nonce(self):
ae = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
info = self.sdb[sid]
assert info["oauth_state"] == "authz"
def test_create_authz_session_with_nonce(self):
ae = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae, AREQN, client_id='client_id')
info = self.sdb[sid]
authz_request = info['authn_req']
assert authz_request["nonce"] == "something"
def test_create_authz_session_with_id_token(self):
ae = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id',
id_token="id_token")
info = self.sdb[sid]
assert info["id_token"] == "id_token"
def test_create_authz_session_with_oidreq(self):
ae = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id',
oidreq=OIDR)
info = self.sdb[sid]
assert "id_token" not in info
assert "oidreq" in info
def test_create_authz_session_with_sector_id(self):
ae = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id',
oidreq=OIDR)
self.sdb.do_sub(sid, "client_salt", "http://example.com/si.jwt",
"pairwise")
info_1 = self.sdb[sid].copy()
assert "id_token" not in info_1
assert "oidreq" in info_1
assert info_1["sub"] != "sub"
self.sdb.do_sub(sid, "client_salt", "http://example.net/si.jwt",
"pairwise")
info_2 = self.sdb[sid]
assert info_2["sub"] != "sub"
assert info_2["sub"] != info_1["sub"]
def test_upgrade_to_token(self):
ae1 = create_authn_event("uid", "salt")
sid = self.sdb.create_authz_session(ae1, AREQ, client_id='client_id')
self.sdb[sid]['sub'] = 'sub'
grant = self.sdb[sid]["code"]
_dict = self.sdb.upgrade_to_token(grant)
print(_dict.keys())
assert set(_dict.keys()) == {
'authn_event', 'code', 'authn_req', 'access_token', 'token_type',
'client_id', 'oauth_state'}
# can't update again
with pytest.raises(AccessCodeUsed):
self.sdb.upgrade_to_token(grant)
self.sdb.upgrade_to_token(_dict["access_token"])
def test_upgrade_to_token_refresh(self):
ae1 = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae1, AREQO, client_id='client_id')
self.sdb.do_sub(sid, ae1['salt'])
grant = self.sdb[sid]["code"]
# Issue an access token trading in the access grant code
_dict = self.sdb.upgrade_to_token(grant, issue_refresh=True)
print(_dict.keys())
assert set(_dict.keys()) == {
'authn_event', 'code', 'authn_req', 'access_token', 'sub',
'token_type', 'client_id', 'oauth_state', 'refresh_token'}
# can't get another access token using the same code
with pytest.raises(AccessCodeUsed):
self.sdb.upgrade_to_token(grant)
# You can't refresh a token using the token itself
with pytest.raises(WrongTokenType):
self.sdb.refresh_token(_dict["access_token"])
# If the code has been used twice then the refresh token should not work
with pytest.raises(ExpiredToken):
self.sdb.refresh_token(_dict["refresh_token"])
def test_upgrade_to_token_with_id_token_and_oidreq(self):
ae2 = create_authn_event("another_user_id", "salt")
sid = self.sdb.create_authz_session(ae2, AREQ, client_id='client_id')
self.sdb[sid]['sub'] = 'sub'
grant = self.sdb[sid]["code"]
_dict = self.sdb.upgrade_to_token(grant, id_token="id_token",
oidreq=OIDR)
print(_dict.keys())
assert set(_dict.keys()) == {
'authn_event', 'code', 'authn_req', 'oidreq', 'access_token',
'id_token', 'token_type', 'client_id', 'oauth_state'}
assert _dict["id_token"] == "id_token"
assert isinstance(_dict["oidreq"], OpenIDRequest)
def test_refresh_token(self):
ae = create_authn_event("uid", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
self.sdb[sid]['sub'] = 'sub'
grant = self.sdb[sid]["code"]
# with mock.patch("time.gmtime", side_effect=[
# time.struct_time((1970, 1, 1, 10, 39, 0, 0, 0, 0)),
# time.struct_time((1970, 1, 1, 10, 40, 0, 0, 0, 0))]):
dict1 = self.sdb.upgrade_to_token(grant, issue_refresh=True).copy()
rtoken = dict1["refresh_token"]
dict2 = self.sdb.refresh_token(rtoken, AREQ['client_id'])
assert dict1["access_token"] != dict2["access_token"]
with pytest.raises(WrongTokenType):
self.sdb.refresh_token(dict2["access_token"], AREQ['client_id'])
def test_refresh_token_cleared_session(self):
ae = create_authn_event('uid', 'salt')
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
self.sdb[sid]['sub'] = 'sub'
grant = self.sdb[sid]['code']
dict1 = self.sdb.upgrade_to_token(grant, issue_refresh=True)
ac1 = dict1['access_token']
# Purge the SessionDB
self.sdb._db = {}
rtoken = dict1['refresh_token']
with pytest.raises(KeyError):
self.sdb.refresh_token(rtoken, AREQ['client_id'])
def test_is_valid(self):
ae1 = create_authn_event("uid", "salt")
sid = self.sdb.create_authz_session(ae1, AREQ, client_id='client_id')
self.sdb[sid]['sub'] = 'sub'
grant = self.sdb[sid]["code"]
assert self.sdb.is_valid(grant)
sinfo = self.sdb.upgrade_to_token(grant, issue_refresh=True)
assert not self.sdb.is_valid(grant)
access_token = sinfo["access_token"]
assert self.sdb.is_valid(access_token)
refresh_token = sinfo["refresh_token"]
sinfo = self.sdb.refresh_token(refresh_token, AREQ['client_id'])
access_token2 = sinfo["access_token"]
assert self.sdb.is_valid(access_token2)
# The old access code should be invalid
try:
self.sdb.is_valid(access_token)
except KeyError:
pass
def test_valid_grant(self):
ae = create_authn_event("another:user", "salt")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
grant = self.sdb[sid]["code"]
assert self.sdb.is_valid(grant)
def test_revoke_token(self):
ae1 = create_authn_event("uid", "salt")
sid = self.sdb.create_authz_session(ae1, AREQ, client_id='client_id')
self.sdb[sid]['sub'] = 'sub'
grant = self.sdb[sid]["code"]
tokens = self.sdb.upgrade_to_token(grant, issue_refresh=True)
access_token = tokens["access_token"]
refresh_token = tokens["refresh_token"]
assert self.sdb.is_valid(access_token)
self.sdb.revoke_token(access_token)
assert not self.sdb.is_valid(access_token)
sinfo = self.sdb.refresh_token(refresh_token, AREQ['client_id'])
access_token = sinfo["access_token"]
assert self.sdb.is_valid(access_token)
self.sdb.revoke_token(refresh_token)
assert not self.sdb.is_valid(refresh_token)
try:
self.sdb.refresh_token(refresh_token, AREQ['client_id'])
except ExpiredToken:
pass
assert self.sdb.is_valid(access_token)
ae2 = create_authn_event("sub", "salt")
sid = self.sdb.create_authz_session(ae2, AREQ, client_id='client_2')
grant = self.sdb[sid]["code"]
self.sdb.revoke_token(grant)
assert not self.sdb.is_valid(grant)
def test_sub_to_authn_event(self):
ae = create_authn_event("sub", "salt", time_stamp=time.time())
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
sub = self.sdb.do_sub(sid, "client_salt")
# given the sub find out whether the authn event is still valid
sids = self.sdb.get_sids_by_sub(sub)
ae = self.sdb[sids[0]]['authn_event']
assert ae.valid()
def test_do_sub_deterministic(self):
ae = create_authn_event("tester", "random_value")
sid = self.sdb.create_authz_session(ae, AREQ, client_id='client_id')
self.sdb.do_sub(sid, "other_random_value")
info = self.sdb[sid]
assert info["sub"] == \
'179670cdee6375c48e577317b2abd7d5cd26a5cdb1cfb7ef84af3d703c71d013'
self.sdb.do_sub(sid, "other_random_value",
sector_id='http://example.com',
subject_type="pairwise")
info2 = self.sdb[sid]
assert info2["sub"] == \
'aaa50d80f8780cf1c4beb39e8e126556292f5091b9e39596424fefa2b99d9c53'
self.sdb.do_sub(sid, "another_random_value",
sector_id='http://other.example.com',
subject_type="pairwise")
info2 = self.sdb[sid]
assert info2["sub"] == \
'62fb630e29f0d41b88e049ac0ef49a9c3ac5418c029d6e4f5417df7e9443976b'
| [
"oidcmsg.oidc.AuthorizationRequest",
"oidcendpoint.authn_event.create_authn_event",
"oidcmsg.oidc.OpenIDRequest",
"pytest.raises",
"oidcendpoint.in_memory_db.InMemoryDataBase",
"pytest.fixture",
"oidcendpoint.sso_db.SSODb",
"time.time",
"oidcendpoint.token_handler.factory"
] | [((471, 620), 'oidcmsg.oidc.AuthorizationRequest', 'AuthorizationRequest', ([], {'response_type': '"""code"""', 'client_id': '"""client1"""', 'redirect_uri': '"""http://example.com/authz"""', 'scope': "['openid']", 'state': '"""state000"""'}), "(response_type='code', client_id='client1',\n redirect_uri='http://example.com/authz', scope=['openid'], state='state000'\n )\n", (491, 620), False, 'from oidcmsg.oidc import AuthorizationRequest\n'), ((677, 845), 'oidcmsg.oidc.AuthorizationRequest', 'AuthorizationRequest', ([], {'response_type': '"""code"""', 'client_id': '"""client1"""', 'redirect_uri': '"""http://example.com/authz"""', 'scope': "['openid']", 'state': '"""state000"""', 'nonce': '"""something"""'}), "(response_type='code', client_id='client1',\n redirect_uri='http://example.com/authz', scope=['openid'], state=\n 'state000', nonce='something')\n", (697, 845), False, 'from oidcmsg.oidc import AuthorizationRequest\n'), ((933, 1117), 'oidcmsg.oidc.AuthorizationRequest', 'AuthorizationRequest', ([], {'response_type': '"""code"""', 'client_id': '"""client1"""', 'redirect_uri': '"""http://example.com/authz"""', 'scope': "['openid', 'offlien_access']", 'prompt': '"""consent"""', 'state': '"""state000"""'}), "(response_type='code', client_id='client1',\n redirect_uri='http://example.com/authz', scope=['openid',\n 'offlien_access'], prompt='consent', state='state000')\n", (953, 1117), False, 'from oidcmsg.oidc import AuthorizationRequest\n'), ((1205, 1343), 'oidcmsg.oidc.OpenIDRequest', 'OpenIDRequest', ([], {'response_type': '"""code"""', 'client_id': '"""client1"""', 'redirect_uri': '"""http://example.com/authz"""', 'scope': "['openid']", 'state': '"""state000"""'}), "(response_type='code', client_id='client1', redirect_uri=\n 'http://example.com/authz', scope=['openid'], state='state000')\n", (1218, 1343), False, 'from oidcmsg.oidc import OpenIDRequest\n'), ((1417, 1445), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1431, 1445), False, 'import pytest\n'), ((1490, 1497), 'oidcendpoint.sso_db.SSODb', 'SSODb', ([], {}), '()\n', (1495, 1497), False, 'from oidcendpoint.sso_db import SSODb\n'), ((1523, 1556), 'oidcendpoint.token_handler.factory', 'token_handler.factory', (['"""losenord"""'], {}), "('losenord')\n", (1544, 1556), False, 'from oidcendpoint import token_handler\n'), ((1686, 1719), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""uid"""', '"""salt"""'], {}), "('uid', 'salt')\n", (1704, 1719), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((2131, 2164), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (2149, 2164), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((2383, 2416), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (2401, 2416), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((2688, 2721), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (2706, 2721), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((3007, 3040), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (3025, 3040), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((3344, 3377), 'oidcendpoint.authn_event.create_authn_event', 
'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (3362, 3377), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((4050, 4083), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""uid"""', '"""salt"""'], {}), "('uid', 'salt')\n", (4068, 4083), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((4711, 4744), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (4729, 4744), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((5820, 5865), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""another_user_id"""', '"""salt"""'], {}), "('another_user_id', 'salt')\n", (5838, 5865), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((6505, 6538), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""uid"""', '"""salt"""'], {}), "('uid', 'salt')\n", (6523, 6538), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((7312, 7345), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""uid"""', '"""salt"""'], {}), "('uid', 'salt')\n", (7330, 7345), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((7845, 7878), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""uid"""', '"""salt"""'], {}), "('uid', 'salt')\n", (7863, 7878), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((8688, 8730), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""another:user"""', '"""salt"""'], {}), "('another:user', 'salt')\n", (8706, 8730), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((8935, 8968), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""uid"""', '"""salt"""'], {}), "('uid', 'salt')\n", (8953, 8968), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((9887, 9920), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""sub"""', '"""salt"""'], {}), "('sub', 'salt')\n", (9905, 9920), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((10601, 10645), 'oidcendpoint.authn_event.create_authn_event', 'create_authn_event', (['"""tester"""', '"""random_value"""'], {}), "('tester', 'random_value')\n", (10619, 10645), False, 'from oidcendpoint.authn_event import create_authn_event\n'), ((1586, 1604), 'oidcendpoint.in_memory_db.InMemoryDataBase', 'InMemoryDataBase', ([], {}), '()\n', (1602, 1604), False, 'from oidcendpoint.in_memory_db import InMemoryDataBase\n'), ((4514, 4543), 'pytest.raises', 'pytest.raises', (['AccessCodeUsed'], {}), '(AccessCodeUsed)\n', (4527, 4543), False, 'import pytest\n'), ((5322, 5351), 'pytest.raises', 'pytest.raises', (['AccessCodeUsed'], {}), '(AccessCodeUsed)\n', (5335, 5351), False, 'import pytest\n'), ((5471, 5500), 'pytest.raises', 'pytest.raises', (['WrongTokenType'], {}), '(WrongTokenType)\n', (5484, 5500), False, 'import pytest\n'), ((5655, 5682), 'pytest.raises', 'pytest.raises', (['ExpiredToken'], {}), '(ExpiredToken)\n', (5668, 5682), False, 'import pytest\n'), ((7140, 7169), 'pytest.raises', 'pytest.raises', (['WrongTokenType'], {}), '(WrongTokenType)\n', (7153, 7169), False, 'import pytest\n'), ((7714, 7737), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (7727, 7737), False, 'import pytest\n'), ((10216, 10227), 'time.time', 'time.time', ([], {}), '()\n', (10225, 10227), False, 'import time\n')] |
import os
import cv2
import unittest
import numpy as np
from PIL import Image
from os.path import join as pjoin
from dnnbrain.dnn.base import VideoSet
from torchvision import transforms
DNNBRAIN_TEST = pjoin(os.environ['DNNBRAIN_DATA'], 'test')
TMP_DIR = pjoin(os.path.expanduser('~'), '.dnnbrain_tmp')
if not os.path.isdir(TMP_DIR):
os.makedirs(TMP_DIR)
class TestImageSet(unittest.TestCase):
def test_init(self):
pass
def test_getitem(self):
pass
class TestVideoSet(unittest.TestCase):
def test_init(self):
        # prepare variables
fpath = pjoin(DNNBRAIN_TEST, 'video', 'sub-CSI1_ses-01_imagenet.mp4')
frame_id = [1, 2, 4]
print(frame_id)
# initiate
video_1 = VideoSet(fpath, frame_id)
self.assertEqual(video_1.frame_nums, frame_id)
def test_getitem(self):
        # prepare variables
fpath = pjoin(DNNBRAIN_TEST, 'video', 'sub-CSI1_ses-01_imagenet.mp4')
frame_id = [1, 2, 4]
# initiate
video_1 = VideoSet(fpath, frame_id)
transform = transforms.Compose([transforms.ToTensor()])
for i in list(range(len(frame_id))):
video_test_1, _ = video_1[i]
vid_cap = cv2.VideoCapture(fpath)
for j in range(frame_id[i]):
_, video_test_2 = vid_cap.read()
video_test_2 = Image.fromarray(cv2.cvtColor(video_test_2, cv2.COLOR_BGR2RGB))
video_test_2 = transform(video_test_2)
self.assertTrue(np.all(video_test_1.numpy() == video_test_2.numpy()))
if __name__ == '__main__':
unittest.main()
| [
"os.makedirs",
"os.path.join",
"os.path.isdir",
"cv2.VideoCapture",
"dnnbrain.dnn.base.VideoSet",
"cv2.cvtColor",
"unittest.main",
"torchvision.transforms.ToTensor",
"os.path.expanduser"
] | [((204, 246), 'os.path.join', 'pjoin', (["os.environ['DNNBRAIN_DATA']", '"""test"""'], {}), "(os.environ['DNNBRAIN_DATA'], 'test')\n", (209, 246), True, 'from os.path import join as pjoin\n'), ((263, 286), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (281, 286), False, 'import os\n'), ((312, 334), 'os.path.isdir', 'os.path.isdir', (['TMP_DIR'], {}), '(TMP_DIR)\n', (325, 334), False, 'import os\n'), ((340, 360), 'os.makedirs', 'os.makedirs', (['TMP_DIR'], {}), '(TMP_DIR)\n', (351, 360), False, 'import os\n'), ((1597, 1612), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1610, 1612), False, 'import unittest\n'), ((593, 654), 'os.path.join', 'pjoin', (['DNNBRAIN_TEST', '"""video"""', '"""sub-CSI1_ses-01_imagenet.mp4"""'], {}), "(DNNBRAIN_TEST, 'video', 'sub-CSI1_ses-01_imagenet.mp4')\n", (598, 654), True, 'from os.path import join as pjoin\n'), ((746, 771), 'dnnbrain.dnn.base.VideoSet', 'VideoSet', (['fpath', 'frame_id'], {}), '(fpath, frame_id)\n', (754, 771), False, 'from dnnbrain.dnn.base import VideoSet\n'), ((899, 960), 'os.path.join', 'pjoin', (['DNNBRAIN_TEST', '"""video"""', '"""sub-CSI1_ses-01_imagenet.mp4"""'], {}), "(DNNBRAIN_TEST, 'video', 'sub-CSI1_ses-01_imagenet.mp4')\n", (904, 960), True, 'from os.path import join as pjoin\n'), ((1028, 1053), 'dnnbrain.dnn.base.VideoSet', 'VideoSet', (['fpath', 'frame_id'], {}), '(fpath, frame_id)\n', (1036, 1053), False, 'from dnnbrain.dnn.base import VideoSet\n'), ((1227, 1250), 'cv2.VideoCapture', 'cv2.VideoCapture', (['fpath'], {}), '(fpath)\n', (1243, 1250), False, 'import cv2\n'), ((1094, 1115), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1113, 1115), False, 'from torchvision import transforms\n'), ((1384, 1429), 'cv2.cvtColor', 'cv2.cvtColor', (['video_test_2', 'cv2.COLOR_BGR2RGB'], {}), '(video_test_2, cv2.COLOR_BGR2RGB)\n', (1396, 1429), False, 'import cv2\n')] |
from flask import render_template
from model.db import do_something_in_db
def index():
do_something_in_db()
return render_template('index.html')
| [
"flask.render_template",
"model.db.do_something_in_db"
] | [((93, 113), 'model.db.do_something_in_db', 'do_something_in_db', ([], {}), '()\n', (111, 113), False, 'from model.db import do_something_in_db\n'), ((125, 154), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (140, 154), False, 'from flask import render_template\n')] |
# If p is the perimeter of a right angle triangle with integral length sides,
# {a,b,c}, there are exactly three solutions for p = 120.
# {20,48,52}, {24,45,51}, {30,40,50}
# For which value of p ≤ 1000, is the number of solutions maximised?
from math import floor
limit = 1000
triangles = [0 for x in xrange(limit)]
for cathetus1 in xrange(3,(limit-3)/3):
for cathetus2 in xrange(cathetus1+1,(limit-cathetus1-1)/2):
hypotenuse = (cathetus1**2 + cathetus2**2)**0.5
if floor(hypotenuse) == hypotenuse:
circumference = int(cathetus1+cathetus2+hypotenuse)
if circumference > 1000:
break
triangles[circumference-1] += 1
# print("{%d,%d,%d}" %(cathetus1, cathetus2, hypotenuse))
max = 0
for i in xrange(limit):
if triangles[i] > max:
max = triangles[i]
place = i
print ("triangles: %d\ncircumference: %d " %(triangles[place], place+1))
# 840 | [
"math.floor"
] | [((489, 506), 'math.floor', 'floor', (['hypotenuse'], {}), '(hypotenuse)\n', (494, 506), False, 'from math import floor\n')] |
from torchvision.transforms import *
from torch.nn import *
import torch as t
import cv2
from MaskDetector import MaskDetector
def clamp_val(val, min_val, max_val):
return max(min_val, min(max_val, val))
labels = ['Mask', 'No mask']
labelColor = [(10, 255, 0), (10, 0, 255)]
font = cv2.FONT_HERSHEY_SIMPLEX
def detect_frame(frame, face_detection_model, face_classifier_model, device, val_trns, opencv_frame=True):
if opencv_frame == True:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
faces = face_detection_model.detect(frame)
for face in faces:
x_start, y_start, x_end, y_end = face
x_start = clamp_val(x_start, 0, frame.shape[1]-1)
x_end = clamp_val(x_end, 0, frame.shape[1]-1)
y_start = clamp_val(y_start, 0, frame.shape[0]-1)
y_end = clamp_val(y_end, 0, frame.shape[0]-1)
if x_start==x_end or y_start == y_end:
print("Moving too fast!")
continue
# print(x_start, y_start,x_end,y_end)
# print(face)
# print(frame.shape)
faceImg = frame[y_start:y_end, x_start:x_end]
output = face_classifier_model(
val_trns(faceImg).unsqueeze(0).to(device))
# print(output)
output = Softmax(dim=-1)(output)
conf, predicted = t.max(output.data, 1)
verdict = f"{labels[predicted]}: {conf.item()*100:.2f}%"
cv2.rectangle(frame,
(x_start, y_start),
(x_end, y_end),
labelColor[predicted],
thickness=2)
# draw prediction label
cv2.putText(frame,
verdict,
(x_start, y_start-20),
font, 0.5, labelColor[predicted], 1)
if opencv_frame == True:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
return frame
def load_mask_classifier(checkpoint, device, eval=True):
checkpoint = t.load(checkpoint, map_location=device)
lazy = False
img_size = 100
if 'lazy' in checkpoint.keys():
lazy = checkpoint['lazy']
if 'img_size' in checkpoint.keys():
img_size = checkpoint['img_size']
val_trns = Compose([
ToPILImage(),
Resize((img_size, img_size)),
ToTensor()
])
model = MaskDetector(".", lazy=lazy, img_size=img_size, batch_size=1,)
if 'state_dict' in checkpoint.keys():
model.load_state_dict(checkpoint['state_dict'], strict=False)
else:
model.load_state_dict(checkpoint, strict=False)
if eval == True:
model.eval()
model.to(device)
print(f"Loaded model with with image size: {img_size}")
return model, val_trns
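# --- Usage sketch ---
# A minimal webcam loop tying the two helpers above together. The face detector is an
# assumption here: any object whose .detect(rgb_frame) returns (x_start, y_start, x_end, y_end)
# boxes, as detect_frame expects, will work.
def run_webcam_demo(checkpoint_path, face_detection_model):
    device = t.device("cuda" if t.cuda.is_available() else "cpu")
    classifier, val_trns = load_mask_classifier(checkpoint_path, device)
    cap = cv2.VideoCapture(0)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # detect_frame handles the BGR<->RGB conversion (opencv_frame=True by default)
        frame = detect_frame(frame, face_detection_model, classifier, device, val_trns)
        cv2.imshow("mask-detector", frame)
        if (cv2.waitKey(1) & 0xFF) == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()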
| [
"cv2.rectangle",
"MaskDetector.MaskDetector",
"torch.load",
"torch.max",
"cv2.putText",
"cv2.cvtColor"
] | [((1948, 1987), 'torch.load', 't.load', (['checkpoint'], {'map_location': 'device'}), '(checkpoint, map_location=device)\n', (1954, 1987), True, 'import torch as t\n'), ((2304, 2365), 'MaskDetector.MaskDetector', 'MaskDetector', (['"""."""'], {'lazy': 'lazy', 'img_size': 'img_size', 'batch_size': '(1)'}), "('.', lazy=lazy, img_size=img_size, batch_size=1)\n", (2316, 2365), False, 'from MaskDetector import MaskDetector\n'), ((471, 509), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (483, 509), False, 'import cv2\n'), ((1304, 1325), 'torch.max', 't.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (1309, 1325), True, 'import torch as t\n'), ((1400, 1497), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x_start, y_start)', '(x_end, y_end)', 'labelColor[predicted]'], {'thickness': '(2)'}), '(frame, (x_start, y_start), (x_end, y_end), labelColor[\n predicted], thickness=2)\n', (1413, 1497), False, 'import cv2\n'), ((1622, 1716), 'cv2.putText', 'cv2.putText', (['frame', 'verdict', '(x_start, y_start - 20)', 'font', '(0.5)', 'labelColor[predicted]', '(1)'], {}), '(frame, verdict, (x_start, y_start - 20), font, 0.5, labelColor[\n predicted], 1)\n', (1633, 1716), False, 'import cv2\n'), ((1816, 1854), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (1828, 1854), False, 'import cv2\n')] |
from datetime import datetime, timezone
from io import BytesIO
from fastavro import schemaless_writer
from pydantic.main import BaseModel
def generate_current_epoch_time_ms() -> int:
return int(datetime.now(tz=timezone.utc).timestamp() * 1000)
def model_to_bytes(event: BaseModel, avro_schema: dict) -> bytes:
stream = BytesIO()
schemaless_writer(stream, avro_schema, event.dict())
return stream.getvalue()
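# Usage sketch with a made-up model and schema (both purely illustrative assumptions):
# the Avro record fields must mirror the pydantic fields so that schemaless_writer can
# serialize event.dict() directly.
if __name__ == "__main__":
    class PingEvent(BaseModel):
        source: str
        sent_at_ms: int

    ping_schema = {
        "type": "record",
        "name": "PingEvent",
        "fields": [
            {"name": "source", "type": "string"},
            {"name": "sent_at_ms", "type": "long"},
        ],
    }
    event = PingEvent(source="probe-1", sent_at_ms=generate_current_epoch_time_ms())
    print(model_to_bytes(event, ping_schema))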
| [
"datetime.datetime.now",
"io.BytesIO"
] | [((332, 341), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (339, 341), False, 'from io import BytesIO\n'), ((201, 230), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (213, 230), False, 'from datetime import datetime, timezone\n')] |
from django.core.management.base import BaseCommand
from datetime import datetime
from django.conf import settings
import subprocess
import os
bat_dir = os.path.join(settings.BASE_DIR, r'app1\management\commands\create_gui.bat')
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('file_name')
""" カスタムコマンド定義 """
def handle(self, *args, **options):
        # Write the processing you want to run here
file_name = options['file_name']
cmd = (bat_dir, file_name)
rc_code = subprocess.check_call(cmd, shell=True)
if rc_code == 0:
message = "OK"
else:
message = "NG" | [
"os.path.join",
"subprocess.check_call"
] | [((162, 239), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""app1\\\\management\\\\commands\\\\create_gui.bat"""'], {}), "(settings.BASE_DIR, 'app1\\\\management\\\\commands\\\\create_gui.bat')\n", (174, 239), False, 'import os\n'), ((550, 588), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (571, 588), False, 'import subprocess\n')] |
from pagarme.resources import handler_request
from pagarme.resources.routes import payable_routes
def find_all():
return handler_request.get(payable_routes.GET_ALL_PAYABLES)
def find_by(search_params):
return handler_request.get(payable_routes.GET_PAYABLE_BY, search_params)
| [
"pagarme.resources.handler_request.get"
] | [((127, 179), 'pagarme.resources.handler_request.get', 'handler_request.get', (['payable_routes.GET_ALL_PAYABLES'], {}), '(payable_routes.GET_ALL_PAYABLES)\n', (146, 179), False, 'from pagarme.resources import handler_request\n'), ((221, 286), 'pagarme.resources.handler_request.get', 'handler_request.get', (['payable_routes.GET_PAYABLE_BY', 'search_params'], {}), '(payable_routes.GET_PAYABLE_BY, search_params)\n', (240, 286), False, 'from pagarme.resources import handler_request\n')] |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.systemSet, name='system_set'),
# url(r'^trustip/$', views.trustip_set, name='trustip_set'),
# url(r'^trustip/ajax$', views.ajax_trustip_set, name='ajax_trustip_set'),
# url(r'^dkim/$', views.dkim, name='dkim'),
# url(r'^dkim/(?P<domain_id>\d+)/$', views.dkim_modify, name='dkim_modify'),
# url(r'^black/$', views.blacklist, name='blacklist_set'),
# url(r'^black/ajax$', views.ajax_blacklist, name='ajax_blacklist_set'),
# url(r'^white/$', views.whitelist, name='whitelist_set'),
# url(r'^white/ajax$', views.ajax_whitelist, name='ajax_whitelist_set'),
# url(r'^white_rcp/$', views.whitelist_rcp, name='whitelist_rcp_set'),
# url(r'^white_rcp/ajax$', views.ajax_whitelist_rcp, name='ajax_whitelist_rcp_set'),
# url(r'^alias/$', views.domain_alias, name='domain_alias_set'),
# url(r'^alias/ajax$', views.ajax_domain_alias, name='ajax_domain_alias_set'),
# url(r'^alias/add/$', views.domain_alias_add, name='domain_alias_add'),
# url(r'^alias/(?P<alias_id>\d+)/$', views.domain_alias_modify, name='domain_alias_modify'),
# url(r'^cfilter/$', views.cfilter, name='cfilter_set'),
url(r'^cfilter/config/$', views.cfilter_config, name='cfilter_config'),
# url(r'^cfilter/ajax$', views.ajax_cfilter, name='ajax_cfilter_set'),
# url(r'^cfilter/ajax_smtp$', views.ajax_smtpcheck, name='ajax_cfilter_smtpcheck'),
# url(r'^cfilter/add/$', views.cfilter_add, name='cfilter_add_set'),
# url(r'^cfilter/(?P<rule_id>\d+)/$', views.cfilter_modify, name='cfilter_modify_set'),
# url(r'^cfilter/v/(?P<rule_id>\d+)/$', views.cfilter_view, name='cfilter_view_set'),
url(r'^smtp/$', views.smtp, name='smtp_set'),
# url(r'^acct_transfer$', views.accountTransfer, name='account_transfer'),
# url(r'^acct_transfer/add$', views.accountTransferAdd, name='account_transfer_add'),
# url(r'^acct_transfer/mdf/(?P<trans_id>\d+)/$', views.accountTransferModify, name='account_transfer_mdf'),
# url(r'^acct_transfer/ajax$', views.ajax_accountTransfer, name='ajax_accountTransfer'),
]
| [
"django.conf.urls.url"
] | [((99, 144), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.systemSet'], {'name': '"""system_set"""'}), "('^$', views.systemSet, name='system_set')\n", (102, 144), False, 'from django.conf.urls import url\n'), ((1258, 1327), 'django.conf.urls.url', 'url', (['"""^cfilter/config/$"""', 'views.cfilter_config'], {'name': '"""cfilter_config"""'}), "('^cfilter/config/$', views.cfilter_config, name='cfilter_config')\n", (1261, 1327), False, 'from django.conf.urls import url\n'), ((1753, 1796), 'django.conf.urls.url', 'url', (['"""^smtp/$"""', 'views.smtp'], {'name': '"""smtp_set"""'}), "('^smtp/$', views.smtp, name='smtp_set')\n", (1756, 1796), False, 'from django.conf.urls import url\n')] |
import torch
from torch import nn
import torch.nn.functional as F
from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding
from models.sync_batchnorm import SynchronizedBatchNorm2d
# Multi input version of nn.Sequential
# https://github.com/pytorch/pytorch/issues/19808
class MultiSeqential(nn.Sequential):
def forward(self, *inputs):
x, e = inputs
for module in self._modules.values():
if type(module) is GBlock:
x = module(x, e)
else:
x = module(x)
return x
## Generator ##
class GBlock(nn.Module):
def __init__(self, in_ch, out_ch, upsample, embedding_dims, bottleneck_ratio=4):
super().__init__()
# conv layers
hidden_ch = out_ch // bottleneck_ratio
self.conv1 = SNConv2d(in_ch, hidden_ch, kernel_size=1)
self.conv2 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
self.conv3 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
self.conv4 = SNConv2d(hidden_ch, out_ch, kernel_size=1)
self.out_ch = out_ch
# bn layers
self.bn1 = ConditionalBatchNorm(in_ch, embedding_dims)
self.bn2 = ConditionalBatchNorm(hidden_ch, embedding_dims)
self.bn3 = ConditionalBatchNorm(hidden_ch, embedding_dims)
self.bn4 = ConditionalBatchNorm(hidden_ch, embedding_dims)
# upsample
self.upsample = upsample
def forward(self, x, embedding):
# skip connection path
skip = x[:,:self.out_ch,:,:] # drop channels
# main path
# pre-act
h = F.relu(self.bn1(x, embedding))
# 1st conv
h = F.relu(self.bn2(self.conv1(h), embedding))
# upsampling if needed
if self.upsample > 1:
h = F.interpolate(h, scale_factor=self.upsample)
skip = F.interpolate(skip, scale_factor=self.upsample)
# 2nd, 3rd conv
h = F.relu(self.bn3(self.conv2(h), embedding))
h = F.relu(self.bn4(self.conv3(h), embedding))
# last conv
h = self.conv4(h)
# add
return h + skip
def G_arch(ch=64, attention='64'):
arch = {}
arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],
'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],
'upsample' : [True] * 6,
'resolution' : [8, 16, 32, 64, 128, 256],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,9)}}
arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],
'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],
'upsample' : [True] * 5,
'resolution' : [8, 16, 32, 64, 128],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,8)}}
arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],
'out_channels' : [ch * item for item in [16, 8, 4, 2]],
'upsample' : [True] * 4,
'resolution' : [8, 16, 32, 64],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,7)}}
arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],
'out_channels' : [ch * item for item in [4, 4, 4]],
'upsample' : [True] * 3,
'resolution' : [8, 16, 32],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3, 6)}}
return arch
### # parameters (Generator) ###
## paper setting = base_ch = 128
## ImageNet base (n_classes=1000, n_projected_dims=128)
# --- resolution = 32 ---
# base_ch=16, params=777,827
# base_ch=32, params=1,507,395
# base_ch=48, params=2,316,835
# base_ch=64, params=3,206,147
# base_ch=96, params=5,224,387
# base_ch=128, params=7,562,115
# --- resolution = 64 ---
# base_ch=16, params=2,753,724
# base_ch=32, params=6,168,052
# base_ch=48, params=10,371,116
# base_ch=64, params=15,362,916
# base_ch=96, params=27,712,724
# base_ch=128, params=43,217,476
# --- resolution = 128 ---
# base_ch=16, params=2,806,688
# base_ch=32, params=6,278,268
# base_ch=48, params=10,542,872
# base_ch=64, params=15,600,500
# base_ch=96, params=28,094,828
# base_ch=128, params=43,761,252
# --- resolution = 256 ---
# base_ch=16, params=3,093,600
# base_ch=32, params=6,966,268
# base_ch=48, params=11,746,136
# base_ch=64, params=17,433,204
# base_ch=96, params=31,528,940
# base_ch=128, params=49,253,476
class Generator(nn.Module):
def __init__(self, base_ch, resolution, n_classes, n_projected_dims=32, n_latent_dims=128):
super().__init__()
assert resolution in [32, 64, 128, 256]
# Onehot vector projection dims [n_classes -> n_projected_dims]
# Latent random variable dims [n_latent_dims]
# Conditional batch norm [n_projected_dims + n_latent_dims -> image path ch]
# base ch
self.base_ch = base_ch
# n_latent_dims random variable of latent space
self.n_latent_dims = n_latent_dims
# n_classes of label
self.n_classes = n_classes
# n_dimentions projected by sheared embedding
self.n_projected_dims = n_projected_dims
embedding_dims = n_projected_dims + n_latent_dims
# Shared embedding across condtional batch norms
self.shared_embedding = SNLinear(n_classes, n_projected_dims)
# architecture of G
arch = G_arch(base_ch)[resolution]
# initial linear
self.initial_ch = arch["in_channels"][0]
self.linear = SNLinear(embedding_dims, 4 * 4 * self.initial_ch)
# main model
blocks = []
for in_ch, out_ch, upsample, _, attention in zip(*
(v.values() if type(v) is dict else v for v in arch.values())):
# ResBlock with non-upsampling
blocks.append(
GBlock(in_ch, in_ch, 1, embedding_dims)
)
# ResBlock with upsampling
blocks.append(
GBlock(in_ch, out_ch, 2 if upsample else 1, embedding_dims)
)
# Non-Local block if needed (Self-attention)
if attention:
blocks.append(SelfAttention(out_ch))
self.main = MultiSeqential(*blocks)
# final layers (not to use SNConv or Conditional BN)
last_ch = arch["out_channels"][-1]
self.out_conv = nn.Sequential(
nn.BatchNorm2d(last_ch),
nn.ReLU(True),
nn.Conv2d(last_ch, 3, kernel_size=3, padding=1),
nn.Tanh()
)
def forward(self, z, y):
assert z.size(1) == self.n_latent_dims and y.size(1) == self.n_classes
# linear
projection = self.shared_embedding(y)
        # shared embedding value (because BigGAN-deep does not split z)
embedding = torch.cat([projection, z], dim=1)
x = self.linear(embedding).view(z.size(0), self.initial_ch, 4, 4)
# main convolution blocks
x = self.main(x, embedding)
# output conv layers
x = self.out_conv(x)
return x
## Discriminator ##
class DBlock(nn.Module):
def __init__(self, in_ch, out_ch, downsample, bottleneck_ratio=4):
super().__init__()
# conv blocks
hidden_ch = in_ch // bottleneck_ratio
self.conv1 = SNConv2d(in_ch, hidden_ch, kernel_size=1)
self.conv2 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
self.conv3 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
self.conv4 = SNConv2d(hidden_ch, out_ch, kernel_size=1)
# short-conv for increasing channel
self.downsample = downsample
if in_ch < out_ch:
self.conv_short = SNConv2d(in_ch, out_ch - in_ch, kernel_size=1)
else:
self.conv_short = None
def forward(self, inputs):
# main path
x = F.relu(inputs)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
# avg pool
if self.downsample > 1:
x = F.avg_pool2d(x, kernel_size=self.downsample)
s = F.avg_pool2d(inputs, kernel_size=self.downsample)
else:
s = inputs
x = self.conv4(x)
# short cut path
if self.conv_short is not None:
s_increase = self.conv_short(s)
s = torch.cat([s, s_increase], dim=1)
        # residual add
return x + s
def D_arch(ch=64, attention='64'):
arch = {}
arch[256] = {'in_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16]],
'out_channels' : [item * ch for item in [2, 4, 8, 8, 16, 16]],
'downsample' : [True] * 6 + [False],
'resolution' : [128, 64, 32, 16, 8, 4, 4 ],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,8)}}
arch[128] = {'in_channels' : [item * ch for item in [1, 2, 4, 8, 16]],
'out_channels' : [item * ch for item in [2, 4, 8, 16, 16]],
'downsample' : [True] * 5 + [False],
'resolution' : [64, 32, 16, 8, 4, 4],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,8)}}
arch[64] = {'in_channels' : [item * ch for item in [1, 2, 4, 8]],
'out_channels' : [item * ch for item in [2, 4, 8, 16]],
'downsample' : [True] * 4 + [False],
'resolution' : [32, 16, 8, 4, 4],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,7)}}
arch[32] = {'in_channels' : [item * ch for item in [4, 4, 4]],
'out_channels' : [item * ch for item in [4, 4, 4]],
'downsample' : [True, True, False, False],
'resolution' : [16, 16, 16, 16],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,6)}}
return arch
### # parameters (Discriminator) ###
## paper setting = base_ch = 128
## ImageNet base (n_classes=1000)
# --- resolution = 32 ---
# base_ch=16, params=106,465
# base_ch=32, params=292,801
# base_ch=48, params=559,009
# base_ch=64, params=905,089
# base_ch=96, params=1,836,865
# base_ch=128, params=3,088,129
# --- resolution = 64 ---
# base_ch=16, params=462,445
# base_ch=32, params=1,332,889
# base_ch=48, params=2,611,333
# base_ch=64, params=4,297,777
# base_ch=96, params=8,894,665
# base_ch=128, params=15,123,553
# --- resolution = 128 ---
# base_ch=16, params=758,254
# base_ch=32, params=2,514,330
# base_ch=48, params=5,268,230
# base_ch=64, params=9,019,954
# base_ch=96, params=19,516,874
# base_ch=128, params=34,005,090
# --- resolution = 256 ---
# base_ch=16, params=811,950
# base_ch=32, params=2,728,218
# base_ch=48, params=5,748,806
# base_ch=64, params=9,873,714
# base_ch=96, params=21,436,490
# base_ch=128, params=37,416,546
class Discriminator(nn.Module):
def __init__(self, base_ch, resolution, n_classes):
super().__init__()
assert resolution in [32, 64, 128, 256]
arch = D_arch(base_ch)[resolution]
# initial conv
self.initial_conv = SNConv2d(3, arch["in_channels"][0], kernel_size=3, padding=1)
# main_conv
blocks = []
for in_ch, out_ch, downsample, _, attention in zip(*
(v.values() if type(v) is dict else v for v in arch.values())):
# D block with downsampling
blocks.append(DBlock(in_ch, out_ch, 2 if downsample else 1))
# D block with non-downsampling
blocks.append(DBlock(out_ch, out_ch, 1))
# Non-local(self attention) if needed
if attention:
blocks.append(SelfAttention(out_ch))
self.main = nn.Sequential(*blocks)
# prob-linear
self.linear_out = SNLinear(out_ch, 1)
# projection
self.proj_embedding = SNEmbedding(n_classes, out_ch)
def forward(self, x, y):
h = self.initial_conv(x)
h = F.relu(self.main(h))
h = torch.sum(h, dim=(2, 3)) # global sum pooling
logit = self.linear_out(h)
h = self.proj_embedding(h, logit, y)
return h
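# Minimal smoke test (illustrative sketch, not part of the original module). It assumes the helper
# layers used above (SNLinear, SNConv2d, SelfAttention, MultiSeqential, ...) are available exactly
# as imported/defined earlier in this file, and it checks the parameter counts from the comment
# tables for base_ch=64, resolution=128, n_classes=1000, n_projected_dims=128.
if __name__ == "__main__":
    n_classes, n_latent_dims = 1000, 128
    net_g = Generator(base_ch=64, resolution=128, n_classes=n_classes,
                      n_projected_dims=128, n_latent_dims=n_latent_dims)
    net_d = Discriminator(base_ch=64, resolution=128, n_classes=n_classes)
    # expected roughly 15,600,500 (G) and 9,019,954 (D) parameters per the tables above
    print(sum(p.numel() for p in net_g.parameters()))
    print(sum(p.numel() for p in net_d.parameters()))
    # one generator forward pass with a batch of two latent vectors and one-hot labels
    z = torch.randn(2, n_latent_dims)
    y = torch.eye(n_classes)[torch.randint(0, n_classes, (2,))]
    fake = net_g(z, y)
    print(fake.shape)  # expected: torch.Size([2, 3, 128, 128])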
| [
"torch.nn.BatchNorm2d",
"models.layers.SNEmbedding",
"torch.nn.ReLU",
"models.layers.SNLinear",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"models.layers.SNConv2d",
"torch.sum",
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"models.layers.ConditionalBatchNorm",
"models.layers.SelfAttention",
"torch.cat"
] | [((827, 868), 'models.layers.SNConv2d', 'SNConv2d', (['in_ch', 'hidden_ch'], {'kernel_size': '(1)'}), '(in_ch, hidden_ch, kernel_size=1)\n', (835, 868), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((890, 946), 'models.layers.SNConv2d', 'SNConv2d', (['hidden_ch', 'hidden_ch'], {'kernel_size': '(3)', 'padding': '(1)'}), '(hidden_ch, hidden_ch, kernel_size=3, padding=1)\n', (898, 946), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((968, 1024), 'models.layers.SNConv2d', 'SNConv2d', (['hidden_ch', 'hidden_ch'], {'kernel_size': '(3)', 'padding': '(1)'}), '(hidden_ch, hidden_ch, kernel_size=3, padding=1)\n', (976, 1024), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((1046, 1088), 'models.layers.SNConv2d', 'SNConv2d', (['hidden_ch', 'out_ch'], {'kernel_size': '(1)'}), '(hidden_ch, out_ch, kernel_size=1)\n', (1054, 1088), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((1157, 1200), 'models.layers.ConditionalBatchNorm', 'ConditionalBatchNorm', (['in_ch', 'embedding_dims'], {}), '(in_ch, embedding_dims)\n', (1177, 1200), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((1220, 1267), 'models.layers.ConditionalBatchNorm', 'ConditionalBatchNorm', (['hidden_ch', 'embedding_dims'], {}), '(hidden_ch, embedding_dims)\n', (1240, 1267), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((1287, 1334), 'models.layers.ConditionalBatchNorm', 'ConditionalBatchNorm', (['hidden_ch', 'embedding_dims'], {}), '(hidden_ch, embedding_dims)\n', (1307, 1334), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((1354, 1401), 'models.layers.ConditionalBatchNorm', 'ConditionalBatchNorm', (['hidden_ch', 'embedding_dims'], {}), '(hidden_ch, embedding_dims)\n', (1374, 1401), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((5623, 5660), 'models.layers.SNLinear', 'SNLinear', (['n_classes', 'n_projected_dims'], {}), '(n_classes, n_projected_dims)\n', (5631, 5660), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((5829, 5878), 'models.layers.SNLinear', 'SNLinear', (['embedding_dims', '(4 * 4 * self.initial_ch)'], {}), '(embedding_dims, 4 * 4 * self.initial_ch)\n', (5837, 5878), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((7152, 7185), 'torch.cat', 'torch.cat', (['[projection, z]'], {'dim': '(1)'}), '([projection, z], dim=1)\n', (7161, 7185), False, 'import torch\n'), ((7639, 7680), 'models.layers.SNConv2d', 'SNConv2d', (['in_ch', 'hidden_ch'], {'kernel_size': '(1)'}), '(in_ch, hidden_ch, kernel_size=1)\n', (7647, 7680), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((7702, 7758), 'models.layers.SNConv2d', 'SNConv2d', (['hidden_ch', 'hidden_ch'], {'kernel_size': '(3)', 'padding': '(1)'}), '(hidden_ch, hidden_ch, kernel_size=3, padding=1)\n', (7710, 7758), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((7780, 7836), 'models.layers.SNConv2d', 'SNConv2d', (['hidden_ch', 'hidden_ch'], {'kernel_size': '(3)', 
'padding': '(1)'}), '(hidden_ch, hidden_ch, kernel_size=3, padding=1)\n', (7788, 7836), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((7858, 7900), 'models.layers.SNConv2d', 'SNConv2d', (['hidden_ch', 'out_ch'], {'kernel_size': '(1)'}), '(hidden_ch, out_ch, kernel_size=1)\n', (7866, 7900), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((8199, 8213), 'torch.nn.functional.relu', 'F.relu', (['inputs'], {}), '(inputs)\n', (8205, 8213), True, 'import torch.nn.functional as F\n'), ((11634, 11695), 'models.layers.SNConv2d', 'SNConv2d', (['(3)', "arch['in_channels'][0]"], {'kernel_size': '(3)', 'padding': '(1)'}), "(3, arch['in_channels'][0], kernel_size=3, padding=1)\n", (11642, 11695), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((12248, 12270), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (12261, 12270), False, 'from torch import nn\n'), ((12319, 12338), 'models.layers.SNLinear', 'SNLinear', (['out_ch', '(1)'], {}), '(out_ch, 1)\n', (12327, 12338), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((12398, 12428), 'models.layers.SNEmbedding', 'SNEmbedding', (['n_classes', 'out_ch'], {}), '(n_classes, out_ch)\n', (12409, 12428), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((12545, 12569), 'torch.sum', 'torch.sum', (['h'], {'dim': '(2, 3)'}), '(h, dim=(2, 3))\n', (12554, 12569), False, 'import torch\n'), ((1817, 1861), 'torch.nn.functional.interpolate', 'F.interpolate', (['h'], {'scale_factor': 'self.upsample'}), '(h, scale_factor=self.upsample)\n', (1830, 1861), True, 'import torch.nn.functional as F\n'), ((1881, 1928), 'torch.nn.functional.interpolate', 'F.interpolate', (['skip'], {'scale_factor': 'self.upsample'}), '(skip, scale_factor=self.upsample)\n', (1894, 1928), True, 'import torch.nn.functional as F\n'), ((6744, 6767), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['last_ch'], {}), '(last_ch)\n', (6758, 6767), False, 'from torch import nn\n'), ((6781, 6794), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6788, 6794), False, 'from torch import nn\n'), ((6808, 6855), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_ch', '(3)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(last_ch, 3, kernel_size=3, padding=1)\n', (6817, 6855), False, 'from torch import nn\n'), ((6869, 6878), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6876, 6878), False, 'from torch import nn\n'), ((8039, 8085), 'models.layers.SNConv2d', 'SNConv2d', (['in_ch', '(out_ch - in_ch)'], {'kernel_size': '(1)'}), '(in_ch, out_ch - in_ch, kernel_size=1)\n', (8047, 8085), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((8383, 8427), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': 'self.downsample'}), '(x, kernel_size=self.downsample)\n', (8395, 8427), True, 'import torch.nn.functional as F\n'), ((8444, 8493), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['inputs'], {'kernel_size': 'self.downsample'}), '(inputs, kernel_size=self.downsample)\n', (8456, 8493), True, 'import torch.nn.functional as F\n'), ((8682, 8715), 'torch.cat', 'torch.cat', (['[s, s_increase]'], {'dim': '(1)'}), '([s, s_increase], dim=1)\n', (8691, 8715), False, 'import torch\n'), ((6509, 6530), 'models.layers.SelfAttention', 
'SelfAttention', (['out_ch'], {}), '(out_ch)\n', (6522, 6530), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n'), ((12205, 12226), 'models.layers.SelfAttention', 'SelfAttention', (['out_ch'], {}), '(out_ch)\n', (12218, 12226), False, 'from models.layers import SNLinear, SNConv2d, ConditionalBatchNorm, SelfAttention, SNEmbedding\n')] |
import re
from unittest import TestCase, main
class TestNameVariants(TestCase):
def test_splits(self):
splitter = re.compile("[-.`'\u2019\\s]+", re.UNICODE | re.IGNORECASE)
res = splitter.split("Saint Bob's Pond")
print(res)
res = splitter.split("Saint Bob' Pond")
print(res)
res = splitter.split("Sant' Bob Pond")
print(res)
replacements = {}
term = 'saint'
replacements[term] = 'st. '
pat = f"({term}[-`'\u2019\s]+)"
regex = re.compile(pat, re.UNICODE | re.IGNORECASE)
test = 'Saint-Pryvé-Saint-Mesmin'
repl = replacements[term].capitalize()
nVar = regex.sub(repl, test)
nVar = nVar.replace('-', ' ').strip()
nVar = nVar.replace(' ', ' ')
print(nVar)
# TODO: validate tests.
assert True
if __name__ == '__main__':
main() | [
"unittest.main",
"re.compile"
] | [((895, 901), 'unittest.main', 'main', ([], {}), '()\n', (899, 901), False, 'from unittest import TestCase, main\n'), ((129, 182), 're.compile', 're.compile', (['"""[-.`\'’\\\\s]+"""', '(re.UNICODE | re.IGNORECASE)'], {}), '("[-.`\'’\\\\s]+", re.UNICODE | re.IGNORECASE)\n', (139, 182), False, 'import re\n'), ((533, 576), 're.compile', 're.compile', (['pat', '(re.UNICODE | re.IGNORECASE)'], {}), '(pat, re.UNICODE | re.IGNORECASE)\n', (543, 576), False, 'import re\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-10 23:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prodavnica', '0008_auto_20170811_0045'),
]
operations = [
migrations.RemoveField(
model_name='slika',
name='lokacija',
),
migrations.AddField(
model_name='slika',
name='slika',
field=models.ImageField(default='C:\\Python34\\Scripts\\env_site1\\korisnik.png', upload_to='C:\\Python34\\Scripts\\env_site1'),
),
]
| [
"django.db.models.ImageField",
"django.db.migrations.RemoveField"
] | [((303, 362), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""slika"""', 'name': '"""lokacija"""'}), "(model_name='slika', name='lokacija')\n", (325, 362), False, 'from django.db import migrations, models\n'), ((504, 629), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""C:\\\\Python34\\\\Scripts\\\\env_site1\\\\korisnik.png"""', 'upload_to': '"""C:\\\\Python34\\\\Scripts\\\\env_site1"""'}), "(default='C:\\\\Python34\\\\Scripts\\\\env_site1\\\\korisnik.png',\n upload_to='C:\\\\Python34\\\\Scripts\\\\env_site1')\n", (521, 629), False, 'from django.db import migrations, models\n')] |
"""Test for the convenience functions."""
import numpy as np
import pytest
from probnum import filtsmooth
@pytest.fixture(name="prior_dimension")
def fixture_prior_dimension():
return 3
@pytest.fixture(name="measurement_dimension")
def fixture_measurement_dimension():
return 2
@pytest.fixture(name="number_of_measurements")
def fixture_number_of_measurements():
return 10
@pytest.fixture(name="observations")
def fixture_observations(measurement_dimension, number_of_measurements):
obs = np.arange(measurement_dimension * number_of_measurements)
return obs.reshape((number_of_measurements, measurement_dimension))
@pytest.fixture(name="locations")
def fixture_locations(number_of_measurements):
return np.arange(number_of_measurements)
@pytest.fixture(name="F")
def fixture_F(prior_dimension):
return np.eye(prior_dimension)
@pytest.fixture(name="L")
def fixture_L(prior_dimension):
return np.eye(prior_dimension)
@pytest.fixture(name="H")
def fixture_H(measurement_dimension, prior_dimension):
return np.eye(measurement_dimension, prior_dimension)
@pytest.fixture(name="R")
def fixture_R(measurement_dimension):
return np.eye(measurement_dimension)
@pytest.fixture(name="m0")
def fixture_m0(prior_dimension):
return np.arange(prior_dimension)
@pytest.fixture(name="C0")
def fixture_C0(prior_dimension):
return np.eye(prior_dimension)
@pytest.mark.parametrize("prior_model", ["continuous", "discrete"])
def test_kalman_filter(observations, locations, F, L, H, R, m0, C0, prior_model):
posterior = filtsmooth.filter_kalman(
observations=observations,
locations=locations,
F=F,
L=L,
H=H,
R=R,
m0=m0,
C0=C0,
prior_model=prior_model,
)
assert isinstance(posterior, filtsmooth.gaussian.FilteringPosterior)
@pytest.mark.parametrize("prior_model", ["continuous", "discrete"])
def test_rauch_tung_striebel_smoother(
observations, locations, F, L, H, R, m0, C0, prior_model
):
posterior = filtsmooth.smooth_rts(
observations=observations,
locations=locations,
F=F,
L=L,
H=H,
R=R,
m0=m0,
C0=C0,
prior_model=prior_model,
)
assert isinstance(posterior, filtsmooth.gaussian.SmoothingPosterior)
def test_kalman_filter_raise_error(observations, locations, F, L, H, R, m0, C0):
"""Neither continuous nor discrete prior model raises a value error."""
with pytest.raises(ValueError):
filtsmooth.filter_kalman(
observations=observations,
locations=locations,
F=F,
L=L,
H=H,
R=R,
m0=m0,
C0=C0,
prior_model="neither_continuous_nor_discrete",
)
def test_rauch_tung_striebel_smoother_raise_error(
observations, locations, F, L, H, R, m0, C0
):
"""Neither continuous nor discrete prior model raises a value error."""
with pytest.raises(ValueError):
filtsmooth.smooth_rts(
observations=observations,
locations=locations,
F=F,
L=L,
H=H,
R=R,
m0=m0,
C0=C0,
prior_model="neither_continuous_nor_discrete",
)
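# The tests above are plain pytest tests; running e.g. `pytest -q` on this file executes each case
# for both the "continuous" and "discrete" prior models via the parametrize marks.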
| [
"numpy.eye",
"probnum.filtsmooth.smooth_rts",
"probnum.filtsmooth.filter_kalman",
"pytest.mark.parametrize",
"pytest.raises",
"pytest.fixture",
"numpy.arange"
] | [((111, 149), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""prior_dimension"""'}), "(name='prior_dimension')\n", (125, 149), False, 'import pytest\n'), ((197, 241), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""measurement_dimension"""'}), "(name='measurement_dimension')\n", (211, 241), False, 'import pytest\n'), ((295, 340), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""number_of_measurements"""'}), "(name='number_of_measurements')\n", (309, 340), False, 'import pytest\n'), ((396, 431), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""observations"""'}), "(name='observations')\n", (410, 431), False, 'import pytest\n'), ((648, 680), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""locations"""'}), "(name='locations')\n", (662, 680), False, 'import pytest\n'), ((776, 800), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""F"""'}), "(name='F')\n", (790, 800), False, 'import pytest\n'), ((871, 895), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""L"""'}), "(name='L')\n", (885, 895), False, 'import pytest\n'), ((966, 990), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""H"""'}), "(name='H')\n", (980, 990), False, 'import pytest\n'), ((1107, 1131), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""R"""'}), "(name='R')\n", (1121, 1131), False, 'import pytest\n'), ((1214, 1239), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""m0"""'}), "(name='m0')\n", (1228, 1239), False, 'import pytest\n'), ((1314, 1339), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""C0"""'}), "(name='C0')\n", (1328, 1339), False, 'import pytest\n'), ((1411, 1477), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""prior_model"""', "['continuous', 'discrete']"], {}), "('prior_model', ['continuous', 'discrete'])\n", (1434, 1477), False, 'import pytest\n'), ((1863, 1929), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""prior_model"""', "['continuous', 'discrete']"], {}), "('prior_model', ['continuous', 'discrete'])\n", (1886, 1929), False, 'import pytest\n'), ((515, 572), 'numpy.arange', 'np.arange', (['(measurement_dimension * number_of_measurements)'], {}), '(measurement_dimension * number_of_measurements)\n', (524, 572), True, 'import numpy as np\n'), ((739, 772), 'numpy.arange', 'np.arange', (['number_of_measurements'], {}), '(number_of_measurements)\n', (748, 772), True, 'import numpy as np\n'), ((844, 867), 'numpy.eye', 'np.eye', (['prior_dimension'], {}), '(prior_dimension)\n', (850, 867), True, 'import numpy as np\n'), ((939, 962), 'numpy.eye', 'np.eye', (['prior_dimension'], {}), '(prior_dimension)\n', (945, 962), True, 'import numpy as np\n'), ((1057, 1103), 'numpy.eye', 'np.eye', (['measurement_dimension', 'prior_dimension'], {}), '(measurement_dimension, prior_dimension)\n', (1063, 1103), True, 'import numpy as np\n'), ((1181, 1210), 'numpy.eye', 'np.eye', (['measurement_dimension'], {}), '(measurement_dimension)\n', (1187, 1210), True, 'import numpy as np\n'), ((1284, 1310), 'numpy.arange', 'np.arange', (['prior_dimension'], {}), '(prior_dimension)\n', (1293, 1310), True, 'import numpy as np\n'), ((1384, 1407), 'numpy.eye', 'np.eye', (['prior_dimension'], {}), '(prior_dimension)\n', (1390, 1407), True, 'import numpy as np\n'), ((1576, 1712), 'probnum.filtsmooth.filter_kalman', 'filtsmooth.filter_kalman', ([], {'observations': 'observations', 'locations': 'locations', 'F': 'F', 'L': 'L', 'H': 'H', 'R': 'R', 'm0': 'm0', 'C0': 'C0', 'prior_model': 'prior_model'}), '(observations=observations, locations=locations, F=\n F, L=L, 
H=H, R=R, m0=m0, C0=C0, prior_model=prior_model)\n', (1600, 1712), False, 'from probnum import filtsmooth\n'), ((2049, 2181), 'probnum.filtsmooth.smooth_rts', 'filtsmooth.smooth_rts', ([], {'observations': 'observations', 'locations': 'locations', 'F': 'F', 'L': 'L', 'H': 'H', 'R': 'R', 'm0': 'm0', 'C0': 'C0', 'prior_model': 'prior_model'}), '(observations=observations, locations=locations, F=F,\n L=L, H=H, R=R, m0=m0, C0=C0, prior_model=prior_model)\n', (2070, 2181), False, 'from probnum import filtsmooth\n'), ((2498, 2523), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2511, 2523), False, 'import pytest\n'), ((2533, 2696), 'probnum.filtsmooth.filter_kalman', 'filtsmooth.filter_kalman', ([], {'observations': 'observations', 'locations': 'locations', 'F': 'F', 'L': 'L', 'H': 'H', 'R': 'R', 'm0': 'm0', 'C0': 'C0', 'prior_model': '"""neither_continuous_nor_discrete"""'}), "(observations=observations, locations=locations, F=\n F, L=L, H=H, R=R, m0=m0, C0=C0, prior_model=\n 'neither_continuous_nor_discrete')\n", (2557, 2696), False, 'from probnum import filtsmooth\n'), ((2995, 3020), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3008, 3020), False, 'import pytest\n'), ((3030, 3184), 'probnum.filtsmooth.smooth_rts', 'filtsmooth.smooth_rts', ([], {'observations': 'observations', 'locations': 'locations', 'F': 'F', 'L': 'L', 'H': 'H', 'R': 'R', 'm0': 'm0', 'C0': 'C0', 'prior_model': '"""neither_continuous_nor_discrete"""'}), "(observations=observations, locations=locations, F=F,\n L=L, H=H, R=R, m0=m0, C0=C0, prior_model='neither_continuous_nor_discrete')\n", (3051, 3184), False, 'from probnum import filtsmooth\n')] |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
try:
from http.client import HTTPResponse
except ImportError:
from httplib import HTTPResponse
from optparse import OptionParser
from contextlib import closing
from .warctools import WarcRecord
parser = OptionParser(usage="%prog warc:offset")
parser.set_defaults(output_directory=None, limit=None, log_level="info")
def main(argv):
(options, args) = parser.parse_args(args=argv[1:])
filename, offset = args[0].rsplit(':',1)
if ',' in offset:
offset, length = [int(n) for n in offset.split(',',1)]
else:
offset = int(offset)
length = None # unknown
dump_payload_from_file(filename, offset, length)
def dump_payload_from_file(filename, offset=None, length=None):
with closing(WarcRecord.open_archive(filename=filename, gzip="auto", offset=offset, length=length)) as fh:
return dump_payload_from_stream(fh)
def dump_payload_from_stream(fh):
try: # python3
out = sys.stdout.buffer
except AttributeError: # python2
out = sys.stdout
for (offset, record, errors) in fh.read_records(limit=1, offsets=False):
if record:
if (record.type == WarcRecord.RESPONSE
and record.content_type.startswith(b'application/http')):
f = FileHTTPResponse(record.content_file)
f.begin()
else:
f = record.content_file
buf = f.read(8192)
while buf != b'':
out.write(buf)
buf = f.read(8192)
elif errors:
print("warc errors at %s:%d"%(name, offset if offset else 0), file=sys.stderr)
for e in errors:
print('\t', e)
class FileHTTPResponse(HTTPResponse):
"""HTTPResponse subclass that reads from the supplied fileobj instead of
from a socket."""
def __init__(self, fileobj, debuglevel=0, strict=0, method=None, buffering=False):
self.fp = fileobj
# We can't call HTTPResponse.__init__(self, ...) because it will try to
# call sock.makefile() and we have no sock. So we have to copy and
# paste the rest of the constructor below.
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.headers = self.msg = None
# from the Status-Line of the response
self.version = 'UNKNOWN' # HTTP-Version
self.status = 'UNKNOWN' # Status-Code
self.reason = 'UNKNOWN' # Reason-Phrase
self.chunked = 'UNKNOWN' # is "chunked" being used?
self.chunk_left = 'UNKNOWN' # bytes left to read in current chunk
self.length = 'UNKNOWN' # number of bytes left in response
self.will_close = 'UNKNOWN' # conn will close at end of response
def run():
sys.exit(main(sys.argv))
if __name__ == '__main__':
run()
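# Example invocation (illustrative): the single positional argument is "warc:offset" as in the
# usage string above, e.g. "archive.warc.gz:3086", or "warc:offset,length" when the record
# length is also known.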
| [
"optparse.OptionParser"
] | [((298, 337), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""%prog warc:offset"""'}), "(usage='%prog warc:offset')\n", (310, 337), False, 'from optparse import OptionParser\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models as db
from django.utils.translation import ugettext_lazy as _
from dj.choices import Choices
from ralph_scrooge.models.base import (
BaseUsage,
BaseUsageManager,
BaseUsageType,
)
PRICE_DIGITS = 16
PRICE_PLACES = 6
class UsageTypeUploadFreq(Choices):
_ = Choices.Choice
daily = _('daily').extra(
margin=datetime.timedelta(
days=settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get('daily', 1)
)
)
weekly = _('weekly').extra(
margin=datetime.timedelta(
days=settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get('weekly', 2)
)
)
monthly = _('monthly').extra(
margin=datetime.timedelta(
days=settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get('monthly', 3)
)
)
class UsageType(BaseUsage):
"""
Model contains usage types
"""
average = db.BooleanField(
verbose_name=_("Average the values over multiple days"),
default=False,
)
show_value_percentage = db.BooleanField(
verbose_name=_("Show percentage of value"),
default=False,
)
by_warehouse = db.BooleanField(
verbose_name=_("Usage type is by warehouse"),
default=False,
)
is_manually_type = db.BooleanField(
verbose_name=_("Cost or price for usage is entered manually"),
default=False,
)
by_cost = db.BooleanField(
verbose_name=_("Given value is a cost"),
default=False,
)
order = db.IntegerField(
verbose_name=_("Display order"),
default=0,
)
TYPE_CHOICES = (
('BU', _("Base usage type")),
('RU', _("Regular usage type")),
('SU', _("Service usage type")),
)
usage_type = db.CharField(
verbose_name=_("Type"),
max_length=2,
choices=TYPE_CHOICES,
default='SU',
)
excluded_services = db.ManyToManyField(
'Service',
verbose_name=_("Excluded services"),
related_name='excluded_usage_types',
blank=True,
)
owners = db.ManyToManyField(
'ScroogeUser',
verbose_name=_("Owners of usage type"),
related_name='usage_type_owners',
blank=True,
)
upload_freq = db.PositiveIntegerField(
null=False,
blank=False,
default=UsageTypeUploadFreq.daily.id,
choices=UsageTypeUploadFreq(),
verbose_name=_("Expected frequency of uploads"),
help_text=_(
"Please note that this value doesn't affect costs calculation at "
"all - it's just for notifications about \"holes\" detected in "
"your uploads. Remember that have to upload your usages for every "
"day, despite what you've selected here!"
),
)
unit = db.CharField(
verbose_name=_("Unit"),
max_length=32,
help_text=_("Unit of usage (e.g. bytes, MB, miliseconds, U, etc.)"),
blank=True,
default="",
)
change_tolerance = db.FloatField(
default=0.2,
help_text=_(
"If the relative change (as in: (d1-d2)/d2) between two "
"consecutive days is higher than this value, then the owners of "
"this usage type will be notified about such (unusual) change."
),
)
allow_no_daily_usage = db.BooleanField(
default=False,
help_text=_(
'Skip validation of this UsageType for existing DailyUsages before'
' recalculating costs'
),
)
support_team = db.CharField(
max_length=255,
verbose_name=_("Support team"),
help_text=_("Information about support team."),
blank=True,
default=''
)
objects_admin = db.Manager()
objects = BaseUsageManager()
class Meta:
verbose_name = _("usage type")
verbose_name_plural = _("usage types")
app_label = 'ralph_scrooge'
ordering = ['name']
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
self.type = BaseUsageType.usage_type
super(UsageType, self).save(*args, **kwargs)
def get_plugin_name(self):
return 'usage_type_plugin'
@property
def excluded_services_environments(self):
from ralph_scrooge.models import ServiceEnvironment
return ServiceEnvironment.objects.filter(
service__in=self.excluded_services.all()
)
class UsageAnomalyAck(db.Model):
"""
Model meant to record acknowledgements of anomalies reported to UsageType
owners (see management command `detect_usage_anomalies`).
    An anomaly comes from the difference between *two* days, but since the second
    day is always the first one plus one day, we only need to record the first one.
"""
type = db.ForeignKey(UsageType)
anomaly_date = db.DateField()
acknowledged_by = db.ForeignKey(settings.AUTH_USER_MODEL)
class Meta:
unique_together = ('type', 'anomaly_date', 'acknowledged_by')
def __unicode__(self):
return '{}: {:%Y-%m-%d} ({})'.format(
self.type.symbol, self.anomaly_date, self.acknowledged_by.username
)
class UsagePrice(db.Model):
"""
    Model contains usage price information
"""
type = db.ForeignKey(UsageType, verbose_name=_("type"))
price = db.DecimalField(
max_digits=PRICE_DIGITS,
decimal_places=PRICE_PLACES,
verbose_name=_("price"),
default=0,
)
forecast_price = db.DecimalField(
max_digits=PRICE_DIGITS,
decimal_places=PRICE_PLACES,
verbose_name=_("forecast price"),
default=0,
)
cost = db.DecimalField(
max_digits=PRICE_DIGITS,
decimal_places=PRICE_PLACES,
default=0.00,
verbose_name=_("cost"),
)
forecast_cost = db.DecimalField(
max_digits=PRICE_DIGITS,
decimal_places=PRICE_PLACES,
default=0.00,
verbose_name=_("forecast cost"),
)
start = db.DateField()
end = db.DateField()
warehouse = db.ForeignKey(
'Warehouse',
null=True,
blank=True,
on_delete=db.PROTECT,
verbose_name=_("warehouse"),
)
class Meta:
verbose_name = _("usage price")
verbose_name_plural = _("usage prices")
app_label = 'ralph_scrooge'
ordering = ('type', '-start')
def __unicode__(self):
if self.type and self.type.by_warehouse:
return '{}-{} ({}-{})'.format(
self.warehouse,
self.type,
self.start,
self.end,
)
return '{} ({}-{})'.format(
self.type,
self.start,
self.end,
)
def clean(self):
if self.type.by_warehouse and not self.warehouse:
raise ValidationError('Warehouse is required')
class DailyUsage(db.Model):
"""
DailyUsage model contains daily usage information for each
usage
"""
date = db.DateField()
service_environment = db.ForeignKey(
'ServiceEnvironment',
related_name='daily_usages',
verbose_name=_("service environment"),
)
daily_pricing_object = db.ForeignKey(
'DailyPricingObject',
verbose_name=_("Pricing Object"),
null=False,
blank=False,
)
value = db.FloatField(verbose_name=_("value"), default=0)
type = db.ForeignKey(UsageType, verbose_name=_("daily_usages"))
warehouse = db.ForeignKey(
'Warehouse',
null=False,
blank=False,
on_delete=db.PROTECT,
default=1,
verbose_name=_("warehouse"),
)
remarks = db.TextField(
verbose_name=_("Remarks"),
help_text=_("Additional information."),
blank=True,
default="",
)
class Meta:
verbose_name = _("daily usage")
verbose_name_plural = _("daily usages")
app_label = 'ralph_scrooge'
# TODO: after migration to Django>=1.5 add index_together on
# type_id, date and warehouse_id (see migration 0019 for details)
def __unicode__(self):
return '{0}/{1} ({2}) {3}'.format(
self.daily_pricing_object,
self.type,
self.date,
self.value,
)
| [
"django.db.models.DateField",
"django.utils.translation.ugettext_lazy",
"django.db.models.Manager",
"django.conf.settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get",
"django.db.models.ForeignKey",
"django.core.exceptions.ValidationError",
"ralph_scrooge.models.base.BaseUsageManager"
] | [((4015, 4027), 'django.db.models.Manager', 'db.Manager', ([], {}), '()\n', (4025, 4027), True, 'from django.db import models as db\n'), ((4042, 4060), 'ralph_scrooge.models.base.BaseUsageManager', 'BaseUsageManager', ([], {}), '()\n', (4058, 4060), False, 'from ralph_scrooge.models.base import BaseUsage, BaseUsageManager, BaseUsageType\n'), ((5075, 5099), 'django.db.models.ForeignKey', 'db.ForeignKey', (['UsageType'], {}), '(UsageType)\n', (5088, 5099), True, 'from django.db import models as db\n'), ((5119, 5133), 'django.db.models.DateField', 'db.DateField', ([], {}), '()\n', (5131, 5133), True, 'from django.db import models as db\n'), ((5156, 5195), 'django.db.models.ForeignKey', 'db.ForeignKey', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (5169, 5195), True, 'from django.db import models as db\n'), ((6274, 6288), 'django.db.models.DateField', 'db.DateField', ([], {}), '()\n', (6286, 6288), True, 'from django.db import models as db\n'), ((6299, 6313), 'django.db.models.DateField', 'db.DateField', ([], {}), '()\n', (6311, 6313), True, 'from django.db import models as db\n'), ((7288, 7302), 'django.db.models.DateField', 'db.DateField', ([], {}), '()\n', (7300, 7302), True, 'from django.db import models as db\n'), ((4101, 4116), 'django.utils.translation.ugettext_lazy', '_', (['"""usage type"""'], {}), "('usage type')\n", (4102, 4116), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4147, 4163), 'django.utils.translation.ugettext_lazy', '_', (['"""usage types"""'], {}), "('usage types')\n", (4148, 4163), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6518, 6534), 'django.utils.translation.ugettext_lazy', '_', (['"""usage price"""'], {}), "('usage price')\n", (6519, 6534), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6565, 6582), 'django.utils.translation.ugettext_lazy', '_', (['"""usage prices"""'], {}), "('usage prices')\n", (6566, 6582), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8137, 8153), 'django.utils.translation.ugettext_lazy', '_', (['"""daily usage"""'], {}), "('daily usage')\n", (8138, 8153), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8184, 8201), 'django.utils.translation.ugettext_lazy', '_', (['"""daily usages"""'], {}), "('daily usages')\n", (8185, 8201), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((607, 617), 'django.utils.translation.ugettext_lazy', '_', (['"""daily"""'], {}), "('daily')\n", (608, 617), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((762, 773), 'django.utils.translation.ugettext_lazy', '_', (['"""weekly"""'], {}), "('weekly')\n", (763, 773), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((920, 932), 'django.utils.translation.ugettext_lazy', '_', (['"""monthly"""'], {}), "('monthly')\n", (921, 932), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1195, 1237), 'django.utils.translation.ugettext_lazy', '_', (['"""Average the values over multiple days"""'], {}), "('Average the values over multiple days')\n", (1196, 1237), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1334, 1363), 'django.utils.translation.ugettext_lazy', '_', (['"""Show percentage of value"""'], {}), "('Show percentage of value')\n", (1335, 1363), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1451, 1482), 'django.utils.translation.ugettext_lazy', '_', (['"""Usage type is by warehouse"""'], {}), "('Usage type 
is by warehouse')\n", (1452, 1482), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1574, 1622), 'django.utils.translation.ugettext_lazy', '_', (['"""Cost or price for usage is entered manually"""'], {}), "('Cost or price for usage is entered manually')\n", (1575, 1622), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1705, 1731), 'django.utils.translation.ugettext_lazy', '_', (['"""Given value is a cost"""'], {}), "('Given value is a cost')\n", (1706, 1731), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1812, 1830), 'django.utils.translation.ugettext_lazy', '_', (['"""Display order"""'], {}), "('Display order')\n", (1813, 1830), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1893, 1913), 'django.utils.translation.ugettext_lazy', '_', (['"""Base usage type"""'], {}), "('Base usage type')\n", (1894, 1913), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1931, 1954), 'django.utils.translation.ugettext_lazy', '_', (['"""Regular usage type"""'], {}), "('Regular usage type')\n", (1932, 1954), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1972, 1995), 'django.utils.translation.ugettext_lazy', '_', (['"""Service usage type"""'], {}), "('Service usage type')\n", (1973, 1995), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2056, 2065), 'django.utils.translation.ugettext_lazy', '_', (['"""Type"""'], {}), "('Type')\n", (2057, 2065), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2231, 2253), 'django.utils.translation.ugettext_lazy', '_', (['"""Excluded services"""'], {}), "('Excluded services')\n", (2232, 2253), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2403, 2428), 'django.utils.translation.ugettext_lazy', '_', (['"""Owners of usage type"""'], {}), "('Owners of usage type')\n", (2404, 2428), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2688, 2722), 'django.utils.translation.ugettext_lazy', '_', (['"""Expected frequency of uploads"""'], {}), "('Expected frequency of uploads')\n", (2689, 2722), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2742, 2983), 'django.utils.translation.ugettext_lazy', '_', (['"""Please note that this value doesn\'t affect costs calculation at all - it\'s just for notifications about "holes" detected in your uploads. Remember that have to upload your usages for every day, despite what you\'ve selected here!"""'], {}), '(\'Please note that this value doesn\\\'t affect costs calculation at all - it\\\'s just for notifications about "holes" detected in your uploads. Remember that have to upload your usages for every day, despite what you\\\'ve selected here!\'\n )\n', (2743, 2983), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3098, 3107), 'django.utils.translation.ugettext_lazy', '_', (['"""Unit"""'], {}), "('Unit')\n", (3099, 3107), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3150, 3207), 'django.utils.translation.ugettext_lazy', '_', (['"""Unit of usage (e.g. bytes, MB, miliseconds, U, etc.)"""'], {}), "('Unit of usage (e.g. 
bytes, MB, miliseconds, U, etc.)')\n", (3151, 3207), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3332, 3521), 'django.utils.translation.ugettext_lazy', '_', (['"""If the relative change (as in: (d1-d2)/d2) between two consecutive days is higher than this value, then the owners of this usage type will be notified about such (unusual) change."""'], {}), "('If the relative change (as in: (d1-d2)/d2) between two consecutive days is higher than this value, then the owners of this usage type will be notified about such (unusual) change.'\n )\n", (3333, 3521), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3661, 3756), 'django.utils.translation.ugettext_lazy', '_', (['"""Skip validation of this UsageType for existing DailyUsages before recalculating costs"""'], {}), "('Skip validation of this UsageType for existing DailyUsages before recalculating costs'\n )\n", (3662, 3756), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3874, 3891), 'django.utils.translation.ugettext_lazy', '_', (['"""Support team"""'], {}), "('Support team')\n", (3875, 3891), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3911, 3947), 'django.utils.translation.ugettext_lazy', '_', (['"""Information about support team."""'], {}), "('Information about support team.')\n", (3912, 3947), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5585, 5594), 'django.utils.translation.ugettext_lazy', '_', (['"""type"""'], {}), "('type')\n", (5586, 5594), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5716, 5726), 'django.utils.translation.ugettext_lazy', '_', (['"""price"""'], {}), "('price')\n", (5717, 5726), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5882, 5901), 'django.utils.translation.ugettext_lazy', '_', (['"""forecast price"""'], {}), "('forecast price')\n", (5883, 5901), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6069, 6078), 'django.utils.translation.ugettext_lazy', '_', (['"""cost"""'], {}), "('cost')\n", (6070, 6078), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6236, 6254), 'django.utils.translation.ugettext_lazy', '_', (['"""forecast cost"""'], {}), "('forecast cost')\n", (6237, 6254), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6456, 6470), 'django.utils.translation.ugettext_lazy', '_', (['"""warehouse"""'], {}), "('warehouse')\n", (6457, 6470), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7117, 7157), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Warehouse is required"""'], {}), "('Warehouse is required')\n", (7132, 7157), False, 'from django.core.exceptions import ValidationError\n'), ((7432, 7456), 'django.utils.translation.ugettext_lazy', '_', (['"""service environment"""'], {}), "('service environment')\n", (7433, 7456), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7557, 7576), 'django.utils.translation.ugettext_lazy', '_', (['"""Pricing Object"""'], {}), "('Pricing Object')\n", (7558, 7576), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7664, 7674), 'django.utils.translation.ugettext_lazy', '_', (['"""value"""'], {}), "('value')\n", (7665, 7674), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7736, 7753), 'django.utils.translation.ugettext_lazy', '_', (['"""daily_usages"""'], {}), "('daily_usages')\n", (7737, 7753), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((7918, 7932), 'django.utils.translation.ugettext_lazy', '_', (['"""warehouse"""'], {}), "('warehouse')\n", (7919, 7932), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7989, 8001), 'django.utils.translation.ugettext_lazy', '_', (['"""Remarks"""'], {}), "('Remarks')\n", (7990, 8001), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8021, 8049), 'django.utils.translation.ugettext_lazy', '_', (['"""Additional information."""'], {}), "('Additional information.')\n", (8022, 8049), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((677, 732), 'django.conf.settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get', 'settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get', (['"""daily"""', '(1)'], {}), "('daily', 1)\n", (720, 732), False, 'from django.conf import settings\n'), ((833, 889), 'django.conf.settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get', 'settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get', (['"""weekly"""', '(2)'], {}), "('weekly', 2)\n", (876, 889), False, 'from django.conf import settings\n'), ((992, 1049), 'django.conf.settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get', 'settings.USAGE_TYPE_UPLOAD_FREQ_MARGINS.get', (['"""monthly"""', '(3)'], {}), "('monthly', 3)\n", (1035, 1049), False, 'from django.conf import settings\n')] |
from django import forms
from medicus import models as medicus_models
# class DoctorFilterForm(forms.Form):
# city = forms.CharField(required=True, max_length=100)
# name_or_profession = forms.CharField(required=False, max_length=100)
#
#
class SearchDoctorForm(forms.Form):
"""
    Search for a doctor on the website.
"""
profession = forms.CharField(label='profession', max_length=120)
city = forms.CharField(required=True, label='city', max_length=120)
class ProposeDoctorForm(forms.Form):
name = forms.CharField(label='name', max_length=128)
profession = forms.CharField(label='profession', max_length=128)
address = forms.CharField(label='address', max_length=128)
city = forms.CharField(label='city', max_length=128)
telephone = forms.CharField(label='telephone', max_length=64)
email = forms.EmailField(required=False, label='email', max_length=128)
website = forms.URLField(required=False, label='website', max_length=128)
class NewRatingForm(forms.ModelForm):
class Meta:
model = medicus_models.Rating
fields = (
'doctor',
'user',
'treatment',
'empathy',
'price',
'waiting_time',
'comment',
)
| [
"django.forms.URLField",
"django.forms.EmailField",
"django.forms.CharField"
] | [((355, 406), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""profession"""', 'max_length': '(120)'}), "(label='profession', max_length=120)\n", (370, 406), False, 'from django import forms\n'), ((418, 478), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)', 'label': '"""city"""', 'max_length': '(120)'}), "(required=True, label='city', max_length=120)\n", (433, 478), False, 'from django import forms\n'), ((530, 575), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""name"""', 'max_length': '(128)'}), "(label='name', max_length=128)\n", (545, 575), False, 'from django import forms\n'), ((593, 644), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""profession"""', 'max_length': '(128)'}), "(label='profession', max_length=128)\n", (608, 644), False, 'from django import forms\n'), ((659, 707), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""address"""', 'max_length': '(128)'}), "(label='address', max_length=128)\n", (674, 707), False, 'from django import forms\n'), ((719, 764), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""city"""', 'max_length': '(128)'}), "(label='city', max_length=128)\n", (734, 764), False, 'from django import forms\n'), ((781, 830), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""telephone"""', 'max_length': '(64)'}), "(label='telephone', max_length=64)\n", (796, 830), False, 'from django import forms\n'), ((843, 906), 'django.forms.EmailField', 'forms.EmailField', ([], {'required': '(False)', 'label': '"""email"""', 'max_length': '(128)'}), "(required=False, label='email', max_length=128)\n", (859, 906), False, 'from django import forms\n'), ((921, 984), 'django.forms.URLField', 'forms.URLField', ([], {'required': '(False)', 'label': '"""website"""', 'max_length': '(128)'}), "(required=False, label='website', max_length=128)\n", (935, 984), False, 'from django import forms\n')] |
from itertools import combinations
ins = input().split()
s = ins[0]
k = int(ins[1])
s = list(s)
s.sort()
for idx in range(1,k+1):
for x in combinations(s,idx):
print(''.join(x))
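# Example (illustrative): for the input "HACK 2" the sorted letters are A C H K, so the output is
# A, C, H, K, AC, AH, AK, CH, CK, HK (each printed on its own line).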
| [
"itertools.combinations"
] | [((144, 164), 'itertools.combinations', 'combinations', (['s', 'idx'], {}), '(s, idx)\n', (156, 164), False, 'from itertools import combinations\n')] |
from typing import Any, Dict, List, Union
import torch
import torch.nn as nn
from torch.autograd import grad as torch_grad
import numpy
from pytorch_lightning import LightningModule
from torchmetrics.metric import Metric
class WassersteinGanGpModule(LightningModule):
def __init__(
self,
generator_network: nn.Module = None,
discriminator_network: nn.Module = None,
optimizer_function: Any = torch.optim.Adam,
optimizer_params: Dict = dict(lr=0.0001, betas=(0.5, 0.9)),
batch_size:int = 32,
metrics: Union[List[Metric],Metric] = [],
critic_iterations: int = 5,
lambda_gradient_penalty: int = 10
):
super().__init__()
self.save_hyperparameters(ignore=["generator_network", "discriminator_network", "optimizer_function", "loss_function", "metrics"])
self._net_g = generator_network
self._net_d = discriminator_network
self._optimizer_function = optimizer_function
self._optimizer_params = optimizer_params
self._batch_size = batch_size
self._lambda_gp = lambda_gradient_penalty
self._critic_iterations = critic_iterations
if isinstance(metrics, Metric):
self._metrics = [metrics]
elif isinstance(metrics, list):
self._metrics = [m for m in metrics]
def forward(self, input):
return self._net_g(input)
def on_pretrain_routine_start(self) -> None:
self._is_cuda_enabled = self.device.type == 'cuda'
        self._metrics = [metric.to(self.device) for metric in self._metrics]  # move metrics to the module's device
def training_step(self, batch, batch_idx, optimizer_idx):
i_real, o_real = batch
if optimizer_idx == 0:
o_fake = self(i_real)
d_output = self._net_d(i_real, o_fake).squeeze()
g_loss = -d_output.mean()
loss = g_loss
self.log('train_loss', loss)
if optimizer_idx == 1:
# Generate fake data
o_fake = self(i_real)
# Calculate probabilities on real and generated data
d_real = self._net_d(i_real, o_real).squeeze()
d_fake = self._net_d(i_real, o_fake).squeeze()
# Get gradient penalty
gradient_penalty, grad_norm = self._gradient_penalty(i_real, o_real, o_fake)
# Create total loss and optimize
d_loss = d_fake.mean() - d_real.mean() + (self._lambda_gp * gradient_penalty)
loss = d_loss
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
x_t, y_t = batch
batch_size = x_t.shape[0]
self._net_g.eval()
y_f = self._net_g(x_t)
self._net_g.train()
y_f = y_f.squeeze()
y_t = y_t.squeeze()
for metric_name, metric_value in self._calculate_metrics(y_f, y_t).items():
self.log(f'valid_{metric_name}', metric_value)
def configure_optimizers(self):
self._net_g_optimizer = self._optimizer_function(self._net_g.parameters(), **self._optimizer_params)
self._net_d_optimizer = self._optimizer_function(self._net_d.parameters(), **self._optimizer_params)
return (
{'optimizer': self._net_g_optimizer, 'frequency': 1},
{'optimizer': self._net_d_optimizer, 'frequency': self._critic_iterations}
)
def _calculate_metrics(self, y, y_hat):
result = {}
for metric in self._metrics:
result[metric.__class__.__name__] = metric(y_hat, y)
return result
def _gradient_penalty(self, real_inputs, real_data, gen_data):
batch_size = real_data.size()[0]
dims = numpy.ones(len(real_data.shape), dtype=numpy.int64)
dims[0] = batch_size
dims = tuple(dims)
t = torch.rand(dims, requires_grad=True, dtype=self.dtype, device=self.device)
t = t.expand_as(real_data)
        t = t.to(self.device)  # keep on the module's device instead of hard-coding CUDA
# mixed sample from real and fake; make approx of the 'true' gradient norm
interpol = t * real_data.data + (1-t) * gen_data.data
        interpol = interpol.to(self.device)
#interpol = torch.tensor(interpol, requires_grad=True)
prob_interpol = self._net_d(real_inputs, interpol)
torch.autograd.set_detect_anomaly(True)
gradients = torch_grad(outputs=prob_interpol, inputs=interpol, grad_outputs=torch.ones(prob_interpol.size(), device=self.device), create_graph=True, retain_graph=True)[0]
gradients = gradients.view(batch_size, -1)
#grad_norm = torch.norm(gradients, dim=1).mean()
#self.losses['gradient_norm'].append(grad_norm.item())
# add epsilon for stability
eps = 1e-10
gradients_norm = torch.sqrt(torch.sum(gradients**2, dim=1, dtype=torch.double) + eps)
#gradients = gradients.cpu()
# comment: precision is lower than grad_norm (think that is double) and gradients_norm is float
return (torch.max(torch.zeros(1, dtype=torch.double, device=self.device), gradients_norm.mean() - 1) ** 2), gradients_norm.mean().item()
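# Minimal construction sketch (illustrative; the toy networks and sizes below are placeholders,
# not part of the original module). It only demonstrates the call signatures this module expects:
# the generator is called as net_g(condition) and the critic as net_d(condition, sample),
# returning one score per example.
if __name__ == "__main__":
    class ToyGenerator(nn.Module):
        def __init__(self):
            super().__init__()
            self.net = nn.Linear(8, 4)  # condition (8 dims) -> generated sample (4 dims)

        def forward(self, condition):
            return self.net(condition)

    class ToyCritic(nn.Module):
        def __init__(self):
            super().__init__()
            self.net = nn.Linear(8 + 4, 1)  # (condition, sample) -> scalar score

        def forward(self, condition, sample):
            return self.net(torch.cat([condition, sample], dim=-1))

    module = WassersteinGanGpModule(
        generator_network=ToyGenerator(),
        discriminator_network=ToyCritic(),
        batch_size=16,
    )
    print(module)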
| [
"torch.autograd.set_detect_anomaly",
"torch.sum",
"torch.zeros",
"torch.rand"
] | [((3885, 3959), 'torch.rand', 'torch.rand', (['dims'], {'requires_grad': '(True)', 'dtype': 'self.dtype', 'device': 'self.device'}), '(dims, requires_grad=True, dtype=self.dtype, device=self.device)\n', (3895, 3959), False, 'import torch\n'), ((4347, 4386), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (4380, 4386), False, 'import torch\n'), ((4830, 4882), 'torch.sum', 'torch.sum', (['(gradients ** 2)'], {'dim': '(1)', 'dtype': 'torch.double'}), '(gradients ** 2, dim=1, dtype=torch.double)\n', (4839, 4882), False, 'import torch\n'), ((5055, 5109), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'torch.double', 'device': 'self.device'}), '(1, dtype=torch.double, device=self.device)\n', (5066, 5109), False, 'import torch\n')] |
#-*- coding: utf-8 -*-
""" Cache module """
import hashlib
import os
import random
random.seed()
from glob import glob
from voiceplay.logger import logger
from .gdrive import GDrive
from .dbox import DBox
class BaseCache(object):
"""
Parent cache class with common methods
"""
@classmethod
def is_remote_cached(cls, target_filename):
"""
Return remote file ID if it is cached
"""
is_cached = None
cache = cls.CACHE_BACKEND()
for file_name, file_id in cache.search():
if file_name == os.path.basename(target_filename):
is_cached = file_id
logger.debug('File %r already cached at %r', target_filename, cls.CACHE_BACKEND)
break
return is_cached
@classmethod
def copy_to_cache(cls, target_filename):
"""
        Transfer file to cache (if it is not cached, of course)
"""
is_cached = cls.is_remote_cached(target_filename)
if not is_cached:
cache = cls.CACHE_BACKEND()
cache.upload(target_filename)
logger.debug('File %r was uploaded to %r', target_filename, cls.CACHE_BACKEND)
@classmethod
def get_from_cache(cls, target_filename):
"""
Get file from cache
"""
is_cached = cls.is_remote_cached(target_filename)
if is_cached:
cache = cls.CACHE_BACKEND()
cache.download(is_cached, target_filename)
logger.debug('File %r was downloaded from %r', target_filename, cls.CACHE_BACKEND)
else:
target_filename = None
return target_filename
@classmethod
def health_check(cls):
"""
Cache health check (space availability)
"""
cb = cls.CACHE_BACKEND()
return cb.health_check()
class DBoxCache(BaseCache):
"""
Cache using Dropbox
"""
CACHE_BACKEND = DBox
class GDriveCache(BaseCache):
"""
Cache using Google Drive
"""
CACHE_BACKEND = GDrive
class MixedCache(object):
"""
Multi-backend cache
"""
CACHE_BACKENDS = [DBoxCache, GDriveCache]
@staticmethod
def purge_cache():
"""
Purge file storage cache
"""
from voiceplay.config import Config
logger.debug('Purging cache...')
cache_dir = Config.cfg_data().get('cache_dir', '')
if os.path.exists(cache_dir) and os.path.isdir(cache_dir):
files = glob(os.path.join(cache_dir, '*'))
for fname in files:
try:
os.remove(fname)
except Exception as exc:
logger.debug('Removal of %r failed, please check permissions', exc)
@staticmethod
def track_to_hash(track):
"""
Hash track name using SHA1
"""
return hashlib.sha1(track.encode('utf-8')).hexdigest()
@classmethod
def get_from_cache(cls, file_name):
"""
Iterate over available cache backends, search for file, return first matching copy
"""
random.shuffle(cls.CACHE_BACKENDS)
fname = None
for cb in cls.CACHE_BACKENDS:
if not cb.health_check():
continue
fname = cb.get_from_cache(file_name)
if fname:
break
return fname
@classmethod
def copy_to_cache(cls, file_name):
"""
        Iterate over available cache backends, upload to the first one. Primitive load
        balancing is implemented by randomizing the backend list.
"""
random.shuffle(cls.CACHE_BACKENDS)
for cb in cls.CACHE_BACKENDS:
if not cb.health_check():
continue
# attempt upload
cb.copy_to_cache(file_name)
# confirm presence
if cls.get_from_cache(file_name):
break
| [
"voiceplay.logger.logger.debug",
"os.path.exists",
"random.shuffle",
"os.path.join",
"random.seed",
"voiceplay.config.Config.cfg_data",
"os.path.isdir",
"os.path.basename",
"os.remove"
] | [((84, 97), 'random.seed', 'random.seed', ([], {}), '()\n', (95, 97), False, 'import random\n'), ((2306, 2338), 'voiceplay.logger.logger.debug', 'logger.debug', (['"""Purging cache..."""'], {}), "('Purging cache...')\n", (2318, 2338), False, 'from voiceplay.logger import logger\n'), ((3091, 3125), 'random.shuffle', 'random.shuffle', (['cls.CACHE_BACKENDS'], {}), '(cls.CACHE_BACKENDS)\n', (3105, 3125), False, 'import random\n'), ((3596, 3630), 'random.shuffle', 'random.shuffle', (['cls.CACHE_BACKENDS'], {}), '(cls.CACHE_BACKENDS)\n', (3610, 3630), False, 'import random\n'), ((1114, 1192), 'voiceplay.logger.logger.debug', 'logger.debug', (['"""File %r was uploaded to %r"""', 'target_filename', 'cls.CACHE_BACKEND'], {}), "('File %r was uploaded to %r', target_filename, cls.CACHE_BACKEND)\n", (1126, 1192), False, 'from voiceplay.logger import logger\n'), ((1496, 1583), 'voiceplay.logger.logger.debug', 'logger.debug', (['"""File %r was downloaded from %r"""', 'target_filename', 'cls.CACHE_BACKEND'], {}), "('File %r was downloaded from %r', target_filename, cls.\n CACHE_BACKEND)\n", (1508, 1583), False, 'from voiceplay.logger import logger\n'), ((2409, 2434), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (2423, 2434), False, 'import os\n'), ((2439, 2463), 'os.path.isdir', 'os.path.isdir', (['cache_dir'], {}), '(cache_dir)\n', (2452, 2463), False, 'import os\n'), ((569, 602), 'os.path.basename', 'os.path.basename', (['target_filename'], {}), '(target_filename)\n', (585, 602), False, 'import os\n'), ((656, 741), 'voiceplay.logger.logger.debug', 'logger.debug', (['"""File %r already cached at %r"""', 'target_filename', 'cls.CACHE_BACKEND'], {}), "('File %r already cached at %r', target_filename, cls.CACHE_BACKEND\n )\n", (668, 741), False, 'from voiceplay.logger import logger\n'), ((2359, 2376), 'voiceplay.config.Config.cfg_data', 'Config.cfg_data', ([], {}), '()\n', (2374, 2376), False, 'from voiceplay.config import Config\n'), ((2490, 2518), 'os.path.join', 'os.path.join', (['cache_dir', '"""*"""'], {}), "(cache_dir, '*')\n", (2502, 2518), False, 'import os\n'), ((2593, 2609), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (2602, 2609), False, 'import os\n'), ((2671, 2738), 'voiceplay.logger.logger.debug', 'logger.debug', (['"""Removal of %r failed, please check permissions"""', 'exc'], {}), "('Removal of %r failed, please check permissions', exc)\n", (2683, 2738), False, 'from voiceplay.logger import logger\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 18:44:28 2020
@author: pkk24
"""
import os
import pickle
import numpy as np
import pdb
from itertools import repeat
from multiprocessing import freeze_support, Pool
import time
from spore import spore
from baselines.baselines_utils import process_AutoSPoReFMG_linear as proc
from baselines import algos, baselines_utils
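# The par* functions below are thin wrappers so multiprocessing.Pool.starmap can run one trial per process.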
def parSPoRe(sporeObject, t, Y, S, seed=None):
lamS, _, _, _ = sporeObject.recover(Y, S, seed=seed)
print('Trial # ' + str(t+1) + ' complete')
return lamS
def parSumPoiss(algo, t, Y, fm, xT):
Phis, gi, pyxf = proc(fm)
_, D = Y.shape
recInfo = algo.rec(Y, Phis, pyx_func=pyxf, group_indices=gi, xTrue=xT)
print('Sum Poisson Trial # ' + str(t+1) + ' complete')
return recInfo
def parPoissAlt(algo, t, Y, lam0, fm, xT, label='unspecified'):
Phis, gi, pyxf = proc(fm)
_, D = Y.shape
recInfo = algo.rec(Y, Phis, lam0, group_indices=gi, xTrue=xT)
print('Poisson Alt Trial # ' + str(t+1) + ' complete; Initializer: ' + str(label))
return recInfo
if __name__ == '__main__':
freeze_support()
#allVar = np.logspace(-2, 0, 5)
simsDataFiles = ['20-11-23_rng1_phiUnif_Var0.001_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.0021544346900318843_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.004641588833612777_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.01_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.021544346900318832_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.046415888336127774_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.1_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.21544346900318823_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var0.46415888336127775_M2_N10_k3_D100_lamTot2_G1_50trials.pkl', \
'20-11-23_rng1_phiUnif_Var1.0_M2_N10_k3_D100_lamTot2_G1_50trials.pkl']
saveStem = 'results_Nov26fix_'
#notes = ''
notes = '20/11/23 11:23 AM: Integer baseline algorithms - Redoing with new grad clipping and doing 50 trials'
mainSeed = 1
randInitOffset = 0.1
for f in range(len(simsDataFiles)):
with open(simsDataFiles[f], 'rb') as file:
allX, allLam, allY, allFwdModels = pickle.load(file)
N, numTrials = allLam.shape
M, D, _ = allY.shape
k = np.sum(allLam[:,0] !=0)
lamTot = np.sum(allLam[:,0])
sampler = spore.PoissonSampler(np.zeros(N), sample_same=True, seed=mainSeed)
sigInv = baselines_utils.get_sigma_inv_linfixgauss(allFwdModels[0].fwdmodel_group) # assume that all in the file have constant noise setting
# would not work for scaling noise
oracleAlgo = algos.oracle_int_MMV(k, np.max(allX))
sumPoissAlgo = algos.SumPoissonSMV(sigInv, alpha=1e-2, lambda_total = lamTot, max_iter_bb=100)
altAlgo = algos.PoissonAlt(sigInv, alpha=1e-2, max_iter_bb=100, max_alt=10) # 3 different initializers
#- random initializer [knows k]
#- unbiased initializer (lamTot/N) [knows lamTot]
#- initialize with SumPoissonSMV [knows lamTot]
#- initialize with SPoRe
#randomLam0 = np.random.uniform(size=(N,numTrials))+randInitOffset
randomLam0s = []
for t in range(numTrials):
randomLam0s.append(np.random.uniform(size=(N,)) + randInitOffset)
unbiasedLam0 = np.ones(N)*(lamTot/N)
baselineAlgos = [oracleAlgo, sumPoissAlgo, altAlgo]
B = 6
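        # Baseline slots: 0 = oracle integer MMV, 1 = SumPoissonSMV,
        # 2-5 = PoissonAlt initialized with random / unbiased / SumPoisson / SPoRe estimates.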
#SPoRe parameters
cpuCount = 4
S = 1000
convRel = 1e-2
stepSize = 1e-1
patience=3000
maxIterSPoRe=int(1e5)
gradClip=5e-2
sporeObjs = []
Ys = []
xTrues = []
for t in range(numTrials):
sporeObjs.append(spore.SPoRe(N, allFwdModels[t], sampler, conv_rel=convRel, \
                max_iter=maxIterSPoRe, step_size=stepSize, patience=patience, grad_clip=gradClip))
Ys.append(allY[:,:,t])
xTrues.append(allX[:,:,t])
timepoints = []
timepoints.append(time.time())
p = Pool(cpuCount)
lamSPoRe = p.starmap(parSPoRe, zip(sporeObjs, np.arange(numTrials), Ys, repeat(S), repeat(mainSeed)))
timepoints.append(time.time())
p.close()
p.join()
# Run baselines
XB = np.zeros((N, D, numTrials, B))
lamB = np.zeros((N, numTrials, B))
timeBaselines = np.zeros((numTrials, B))
exitFlags = np.zeros((numTrials, B))
lossFlags = np.zeros((numTrials, B))
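        # XB: recovered N x D signals per trial and baseline; lamB: the per-trial rate estimates
        # derived from them; exitFlags/lossFlags: status values returned by the iterative baselines.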
for t in range(numTrials):
Phis, gi, pyxf = proc(allFwdModels[t])
XB[:,:,t, 0] = oracleAlgo.rec(allY[:,:,t], Phis)
timepoints.append(time.time())
# Sum poisson baseline
p = Pool(cpuCount)
sumPoissData = p.starmap(parSumPoiss, zip(repeat(sumPoissAlgo), np.arange(numTrials), Ys, allFwdModels, xTrues))
timepoints.append(time.time())
p.close()
p.join()
lamSum = []
for t in range(numTrials):
lamSum.append(np.sum(sumPoissData[t][0], axis=1)/D)
#Alternating baselines
p = Pool(cpuCount)
altRandData = p.starmap(parPoissAlt, zip(repeat(altAlgo), np.arange(numTrials), Ys, randomLam0s, allFwdModels, xTrues, repeat('Random')))
timepoints.append(time.time())
altUnbData = p.starmap(parPoissAlt, zip(repeat(altAlgo), np.arange(numTrials), Ys, repeat(unbiasedLam0), allFwdModels, xTrues, repeat('Unbiased')))
timepoints.append(time.time())
altSumPoissData = p.starmap(parPoissAlt, zip(repeat(altAlgo), np.arange(numTrials), Ys, lamSum, allFwdModels, xTrues, repeat('SumPoisson')))
timepoints.append(time.time())
altSPoReData = p.starmap(parPoissAlt, zip(repeat(altAlgo), np.arange(numTrials), Ys, lamSPoRe, allFwdModels, xTrues, repeat('SPoRe')))
timepoints.append(time.time())
p.close()
p.join()
for t in range(numTrials):
XB[:,:,t, 1], exitFlags[t,1], lossFlags[t,1] = sumPoissData[t]
XB[:,:,t, 2], exitFlags[t,2], lossFlags[t,2] = altRandData[t]
XB[:,:,t, 3], exitFlags[t,3], lossFlags[t,3] = altUnbData[t]
XB[:,:,t, 4], exitFlags[t,4], lossFlags[t,4] = altSumPoissData[t]
XB[:,:,t, 5], exitFlags[t,5], lossFlags[t,5] = altSPoReData[t]
for b in range(B):
lamB[:,t,b] = np.sum(XB[:,:,t,b], axis=1)/D
# Store results
lamCosSim = np.zeros((numTrials, B+1))
lamRelL2err = np.zeros((numTrials, B+1))
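        # Column 0 holds SPoRe's cosine similarity / relative L2 error against the true rates; columns 1..B hold the baselines'.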
for t in range(numTrials):
lamCosSim[t, 0] = np.dot(lamSPoRe[t], allLam[:,t]) / (np.linalg.norm(lamSPoRe[t])*np.linalg.norm(allLam[:,t]))
lamRelL2err[t, 0] = np.linalg.norm(lamSPoRe[t] - allLam[:,t]) / np.linalg.norm(allLam[:,t])
lamCosSim[t, 1:] = (lamB[:,t,:].T @ allLam[:,t]) / (np.linalg.norm(lamB[:,t,:], axis=0) * np.linalg.norm(allLam[:,t]))
lamRelL2err[t, 1:] = np.linalg.norm(lamB[:,t,:] - allLam[:,t][:,None], axis=0) / np.linalg.norm(allLam[:,t])
algoTimes = np.diff(timepoints)
saveResultsFile = saveStem + simsDataFiles[f]
        if not os.path.exists(saveResultsFile):
with open(saveResultsFile, 'wb') as file:
pickle.dump([lamSPoRe, lamB, allLam, lamCosSim, lamRelL2err, cpuCount, algoTimes, baselineAlgos, exitFlags, lossFlags, notes], file)
else:
print('output file already exists - won\'t overwrite')
| [
"multiprocessing.freeze_support",
"numpy.linalg.norm",
"numpy.arange",
"itertools.repeat",
"os.path.exists",
"baselines.algos.SumPoissonSMV",
"numpy.diff",
"spore.spore.SPoRe",
"numpy.max",
"numpy.dot",
"numpy.ones",
"pickle.load",
"time.time",
"baselines.baselines_utils.get_sigma_inv_linfixgauss",
"baselines.algos.PoissonAlt",
"pickle.dump",
"baselines.baselines_utils.process_AutoSPoReFMG_linear",
"numpy.sum",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.random.uniform"
] | [((627, 635), 'baselines.baselines_utils.process_AutoSPoReFMG_linear', 'proc', (['fm'], {}), '(fm)\n', (631, 635), True, 'from baselines.baselines_utils import process_AutoSPoReFMG_linear as proc\n'), ((920, 928), 'baselines.baselines_utils.process_AutoSPoReFMG_linear', 'proc', (['fm'], {}), '(fm)\n', (924, 928), True, 'from baselines.baselines_utils import process_AutoSPoReFMG_linear as proc\n'), ((1191, 1207), 'multiprocessing.freeze_support', 'freeze_support', ([], {}), '()\n', (1205, 1207), False, 'from multiprocessing import freeze_support, Pool\n'), ((2789, 2814), 'numpy.sum', 'np.sum', (['(allLam[:, 0] != 0)'], {}), '(allLam[:, 0] != 0)\n', (2795, 2814), True, 'import numpy as np\n'), ((2830, 2850), 'numpy.sum', 'np.sum', (['allLam[:, 0]'], {}), '(allLam[:, 0])\n', (2836, 2850), True, 'import numpy as np\n'), ((2952, 3025), 'baselines.baselines_utils.get_sigma_inv_linfixgauss', 'baselines_utils.get_sigma_inv_linfixgauss', (['allFwdModels[0].fwdmodel_group'], {}), '(allFwdModels[0].fwdmodel_group)\n', (2993, 3025), False, 'from baselines import algos, baselines_utils\n'), ((3222, 3299), 'baselines.algos.SumPoissonSMV', 'algos.SumPoissonSMV', (['sigInv'], {'alpha': '(0.01)', 'lambda_total': 'lamTot', 'max_iter_bb': '(100)'}), '(sigInv, alpha=0.01, lambda_total=lamTot, max_iter_bb=100)\n', (3241, 3299), False, 'from baselines import algos, baselines_utils\n'), ((3320, 3385), 'baselines.algos.PoissonAlt', 'algos.PoissonAlt', (['sigInv'], {'alpha': '(0.01)', 'max_iter_bb': '(100)', 'max_alt': '(10)'}), '(sigInv, alpha=0.01, max_iter_bb=100, max_alt=10)\n', (3336, 3385), False, 'from baselines import algos, baselines_utils\n'), ((4693, 4707), 'multiprocessing.Pool', 'Pool', (['cpuCount'], {}), '(cpuCount)\n', (4697, 4707), False, 'from multiprocessing import freeze_support, Pool\n'), ((4969, 4999), 'numpy.zeros', 'np.zeros', (['(N, D, numTrials, B)'], {}), '((N, D, numTrials, B))\n', (4977, 4999), True, 'import numpy as np\n'), ((5015, 5042), 'numpy.zeros', 'np.zeros', (['(N, numTrials, B)'], {}), '((N, numTrials, B))\n', (5023, 5042), True, 'import numpy as np\n'), ((5067, 5091), 'numpy.zeros', 'np.zeros', (['(numTrials, B)'], {}), '((numTrials, B))\n', (5075, 5091), True, 'import numpy as np\n'), ((5112, 5136), 'numpy.zeros', 'np.zeros', (['(numTrials, B)'], {}), '((numTrials, B))\n', (5120, 5136), True, 'import numpy as np\n'), ((5157, 5181), 'numpy.zeros', 'np.zeros', (['(numTrials, B)'], {}), '((numTrials, B))\n', (5165, 5181), True, 'import numpy as np\n'), ((5445, 5459), 'multiprocessing.Pool', 'Pool', (['cpuCount'], {}), '(cpuCount)\n', (5449, 5459), False, 'from multiprocessing import freeze_support, Pool\n'), ((5884, 5898), 'multiprocessing.Pool', 'Pool', (['cpuCount'], {}), '(cpuCount)\n', (5888, 5898), False, 'from multiprocessing import freeze_support, Pool\n'), ((7318, 7346), 'numpy.zeros', 'np.zeros', (['(numTrials, B + 1)'], {}), '((numTrials, B + 1))\n', (7326, 7346), True, 'import numpy as np\n'), ((7367, 7395), 'numpy.zeros', 'np.zeros', (['(numTrials, B + 1)'], {}), '((numTrials, B + 1))\n', (7375, 7395), True, 'import numpy as np\n'), ((7955, 7974), 'numpy.diff', 'np.diff', (['timepoints'], {}), '(timepoints)\n', (7962, 7974), True, 'import numpy as np\n'), ((2694, 2711), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2705, 2711), False, 'import pickle\n'), ((2889, 2900), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2897, 2900), True, 'import numpy as np\n'), ((3185, 3197), 'numpy.max', 'np.max', (['allX'], {}), '(allX)\n', (3191, 3197), True, 
'import numpy as np\n'), ((3869, 3879), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (3876, 3879), True, 'import numpy as np\n'), ((4664, 4675), 'time.time', 'time.time', ([], {}), '()\n', (4673, 4675), False, 'import time\n'), ((4873, 4884), 'time.time', 'time.time', ([], {}), '()\n', (4882, 4884), False, 'import time\n'), ((5249, 5270), 'baselines.baselines_utils.process_AutoSPoReFMG_linear', 'proc', (['allFwdModels[t]'], {}), '(allFwdModels[t])\n', (5253, 5270), True, 'from baselines.baselines_utils import process_AutoSPoReFMG_linear as proc\n'), ((5372, 5383), 'time.time', 'time.time', ([], {}), '()\n', (5381, 5383), False, 'import time\n'), ((5644, 5655), 'time.time', 'time.time', ([], {}), '()\n', (5653, 5655), False, 'import time\n'), ((6108, 6119), 'time.time', 'time.time', ([], {}), '()\n', (6117, 6119), False, 'import time\n'), ((6320, 6331), 'time.time', 'time.time', ([], {}), '()\n', (6329, 6331), False, 'import time\n'), ((6517, 6528), 'time.time', 'time.time', ([], {}), '()\n', (6526, 6528), False, 'import time\n'), ((6716, 6727), 'time.time', 'time.time', ([], {}), '()\n', (6725, 6727), False, 'import time\n'), ((8040, 8071), 'os.path.exists', 'os.path.exists', (['saveResultsFile'], {}), '(saveResultsFile)\n', (8054, 8071), False, 'import os\n'), ((4323, 4464), 'spore.spore.SPoRe', 'spore.SPoRe', (['N', 'allFwdModels[t]', 'sampler'], {'conv_rel': 'convRel', 'max_iter': 'maxIterSPoRe', 'step_size': 'stepSize', 'patience': 'patience', 'grad_clip': '(0.05)'}), '(N, allFwdModels[t], sampler, conv_rel=convRel, max_iter=\n maxIterSPoRe, step_size=stepSize, patience=patience, grad_clip=0.05)\n', (4334, 4464), False, 'from spore import spore\n'), ((4790, 4810), 'numpy.arange', 'np.arange', (['numTrials'], {}), '(numTrials)\n', (4799, 4810), True, 'import numpy as np\n'), ((4816, 4825), 'itertools.repeat', 'repeat', (['S'], {}), '(S)\n', (4822, 4825), False, 'from itertools import repeat\n'), ((4827, 4843), 'itertools.repeat', 'repeat', (['mainSeed'], {}), '(mainSeed)\n', (4833, 4843), False, 'from itertools import repeat\n'), ((5546, 5566), 'itertools.repeat', 'repeat', (['sumPoissAlgo'], {}), '(sumPoissAlgo)\n', (5552, 5566), False, 'from itertools import repeat\n'), ((5568, 5588), 'numpy.arange', 'np.arange', (['numTrials'], {}), '(numTrials)\n', (5577, 5588), True, 'import numpy as np\n'), ((5976, 5991), 'itertools.repeat', 'repeat', (['altAlgo'], {}), '(altAlgo)\n', (5982, 5991), False, 'from itertools import repeat\n'), ((5993, 6013), 'numpy.arange', 'np.arange', (['numTrials'], {}), '(numTrials)\n', (6002, 6013), True, 'import numpy as np\n'), ((6054, 6070), 'itertools.repeat', 'repeat', (['"""Random"""'], {}), "('Random')\n", (6060, 6070), False, 'from itertools import repeat\n'), ((6169, 6184), 'itertools.repeat', 'repeat', (['altAlgo'], {}), '(altAlgo)\n', (6175, 6184), False, 'from itertools import repeat\n'), ((6186, 6206), 'numpy.arange', 'np.arange', (['numTrials'], {}), '(numTrials)\n', (6195, 6206), True, 'import numpy as np\n'), ((6212, 6232), 'itertools.repeat', 'repeat', (['unbiasedLam0'], {}), '(unbiasedLam0)\n', (6218, 6232), False, 'from itertools import repeat\n'), ((6256, 6274), 'itertools.repeat', 'repeat', (['"""Unbiased"""'], {}), "('Unbiased')\n", (6262, 6274), False, 'from itertools import repeat\n'), ((6386, 6401), 'itertools.repeat', 'repeat', (['altAlgo'], {}), '(altAlgo)\n', (6392, 6401), False, 'from itertools import repeat\n'), ((6403, 6423), 'numpy.arange', 'np.arange', (['numTrials'], {}), '(numTrials)\n', (6412, 6423), True, 'import numpy as 
np\n'), ((6459, 6479), 'itertools.repeat', 'repeat', (['"""SumPoisson"""'], {}), "('SumPoisson')\n", (6465, 6479), False, 'from itertools import repeat\n'), ((6580, 6595), 'itertools.repeat', 'repeat', (['altAlgo'], {}), '(altAlgo)\n', (6586, 6595), False, 'from itertools import repeat\n'), ((6597, 6617), 'numpy.arange', 'np.arange', (['numTrials'], {}), '(numTrials)\n', (6606, 6617), True, 'import numpy as np\n'), ((6655, 6670), 'itertools.repeat', 'repeat', (['"""SPoRe"""'], {}), "('SPoRe')\n", (6661, 6670), False, 'from itertools import repeat\n'), ((7460, 7493), 'numpy.dot', 'np.dot', (['lamSPoRe[t]', 'allLam[:, t]'], {}), '(lamSPoRe[t], allLam[:, t])\n', (7466, 7493), True, 'import numpy as np\n'), ((7585, 7627), 'numpy.linalg.norm', 'np.linalg.norm', (['(lamSPoRe[t] - allLam[:, t])'], {}), '(lamSPoRe[t] - allLam[:, t])\n', (7599, 7627), True, 'import numpy as np\n'), ((7629, 7657), 'numpy.linalg.norm', 'np.linalg.norm', (['allLam[:, t]'], {}), '(allLam[:, t])\n', (7643, 7657), True, 'import numpy as np\n'), ((7834, 7895), 'numpy.linalg.norm', 'np.linalg.norm', (['(lamB[:, t, :] - allLam[:, t][:, None])'], {'axis': '(0)'}), '(lamB[:, t, :] - allLam[:, t][:, None], axis=0)\n', (7848, 7895), True, 'import numpy as np\n'), ((7894, 7922), 'numpy.linalg.norm', 'np.linalg.norm', (['allLam[:, t]'], {}), '(allLam[:, t])\n', (7908, 7922), True, 'import numpy as np\n'), ((8154, 8290), 'pickle.dump', 'pickle.dump', (['[lamSPoRe, lamB, allLam, lamCosSim, lamRelL2err, cpuCount, algoTimes,\n baselineAlgos, exitFlags, lossFlags, notes]', 'file'], {}), '([lamSPoRe, lamB, allLam, lamCosSim, lamRelL2err, cpuCount,\n algoTimes, baselineAlgos, exitFlags, lossFlags, notes], file)\n', (8165, 8290), False, 'import pickle\n'), ((3799, 3827), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(N,)'}), '(size=(N,))\n', (3816, 3827), True, 'import numpy as np\n'), ((5774, 5808), 'numpy.sum', 'np.sum', (['sumPoissData[t][0]'], {'axis': '(1)'}), '(sumPoissData[t][0], axis=1)\n', (5780, 5808), True, 'import numpy as np\n'), ((7237, 7267), 'numpy.sum', 'np.sum', (['XB[:, :, t, b]'], {'axis': '(1)'}), '(XB[:, :, t, b], axis=1)\n', (7243, 7267), True, 'import numpy as np\n'), ((7496, 7523), 'numpy.linalg.norm', 'np.linalg.norm', (['lamSPoRe[t]'], {}), '(lamSPoRe[t])\n', (7510, 7523), True, 'import numpy as np\n'), ((7524, 7552), 'numpy.linalg.norm', 'np.linalg.norm', (['allLam[:, t]'], {}), '(allLam[:, t])\n', (7538, 7552), True, 'import numpy as np\n'), ((7734, 7771), 'numpy.linalg.norm', 'np.linalg.norm', (['lamB[:, t, :]'], {'axis': '(0)'}), '(lamB[:, t, :], axis=0)\n', (7748, 7771), True, 'import numpy as np\n'), ((7772, 7800), 'numpy.linalg.norm', 'np.linalg.norm', (['allLam[:, t]'], {}), '(allLam[:, t])\n', (7786, 7800), True, 'import numpy as np\n')] |
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
import json
from validate_email import validate_email
from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems
from utilities import cursor, db
from datetime import datetime, timedelta
import jwt
import logging
import collections
import os
# logging
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__) # flask app
CORS(app) # allow cors
app.config['SECRET_KEY'] = '<KEY>'
""" Authentication: Login/SignUp"""
@app.route('/validateUser', methods=['POST', 'GET'])
def validateUser():
"""
Validating the existance of a user in the database.
"""
data = json.loads(request.data)
usernameOrEmail = data['usernameOrEmail'].strip()
password = data['password']
if validate_email(usernameOrEmail):
condition = 'Email'
else:
condition = 'Username'
cursor.execute(f"select * from User where {condition}='{usernameOrEmail}'")
fetched_data = cursor.fetchall()
if not len(fetched_data) == 0:
fetched_zipped_data = zipTableAttrs('user', fetched_data)[0]
if usernameOrEmail == fetched_zipped_data[condition] and password == fetched_zipped_data['Password']:
            token = jwt.encode(
                {'user': usernameOrEmail, 'exp': datetime.utcnow() + timedelta(minutes=30)},
                app.config['SECRET_KEY'])
return jsonify({'exists': True, 'token': token.decode('utf-8'), 'username': fetched_zipped_data['Username']})
return jsonify({'exists': False, 'username': ''})
@app.route('/registerUser', methods=['POST'])
def registerUser():
"""
    Registering a user, inserting it into the User table if the email and username are available and valid.
"""
data = json.loads(request.data)
name = data['name'].strip()
username = data['username'].strip()
email = data['email'].strip()
password = data['password']
country = data['country']['label']
phoneNum = data['phone']
birthday = data['birthday'].split('T')[0]
# check if the email is available
emailAvailable = False
emailValid = False
cursor.execute(f'select * from User where Email="{email}"')
if len(cursor.fetchall()) == 0:
emailAvailable = True
if validate_email(email):
emailValid = True
# check if username is available
usernameAvailable = False
cursor.execute(f'select * from User where Username="{username}"')
if len(cursor.fetchall()) == 0:
usernameAvailable = True
    # assign a default picture
if emailAvailable and usernameAvailable and emailValid:
cursor.execute(
f"insert User(Name, Username, Email, Password, Country, PhoneNumber, Birthday) values('{name}', '{username}', '{email}', '{password}','{country}', '{phoneNum}', '{birthday}')")
return jsonify({'registered': True, 'emailValid': True, 'emailAvailable': True, 'usernameAvailable': True})
else:
return jsonify({'registered': False, 'emailValid': emailValid, 'emailAvailable': emailAvailable, 'usernameAvailable': usernameAvailable})
@app.route('/validateToken', methods=['POST'])
def validateToken():
"""
Validating a user's token
"""
data = json.loads(request.data)
token = data['token']
if not token:
return jsonify({'valid': False})
try:
jwt.decode(token, app.config['SECRET_KEY'])
return jsonify({'valid': True})
except:
return jsonify({'valid': False})
""" Profile """
@app.route('/getUserInfo/<username>', methods=['GET'])
def getUserInfo(username):
"""
    - A dynamic url contains the username of the user and is passed to the function.
    - A query will get the user info for that username.
    - Reviews are queried with the profile's username.
"""
cursor.execute(f"select * from User where Username='{username}'")
res = cursor.fetchall()
if len(res) == 0:
return jsonify('NOT FOUND')
info = zipTableAttrs('user', res)[0]
# trips
cursor.execute(f"select * from Trip where Username='{username}'")
trips = cursor.fetchall()
midpoints, unWantedCategories = getMidpointsAndunWantedCats(trips, cursor)
# shipments
cursor.execute(f"select * from Shipment where Username='{username}'")
shipments = cursor.fetchall()
items_final = getItemsOfShipments(shipments, cursor)
pics = matchPicsOfItems(shipments)
# reviews
cursor.execute(f"select * from Review where revieweeUsername='{username}'")
reviews = zipTableAttrs('review', cursor.fetchall())
# user's rating
ratings = map(lambda rev: rev['NumOfStars'], reviews)
freq = collections.Counter(ratings)
summation = sum(int(k)*v for k, v in freq.items())
allrates = sum([i[1] for i in freq.items() if int(i[0]) != 0])
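    # Average star rating rounded to the nearest integer; zero-star reviews are excluded from the denominator.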
if allrates > 0:
userRating = int(round(summation / allrates))
else:
userRating = 0
# requests
cursor.execute(
f"select * from Request where senderUsername='{info['Username']}' or receiverUsername='{info['Username']}'")
requests = zipTableAttrs('request', cursor.fetchall())
# req_trip_ship => {req: {ship:'', trip:''}}
req_shipTrip = {}
items = [] # of a shipment
for req in requests:
tripId = req['TripID']
shipmentId = req['ShipmentID']
cursor.execute(f"select * from Shipment where ID='{shipmentId}'")
# shipment related to the request
shipment = zipTableAttrs('shipment', cursor.fetchall())
cursor.execute(f"select * from Trip where Id='{tripId}'")
# trip related to the request
trip = zipTableAttrs('trip', cursor.fetchall())
# midpoints
cursor.execute(
f"select (District) from trip as t, Midpoint as m where t.Id = m.TripID and t.Id='{tripId}'")
midpoint_districts = [i[0] for i in cursor.fetchall()]
# unwanted categories
cursor.execute(
f"select (Category) from trip as t, UnwantedCategory as u where t.Id = u.tripID and t.Id ='{tripId}'")
unwanted_categories = [i[0] for i in cursor.fetchall()]
# Items
cursor.execute(
f"select * from Item where shipmentID = '{shipmentId}'")
items = zipTableAttrs('item', cursor.fetchall())
req_shipTrip[req['Id']] = {'shipment': shipment[0], 'trip': trip[0],
'unwantedCategories': unwanted_categories,
'midpoints': midpoint_districts, 'items': items}
info['userRating'] = userRating
info_reviews = {'info': info, 'reviews': reviews, 'trips': zipTableAttrs('trip', trips),
'shipments': zipTableAttrs('shipment', shipments), 'midpoints': midpoints,
                    'unWantedCategories': unWantedCategories,
                    'requests': requests, 'reqShipTrip': req_shipTrip, 'items': items_final, 'itemsPics': pics}
return jsonify(info_reviews)
@app.route('/getProfilePic/<username>', methods=['GET'])
def getProfilePic(username):
filePath = os.path.join(
os.getcwd(), f'static/Img/profilePics/{username}.png')
if os.path.isfile(filePath):
return send_file(filePath, mimetype='image/gif')
else:
defaultPath = os.path.join(
os.getcwd(), 'static/Img/profilePics/defaultProfilePic.png')
return send_file(defaultPath, mimetype='image/gif')
@app.route('/uploadProfilePic/<username>', methods=['POST'])
def uploadProfilePic(username):
image = request.files['image']
saveTo = os.path.join(
os.getcwd(), 'static/Img/profilePics', f'{username}.png')
image.save(saveTo)
return "SUCCESS"
@app.route('/acceptRequest', methods=['POST'])
def acceptRequest():
data = json.loads(request.data)
reqId = data['reqId']
status = data['status']
cursor.execute(
f"UPDATE Request SET status = '{status}' WHERE ID = '{reqId}';")
return 'SUCCESS'
@app.route('/declineRequest', methods=['POST'])
def declineRequest():
data = json.loads(request.data)
reqId = data['reqId']
status = data['status']
cursor.execute(
f"UPDATE Request SET status = status + '{status}' WHERE ID = '{reqId}';")
return 'SUCCESS'
@app.route('/cancelRequest', methods=['POST'])
def cancelRequest():
data = json.loads(request.data)
reqId = data['reqId']
cursor.execute(f"Delete From Request WHERE ID = '{reqId}';")
return 'SUCCESS'
@app.route('/cancelRequestAccept', methods=['POST'])
def cancelRequestAccept():
data = json.loads(request.data)
reqId = data['reqId']
cursor.execute(f"UPDATE Request SET status = '{0}' WHERE ID = '{reqId}';")
return 'SUCCESS'
@app.route('/otherPersonsInfo', methods=['POST'])
def otherPersonsInfo():
data = json.loads(request.data)
username = data['username']
cursor.execute(f"select * from User where Username='{username}'")
info = zipTableAttrs('user', cursor.fetchall())[0]
return jsonify({'Email': info['Email'], 'PhoneNumber': info['PhoneNumber']})
@app.route('/completedDeals', methods=['POST'])
def completedDeals():
data = json.loads(request.data)
periodIsOver = data['periodIsOver']
if periodIsOver:
reqId = data['reqId']
status = data['status']
cursor.execute(
f"UPDATE Request SET status = '{status}' WHERE ID = '{reqId}'")
return 'SUCCESS'
@app.route('/addReview', methods=['POST'])
def addReview():
data = json.loads(request.data)
reqId = data['reqId']
who = data['who']
cursor.execute(
f"insert into Review(tripID, ShipmentID, reviewerUsername, revieweeUsername, NumOfStars, Text) values('{data['TripID']}', '{data['ShipmentID']}', '{data['reviewerUsername']}', '{data['revieweeUsername']}', '{data['NumOfStars']}', '{data['Text']}')")
cursor.execute(
f"UPDATE Request SET {who} = '{cursor.lastrowid}' WHERE ID = '{reqId}'")
return 'SUCCESS'
""" Search """
@app.route('/suggestedResults/<username>', methods=['GET'])
def suggestedResults(username):
### Trips ####
cursor.execute(f"""
select FromLocation
from trip as t, User as u
where t.Username='{username}' and u.Username='{username}'
UNION
select ToLocation
from trip as t, User as u
where t.Username='{username}' and u.Username='{username}'
UNION
select FromLocation
from trip as t, User as u
where u.Username='{username}' and FromLocation=u.Country
UNION
select ToLocation
from trip as t, User as u
where u.Username='{username}' and ToLocation=u.Country
""")
# countries the user is interested in
countries = [x[0] for x in cursor.fetchall()]
trips = []
for country in countries:
cursor.execute(f"""
select *
from trip
where Username != '{username}' and (ToLocation = '{country}' or FromLocation = '{country}')
""")
trips.extend(cursor.fetchall())
trips = [t for t in {k[0]: k for k in trips}.values()] # unique trips
midpoints, unWantedCategories = getMidpointsAndunWantedCats(trips, cursor)
### Shipments ####
cursor.execute(f"""
select FromLocation
from shipment as t, User as u
where t.Username='{username}' and u.Username='{username}'
UNION
select ToLocation
from shipment as t, User as u
where t.Username='{username}' and u.Username='{username}'
UNION
select FromLocation
from shipment as t, User as u
where u.Username='{username}' and FromLocation=u.Country
UNION
select ToLocation
from shipment as t, User as u
where u.Username='{username}' and ToLocation=u.Country
""")
# countries the user is interested in
countries = [x[0] for x in cursor.fetchall()]
shipments = []
for country in countries:
cursor.execute(f"""
select *
from shipment
where Username != '{username}' and (ToLocation = '{country}' or FromLocation = '{country}')
""")
shipments.extend(cursor.fetchall())
# unique shipments
shipments = [t for t in {k[0]: k for k in shipments}.values()]
# items
items = getItemsOfShipments(shipments, cursor)
# Pics
pics = matchPicsOfItems(shipments)
return jsonify({'trips': zipTableAttrs('trip', trips), 'midpoints': midpoints,
'unWantedCategories': unWantedCategories, 'shipments': zipTableAttrs('shipment', shipments),
'items': items, "itemPics": pics})
@app.route('/handleSearch', methods=['POST'])
def handleSearch():
searchFilters = json.loads(request.data)
username = searchFilters['username']
cursor.execute(f"select * from Shipment where Username != '{username}' and \
FromLocation = '{searchFilters['from']['label']}' \
and ToLocation = '{searchFilters['to']['label']}'\
and RequestedDeliveryDate <= '{searchFilters['date'].split('T')[0]}'")
shipments = cursor.fetchall()
# items
items = getItemsOfShipments(shipments, cursor)
pics = matchPicsOfItems(shipments)
# trips
cursor.execute(f"select * from Trip where Username != '{username}' and FromLocation = '{searchFilters['from']['label']}' \
and ToLocation = '{searchFilters['to']['label']}' \
and Date <= '{searchFilters['date'].split('T')[0]}' ")
trips = cursor.fetchall()
midpoints, unWantedCategories = getMidpointsAndunWantedCats(trips, cursor)
return jsonify({'trips': zipTableAttrs('trip', trips), 'shipments': zipTableAttrs('shipment', shipments),
'midpoints': midpoints, 'unWantedCategories': unWantedCategories, 'items': items, "itemPics": pics})
""" Registering Requests """
@app.route('/registerRequest', methods=['POST'])
def registerRequest():
data = json.loads(request.data)
trip = data['trip']
shipment = data['shipment']
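    # 'sender' indicates which side initiated the request, so sender/receiver usernames are taken from the shipment or the trip accordingly.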
if data['sender'] == 'shipment':
cursor.execute(
f"insert into Request(TripID, ShipmentID, senderUsername, receiverUsername, status) values ('{trip['Id']}', '{shipment['Id']}', '{shipment['username']}', '{trip['Username']}', '{0}')")
else:
cursor.execute(
f"insert into Request(TripID, ShipmentID, senderUsername, receiverUsername, status) values ('{trip['Id']}', '{shipment['Id']}', '{trip['username']}', '{shipment['Username']}', '{0}')")
return jsonify({'registered': True})
""" UPDATE USER INFO IN ABOUT ME"""
@app.route("/updateUserInfo", methods=["POST", "GET"]) # for now just leave both
def updateUserInfo():
updatedInfo = json.loads(request.data)
cursor.execute(f"UPDATE User set Name='{updatedInfo['name']}', Email='{updatedInfo['email']}', Country='{updatedInfo['location']}' where Username='{updatedInfo['username']}'")
return {"UpdateStatus": True}
""" ADDING SHIPMENT AND TRIP """
@app.route('/addTrip', methods=["POST", "GET"])
def addTrip():
trip = json.loads(request.data)
cursor.execute(
f"insert Trip(Username, FromLocation, ToLocation, Date, Time, AvailableWeight, Description) values('{trip['username']}', '{trip['from']['label']}', '{trip['to']['label']}','{trip['date'].split('T')[0]}', '{trip['time'].split('T')[1][:-5]}', '{trip['AvWeight']}', '{trip['description']}')")
id = cursor.lastrowid
for unwantedcat in trip["cats"]:
cursor.execute(
f"insert unwantedcategory(tripID, Category) values('{id}','{unwantedcat}')")
for point in trip["midPoints"]:
cursor.execute(
f"insert midpoint(District, TripID) values('{point['label']}','{id}')")
return jsonify({'created': True, 'Id': cursor.lastrowid})
@app.route('/addShipment', methods=["POST", "GET"])
def addShipment():
shipment = json.loads(request.data)
cursor.execute(
f"insert shipment(Username, FromLocation, ToLocation, RequestedDeliveryDate, Title, Description_, OfferedReward) values('{shipment['username']}', '{shipment['from']['label']}', '{shipment['to']['label']}','{shipment['date'].split('T')[0]}', '{shipment['title']}', '{shipment['description']}', '{shipment['OfferedReward']}')")
id = cursor.lastrowid
items = shipment['itemArray']
for item in items:
ItemCat = item['ItemCategory']
cursor.execute(
f"insert item (shipmentID, Category, Weight, Name, Quantity, ItemLink) values ('{id}', '{shipment['lookupTable'][ItemCat]}', '{item['ItemWeight']}', '{item['ItemTitle']}', '{item['ItemQuantity']}', '{item['ItemLink']}')"
)
# this works better I think. Double check with asem because this will cause problems.
cursor.execute("SELECT max(id) FROM shipment")
testID = cursor.fetchone()[0]
return jsonify({'created': True, 'Id': testID})
@app.route('/addPics/<username>/<Id>', methods=["POST", "GET"])
def addPics(username, Id):
counter = 1
images = request.files
for singleImg in images:
img = images[singleImg]
saveTo = os.path.join(os.getcwd(), 'static/Img/ItemPics',
f"{username}_{Id}_img{counter}.png")
img.save(saveTo)
counter += 1
return "SUCESS"
@app.route("/loadShipmentsPics/<imageName>", methods=["POST", "GET"])
def loadPic(imageName):
filePath = os.path.join(
os.getcwd(), f'static/Img/itemPics/{imageName}')
if os.path.isfile(filePath):
return send_file(filePath, mimetype='image/gif')
else:
defaultPath = os.path.join(
os.getcwd(), 'static/Img/itemPics/defaultItemPic.png')
return send_file(defaultPath, mimetype='image/gif')
return "sucess"
if __name__ == "__main__":
app.run(debug=True)
| [
"jwt.decode",
"flask_cors.CORS",
"flask.Flask",
"datetime.timedelta",
"helpers.matchPicsOfItems",
"flask.jsonify",
"validate_email.validate_email",
"utilities.cursor.execute",
"helpers.getItemsOfShipments",
"json.loads",
"os.path.isfile",
"flask.send_file",
"helpers.zipTableAttrs",
"logging.basicConfig",
"datetime.datetime.utcnow",
"utilities.cursor.fetchall",
"helpers.getMidpointsAndunWantedCats",
"os.getcwd",
"collections.Counter",
"utilities.cursor.fetchone"
] | [((377, 417), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (396, 417), False, 'import logging\n'), ((425, 440), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (430, 440), False, 'from flask import Flask, jsonify, request, send_file\n'), ((454, 463), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (458, 463), False, 'from flask_cors import CORS\n'), ((713, 737), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (723, 737), False, 'import json\n'), ((831, 862), 'validate_email.validate_email', 'validate_email', (['usernameOrEmail'], {}), '(usernameOrEmail)\n', (845, 862), False, 'from validate_email import validate_email\n'), ((937, 1012), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from User where {condition}=\'{usernameOrEmail}\'"""'], {}), '(f"select * from User where {condition}=\'{usernameOrEmail}\'")\n', (951, 1012), False, 'from utilities import cursor, db\n'), ((1032, 1049), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (1047, 1049), False, 'from utilities import cursor, db\n'), ((1545, 1587), 'flask.jsonify', 'jsonify', (["{'exists': False, 'username': ''}"], {}), "({'exists': False, 'username': ''})\n", (1552, 1587), False, 'from flask import Flask, jsonify, request, send_file\n'), ((1770, 1794), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (1780, 1794), False, 'import json\n'), ((2140, 2199), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from User where Email="{email}\\""""'], {}), '(f\'select * from User where Email="{email}"\')\n', (2154, 2199), False, 'from utilities import cursor, db\n'), ((2273, 2294), 'validate_email.validate_email', 'validate_email', (['email'], {}), '(email)\n', (2287, 2294), False, 'from validate_email import validate_email\n'), ((2393, 2458), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from User where Username="{username}\\""""'], {}), '(f\'select * from User where Username="{username}"\')\n', (2407, 2458), False, 'from utilities import cursor, db\n'), ((3240, 3264), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3250, 3264), False, 'import json\n'), ((3812, 3877), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from User where Username=\'{username}\'"""'], {}), '(f"select * from User where Username=\'{username}\'")\n', (3826, 3877), False, 'from utilities import cursor, db\n'), ((3888, 3905), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (3903, 3905), False, 'from utilities import cursor, db\n'), ((4022, 4087), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Trip where Username=\'{username}\'"""'], {}), '(f"select * from Trip where Username=\'{username}\'")\n', (4036, 4087), False, 'from utilities import cursor, db\n'), ((4100, 4117), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (4115, 4117), False, 'from utilities import cursor, db\n'), ((4154, 4196), 'helpers.getMidpointsAndunWantedCats', 'getMidpointsAndunWantedCats', (['trips', 'cursor'], {}), '(trips, cursor)\n', (4181, 4196), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((4218, 4287), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Shipment where Username=\'{username}\'"""'], {}), '(f"select * from Shipment where Username=\'{username}\'")\n', (4232, 4287), False, 'from utilities import cursor, db\n'), ((4304, 
4321), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (4319, 4321), False, 'from utilities import cursor, db\n'), ((4340, 4378), 'helpers.getItemsOfShipments', 'getItemsOfShipments', (['shipments', 'cursor'], {}), '(shipments, cursor)\n', (4359, 4378), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((4390, 4417), 'helpers.matchPicsOfItems', 'matchPicsOfItems', (['shipments'], {}), '(shipments)\n', (4406, 4417), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((4437, 4512), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Review where revieweeUsername=\'{username}\'"""'], {}), '(f"select * from Review where revieweeUsername=\'{username}\'")\n', (4451, 4512), False, 'from utilities import cursor, db\n'), ((4659, 4687), 'collections.Counter', 'collections.Counter', (['ratings'], {}), '(ratings)\n', (4678, 4687), False, 'import collections\n'), ((4938, 5071), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Request where senderUsername=\'{info[\'Username\']}\' or receiverUsername=\'{info[\'Username\']}\'"""'], {}), '(\n f"select * from Request where senderUsername=\'{info[\'Username\']}\' or receiverUsername=\'{info[\'Username\']}\'"\n )\n', (4952, 5071), False, 'from utilities import cursor, db\n'), ((6949, 6970), 'flask.jsonify', 'jsonify', (['info_reviews'], {}), '(info_reviews)\n', (6956, 6970), False, 'from flask import Flask, jsonify, request, send_file\n'), ((7158, 7182), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (7172, 7182), False, 'import os\n'), ((7768, 7792), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (7778, 7792), False, 'import json\n'), ((7851, 7930), 'utilities.cursor.execute', 'cursor.execute', (['f"""UPDATE Request SET status = \'{status}\' WHERE ID = \'{reqId}\';"""'], {}), '(f"UPDATE Request SET status = \'{status}\' WHERE ID = \'{reqId}\';")\n', (7865, 7930), False, 'from utilities import cursor, db\n'), ((8044, 8068), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (8054, 8068), False, 'import json\n'), ((8127, 8220), 'utilities.cursor.execute', 'cursor.execute', (['f"""UPDATE Request SET status = status + \'{status}\' WHERE ID = \'{reqId}\';"""'], {}), '(\n f"UPDATE Request SET status = status + \'{status}\' WHERE ID = \'{reqId}\';")\n', (8141, 8220), False, 'from utilities import cursor, db\n'), ((8327, 8351), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (8337, 8351), False, 'import json\n'), ((8382, 8442), 'utilities.cursor.execute', 'cursor.execute', (['f"""Delete From Request WHERE ID = \'{reqId}\';"""'], {}), '(f"Delete From Request WHERE ID = \'{reqId}\';")\n', (8396, 8442), False, 'from utilities import cursor, db\n'), ((8557, 8581), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (8567, 8581), False, 'import json\n'), ((8612, 8686), 'utilities.cursor.execute', 'cursor.execute', (['f"""UPDATE Request SET status = \'{0}\' WHERE ID = \'{reqId}\';"""'], {}), '(f"UPDATE Request SET status = \'{0}\' WHERE ID = \'{reqId}\';")\n', (8626, 8686), False, 'from utilities import cursor, db\n'), ((8795, 8819), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (8805, 8819), False, 'import json\n'), ((8856, 8921), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from User where Username=\'{username}\'"""'], {}), '(f"select * 
from User where Username=\'{username}\'")\n', (8870, 8921), False, 'from utilities import cursor, db\n'), ((8988, 9057), 'flask.jsonify', 'jsonify', (["{'Email': info['Email'], 'PhoneNumber': info['PhoneNumber']}"], {}), "({'Email': info['Email'], 'PhoneNumber': info['PhoneNumber']})\n", (8995, 9057), False, 'from flask import Flask, jsonify, request, send_file\n'), ((9141, 9165), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (9151, 9165), False, 'import json\n'), ((9483, 9507), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (9493, 9507), False, 'import json\n'), ((9560, 9834), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert into Review(tripID, ShipmentID, reviewerUsername, revieweeUsername, NumOfStars, Text) values(\'{data[\'TripID\']}\', \'{data[\'ShipmentID\']}\', \'{data[\'reviewerUsername\']}\', \'{data[\'revieweeUsername\']}\', \'{data[\'NumOfStars\']}\', \'{data[\'Text\']}\')"""'], {}), '(\n f"insert into Review(tripID, ShipmentID, reviewerUsername, revieweeUsername, NumOfStars, Text) values(\'{data[\'TripID\']}\', \'{data[\'ShipmentID\']}\', \'{data[\'reviewerUsername\']}\', \'{data[\'revieweeUsername\']}\', \'{data[\'NumOfStars\']}\', \'{data[\'Text\']}\')"\n )\n', (9574, 9834), False, 'from utilities import cursor, db\n'), ((9839, 9931), 'utilities.cursor.execute', 'cursor.execute', (['f"""UPDATE Request SET {who} = \'{cursor.lastrowid}\' WHERE ID = \'{reqId}\'"""'], {}), '(\n f"UPDATE Request SET {who} = \'{cursor.lastrowid}\' WHERE ID = \'{reqId}\'")\n', (9853, 9931), False, 'from utilities import cursor, db\n'), ((10091, 10885), 'utilities.cursor.execute', 'cursor.execute', (['f"""\n select FromLocation \n from trip as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select ToLocation \n from trip as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select FromLocation \n from trip as t, User as u \n where u.Username=\'{username}\' and FromLocation=u.Country \n UNION \n select ToLocation \n from trip as t, User as u \n where u.Username=\'{username}\' and ToLocation=u.Country\n """'], {}), '(\n f"""\n select FromLocation \n from trip as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select ToLocation \n from trip as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select FromLocation \n from trip as t, User as u \n where u.Username=\'{username}\' and FromLocation=u.Country \n UNION \n select ToLocation \n from trip as t, User as u \n where u.Username=\'{username}\' and ToLocation=u.Country\n """\n )\n', (10105, 10885), False, 'from utilities import cursor, db\n'), ((11409, 11451), 'helpers.getMidpointsAndunWantedCats', 'getMidpointsAndunWantedCats', (['trips', 'cursor'], {}), '(trips, cursor)\n', (11436, 11451), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((11480, 12290), 'utilities.cursor.execute', 'cursor.execute', (['f"""\n select FromLocation \n from shipment as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select ToLocation \n from shipment as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select FromLocation \n from shipment as t, User as u \n where u.Username=\'{username}\' and FromLocation=u.Country \n UNION \n select ToLocation \n from shipment as t, User as u \n where 
u.Username=\'{username}\' and ToLocation=u.Country\n """'], {}), '(\n f"""\n select FromLocation \n from shipment as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select ToLocation \n from shipment as t, User as u \n where t.Username=\'{username}\' and u.Username=\'{username}\' \n UNION \n select FromLocation \n from shipment as t, User as u \n where u.Username=\'{username}\' and FromLocation=u.Country \n UNION \n select ToLocation \n from shipment as t, User as u \n where u.Username=\'{username}\' and ToLocation=u.Country\n """\n )\n', (11494, 12290), False, 'from utilities import cursor, db\n'), ((12827, 12865), 'helpers.getItemsOfShipments', 'getItemsOfShipments', (['shipments', 'cursor'], {}), '(shipments, cursor)\n', (12846, 12865), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((12888, 12915), 'helpers.matchPicsOfItems', 'matchPicsOfItems', (['shipments'], {}), '(shipments)\n', (12904, 12915), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((13256, 13280), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (13266, 13280), False, 'import json\n'), ((13605, 13622), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (13620, 13622), False, 'from utilities import cursor, db\n'), ((13647, 13685), 'helpers.getItemsOfShipments', 'getItemsOfShipments', (['shipments', 'cursor'], {}), '(shipments, cursor)\n', (13666, 13685), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((13697, 13724), 'helpers.matchPicsOfItems', 'matchPicsOfItems', (['shipments'], {}), '(shipments)\n', (13713, 13724), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((13993, 14010), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (14008, 14010), False, 'from utilities import cursor, db\n'), ((14047, 14089), 'helpers.getMidpointsAndunWantedCats', 'getMidpointsAndunWantedCats', (['trips', 'cursor'], {}), '(trips, cursor)\n', (14074, 14089), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((14436, 14460), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (14446, 14460), False, 'import json\n'), ((15018, 15047), 'flask.jsonify', 'jsonify', (["{'registered': True}"], {}), "({'registered': True})\n", (15025, 15047), False, 'from flask import Flask, jsonify, request, send_file\n'), ((15208, 15232), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (15218, 15232), False, 'import json\n'), ((15237, 15422), 'utilities.cursor.execute', 'cursor.execute', (['f"""UPDATE User set Name=\'{updatedInfo[\'name\']}\', Email=\'{updatedInfo[\'email\']}\', Country=\'{updatedInfo[\'location\']}\' where Username=\'{updatedInfo[\'username\']}\'"""'], {}), '(\n f"UPDATE User set Name=\'{updatedInfo[\'name\']}\', Email=\'{updatedInfo[\'email\']}\', Country=\'{updatedInfo[\'location\']}\' where Username=\'{updatedInfo[\'username\']}\'"\n )\n', (15251, 15422), False, 'from utilities import cursor, db\n'), ((15556, 15580), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (15566, 15580), False, 'import json\n'), ((16231, 16281), 'flask.jsonify', 'jsonify', (["{'created': True, 'Id': cursor.lastrowid}"], {}), "({'created': True, 'Id': cursor.lastrowid})\n", 
(16238, 16281), False, 'from flask import Flask, jsonify, request, send_file\n'), ((16370, 16394), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (16380, 16394), False, 'import json\n'), ((17232, 17278), 'utilities.cursor.execute', 'cursor.execute', (['"""SELECT max(id) FROM shipment"""'], {}), "('SELECT max(id) FROM shipment')\n", (17246, 17278), False, 'from utilities import cursor, db\n'), ((17324, 17364), 'flask.jsonify', 'jsonify', (["{'created': True, 'Id': testID}"], {}), "({'created': True, 'Id': testID})\n", (17331, 17364), False, 'from flask import Flask, jsonify, request, send_file\n'), ((17952, 17976), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (17966, 17976), False, 'import os\n'), ((2629, 2830), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert User(Name, Username, Email, Password, Country, PhoneNumber, Birthday) values(\'{name}\', \'{username}\', \'{email}\', \'{password}\',\'{country}\', \'{phoneNum}\', \'{birthday}\')"""'], {}), '(\n f"insert User(Name, Username, Email, Password, Country, PhoneNumber, Birthday) values(\'{name}\', \'{username}\', \'{email}\', \'{password}\',\'{country}\', \'{phoneNum}\', \'{birthday}\')"\n )\n', (2643, 2830), False, 'from utilities import cursor, db\n'), ((2849, 2953), 'flask.jsonify', 'jsonify', (["{'registered': True, 'emailValid': True, 'emailAvailable': True,\n 'usernameAvailable': True}"], {}), "({'registered': True, 'emailValid': True, 'emailAvailable': True,\n 'usernameAvailable': True})\n", (2856, 2953), False, 'from flask import Flask, jsonify, request, send_file\n'), ((2976, 3110), 'flask.jsonify', 'jsonify', (["{'registered': False, 'emailValid': emailValid, 'emailAvailable':\n emailAvailable, 'usernameAvailable': usernameAvailable}"], {}), "({'registered': False, 'emailValid': emailValid, 'emailAvailable':\n emailAvailable, 'usernameAvailable': usernameAvailable})\n", (2983, 3110), False, 'from flask import Flask, jsonify, request, send_file\n'), ((3324, 3349), 'flask.jsonify', 'jsonify', (["{'valid': False}"], {}), "({'valid': False})\n", (3331, 3349), False, 'from flask import Flask, jsonify, request, send_file\n'), ((3367, 3410), 'jwt.decode', 'jwt.decode', (['token', "app.config['SECRET_KEY']"], {}), "(token, app.config['SECRET_KEY'])\n", (3377, 3410), False, 'import jwt\n'), ((3426, 3450), 'flask.jsonify', 'jsonify', (["{'valid': True}"], {}), "({'valid': True})\n", (3433, 3450), False, 'from flask import Flask, jsonify, request, send_file\n'), ((3943, 3963), 'flask.jsonify', 'jsonify', (['"""NOT FOUND"""'], {}), "('NOT FOUND')\n", (3950, 3963), False, 'from flask import Flask, jsonify, request, send_file\n'), ((3975, 4001), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""user"""', 'res'], {}), "('user', res)\n", (3988, 4001), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((4551, 4568), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (4566, 4568), False, 'from utilities import cursor, db\n'), ((5111, 5128), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (5126, 5128), False, 'from utilities import cursor, db\n'), ((5338, 5403), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Shipment where ID=\'{shipmentId}\'"""'], {}), '(f"select * from Shipment where ID=\'{shipmentId}\'")\n', (5352, 5403), False, 'from utilities import cursor, db\n'), ((5518, 5575), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Trip where 
Id=\'{tripId}\'"""'], {}), '(f"select * from Trip where Id=\'{tripId}\'")\n', (5532, 5575), False, 'from utilities import cursor, db\n'), ((5698, 5816), 'utilities.cursor.execute', 'cursor.execute', (['f"""select (District) from trip as t, Midpoint as m where t.Id = m.TripID and t.Id=\'{tripId}\'"""'], {}), '(\n f"select (District) from trip as t, Midpoint as m where t.Id = m.TripID and t.Id=\'{tripId}\'"\n )\n', (5712, 5816), False, 'from utilities import cursor, db\n'), ((5921, 6048), 'utilities.cursor.execute', 'cursor.execute', (['f"""select (Category) from trip as t, UnwantedCategory as u where t.Id = u.tripID and t.Id =\'{tripId}\'"""'], {}), '(\n f"select (Category) from trip as t, UnwantedCategory as u where t.Id = u.tripID and t.Id =\'{tripId}\'"\n )\n', (5935, 6048), False, 'from utilities import cursor, db\n'), ((6140, 6212), 'utilities.cursor.execute', 'cursor.execute', (['f"""select * from Item where shipmentID = \'{shipmentId}\'"""'], {}), '(f"select * from Item where shipmentID = \'{shipmentId}\'")\n', (6154, 6212), False, 'from utilities import cursor, db\n'), ((6622, 6650), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""trip"""', 'trips'], {}), "('trip', trips)\n", (6635, 6650), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((6685, 6721), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""shipment"""', 'shipments'], {}), "('shipment', shipments)\n", (6698, 6721), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((7096, 7107), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7105, 7107), False, 'import os\n'), ((7199, 7240), 'flask.send_file', 'send_file', (['filePath'], {'mimetype': '"""image/gif"""'}), "(filePath, mimetype='image/gif')\n", (7208, 7240), False, 'from flask import Flask, jsonify, request, send_file\n'), ((7375, 7419), 'flask.send_file', 'send_file', (['defaultPath'], {'mimetype': '"""image/gif"""'}), "(defaultPath, mimetype='image/gif')\n", (7384, 7419), False, 'from flask import Flask, jsonify, request, send_file\n'), ((7585, 7596), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7594, 7596), False, 'import os\n'), ((9297, 9375), 'utilities.cursor.execute', 'cursor.execute', (['f"""UPDATE Request SET status = \'{status}\' WHERE ID = \'{reqId}\'"""'], {}), '(f"UPDATE Request SET status = \'{status}\' WHERE ID = \'{reqId}\'")\n', (9311, 9375), False, 'from utilities import cursor, db\n'), ((11021, 11265), 'utilities.cursor.execute', 'cursor.execute', (['f""" \n select * \n from trip \n where Username != \'{username}\' and (ToLocation = \'{country}\' or FromLocation = \'{country}\')\n """'], {}), '(\n f""" \n select * \n from trip \n where Username != \'{username}\' and (ToLocation = \'{country}\' or FromLocation = \'{country}\')\n """\n )\n', (11035, 11265), False, 'from utilities import cursor, db\n'), ((12430, 12678), 'utilities.cursor.execute', 'cursor.execute', (['f""" \n select * \n from shipment \n where Username != \'{username}\' and (ToLocation = \'{country}\' or FromLocation = \'{country}\')\n """'], {}), '(\n f""" \n select * \n from shipment \n where Username != \'{username}\' and (ToLocation = \'{country}\' or FromLocation = \'{country}\')\n """\n )\n', (12444, 12678), False, 'from utilities import cursor, db\n'), ((14562, 14771), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert into Request(TripID, ShipmentID, senderUsername, receiverUsername, status) values (\'{trip[\'Id\']}\', \'{shipment[\'Id\']}\', 
\'{shipment[\'username\']}\', \'{trip[\'Username\']}\', \'{0}\')"""'], {}), '(\n f"insert into Request(TripID, ShipmentID, senderUsername, receiverUsername, status) values (\'{trip[\'Id\']}\', \'{shipment[\'Id\']}\', \'{shipment[\'username\']}\', \'{trip[\'Username\']}\', \'{0}\')"\n )\n', (14576, 14771), False, 'from utilities import cursor, db\n'), ((14793, 15002), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert into Request(TripID, ShipmentID, senderUsername, receiverUsername, status) values (\'{trip[\'Id\']}\', \'{shipment[\'Id\']}\', \'{trip[\'username\']}\', \'{shipment[\'Username\']}\', \'{0}\')"""'], {}), '(\n f"insert into Request(TripID, ShipmentID, senderUsername, receiverUsername, status) values (\'{trip[\'Id\']}\', \'{shipment[\'Id\']}\', \'{trip[\'username\']}\', \'{shipment[\'Username\']}\', \'{0}\')"\n )\n', (14807, 15002), False, 'from utilities import cursor, db\n'), ((15970, 16071), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert unwantedcategory(tripID, Category) values(\'{id}\',\'{unwantedcat}\')"""'], {}), '(\n f"insert unwantedcategory(tripID, Category) values(\'{id}\',\'{unwantedcat}\')"\n )\n', (15984, 16071), False, 'from utilities import cursor, db\n'), ((16119, 16210), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert midpoint(District, TripID) values(\'{point[\'label\']}\',\'{id}\')"""'], {}), '(\n f"insert midpoint(District, TripID) values(\'{point[\'label\']}\',\'{id}\')")\n', (16133, 16210), False, 'from utilities import cursor, db\n'), ((16879, 17125), 'utilities.cursor.execute', 'cursor.execute', (['f"""insert item (shipmentID, Category, Weight, Name, Quantity, ItemLink) values (\'{id}\', \'{shipment[\'lookupTable\'][ItemCat]}\', \'{item[\'ItemWeight\']}\', \'{item[\'ItemTitle\']}\', \'{item[\'ItemQuantity\']}\', \'{item[\'ItemLink\']}\')"""'], {}), '(\n f"insert item (shipmentID, Category, Weight, Name, Quantity, ItemLink) values (\'{id}\', \'{shipment[\'lookupTable\'][ItemCat]}\', \'{item[\'ItemWeight\']}\', \'{item[\'ItemTitle\']}\', \'{item[\'ItemQuantity\']}\', \'{item[\'ItemLink\']}\')"\n )\n', (16893, 17125), False, 'from utilities import cursor, db\n'), ((17292, 17309), 'utilities.cursor.fetchone', 'cursor.fetchone', ([], {}), '()\n', (17307, 17309), False, 'from utilities import cursor, db\n'), ((17896, 17907), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17905, 17907), False, 'import os\n'), ((17993, 18034), 'flask.send_file', 'send_file', (['filePath'], {'mimetype': '"""image/gif"""'}), "(filePath, mimetype='image/gif')\n", (18002, 18034), False, 'from flask import Flask, jsonify, request, send_file\n'), ((18163, 18207), 'flask.send_file', 'send_file', (['defaultPath'], {'mimetype': '"""image/gif"""'}), "(defaultPath, mimetype='image/gif')\n", (18172, 18207), False, 'from flask import Flask, jsonify, request, send_file\n'), ((1115, 1150), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""user"""', 'fetched_data'], {}), "('user', fetched_data)\n", (1128, 1150), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((2211, 2228), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (2226, 2228), False, 'from utilities import cursor, db\n'), ((2470, 2487), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (2485, 2487), False, 'from utilities import cursor, db\n'), ((3478, 3503), 'flask.jsonify', 'jsonify', (["{'valid': False}"], {}), "({'valid': False})\n", (3485, 3503), False, 'from flask import Flask, jsonify, request, 
send_file\n'), ((5491, 5508), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (5506, 5508), False, 'from utilities import cursor, db\n'), ((5651, 5668), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (5666, 5668), False, 'from utilities import cursor, db\n'), ((6264, 6281), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (6279, 6281), False, 'from utilities import cursor, db\n'), ((7299, 7310), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7308, 7310), False, 'import os\n'), ((8955, 8972), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (8970, 8972), False, 'from utilities import cursor, db\n'), ((10949, 10966), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (10964, 10966), False, 'from utilities import cursor, db\n'), ((11277, 11294), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (11292, 11294), False, 'from utilities import cursor, db\n'), ((12354, 12371), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (12369, 12371), False, 'from utilities import cursor, db\n'), ((12694, 12711), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (12709, 12711), False, 'from utilities import cursor, db\n'), ((12946, 12974), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""trip"""', 'trips'], {}), "('trip', trips)\n", (12959, 12974), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((13075, 13111), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""shipment"""', 'shipments'], {}), "('shipment', shipments)\n", (13088, 13111), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((14120, 14148), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""trip"""', 'trips'], {}), "('trip', trips)\n", (14133, 14148), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((14163, 14199), 'helpers.zipTableAttrs', 'zipTableAttrs', (['"""shipment"""', 'shipments'], {}), "('shipment', shipments)\n", (14176, 14199), False, 'from helpers import zipTableAttrs, getMidpointsAndunWantedCats, getItemsOfShipments, matchPicsOfItems\n'), ((17592, 17603), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17601, 17603), False, 'import os\n'), ((18093, 18104), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18102, 18104), False, 'import os\n'), ((5864, 5881), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (5879, 5881), False, 'from utilities import cursor, db\n'), ((6097, 6114), 'utilities.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (6112, 6114), False, 'from utilities import cursor, db\n'), ((1328, 1345), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1343, 1345), False, 'from datetime import datetime, timedelta\n'), ((1361, 1382), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (1370, 1382), False, 'from datetime import datetime, timedelta\n')] |