'''
Test cases for the Auth API
'''
import unittest
from json import loads
from flask import current_app
from app import create_app
from flask_jwt_extended import create_access_token
class AuthAPITestCase(unittest.TestCase):
''' Auth test case class '''
def setUp(self):
'''Setup (pre-processing) method'''
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
self.client = self.app.test_client()
self.access_token = {
'ADMIN': create_access_token(
identity=self.app.config['ADMIN_ID'],
expires_delta=False),
'TEST': create_access_token(
identity="test",
expires_delta=False)}
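# expires_delta=False issues non-expiring tokens so they stay valid for the whole test run.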
def tearDown(self):
'''Teardown (post-processing) method'''
self.app_context.pop()
def get_headers(self, user_type="ADMIN"):
'''Builds the API request headers'''
result = {
'Accept': 'application/json',
'Authorization': "Bearer " + self.access_token[user_type],
#'Content-Type': 'application/json',
}
return result
def test_sejong(self):
'''Sejong University member authentication API test'''
resp = self.client.post(
'/api/auth/sejong',
headers=self.get_headers(),
json={
"sj_id": current_app.config['SEJONG_ID'],
"sj_pw": current_app.config['SEJONG_PW']
}
)
self.assertEqual(resp.status_code, 200)
def test_signup_secession(self):
'''Sign-up API test (the account-deletion (secession) API test follows the same logic below)'''
resp = self.client.post(
'/api/auth/signup',
headers=self.get_headers(),
json={
"sj_id": current_app.config['SEJONG_ID'],
"sj_pw": current_app.config['SEJONG_PW'],
"id": "test",
"pw": "test",
"nickname": "test"
}
)
self.assertEqual(resp.status_code, 200)
resp = self.client.delete(
'/api/auth/secession',
headers=self.get_headers("TEST"),
json={"pw": "test"}
)
self.assertEqual(resp.status_code, 200)
def test_signin(self):
''' Sign-in API verification test '''
resp = self.client.post(
'/api/auth/signin',
headers=self.get_headers(),
json={
"id": current_app.config['ADMIN_ID'],
"pw": current_app.config['ADMIN_PW']
}
)
self.assertEqual(resp.status_code, 200)
def test_update_password(self):
'''Password change API test'''
resp = self.client.post(
'/api/auth/signup',
headers=self.get_headers(),
json={
"sj_id": current_app.config['SEJONG_ID'],
"sj_pw": current_app.config['SEJONG_PW'],
"id": "test",
"pw": "test",
"nickname": "test"
}
)
self.assertEqual(resp.status_code, 200)
resp = self.client.patch(
'/api/auth/user/password',
headers=self.get_headers("TEST"),
json={
"old_pw": "test",
"new_pw": "test2",
"check_pw": "test2"
}
)
self.assertEqual(resp.status_code, 200)
resp = self.client.delete(
'/api/auth/secession',
headers=self.get_headers("TEST"),
json={"pw": "test2"}
)
self.assertEqual(resp.status_code, 200)
def test_update_nickname(self):
'''Nickname change API test'''
resp = self.client.post(
'/api/auth/signup',
headers=self.get_headers(),
json={
"sj_id": current_app.config['SEJONG_ID'],
"sj_pw": current_app.config['SEJONG_PW'],
"id": "test",
"pw": "test",
"nickname": "test"
}
)
self.assertEqual(resp.status_code, 200)
resp = self.client.patch(
'/api/auth/user/nickname',
headers=self.get_headers("TEST"),
json={"nickname": "test2"}
)
self.assertEqual(resp.status_code, 200)
resp = self.client.delete(
'/api/auth/secession',
headers=self.get_headers("TEST"),
json={"pw": "test"}
)
self.assertEqual(resp.status_code, 200)
def test_get_user(self):
''' User info retrieval API verification test '''
resp = self.client.get(
'/api/auth/user',
headers=self.get_headers(),
json={}
)
self.assertEqual(resp.status_code, 200)
|
"""
Utilities to match ground truth boxes to anchor boxes.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import numpy as np
def match_bipartite_greedy(weight_matrix):
"""
Returns a bipartite matching according to the given weight matrix.
The algorithm works as follows:
Let the first axis of `weight_matrix` represent ground truth boxes
and the second axis anchor boxes.
The ground truth box that has the greatest similarity with any
anchor box will be matched first, then out of the remaining ground
truth boxes, the ground truth box that has the greatest similarity
with any of the remaining anchor boxes will be matched second, and
so on. That is, the ground truth boxes will be matched in descending
order by maximum similarity with any of the respectively remaining
anchor boxes.
The runtime complexity is O(m^2 * n), where `m` is the number of
ground truth boxes and `n` is the number of anchor boxes.
Arguments:
weight_matrix (array): A 2D Numpy array that represents the weight matrix
for the matching process. If `(m,n)` is the shape of the weight matrix,
it must be `m <= n`. The weights can be integers or floating point
numbers. The matching process will maximize, i.e. larger weights are
preferred over smaller weights.
Returns:
A 1D Numpy array of length `weight_matrix.shape[0]` that represents
the matched index along the second axis of `weight_matrix` for each index
along the first axis.
"""
weight_matrix = np.copy(weight_matrix) # We'll modify this array.
num_ground_truth_boxes = weight_matrix.shape[0]
all_gt_indices = list(range(num_ground_truth_boxes)) # Only relevant for fancy-indexing below.
# This 1D array will contain for each ground truth box the index of
# the matched anchor box.
matches = np.zeros(num_ground_truth_boxes, dtype=int)
# In each iteration of the loop below, exactly one ground truth box
# will be matched to one anchor box.
for _ in range(num_ground_truth_boxes):
# Find the maximal anchor-ground truth pair in two steps: First, reduce
# over the anchor boxes and then reduce over the ground truth boxes.
anchor_indices = np.argmax(weight_matrix, axis=1) # Reduce along the anchor box axis.
overlaps = weight_matrix[all_gt_indices, anchor_indices]
ground_truth_index = np.argmax(overlaps) # Reduce along the ground truth box axis.
anchor_index = anchor_indices[ground_truth_index]
matches[ground_truth_index] = anchor_index # Set the match.
# Set the row of the matched ground truth box and the column of the matched
# anchor box to all zeros. This ensures that those boxes will not be matched again,
# because they will never be the best matches for any other boxes.
weight_matrix[ground_truth_index] = 0
weight_matrix[:, anchor_index] = 0
return matches
def match_multi(weight_matrix, threshold):
"""
Matches all elements along the second axis of `weight_matrix` to their best
matches along the first axis subject to the constraint that the weight of a match
must be greater than or equal to `threshold` in order to produce a match.
If the weight matrix contains elements that should be ignored, the row or column
representing the respective element should be set to a value below `threshold`.
Arguments:
weight_matrix (array): A 2D Numpy array that represents the weight matrix
for the matching process. If `(m,n)` is the shape of the weight matrix,
it must be `m <= n`. The weights can be integers or floating point
numbers. The matching process will maximize, i.e. larger weights are
preferred over smaller weights.
threshold (float): A float that represents the threshold (i.e. lower bound)
that must be met by a pair of elements to produce a match.
Returns:
Two 1D Numpy arrays of equal length that represent the matched indices. The first
array contains the indices along the first axis of `weight_matrix`, the second array
contains the indices along the second axis.
"""
num_anchor_boxes = weight_matrix.shape[1]
all_anchor_indices = list(range(num_anchor_boxes)) # Only relevant for fancy-indexing below.
# Find the best ground truth match for every anchor box.
ground_truth_indices = np.argmax(weight_matrix, axis=0) # Array of shape (weight_matrix.shape[1],)
overlaps = weight_matrix[ground_truth_indices, all_anchor_indices] # Array of shape (weight_matrix.shape[1],)
# Filter out the matches with a weight below the threshold.
anchor_indices_thresh_met = np.nonzero(overlaps >= threshold)[0]
gt_indices_thresh_met = ground_truth_indices[anchor_indices_thresh_met]
return gt_indices_thresh_met, anchor_indices_thresh_met
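# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demo of both matchers on a tiny hand-made weight matrix, assuming
# rows are ground truth boxes and columns are anchor boxes.
if __name__ == "__main__":
    demo_weights = np.array([[0.1, 0.8, 0.3],
                             [0.7, 0.2, 0.4]])
    # Greedy bipartite matching: each ground truth box gets exactly one anchor.
    print(match_bipartite_greedy(demo_weights))  # -> [1 0]
    # Multi matching: every anchor whose best overlap meets the threshold is kept.
    gt_idx, anchor_idx = match_multi(demo_weights, threshold=0.3)
    print(gt_idx, anchor_idx)  # -> [1 0 1] [0 1 2]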
|
#pretrained_model = '/home/dzm44/Documents/projects/clinical-bert/clinical-bert-weights'
#code_graph_file = '/home/dzm44/Documents/data/icd_codes/code_graph_radiology_expanded.pkl'
pretrained_model = '/home/jered/Documents/projects/clinical-bert/clinical-bert-weights'
code_graph_file = '/home/jered/Documents/data/icd_codes/code_graph_radiology_expanded.pkl'
#data_dir = '/home/dzm44/Documents/data/Dataset_10-11-2019/preprocessed/reports_and_codes_expanded'
data_dir = '/home/jered/Documents/data/Dataset_10-11-2019/preprocessed/reports_and_codes_expanded'
#data_dir = '/home/jered/Documents/data/Dataset_10-11-2019/preprocessed/mini'
#data_dir = '/home/jered/Documents/data/mimic-iii-clinical-database-1.4/preprocessed/reports_and_codes_expanded'
#data_dir = '/home/jered/Documents/data/mimic-iii-clinical-database-1.4/preprocessed/mini'
val_file = 'val.data'
#val_file = 'test.data'
# training params
learning_rate_unfrozen = .00001
learning_rate_frozen = .001
batch_size = 8
epochs = 3
#epochs = 5
limit_rows_train = 10000
#limit_rows_train = None
limit_rows_val = 1000
#limit_rows_val = None
subbatches = 8
num_workers = 2
checkpoint_every = 10
copy_checkpoint_every = 300
val_every = 10
email_every = 30
expensive_val_every = 500
#expensive_val_every = None
# model params
sentences_per_checkpoint = 30
concatenate_code_embedding = True
# email params
smtp_server = 'smtp.gmail.com'
port = 465
sender_email = '[email protected]'
receiver_email = '[email protected]'
|
from abc import ABC, abstractmethod
class Cat(ABC):
@abstractmethod
def talk(self, meow_type):
pass
class BlackCat(Cat):
pass
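# Instantiating BlackCat raises TypeError because the abstract method 'talk' is not overridden.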
black_cat = BlackCat()
|
# Generated by Django 2.0.4 on 2018-05-01 14:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zoo', '0012_auto_20180501_1406'),
]
operations = [
migrations.RemoveField(
model_name='exhibit',
name='animals',
),
]
|
import os
import sys
import torch
import tarfile
import numpy as np
from types import SimpleNamespace
import pcam.datasets.modelnet40 as m40
from pcam.models.network import PoseEstimator
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from pcam.tool.train_util import train_one_epoch, validation
import math
from pcam.datasets.kitti_dm import KittiDataModule
from pcam.datasets.threedmatch_dm import ThreeDMatchDataModule
from sacred import Experiment
from sacred import SETTINGS
from sacred.utils import apply_backspaces_and_linefeeds
from sacred.observers import MongoObserver
# Sacred config.
SETTINGS.CAPTURE_MODE = 'sys' # for tqdm
name = 'pcam_train'
ex = Experiment(name)
@ex.config
def config():
# Name of the dataset
DATASET = ''
# Number of points to sample for the point cloud
NUM_POINTS = 2048
# Device for torch
DEVICE = 'cuda'
# Name of subfolder where to save checkpoint
PREFIX = 'new_training'
# Whether to use a sparse mapping when computing pairs of corresponding points
SPARSE_ATTENTION = True
# Set to False for PCAM. When set to True, only the last attention map is used to find corresponding points.
LAST_ATTENTION = False
# Number of layers in our encoder for finding matching points
NB_ENCODERS = 6
# Number of epochs
NB_EPOCHS = 100
# In case training needs to be relaunched from last checkpoint
RELOAD_LAST = False
# (De)activate losses during training
LOSS_ENTROPY_ATTENTION = True
LOSS_DIST_ATTENTION = False
LOSS_DIST_CONFIDENCE = True
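# With sacred, any of the config values above can be overridden from the command line,
# e.g. (script name hypothetical): python train_pcam.py with DATASET=kitti PREFIX=my_run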
def save_model(epoch, net, optimizer, scheduler, best_recall, best_rre, filename):
torch.save(
{
'epoch': epoch,
'net': net.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'best_recall': best_recall,
'best_rre': best_rre,
},
filename
)
@ex.automain
def main(NB_EPOCHS, NUM_POINTS, RELOAD_LAST, DEVICE, PREFIX,
SPARSE_ATTENTION, LAST_ATTENTION, NB_ENCODERS, DATASET,
LOSS_ENTROPY_ATTENTION, LOSS_DIST_ATTENTION, LOSS_DIST_CONFIDENCE):
# --- Base directory
basedir = os.path.dirname(os.path.realpath(__file__)) + '/../'
# --- Load dataset
if DATASET == 'kitti':
path2data = basedir + "/data/kitti/dataset"
icp_cache_path = basedir + "/data/kitti/icp"
threshold_rte = 0.6
threshold_rre = 5
config = SimpleNamespace(
voxel_size=0.3,
min_scale=1.,
max_scale=1.,
)
data_module = KittiDataModule(path2data, icp_cache_path, NUM_POINTS)
elif DATASET == '3dmatch':
path2data = basedir + "/data/3dmatch/threedmatch"
config = SimpleNamespace(
voxel_size=0.05,
min_scale=0.8,
max_scale=1.2,
)
threshold_rte = 0.3
threshold_rre = 15
data_module = ThreeDMatchDataModule(path2data, NUM_POINTS)
elif DATASET[:8] == 'modelnet':
unseen = False
gaussian_noise = False
if len(DATASET) > 8:
if DATASET[9:] == 'unseen':
unseen = True
elif DATASET[9:] == 'noise':
gaussian_noise = True
else:
raise NotImplementedError('Dataset not available')
print('unseen', unseen, 'gaussian_noise', gaussian_noise)
train_loader = torch.utils.data.DataLoader(
m40.ModelNet40(1024, partition='train', gaussian_noise=gaussian_noise, unseen=unseen),
pin_memory=True,
batch_size=1,
collate_fn=m40.collate_fn,
num_workers=4,
shuffle=True
)
val_loader = torch.utils.data.DataLoader(
m40.ModelNet40(1024, partition='val', gaussian_noise=gaussian_noise, unseen=unseen),
pin_memory=True,
batch_size=1,
collate_fn=m40.collate_fn,
num_workers=4,
shuffle=False
)
# For validation accuracy during training
threshold_rte = 0.03
threshold_rre = 1.
# Only used to define threshold for good pairs of points
config = SimpleNamespace(
voxel_size=0.03,
max_scale=1.,
)
if DATASET == 'kitti' or DATASET == '3dmatch':
train_loader = data_module.train_loader()
val_loader = data_module.val_loader()
# --- Network
BACKPROP = not SPARSE_ATTENTION
net = PoseEstimator(
nb_encoders=NB_ENCODERS,
last_attention=LAST_ATTENTION,
sparse_attention=SPARSE_ATTENTION,
backprop=BACKPROP
).to(DEVICE)
# --- Optimizer
epoch_factor = NB_EPOCHS / 100.0
optimizer = torch.optim.AdamW(net.parameters(), lr=1e-3, weight_decay=1e-3)
scheduler = MultiStepLR(optimizer,
milestones=[int(60 * epoch_factor), int(80 * epoch_factor)],
gamma=0.1)
# --- Path to experiment
root = basedir + "/trained_models/"
path2exp = root + DATASET + '/' + PREFIX + '/'
if SPARSE_ATTENTION:
path2exp += 'sparse_'
else:
path2exp += 'soft_'
if LAST_ATTENTION:
path2exp += 'lastAttention_'
path2exp += 'nbEnc_' + str(NB_ENCODERS)
if not BACKPROP:
path2exp += '_noBackprop'
if LOSS_DIST_ATTENTION:
if LOSS_ENTROPY_ATTENTION:
path2exp += '_DistEntAtt'
else:
path2exp += '_DistAtt'
else:
if not LOSS_ENTROPY_ATTENTION:
path2exp += '_NoLossAtt'
if LOSS_DIST_CONFIDENCE:
path2exp += '_DistConf'
if NUM_POINTS != 2048:
path2exp += '_' + str(NUM_POINTS)
print('Save experiment in ' + path2exp)
# --- Reload model
if RELOAD_LAST:
print('Reload last checkpoint')
checkpoint = torch.load(os.path.join(path2exp, 'check_point_last.pth'))
net.load_state_dict(checkpoint['net'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
epoch = checkpoint['epoch'] + 1
purge_step = epoch * len(train_loader)
recall_best = checkpoint['best_recall'] if checkpoint.get('best_recall') is not None else 0
rre_best = checkpoint['best_rre'] if checkpoint.get('best_rre') is not None else np.inf
else:
epoch = 0
purge_step = 0
recall_best = 0
rre_best = np.inf
# --- Tensorboard
logger = SummaryWriter(log_dir=path2exp, purge_step=purge_step, flush_secs=60)
# --- Train
for epoch in range(epoch, NB_EPOCHS):
train_one_epoch(epoch, train_loader, net, optimizer, logger, DEVICE,
2 * config.max_scale * config.voxel_size,
LOSS_ENTROPY_ATTENTION, LOSS_DIST_ATTENTION, LOSS_DIST_CONFIDENCE)
recall, rre = validation(epoch, val_loader, net, logger, DEVICE,
threshold_rte=threshold_rte,
threshold_rre=threshold_rre)
scheduler.step()
# Save best model (but we use the last saved model at test time !)
if recall_best < recall or (recall_best == recall and rre_best > rre):
recall_best, rre_best = recall, rre
filename = os.path.join(path2exp, 'check_point_best.pth')
save_model(epoch, net, optimizer, scheduler, recall_best, rre_best, filename)
# Save checkpoint
print(epoch + 1, 'epoch done')
filename = os.path.join(path2exp, 'check_point_last.pth')
save_model(epoch, net, optimizer, scheduler, recall_best, rre_best, filename)
|
from bootiful_sanic.model.user import User
class UserDAO(User):
@classmethod
async def get_user(cls, user_id):
user = await cls.get_or_404(user_id)
return user
|
from rest_framework import serializers
class HelloSerializer(serializers.Serializer):
"""Serializers a name field for testing our APIView"""
name = serializers.CharField(max_length= 10)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from enum import IntEnum
class LiveAuthType(IntEnum):
api = 0
password = 1
no_password = 2
class LiveTemplateType(IntEnum):
video = 1  # video
video_chat_qa = 2  # video, chat, Q&A
video_chat = 3  # video, chat
video_doc_chat = 4  # video, document, chat
video_doc_chat_qa = 5  # video, document, chat, Q&A
video_qa = 6  # video, Q&A
class LiveAutoLoginType(IntEnum):
user = 1
assistant = 2
manage = 3
lecturer = 4
class ClassRoomAutoLoginType(IntEnum):
presenter = 1  # teacher
assistant = 2  # teaching assistant
talker = 3  # interactive participant
audience = 4  # observer (listen-only)
inspector = 5  # invisible observer
record = 6  # playback
class ClassRoomRoomType(IntEnum):
chat = 1
small_class = 2
class ClassRoomTemplateType(IntEnum):
talk = 1  # lecture mode
main = 2  # main-view mode
tile = 4  # tiled mode
# one_to_one = 8  # 1v1 mode; using this causes a 500 error
two_teacher = 16  # dual-teacher mode
class ClassRoomMergeType(IntEnum):
main = 1  # lecture mode
tile = 2  # main-view mode
cover = 3  # tiled mode
class ClassRoomClassType(IntEnum):
call = 1  # roll call
free = 2  # free
auto = 3  # automatic
|
import random as rnd
import numpy as np
import prettytable
POPULATION_SIZE = 9
NUMB_OF_ELITE_SCHEDULES = 1
TOURNAMENT_SELECTION_SIZE = 3
MUTATION_RATE = 0.1
class Department:
def __init__(self, name, courses):
self._name = name
self._courses = courses
def get_name(self):
return self._name
def get_courses(self):
return self._courses
class Class:
def __init__(self, id, dept, course):
self._id = id
self._dept = dept
self._course = course
self._instructor = None
self._meetingTime = None
self._room = None
def get_id(self):
return self._id
def get_dept(self):
return self._dept
def get_course(self):
return self._course
def get_instructor(self):
return self._instructor
def get_meetingTime(self):
return self._meetingTime
def get_room(self):
return self._room
def set_instructor(self, instructor):
self._instructor = instructor
def set_meetingTime(self, meetingTime):
self._meetingTime = meetingTime
def set_room(self, room):
self._room = room
def __str__(self) -> str:
return str(self._dept.get_name()) + ',' + str(self._course.get_number()) + ',' + \
str(self._room.get_number()) + ',' + str(self._instructor.get_id()) + ',' + str(
self._meetingTime.get_id())
class Course:
def __init__(self, number, name, instructors, maxNumberOfStudents):
self._number = number
self._name = name
self._instructors = instructors
self._maxNumberOfStudents = maxNumberOfStudents
def get_number(self):
return self._number
def get_name(self):
return self._name
def get_instructors(self):
return self._instructors
def get_maxNumbOfStudents(self):
return self._maxNumberOfStudents
def __str__(self):
return self._name
class Instructor:
def __init__(self, id, name):
self._id = id
self._name = name
def get_id(self):
return self._id
def get_name(self):
return self._name
def __str__(self):
return self._name
class Room:
def __init__(self, number, seatingCapacity):
self._number = number
self._seatingCapacity = seatingCapacity
def get_number(self):
return self._number
def get_seatingCapacity(self):
return self._seatingCapacity
class MeetingTime:
def __init__(self, id, time):
self._id = id
self._time = time
def get_id(self):
return self._id
def get_time(self):
return self._time
class Data:
ROOMS = [['R1', 25], ['R2', 45], ['R3', 35]]
MEETING_TIMES = [['MT1', 'MWF 09:00 - 10:00'],
['MT2', 'MWF 10:00 - 11:00'],
['MT3', 'TTH 09:00 - 10:30'],
['MT4', 'TTH 10:30 - 12:00']]
INSTRUCTORS = [['I1', 'Dr James Web'],
['I2', 'Dr Mike Brown'],
['I3', 'Dr Steve Day'],
['I4', 'Dr Jane Doe']]
def __init__(self):
self._rooms = []
self._meetingTimes = []
self._instructors = []
for i in range(len(self.ROOMS)):
self._rooms.append(Room(self.ROOMS[i][0], self.ROOMS[i][1]))
for i in range(len(self.MEETING_TIMES)):
self._meetingTimes.append(MeetingTime(self.MEETING_TIMES[i][0], self.MEETING_TIMES[i][1]))
for i in range(len(self.INSTRUCTORS)):
self._instructors.append(Instructor(self.INSTRUCTORS[i][0], self.INSTRUCTORS[i][1]))
course1 = Course("C1", "325K", [self._instructors[0], self._instructors[1]], 25)
course2 = Course("C2", "320K", [self._instructors[0], self._instructors[1]], 35)
course3 = Course("C3", "462k", [self._instructors[0], self._instructors[1]], 25)
course4 = Course("C4", "464K", [self._instructors[2], self._instructors[3]], 30)
course5 = Course("C5", "360C", [self._instructors[3]], 35)
course6 = Course("C6", "303K", [self._instructors[0], self._instructors[2]], 45)
course7 = Course("C7", "303L", [self._instructors[1], self._instructors[3]], 45)
self._courses = [course1, course2, course3, course4, course5, course6, course7]
dept1 = Department("MATH", [course1, course3])
dept2 = Department("EE", [course2, course4, course5])
dept3 = Department("PHY", [course6, course7])
self._depts = [dept1, dept2, dept3]
self._numberOfClasses = 0
for i in range(len(self._depts)):
self._numberOfClasses += len(self._depts[i].get_courses())
def get_rooms(self):
return self._rooms
def get_instructors(self):
return self._instructors
def get_courses(self):
return self._courses
def get_depts(self):
return self._depts
def get_meetingTimes(self):
return self._meetingTimes
class Schedule:
def __init__(self):
self._data = data
self._classes = []
self._numOfConflicts = 0
self._fitness = -1
self._classNumb = 0
self._isFitnessChanged = True
def get_classes(self):
self._isFitnessChanged = True
return self._classes
def get_numOfConflicts(self):
return self._numOfConflicts
def get_fitness(self):
if self._isFitnessChanged:
self._fitness = self.calculate_fitness()
self._isFitnessChanged = False
return self._fitness
def initialize(self):
depts = self._data.get_depts()
for i in range(len(depts)):
courses = depts[i].get_courses()
for j in range(len(courses)):
new_class = Class(self._classNumb, depts[i], courses[j])
self._classNumb += 1
new_class.set_meetingTime(data.get_meetingTimes()[rnd.randrange(0, len(data.get_meetingTimes()))])
new_class.set_room(data.get_rooms()[rnd.randrange(0, len(data.get_rooms()))])
new_class.set_instructor(
courses[j].get_instructors()[rnd.randrange(0, len(courses[j].get_instructors()))])
self._classes.append(new_class)
return self
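# Fitness is the reciprocal of (conflicts + 1), so a conflict-free schedule has
# fitness 1.0. Conflicts counted below: a room smaller than the course's enrollment
# cap, and two classes sharing a meeting time with the same room or the same instructor.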
def calculate_fitness(self):
self._numOfConflicts = 0
classes = self.get_classes()
for i in range(len(classes)):
if classes[i].get_room().get_seatingCapacity() < classes[i].get_course().get_maxNumbOfStudents():
self._numOfConflicts += 1
for j in range(len(classes)):
if j >= i:
if \
classes[i].get_meetingTime() == classes[j].get_meetingTime() and classes[i].get_id() != \
classes[j].get_id():
if classes[i].get_room() == classes[j].get_room():
self._numOfConflicts += 1
if classes[i].get_instructor() == classes[j].get_instructor():
self._numOfConflicts += 1
return 1 / (1.0 * (self._numOfConflicts + 1))
def __str__(self) -> str:
return_value = ''
for i in range(len(self._classes) - 1):
return_value += str(self._classes[i]) + ', '
return_value += str(self._classes[len(self._classes) - 1])
return return_value
class Population:
def __init__(self, size):
self._size = size
self._data = data
self._schedules = []
for i in range(size):
self._schedules.append(Schedule().initialize())
def get_schedules(self):
return self._schedules
class GeneticAlgorithm:
def evolve(self, population):
return self._mutate_population(self._crossover_population(population))
def _crossover_population(self, pop):
crossover_pop = Population(0)
for i in range(NUMB_OF_ELITE_SCHEDULES):
crossover_pop.get_schedules().append(pop.get_schedules()[i])
i = NUMB_OF_ELITE_SCHEDULES
while i < POPULATION_SIZE:
schedule1 = self._select_tournament_population(pop).get_schedules()[0]
schedule2 = self._select_tournament_population(pop).get_schedules()[0]
crossover_pop.get_schedules().append(self._crossover_schedule(schedule1, schedule2))
i += 1
return crossover_pop
def _mutate_population(self, population):
for i in range(NUMB_OF_ELITE_SCHEDULES, POPULATION_SIZE):
self._mutate_schedule(population.get_schedules()[i])
return population
def _crossover_schedule(self, schedule1, schedule2):
self.is_not_used()
crossover_schedule = Schedule().initialize()
for i in range(0, len(crossover_schedule.get_classes())):
if rnd.random() > 0.5:
crossover_schedule.get_classes()[i] = schedule1.get_classes()[i]
else:
crossover_schedule.get_classes()[i] = schedule2.get_classes()[i]
return crossover_schedule
def _mutate_schedule(self, mutateSchedule):
self.is_not_used()
schedule = Schedule().initialize()
for i in range(len(mutateSchedule.get_classes())):
if MUTATION_RATE > rnd.random():
mutateSchedule.get_classes()[i] = schedule.get_classes()[i]
return mutateSchedule
def _select_tournament_population(self, pop):
self.is_not_used()
tournament_pop = Population(0)
i = 0
while i < TOURNAMENT_SELECTION_SIZE:
tournament_pop.get_schedules().append(pop.get_schedules()[rnd.randrange(0, POPULATION_SIZE)])
i += 1
tournament_pop.get_schedules().sort(key=lambda x: x.get_fitness(), reverse=True)
return tournament_pop
def is_not_used(self):
pass
class DisplayMgr:
def print_available_data(self):
self.is_not_used()
print("> All Available Data")
self.print_dept()
self.print_course()
self.print_room()
self.print_instructor()
self.print_meeting_times()
def print_dept(self):
self.is_not_used()
depts = data.get_depts()
available_depts_table = prettytable.PrettyTable(['dept', 'courses'])
for i in range(len(depts)):
courses = depts.__getitem__(i).get_courses()
temp_str = "["
for j in range(len(courses) - 1):
temp_str += courses[j].__str__() + ", "
temp_str += courses[len(courses) - 1].__str__() + "]"
available_depts_table.add_row([depts.__getitem__(i).get_name(), temp_str])
print(available_depts_table)
def print_course(self):
self.is_not_used()
available_course_table = prettytable.PrettyTable(['id', 'course', 'maxS', 'ins'])
courses = data.get_courses()
for i in range(len(courses)):
instructors = courses[i].get_instructors()
temp_str = ""
for j in range(len(instructors) - 1):
temp_str += instructors[j].__str__() + ", "
temp_str += instructors[len(instructors) - 1].__str__()
available_course_table.add_row(
[courses[i].get_number(), courses[i].get_name(), str(courses[i].get_maxNumbOfStudents()), temp_str]
)
print(available_course_table)
def print_room(self):
self.is_not_used()
availableRoomsTable = prettytable.PrettyTable(['room', 'max'])
rooms = data.get_rooms()
for i in range(len(rooms)):
availableRoomsTable.add_row([str(rooms[i].get_number()), str(rooms[i].get_seatingCapacity())])
print(availableRoomsTable)
def print_instructor(self):
self.is_not_used()
available_instructor_table = prettytable.PrettyTable(['id', 'ins'])
instructors = data.get_instructors()
for i in range(len(instructors)):
available_instructor_table.add_row([instructors[i].get_id(), instructors[i].get_name()])
print(available_instructor_table)
def print_meeting_times(self):
self.is_not_used()
availableMeetingTimeTable = prettytable.PrettyTable(['id', 'mt'])
meetingTimes = data.get_meetingTimes()
for i in range(len(meetingTimes)):
availableMeetingTimeTable.add_row([meetingTimes[i].get_id(), meetingTimes[i].get_time()])
print(availableMeetingTimeTable)
def print_generation(self, population):
self.is_not_used()
table1 = prettytable.PrettyTable(['Schedule #', 'Fitness', '# of conflicts',
'classes [dept,class,room,instructor,meeting-time]'])
schedules = population.get_schedules()
for i in range(len(schedules)):
table1.add_row([str(i + 1), round(schedules[i].get_fitness(), 3), schedules[i].get_numOfConflicts(),
schedules[i].__str__()])
print(table1)
def print_schedule_as_table(self, schedule):
self.is_not_used()
table1 = prettytable.PrettyTable(['Class #', 'Dept', 'Courses (number, max # of students)', "Room (Capacity)",
"Instructor (Id)", "Meeting Time (Id)"])
classes = schedule.get_classes()
for i in range(len(classes)):
course = classes[i].get_course().get_name() + " (" + classes[i].get_course().get_number() + ", " + \
str(classes[i].get_course().get_maxNumbOfStudents()) + ")"
instructor = classes[i].get_instructor().get_name() + " (" + classes[i].get_instructor().get_id() + ")"
room = classes[i].get_room().get_number() + " (" + str(classes[i].get_room().get_seatingCapacity()) + ")"
meeting_time = classes[i].get_meetingTime().get_time() + " (" + classes[i].get_meetingTime().get_id() + ")"
table1.add_row(
[classes[i].get_id(), classes[i].get_dept().get_name(), course,
room, instructor, meeting_time]
)
print(table1)
def print_mean(self, population):
self.is_not_used()
schedules = population.get_schedules()
fitness_scores = []
for i in range(len(schedules)):
fitness_scores.append(round(schedules[i].get_fitness(), 3))
print("> Mean= ", np.array(fitness_scores).mean())
def print_std(self, population):
self.is_not_used()
schedules = population.get_schedules()
fitness_scores = []
for i in range(len(schedules)):
fitness_scores.append(round(schedules[i].get_fitness(), 3))
print("> Standard Deviation= ", np.array(fitness_scores).std())
def is_not_used(self):
pass
data = Data()
display = DisplayMgr()
display.print_available_data()
generation_number = 0
print("\n> Generation #", generation_number)
population = Population(POPULATION_SIZE)
population.get_schedules().sort(key=lambda x: x.get_fitness(), reverse=True)
display.print_generation(population)
geneticAlgorithm = GeneticAlgorithm()
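# Evolve generation after generation until a conflict-free schedule is found
# (fitness == 1.0 means zero conflicts).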
while population.get_schedules()[0].get_fitness() != 1.0:
generation_number += 1
print("\n> Generation #", generation_number)
population = geneticAlgorithm.evolve(population)
population.get_schedules().sort(key=lambda x: x.get_fitness(), reverse=True)
display.print_generation(population)
display.print_schedule_as_table(population.get_schedules()[0])
display.print_mean(population)
display.print_std(population)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
README: 轉換格式 從 臺灣生物分類階層樹狀名錄
Input: https://taibnet.sinica.edu.tw/AjaxTree/allkingdom.php?
id detail: #https://taibnet.sinica.edu.tw/chi/taibnet_species_detail.php?name_code=201102&tree=y
API by hack: https://taibnet.sinica.edu.tw/AjaxTree/xml.php?id=%s
CSV 格式:
id,level,link,child,pid,name,ename,memo,text
項目 ID, 第幾層 ,從頭到此的結構 ,是否有子項(0:屬 1:非屬),上層代號,中文名稱,英文名稱,備註(此項資料下面有多少項目),項目原始 text
@author: wuulong
"""
#%% get XML
import os
import requests
import pandas as pd
import xml.etree.ElementTree as etree
import re
load_cnt = 0  # number of items loaded, debug purpose
#%% Data storage
class BioTree():
def __init__(self,root_id,root_level,root_link):
self.root = {}  # id -> [level, link, result]
self.root_id = root_id  # e.g. Plantae (the plant kingdom)
self.root_level = root_level  # kingdom = level 1
self.root_link = root_link  # e.g. 7-x-x
#%% Download the XML; skip if it has already been downloaded
def url_to_file(url,pathname,reload=False):
#print("url_to_file: url=%s, pathname=%s" %(url,pathname))
global load_cnt
need_load= True
if reload==False and os.path.isfile(pathname) :
need_load = False
if need_load:
try:
r = requests.get(url, allow_redirects=True,verify=False)
open(pathname, 'wb').write(r.content)
except:
return False
load_cnt += 1
if load_cnt % 100 ==0 :
print("loading cnt = %i" %(load_cnt))
return True
#%% Convert the XML into a list
def parse_xml(filename):
try:
tree = etree.parse(filename)
root = tree.getroot()
dataname = root[0].tag  # FIXME: needs more checking for the no-data case
result = []
for item in root.findall(dataname):
id = item.attrib['id']
child = item.attrib['child']
text_tmp = item.attrib['text']
#text = text_tmp
text = text_tmp.replace(chr(160),' ') #(C2) A0-> 0x20
#m = re.search('(\S+)\s+(.*)<font.*>(.*)</font>',text)
#print("child=%s,id=%s,name=%s,ename=%s,memo=%s,text=%s" %(child,id,m.group(1),m.group(2),m.group(3).strip(),text))
result.append([id,child,text])
except:
result = None
return result #[ [id,child,text],[id,child,text],... ]
#%% Fetch and process a single id
def get_id(id):
url="https://taibnet.sinica.edu.tw/AjaxTree/xml.php?id=%s" %(id)
status = url_to_file(url,"%s.xml" %(id),False)
if not status:
print("!!! Get URL exception: id=%i" %(id))
return None
filename = "%s.xml" %(id)
result = parse_xml(filename)
if result is None:
print("!!! Parse XML Exception: id=%s " %(id))
return result
#%% From an item's text, extract the name, scientific (English) name, and memo
def parse_text(id,child,text):
#print("parse_txt id=%s" %(id))
if child=='1':
m = re.search('(\S+)\s+(.*)<font.*>(.*)</font>',text)
if m:
name = m.group(1)
ename = m.group(2).replace('<i>','').replace('</i>','')
memo = m.group(3).strip()
#print("child=%s,id=%s,name=%s,ename=%s,memo=%s,text=%s" %(child,id,name,ename,memo,text))
else:
print('Exception parse_text: id=%s' % (id) )
return [id, child, '', '', '']
else:
if text[0]=='<':
m = re.search('(.*)<font.*>(.*)</font>',text)
name = ""
ename_str = m.group(1)
else:
m = re.search('(\S*)\s*(.*)<font.*>(.*)</font>',text)
name = m.group(1)
ename_str = m.group(2)
m2 = re.findall("<i>(.*?)</i>", ename_str, flags=0)
ename = " ".join(m2)
memo = ""
#print("child=%s,id=%s,name=%s,ename=%s, text=%s" %(child,id,name,ename, text))
return [id, child, name, ename, memo]
#%% Recursively fetch ids
def travel_id(oid, level, link, root, recursive=False):
result = get_id(oid)
if result is None:
return
root[oid]=[level,link,result]
for [id,child,text] in result:
if child=='1':
if recursive:
if link:
link_str = "%s-%s" %(link,id)
else:
link_str = id
travel_id(id, level+1, link_str,root, True)
#%% Dump the result to the screen or to a file (CSV)
def dump_root(root,filename):
if filename != "":
# open the output file
fp = open(filename, "w",encoding='UTF-8')
fp.write("id|level|link|child|pid|name|ename|memo|text\n")
for pid in root.keys():
result = root[pid][2]
level = root[pid][0]+1
link = root[pid][1]
for [id,child,text] in result:
[id, child, name, ename, memo] = parse_text(id,child,text)
if link:
link_str = "%s-%s" %(link,id)
else:
link_str = id
if filename != "":
fp.write("%s|%i|%s|%s|%s|%s|%s|%s|%s\n" %(id,level,link_str, child,pid,name,ename, memo, text))
else:
print("id=%s|level=%i|link=%s|child=%s|pid=%s|name=%s|ename=%s|memo=%s|text=%s" %(id,level,link_str,child,pid,name,ename, memo, text))
if filename != "":
fp.close()
#%% Configure / fetch / output
#oid='71004010012'
bt = BioTree('7',1,"7")  # id to process, its level (kingdom = level 1), and the path from the root down to this level (e.g. Tracheophyta is 7-710); this example is the plant kingdom
#bt = BioTree('0',0,"")  # this example covers everything
#bt = BioTree('1',3,"1-0-0")  # viruses are a special case
#bt = BioTree('7100402',1,"7-710-71004-7100402")  # same argument meaning as above
#bt = BioTree('1',1,"1")  # same argument meaning as above
travel_id(bt.root_id,bt.root_level,bt.root_link,bt.root, True)
dump_root(bt.root,"biotree.csv")
#dump_root(bt.root,"")
|
#!/usr/bin/env python
import os
import yaml
from botocore.exceptions import ClientError, ProfileNotFound
import boto3
from fabric.api import run, env, task, sudo, cd
from fabric.contrib.files import exists
from fabric.contrib.console import confirm
from fabric.operations import require
from fabric.utils import abort, warn
from git import Repo
# We depend on the repo name in Github for the canonical name, however the folder cloned locally could be
# named differently. This code queries the `origin` remote and gets the repo name from git configuration
# instead of expecting the folder name to also be the upstream repo name.
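# Illustrative example (hypothetical URL): for origin '[email protected]:example-org/example-repo.git',
# split('/')[1] gives 'example-repo.git' and repo_name becomes 'example-repo'.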
repo = Repo()
origin_url = repo.remotes.origin.url
remote_repo_path = origin_url.split('/')[1]
repo_name = remote_repo_path.split('.')[0]
env.forward_agent = True
with open('fabric_config.yaml') as file:
deploy_config = yaml.load(file)
shortcode = deploy_config.get('shortcode')
artifact_name = deploy_config.get('artifact_name')
temp_dir = deploy_config.get('temp_dir')
deploy_to = deploy_config.get('deploy_to')
s3_bucket = deploy_config.get('s3_bucket')
def _get_latest_build():
try:
session = boto3.Session(profile_name=os.getenv('AWS_PROFILE', 'hb-deployer'))
except ProfileNotFound:
abort('AWS profile not found. Ensure you have `hb-deployer` set up in `~/.aws/credentials`')
try:
s3 = session.client('s3')
r = s3.list_objects(Bucket=s3_bucket)
target_match = '/'.join((repo_name, '_'.join((repo_name, env.branch))))
available_builds = [x.get('Key') for x in r.get('Contents') if x.get('Key').startswith(target_match)]
try:
return available_builds[-1]
except IndexError:
abort('Unable to find any builds in S3. Check that TravisCI is uploading builds.')
except ClientError:
abort('Permission denied when performing S3 query. Check your AWS credential configuration.')
def _get_current_build():
if exists('%s/.current_version' % os.path.join(deploy_to, artifact_name), use_sudo=True):
current_build = run('cat %s/.current_version' % os.path.join(deploy_to, artifact_name))
else:
current_build = 'unknown'
return current_build
def download_build(s3_bucket, build_id, temp_dir):
sudo('aws s3 cp s3://%s/%s %s/' % (s3_bucket, build_id, temp_dir))
with cd(temp_dir):
_, build_filename = build_id.split('/')
sudo('tar xf %s' % build_filename)
path_to_deployed_artifact = os.path.join(deploy_to, artifact_name)
if 'wp-content' in repo_name:
_wpcontent(temp_dir, build_filename, path_to_deployed_artifact)
else:
_normal(temp_dir, build_filename, path_to_deployed_artifact)
_cleanup_build_dir()
def _wpcontent(temp_dir, build_filename, path_to_deployed_artifact):
# The steps for deploying the `wp-content/` folder are a special case so that we don't blow away
# the folders inside it that we're deploying separately (e.g. everything in `plugins/` and `themes/`)
with cd(temp_dir):
sudo('mv hnc-%s %s' % (artifact_name, artifact_name))
sudo('rsync -a %s/* %s' % (artifact_name, path_to_deployed_artifact))
sudo('find %s -type f -name *.php -exec chown deployer:deployer {} \;' % path_to_deployed_artifact)
sudo('find %s -type f -name *.php -exec chmod 644 {} \;' % path_to_deployed_artifact)
sudo('find %s -type d -exec chown deployer:deployer {} \;' % os.path.join(path_to_deployed_artifact, 'mu-plugins'))
sudo('find %s -type d -exec chmod 755 {} \;' % os.path.join(path_to_deployed_artifact, 'mu-plugins'))
sudo('echo %s > %s/.current_version' % (build_filename, path_to_deployed_artifact))
def _normal(temp_dir, build_filename, path_to_deployed_artifact):
with cd(temp_dir):
sudo('rsync -a --delete %s %s' % (artifact_name, deploy_to))
sudo('chown -R deployer:deployer %s' % path_to_deployed_artifact)
sudo('find %s -type f -exec chmod 644 {} \;' % path_to_deployed_artifact)
sudo('find %s -type d -exec chmod 755 {} \;' % path_to_deployed_artifact)
sudo('echo %s > %s/.current_version' % (build_filename, path_to_deployed_artifact))
def _cleanup_build_dir():
sudo('rm -rf %s/*' % temp_dir)
def stage_set(stage_name='stg'):
env.stage = stage_name
for option, value in deploy_config['stages'][env.stage].items():
setattr(env, option, value)
@task
def prod():
stage_set('prod')
@task
def stg():
stage_set('stg')
@task
def deploy(override_prompt=False):
"""
Deploy the project.
"""
require('stage', provided_by=(stg, prod))
latest_build = _get_latest_build()
current_build = _get_current_build()
if current_build == 'unknown':
msg = '''Either this is the first time deploying or the destination is in unknown state.
Either way, deploying now is a safe operation and this message is only for your own information.'''
warn('Unable to find a deployed build on the node. %s' % msg)
print 'Build currently deployed:', current_build
print 'Build available for deploying:', latest_build.split('/')[1]
print
if not override_prompt:
continue_prompt = confirm('Ready to deploy?')
if not continue_prompt:
abort('Not ready to deploy')
if latest_build.split('/')[1] == current_build:
warn('You are about to deploy the exact same build again')
dupe_deploy_prompt = confirm('Are you sure you want to deploy the same build again?')
if not dupe_deploy_prompt:
abort('Not deploying duplicate build')
download_build(s3_bucket, latest_build, temp_dir)
|
#!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:created: 2021/1/11 21:05
:author: 苍之幻灵
:homepage: https://cpcgskill.com
:QQ: 2921251087
:Afdian: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import logging
import ast
import astunparse
from utils import *
logging.basicConfig(
format="%(asctime)s [CPCLI] %(levelname)s: %(message)s",
level=logging.DEBUG)
class BuildPython(object):
head = u"""\
import sys
import <<group>>
<<group>>.<<module>> = sys.modules.get(__name__)
"""
def __init__(self, src, group_name, script):
self.src = formattedPath(src)
self.current_src = self.src
self.current_file = None
self.script = formattedPath(script)
self.n_id = u"_" + uid()
self.files = list()
self.build_end_files = list()
self.group_name = group_name
# collect all files
for root, dirs, files in os.walk(self.src):
for file in files:
self.files.append(formattedPath(u"%s/%s" % (root, file)))
def run(self):
# determine which files to compile
build_files = [i for i in self.files if i.split(u".")[-1] == u"py"]
# compile the files
for i in build_files:
logging.info(u"build : %s" % i)
self.buildPyFile(i)
# add the runtime header
for i in build_files:
module_name = self.fileModuleName(self.src, i)
if len(module_name.split(u".")) == 1:
logging.info(u"head :%s" % module_name)
code = readFile(i)
nodes = ast.parse(code)
for t in reversed(self.buildTemplate(module_name.split(u".")[0])):
nodes.body.insert(0, t)
writeFile(i, astunparse.unparse(nodes))
# compile the entry script
code = readFile(self.script)
nodes = ast.parse(code)
for i in ast.walk(nodes):
if isinstance(i, ast.Import):
for ID in range(len(i.names)):
t = i.names[ID]
file = self.searchModuleFile(self.src, t.name)
if not file is None:
module_name = self.fileModuleName(self.src, file)
module_name = u"%s.%s" % (self.group_name, module_name)
module_names = module_name.split(u".")
if t.asname is None:
head = ast.alias()
head.name = u".".join(module_names[:2])
head.asname = module_names[1]
t.asname = u"_"
i.names[ID] = [head, t]
else:
i.names[ID] = [t]
t.name = module_name
else:
i.names[ID] = [t]
i.names = [l for t in i.names for l in t]
elif isinstance(i, ast.ImportFrom):
if i.level < 1:
file = self.searchModuleFile(self.src, i.module)
if not file is None:
module_name = self.fileModuleName(self.src, file)
module_name = u"%s.%s" % (self.group_name, module_name)
i.module = module_name
code = astunparse.unparse(nodes)
writeFile(self.script, code)
def buildTemplate(self, module_name):
head = self.head
head = head.replace(u"<<group>>", self.group_name)
head = head.replace(u"<<module>>", module_name)
return ast.parse(head).body
def buildPyFile(self, file):
if file in self.build_end_files:
return
_up_file = self.current_file
self.current_file = formattedPath(file)
self.current_src = u"/".join(self.current_file.split(u"/")[:-1])
try:
code = readFile(self.current_file)
# record this file as compiled
self.build_end_files.append(self.current_file)
nodes = ast.parse(code)
for i in ast.walk(nodes):
if isinstance(i, ast.Import):
for ID in range(len(i.names)):
t = i.names[ID]
file = self.searchModuleFile(self.src, t.name)
if not file is None:
module_name = self.fileModuleName(self.src, file)
module_name = u"%s.%s" % (self.group_name, module_name)
module_names = module_name.split(u".")
op_module_names = t.name.split(u".")
if t.asname is None:
head = ast.alias()
head.name = u".".join(module_names[:len(module_names) - (len(op_module_names) - 1)])
head.asname = module_names[len(module_names) - len(op_module_names)]
t.asname = u"_"
i.names[ID] = [head, t]
else:
i.names[ID] = [t]
t.name = module_name
# the corresponding python file
# self.buildPyFile(file)
else:
i.names[ID] = [t]
i.names = [l for t in i.names for l in t]
elif isinstance(i, ast.ImportFrom):
if i.level < 1:
file = self.searchModuleFile(self.src, i.module)
if not file is None:
module_name = self.fileModuleName(self.src, file)
module_name = u"%s.%s" % (self.group_name, module_name)
i.module = module_name
# self.buildPyFile(file)
code = astunparse.unparse(nodes)
writeFile(self.current_file, code)
except Exception as ex:
if self.current_file in self.build_end_files:
self.build_end_files.remove(self.current_file)
finally:
self.current_file = _up_file
if not _up_file is None:
self.current_src = u"/".join(self.current_file.split(u"/")[:-1])
def isfile(self, file):
file = formattedPath(file)
return file in self.files
def searchModuleFile(self, root, module):
u"""
:param root:
:param module:
:return:
:rtype: unicode|None
"""
if module is None:
if self.isfile(u"%s/__init__.py" % root):
return u"%s/__init__.py" % root
return None
module_file = module.replace(u".", u"/")
if self.isfile(u"%s/%s/__init__.py" % (root, module_file)):
return u"%s/%s/__init__.py" % (root, module_file)
if self.isfile(u"%s/%s.py" % (root, module_file)):
return u"%s/%s.py" % (root, module_file)
if self.isfile(u"%s/%s.pyd" % (root, module_file)):
return u"%s/%s.pyd" % (root, module_file)
if self.isfile(u"%s/%s.pyw" % (root, module_file)):
return u"%s/%s.pyw" % (root, module_file)
if self.isfile(u"%s/%s.pyo" % (root, module_file)):
return u"%s/%s.pyo" % (root, module_file)
if self.isfile(u"%s/%s.pyc" % (root, module_file)):
return u"%s/%s.pyc" % (root, module_file)
if self.isfile(u"%s/%s.pyz" % (root, module_file)):
return u"%s/%s.pyz" % (root, module_file)
return None
def fileModuleName(self, root, module_file):
names = module_file[len(root) + 1:].split(u"/")
if len(names) <= 1:
names[-1] = names[-1].split(u".")[0]
return u".".join(names)
if names[-1] == u"__init__.py":
names.pop(-1)
names[-1] = names[-1].split(u".")[0]
return u".".join(names)
def absModuleName(self, module, root, current_src, level=0):
if level <= 0:
module_file = self.searchModuleFile(current_src, module)
if not module_file is None:
return self.fileModuleName(root, module_file)
module_file = self.searchModuleFile(root, module)
if not module_file is None:
return self.fileModuleName(root, module_file)
else:
current_dir_path_sps = current_src.split(u"/")
for i in range(level - 1):
current_dir_path_sps.pop(-1)
path = u"/".join(current_dir_path_sps)
module_file = self.searchModuleFile(path, module)
if not module_file is None:
return self.fileModuleName(root, module_file)
return None
def group(src, script, group_name=None):
script = formattedPath(script)
if group_name is None:
group_name = uidname()
BuildPython(src=src, group_name=group_name, script=script).run()
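# Usage sketch (hypothetical paths): bundle every module under 'src/mylib' beneath a
# generated (or given) group package and rewrite the entry script's imports accordingly:
#   group('src/mylib', 'src/mylib/main.py', group_name='bundled_mylib')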
|
"""Tkinter GUI chat client."""
import socket
import threading
import tkinter as tk
firstclick = True
IP = "127.0.0.1"
PORT = 33002
BUFFSIZE = 1024
ADDR = (IP, PORT)
MSG_COUNT = 0
""" Address Family - internet, SOCK_STREAM is the TCP connection; reliable """
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
""" reuse socket address to allow reconnecting """
client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client_socket.connect(ADDR)
def on_entry_click(event):
"""function that gets called whenever entry1 is clicked"""
global firstclick
if firstclick: # if this is the first time they clicked it
firstclick = False
entry_field.delete(0, "end") # delete all the text in the entry
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFFSIZE).decode("utf8")
msg_list.insert(tk.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
global MSG_COUNT
msg = my_msg.get()
"""" Update chat window with current user's name/username """
if MSG_COUNT == 0 and msg != "{quit}":
new_title = '%s\'s Chat Window' % msg
window.title(new_title)
my_msg.set("") # Clears input field.
client_socket.send(bytes(msg, "utf8"))
MSG_COUNT += 1
if msg == "{quit}":
client_socket.close()
window.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
window = tk.Tk()
window.title("Chat Room")
window.geometry("500x350")
messages_frame = tk.Frame(window)
my_msg = tk.StringVar() # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tk.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tk.Listbox(messages_frame, height=15, width=60,
yscrollcommand=scrollbar.set)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
msg_list.pack(side=tk.LEFT, fill=tk.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tk.Entry(window, textvariable=my_msg)
entry_field.bind('<FocusIn>', on_entry_click)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tk.Button(window, text="Send", command=send)
send_button.pack()
window.protocol("WM_DELETE_WINDOW", on_closing)
receive_thread = threading.Thread(target=receive)
receive_thread.start()
window.mainloop()
|
from django.conf import settings
from django.db import models
from django.utils.functional import lazy
from django.utils.translation import gettext_lazy as _
from djchoices import DjangoChoices, ChoiceItem
from future.utils import python_2_unicode_compatible
from bluebottle.utils.models import PublishableModel, get_language_choices
@python_2_unicode_compatible
class Quote(PublishableModel):
"""
Slides for homepage
"""
class QuoteStatus(DjangoChoices):
published = ChoiceItem('published', label=_("Published"))
draft = ChoiceItem('draft', label=_("Draft"))
# Contents
language = models.CharField(_("language"), max_length=7,
choices=lazy(get_language_choices, list)())
quote = models.TextField()
user = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_('Quoted member'),
related_name="quote_user",
on_delete=models.CASCADE)
def __str__(self):
return self.quote
class Meta:
ordering = ('-publication_date',)
|
"""
Script to generate suitesparse_graphblas.h, suitesparse_graphblas_no_complex.h, and source.c files.
- Copy the SuiteSparse header file GraphBLAS.h to the local directory.
- Run the C preprocessor (cleans it up, but also loses #define values).
- Parse the processed header file using pycparser.
- Create the final files with and without complex types.
- Check #define values for sanity.
The generated files are then used by cffi to bind to SuiteSparse:GraphBLAS.
When running against new versions of SuiteSparse:GraphBLAS, the most likely
things that may need to change are:
- Update DEFINES, the integer #define constants defined by SuiteSparse.
- Update CHAR_DEFINES, the char* #defines.
- Update IGNORE_DEFINES, #defines that the script may mistakenly identify,
but that we can safely ignore.
- Update DEPRECATED: deprecated names (including enum fields) to exclude.
Run `python create_headers.py --help` to see more help.
"""
import argparse
import os
import re
import shutil
import subprocess
import sys
import pycparser
from pycparser import c_ast, c_generator, parse_file
def sort_key(string):
"""e.g., sort 'INT8' before 'INT16'"""
return string.replace("8", "08")
def has_complex(string):
return "FC32" in string or "FC64" in string
def groupby(index, seq):
rv = {}
for item in seq:
key = item[index]
if key in rv:
rv[key].append(item)
else:
rv[key] = [item]
return rv
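# e.g. groupby(0, [("a", 1), ("b", 2), ("a", 3)]) -> {"a": [("a", 1), ("a", 3)], "b": [("b", 2)]}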
AUTO = "/* This file is automatically generated */"
DEPRECATED = {
# enums
"GxB_IS_HYPER",
"GrB_SCMP",
# functions
"GxB_kron",
"GxB_Matrix_resize",
"GxB_Vector_resize",
# UnaryOp
"GxB_ABS_BOOL",
"GxB_ABS_INT8",
"GxB_ABS_INT16",
"GxB_ABS_INT32",
"GxB_ABS_INT64",
"GxB_ABS_UINT8",
"GxB_ABS_UINT16",
"GxB_ABS_UINT32",
"GxB_ABS_UINT64",
"GxB_ABS_FP32",
"GxB_ABS_FP64",
# Monoids
"GxB_MIN_INT8_MONOID",
"GxB_MIN_INT16_MONOID",
"GxB_MIN_INT32_MONOID",
"GxB_MIN_INT64_MONOID",
"GxB_MIN_UINT8_MONOID",
"GxB_MIN_UINT16_MONOID",
"GxB_MIN_UINT32_MONOID",
"GxB_MIN_UINT64_MONOID",
"GxB_MIN_FP32_MONOID",
"GxB_MIN_FP64_MONOID",
"GxB_MAX_INT8_MONOID",
"GxB_MAX_INT16_MONOID",
"GxB_MAX_INT32_MONOID",
"GxB_MAX_INT64_MONOID",
"GxB_MAX_UINT8_MONOID",
"GxB_MAX_UINT16_MONOID",
"GxB_MAX_UINT32_MONOID",
"GxB_MAX_UINT64_MONOID",
"GxB_MAX_FP32_MONOID",
"GxB_MAX_FP64_MONOID",
"GxB_PLUS_INT8_MONOID",
"GxB_PLUS_INT16_MONOID",
"GxB_PLUS_INT32_MONOID",
"GxB_PLUS_INT64_MONOID",
"GxB_PLUS_UINT8_MONOID",
"GxB_PLUS_UINT16_MONOID",
"GxB_PLUS_UINT32_MONOID",
"GxB_PLUS_UINT64_MONOID",
"GxB_PLUS_FP32_MONOID",
"GxB_PLUS_FP64_MONOID",
"GxB_TIMES_INT8_MONOID",
"GxB_TIMES_INT16_MONOID",
"GxB_TIMES_INT32_MONOID",
"GxB_TIMES_INT64_MONOID",
"GxB_TIMES_UINT8_MONOID",
"GxB_TIMES_UINT16_MONOID",
"GxB_TIMES_UINT32_MONOID",
"GxB_TIMES_UINT64_MONOID",
"GxB_TIMES_FP32_MONOID",
"GxB_TIMES_FP64_MONOID",
"GxB_LOR_BOOL_MONOID",
"GxB_LAND_BOOL_MONOID",
"GxB_LXOR_BOOL_MONOID",
"GxB_LXNOR_BOOL_MONOID",
# "GxB_EQ_BOOL_MONOID", # XXX: I prefer this name to GrB_LXNOR_MONOID_BOOL
# Semirings
"GxB_PLUS_TIMES_INT8",
"GxB_PLUS_TIMES_INT16",
"GxB_PLUS_TIMES_INT32",
"GxB_PLUS_TIMES_INT64",
"GxB_PLUS_TIMES_UINT8",
"GxB_PLUS_TIMES_UINT16",
"GxB_PLUS_TIMES_UINT32",
"GxB_PLUS_TIMES_UINT64",
"GxB_PLUS_TIMES_FP32",
"GxB_PLUS_TIMES_FP64",
"GxB_PLUS_MIN_INT8",
"GxB_PLUS_MIN_INT16",
"GxB_PLUS_MIN_INT32",
"GxB_PLUS_MIN_INT64",
"GxB_PLUS_MIN_UINT8",
"GxB_PLUS_MIN_UINT16",
"GxB_PLUS_MIN_UINT32",
"GxB_PLUS_MIN_UINT64",
"GxB_PLUS_MIN_FP32",
"GxB_PLUS_MIN_FP64",
"GxB_MIN_PLUS_INT8",
"GxB_MIN_PLUS_INT16",
"GxB_MIN_PLUS_INT32",
"GxB_MIN_PLUS_INT64",
"GxB_MIN_PLUS_UINT8",
"GxB_MIN_PLUS_UINT16",
"GxB_MIN_PLUS_UINT32",
"GxB_MIN_PLUS_UINT64",
"GxB_MIN_PLUS_FP32",
"GxB_MIN_PLUS_FP64",
"GxB_MIN_TIMES_INT8",
"GxB_MIN_TIMES_INT16",
"GxB_MIN_TIMES_INT32",
"GxB_MIN_TIMES_INT64",
"GxB_MIN_TIMES_UINT8",
"GxB_MIN_TIMES_UINT16",
"GxB_MIN_TIMES_UINT32",
"GxB_MIN_TIMES_UINT64",
"GxB_MIN_TIMES_FP32",
"GxB_MIN_TIMES_FP64",
"GxB_MIN_FIRST_INT8",
"GxB_MIN_FIRST_INT16",
"GxB_MIN_FIRST_INT32",
"GxB_MIN_FIRST_INT64",
"GxB_MIN_FIRST_UINT8",
"GxB_MIN_FIRST_UINT16",
"GxB_MIN_FIRST_UINT32",
"GxB_MIN_FIRST_UINT64",
"GxB_MIN_FIRST_FP32",
"GxB_MIN_FIRST_FP64",
"GxB_MIN_SECOND_INT8",
"GxB_MIN_SECOND_INT16",
"GxB_MIN_SECOND_INT32",
"GxB_MIN_SECOND_INT64",
"GxB_MIN_SECOND_UINT8",
"GxB_MIN_SECOND_UINT16",
"GxB_MIN_SECOND_UINT32",
"GxB_MIN_SECOND_UINT64",
"GxB_MIN_SECOND_FP32",
"GxB_MIN_SECOND_FP64",
"GxB_MIN_MAX_INT8",
"GxB_MIN_MAX_INT16",
"GxB_MIN_MAX_INT32",
"GxB_MIN_MAX_INT64",
"GxB_MIN_MAX_UINT8",
"GxB_MIN_MAX_UINT16",
"GxB_MIN_MAX_UINT32",
"GxB_MIN_MAX_UINT64",
"GxB_MIN_MAX_FP32",
"GxB_MIN_MAX_FP64",
"GxB_MAX_PLUS_INT8",
"GxB_MAX_PLUS_INT16",
"GxB_MAX_PLUS_INT32",
"GxB_MAX_PLUS_INT64",
"GxB_MAX_PLUS_UINT8",
"GxB_MAX_PLUS_UINT16",
"GxB_MAX_PLUS_UINT32",
"GxB_MAX_PLUS_UINT64",
"GxB_MAX_PLUS_FP32",
"GxB_MAX_PLUS_FP64",
"GxB_MAX_TIMES_INT8",
"GxB_MAX_TIMES_INT16",
"GxB_MAX_TIMES_INT32",
"GxB_MAX_TIMES_INT64",
"GxB_MAX_TIMES_UINT8",
"GxB_MAX_TIMES_UINT16",
"GxB_MAX_TIMES_UINT32",
"GxB_MAX_TIMES_UINT64",
"GxB_MAX_TIMES_FP32",
"GxB_MAX_TIMES_FP64",
"GxB_MAX_FIRST_INT8",
"GxB_MAX_FIRST_INT16",
"GxB_MAX_FIRST_INT32",
"GxB_MAX_FIRST_INT64",
"GxB_MAX_FIRST_UINT8",
"GxB_MAX_FIRST_UINT16",
"GxB_MAX_FIRST_UINT32",
"GxB_MAX_FIRST_UINT64",
"GxB_MAX_FIRST_FP32",
"GxB_MAX_FIRST_FP64",
"GxB_MAX_SECOND_INT8",
"GxB_MAX_SECOND_INT16",
"GxB_MAX_SECOND_INT32",
"GxB_MAX_SECOND_INT64",
"GxB_MAX_SECOND_UINT8",
"GxB_MAX_SECOND_UINT16",
"GxB_MAX_SECOND_UINT32",
"GxB_MAX_SECOND_UINT64",
"GxB_MAX_SECOND_FP32",
"GxB_MAX_SECOND_FP64",
"GxB_MAX_MIN_INT8",
"GxB_MAX_MIN_INT16",
"GxB_MAX_MIN_INT32",
"GxB_MAX_MIN_INT64",
"GxB_MAX_MIN_UINT8",
"GxB_MAX_MIN_UINT16",
"GxB_MAX_MIN_UINT32",
"GxB_MAX_MIN_UINT64",
"GxB_MAX_MIN_FP32",
"GxB_MAX_MIN_FP64",
"GxB_LOR_LAND_BOOL",
"GxB_LAND_LOR_BOOL",
"GxB_LXOR_LAND_BOOL",
# "GxB_EQ_LOR_BOOL", # XXX: I prefer this name to GrB_LXNOR_LOR_SEMIRING_BOOL
# Old deprecated (probably already removed)
"GrB_eWiseMult_Vector_Semiring",
"GrB_eWiseMult_Vector_Monoid",
"GrB_eWiseMult_Vector_BinaryOp",
"GrB_eWiseMult_Matrix_Semiring",
"GrB_eWiseMult_Matrix_Monoid",
"GrB_eWiseMult_Matrix_BinaryOp",
"GrB_eWiseAdd_Vector_Semiring",
"GrB_eWiseAdd_Vector_Monoid",
"GrB_eWiseAdd_Vector_BinaryOp",
"GrB_eWiseAdd_Matrix_Semiring",
"GrB_eWiseAdd_Matrix_Monoid",
"GrB_eWiseAdd_Matrix_BinaryOp",
}
DEFINES = {
"GxB_STDC_VERSION",
"GxB_IMPLEMENTATION_MAJOR",
"GxB_IMPLEMENTATION_MINOR",
"GxB_IMPLEMENTATION_SUB",
"GxB_SPEC_MAJOR",
"GxB_SPEC_MINOR",
"GxB_SPEC_SUB",
"GxB_IMPLEMENTATION",
"GxB_SPEC_VERSION",
"GxB_INDEX_MAX",
"GRB_VERSION",
"GRB_SUBVERSION",
"GxB_NTHREADS",
"GxB_CHUNK",
"GxB_GPU_CONTROL",
"GxB_GPU_CHUNK",
"GxB_HYPERSPARSE",
"GxB_SPARSE",
"GxB_BITMAP",
"GxB_FULL",
"GxB_NBITMAP_SWITCH",
"GxB_ANY_SPARSITY",
"GxB_AUTO_SPARSITY",
"GxB_RANGE",
"GxB_STRIDE",
"GxB_BACKWARDS",
"GxB_BEGIN",
"GxB_END",
"GxB_INC",
}
CHAR_DEFINES = {
"GxB_IMPLEMENTATION_NAME",
"GxB_IMPLEMENTATION_DATE",
"GxB_SPEC_DATE",
"GxB_IMPLEMENTATION_ABOUT",
"GxB_IMPLEMENTATION_LICENSE",
"GxB_SPEC_ABOUT",
}
IGNORE_DEFINES = {
"GrB",
"GxB",
"CMPLX",
"CMPLXF",
"GB_PUBLIC",
"GRAPHBLAS_H",
"GrB_INVALID_HANDLE",
"GrB_NULL",
"GxB_SUITESPARSE_GRAPHBLAS",
"NMACRO",
# deprecated
"GxB_HYPER",
}
IGNORE_LINES = {
"GxB_cuda_calloc",
"GxB_cuda_malloc",
"GxB_cuda_free",
}
class VisitEnumTypedef(c_generator.CGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.results = []
def visit_Typedef(self, node):
rv = super().visit_Typedef(node)
if isinstance(node.type.type, c_ast.Enum):
self.results.append(rv + ";")
return rv
def get_ast(filename):
    fake_include = os.path.join(os.path.dirname(pycparser.__file__), "utils", "fake_libc_include")
ast = parse_file(filename, cpp_args=f"-I{fake_include}")
return ast
def get_groups(ast):
generator = c_generator.CGenerator()
lines = generator.visit(ast).splitlines()
seen = set()
groups = {}
vals = {x for x in lines if "extern GrB_Info GxB" in x} - seen
seen.update(vals)
groups["GxB methods"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "extern GrB_Info GrB" in x} - seen
seen.update(vals)
groups["GrB methods"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "extern GrB_Info GB" in x} - seen
seen.update(vals)
groups["GB methods"] = sorted(vals, key=sort_key)
missing_methods = {x for x in lines if "extern GrB_Info " in x} - seen
assert not missing_methods
vals = {x for x in lines if "extern GrB" in x} - seen
seen.update(vals)
groups["GrB objects"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "extern GxB" in x} - seen
seen.update(vals)
groups["GxB objects"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "extern const" in x and "GxB" in x} - seen
seen.update(vals)
groups["GxB const"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "extern const" in x and "GrB" in x} - seen
seen.update(vals)
groups["GrB const"] = sorted(vals, key=sort_key)
missing_const = {x for x in lines if "extern const" in x} - seen
assert not missing_const
vals = {x for x in lines if "typedef" in x and "GxB" in x and "(" not in x} - seen
seen.update(vals)
groups["GxB typedef"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "typedef" in x and "GrB" in x and "(" not in x} - seen
seen.update(vals)
groups["GrB typedef"] = sorted(vals, key=sort_key)
missing_typedefs = {x for x in lines if "typedef" in x and "GB" in x and "(" not in x} - seen
assert not missing_typedefs
assert all(x.endswith(";") for x in seen) # sanity check
g = VisitEnumTypedef()
_ = g.visit(ast)
enums = g.results
vals = {x for x in enums if "} GrB" in x}
for val in vals:
seen.update(val.splitlines())
groups["GrB typedef enums"] = sorted(vals, key=lambda x: sort_key(x.rsplit("}", 1)[-1]))
vals = {x for x in enums if "} GxB" in x}
for val in vals:
seen.update(val.splitlines())
groups["GxB typedef enums"] = sorted(vals, key=lambda x: sort_key(x.rsplit("}", 1)[-1]))
missing_enums = set(enums) - set(groups["GrB typedef enums"]) - set(groups["GxB typedef enums"])
assert not missing_enums
vals = {x for x in lines if "typedef" in x and "GxB" in x} - seen
seen.update(vals)
groups["GxB typedef funcs"] = sorted(vals, key=sort_key)
vals = {x for x in lines if "typedef" in x and "GrB" in x} - seen
assert not vals
groups["not seen"] = sorted(set(lines) - seen, key=sort_key)
for group in groups["not seen"]:
assert "extern" not in group, group
unhandled = set()
for line in groups["not seen"]:
if "GrB" in line or "GxB" in line:
for item in IGNORE_LINES:
if item in line:
break
else:
unhandled.add(line)
if unhandled:
raise ValueError(
"\n===================================\n"
"Unhandled functions with GrB or GxB\n"
"-----------------------------------\n "
+ "\n ".join(sorted(unhandled))
+ "\n==================================="
)
return groups
def get_group_info(groups, ast, *, skip_complex=False):
rv = {}
def handle_constants(group):
for line in group:
extern, const, ctype, name = line.split(" ")
assert name.endswith(";")
name = name[:-1].replace("(void)", "()")
assert extern == "extern"
assert const == "const"
if name in DEPRECATED:
continue
if skip_complex and has_complex(line):
continue
info = {
"text": line,
}
yield info
rv["GrB const"] = list(handle_constants(groups["GrB const"]))
rv["GxB const"] = list(handle_constants(groups["GxB const"]))
def handle_objects(group):
for line in group:
extern, ctype, name = line.split(" ")
assert name.endswith(";")
name = name[:-1]
assert extern == "extern"
if name in DEPRECATED:
continue
if skip_complex and has_complex(line):
continue
info = {
"text": line,
}
yield info
rv["GrB objects"] = list(handle_objects(groups["GrB objects"]))
rv["GxB objects"] = list(handle_objects(groups["GxB objects"]))
def handle_enums(group):
for text in group:
text = text.replace("enum \n", "enum\n")
typedef, bracket, *fields, name = text.splitlines()
assert typedef.strip() == "typedef enum"
assert bracket == "{"
assert name.startswith("}")
assert name.endswith(";")
name = name[1:-1].strip()
if name in DEPRECATED:
continue
if skip_complex and has_complex(name):
continue
# Break this open so we can remove unwanted deprecated fields.
# Instead of traversing the AST, munging string is good enough.
typedef, bracket, *fields, cname = text.splitlines()
typedef = typedef.strip()
assert typedef.strip() == "typedef enum"
assert bracket == "{"
assert cname.startswith("}")
assert cname.endswith(";")
new_fields = []
for field in fields:
if field.endswith(","):
field = field[:-1]
field = field.strip()
cfieldname, eq, val = field.split(" ")
assert eq == "="
if cfieldname in DEPRECATED:
continue
if skip_complex and has_complex(cfieldname):
continue
new_fields.append(field)
if not new_fields:
continue
lines = [typedef, bracket]
for field in new_fields:
lines.append(f" {field},")
lines[-1] = lines[-1][:-1] # remove last comma
lines.append(cname)
info = {
"orig_text": text,
"text": "\n".join(lines),
}
yield info
rv["GrB typedef enums"] = list(handle_enums(groups["GrB typedef enums"]))
rv["GxB typedef enums"] = list(handle_enums(groups["GxB typedef enums"]))
def handle_typedefs(group):
for line in group:
typedef, *ctypes, name = line.split(" ")
assert typedef == "typedef"
assert name.endswith(";")
name = name[:-1]
if name in DEPRECATED:
continue
if skip_complex and has_complex(line):
continue
info = {
"text": line,
}
yield info
rv["GrB typedef"] = list(handle_typedefs(groups["GrB typedef"]))
rv["GxB typedef"] = list(handle_typedefs(groups["GxB typedef"]))
def handle_typedef_funcs(group):
for line in group:
assert line.endswith(";") and line.startswith("typedef")
if skip_complex and has_complex(line):
continue
info = {
"text": line,
}
yield info
rv["GxB typedef funcs"] = list(handle_typedef_funcs(groups["GxB typedef funcs"]))
class FuncDeclVisitor(c_ast.NodeVisitor):
def __init__(self):
self.functions = []
def visit_Decl(self, node):
if isinstance(node.type, c_ast.FuncDecl) and node.storage == ["extern"]:
self.functions.append(node)
def handle_function_node(node):
if generator.visit(node.type.type) != "GrB_Info":
raise ValueError(generator.visit(node))
if node.name in DEPRECATED:
return
text = generator.visit(node)
text += ";"
if skip_complex and has_complex(text):
return
if "GrB_Matrix" in text:
group = "matrix"
elif "GrB_Vector" in text:
group = "vector"
elif "GxB_Scalar" in text or "GrB_Scalar" in text:
group = "scalar"
else:
group = node.name.split("_", 2)[1]
group = {
# Apply our naming scheme
"GrB_Matrix": "matrix",
"GrB_Vector": "vector",
"GxB_Scalar": "scalar",
"SelectOp": "selectop",
"BinaryOp": "binary",
"Desc": "descriptor",
"Descriptor": "descriptor",
"Monoid": "monoid",
"Semiring": "semiring",
"Type": "type",
"UnaryOp": "unary",
"IndexUnaryOp": "indexunary",
# "everything else" is "core"
"getVersion": "core",
"Global": "core",
"cuda": "core",
"finalize": "core",
"init": "core",
"wait": "core",
"deserialize": "core",
}[group]
return {
"name": node.name,
"group": group,
"node": node,
"text": text,
}
generator = c_generator.CGenerator()
visitor = FuncDeclVisitor()
visitor.visit(ast)
grb_nodes = [node for node in visitor.functions if node.name.startswith("GrB_")]
gxb_nodes = [node for node in visitor.functions if node.name.startswith("GxB_")]
gb_nodes = [node for node in visitor.functions if node.name.startswith("GB_")]
assert len(grb_nodes) == len(groups["GrB methods"])
assert len(gxb_nodes) == len(groups["GxB methods"])
assert len(gb_nodes) == len(groups["GB methods"])
grb_funcs = (handle_function_node(node) for node in grb_nodes)
gxb_funcs = (handle_function_node(node) for node in gxb_nodes)
gb_funcs = (handle_function_node(node) for node in gb_nodes)
grb_funcs = [x for x in grb_funcs if x is not None]
gxb_funcs = [x for x in gxb_funcs if x is not None]
gb_funcs = [x for x in gb_funcs if x is not None]
rv["GrB methods"] = sorted(grb_funcs, key=lambda x: sort_key(x["text"]))
rv["GxB methods"] = sorted(gxb_funcs, key=lambda x: sort_key(x["text"]))
rv["GB methods"] = sorted(gb_funcs, key=lambda x: sort_key(x["text"]))
for key in groups.keys() - rv.keys():
rv[key] = groups[key]
return rv
def parse_header(filename, *, skip_complex=False):
ast = get_ast(filename)
groups = get_groups(ast)
return get_group_info(groups, ast, skip_complex=skip_complex)
def create_header_text(groups, *, char_defines=None, defines=None):
if char_defines is None:
char_defines = CHAR_DEFINES
if defines is None:
defines = DEFINES
text = [AUTO]
text.append("/* GrB typedefs */")
for group in groups["GrB typedef"]:
text.append(group["text"])
text.append("")
text.append("/* GxB typedefs */")
for group in groups["GxB typedef"]:
text.append(group["text"])
text.append("")
text.append("/* GxB typedefs (functions) */")
for group in groups["GxB typedef funcs"]:
text.append(group["text"])
text.append("")
text.append("/* GrB enums */")
for group in groups["GrB typedef enums"]:
text.append(group["text"])
text.append("")
text.append("/* GxB enums */")
for group in groups["GxB typedef enums"]:
text.append(group["text"])
text.append("")
text.append("/* GrB consts */")
for group in groups["GrB const"]:
text.append(group["text"])
text.append("")
text.append("/* GxB consts */")
for group in groups["GxB const"]:
text.append(group["text"])
text.append("")
text.append("/* GrB objects */")
for group in groups["GrB objects"]:
if "GxB" not in group["text"]:
text.append(group["text"])
text.append("")
text.append("/* GrB objects (extended) */")
for group in groups["GrB objects"]:
if "GxB" in group["text"]:
text.append(group["text"])
text.append("")
text.append("/* GxB objects */")
for group in groups["GxB objects"]:
text.append(group["text"])
def handle_funcs(group):
groups = groupby("group", group)
for name in sorted(groups, key=sort_key):
yield ""
yield f"/* {name} */"
for info in groups[name]:
yield info["text"]
text.append("")
text.append("/****************")
text.append("* GrB functions *")
text.append("****************/")
text.extend(handle_funcs(groups["GrB methods"]))
text.append("")
text.append("/***************")
text.append("* GB functions *")
text.append("***************/")
text.extend(handle_funcs(groups["GB methods"]))
text.append("")
text.append("/****************")
text.append("* GxB functions *")
text.append("****************/")
text.extend(handle_funcs(groups["GxB methods"]))
text.append("")
text.append("/* int DEFINES */")
for item in sorted(defines, key=sort_key):
text.append(f"#define {item} ...")
text.append("")
text.append("/* char* DEFINES */")
for item in sorted(char_defines, key=sort_key):
text.append(f"extern char *{item}_STR;")
return text
def create_source_text(*, char_defines=None):
if char_defines is None:
char_defines = CHAR_DEFINES
text = [
AUTO,
'#include "GraphBLAS.h"',
]
for item in sorted(char_defines, key=sort_key):
text.append(f"char *{item}_STR = {item};")
return text
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--graphblas",
help="Path to GraphBLAS.h of SuiteSparse. Default will look in Python prefix path.",
default=os.path.join(sys.prefix, "include", "GraphBLAS.h"),
)
parser.add_argument(
"--show-skipped",
action="store_true",
help="If specified, then print the lines that were skipped when parsing the header file.",
)
args = parser.parse_args()
thisdir = os.path.dirname(__file__)
# copy the original to this file
graphblas_h = os.path.join(thisdir, "GraphBLAS-orig.h")
# after the preprocessor
processed_h = os.path.join(thisdir, "GraphBLAS-processed.h")
# final files used by cffi (with and without complex numbers)
final_h = os.path.join(thisdir, "suitesparse_graphblas.h")
final_no_complex_h = os.path.join(thisdir, "suitesparse_graphblas_no_complex.h")
source_c = os.path.join(thisdir, "source.c")
# Copy original file
print(f"Step 1: copy {args.graphblas} to {graphblas_h}")
if not os.path.exists(args.graphblas):
raise FileNotFoundError(f"File not found: {args.graphblas}")
shutil.copyfile(args.graphblas, graphblas_h)
# Run it through the preprocessor
print(f"Step 2: run preprocessor to create {processed_h}")
include = os.path.join(os.path.dirname(pycparser.__file__), "utils", "fake_libc_include")
command = (
f"gcc -nostdinc -E -I{include} {graphblas_h} "
f"| sed 's/ complex / _Complex /g' > {processed_h}"
)
res = subprocess.run(command, shell=True)
if res.returncode != 0:
raise RuntimeError("Subprocess command failed", res)
# Create final header file
print(f"Step 3: parse header file to create {final_h}")
groups = parse_header(processed_h, skip_complex=False)
text = create_header_text(groups)
with open(final_h, "w") as f:
f.write("\n".join(text))
# Create final header file (no complex)
print(f"Step 4: parse header file to create {final_no_complex_h}")
groups_no_complex = parse_header(processed_h, skip_complex=True)
text = create_header_text(groups_no_complex)
with open(final_no_complex_h, "w") as f:
f.write("\n".join(text))
# Create source
print(f"Step 5: create {source_c}")
text = create_source_text()
with open(source_c, "w") as f:
f.write("\n".join(text))
# Check defines
print("Step 6: check #define definitions")
with open(graphblas_h) as f:
text = f.read()
define_lines = re.compile(r".*?#define\s+\w+\s+")
define_pattern = re.compile(r"#define\s+\w+\s+")
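    # e.g. "   #define GxB_NTHREADS 5  // comment" -> the loop below strips the comment and
    # the "#define" prefix, leaving just the macro name "GxB_NTHREADS".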
defines = set()
for line in define_lines.findall(text):
line = line.split("//")[0].split("/*")[0]
defines.update(x[len("#define") :].strip() for x in define_pattern.findall(line))
extra_defines = (DEFINES | CHAR_DEFINES) - defines
if extra_defines:
# Should this raise? If it's a problem, it will raise when compiling.
print(
f"WARNING: the following #define values weren't found in {graphblas_h}: "
+ ", ".join(sorted(extra_defines))
)
unknown_defines = defines - DEFINES - CHAR_DEFINES - IGNORE_DEFINES
if unknown_defines:
raise ValueError(
f"Unknown #define values found in {graphblas_h}: " + ", ".join(sorted(unknown_defines))
)
print("Success!", "\N{ROCKET}")
if args.show_skipped:
print()
print(f"Showing lines from {processed_h} that were skipped when creating {final_h}:")
print("-" * 80)
for line in sorted(groups["not seen"], key=sort_key):
print(line)
if __name__ == "__main__":
main()
|
from functools import partial as p
import pytest
from tests.helpers.util import ensure_always, wait_for
pytestmark = [pytest.mark.kubernetes_events, pytest.mark.monitor_without_endpoints]
def has_event(fake_services, event_dict):
event_type = event_dict["reason"]
obj_kind = event_dict["involvedObjectKind"]
for event in fake_services.events:
if event.eventType == event_type:
for dim in event.dimensions:
if dim.key == "kubernetes_kind" and dim.value == obj_kind:
return True
return False
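# Example: has_event(agent.fake_services, {"reason": "Pulled", "involvedObjectKind": "Pod"})
# is True once the agent has emitted a "Pulled" event carrying a kubernetes_kind=Pod dimension.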
@pytest.mark.kubernetes
def test_k8s_events_with_whitelist(k8s_cluster):
config = """
monitors:
- type: kubernetes-events
whitelistedEvents:
- reason: Pulled
involvedObjectKind: Pod
- reason: Created
involvedObjectKind: Pod
- reason: Started
involvedObjectKind: Pod
"""
with k8s_cluster.run_agent(config) as agent:
for expected_event in [
{"reason": "Pulled", "involvedObjectKind": "Pod"},
{"reason": "Created", "involvedObjectKind": "Pod"},
{"reason": "Started", "involvedObjectKind": "Pod"},
]:
assert wait_for(p(has_event, agent.fake_services, expected_event)), (
"timed out waiting for event '%s'!" % expected_event
)
@pytest.mark.kubernetes
def test_k8s_events_without_whitelist(k8s_cluster):
config = """
monitors:
- type: kubernetes-events
"""
with k8s_cluster.run_agent(config) as agent:
assert ensure_always(lambda: not agent.fake_services.events, 30), "event received!"
|
# -*-coding:utf-8-*-
from __future__ import print_function
import numpy as np
import pickle
import os
# Single Layer Restricted Boltzmann Machine
class RBM:
def __init__(self, num_visible, num_hidden, learning_rate=0.1, path=None):
"""
        Constructor.
        :param num_visible: the number of visible units
        :param num_hidden: the number of hidden units
        :param learning_rate: the learning rate of the RBM
        :param path: the directory in which the parameters of this RBM are stored
        Class members:
        weights: the weight matrix of the RBM, of size (1 + num_visible) * (1 + num_hidden).
                 The first row holds the hidden biases, the first column holds the visible biases,
                 and the element at (0, 0) is always 0.
                 The rest of the matrix holds the weights of the edges between visible and hidden units.
        weightinc: the increment (change) applied to "weights" in every epoch of training
                   (persisted on disk as "weightsinc").
"""
self.num_hidden = num_hidden
self.num_visible = num_visible
self.learning_rate = learning_rate
self.path = path
        # Check whether saved parameters exist; if so, load "weights" and "weightsinc".
datafile = os.path.join(self.path, 'weights')
if os.path.isfile(datafile):
with open(datafile, 'rb') as fp:
self.weights = pickle.load(fp)
print("Load Weights Successfully!")
datafile = os.path.join(self.path, 'weightsinc')
with open(datafile, 'rb') as fp:
self.weightinc = pickle.load(fp)
print("Load WeightInc Successfully!")
else:
            # Initialize the weights using a Gaussian distribution with mean 0 and standard deviation 0.1.
self.weights = 0.1 * np.random.randn(self.num_visible, self.num_hidden)
            # Insert "weights" for the bias units into the first row and first column.
self.weights = np.insert(self.weights, 0, 0, axis=0)
self.weights = np.insert(self.weights, 0, 0, axis=1)
with open(datafile, 'wb') as fp:
pickle.dump(self.weights, fp)
print("Create Weights Successfully!")
            # Initialize "weightsinc" as a zero matrix.
self.weightinc = np.zeros([self.num_visible + 1, self.num_hidden + 1])
datafile = os.path.join(self.path, 'weightsinc')
with open(datafile, 'wb') as fp:
pickle.dump(self.weightinc, fp)
print("Create WeightInc Successfully!")
def train(self, batch_data, max_epochs=50):
"""
Train the RBM
        :param batch_data: training data, type: list of np.array;
                           every np.array is a matrix in which each row is one training example
                           consisting of the states of the visible units,
                           i.e. every np.array is one batch of the training set
        :param max_epochs: the maximum number of training epochs
"""
# Initialization
        # weightcost implements weight decay: weightcost * weights is subtracted from the
        # normal gradient before it is accumulated into weightsinc.
        # Typical values of weightcost range from 0.01 to 0.00001.
weightcost = 0.0002
# Momentum is a simple method for increasing the speed of learning when the objective function
# contains long, narrow and fairly straight ravines with a gentle but consistent gradient along the floor
# of the ravine and much steeper gradients up the sides of the ravine.
initialmomentum = 0.5
finalmomentum = 0.9
count = 0
for epoch in range(0, max_epochs):
errorsum = 0
for data in batch_data:
num_examples = data.shape[0]
                # Insert bias units of 1 into the first column.
data = np.insert(data, 0, 1, axis=1)
# Gibbs Sample
                # Sample the hidden units from the data.
                # (This is the "positive CD phase".)
pos_hidden_activations = np.dot(data, self.weights)
pos_hidden_probs = self._logistic(pos_hidden_activations)
                # Fix the bias unit.
pos_hidden_probs[:, 0] = 1
pos_hidden_states = pos_hidden_probs > np.random.rand(num_examples, self.num_hidden + 1)
pos_associations = np.dot(data.T, pos_hidden_probs)
                # Sample from the hidden units to reconstruct the visible units.
                # (This is the "negative CD phase".)
neg_visible_activations = np.dot(pos_hidden_states, self.weights.T)
neg_visible_probs = self._logistic(neg_visible_activations)
                # Fix the bias unit.
neg_visible_probs[:, 0] = 1
neg_hidden_activations = np.dot(neg_visible_probs, self.weights)
neg_hidden_probs = self._logistic(neg_hidden_activations)
                # Fix the bias unit.
neg_hidden_probs[:, 0] = 1
neg_associations = np.dot(neg_visible_probs.T, neg_hidden_probs)
error = np.sum((data - neg_visible_probs) ** 2)
errorsum = error + errorsum
                # Choose the momentum.
if epoch > 5:
momentum = finalmomentum
else:
momentum = initialmomentum
                # Update the weights.
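                # CD-1 update rule implemented below:
                #   weightinc <- momentum * weightinc
                #                + learning_rate * ((<v h>_data - <v h>_recon) / N - weightcost * W)
                # where the associations <v h> were computed above, N is the batch size, and W is
                # the weight matrix with its bias row/column zeroed (no weight decay on biases).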
delta = (pos_associations - neg_associations) / num_examples
vishid = self.weights[1:self.num_visible + 1, 1:self.num_hidden + 1]
vishid = np.insert(vishid, 0, 0, axis=0)
vishid = np.insert(vishid, 0, 0, axis=1)
self.weightinc = momentum * self.weightinc + self.learning_rate * (delta - weightcost * vishid)
                # Make sure the unused (0, 0) element stays zero.
self.weightinc[0, 0] = 0
self.weights += self.weightinc
self.weights[0, 0] = 0
count += 1
print("Count %s: error is %s" % (count, error))
                # Save the weights and the reconstruction error.
if self.path:
datafile = os.path.join(self.path, 'weights')
with open(datafile, 'wb') as fp:
pickle.dump(self.weights, fp)
datafile = os.path.join(self.path, 'count.txt')
with open(datafile, 'at') as fp:
fp.write("%s,%s\n" % (count, error))
if self.path:
datafile = os.path.join(self.path, 'epoch.txt')
with open(datafile, 'at') as fp:
fp.write("%s,%s\n" % (epoch, errorsum))
def run_visible_for_hidden(self, batch_data):
"""
Assuming the RBM has been trained (so that weights for the network have been learned),
run the network on a set of visible units, to get probabilities of the hidden units.
        :param batch_data: visible-unit data, type: list of np.array;
                           every np.array is a matrix in which each row is one example
                           consisting of the states of the visible units,
                           i.e. every np.array is one batch of visible-unit data
        :return: the (logistic) probabilities of the hidden units, type: list of np.array;
                 every np.array is one batch of hidden-unit probabilities, corresponding to the input
"""
batch_pos_hidden_probs = []
for data in batch_data:
            # Insert bias units of 1 into the first column of data.
data = np.insert(data, 0, 1, axis=1)
# Calculate the activations of the hidden units.
hidden_activations = np.dot(data, self.weights)
# Calculate the probabilities of turning the hidden units on.
hidden_probs = self._logistic(hidden_activations)
pos_hidden_probs = hidden_probs[:, 1:]
batch_pos_hidden_probs.append(pos_hidden_probs)
return batch_pos_hidden_probs
def run_hidden_for_visible(self, batch_data):
"""
Assuming the RBM has been trained (so that weights for the network have been learned),
run the network on a set of hidden units, to get probabilities of the visible units.
        :param batch_data: hidden-unit data, type: list of np.array;
                           every np.array is a matrix in which each row is one example
                           consisting of the states of the hidden units,
                           i.e. every np.array is one batch of hidden-unit data
        :return: the (logistic) probabilities of the visible units, type: list of np.array;
                 every np.array is one batch of visible-unit probabilities, corresponding to the input
"""
batch_neg_visible_probs = []
for data in batch_data:
# Insert bias units of 1 into the first column of data.
data = np.insert(data, 0, 1, axis=1)
# Calculate the activations of the visible units.
visible_activations = np.dot(data, self.weights.T)
# Calculate the probabilities of turning the visible units on.
visible_probs = self._logistic(visible_activations)
neg_visible_probs = visible_probs[:, 1:]
batch_neg_visible_probs.append(neg_visible_probs)
return batch_neg_visible_probs
def predict(self, batch_data, soft_max=10):
"""
Assuming the RBM has been trained (so that weights for the network have been learned),
        run the network on a set of test data to get recognition results (digit recognition only).
        This prediction method is designed for visible units that include the label (softmax) part.
        :param batch_data: visible-unit data, type: list of np.array;
                           every np.array is a matrix in which each row is one example
                           consisting of the states of the visible units,
                           i.e. every np.array is one batch of visible-unit data
        :param soft_max: the dimension of the label part, either 4 or 10;
                         4 means the label is encoded in binary,
                         10 means each dimension (0 or 1) indicates membership of the corresponding digit class
        :return: the classification results, type: list of list of int;
                 each inner list is one batch of predicted labels, corresponding to the input
"""
final_ans = []
for data in batch_data:
ans = []
num_examples = data.shape[0]
data = np.insert(data, 0, 1, axis=1)
data = np.split(data, num_examples)
for item in data:
hidden_activations = np.dot(item, self.weights)
vbias_energy = hidden_activations[0, 0]
hidden_probs = self._logfree(hidden_activations)
hidden_probs[:, 0] = 0
free_energy = - np.sum(hidden_probs) - vbias_energy
min_free_energy = free_energy
tmp_ans = 0
for number in range(1, 10):
tmpitem = item.copy()
if soft_max == 10:
tmpitem[0, self.num_visible - 9:self.num_visible + 1] = 0
tmpitem[0, self.num_visible - (9 - number)] = 1
else:
if soft_max == 4:
label = bin(number)
label = label[::-1]
length = len(label)
for i in range(0, length - 3 + 1):
tmpitem[0, self.num_visible + i - 3] = int(label[i])
if length != 6:
for i in range(1, 6 - length + 1):
tmpitem[0, self.num_visible - (6 - length) + i] = 0
hidden_activations = np.dot(tmpitem, self.weights)
vbias_energy = hidden_activations[0, 0]
hidden_probs = self._logfree(hidden_activations)
hidden_probs[:, 0] = 0
free_energy = - np.sum(hidden_probs) - vbias_energy
if free_energy < min_free_energy:
tmp_ans = number
min_free_energy = free_energy
ans.append(tmp_ans)
final_ans.append(ans)
return final_ans
@staticmethod
def _logistic(x):
# np.tanh is more stable than np.exp in numpy
# return 1.0 / (1 + np.exp(-x))
return .5 * (1 + np.tanh(.5 * x))
@staticmethod
def _logfree(x):
return np.log(1 + np.exp(x))
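# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the parameter
# directory "rbm_params" is a hypothetical example). It trains the RBM on a
# small random binary dataset and reads back the hidden-unit probabilities.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    if not os.path.isdir("rbm_params"):
        os.makedirs("rbm_params")
    demo_rbm = RBM(num_visible=6, num_hidden=3, learning_rate=0.1, path="rbm_params")
    demo_batches = [np.random.randint(0, 2, size=(10, 6)).astype(float) for _ in range(2)]
    demo_rbm.train(demo_batches, max_epochs=5)
    hidden_probs = demo_rbm.run_visible_for_hidden(demo_batches)
    print("hidden-unit probabilities of the first batch:", hidden_probs[0].shape)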
|
import shutil
import os
import subprocess
'''
Building Executables
'''
print('Started build...')
build = subprocess.Popen(["pyinstaller", "--onefile", "--windowed", "--icon=logo.ico", "Pic2Text.py"],
stdin =subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
bufsize=0)
# PyInstaller does not read shell commands from stdin; just close the pipe so the
# build cannot block waiting for input (pass --noconfirm to skip overwrite prompts).
build.stdin.close()
# Fetch output
for line in build.stdout:
print(line.strip())
print("Completed build !!!")
FOLDERS = ['ui','ICONS','Tesseract-OCR']
'''
Copying files to dist
'''
print('Started copying files to dist...')
try:
for folder in FOLDERS:
if os.path.isdir('dist/'+folder):
shutil.rmtree('dist/'+folder)
print('Removed dist/{}'.format(folder))
shutil.copytree(folder,'dist/'+folder)
print('Copied {} to "dist"'.format(folder))
    print('Completed copying files !!!')
except Exception as e:
print('Error while copying files to dist')
|
"""Plot a histogram."""
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
from util import get_data, plot_data, compute_daily_returns
def test_run():
# Read data
dates = pd.date_range('2009-01-01', '2012-12-31') # date range as index
symbols = ['SPY']
df = get_data(symbols, dates) # get data for each symbol
plot_data(df)
# Compute daily returns
daily_returns = compute_daily_returns(df)
plot_data(daily_returns, title = "Daily returns", ylabel = "Daily returns")
# Plot a histogram
daily_returns.hist(bins = 20)
# Get mean and standard deviation
mean = daily_returns['SPY'].mean()
    print("mean=", mean)
std = daily_returns['SPY'].std()
plt.axvline(mean, color = 'w', linestyle = 'dashed', linewidth = 2)
plt.axvline(std, color = 'r', linestyle = 'dashed', linewidth = 2)
plt.axvline(-std, color = 'r', linestyle = 'dashed', linewidth = 2)
plt.show()
# Compute kurtosis
    print(daily_returns.kurtosis())
if __name__ == "__main__":
test_run()
|
from rest_framework import serializers
from .models import Posts
class PostSerializer(serializers.ModelSerializer):
slug = serializers.SlugField(read_only=True)
    # CurrentUserDefault is a field default, not a field; assigning the class itself is a
    # no-op in DRF. HiddenField(default=CurrentUserDefault()) is the documented way to fill
    # create_by from the authenticated request user.
    create_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = Posts
fields = ['id', 'title', 'content', 'image', 'create_by', 'slug']
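# Usage sketch (illustrative, not part of the original app): CurrentUserDefault reads the user
# from the serializer context, so pass the request when instantiating the serializer manually:
#     serializer = PostSerializer(data=request.data, context={"request": request})
#     serializer.is_valid(raise_exception=True)
#     serializer.save()
# DRF's generic views and viewsets add the request to the context automatically.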
|
r"""*Single-source version number for* ``stdio_mgr``.
``stdio_mgr`` provides a context manager for convenient
mocking and/or wrapping of ``stdin``/``stdout``/``stderr``
interactions.
**Author**
Brian Skinn ([email protected])
**File Created**
18 Mar 2019
**Copyright**
\(c) Brian Skinn 2018-2019
**Source Repository**
http://www.github.com/bskinn/stdio-mgr
**Documentation**
See README.rst at the GitHub repository
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
__version__ = "2.0.dev1" # pragma: no mutate
|
import pyqt_designer_plugin_entry_points
print("* pyqt_designer_plugin_entry_points hook *")
globals().update(**pyqt_designer_plugin_entry_points.enumerate_widgets())
print(pyqt_designer_plugin_entry_points.connect_events())
|
# Generated by Django 2.1.2 on 2021-12-04 16:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instrumento', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='evaluacion',
name='puntaje',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='evaluacion',
name='resultado',
field=models.CharField(default=0, max_length=50),
preserve_default=False,
),
]
|
'''
Created on 2013-11-24
@author: Nich
'''
import unittest
from objects.component_manager import ComponentManager, ArrayComponentSource
from objects.node_factory import NodeFactoryDB
class TestComponent0(object):
__compname__ = "test_component0"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent1(object):
__compname__ = "test_component1"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent2(object):
__compname__ = "test_component2"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent3(object):
__compname__ = "test_component3"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent4(object):
__compname__ = "test_component4"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent5(object):
__compname__ = "test_component5"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent7(object):
__compname__ = "test_component7"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent8(object):
__compname__ = "test_component8"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class TestComponent9(object):
__compname__ = "test_component9"
def __init__(self, entity_id, some_string=None):
self.entity_id = entity_id
self.some_string = some_string
class Test(unittest.TestCase):
def setUp(self):
self.components1 = {"test_component0": TestComponent0,
"test_component1": TestComponent1,
"test_component2": TestComponent2,
"test_component3": TestComponent3,
"test_component8": TestComponent8,
}
self.components2 = {
"test_component4": TestComponent4,
"test_component5": TestComponent5,
"test_component7": TestComponent7,
"test_component9": TestComponent9,
}
self.component_source1 = ArrayComponentSource(self.components1)
self.component_source2 = ArrayComponentSource(self.components2)
self.component_manager = ComponentManager(
[self.component_source1, self.component_source2])
# all components
self.test_entity1 = self.component_manager.create_entity({"test_component0": {"some_string": "E1C0"},
"test_component1": {"some_string": "E1C1"},
"test_component2": {"some_string": "E1C2"},
"test_component3": {"some_string": "E1C3"},
"test_component4": {"some_string": "E1C4"},
})
# no components
self.test_entity2 = self.component_manager.create_entity({})
# even components
self.test_entity3 = self.component_manager.create_entity({
"test_component0": {"some_string": "E3C0"},
"test_component2": {"some_string": "E3C2"},
"test_component4": {"some_string": "E3C4"},
})
# odd components
self.test_entity4 = self.component_manager.create_entity({"test_component1": {"some_string": "E4C1"},
"test_component3": {"some_string": "E4C3"},
})
# copy of entity1 to make sure that they stay distinct
self.test_entity5 = self.component_manager.create_entity({"test_component0": {"some_string": "E5C0"},
"test_component1": {"some_string": "E5C1"},
"test_component2": {"some_string": "E5C2"},
"test_component3": {"some_string": "E5C3"},
"test_component4": {"some_string": "E5C4"},
})
# Component only in second source
self.test_entity6 = self.component_manager.create_entity(
{'test_component7': {"some_string": "E6C7"}})
self.test_entity7 = self.component_manager.create_entity(
{'test_component3': {"some_string": "E7C7"}, 'test_component7': {'some_string': "E7C3"}})
def tearDown(self):
pass
def testGetComponentWithNoEntitiesReturnsNone(self):
es = self.component_manager.get_entities_with_components(
["test_component8"])
self.assertEqual(len(es), 0)
es = self.component_manager.get_entities_with_components(
["test_component9"])
self.assertEqual(len(es), 0)
es = self.component_manager.get_entities_with_components(
["test_component8", 'test_component7'])
self.assertEqual(len(es), 0)
es = self.component_manager.get_entities_with_components(
["test_component7", 'test_component8'])
self.assertEqual(len(es), 0)
es = self.component_manager.get_entities_with_components(
["test_component3", 'test_component9'])
self.assertEqual(len(es), 0)
es = self.component_manager.get_entities_with_components(
["test_component9", 'test_component3'])
self.assertEqual(len(es), 0)
def testGetEntityFromOneComponent(self):
es = self.component_manager.get_entities_with_components(
["test_component7"])
self.assertEqual(len(es), 2)
def testGetEntityFromDifferentSources(self):
es = self.component_manager.get_entities_with_components(
['test_component7', 'test_component3'])
self.assertEqual(len(es), 1)
es2 = self.component_manager.get_entities_with_components(
['test_component3', 'test_component7'])
self.assertEqual(len(es2), 1)
def testAddGetComponent(self):
self.component_manager.add_component_to_object(
"test_component0", self.test_entity2.id, {"some_string": "test1"})
comp = self.component_manager.get_component(
self.test_entity2.id, "test_component0")
self.assertEqual(comp.some_string, "test1")
def testGetSupportedSubset(self):
self.assertEqual(sorted(["test_component0", "test_component3"]), sorted(self.component_source1.get_supported_subset(
["test_component0", "test_component6", "test_component3", "test_component7"])))
def testHas(self):
self.assertTrue(
self.component_manager.has_component("test_component0"))
self.assertTrue(
self.component_manager.has_component("test_component1"))
self.assertTrue(
self.component_manager.has_component("test_component2"))
self.assertTrue(
self.component_manager.has_component("test_component3"))
self.assertTrue(
self.component_manager.has_component("test_component4"))
self.assertTrue(
self.component_manager.has_component("test_component5"))
self.assertFalse(
self.component_manager.has_component("test_component6"))
def testGetEntitiesForComponent(self):
entities = [self.component_manager.create_entity(
{}) for _ in range(10)]
even_entities = []
odd_entities = []
for i, e in enumerate(entities):
counter = i % 2
self.component_manager.add_component_to_object(
"test_component" + str(0 + counter), e.id, None)
self.component_manager.add_component_to_object(
"test_component" + str(2 + counter), e.id, None)
self.component_manager.add_component_to_object(
"test_component" + str(4 + counter), e.id, None)
if counter == 0:
even_entities.append(e.id)
else:
odd_entities.append(e.id)
print(entities)
print(sorted(self.component_manager.get_entities_for_component("test_component2")))
print(even_entities)
self.assertTrue(set(even_entities).issubset(
set(self.component_manager.get_entities_for_component("test_component2"))))
self.assertTrue(set(odd_entities).issubset(
set(self.component_manager.get_entities_for_component("test_component5"))))
def test_remove_component(self):
entity1 = self.component_manager.create_entity({})
self.component_manager.add_component_to_object(
"test_component0", entity1.id, {"some_string": "test1"})
comp = self.component_manager.get_component(
entity1.id, "test_component0")
self.assertEqual(comp.some_string, "test1")
self.component_manager.remove_component("test_component0", entity1.id)
with self.assertRaises(AttributeError):
self.component_manager.get_component(entity1.id, "test_component0")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
import os
import time
import numpy as np
import pandas
import pandas as pd
from scripts.MADDPG.maddpg import MADDPG
from scripts.MADDPG.buffer import MultiAgentReplayBuffer
# from scripts.MADDPG_original.maddpg import MADDPG
# from scripts.MADDPG_original.buffer import MultiAgentReplayBuffer
from make_env import make_env
from scripts.MADDPG.edge_env import EdgeEnv
pandas.set_option('display.max_columns', None) # display all columns
def obs_list_to_state_vector(observation):
"""convert several ndarrays to one ndarray"""
state = np.array([])
for obs in observation:
state = np.concatenate([state, obs])
return state
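# Example: obs_list_to_state_vector([np.array([1., 2.]), np.array([3.])]) -> array([1., 2., 3.]);
# this flat joint state is what gets stored in the replay buffer for the centralized critic.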
if __name__ == '__main__':
debug = False
# debug = True
evaluate = False
# evaluate = True
# scenario = 'simple'
scenario = 'edge_cloud'
# print something every 500 games
PRINT_INTERVAL = 500
N_GAMES = 80000
print(f"Run {N_GAMES} episodes in total")
    # the game does not have a terminal state, so we cap the number of steps per episode
MAX_STEPS = 20
total_steps_cntr = 0
score_history = []
sw_history_om = []
    best_score = 10000  # only save the model once the 100-episode average score exceeds this value
# parameters of fog nodes
h_r = 5
l_r = 3
l_c = 2
n_c = 2.5
h_c = 3
avg_resource_capacity = {0: [h_r, h_r, h_r]}
avg_unit_cost = {0: [l_c, l_c, l_c]}
env = EdgeEnv(avg_resource_capacity, avg_unit_cost, n_nodes=1,
n_timesteps=10, n_tasks=500, max_steps=MAX_STEPS,
n_actions=2, p_high_value_tasks=0.2)
n_agents = env.n_nodes
actor_dims = []
for i in range(n_agents):
actor_dims.append(env.observation_space[i].shape[0])
# print(env.observation_space[i])
# exit()
critic_dims = sum(actor_dims)
# action space is a list of arrays, assume each agent has same action space
n_actions = env.n_actions
# print(env.action_space[0])
# exit()
# print(env.action_space[0].shape[0])
# exit()
# print(f"actor_dims = {actor_dims}")
# print(f"critic_dims = {critic_dims}")
print(f"number of agents = {n_agents}")
print(f"number of actions = {n_actions}")
maddpg_agents = MADDPG(actor_dims, critic_dims, n_agents, n_actions,
fc1=64, fc2=64, alpha=0.01, beta=0.01, scenario=scenario,
chkpt_dir='tmp/maddpg/')
memory = MultiAgentReplayBuffer(int(1e6), critic_dims, actor_dims,
n_actions, n_agents, batch_size=1024)
if evaluate:
maddpg_agents.load_checkpoint()
avg_sw_df = pd.DataFrame(columns=['episode_ID', 'avg_sw'])
if debug:
env.verbose = True # print the details of process
N_GAMES = 3
else:
env.verbose = False
for i in range(N_GAMES):
# for i in range(1):
obs, om_sw = env.reset()
if env.verbose:
print("df_tasks:")
print(env.df_tasks.head(20))
print(env.df_nodes)
# exit()
score = 0
done = False
episode_step_cntr = 0
node_0_actions = []
while not done:
if evaluate:
env.render()
# time.sleep(0.1) # to slow down the action for the video
actions_probs = maddpg_agents.choose_action(obs)
# choose the action according to the probabilities
actions = []
for actions_prob in actions_probs:
s = sum(actions_prob)
p = [i / s for i in actions_prob]
# a = np.random.choice(n_actions, 1, p=action)
action = np.random.choice(n_actions, 1, p=p)
actions.append(action[0]) # action in {1,2,...,10}
node_0_actions.append(actions[0])
# the actions are greater than one because of noises
# actions = np.concatenate(actions)
# print(f"actions_probs = {actions_probs}")
# print(f"actions = {actions}")
# exit()
obs_, reward, done, sw_increase = env.step(actions)
reward = reward * n_agents
# print(total_steps_cntr)
# print(f"sw_increase = {reward}")
if episode_step_cntr >= MAX_STEPS - 1:
done = True
else:
state = obs_list_to_state_vector(obs)
state_ = obs_list_to_state_vector(obs_)
memory.store_transition(obs, state, actions_probs, reward, obs_,
state_, done)
# print(f"store transition:")
# print(f"current observation = {obs}")
# print(f"next observation = {obs_}")
# print(f"current state = {state}")
# print(f"next state = {state_}")
# print(f"actions = {actions_probs}")
# print(f"reward = {reward}")
# exit()
# do not learn when evaluate, learn every 100 steps
if total_steps_cntr % 100 == 0 and not evaluate:
maddpg_agents.learn(memory)
# set the current state to new state
obs = obs_
score += sw_increase
total_steps_cntr += 1
episode_step_cntr += 1
node_0_actions_df = pandas.DataFrame(node_0_actions,
columns=['action_of_node_0'])
# print(f"social welfare of episode {i} = {score}")
# print(f"social welfare (achieved by OM) = {om_sw}")
sw_history_om.append(om_sw)
score_history.append(score)
# average score of previous 100 games
avg_score = np.mean(score_history[-100:])
avg_sw_om = np.mean(sw_history_om[-100:])
# print("score_history")
# print(score_history)
# print("avg_score")
# print(avg_score)
if env.verbose:
print('episode', i,
'social welfare by RL {:.1f}'.format(score))
print('episode', i,
'social welfare by OM {:.1f}'.format(om_sw))
if not evaluate:
if avg_score > best_score:
maddpg_agents.save_checkpoint()
best_score = avg_score
if i % PRINT_INTERVAL == 0 and i > 0:
print('episode', i,
'average social welfare by RL {:.1f}'.format(avg_score))
print('episode', i,
'average social welfare by OM {:.1f}'.format(avg_sw_om))
# print actions every * episodes
# print("actions:")
# print(actions)
part_tasks = env.df_tasks['valuation_coefficient']
part_tasks = part_tasks[0:MAX_STEPS + 1]
# print("part_tasks:")
# print(part_tasks)
# print("actions of node 0:")
# print(node_0_actions_df)
# # print actions of node 0 (the high-capacity node)
# watch_actions_df = pd.DataFrame(
# columns=['valuation_coefficient', 'node_0_action'])
# watch_actions_df['valuation_coefficient'] = part_tasks
# watch_actions_df['node_0_action'] = node_0_actions_df
# print(watch_actions_df)
# # exit()
df = pd.DataFrame({'episode_ID': [i],
'avg_sw': [avg_score]})
avg_sw_df = avg_sw_df.append(df, ignore_index=True)
if i >= 10000:
outdir = '/Users/fan/OneDrive - University of Southampton/Chandler\'s Projects/Edge-Cloud-Resource-Allocation-Using-MARL-and-Auction/scripts/MADDPG/tmp'
outname = 'average_social_welfare.csv'
fullname = os.path.join(outdir, outname)
print("... saving to .csv file ...")
avg_sw_df.to_csv(fullname, index=False)
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from fixture.session import Session
from fixture.check_profile import Check
class Fixture:
def __init__(self, base_url, browser="Safari"):
if browser == "Safari":
self.driver = webdriver.Safari()
elif browser == "Chrome":
self.driver = webdriver.Chrome()
elif browser == "Firefox":
self.driver = webdriver.Firefox()
else:
raise ValueError("Unrecognized browser %s" % browser)
browser = self.driver
browser.implicitly_wait(10)
browser.base_url = base_url
self.session = Session(self)
self.check = Check(self)
browser.maximize_window()
browser.delete_all_cookies()
def is_valid(self):
try:
self.driver.current_url
return True
        except Exception:
return False
def open_home_page(self):
browser = self.driver
browser.get(browser.base_url)
def destroy(self):
self.driver.quit()
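# Usage sketch (illustrative; the URL is a placeholder):
#     app = Fixture("https://example.com", browser="Chrome")
#     app.open_home_page()
#     ...
#     app.destroy()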
|
from __future__ import absolute_import, print_function
import argparse
import json
import logging
import os
import random
import sys
import boto
log = logging.getLogger("nameq.dump")
def dump_hosts(s3bucket, s3prefix, filter_features=None, single=False, s3options=None):
if s3options is None:
s3options = {}
if s3prefix and not s3prefix.endswith("/"):
s3prefix += "/"
entries = []
error = None
conn = boto.connect_s3(**s3options)
bucket = conn.get_bucket(s3bucket, validate=False)
for key in bucket.list(s3prefix):
if key.name == s3prefix:
continue
data = key.get_contents_as_string()
try:
entry_features = set(json.loads(data).get("features", ()))
except (TypeError, ValueError, KeyError) as e:
log.error("%s: %s", key.name, e)
error = e
else:
if not filter_features or filter_features & entry_features:
entries.append(key.name[len(s3prefix):])
if not entries and error:
raise error
if single:
entries = [random.choice(entries)]
return entries
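# Example (bucket/prefix names are placeholders): dump_hosts("my-bucket", "nameq/", {"web"})
# returns the key names under "nameq/" whose JSON "features" list includes "web".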
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--single", action="store_true", help="print at most one entry at random")
parser.add_argument("--s3host", help="S3 endpoint hostname")
parser.add_argument("s3location", help="s3bucket/s3prefix")
parser.add_argument("feature", nargs="*", help="feature names")
args = parser.parse_args()
if "/" in args.s3location:
bucket, prefix = args.s3location.split("/", 1)
else:
bucket, prefix = args.s3location, ""
progname = os.path.basename(sys.argv[0])
log_handler = logging.StreamHandler()
log_handler.setFormatter(logging.Formatter(progname + ": %(message)s"))
log.addHandler(log_handler)
log.setLevel(logging.INFO)
s3options = {}
if args.s3host:
s3options["host"] = args.s3host
for entry in dump_hosts(bucket, prefix, set(args.feature), args.single, s3options):
print(entry)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(1)
|
import json
import matplotlib.pyplot as plt
f_name = "pos_cur.json"
data = None
with open(f_name, 'r') as file:
data = json.load(file)
f = data["forward"]
r = data["reverse"]
print("len(f): %d\tlen(r): %d" % (len(f), len(r)))
p_f = []
c_f = []
for d in f:
p_f.append(d[0])
c_f.append(d[1])
plt.plot(p_f, c_f)
plt.show()
|
"""This script collates all of the hand sourced enthalpy of vaporization data."""
import pandas
from nonbonded.library.models.datasets import Component, DataSetEntry
from openff.evaluator import substances
def source_enthalpy_of_vaporization() -> pandas.DataFrame:
"""Returns a list of hand sourced enthalpy of vaporization data
entries"""
data_entries = [
# Formic Acid
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="OC=O", mole_fraction=1.0)],
value=46.3,
std_error=0.25,
doi="10.3891/acta.chem.scand.24-2612",
),
# Acetic Acid
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(O)=O", mole_fraction=1.0)],
value=51.6,
std_error=0.75,
doi="10.3891/acta.chem.scand.24-2612",
),
# Propionic Acid
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCC(O)=O", mole_fraction=1.0)],
value=55,
std_error=1,
doi="10.3891/acta.chem.scand.24-2612",
),
# Butyric Acid
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCC(O)=O", mole_fraction=1.0)],
value=58,
std_error=2,
doi="10.3891/acta.chem.scand.24-2612",
),
# Isobutyric Acid
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)C(O)=O", mole_fraction=1.0)],
value=53,
std_error=2,
doi="10.3891/acta.chem.scand.24-2612",
),
# Methanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CO", mole_fraction=1.0)],
value=37.83,
std_error=0.11349,
doi="10.1016/0378-3812(85)90026-3",
),
# Ethanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCO", mole_fraction=1.0)],
value=42.46,
std_error=0.12738,
doi="10.1016/0378-3812(85)90026-3",
),
# 1-Propanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCO", mole_fraction=1.0)],
value=47.5,
std_error=0.1425,
doi="10.1016/0378-3812(85)90026-3",
),
# Isopropanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)O", mole_fraction=1.0)],
value=45.48,
std_error=0.13644,
doi="10.1016/0378-3812(85)90026-3",
),
# n-Butanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCO", mole_fraction=1.0)],
value=52.42,
std_error=0.15726,
doi="10.1016/0378-3812(85)90026-3",
),
# Isobutanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)CO", mole_fraction=1.0)],
value=50.89,
std_error=0.15267,
doi="10.1016/0378-3812(85)90026-3",
),
# t-butanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)(C)O", mole_fraction=1.0)],
value=46.75,
std_error=0.14025,
doi="10.1016/0378-3812(85)90026-3",
),
# n-pentanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCO", mole_fraction=1.0)],
value=44.36,
std_error=0.13308,
doi="10.1016/0378-3812(85)90026-3",
),
# 1-hexanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCCO", mole_fraction=1.0)],
value=61.85,
std_error=0.2,
doi="10.1016/0021-9614(77)90202-6",
),
# 1-heptanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCCCO", mole_fraction=1.0)],
value=66.81,
std_error=0.2,
doi="10.1016/0021-9614(77)90202-6",
),
# 1-octanol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCCCCO", mole_fraction=1.0)],
value=70.98,
std_error=0.42,
doi="10.1016/0021-9614(77)90202-6",
),
# Propyl formate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCOC=O", mole_fraction=1.0)],
value=37.49,
std_error=0.07498,
doi="10.1135/cccc19803233",
),
# Butyl formate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCOC=O", mole_fraction=1.0)],
value=41.25,
std_error=0.0825,
doi="10.1135/cccc19803233",
),
# Methyl acetate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="COC(C)=O", mole_fraction=1.0)],
value=32.3,
std_error=0.0646,
doi="10.1135/cccc19803233",
),
# Ethyl acetate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCOC(C)=O", mole_fraction=1.0)],
value=35.62,
std_error=0.07124,
doi="10.1135/cccc19803233",
),
# Propyl acetate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCOC(C)=O", mole_fraction=1.0)],
value=39.83,
std_error=0.07966,
doi="10.1135/cccc19803233",
),
# Methyl propionate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCC(=O)OC", mole_fraction=1.0)],
value=35.85,
std_error=0.0717,
doi="10.1135/cccc19803233",
),
# Ethyl propionate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCOC(=O)CC", mole_fraction=1.0)],
value=39.25,
std_error=0.0785,
doi="10.1135/cccc19803233",
),
# Butyl acetate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=313.5,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCOC(C)=O", mole_fraction=1.0)],
value=42.96,
std_error=0.08592,
doi="10.1135/cccc19803233",
),
# Propyl propionate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=313.5,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCOC(=O)CC", mole_fraction=1.0)],
value=42.14,
std_error=0.08428,
doi="10.1135/cccc19803233",
),
# Methyl Butanoate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCC(=O)OC", mole_fraction=1.0)],
value=40.1,
std_error=0.4,
doi="10.1007/BF00653098",
),
# Methyl Pentanoate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCC(=O)OC", mole_fraction=1.0)],
value=44.32,
std_error=0.5,
doi="10.1007/BF00653098",
),
# Ethyl Butanoate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCC(=O)OCC", mole_fraction=1.0)],
value=42.86,
std_error=0.1,
doi="10.1016/0021-9614(86)90070-4",
),
# Ethylene glycol diacetate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(=O)OCCOC(=O)C", mole_fraction=1.0)],
value=61.44,
std_error=0.15,
doi="10.1016/0021-9614(86)90070-4",
),
# Methyl formate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=293.25,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="COC=O", mole_fraction=1.0)],
value=28.7187400224,
std_error=None,
doi="10.1135/cccc19760001",
),
# Ethyl formate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=304,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCOC=O", mole_fraction=1.0)],
value=31.63314346416,
std_error=None,
doi="10.1135/cccc19760001",
),
# 1,3-propanediol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="OCCCO", mole_fraction=1.0)],
value=70.5,
std_error=0.3,
doi="10.1021/je060419q",
),
# 2,4 pentanediol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(CC(C)O)O", mole_fraction=1.0)],
value=72.5,
std_error=0.3,
doi="10.1021/je060419q",
),
# glycerol
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="C(C(CO)O)O", mole_fraction=1.0)],
value=91.7,
std_error=0.9,
doi="10.1016/0021-9614(88)90173-5",
),
# Diethyl Malonate
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCOC(=O)CC(=O)OCC", mole_fraction=1.0)],
value=61.70,
std_error=0.25,
doi="10.1021/je100231g",
),
# 1,4-dioxane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="C1COCCO1", mole_fraction=1.0)],
value=38.64,
std_error=0.05,
doi="10.1039/P29820000565",
),
# oxane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="C1CCOCC1", mole_fraction=1.0)],
value=34.94,
std_error=0.84,
doi="10.1039/TF9615702125",
),
# methyl tert butyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="COC(C)(C)C", mole_fraction=1.0)],
value=32.42,
std_error=None,
doi="10.1016/0021-9614(80)90152-4",
),
# diisopropyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)OC(C)C", mole_fraction=1.0)],
value=32.12,
std_error=None,
doi="10.1016/0021-9614(80)90152-4",
),
# Dibutyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCOCCCC", mole_fraction=1.0)],
value=44.99,
std_error=None,
doi="10.1016/0021-9614(80)90152-4",
),
# cyclopentanone
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.16,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="O=C1CCCC1", mole_fraction=1.0)],
value=42.63,
std_error=0.42,
doi="10.1002/hlca.19720550510",
),
# 2-pentanone
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCC(C)=O", mole_fraction=1.0)],
value=38.43,
std_error=None,
doi="10.1016/0021-9614(83)90091-5",
),
# cyclohexanone
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.16,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="O=C1CCCCC1", mole_fraction=1.0)],
value=44.89,
std_error=0.63,
doi="10.1002/hlca.19720550510",
),
# cycloheptanone
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.16,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="O=C1CCCCCC1", mole_fraction=1.0)],
value=49.54,
std_error=0.63,
doi="10.1002/hlca.19720550510",
),
# cyclohexane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="C1CCCCC1", mole_fraction=1.0)],
value=33.02,
std_error=None,
doi="10.1135/cccc19790637",
),
# hexane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCC", mole_fraction=1.0)],
value=31.55,
std_error=None,
doi="10.1135/cccc19790637",
),
# methylcyclohexane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC1CCCCC1", mole_fraction=1.0)],
value=35.38,
std_error=None,
doi="10.1135/cccc19790637",
),
# heptane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCCC", mole_fraction=1.0)],
value=36.58,
std_error=None,
doi="10.1135/cccc19790637",
),
# iso-octane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)CC(C)(C)C", mole_fraction=1.0)],
value=35.13,
std_error=None,
doi="10.1135/cccc19790637",
),
# decane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCCCCCC", mole_fraction=1.0)],
value=51.35,
std_error=None,
doi="10.3891/acta.chem.scand.20-0536",
),
# acetone
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=300.4,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(C)=O", mole_fraction=1.0)],
value=30.848632,
std_error=0.008368,
doi="10.1021/ja01559a015",
),
# butan-2-one
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCC(C)=O", mole_fraction=1.0)],
value=34.51,
std_error=0.04,
doi="0021-9614(79)90127-7",
),
# pentan-3-one
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCC(=O)CC", mole_fraction=1.0)],
value=38.52,
std_error=None,
doi="10.1016/0021-9614(83)90091-5",
),
# 4-methylpentan-2-one
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CC(=O)CC(C)C", mole_fraction=1.0)],
value=40.56,
std_error=None,
doi="10.1016/0021-9614(83)90091-5",
),
# 3-hexanone
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCC(=O)CC", mole_fraction=1.0)],
value=42.45,
std_error=None,
doi="10.1016/0021-9614(83)90091-5",
),
# 2-methylheptane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCC(C)C", mole_fraction=1.0)],
value=39.66,
std_error=None,
doi="10.1135/cccc19790637",
),
# 3-methylpentane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCC(C)CC", mole_fraction=1.0)],
value=30.26,
std_error=None,
doi="10.1135/cccc19790637",
),
# 2-Methylhexane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCC(C)C", mole_fraction=1.0)],
value=34.85,
std_error=None,
doi="10.1135/cccc19790637",
),
# Octane
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCCCCC", mole_fraction=1.0)],
value=41.47,
std_error=None,
doi="10.1135/cccc19790637",
),
# Methyl Propyl Ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCOC", mole_fraction=1.0)],
value=27.57,
std_error=0.068925,
doi="10.1016/0021-9614(80)90152-4",
),
# Ethyl isopropyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCOC(C)C", mole_fraction=1.0)],
value=30.04,
std_error=0.0751,
doi="10.1016/0021-9614(80)90152-4",
),
# Dipropyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCOCCC", mole_fraction=1.0)],
value=35.68,
std_error=0.0892,
doi="10.1016/0021-9614(80)90152-4",
),
# butyl methyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="CCCCOC", mole_fraction=1.0)],
value=32.43,
std_error=0.081075,
doi="10.1016/0021-9614(80)90152-4",
),
# methyl isopropyl ether
DataSetEntry(
property_type="EnthalpyOfVaporization",
temperature=298.15,
pressure=101.325,
phase="Liquid + Gas",
components=[Component(smiles="COC(C)C", mole_fraction=1.0)],
value=26.41,
std_error=0.066025,
doi="10.1016/0021-9614(80)90152-4",
),
]
# Normalize the smiles patterns.
for data_entry in data_entries:
original_smiles = data_entry.components[0].smiles
normalized_smiles = substances.Component(original_smiles).smiles
data_entry.components[0].smiles = normalized_smiles
data_rows = [data_entry.to_series() for data_entry in data_entries]
data_frame = pandas.DataFrame(data_rows)
return data_frame
|
import numpy as np
import tensorflow as tf
from matcher import HungarianMatcher
class DETRLosses():
def __init__(self,num_classes = 2):
super(DETRLosses, self).__init__()
self.weight_dict = {'loss_ce': 1, 'loss_bbox': 1, 'loss_giou': 2}
self.matcher = HungarianMatcher()
self.num_classes = num_classes+1
self.empty_weight = np.ones([self.num_classes])
self.eos_coef = 0.1
self.empty_weight[-1] = self.eos_coef
self.empty_weight = tf.convert_to_tensor(self.empty_weight)
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = tf.concat([tf.fill(src.shape, i) for i, (src, _) in enumerate(indices)],axis=0)
batch_idx = tf.cast(batch_idx,tf.int64)
src_idx = tf.concat([src for (src, _) in indices],axis=0)
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx =tf.concat([tf.fill(tgt.shape, i) for i, (_, tgt) in enumerate(indices)],axis=0)
tgt_idx = tf.concat([tgt for (_, tgt) in indices],axis=0)
return batch_idx, tgt_idx
def bbox_loss(self,outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
idx = self._get_src_permutation_idx(indices)
idx_t = tf.transpose(tf.stack((idx)))
src_boxes = tf.gather_nd(outputs['pred_boxes'],idx_t)
target_boxes = tf.concat([tf.gather(t['bboxes'],i) for t, (_, i) in zip(targets, indices)], axis=0)
target_boxes = tf.cast(target_boxes,tf.float32)
loss_bbox = tf.math.abs(tf.math.subtract(src_boxes,target_boxes)) #L1
losses = {}
losses['loss_bbox'] = tf.math.reduce_sum(loss_bbox) / num_boxes
loss_giou = tf.linalg.diag_part(1-(self.matcher.generalized_box_iou(
self.matcher.box_cxcywh_to_xyxy(src_boxes),
self.matcher.box_cxcywh_to_xyxy(target_boxes))))
losses['loss_giou'] = tf.math.reduce_sum(loss_giou) / num_boxes
print("LOSS_BBOX",losses['loss_bbox'])
print("LOSS_GIOU",losses['loss_giou'])
return losses
def class_loss(self,outputs, targets, indices, num_boxes):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
idx_t = tf.transpose(tf.stack((idx)))
# print("IDX_t",idx_t)
target_classes_o = tf.concat([tf.gather(t['labels'],J) for t, (_, J) in zip(targets, indices)],axis=0)
target_classes_o = tf.cast(target_classes_o,tf.int32)
target_classes = tf.fill(src_logits.shape[:2], self.num_classes-1)
target_classes = tf.tensor_scatter_nd_update(target_classes,idx_t,target_classes_o)
# print("targ classes",target_classes.shape)
# print("src_logits",src_logits.shape)
cce = tf.keras.losses.SparseCategoricalCrossentropy()
loss_ce = cce(target_classes,src_logits).numpy()
print("LOSS_CE",loss_ce)
losses = {'loss_ce': loss_ce}
return losses
def combined_loss_fn(self,outputs,targets):
# Class Loss and BBOX loss
indices = self.matcher.runMatch(outputs, targets)
print("indices",indices)
#print("indices",indices)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
# num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
num_boxes = tf.convert_to_tensor([num_boxes],dtype=tf.float64)
# num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
num_boxes = tf.clip_by_value(num_boxes,1,tf.float32.max).numpy()
# print("num_boxes")
bboxLoss = self.bbox_loss(outputs,targets,indices, num_boxes)
classLoss = self.class_loss(outputs,targets,indices, num_boxes)
combined_loss = {**bboxLoss, **classLoss}
# combined_loss = classLoss
loss = sum(combined_loss[k] * self.weight_dict[k] for k in combined_loss.keys() if k in self.weight_dict)
print("LOSS",combined_loss,loss)
return loss
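

# --- Hedged, standalone sketch (not part of the original module) -------------------
# Illustrates how the (batch_idx, src_idx) pairs produced by _get_src_permutation_idx
# are used to gather the matched predictions with tf.gather_nd. The `demo_indices`
# list below mimics a HungarianMatcher result for a batch of two images and is purely
# illustrative.
if __name__ == "__main__":
    demo_indices = [
        (tf.constant([0, 2], dtype=tf.int64), tf.constant([1, 0], dtype=tf.int64)),  # image 0
        (tf.constant([1], dtype=tf.int64), tf.constant([0], dtype=tf.int64)),        # image 1
    ]
    batch_idx = tf.concat(
        [tf.fill(tf.shape(src), i) for i, (src, _) in enumerate(demo_indices)], axis=0)
    batch_idx = tf.cast(batch_idx, tf.int64)
    src_idx = tf.concat([src for (src, _) in demo_indices], axis=0)
    idx_t = tf.transpose(tf.stack((batch_idx, src_idx)))     # shape [num_matches, 2]
    pred_boxes = tf.random.uniform([2, 4, 4])                # [batch, num_queries, 4]
    matched_boxes = tf.gather_nd(pred_boxes, idx_t)          # [num_matches, 4]
    print("matched boxes:", matched_boxes.shape)             # (3, 4)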
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Global test counters initialization.
"""
from time import time
from pypower.t.t_globals import TestGlobals
def t_begin(num_of_tests, quiet=False):
"""Initializes the global test counters, setting everything up to
execute C{num_of_tests} tests using C{t_ok} and C{t_is}. If C{quiet}
is true, it will not print anything for the individual tests, only a
summary when C{t_end} is called.
@author: Ray Zimmerman (PSERC Cornell)
"""
TestGlobals.t_quiet = quiet
TestGlobals.t_num_of_tests = num_of_tests
TestGlobals.t_counter = 1
TestGlobals.t_ok_cnt = 0
TestGlobals.t_not_ok_cnt = 0
TestGlobals.t_skip_cnt = 0
TestGlobals.t_clock = time()
if not TestGlobals.t_quiet:
print('1..%d' % num_of_tests)
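

# Hedged usage sketch (not part of the original module). It assumes the companion
# helpers t_ok, t_is and t_end from pypower.t are available, mirroring MATPOWER's
# test harness.
if __name__ == '__main__':
    from pypower.t.t_ok import t_ok
    from pypower.t.t_is import t_is
    from pypower.t.t_end import t_end

    t_begin(2, quiet=False)                               # declare two tests
    t_ok(1 + 1 == 2, 'basic arithmetic')                  # boolean check
    t_is(3.14159, 3.14159, 5, 'pi to 5 decimal places')   # numeric comparison
    t_end()                                               # print the pass/fail summary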
|
import logging
from basics.base import Base as PyBase
class Base(PyBase):
def __init__(self, *args, disable_logging=False, **kwargs):
# print(f"mlpug.Base(*args={args}, kwargs={kwargs})")
super().__init__(*args, **kwargs)
self._logging_disabled = None
self._set_logging_disabled(disable_logging)
@property
def logging_disabled(self):
return self._logging_disabled
def _set_logging_disabled(self, disable):
self._log.setLevel(logging.WARN if disable else logging.DEBUG)
self._logging_disabled = disable
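

# Hedged usage sketch (not part of the original module): a subclass that silences its
# own logger through the disable_logging flag. Assumes basics.base.Base exposes the
# self._log logger used above.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    class MyComponent(Base):
        def do_work(self):
            self._log.debug("hidden when logging is disabled")
            self._log.warning("warnings are still emitted")

    component = MyComponent(disable_logging=True)
    component.do_work()
    print(f"logging_disabled = {component.logging_disabled}")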
|
"""
Definition of the :class:`PersonNameTestCase` class.
"""
from dicom_parser.data_elements.person_name import PersonName
from tests.test_data_element import DataElementTestCase
class PersonNameTestCase(DataElementTestCase):
"""
Tests for the
:class:`~dicom_parser.data_elements.person_name.PersonName`
class.
"""
TEST_CLASS = PersonName
SAMPLE_KEY = "PatientName"
|
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
from typing import Any, Type, TypeVar
from typing_extensions import Protocol
class BindableProtocol(Protocol):
"""BindableProtocol is a protocol for lazy initialization and binding to a manager"""
def bind(
self,
name, # type: str
manager, # type: Any
):
"""Bind to manager with the specified name"""
raise NotImplementedError
class BindProperty(object):
"""
BindProperty is a API declaration specification of lazy initialization of Property.
To help the IDE improve the intelligent completion experience,
use the bind_property function instead of the class.
"""
def __init__(
self,
cls, # type: Type[BindableProtocol]
*args, # type: Any
**kwargs # type: Any
):
self._name = ""
self._property_id = "_bind_property_id_%s" % id(self)
self._cls = cls
self._args = args
self._kwargs = kwargs
def __set_name__(self, obj_type, name):
self._name = name
def __get__(self, obj, obj_type=None):
if not obj:
return self._cls
# At least for Python 3.6 and higher
# The __set_name__ method was included
# which is automatically called when the class is being created
# So in order to compatibility with low versions
# we need to cache the property instance
if hasattr(obj, self._property_id):
return getattr(obj, self._property_id)
value = self._cls(*self._args, **self._kwargs) # type: ignore
value.bind(self._name, obj)
setattr(obj, self._name or self._property_id, value)
return value
T = TypeVar("T", bound=BindableProtocol)
def bind_property(cls, *args, **kwargs):
# type: (Type[T], *Any, **Any) -> T
"""The generic function wrapper for BindProperty"""
return BindProperty(cls, *args, **kwargs) # type: ignore
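

# Hedged usage sketch (not part of the original module): a minimal bindable class that
# records its attribute name and owning manager when lazily created on first access.
if __name__ == "__main__":

    class Counter:
        def __init__(self, start=0):
            # type: (int) -> None
            self.value = start
            self.name = ""
            self.manager = None  # type: Any

        def bind(self, name, manager):
            # type: (str, Any) -> None
            self.name = name
            self.manager = manager

    class Manager:
        counter = bind_property(Counter, start=10)

    m = Manager()
    print(m.counter.value)          # 10 -- created lazily on first access
    print(m.counter.name)           # "counter" -- bound via __set_name__/bind
    print(m.counter is m.counter)   # True -- cached on the instance afterwards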
|
from flask import Flask, request, json
from api.scrapper import API
app = Flask(__name__)
def are_valid_query_params(query_params):
required_params = ["oci", "oco", "dci", "dco", "dd"]
try:
for param in required_params:
assert param in query_params.keys()
return True
except AssertionError:
return False
@app.route("/")
def fetch_and_return_flights():
params = request.args
if are_valid_query_params(params):
return app.response_class(status=200, mimetype="application/json", response=json.dumps({
"status": "success",
"tickets": API(params).get_data()
}))
else:
return app.response_class(status=404, mimetype="application/json", response=json.dumps({
"status": "failure",
"tickets": []
}))
if __name__ == "__main__":
app.run()
|
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from context import fiberorient as fo
def test_confidence(st_obj):
test_conf = st_obj.confidence
true_conf = np.array([[[0.10819485, 0.42774707, 0.42774707, 0.10819485],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.10819485, 0.42774707, 0.42774707, 0.10819485]],
[[0.10819485, 0.42774707, 0.42774707, 0.10819485],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.10819485, 0.42774707, 0.42774707, 0.10819485]],
[[0.10819485, 0.42774707, 0.42774707, 0.10819485],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.10819485, 0.42774707, 0.42774707, 0.10819485]],
[[0.10819485, 0.42774707, 0.42774707, 0.10819485],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.42774707, 0.68832326, 0.68832326, 0.42774707],
[0.10819485, 0.42774707, 0.42774707, 0.10819485]]])
assert_array_almost_equal(test_conf, true_conf)
def test_vectors(vectors):
test_vectors = np.zeros_like(vectors)
test_vectors[..., 0] = 1 # all x
assert_array_almost_equal(test_vectors, vectors)
|
import unrealsdk
from unrealsdk import *
from ..OptionManager import Options
import json
import os
class Viewmodel(BL2MOD):
Name = "CVM"
Description = "<B><U><font size='18' color='#e8131d'>Configurable Viewmodel</font></U></B>\n" \
"A mod that allows you to set your viewmodel the way you like it!"
Author = "Juso"
Options = [
Options.Spinner("Save current type to file", "Save the current configuration for"
" the current WeaponType", "Save", ["Save", "Save"]),
Options.Spinner("Save same type to file", "Save the current configuration for"
" all the same WeaponTypes", "Save All", ["Save All", "Save All"]),
Options.Spinner("Load from files", "Loads your previously saved configs.", "Load", ["Load", "Load"]),
Options.Slider("FirstPersonMeshFOV", "Change the FirstPersonMeshFOV", 45, 0, 100, 1),
Options.Slider("PlayerViewOffset.X", "Change the PlayerViewOffset X Value", 20, -100, 100, 1),
Options.Slider("PlayerViewOffset.Y", "Change the PlayerViewOffset Y Value", 4, -100, 100, 1),
Options.Slider("PlayerViewOffset.Z", "Change the PlayerViewOffset Z Value", 2, -100, 100, 1),
Options.Slider("RelativeRotation.Pitch", "Change the Pitch of the Weapon", 0, -32768, 32768, 182),
Options.Slider("RelativeRotation.Yaw", "Change the Yaw of the Weapon", 16384, -32768, 32768, 182),
Options.Slider("RelativeRotation.Roll", "Change the Roll of the Weapon", 0, -32768, 32768, 182),
]
PATH = os.path.dirname(os.path.realpath(__file__))
def get_pc(self):
return GetEngine().GamePlayers[0].Actor
def Enable(self):
pass
def Disable(self):
pass
def change_MeshFOV(self, value, WT):
WT.FirstPersonMeshFOV = value
self.get_pc().UpdateForegroundFOV()
def change_ViewOffset(self, xyz, value, WT):
if xyz == "PlayerViewOffset.X":
WT.PlayerViewOffset.X = value
elif xyz == "PlayerViewOffset.Y":
WT.PlayerViewOffset.Y = value
elif xyz == "PlayerViewOffset.Z":
WT.PlayerViewOffset.Z = value
pawn = self.get_pc().Pawn
pawn.SetArmPosition()
def change_RelativeRotation(self, rot, value):
if self.get_pc() and self.get_pc().Pawn:
pawn = self.get_pc().Pawn
hands = pawn.Arms.SkeletalMesh
if rot == "RelativeRotation.Pitch":
hands.Sockets[0].RelativeRotation.Pitch = int(value)
elif rot == "RelativeRotation.Yaw":
hands.Sockets[0].RelativeRotation.Yaw = int(value)
elif rot == "RelativeRotation.Roll":
hands.Sockets[0].RelativeRotation.Roll = int(value)
pawn.SetArmPosition()
self.get_pc().UpdateForegroundFOV()
saved_settings = {}
def save_to_json(self, obj):
self.saved_settings.clear()
        for _option in self.Options[3:7]:  # FirstPersonMeshFOV + PlayerViewOffset sliders
self.saved_settings[_option.Caption] = _option.CurrentValue
with open(os.path.join(self.PATH, str(obj) + ".json"), "w") as file:
json.dump(self.saved_settings, file)
self.saved_settings.clear()
        for _option in self.Options[7:]:  # RelativeRotation sliders (Pitch/Yaw/Roll)
self.saved_settings[_option.Caption] = _option.CurrentValue
with open(os.path.join(self.PATH, "SkeletalMeshSocket.json"), "w") as file:
json.dump(self.saved_settings, file)
def ModOptionChanged(self, option, newValue):
if option in self.Options:
if not self.get_pc() or not self.get_pc().Pawn or not self.get_pc().Pawn.Weapon:
return
WeaponType = self.get_pc().Pawn.Weapon.DefinitionData.WeaponTypeDefinition
if option.Caption == "FirstPersonMeshFOV":
self.change_MeshFOV(newValue, WeaponType)
elif option.Caption == "PlayerViewOffset.X":
self.change_ViewOffset(option.Caption, newValue, WeaponType)
elif option.Caption == "PlayerViewOffset.Y":
self.change_ViewOffset(option.Caption, newValue, WeaponType)
elif option.Caption == "PlayerViewOffset.Z":
self.change_ViewOffset(option.Caption, newValue, WeaponType)
elif option.Caption == "RelativeRotation.Pitch":
self.change_RelativeRotation(option.Caption, newValue)
elif option.Caption == "RelativeRotation.Yaw":
self.change_RelativeRotation(option.Caption, newValue)
elif option.Caption == "RelativeRotation.Roll":
self.change_RelativeRotation(option.Caption, newValue)
elif option.Caption == "Save current type to file":
self.save_to_json(WeaponType)
elif option.Caption == "Save same type to file":
for wt in FindAll("WeaponTypeDefinition"):
if wt.BodyWeaponHoldName == WeaponType.BodyWeaponHoldName:
self.save_to_json(wt)
elif option.Caption == "Load from files":
for root, dirs, files in os.walk(self.PATH):
for file in files:
if file.endswith(".json"):
if len(file.split()) > 1:
cls = str(file.split()[0])
obj = os.path.splitext(str(file.split()[1]))[0]
WeaponType = FindObject(cls, obj)
with open(os.path.join(root, file), "r") as f:
settings = json.load(f)
self.change_MeshFOV(settings["FirstPersonMeshFOV"], WeaponType)
for attr, value in settings.items():
try:
self.change_ViewOffset(attr, value, WeaponType)
except:
pass
else:
with open(os.path.join(root, file), "r") as f:
settings = json.load(f)
for attr, value in settings.items():
try:
self.change_RelativeRotation(attr, value)
except:
pass
unrealsdk.RegisterMod(Viewmodel())
# IronsightsRotation (Pitch=425,Yaw=-603,Roll=-128) WillowPlayerPawn
# Use this for an inspection mod
|
# test_561.py
import unittest
from arrayPairSum_561 import arrayPairSum
class sortArrayByParityTest(unittest.TestCase):
def test_sort_array_by_parity_1(self):
self.assertEqual(arrayPairSum([1,4,3,2]), 4)
def test_sort_array_by_parity_2(self):
self.assertEqual(arrayPairSum([]), 0)
if __name__ == '__main__':
unittest.main() |
file = open("/Users/yuqil/Desktop/16fall/15688/final project/code/688proj/base_dir/original-data/AMiner-Author.txt")
dict = {}
cur_name = None
cur_address = None
read_name = False
for line in file:
if line.startswith("#n"):
cur_name = line.rstrip()[3:]
read_name = True
elif line.startswith("#a"):
if read_name:
cur_address = line.rstrip()[3:]
dict[cur_name] = cur_address
read_name = False
cur_name, cur_address = None, None
else:
continue
person = open("/Users/yuqil/Desktop/16fall/15688/final project/code/688proj/base_dir/new_person.txt")
output = open("author_address.txt", "wb")
for line in person:
line = line.rstrip()
name = line[line.find(",") + 1 :]
if name in dict:
addr = dict[name]
if len(addr) > 0:
if ";" in addr:
addr = addr[0 : addr.find(";")]
output.write(name + "\t" + addr + "\n")
print name, addr
output.close()
person.close()
|
_base_ = [
'../_base_/models/oscar/oscar_gqa_config.py',
'../_base_/datasets/oscar/oscar_gqa_dataset.py',
'../_base_/default_runtime.py',
]
# cover the parrmeter in above files
model = dict(params=dict(model_name_or_path='/home/datasets/mix_data/model/vinvl/vqa/base/checkpoint-2000000', ))
data_root = '/home/datasets/mix_data/vinvl/datasets/gqa'
gqa_reader_train_cfg = dict(
model_name_or_path='/home/datasets/mix_data/model/vinvl/vqa/base/checkpoint-2000000',
data_dir=data_root,
label_file=data_root + '/trainval_testdev_all_ans2label.pkl',
label2ans_file=data_root + '/trainval_testdev_all_label2ans.pk',
)
gqa_reader_test_cfg = dict(
model_name_or_path='/home/datasets/mix_data/model/vinvl/vqa/base/checkpoint-2000000',
data_dir=data_root,
label_file=data_root + '/trainval_testdev_all_ans2label.pkl',
label2ans_file=data_root + '/trainval_testdev_all_label2ans.pk',
)
train_data = dict(data=dict(reader=gqa_reader_train_cfg, ), )
test_data = dict(data=dict(reader=gqa_reader_test_cfg, ), )
|
"""
View netCDF time series in an interactive timeseries plot with bokeh.
Plot opens in a browser window.
"""
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show, output_file
from bokeh.palettes import Category10 as palette
import itertools
import numpy
import argparse
import galene as ga
import cftime
from dateutil.parser import parse as dateparse
import datetime
color_cycle = itertools.cycle(palette[10])
def make_interactive_plot(cube_list):
p = figure(x_axis_type='datetime', plot_width=1400, plot_height=700)
for cube in cube_list:
time_coord = cube.coord('time')
time_units = time_coord.units
time_array = numpy.array(time_coord.points)
# get cftime calendar-aware datetime
datetime_list = time_units.num2date(time_array)
# convert to python datetime
datetime_list = [
datetime.datetime(d.year, d.month, d.day, d.hour, d.minute, d.second) for d
in datetime_list
]
values = cube.data.flatten()
legend = cube.attributes['dataset_id']
c = next(color_cycle)
p.line(datetime_list, values, legend_label=legend, color=c)
ylabel = '{:} [{:}]'.format(cube.standard_name.replace('_', ' '),
cube.units)
p.yaxis.axis_label = ylabel
p.title.text = cube.attributes['location_name']
output_file('timeseries.html')
show(p)
def load_cubes(file_list, start_time=None, end_time=None):
cube_list = []
var_list = ['slev', 'temp', 'psal', 'iceextent', 'icevol']
for f in file_list:
c = None
for v in var_list:
try:
c = ga.load_cube(f, v,
start_time=start_time, end_time=end_time)
break
except Exception:
pass
assert c is not None, 'Could not open file {:}'.format(f)
cube_list.append(c)
return cube_list
def parse_args():
parser = argparse.ArgumentParser(
description='Open netCDF time series files in an interactive plot.',
# includes default values in help entries
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-f', '--files', metavar='file', nargs='+',
help='a netCDF file to open, multiple values accepted')
parser.add_argument('--remove-mean', action='store_true',
help='remove mean from time series prior to plotting')
parser.add_argument('-s', '--startdate',
help='Start date of time series, in format "YYYY-MM-DDTHH" or "YYYY-MM-DD", e.g. "2006-05-01T00".')
parser.add_argument('-e', '--enddate',
                        help='End date of time series, in format "YYYY-MM-DDTHH" or "YYYY-MM-DD", e.g. "2006-05-03T00".')
parser.add_argument('-v', '--variable', help='Variable to read, e.g. "slev"')
parser.add_argument('-l', '--location_name', help='Location to read, e.g. "Helsinki"')
parser.add_argument('-d', '--datasets', help='Comma-separated list of dataset_id to read, e.g. "obs,run01"')
args = parser.parse_args()
sd = args.startdate
if sd is not None:
sd = dateparse(sd)
ed = args.enddate
if ed is not None:
ed = dateparse(ed)
cube_list = []
if args.datasets is not None:
assert args.location_name is not None, 'location_name must be set'
assert args.variable is not None, 'variable must be set'
for dataset_id in args.datasets.split(','):
dset = ga.read_dataset(
dataset_id, 'timeseries', args.variable,
location_name=args.location_name,
start_time=sd, end_time=ed, verbose=False
)
c_list = list(dset.values())
cube_list.extend(c_list)
if args.files is not None:
c_list = load_cubes(args.files, start_time=sd, end_time=ed)
cube_list.extend(c_list)
if len(cube_list) == 0:
raise Exception('No timeseries data loaded. Either files or datasets must be defined')
if args.remove_mean:
for c in cube_list:
c.data -= c.data.mean()
make_interactive_plot(cube_list)
if __name__ == '__main__':
parse_args()
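
# Hedged usage examples (not part of the original script); the flags below are the ones
# defined in parse_args() above, and the file name "plot_timeseries.py" is hypothetical.
#
#   python plot_timeseries.py -f run01_slev.nc obs_slev.nc -s 2006-05-01T00 -e 2006-05-03T00
#   python plot_timeseries.py -d obs,run01 -v slev -l Helsinki --remove-mean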
|
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import ops
# from tensorflow.python.ops import array_ops
# from tensorflow.python.ops import sparse_ops
ani_mod = tf.load_op_library('ani.so')
@ops.RegisterGradient("AniCharge")
def _ani_charge_grad(op, grads):
"""The gradients for `ani_charge`.
Args:
op: The `ani_charge` `Operation` that we are differentiating, which we can use
to find the inputs and outputs of the original op.
      grads: Gradient with respect to the output of the `ani_charge` op.
Returns:
Gradients with respect to the input of `ani_charge`.
"""
x,y,z,qs,mo,macs = op.inputs
# dLdy = grads
dydx = ani_mod.ani_charge_grad(x,y,z,qs,mo,macs,grads)
result = [
None,
None,
None,
dydx,
None,
None,
]
return result
if __name__ == "__main__":
cp = tf.ConfigProto(log_device_placement=True, allow_soft_placement=False, device_count = {'GPU': 1})
sess = tf.Session(config=cp)
atom_matrix = np.array([
[0, 1.0, 2.0, 3.0], # H
[2, 2.0, 1.0, 4.0], # N
[0, 0.5, 1.2, 2.3], # H
[1, 0.3, 1.7, 3.2], # C
[2, 0.6, 1.2, 1.1], # N
[0, 14.0, 23.0, 15.0], # H
[0, 2.0, 0.5, 0.3], # H
[0, 2.3, 0.2, 0.4], # H
[0, 2.3, 0.2, 0.4], # H
[1, 0.3, 1.7, 3.2], # C
[2, 0.6, 1.2, 1.1]], dtype=np.float32)
mol_idxs = np.array([0,0,0,0,0,0,0,0,1,1,1], dtype=np.int32)
atom_types = atom_matrix[:, 0]
x = atom_matrix[:, 1]
y = atom_matrix[:, 2]
z = atom_matrix[:, 3]
scatter_idxs, gather_idxs, atom_counts = ani_mod.ani_sort(atom_types)
mol_atom_counts = tf.segment_sum(tf.ones_like(mol_idxs), mol_idxs)
mol_offsets = tf.cumsum(mol_atom_counts, exclusive=True)
qs_ph = tf.placeholder(dtype=np.float32)
qs_np = np.array([
1.0,
-1.1,
1.2,
2.3,
-4.3,
3.1,
0.4,
-0.9,
-1.0,
2.2,
3.5
])
ys = ani_mod.ani_charge(x,y,z,qs_ph,mol_offsets, mol_atom_counts)
print(sess.run(ys, feed_dict={qs_ph: qs_np}))
# grads = ani_mod.ani_charge_grad(
# x,y,z,qs,mol_offsets, mol_atom_counts)
# print(sess.run(grads))
grad = tf.gradients(ys, qs_ph)
print(sess.run(grad, feed_dict={qs_ph: qs_np})) |
import clr
import re
regexString = IN[0]
SheetNameList = IN[1]
elementList = list()
for item in SheetNameList:
    searchObj = re.search(regexString, item)
    # Guard against names that do not match the pattern instead of raising AttributeError.
    elementList.append(searchObj.group() if searchObj else None)
OUT = elementList |
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# NWI legend: https://www.fws.gov/wetlands/Data/Mapper-Wetlands-Legend.html
def nwi_add_color(fc):
emergent = ee.FeatureCollection(
fc.filter(ee.Filter.eq('WETLAND_TY', 'Freshwater Emergent Wetland')))
emergent = emergent.map(lambda f: f.set(
'R', 127).set('G', 195).set('B', 28))
# print(emergent.first())
forested = fc.filter(ee.Filter.eq(
'WETLAND_TY', 'Freshwater Forested/Shrub Wetland'))
forested = forested.map(lambda f: f.set('R', 0).set('G', 136).set('B', 55))
pond = fc.filter(ee.Filter.eq('WETLAND_TY', 'Freshwater Pond'))
pond = pond.map(lambda f: f.set('R', 104).set('G', 140).set('B', 192))
lake = fc.filter(ee.Filter.eq('WETLAND_TY', 'Lake'))
lake = lake.map(lambda f: f.set('R', 19).set('G', 0).set('B', 124))
riverine = fc.filter(ee.Filter.eq('WETLAND_TY', 'Riverine'))
riverine = riverine.map(lambda f: f.set(
'R', 1).set('G', 144).set('B', 191))
fc = ee.FeatureCollection(emergent.merge(
forested).merge(pond).merge(lake).merge(riverine))
base = ee.Image(0).mask(0).toInt8()
img = base.paint(fc, 'R') \
.addBands(base.paint(fc, 'G')
.addBands(base.paint(fc, 'B')))
return img
fromFT = ee.FeatureCollection("users/wqs/Pipestem/Pipestem_HUC10")
Map.addLayer(ee.Image().paint(fromFT, 0, 2), {}, 'Watershed')
huc8_id = '10160002'
nwi_asset_path = 'users/wqs/NWI-HU8/HU8_' + huc8_id + '_Wetlands' # NWI wetlands for the clicked watershed
clicked_nwi_huc = ee.FeatureCollection(nwi_asset_path)
nwi_color = nwi_add_color(clicked_nwi_huc)
Map.centerObject(clicked_nwi_huc, 10)
Map.addLayer(nwi_color, {'gamma': 0.3, 'opacity': 0.7}, 'NWI Wetlands Color')
# Display the map.
Map
|
'''
This file sets up the titanic problem using kNN algorithm.
Accuracy: 0.79904
'''
#import kNN
import knn.kNN as knn
#import model
import data.TitanicParser as model
#using pyplotlib to plot error with k
import matplotlib.pyplot as plt
#load trainer from knn
trainer = knn.knn()
#get train and test data
X,Y,testX,testY,validX,validY = model.loadData(validationSet=True)
#load train data
trainer.loadData(X,Y)
#holds error on validation set for each k
validErrors = {}
#setup k's to test
ks = range(1, 25, 2)
for k in ks:
#holds the error count for this k
error = 0
#set the k parameter in the trainer
trainer.setK(k)
#for each validation example
for i in range(len(validX)):
#try to predict its label using training data
guess = trainer.predict(validX[i])
#cast guesses to T/F
if guess > .5:
guessRound = 1
else:
guessRound = 0
#check if wrong
if guessRound != validY[i]:
error += 1
#save error
validErrors[k] = (error / len(validX))
print("Validation Error", k, (error / len(validX)))
#find best k
error = 1
bestK = 1
#for each k tried
for key in validErrors.keys():
#if this k is better, save it
if validErrors[key] < error:
error = validErrors[key]
bestK = key
#set k to trainer
trainer.setK(bestK)
#holds errors
error = 0
#for each item in test data
for i in range(len(testX)):
#try to predict its label using training data
guess = trainer.predict(testX[i])
#cast guesses to T/F
if guess > .5:
guessRound = 1
else:
guessRound = 0
#check if wrong
if guessRound != testY[i]:
error += 1
#print error and accuract
print("K:", bestK)
print("Error:", error / len(testX))
print("Accuracy:", 1 - error / len(testX))
#reset knn with best k
trainer = knn.knn(k=bestK)
#load all data
X,Y = model.loadTrainData()
#load trainer
trainer.loadData(X,Y)
#load test data
passenger, testX = model.loadActualTestData()
#open output file
output = open("knnout.csv", "w+")
#print header
output.write("PassengerId,Survived\n")
#for each test item
for i in range(len(testX)):
#make prediction
guess = trainer.predict(testX[i])
#binary cast
if guess < .5:
correctedGuess = 0
else:
correctedGuess = 1
#write to file
output.write(str(passenger[i]))
output.write(",")
output.write(str(correctedGuess))
output.write("\n")
#close output
output.close()
#holds errors related to each k
errors = []
#for each k, add its respective error to errors list
for k in ks:
errors.append(validErrors[k])
#plot, k and errors
plt.plot(ks, errors)
plt.show() |
import socket, ssl
HOST, PORT = 'example.com', 443
def handle(conn):
    # A complete HTTP/1.1 request needs a Host header and a blank-line terminator.
    conn.write(b'GET / HTTP/1.1\r\nHost: ' + HOST.encode() + b'\r\nConnection: close\r\n\r\n')
print(conn.recv().decode())
def main():
sock = socket.socket(socket.AF_INET)
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 # optional
conn = context.wrap_socket(sock, server_hostname=HOST)
try:
conn.connect((HOST, PORT))
handle(conn)
finally:
conn.close()
if __name__ == '__main__':
main()
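
# Hedged note (not part of the original file): on Python 3.7+ the deprecated
# OP_NO_TLSv1 / OP_NO_TLSv1_1 flags can be replaced with
#     context.minimum_version = ssl.TLSVersion.TLSv1_2
# which enforces the same "TLS 1.2 or newer" policy.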
|
"""Contract test cases for ready."""
import json
from typing import Any
from aiohttp import ClientSession, hdrs, MultipartWriter
import pytest
from rdflib import Graph
from rdflib.compare import graph_diff, isomorphic
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_with_file(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.ttl"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(open(data_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
with open("tests/files/valid_catalog.ttl", "r") as file:
text = file.read()
# body is graph of both the input data and the validation report
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_accept_json_ld(http_service: Any) -> None:
"""Should return OK and successful validation and content-type should be json-ld."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.ttl"
headers = {"Accept": "application/ld+json"}
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(open(data_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, headers=headers, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "application/ld+json" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
[
{
"@type": [
"http://www.w3.org/ns/shacl#ValidationReport"
],
"http://www.w3.org/ns/shacl#conforms": [
{
"@value": true
}
]
},
{
"@id": "http://www.w3.org/ns/shacl#ValidationReport"
}
]
"""
with open(data_graph_file, "r") as file:
text = file.read()
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="json-ld")
g2 = Graph().parse(data=body, format="json-ld")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_file_content_type_json_ld(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.json"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(
open(data_graph_file, "rb"), {"CONTENT-TYPE": "application/ld+json"}
)
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
with open(data_graph_file, "r") as file:
text = file.read()
g0 = Graph().parse(data=text, format="json-ld")
g1 = g0 + Graph().parse(data=src, format="text/turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_file_content_type_rdf_xml(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.xml"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(
open(data_graph_file, "rb"), {"CONTENT-TYPE": "application/rdf+xml"}
)
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
with open(data_graph_file, "r") as file:
text = file.read()
g0 = Graph().parse(data=text, format="application/rdf+xml")
g1 = g0 + Graph().parse(data=src, format="text/turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_url(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_url = "https://raw.githubusercontent.com/Informasjonsforvaltning/dcat-ap-no-validator-service/main/tests/files/valid_catalog.ttl" # noqa: B950
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(data_graph_url)
p.set_content_disposition("inline", name="data-graph-url")
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
session = ClientSession()
async with session.get(data_graph_url) as resp:
text = await resp.text()
await session.close()
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="text/turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_with_file_content_encoding(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.ttl"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(open(data_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p.headers[hdrs.CONTENT_ENCODING] = "gzip"
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
with open(data_graph_file, "r") as file:
text = file.read()
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="text/turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_with_default_config(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.ttl"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
config = {"expand": True, "includeExpandedTriples": False}
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(open(data_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p.headers[hdrs.CONTENT_ENCODING] = "gzip"
p = mpwriter.append(json.dumps(config))
p.set_content_disposition("inline", name="config")
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
with open(data_graph_file, "r") as file:
text = file.read()
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="text/turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_with_file_and_shapes_graph_file(http_service: Any) -> None:
"""Should return OK and successful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/valid_catalog.ttl"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(open(data_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms true
.
"""
with open("tests/files/valid_catalog.ttl", "r") as file:
text = file.read()
# body is graph of both the input data and the validation report
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
# --- bad cases ---
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_with_not_valid_file(http_service: Any) -> None:
"""Should return OK and unsuccessful validation."""
url = f"{http_service}/validator"
data_graph_file = "tests/files/invalid_catalog.ttl"
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
ontology_graph_file = "tests/files/ontologies.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(open(data_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="data-graph-file", filename=data_graph_file
)
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
p = mpwriter.append(open(ontology_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="ontology-graph-file", filename=ontology_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
body = await resp.text()
await session.close()
assert resp.status == 200
assert "text/turtle" in resp.headers[hdrs.CONTENT_TYPE]
# results_graph (validation report) should be isomorphic to the following:
src = """
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
[] a sh:ValidationReport ;
sh:conforms false ;
sh:result [ a sh:ValidationResult ;
sh:focusNode <http://dataset-publisher:8080/datasets/1> ;
sh:resultMessage "Less than 1 values on <http://dataset-publisher:8080/datasets/1>->dcat:theme" ;
sh:resultPath <http://www.w3.org/ns/dcat#theme> ;
sh:resultSeverity sh:Violation ;
sh:sourceConstraintComponent sh:MinCountConstraintComponent ;
sh:sourceShape [ sh:class <http://www.w3.org/2004/02/skos/core#Concept> ;
sh:minCount 1 ;
sh:path <http://www.w3.org/ns/dcat#theme> ;
sh:severity sh:Violation ] ],
[ a sh:ValidationResult ;
sh:focusNode <http://dataset-publisher:8080/datasets/1> ;
sh:resultMessage "Less than 1 values on <http://dataset-publisher:8080/datasets/1>->dct:description" ;
sh:resultPath <http://purl.org/dc/terms/description> ;
sh:resultSeverity sh:Violation ;
sh:sourceConstraintComponent sh:MinCountConstraintComponent ;
sh:sourceShape [ sh:minCount 1 ;
sh:nodeKind sh:Literal ;
sh:path <http://purl.org/dc/terms/description> ;
sh:severity sh:Violation ] ],
[ a sh:ValidationResult ;
sh:focusNode <http://dataset-publisher:8080/datasets/1> ;
sh:resultMessage "Data-themes fra EU skal brukes for dcat:theme"@nb ;
sh:resultPath <http://www.w3.org/ns/dcat#theme> ;
sh:resultSeverity sh:Warning ;
sh:sourceConstraintComponent sh:QualifiedMinCountConstraintComponent ;
sh:sourceShape [ sh:message "Data-themes fra EU skal brukes for dcat:theme"@nb ;
sh:path <http://www.w3.org/ns/dcat#theme> ;
sh:qualifiedMinCount 1 ;
sh:qualifiedValueShape [ sh:node <https://data.norge.no/specification/dcat-ap-no/#DataThemeRestriction> ] ;
sh:severity sh:Warning ] ]
.
"""
with open(data_graph_file, "r") as file:
text = file.read()
g0 = Graph().parse(data=text, format="text/turtle")
g1 = g0 + Graph().parse(data=src, format="text/turtle")
g2 = Graph().parse(data=body, format="text/turtle")
_isomorphic = isomorphic(g1, g2)
if not _isomorphic:
_dump_diff(g1, g2)
pass
assert _isomorphic, "results_graph is incorrect"
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_notexisting_url(http_service: Any) -> None:
"""Should return 400."""
url = f"{http_service}/validator"
data_graph_url = "https://raw.githubusercontent.com/Informasjonsforvaltning/dcat-ap-no-validator-service/main/tests/files/does_not_exist.ttl" # noqa: B950
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(data_graph_url)
p.set_content_disposition("inline", name="data-graph-url")
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
_ = await resp.text()
await session.close()
assert resp.status == 400
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_illformed_url(http_service: Any) -> None:
"""Should return 400."""
url = f"{http_service}/validator"
data_graph_url = "http://slfkjasdf" # noqa: B950
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(data_graph_url)
p.set_content_disposition("inline", name="data-graph-url")
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
_ = await resp.text()
await session.close()
assert resp.status == 400
@pytest.mark.contract
@pytest.mark.asyncio
async def test_validator_url_to_invalid_rdf(http_service: Any) -> None:
"""Should return 400."""
url = f"{http_service}/validator"
data_graph_url = "https://raw.githubusercontent.com/Informasjonsforvaltning/dcat-ap-no-validator-service/main/tests/files/invalid_rdf.txt" # noqa: B950
shapes_graph_file = "tests/files/mock_dcat-ap-no-shacl_shapes_2.00.ttl"
with MultipartWriter("mixed") as mpwriter:
p = mpwriter.append(data_graph_url)
p.set_content_disposition("inline", name="data-graph-url")
p = mpwriter.append(open(shapes_graph_file, "rb"))
p.set_content_disposition(
"attachment", name="shapes-graph-file", filename=shapes_graph_file
)
session = ClientSession()
async with session.post(url, data=mpwriter) as resp:
_ = await resp.text()
await session.close()
assert resp.status == 400
# ---------------------------------------------------------------------- #
# Utils for displaying debug information
def _dump_diff(g1: Graph, g2: Graph) -> None:
in_both, in_first, in_second = graph_diff(g1, g2)
print("\nin both:")
_dump_turtle(in_both)
print("\nin first:")
_dump_turtle(in_first)
print("\nin second:")
_dump_turtle(in_second)
def _dump_turtle(g: Graph) -> None:
for _l in g.serialize(format="text/turtle").splitlines():
if _l:
print(_l)
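
# Hedged note (not part of the original file): these cases are marked with
# @pytest.mark.contract and rely on an `http_service` fixture that points at a running
# validator instance (wired up in the project's conftest.py, not shown here), so a
# typical invocation might look like:
#
#   pytest -m contract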
|
# pan_and_tilt_tracker.py
# to run this program, type:
# sudo python pan_and_tilt_tracker.py headed (GUI)
# sudo python pan_and_tilt_tracker.py headless (no GUI (for embedded use))
# this program pans/tilts two servos so a mounted webcam tracks a red ball
# use the circuit from "pan_and_tilt_tracker.png"
import RPi.GPIO as GPIO
import cv2
import numpy as np
import os
import sys
from operator import itemgetter
###################################################################################################
def main():
headed_or_headless = ""
if len(sys.argv) == 2 and str(sys.argv[1]) == "headed":
headed_or_headless = "headed"
print "entering headed mode"
elif len(sys.argv) == 2 and str(sys.argv[1]) == "headless":
headed_or_headless = "headless"
print "entering headless mode"
else:
print "\nprogram usage:\n"
print "for headed mode (GUI interface) @command prompt type: sudo python pan_and_tilt_tracker.py headed\n"
print "for headless mode (no GUI interface, i.e. embedded mode) @ command prompt type: sudo python pan_and_tilt_tracker.py headless\n"
return
# end if else
GPIO.setmode(GPIO.BCM) # use GPIO pin numbering, not physical pin numbering
led_gpio_pin = 18
pan_gpio_pin = 24
tilt_gpio_pin = 25
pwmFrequency = 100 # frequency in Hz
pwmInitialDutyCycle = 14 # initial duty cycle in %
GPIO.setup(led_gpio_pin, GPIO.OUT)
GPIO.setup(pan_gpio_pin, GPIO.OUT)
GPIO.setup(tilt_gpio_pin, GPIO.OUT)
pwmPanObject = GPIO.PWM(pan_gpio_pin, pwmFrequency)
pwmTiltObject = GPIO.PWM(tilt_gpio_pin, pwmFrequency)
pwmPanObject.start(pwmInitialDutyCycle)
pwmTiltObject.start(pwmInitialDutyCycle)
capWebcam = cv2.VideoCapture(0) # declare a VideoCapture object and associate to webcam, 0 => use 1st webcam
print "default resolution = " + str(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) + "x" + str(capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
capWebcam.set(cv2.CAP_PROP_FRAME_WIDTH, 320.0)
capWebcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240.0)
print "updated resolution = " + str(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) + "x" + str(capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
if capWebcam.isOpened() == False: # check if VideoCapture object was associated to webcam successfully
print "error: capWebcam not accessed successfully\n\n" # if not, print error message to std out
os.system("pause") # pause until user presses a key so user can see error message
return # and exit function (which exits program)
# end if
    intXFrameCenter = int(float(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) / 2.0)
    intYFrameCenter = int(float(capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT)) / 2.0)        # use frame height for the vertical center
panServoPosition = int(90) # pan servo position in degrees
tiltServoPosition = int(90) # tilt servo position in degrees
updateServoMotorPositions(pwmPanObject, panServoPosition, pwmTiltObject, tiltServoPosition)
while cv2.waitKey(1) != 27 and capWebcam.isOpened(): # until the Esc key is pressed or webcam connection is lost
blnFrameReadSuccessfully, imgOriginal = capWebcam.read() # read next frame
if not blnFrameReadSuccessfully or imgOriginal is None: # if frame was not read successfully
print "error: frame not read from webcam\n" # print error message to std out
os.system("pause") # pause until user presses a key so user can see error message
break # exit while loop (which exits program)
# end if
imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)
imgThreshLow = cv2.inRange(imgHSV, np.array([0, 135, 135]), np.array([19, 255, 255]))
imgThreshHigh = cv2.inRange(imgHSV, np.array([168, 135, 135]), np.array([179, 255, 255]))
imgThresh = cv2.add(imgThreshLow, imgThreshHigh)
imgThresh = cv2.GaussianBlur(imgThresh, (3, 3), 2)
imgThresh = cv2.dilate(imgThresh, np.ones((5,5),np.uint8))
imgThresh = cv2.erode(imgThresh, np.ones((5,5),np.uint8))
intRows, intColumns = imgThresh.shape
circles = cv2.HoughCircles(imgThresh, cv2.HOUGH_GRADIENT, 3, intRows / 4) # fill variable circles with all circles in the processed image
GPIO.output(led_gpio_pin, GPIO.LOW)
if circles is not None: # this line is necessary to keep program from crashing on next line if no circles were found
GPIO.output(led_gpio_pin, GPIO.HIGH)
sortedCircles = sorted(circles[0], key = itemgetter(2), reverse = True)
largestCircle = sortedCircles[0]
x, y, radius = largestCircle # break out x, y, and radius
print "ball position x = " + str(x) + ", y = " + str(y) + ", radius = " + str(radius) # print ball position and radius
if x < intXFrameCenter and panServoPosition >= 2:
panServoPosition = panServoPosition - 2
elif x > intXFrameCenter and panServoPosition <= 178:
panServoPosition = panServoPosition + 2
# end if else
if y < intYFrameCenter and tiltServoPosition >= 62:
tiltServoPosition = tiltServoPosition - 2
elif y > intYFrameCenter and tiltServoPosition <= 133:
tiltServoPosition = tiltServoPosition + 2
# end if else
updateServoMotorPositions(pwmPanObject, panServoPosition, pwmTiltObject, tiltServoPosition)
if headed_or_headless == "headed":
cv2.circle(imgOriginal, (x, y), 3, (0, 255, 0), -1) # draw small green circle at center of detected object
cv2.circle(imgOriginal, (x, y), radius, (0, 0, 255), 3) # draw red circle around the detected object
# end if
# end if
if headed_or_headless == "headed":
cv2.imshow("imgOriginal", imgOriginal) # show windows
cv2.imshow("imgThresh", imgThresh)
# end if
# end while
cv2.destroyAllWindows() # remove windows from memory
return
# end main
###################################################################################################
def updateServoMotorPositions(pwmPanObject, panServoPosition, pwmTiltObject, tiltServoPosition):
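    # Angle-to-duty-cycle mapping (sketch of the assumed servo timing): with the
    # 100 Hz PWM frequency configured above the period is 10 ms, so
    # ((degrees * 0.01) + 0.5) * 10 maps 0-180 degrees onto roughly 0.5-2.3 ms
    # pulses (e.g. 90 degrees -> 14 %, the initial duty cycle), which is the
    # typical hobby-servo control range.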
panDutyCycle = ((float(panServoPosition) * 0.01) + 0.5) * 10
tiltDutyCycle = ((float(tiltServoPosition) * 0.01) + 0.5) * 10
pwmPanObject.ChangeDutyCycle(panDutyCycle)
pwmTiltObject.ChangeDutyCycle(tiltDutyCycle)
# end function
###################################################################################################
if __name__ == "__main__":
main()
|
# Generated by Django 3.1.1 on 2020-10-15 17:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("carbon_quiz", "0058_auto_20201012_1444"),
("carbon_quiz", "0059_auto_20201014_1909"),
]
operations = []
|
from rest_framework.test import APIClient
from django.test import SimpleTestCase, TestCase, Client
from django.urls import reverse, resolve
from products.models import Product, Category, Comment
import json
from decimal import Decimal
from django.contrib.auth.models import User
#testing views
class TestViews(TestCase):
### 10/10, completed ###
def setUp(self):
TestCase.allow_database_queries = True
self.study_notes_category = Category.objects.create(name = 'Study notes')
self.clothing_category = Category.objects.create(name = 'Clothing')
self.author_user = User.objects.create(username='testuser')
self.author_user.set_password('12345')
self.author_user.save()
self.client = Client()
login = self.client.login(username='testuser', password='12345')
#author_user creates his product with id/pk = 1
'''
self.product1 = Product.objects.create(
image_link_1 = 'http://personal.psu.edu/xqz5228/jpg.jpg',
image_link_2 = 'https://media.alienwarearena.com/media/1327-m.jpg',
category = self.study_notes_category, #category taken from CATEGORY_CHOICES
title = 'T-shirt',
condition = 'N',
price_in_SGD = 1.0,
description = 'testing',
this_product_has_multiple_quantities = True,
delivery_location = 'CLB/Engineering', #delivery_location taken from DELIVERY_CHOICES
extra_information = 'Nothing to add',
author = author_user,
#attributes auto-generated: pub_date and view_count
)
self.PRODUCT_ID = 1
'''
self.another_user = User.objects.create(username='another_dude')
self.another_user.set_password('12345')
self.another_user.save()
self.client = Client()
self.API_client = APIClient()
self.product1 = Product.objects.create(
image_link_1 = 'http://personal.psu.edu/xqz5228/jpg.jpg',
image_link_2 = 'https://media.alienwarearena.com/media/1327-m.jpg',
category = self.study_notes_category, #category taken from CATEGORY_CHOICES
title = 'T-shirt',
condition = 'N',
price_in_SGD = 1.0,
description = 'testing',
this_product_has_multiple_quantities = True,
delivery_location = 'CLB/Engineering', #delivery_location taken from DELIVERY_CHOICES
extra_information = 'Nothing to add',
author = self.author_user,
#attributes auto-generated: pub_date and view_count
)
self.PRODUCT_ID = 1
def test_product_list_view_number_of_listings_works(self):
response = self.client.get(reverse('home'))
self.assertEquals(response.status_code, 200) #status code 200, the request was fulfilled.
self.assertTemplateUsed(response, 'products/product_list.html')
self.assertEquals(Product.objects.count(), 1)
def test_product_category_view_categorisation_works(self):
self.product2 = Product.objects.create(
image_link_1 = 'http://personal.psu.edu/xqz5228/jpg.jpg',
image_link_2 = 'https://media.alienwarearena.com/media/1327-m.jpg',
category = self.study_notes_category, #<---------------
title = 'T-shirt',
condition = 'N',
price_in_SGD = 1.0,
description = 'testing',
this_product_has_multiple_quantities = True,
delivery_location = 'CLB/Engineering', #delivery_location taken from DELIVERY_CHOICES
extra_information = 'Nothing to add',
author = self.author_user,
#attributes auto-generated: pub_date and view_count
)
self.product3 = Product.objects.create(
image_link_1 = 'http://personal.psu.edu/xqz5228/jpg.jpg',
image_link_2 = 'https://media.alienwarearena.com/media/1327-m.jpg',
category = self.clothing_category, #<---------------
title = 'T-shirt',
condition = 'N',
price_in_SGD = 1.0,
description = 'testing',
this_product_has_multiple_quantities = True,
delivery_location = 'CLB/Engineering', #delivery_location taken from DELIVERY_CHOICES
extra_information = 'Nothing to add',
author = self.author_user,
#attributes auto-generated: pub_date and view_count
)
#correct counts
self.assertEquals(len(Product.objects.filter(category = self.study_notes_category)), 2)
self.assertEquals(len(Product.objects.filter(category = self.clothing_category)), 1)
#correct HTTPResponse
response = self.client.get(reverse('product-by-category', args = (self.PRODUCT_ID,)))
self.assertEquals(response.status_code, 200) #the request was fulfilled.
def test_user_profile_view_works(self):
response = self.client.get(reverse('user-details'))
self.assertEquals(response.status_code, 200) #the request was fulfilled.
def test_product_create_works(self):
'''
check if the product listing is actually created
'''
Product.objects.create(
image_link_1 = 'http://personal.psu.edu/xqz5228/jpg.jpg',
image_link_2 = 'https://media.alienwarearena.com/media/1327-m.jpg',
category = self.study_notes_category, #category taken from CATEGORY_CHOICES
title = 'T-shirt',
condition = 'N',
price_in_SGD = 1.0,
description = 'testing',
this_product_has_multiple_quantities = True,
delivery_location = 'CLB/Engineering', #delivery_location taken from DELIVERY_CHOICES
extra_information = 'Nothing to add',
author = self.author_user,
#attributes auto-generated: pub_date and view_count
)
self.PRODUCT_ID += 1
response = self.client.get(reverse('product-detail', args = (self.PRODUCT_ID,)))
self.assertEquals(response.status_code, 302) #redirected to product-details page
self.assertEquals(Product.objects.count(), 2) #check if the product listing is created
def test_product_detail_view_from_author_works(self):
self.client.login(username='testuser', password='12345')
#self.author_user, the creator of the product listing, requests to view the listing.
response = self.client.get(reverse('product-detail', args = (self.PRODUCT_ID,)))
#authenticated
self.assertTemplateUsed(response, 'products/product_detail.html')
self.assertEquals(response.status_code, 200)
def test_product_detail_view_from_other_users_works(self):
self.client.login(username='another_dude', password='12345')
#self.author_user, the creator of the product listing, requests to view the listing.
response = self.client.get(reverse('product-detail', args = (self.PRODUCT_ID,)))
#authenticated
self.assertTemplateUsed(response, 'products/product_detail_guest.html')
self.assertEquals(response.status_code, 200)
def test_product_update_view_from_author_works(self):
self.client.login(username='testuser', password='12345')
#self.author_user, the creator of the product listing, requests to update the listing.
response = self.client.get(reverse('product-update', args = (self.PRODUCT_ID,)))
#authenticated, view from author of the listing
self.assertTemplateUsed(response, 'products/product_create.html')
self.assertEquals(response.status_code, 200)
        UPDATE_request = self.client.post(reverse('product-update', args = (self.PRODUCT_ID,)))
        self.assertEquals(UPDATE_request.status_code, 200) #check that the update POST request is fulfilled (html rendered)
self.assertEquals(Product.objects.count(), 1) #confirm that product1 is still here
def test_product_update_view_from_other_users_unauthorised(self):
self.client.login(username='another_dude', password='12345')
response = self.client.get(reverse('product-update', args = (self.PRODUCT_ID,)))
#permission denied, will redirect to home
self.assertEquals(response.status_code, 302) #redirected to home
def test_product_delete_view_from_author_works(self):
self.client.login(username='testuser', password='12345')
#self.author_user, the creator of the product listing, requests to delete the listing.
GET_response = self.client.get(reverse('product-delete', args = (self.PRODUCT_ID,)))
#authenticated, view from author of the listing
self.assertTemplateUsed(GET_response, 'products/product_delete.html')
self.assertEquals(GET_response.status_code, 200)
POST_response = self.client.post(reverse('product-delete', args = (self.PRODUCT_ID,)))
self.assertEquals(POST_response.status_code, 302) #product1 deleted, redirects to home
self.assertEquals(Product.objects.count(), 0) #confirm that product1 is already deleted
def test_product_delete_view_from_other_users_unauthorised(self):
self.client.login(username='another_dude', password='12345')
response = self.client.get(reverse('product-delete', args = (self.PRODUCT_ID,))) #product2
#permission denied, will redirect to home
self.assertEquals(response.status_code, 302) #redirected to home
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: [email protected]
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import settings
import logging
import os
import re
import requests
import urllib
from bs4 import BeautifulSoup
number_type_mapping = {
'container': 'CONTAINER',
'bill_of_landing': 'BILLOFLADING',
}
def gen_resp_soup(response):
return BeautifulSoup(response.text.encode('utf8'))
class CosconAPI:
def searchContainer(self, search_by=None, number=None):
number_type = number_type_mapping[search_by]
context = requests.session()
init_response = self._execute(context)
jsessionid = self._get_jsessionid(init_response)
jsf_state = self._get_jsf_state(gen_resp_soup(init_response))
data = self._get_common_post_data(number_type, number, jsf_state)
post_response = self._post(context, data,
number, number_type, jsessionid, jsf_state)
return self._parse_post_response(post_response, context,
number_type, number, jsessionid)
def _post(self, context, data,
number, number_type, jsessionid, jsf_state):
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Faces-Request': 'partial/ajax',
'Referer': settings.COSCON_CONTAINER_URL,
'Accept': 'application/xml, text/xml, */*; q=0.01',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
}
cookies = {'JSESSIONID': jsessionid,
'number': number,
'numberType': number_type,
'language': 'en_US'}
post_response = self._execute(context, data=data,
headers=headers, cookies=cookies)
return post_response
def _parse_post_response(self, response, context,
number_type, number, jsessionid):
if number_type == 'BILLOFLADING':
func = self._parse_billoflanding_post_response
else:
func = self._parse_container_post_response
data = func(response, context, number_type, number, jsessionid)
return data
def _parse_billoflanding_post_response(self, response, context,
number_type, number, jsessionid):
soup = gen_resp_soup(response)
container_num = self._get_container_num(soup, number_type)
result = self.searchContainer(search_by='container',
number=container_num)
end_time = self._get_endtime(soup)
if end_time:
result['shipment_cycle'] = [{
'status': 'Empty Equipment Returned',
'time': end_time,
}]
return result
def _get_endtime(self, soup):
top = soup.find(id='containerInfoByBlNum')
rows = top.find(name='tbody').findChildren(name='tr')
return rows[-1].findChildren(attrs={'class': 'labelTextMyFocus'})[-1].getText()
def _parse_container_post_response(self, response, context,
number_type, number, jsessionid):
jsf_state = self._get_updated_value("javax.faces.ViewState", response)
soup = gen_resp_soup(response)
history = soup.find(id='cargoTrackingContainerHistory6')
rows = history.find(name='tbody').findChildren(name='tr')
shipment_cycle = self._get_shipment_cycle_info(rows,
jsf_state, context, number_type, number, jsessionid)
prv_rows = history.find(id='Cargohistory').find(name='tbody').findChildren(name='tr')
prv_shipment_cycle = self._get_shipment_cycle_info(prv_rows,
jsf_state, context, number_type, number, jsessionid,
current=False)
return {'container': self._get_container_info(soup, number_type),
'ports': self._get_ports_info(soup, number_type),
'shipment_cycle': shipment_cycle,
'prv_shipment_cycle': prv_shipment_cycle,
}
def _get_shipment_cycle_info(self, rows,
jsf_state, context, number_type, number, jsessionid,
current=True):
from common.utils import format_datetime
shipment_cycle = []
last_vessel_info = None
for row in rows:
cols = row.find_all(attrs={'class': 'labelTextMyFocus'})
if cols:
status = cols[0].getText()
location = cols[1].getText()
time = cols[2].getText()
mode = cols[3].getText().strip()
shipment = {
'status': status,
'location': location,
'time': format_datetime(time, 'Hongkong', 'UTC'),
'mode': mode,
}
a_tag = cols[3].find_parent(name='a')
if current and mode == 'Vessel' and a_tag:
a_id = a_tag.get('id')
data = self._get_common_post_data(number_type, number, jsf_state)
data['cntrNum'] = number
data['cntrStatus'] = status
data['containerHistorySize'] = len(rows)
data['containerSize'] = 1
data['issueTime'] = time
data[a_id] = a_id
data['javax.faces.partial.render'] = 'vesselInfoField'
data['javax.faces.source'] = a_id
data['numberType'] = number_type
post_response = self._post(context, data, number, number_type,
jsessionid, jsf_state)
jsf_state = self._get_updated_value("javax.faces.ViewState",
post_response)
vessel_html = self._get_updated_value("vesselInfoField",
post_response)
vessel_info = self._parse_vessel_info(vessel_html)
last_vessel_info = vessel_info
shipment.update(vessel_info)
if mode == '' and last_vessel_info:
shipment.update(last_vessel_info)
shipment_cycle.append(shipment)
return shipment_cycle
def _parse_vessel_info(self, html):
soup = BeautifulSoup(html)
info = soup.find(id='vesselInfoField_content')
rows = info.find(name='table').findChildren(name='tr')
vessel_info = {
'vessel_name': rows[0].find_all(name='td')[1].getText(),
'from_port': rows[1].find_all(name='td')[1].getText(),
'to_port': rows[3].find_all(name='td')[1].getText(),
}
return vessel_info
def _get_container_num(self, soup, number_type):
if number_type == 'CONTAINER':
top = soup.find(id='CargoTracking1') \
.find(attrs={'class': 'Containerkuang3'})
rows = top.find(name='table').findChildren(name='tr')
else:
top = soup.find(id='containerInfoByBlNum')
rows = top.find(name='tbody').findChildren(name='tr')
return rows[-1].find(attrs={'class': 'labelTextMyFocus'}).getText()
def _get_container_info(self, soup, number_type):
if number_type != 'CONTAINER': return {}
top = soup.find(id='CargoTracking1') \
.find(attrs={'class': 'Containerkuang3'})
rows = top.find(name='table').findChildren(name='tr')
result = rows[-1].find_all(attrs={'class': 'labelTextMyFocus'})
from common.utils import format_datetime
return {
'container_num': result[0].getText(),
'container_size': result[1].getText(),
'seal_no': result[2].getText(),
'location': result[3].getText(),
'status': result[4].getText(),
'datetime': format_datetime(result[5].getText(), 'Hongkong', 'UTC'),
}
def _get_ports_info(self, soup, number_type):
if number_type != 'CONTAINER': return {}
points = soup.find(id='cargopic1')
ports = {}
for _name, _text in [('por', '提空点'), ('fnd', '返空点'),
('first_pol', '始发港'), ('last_pod', '目的港')]:
p = points.find(name='div', text=_text)
if p and p.find_next():
ports[_name] = self._get_valid_port_name(p.find_next().text)
ports['ts_port'] = []
for p in points.findChildren(name='div', text='中转港'):
if p and p.find_next():
pname = self._get_valid_port_name(p.find_next().text)
if pname and (len(ports['ts_port']) == 0
or ports['ts_port'][-1] != pname):
ports['ts_port'].append(pname)
return ports
def _get_valid_port_name(self, text):
return text.encode('utf-8').split('\n')[0].replace('·', '').strip().split(',')[0]
def _get_jsf_state(self, soup):
state = soup.find(id='javax.faces.ViewState')
return state.get('value') if state else ''
def _get_updated_value(self, name, response):
ret = re.findall(
r'<update id="%s"><\!\[CDATA\[(.*?)\]\]></update>' % name,
response.text, re.M|re.S)
return ret[0]
def _get_jsessionid(self, response):
return response.cookies.get('JSESSIONID') or ''
def _execute(self, context, data=None, headers=None, cookies=None):
try:
api_url = settings.COSCON_CONTAINER_URL
if data:
data = urllib.urlencode(data)
response = context.post(api_url, data=data,
timeout=settings.THIRDPARTY_ACCESS_TIMEOUT,
headers=headers, cookies=cookies)
else:
response = context.get(api_url,
timeout=settings.THIRDPARTY_ACCESS_TIMEOUT)
return response
except Exception, e:
logging.error("Got exception when accessing third-party API "
"(url: %s) : %s", api_url, e, exc_info=True)
raise
def _get_common_post_data(self, number_type, number, jsf_state):
return {
'a10time1': '',
'a10time2': '',
'a11time1': '',
'a11time2': '',
'a12time': '',
'a13time': '',
'a2time': '',
'a3time': '',
'a5time1': '',
'a5time2': '',
'a7time1': '',
'a7time2': '',
'a9time1': '',
'a9time2': '',
'billRemark': '',
'bkRmark': '',
'bookingNumbers': '0',
'cargoTrackSearchId': number_type,
'cargoTrackingPara': number,
'cargoTrackingRedirect': 'false',
'cargoTrckingFindButton': 'cargoTrckingFindButton',
'cntrOrderType': '',
'cntrOrderType2': '',
'cntrRemark': '',
'containerNumberAll': '',
'containerNumberAllByBookingNumber': '',
'containerSize': '',
'containerTransportationMode': '',
'isbillOfLadingExist': 'false',
'isbookingNumberExist': 'false',
'j_idt1189': '',
'j_idt172': '',
'j_idt636': '',
'j_idt665': '',
'j_idt694': '',
'j_idt723': '',
'j_idt752': '',
'javax.faces.ViewState': jsf_state,
'javax.faces.partial.ajax': 'true',
'javax.faces.partial.execute': '@all',
'javax.faces.partial.render': 'bookingNumbers billToBookingGrop billofLading_Table3 release_Information_bill release_Information_booking cargoTrackingOrderBillInformation cargoTrackingBookingOfLadingInformation cargoTrackingContainerHistory cargoTrackingContainerInfoStatus cargoTrackingContainerBillOfLadingNumber1 cargoTrackingContainerInfoByContainerNumber release_Information_booking_version release_Information_bill_version actualLoadingInfo containerInfoByBlNum containerInfoByBkgNumTable actualLoadingInfo5 documentStatus cargoTrackingAcivePictures containerNumberAll containerInfo_table3 containerInfo_table4 cargoTrackingPrintByContainer containerNumberAllByBookingNumber registerUserValidate validateCargoTracking isbillOfLadingExist isbookingNumberExist cargoTrackingContainerPictureByContainer cargoTrackingContainerHistory1 cargoTrackingOrderBillMyFocus cargoTrackingBookingMyFocus userId contaienrNoExist billChange4 bookingChange4 bookingChange3 cargoTrackingContainerHistory6 numberType containerSize containerMessage containerTab isLogin cargoTrackingBillContainer cargoTrackingBillContainer1 BillMessage BookingMessage searchSuccess searchError containerTransportationMode',
'javax.faces.source': 'cargoTrckingFindButton',
'mainForm': 'mainForm',
'num': '0',
'num1': '0',
'num2': '0',
'num3': '0',
'num4': '0',
'num5': '0',
'num6': '0',
'numberType': '',
'onlyFirstAndLast': 'false',
'onlyFirstAndLast2': 'false',
'userId': '',
'validateCargoTracking': 'false',
}
|
def data_parser(data):
    print('Your name is {0}, you are {1} years old, and your username is {2}.'.format(data[0], data[1], data[2]))
    if 'y' in data[3]:
        # write the same summary line to disk; the with-statement closes the file
        with open('data_logger.txt', 'w') as f:
            f.write('Your name is {0}, you are {1} years old, and your username is {2}.'.format(data[0], data[1], data[2]))
def data_retriever():
    save_prompt = input('Save this data to disk, or no?\n>')
    name_input = input('What\'s your name?\n>')
    age_input = input('What\'s your age?\n>')
    reddit_username_input = input('What\'s your reddit username?\n>')
    return name_input, age_input, reddit_username_input, save_prompt
data_parser(data_retriever())
|
from settings import *
import os
PROJECT_HOME = os.path.dirname(os.path.dirname(__file__))
LOG_FOLDER=os.path.join(PROJECT_HOME, '..', 'logs')
if os.path.exists(os.path.join(PROJECT_HOME,"local_settings.py")):
from local_settings import *
LOG_LEVEL="DEBUG"
LOG_FILENAME="operations_management.log"
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(message)s'
},
'simple': {
'format': '%(asctime)s %(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'file':{
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename':os.path.join(LOG_FOLDER, LOG_FILENAME),
'formatter': 'verbose',
'maxBytes':604800,
'backupCount':50
}
},
'loggers': {
'django': {
'handlers':['mail_admins',],
'propagate': True,
'level':'DEBUG',
},
'django.db.backends.schema': {
'handlers':['null',],
'propagate': True,
'level':'INFO',
},
'': {
'handlers': ['file',],
'propagate': True,
'level': LOG_LEVEL
}
},
}
|
# hack to run multiple luigi tasks in one session
import luigi
import json
import argparse
from mc_luigi import LearnClassifierFromGt, PipelineParameter
from mc_luigi import MulticutSegmentation, BlockwiseMulticutSegmentation
def learn_rf():
ppl_parameter = PipelineParameter()
ppl_parameter.read_input_file('./inputs.json')
inputs = ppl_parameter.inputs
ppl_parameter.separateEdgeClassification = True
ppl_parameter.ignoreSegLabel = 0
ppl_parameter.useN5Backend = True
ppl_parameter.nTrees = 150
luigi.run([
"--local-scheduler",
"--pathsToSeg", json.dumps(inputs["seg"]),
"--pathsToGt", json.dumps(inputs["gt"])],
LearnClassifierFromGt
)
def mc():
ppl_parameter = PipelineParameter()
# read the json with paths to input files
ppl_parameter.read_input_file('./inputs.json')
inputs = ppl_parameter.inputs
# set the parameters
ppl_parameter.multicutWeightingScheme = "z"
ppl_parameter.separateEdgeClassification = True
ppl_parameter.multicutVerbose = False
ppl_parameter.ignoreSegLabel = 0
ppl_parameter.useN5Backend = True
luigi.run(
["--local-scheduler",
"--pathToSeg", inputs["seg"][0],
"--pathToClassifier", inputs["rf"]],
MulticutSegmentation
)
def blockwise_mc():
ppl_parameter = PipelineParameter()
# read the json with paths to input files
ppl_parameter.read_input_file('./inputs.json')
inputs = ppl_parameter.inputs
# set the parameters
ppl_parameter.multicutWeightingScheme = "z"
ppl_parameter.separateEdgeClassification = True
ppl_parameter.multicutVerbose = False
ppl_parameter.multicutBlockShape = [20, 256, 256]
ppl_parameter.multicutBlockOverlap = [5, 50, 50]
ppl_parameter.ignoreSegLabel = 0
ppl_parameter.useN5Backend = True
n_levels = 1
luigi.run(
["--local-scheduler",
"--pathToSeg", inputs["seg"][0],
"--pathToClassifier", inputs["rf"],
"--numberOfLevels", str(n_levels)],
BlockwiseMulticutSegmentation
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('task', type=str)
args = parser.parse_args()
task_fu = eval(args.task)
task_fu()
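# Usage sketch (the file name below is a placeholder; the task name must match
# one of the functions defined above):
#   python <this_file>.py learn_rf
#   python <this_file>.py mc
#   python <this_file>.py blockwise_mc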
if __name__ == '__main__':
main()
|
import os
import sys
import threading
import time
from multiprocessing import Pool
import requests
from pyramid.config import Configurator
from pyramid.response import Response
# begin chdir armor
up = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.dirname(up)
hendrix_app_dir = os.path.dirname(testdir)
hendrix_package_dir = os.path.dirname(hendrix_app_dir)
sys.path.insert(0, hendrix_package_dir)
# end chdir armor
from hendrix.deploy.base import HendrixDeploy
from zope.interface import provider
from twisted.logger import ILogObserver, formatEvent
@provider(ILogObserver)
def simpleObserver(event):
print(formatEvent(event))
# from twisted.logger import globalLogBeginner
# globalLogBeginner.beginLoggingTo([simpleObserver], redirectStandardIO=False)
def cpu_heavy(heft, label=None):
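    # Busy-work loop: iteratively accumulates Fibonacci-style sums for `heft`
    # iterations, printing elapsed time at the halfway point (when `heft` is
    # even) and again when finished.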
count = 0
previous_count = 0
start = 1
previous = start
one_before_that = 0
end = heft
timer_start = time.time()
while True:
new = previous + one_before_that
one_before_that = previous
previous = new
count += 1
if count == end / 2 and end % 2 == 0:
print("%s halfway: %s" % (label, time.time() - timer_start))
time.sleep(0)
if count == end:
print("%s done: %s" % (label, time.time() - timer_start))
return
global total_requests
global avg_duration
total_requests = 0
avg_duration = 0
def long():
global total_requests
global avg_duration
previous_duration = (total_requests * avg_duration)
total_requests += 1
timer_start = time.time()
thread = threading.current_thread()
print("Starting stream %s on %s" % (total_requests, thread.name))
# cpu_heavy(100000, label)
r = requests.get('http://localhost:8010/.25')
duration = time.time() - timer_start
print("Finished stream %s after %s" % (total_requests, duration))
avg_duration = float(previous_duration + duration) / float(total_requests)
print("Average duration after %s: %s" % (total_requests, avg_duration))
class PerformanceTest(object):
pool = Pool(20)
def view(self, request):
# option 1
# @crosstown_traffic()
# def wait():
# long()
# option 2
self.pool.apply_async(long)
return Response()
config = Configurator()
config.add_route('test_view', '/')
config.add_view(PerformanceTest().view, route_name='test_view')
app = config.make_wsgi_app()
if __name__ == '__main__':
deployer = HendrixDeploy(options={'wsgi': app})
deployer.run()
|
__author__ = 'peeyush'
from GCAM import FilesFolders
import timeit, sys, os
from GCAM import Fetch_pmids
import pandas as pd
def check_database_update(annoDB, cellDB, resource_path):
'''
This function checks for update in annotation db and celltype db.
If found then delete gene_occu_db.
:param annoDB:
:param cellDB:
:return:
'''
annoDB_size = len(annoDB)
cellDB_size = len(cellDB)
size = []
try:
file = open(resource_path + os.path.sep + 'db_version', 'r')
for line in file:
size.append(int(line.split(':')[1]))
#print 'check_database', int(line.split(':')[1])
file.close()
if annoDB_size not in size or cellDB_size not in size:
os.remove(resource_path + os.path.sep + 'gene_occu_db.csv')
file = open(resource_path + os.path.sep + 'db_version', 'w')
lines = ['annoDB:' + str(annoDB_size), '\ncellDB:' + str(cellDB_size)]
file.writelines(lines)
file.close()
except:
file = open(resource_path + os.path.sep + 'db_version', 'w')
lines = ['annoDB:' + str(annoDB_size), '\ncellDB:' + str(cellDB_size)]
file.writelines(lines)
file.close()
def check_old_analysed_genes(genenames, dataframe):
'''
This function will check if the gene has already been analysed and retrieve its occurrence.
:param genenames:
:param resource_path:
:return:
'''
#dataframe = FilesFolders.read_previous_occurrence_table(resource_path)
new_genelist = []
has_genes = []
if not dataframe is None:
for gene in genenames:
if gene not in dataframe.columns:
new_genelist.append(gene)
if gene in dataframe.columns:
has_genes.append(gene)
foundgenes_df = dataframe[has_genes]
return new_genelist, foundgenes_df
def occurrence_df(genenames, resource_path, subquery):
'''
This function will prepare the occurrence df for genes.
:param genenames:
:param resource_path:
:return:
'''
annDB = FilesFolders.read_annotation_database(resource_path)
cellDB = FilesFolders.celltype_DB(resource_path)
check_database_update(annDB, cellDB, resource_path)
print ('Checking for pre analysed genes....')
dataframe, created = FilesFolders.read_previous_occurrence_table(resource_path)
join = False
if dataframe is not None:
new_genenames, foundgenes_df = check_old_analysed_genes(genenames, dataframe)
if len(new_genenames) > 0: join = True
else:
foundgenes_df = pd.DataFrame()
new_genenames = genenames
print ('Reading required DBs')
occuDF = cellDB
total_abstract = 0
abs_in_DB = 0
count = 0 + len(new_genenames)
for gene in new_genenames:
sys.stdout.write("\rGenes remain for analyse:%d" % count)
sys.stdout.flush()
#print gene
GeneObj = Fetch_pmids.Genes(gene=gene, subquery=subquery, resource_path=resource_path)
GeneObj.get_pmids()
total_abstract += len(GeneObj.pmids) # calcuate total no of abstracts
GeneObj.get_pmid_pos(annoDB=annDB)
abs_in_DB += len(GeneObj.cellinpmid)
occuDF = GeneObj.get_occurrence(cellDB=occuDF)
count -= 1
    occuDF = joincellsynonym(occuDF, resource_path)  # keep the synonym-merged dataframe returned by the helper
if not created:
        occuDF.to_csv(resource_path + os.path.sep + 'gene_occu_db.csv', sep=',')
if join:
print("\nUpdating gene occurrence db....")
update_dataframe = pd.concat([occuDF.drop(['celltype'], axis=1), dataframe], axis=1)
        update_dataframe.to_csv(resource_path + os.path.sep + 'gene_occu_db.csv', sep=',')
occuDF = pd.concat([occuDF, foundgenes_df], axis=1)
return occuDF
def joincellsynonym(celloccu, resource_path):
'''
Join multiple cell synonym to one.
:param celloccu:
:param cellSyn:
:return:
'''
cellSyn = FilesFolders.cell_synonym(resource_path)
colname = celloccu.columns.values.tolist()
indexRem = []
#print celloccu
for k, v in cellSyn.iterrows():
index = celloccu.celltype[celloccu.celltype == v['cell'].lower()].index.tolist()[0]
for cell in v['synonyms'].split(','):
#print cell
indexsyn = celloccu.celltype[celloccu.celltype == cell.lower()].index.tolist()[0]
indexRem.append(indexsyn)
## adding synonym
for col in colname:
if col != 'celltype' and col != 'Unnamed: 0':
celloccu.loc[index, col] = celloccu.loc[index, col] + celloccu.loc[indexsyn, col]
celloccu = celloccu.drop(celloccu.index[indexRem])
#print celloccu
return celloccu |
class ConfigurationError(Exception):
"""Error raised when a class constructor has not been initialized correctly."""
pass
class ExecutionProviderException(Exception):
""" Base class for all exceptions
Only to be invoked when only a more specific error is not available.
"""
pass
class SchedulerMissingArgs(ExecutionProviderException):
''' Error raised when the template used to compose the submit script to the local resource manager is missing required arguments
'''
def __init__(self, missing_keywords, sitename):
self.missing_keywords = missing_keywords
self.sitename = sitename
def __repr__(self):
return "SchedulerMissingArgs: Pool:{0} Arg:{1}".format(self.sitename, self.missing_keywords)
class ScriptPathError(ExecutionProviderException):
    ''' Error raised when the submit script cannot be written to the given script path
'''
def __init__(self, script_path, reason):
self.script_path = script_path
self.reason = reason
def __repr__(self):
return "Unable to write submit script:{0} Reason:{1}".format(self.script_path, self.reason)
class BadLauncher(ExecutionProviderException):
    ''' Error raised when a non-callable object is provided as Launcher
'''
def __init__(self, launcher, reason):
self.launcher = launcher
self.reason = reason
def __repr__(self):
return "Bad Launcher provided:{0} Reason:{1}".format(self.launcher, self.reason)
class OptionalModuleMissing(ExecutionProviderException):
    ''' Error raised when a required module is missing for an optional/extra provider
'''
def __init__(self, module_names, reason):
self.module_names = module_names
self.reason = reason
def __repr__(self):
return "Unable to Initialize provider.Missing:{0}, Reason:{1}".format(
self.module_names, self.reason
)
class ChannelRequired(ExecutionProviderException):
''' Execution provider requires a channel.
'''
def __init__(self, provider, reason):
self.provider = provider
self.reason = reason
def __repr__(self):
return "Unable to Initialize provider.Provider:{0}, Reason:{1}".format(
self.provider, self.reason
)
class ScaleOutFailed(ExecutionProviderException):
''' Generic catch. Scale out failed in the submit phase on the provider side
'''
def __init__(self, provider, reason):
self.provider = provider
self.reason = reason
def __repr__(self):
return "Unable to Initialize provider.Provider:{0}, Reason:{1}".format(
self.provider, self.reason
)
|
# -*- coding:utf-8 -*-
"""
Description:
Contract Parameter Type in neo.Wallets
Usage:
from neo.SmartContract.ContractParameterType import ContractParameterType
"""
class ContractParameterType(object):
    Signature = 0x00 # signature
    Boolean = 0x01
    Integer = 0x02 # integer
    Hash160 = 0x03 # 160-bit hash value
    Hash256 = 0x04 # 256-bit hash value
    ByteArray = 0x05 # byte array
PublicKey = 0x06
String = 0x07
Array = 0x10
Void = 0xff
import inspect
def ToName(param_type):
items = inspect.getmembers(ContractParameterType)
if type(param_type) is bytes:
param_type = int.from_bytes(param_type, 'little')
for item in items:
name = item[0]
val = int(item[1])
if val == param_type:
return name
return None
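# Minimal usage sketch (assumes the import path shown in the module docstring):
# ToName maps a raw parameter-type value (int or little-endian bytes) back to
# its symbolic member name, e.g.
#   ToName(0x05)     # -> 'ByteArray'
#   ToName(b'\x02')  # -> 'Integer'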
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
Instrucciones
1. Elevar al cuadrado una lista dada por el usuario de 10 elementos,
se debe de mostrar la lista original y la lista elevada al cuadrado.
Ej.
a = [1,2,3,4,5,6,7,8,9,10]
R = [1,4,9,16,25,36,49,64,81,100]
"""
valores = []
print("Introduce un valor entero númerico \nSi deseas terminar con el proceso ingresa *no*")
while True:
valor = input("Introduce un valor: ")
if valor == "no":
break
else:
valor = int(valor)
valores.append(valor)
print("Valores originales: \n", valores)
print("Valores al cuadrado: ")
print([x**2 for x in valores])
|
import boto3
import os
import json
def lambda_handler(message, context):
if ('body' not in message or
message['httpMethod'] != 'PUT'):
return {
'statusCode': 400,
'headers': {},
'body': json.dumps({'msg': 'Bad Request'})
}
table_name = os.environ.get('TABLE', 'Users')
region = os.environ.get('REGION', 'us-east-1')
aws_environment = os.environ.get('AWSENV', 'AWS')
if aws_environment == 'AWS_SAM_LOCAL':
users_table = boto3.resource(
'dynamodb',
endpoint_url='http://dynamodb:8000'
)
else:
users_table = boto3.resource(
'dynamodb',
region_name=region
)
table = users_table.Table(table_name)
user = json.loads(message['body'])
params = {
'id': user['id'],
'date': user['date']
}
response = table.update_item(
Key=params,
UpdateExpression="set stage = :s, description = :d",
ExpressionAttributeValues={
':s': user['stage'],
':d': user['description']
},
ReturnValues="UPDATED_NEW"
)
print(response)
return {
'statusCode': 200,
'headers': {},
'body': json.dumps({'msg': 'User updated'})
}
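# Example invocation payload (sketch with hypothetical values) for this handler
# behind an API Gateway proxy integration; only 'httpMethod' and 'body' are read:
#   {
#     "httpMethod": "PUT",
#     "body": "{\"id\": \"u-123\", \"date\": \"2020-01-01\", \"stage\": \"active\", \"description\": \"example\"}"
#   }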
|
from collections import defaultdict
from helpers import get_input
def main() -> int:
bin_strings = [row.strip() for row in get_input(3)]
N = len(bin_strings)
bits = len(bin_strings[0])
    sums = [sum(int(string[i]) for string in bin_strings) for i in range(bits)]
gamma = int("".join(str(int(sum >= N/2)) for sum in sums), 2)
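    # epsilon is the bitwise complement of gamma restricted to `bits` bits:
    # XOR with the all-ones mask (2**bits - 1) flips exactly those bits.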
epsilon = gamma ^ (2 ** bits - 1)
return gamma * epsilon
def main2() -> int:
bin_strings = [row.strip() for row in get_input(3)]
def rating(bin_strings, function):
n = 0
while len(bin_strings) > 1:
d = defaultdict(list)
for string in bin_strings:
d[string[n]].append(string)
n += 1
bin_strings = function(d)
return int(bin_strings[0], 2)
oxygen_function = lambda d: max(d["1"], d["0"], key=lambda x: len(x))
oxygen = rating(bin_strings, oxygen_function)
co2_function = lambda d: min(d["0"], d["1"], key=lambda x: len(x))
co2 = rating(bin_strings, co2_function)
return oxygen * co2
if __name__ == "__main__":
print(main())
print(main2())
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:[email protected])
import datetime
import json
import logging
import requests
import time
import typing
from base64 import b64decode
from decimal import Decimal
from solana.blockhash import Blockhash, BlockhashCache
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.rpc.api import Client
from solana.rpc.commitment import Commitment
from solana.rpc.providers.http import HTTPProvider
from solana.rpc.types import DataSliceOpts, MemcmpOpts, RPCMethod, RPCResponse, TokenAccountOpts, TxOpts
from solana.transaction import Transaction
from .constants import SOL_DECIMAL_DIVISOR
from .instructionreporter import InstructionReporter
from .logmessages import expand_log_messages
# # 🥭 ClientException class
#
# A `ClientException` exception base class that allows trapping and handling rate limiting
# independent of other error handling.
#
class ClientException(Exception):
def __init__(self, message: str, name: str, cluster_url: str) -> None:
super().__init__(message)
self.message: str = message
self.name: str = name
self.cluster_url: str = cluster_url
def __str__(self) -> str:
return f"« {type(self)} '{self.message}' from '{self.name}' on {self.cluster_url} »"
def __repr__(self) -> str:
return f"{self}"
# # 🥭 RateLimitException class
#
# A `RateLimitException` exception base class that allows trapping and handling rate limiting
# independent of other error handling.
#
class RateLimitException(ClientException):
pass
# # 🥭 TooMuchBandwidthRateLimitException class
#
# A `TooMuchBandwidthRateLimitException` exception that specialises the `RateLimitException`
# for when too much bandwidth has been consumed.
#
class TooMuchBandwidthRateLimitException(RateLimitException):
pass
# # 🥭 TooManyRequestsRateLimitException class
#
# A `TooManyRequestsRateLimitException` exception that specialises the `RateLimitException`
# for when too many requests have been sent in a short time.
#
class TooManyRequestsRateLimitException(RateLimitException):
pass
# # 🥭 BlockhashNotFoundException class
#
# A `BlockhashNotFoundException` exception allows trapping and handling exceptions when a blockhash is sent that
# the node doesn't understand. This can happen when the blockhash is too old (and the node no longer
# considers it 'recent') or when it's too new (and hasn't yet made it to the node that is responding).
#
class BlockhashNotFoundException(ClientException):
def __init__(self, name: str, cluster_url: str, blockhash: typing.Optional[Blockhash] = None) -> None:
message: str = f"Blockhash '{blockhash}' not found on {cluster_url}."
super().__init__(message, name, cluster_url)
self.blockhash: typing.Optional[Blockhash] = blockhash
def __str__(self) -> str:
return f"« BlockhashNotFoundException '{self.name}' [{self.blockhash}] on {self.cluster_url} »"
# # 🥭 NodeIsBehindException class
#
# A `NodeIsBehindException` exception allows trapping and handling exceptions when a node is behind by too
# many slots.
#
class NodeIsBehindException(ClientException):
def __init__(self, name: str, cluster_url: str, slots_behind: int) -> None:
message: str = f"Node is behind by {slots_behind} slots."
super().__init__(message, name, cluster_url)
self.slots_behind: int = slots_behind
def __str__(self) -> str:
return f"« NodeIsBehindException '{self.name}' [behind by {self.slots_behind} slots] on {self.cluster_url} »"
# # 🥭 FailedToFetchBlockhashException class
#
# A `FailedToFetchBlockhashException` exception allows trapping and handling exceptions when we fail
# to fetch a recent or distinct blockhash.
#
class FailedToFetchBlockhashException(ClientException):
def __init__(self, message: str, name: str, cluster_url: str, pauses: typing.Sequence[float]) -> None:
super().__init__(message, name, cluster_url)
self.pauses: typing.Sequence[float] = pauses
def __str__(self) -> str:
if len(self.pauses) == 0:
return f"« FailedToFetchBlockhashException '{self.name}' Failed to get recent blockhash on {self.cluster_url} »"
pauses_text = ",".join(f"{pause}" for pause in self.pauses[:-1])
return f"« FailedToFetchBlockhashException '{self.name}' Failed to get a fresh, recent blockhash after {len(self.pauses)} attempts - paused {pauses_text} seconds between attempts on {self.cluster_url} »"
# # 🥭 TransactionException class
#
# A `TransactionException` exception that can provide additional error data, or at least better output
# of problems at the right place.
#
class TransactionException(ClientException):
def __init__(self, transaction: typing.Optional[Transaction], message: str, code: int, name: str, cluster_url: str, rpc_method: str, request_text: str, response_text: str, accounts: typing.Union[str, typing.List[str], None], errors: typing.Union[str, typing.List[str], None], logs: typing.Union[str, typing.List[str], None], instruction_reporter: InstructionReporter = InstructionReporter()) -> None:
super().__init__(message, name, cluster_url)
self.transaction: typing.Optional[Transaction] = transaction
self.code: int = code
self.rpc_method: str = rpc_method
self.request_text: str = request_text
self.response_text: str = response_text
def _ensure_list(item: typing.Union[str, typing.List[str], None]) -> typing.List[str]:
if item is None:
return []
if isinstance(item, str):
return [item]
if isinstance(item, list):
return item
return [f"{item}"]
self.accounts: typing.Sequence[str] = _ensure_list(accounts)
self.errors: typing.Sequence[str] = _ensure_list(errors)
self.logs: typing.Sequence[str] = expand_log_messages(_ensure_list(logs))
self.instruction_reporter: InstructionReporter = instruction_reporter
def __str__(self) -> str:
request_details: str = ""
response_details: str = ""
if logging.DEBUG >= logging.root.level:
request_details = f"""
Request:
{self.request_text}"""
response_details = f"""
Response:
{self.response_text}"""
transaction_details = ""
if self.transaction is not None:
instruction_details = "\n".join(list(map(self.instruction_reporter.report, self.transaction.instructions)))
transaction_details = "\n Instructions:\n " + instruction_details.replace("\n", "\n ")
accounts = "No Accounts"
if len(self.accounts) > 0:
accounts = "\n ".join([f"{item}".replace("\n", "\n ") for item in self.accounts])
errors = "No Errors"
if len(self.errors) > 0:
errors = "\n ".join([f"{item}".replace("\n", "\n ") for item in self.errors])
logs = "No Logs"
if len(self.logs) > 0:
logs = "\n ".join([f"{item}".replace("\n", "\n ") for item in self.logs])
return f"""« 𝚃𝚛𝚊𝚗𝚜𝚊𝚌𝚝𝚒𝚘𝚗𝙴𝚡𝚌𝚎𝚙𝚝𝚒𝚘𝚗 in '{self.name}' [{self.rpc_method}]: {self.code}:: {self.message}{transaction_details}
Accounts:
{accounts}
Errors:
{errors}
Logs:
{logs}{request_details}{response_details}
»"""
def __repr__(self) -> str:
return f"{self}"
UnspecifiedCommitment = Commitment("unspecified")
UnspecifiedEncoding = "unspecified"
# # 🥭 ErrorHandlingProvider class
#
# A `ErrorHandlingProvider` extends the HTTPProvider with better error handling.
#
class ErrorHandlingProvider(HTTPProvider):
def __init__(self, name: str, cluster_url: str, instruction_reporter: InstructionReporter):
super().__init__(cluster_url)
self.name: str = name
self.cluster_url: str = cluster_url
self.instruction_reporter: InstructionReporter = instruction_reporter
def make_request(self, method: RPCMethod, *params: typing.Any) -> RPCResponse:
# This is the entire method in HTTPProvider that we're overriding here:
#
# """Make an HTTP request to an http rpc endpoint."""
# request_kwargs = self._before_request(method=method, params=params, is_async=False)
# raw_response = requests.post(**request_kwargs)
# return self._after_request(raw_response=raw_response, method=method)
request_kwargs = self._before_request(method=method, params=params, is_async=False)
raw_response = requests.post(**request_kwargs)
# Some custom exceptions specifically for rate-limiting. This allows calling code to handle this
# specific case if they so choose.
#
# "You will see HTTP respose codes 429 for too many requests or 413 for too much bandwidth."
if raw_response.status_code == 413:
raise TooMuchBandwidthRateLimitException(
f"Rate limited (too much bandwidth) calling method '{method}'.", self.name, self.cluster_url)
elif raw_response.status_code == 429:
raise TooManyRequestsRateLimitException(
f"Rate limited (too many requests) calling method '{method}'.", self.name, self.cluster_url)
# Not a rate-limit problem, but maybe there was some other error?
raw_response.raise_for_status()
# All seems OK, but maybe the server returned an error? If so, try to pass on as much
# information as we can.
response_text: str = raw_response.text
response: typing.Dict[str, typing.Any] = json.loads(response_text)
if "error" in response:
if response["error"] is str:
message: str = typing.cast(str, response["error"])
raise ClientException(f"Transaction failed: '{message}'", self.name, self.cluster_url)
else:
error = response["error"]
error_message: str = error["message"] if "message" in error else "No message"
error_data: typing.Dict[str, typing.Any] = error["data"] if "data" in error else {}
error_accounts = error_data["accounts"] if "accounts" in error_data else "No accounts"
error_code: int = error["code"] if "code" in error else -1
error_err = error_data["err"] if "err" in error_data else "No error text returned"
error_logs = error_data["logs"] if "logs" in error_data else "No logs"
parameters = json.dumps({"jsonrpc": "2.0", "method": method, "params": params})
transaction: typing.Optional[Transaction] = None
blockhash: typing.Optional[Blockhash] = None
if method == "sendTransaction":
transaction = Transaction.deserialize(b64decode(params[0]))
blockhash = transaction.recent_blockhash
if error_code == -32005:
slots_behind: int = error["data"]["numSlotsBehind"] if "numSlotsBehind" in error["data"] else -1
raise NodeIsBehindException(self.name, self.cluster_url, slots_behind)
if error_err == "BlockhashNotFound":
raise BlockhashNotFoundException(self.name, self.cluster_url, blockhash)
exception_message: str = f"Transaction failed with: '{error_message}'"
raise TransactionException(transaction, exception_message, error_code, self.name,
self.cluster_url, method, parameters, response_text, error_accounts,
error_err, error_logs, self.instruction_reporter)
# The call succeeded.
return typing.cast(RPCResponse, response)
class BetterClient:
def __init__(self, client: Client, name: str, cluster_name: str, cluster_url: str, commitment: Commitment, skip_preflight: bool, encoding: str, blockhash_cache_duration: int, instruction_reporter: InstructionReporter) -> None:
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.compatible_client: Client = client
self.name: str = name
self.cluster_name: str = cluster_name
self.cluster_url: str = cluster_url
self.commitment: Commitment = commitment
self.skip_preflight: bool = skip_preflight
self.encoding: str = encoding
self.blockhash_cache_duration: int = blockhash_cache_duration
self.instruction_reporter: InstructionReporter = instruction_reporter
# kangda said in Discord: https://discord.com/channels/791995070613159966/836239696467591186/847816026245693451
# "I think you are better off doing 4,8,16,20,30"
self.retry_pauses: typing.Sequence[Decimal] = [Decimal(4), Decimal(
8), Decimal(16), Decimal(20), Decimal(30)]
@staticmethod
def from_configuration(name: str, cluster_name: str, cluster_url: str, commitment: Commitment, skip_preflight: bool, encoding: str, blockhash_cache_duration: int, instruction_reporter: InstructionReporter) -> "BetterClient":
provider: HTTPProvider = ErrorHandlingProvider(name, cluster_url, instruction_reporter)
blockhash_cache: typing.Union[BlockhashCache, bool] = False
if blockhash_cache_duration > 0:
blockhash_cache = BlockhashCache(blockhash_cache_duration)
client: Client = Client(cluster_url, commitment=commitment, blockhash_cache=blockhash_cache)
client._provider = provider
return BetterClient(client, name, cluster_name, cluster_url, commitment, skip_preflight, encoding, blockhash_cache_duration, instruction_reporter)
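    # Construction sketch (hypothetical endpoint and settings, using only names
    # already imported in this module):
    #   client = BetterClient.from_configuration(
    #       "primary", "devnet", "https://api.devnet.solana.com",
    #       Commitment("processed"), skip_preflight=False, encoding="base64",
    #       blockhash_cache_duration=10, instruction_reporter=InstructionReporter())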
def get_balance(self, pubkey: typing.Union[PublicKey, str], commitment: Commitment = UnspecifiedCommitment) -> Decimal:
resolved_commitment, _ = self.__resolve_defaults(commitment)
response = self.compatible_client.get_balance(pubkey, resolved_commitment)
value = Decimal(response["result"]["value"])
return value / SOL_DECIMAL_DIVISOR
def get_account_info(self, pubkey: typing.Union[PublicKey, str], commitment: Commitment = UnspecifiedCommitment,
encoding: str = UnspecifiedEncoding, data_slice: typing.Optional[DataSliceOpts] = None) -> typing.Any:
resolved_commitment, resolved_encoding = self.__resolve_defaults(commitment, encoding)
response = self.compatible_client.get_account_info(pubkey, resolved_commitment, resolved_encoding, data_slice)
return response["result"]
def get_confirmed_signatures_for_address2(self, account: typing.Union[str, Keypair, PublicKey], before: typing.Optional[str] = None, until: typing.Optional[str] = None, limit: typing.Optional[int] = None) -> typing.Sequence[str]:
response = self.compatible_client.get_confirmed_signature_for_address2(account, before, until, limit)
return [result["signature"] for result in response["result"]]
def get_confirmed_transaction(self, signature: str, encoding: str = "json") -> typing.Any:
_, resolved_encoding = self.__resolve_defaults(None, encoding)
response = self.compatible_client.get_confirmed_transaction(signature, resolved_encoding)
return response["result"]
def get_minimum_balance_for_rent_exemption(self, size: int, commitment: Commitment = UnspecifiedCommitment) -> int:
resolved_commitment, _ = self.__resolve_defaults(commitment)
response = self.compatible_client.get_minimum_balance_for_rent_exemption(size, resolved_commitment)
return int(response["result"])
def get_program_accounts(self, pubkey: typing.Union[str, PublicKey],
commitment: Commitment = UnspecifiedCommitment,
encoding: typing.Optional[str] = UnspecifiedEncoding,
data_slice: typing.Optional[DataSliceOpts] = None,
data_size: typing.Optional[int] = None,
memcmp_opts: typing.Optional[typing.List[MemcmpOpts]] = None) -> typing.Any:
resolved_commitment, resolved_encoding = self.__resolve_defaults(commitment, encoding)
response = self.compatible_client.get_program_accounts(
pubkey, resolved_commitment, resolved_encoding, data_slice, data_size, memcmp_opts)
return response["result"]
def get_recent_blockhash(self, commitment: Commitment = UnspecifiedCommitment) -> Blockhash:
resolved_commitment, _ = self.__resolve_defaults(commitment)
response = self.compatible_client.get_recent_blockhash(resolved_commitment)
return Blockhash(response["result"]["value"]["blockhash"])
def get_token_account_balance(self, pubkey: typing.Union[str, PublicKey], commitment: Commitment = UnspecifiedCommitment) -> Decimal:
resolved_commitment, _ = self.__resolve_defaults(commitment)
response = self.compatible_client.get_token_account_balance(pubkey, resolved_commitment)
value = Decimal(response["result"]["value"]["amount"])
decimal_places = response["result"]["value"]["decimals"]
divisor = Decimal(10 ** decimal_places)
return value / divisor
def get_token_accounts_by_owner(self, owner: PublicKey, token_account_options: TokenAccountOpts, commitment: Commitment = UnspecifiedCommitment,) -> typing.Any:
resolved_commitment, _ = self.__resolve_defaults(commitment)
response = self.compatible_client.get_token_accounts_by_owner(owner, token_account_options, resolved_commitment)
return response["result"]["value"]
def get_multiple_accounts(self, pubkeys: typing.List[typing.Union[PublicKey, str]], commitment: Commitment = UnspecifiedCommitment,
encoding: str = UnspecifiedEncoding, data_slice: typing.Optional[DataSliceOpts] = None) -> typing.Any:
resolved_commitment, resolved_encoding = self.__resolve_defaults(commitment, encoding)
response = self.compatible_client.get_multiple_accounts(
pubkeys, resolved_commitment, resolved_encoding, data_slice)
return response["result"]["value"]
def send_transaction(self, transaction: Transaction, *signers: Keypair, opts: TxOpts = TxOpts(preflight_commitment=UnspecifiedCommitment)) -> str:
proper_commitment: Commitment = opts.preflight_commitment
if proper_commitment == UnspecifiedCommitment:
proper_commitment = self.commitment
proper_opts = TxOpts(preflight_commitment=proper_commitment,
skip_confirmation=opts.skip_confirmation,
skip_preflight=opts.skip_preflight)
response = self.compatible_client.send_transaction(transaction, *signers, opts=proper_opts)
return str(response["result"])
def wait_for_confirmation(self, transaction_ids: typing.Sequence[str], max_wait_in_seconds: int = 60) -> typing.Sequence[str]:
self.logger.info(f"Waiting up to {max_wait_in_seconds} seconds for {transaction_ids}.")
all_confirmed: typing.List[str] = []
start_time: datetime.datetime = datetime.datetime.now()
cutoff: datetime.datetime = start_time + datetime.timedelta(seconds=max_wait_in_seconds)
for transaction_id in transaction_ids:
while datetime.datetime.now() < cutoff:
time.sleep(1)
confirmed = self.get_confirmed_transaction(transaction_id)
if confirmed is not None:
self.logger.info(
f"Confirmed {transaction_id} after {datetime.datetime.now() - start_time} seconds.")
all_confirmed += [transaction_id]
break
        if len(all_confirmed) != len(transaction_ids):
            unconfirmed = [tx for tx in transaction_ids if tx not in all_confirmed]
            self.logger.info(f"Timed out after {max_wait_in_seconds} seconds waiting on transactions: {unconfirmed}.")
        return all_confirmed
def __resolve_defaults(self, commitment: typing.Optional[Commitment], encoding: typing.Optional[str] = None) -> typing.Tuple[Commitment, str]:
if commitment is None or commitment == UnspecifiedCommitment:
commitment = self.commitment
if encoding is None or encoding == UnspecifiedEncoding:
encoding = self.encoding
return commitment, encoding
def __str__(self) -> str:
return f"« 𝙱𝚎𝚝𝚝𝚎𝚛𝙲𝚕𝚒𝚎𝚗𝚝 [{self.cluster_name}]: {self.cluster_url} »"
def __repr__(self) -> str:
return f"{self}"
|
# coding=utf-8
from __future__ import unicode_literals
import django
from ..compat import patterns
if django.VERSION < (1, 6):
from django.conf.urls.defaults import url, include
else:
from django.conf.urls import url, include
urlpatterns = patterns(
url(r'viewlet/', include('viewlet.urls'))
)
|
import logging
import multiprocessing
import time
import rdkit
from rdkit.Chem.QED import qed
from sqlalchemy import create_engine
# logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def construct_problem():
# We have to delay all importing of tensorflow until the child processes launch,
# see https://github.com/tensorflow/tensorflow/issues/8220. We should be more careful about where / when we
# import tensorflow, especially if there's a chance we'll use tf.serving to do the policy / reward evaluations on
# the workers. Might require upstream changes to nfp as well.
from rlmolecule.tree_search.reward import RankedRewardFactory
from rlmolecule.molecule.molecule_config import MoleculeConfig
from rlmolecule.molecule.molecule_problem import MoleculeTFAlphaZeroProblem
from rlmolecule.molecule.molecule_state import MoleculeState
class QEDOptimizationProblem(MoleculeTFAlphaZeroProblem):
def __init__(self,
engine: 'sqlalchemy.engine.Engine',
config: 'MoleculeConfig', **kwargs) -> None:
super(QEDOptimizationProblem, self).__init__(engine, config, **kwargs)
self._config = config
def get_initial_state(self) -> MoleculeState:
return MoleculeState(rdkit.Chem.MolFromSmiles('C'), self._config)
def get_reward(self, state: MoleculeState) -> (float, {}):
if state.forced_terminal:
return qed(state.molecule), {'forced_terminal': True, 'smiles': state.smiles}
return 0.0, {'forced_terminal': False, 'smiles': state.smiles}
config = MoleculeConfig(max_atoms=25,
min_atoms=1,
tryEmbedding=True,
sa_score_threshold=4.,
stereoisomers=False)
    engine = create_engine('sqlite:///qed_data.db',
                           connect_args={'check_same_thread': False},
                           execution_options={"isolation_level": "AUTOCOMMIT"})
run_id = 'qed_example'
reward_factory = RankedRewardFactory(
engine=engine,
run_id=run_id,
reward_buffer_min_size=10,
reward_buffer_max_size=50,
ranked_reward_alpha=0.75
)
problem = QEDOptimizationProblem(
engine,
config,
run_id=run_id,
reward_class=reward_factory,
features=8,
num_heads=2,
num_messages=1,
min_buffer_size=15,
policy_checkpoint_dir='policy_checkpoints'
)
return problem
def run_games():
from rlmolecule.alphazero.alphazero import AlphaZero
game = AlphaZero(construct_problem())
while True:
path, reward = game.run(num_mcts_samples=50)
logger.info(f'Game Finished -- Reward {reward.raw_reward:.3f} -- Final state {path[-1][0]}')
def train_model():
construct_problem().train_policy_model(steps_per_epoch=100,
game_count_delay=20,
verbose=2)
def monitor():
from rlmolecule.sql.tables import RewardStore
problem = construct_problem()
while True:
best_reward = problem.session.query(RewardStore) \
.filter_by(run_id=problem.run_id) \
.order_by(RewardStore.reward.desc()).first()
num_games = len(list(problem.iter_recent_games()))
if best_reward:
print(f"Best Reward: {best_reward.reward:.3f} for molecule "
f"{best_reward.data['smiles']} with {num_games} games played")
time.sleep(5)
if __name__ == "__main__":
jobs = [multiprocessing.Process(target=monitor)]
jobs[0].start()
time.sleep(1)
for i in range(5):
jobs += [multiprocessing.Process(target=run_games)]
jobs += [multiprocessing.Process(target=train_model)]
for job in jobs[1:]:
job.start()
for job in jobs:
job.join(300)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import jreadability_feedback as jf
import unittest
import tempfile
import json
class AppTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, jf.app.config['DATABASE'] = tempfile.mkstemp()
jf.app.config['TESTING'] = True
self.app = jf.app.test_client()
jf.init_db()
def tearDown(self):
os.close(self.db_fd)
os.unlink(jf.app.config['DATABASE'])
def login(self, username, password):
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
def view(self):
return self.app.get('/view', follow_redirects=True)
def test_login(self):
rv = self.app.get('/login')
assert u'ユーザ名' in rv.data.decode('utf-8')
def test_empty_db(self):
rv = self.login('admin', 'default')
rv = self.app.get('/view')
assert u'ありません' in rv.data.decode('utf-8')
def test_login_logout(self):
rv = self.login('admin', 'default')
assert u'ログイン' in rv.data.decode('utf-8')
rv = self.logout()
assert u'ログアウト' in rv.data.decode('utf-8')
rv = self.login('adminx', 'default')
assert u'無効です' in rv.data.decode('utf-8')
rv = self.login('admin', 'defaultx')
assert u'無効です' in rv.data.decode('utf-8')
def test_messages(self):
self.login('admin', 'default')
rv = self.app.post('/post', data=json.dumps(dict(
original_text=u'ものは試し',
evaluation=5,
grade=4,
)), follow_redirects=True)
assert rv.status_code == 204
rv = self.view()
assert u'ありません' not in rv.data.decode('utf-8')
assert u'ものは試し' in rv.data.decode('utf-8')
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python3
import re, sys
from Bio import SeqIO
def read_gff(infile):
f = open(infile, 'r')
gff_dict = dict()
trans_dict = dict()
for line in f:
line = line.strip()
if line.startswith('#'): continue
content = line.split()
if content[2] == "transcript":
gid = re.search(r'source_gene=(.*?);', content[8]).group(1)
tid = re.search(r'transcript_id=(.*?);', content[8]).group(1)
gff_dict[tid] = [content[0], content[3]]
trans_dict[tid] = gid
return gff_dict, trans_dict
def run(fastafile, gff_file):
gff_dict, trans_dict = read_gff(gff_file)
f = open(fastafile, 'r')
for record in SeqIO.parse(f, 'fasta'):
seqid = record.id
sequence = record.seq
for index, base in enumerate(sequence):
if base == ".":
relative_start = index * 3
start_pos = int(gff_dict[seqid][1]) + relative_start - 1
print(gff_dict[seqid][0] + "\t" + str(start_pos) + "\t" + str(start_pos + 3) + "\t" + seqid + ":" + trans_dict[seqid])
f.close()
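# Worked example of the coordinate arithmetic above (illustrative values only):
# if a transcript starts at genomic position 1000 (GFF column 4, 1-based) and a
# "." sits at protein index 2, then relative_start = 2 * 3 = 6 and
# start_pos = 1000 + 6 - 1 = 1005, so the printed interval is 1005..1008.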
if __name__ == "__main__":
protein_file = sys.argv[1]
gff_file = sys.argv[2]
run(protein_file, gff_file)
|
#! /usr/bin/env python
import os
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
from api.controller.BaseStaticFileHandler import BaseStaticFileHandler
from api.controller.ServerListController import ServerListController
from api.controller.InfoController import InfoController
from api.controller.CommandsController import CommandsController
from api.controller.InfoListController import InfoListController
from api.controller.StatusController import StatusController
from api.controller.SettingsController import SettingsController
from api.controller.SlowlogController import SlowlogController
from daemonized import daemonized
class redis_live(daemonized):
def run_daemon(self):
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=0, help="debug mode", type=int)
tornado.options.parse_command_line()
        print(os.path.abspath('.'))
# Bootup
handlers = [
(r"/api/servers", ServerListController),
(r"/api/info", InfoController),
(r"/api/status", StatusController),
(r"/api/infolist",InfoListController),
(r"/api/commands", CommandsController),
(r"/api/settings",SettingsController),
(r"/api/slowlog",SlowlogController),
(r"/(.*)", BaseStaticFileHandler, {"path": os.path.abspath('.')+'/www'})
]
server_settings = {'debug': options.debug}
application = tornado.web.Application(handlers, **server_settings)
application.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    live = redis_live()
live.start()
|
from dataclasses import dataclass
from datetime import datetime
from dash.dash import Dash
from dash.dependencies import Input, Output
from dash.development.base_component import Component
from plotly.missing_ipywidgets import FigureWidget
from dash.exceptions import PreventUpdate
from components.transaction_store_block import TransactionStore
from core.typings import BalanceType
from .base_block import BaseComponent, BaseComponentConfig
from dash import html, dcc
import plotly.graph_objects as go
import pandas as pd
@dataclass
class ForecastComponentInput():
store_name: str
class ForecastComponent(BaseComponent):
layout: Component
def __init__(self, input: ForecastComponentInput, app: Dash = None, config: BaseComponentConfig = None) -> None:
self.input = input
super().__init__(app=app, config=config)
def render(self):
"""Initializes the component layout"""
return html.Div(children=[
dcc.Graph(
id=self.prefix('graph')
)
])
def callbacks(self, app: Dash) -> None:
@app.callback(
Output(self.get_name(), 'figure'),
Input(self.input.store_name, "data")
)
def load_data(data) -> FigureWidget:
if data is None:
raise PreventUpdate
trx_data = TransactionStore.load_data(data)
balance_data = trx_data.get_balance_data()
fig = go.Figure()
def add_trace(data: pd.DataFrame, name: str, scheduled: bool = False):
nonlocal fig
fig.add_trace(
go.Scatter(x=data["date"],
y=data["balance"],
name=name,
line=go.scatter.Line(
dash="dash" if scheduled is True else "solid"
)))
add_trace(self.get_recorded_checkings(balance_data), "Checkings")
add_trace(self.get_scheduled_checkings(balance_data), "Checkings (scheduled)", True)
add_trace(self.get_recorded_liabilities(balance_data), "Liabilities")
add_trace(self.get_scheduled_liabilities(balance_data), "Liabilities (scheduled)", True)
fig.add_vline(datetime.now())
return fig
def _get_scheduled(self, data: pd.DataFrame, scheduled_value: bool):
"""Filter data based on the scheduled value"""
return data[data["scheduled"] == scheduled_value]
def _get_type(self, data: pd.DataFrame, type: BalanceType):
"""Filter data based on the type value"""
return data[data["type"] == type]
def get_recorded_checkings(self, data: pd.DataFrame):
"""Filter data for only recorded checkings"""
all_recorded = self._get_scheduled(data, False)
return self._get_type(all_recorded, BalanceType.CHECKINGS)
def get_scheduled_checkings(self, data: pd.DataFrame):
"""Filter data for only scheduled checkings"""
all_scheduled = self._get_scheduled(data, True)
return self._get_type(all_scheduled, BalanceType.CHECKINGS)
def get_recorded_liabilities(self, data: pd.DataFrame):
"""Filter data for only recorded liabilities"""
all_recorded = self._get_scheduled(data, False)
return self._get_type(all_recorded, BalanceType.LIABILITIES)
def get_scheduled_liabilities(self, data: pd.DataFrame):
"""Filter data for only scheduled liabilities"""
all_scheduled = self._get_scheduled(data, True)
return self._get_type(all_scheduled, BalanceType.LIABILITIES)
def get_name(self):
return self.prefix('graph')
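# Wiring sketch (assumptions, not from the original source: TransactionStore
# registers a dcc.Store whose id equals the store_name passed below, and
# BaseComponent hooks callbacks() up when an app is supplied):
#   app = Dash(__name__)
#   forecast = ForecastComponent(ForecastComponentInput(store_name="transaction-store"), app=app)
#   app.layout = html.Div(children=[forecast.render()])
#   app.run_server(debug=True)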
|
#!/usr/bin/env python
import os
def mkdir_p(dir):
'''make a directory (dir) if it doesn't exist'''
if not os.path.exists(dir):
os.mkdir(dir)
job_directory = "%s/jobs" %os.getcwd()
# Make top level directories
mkdir_p(job_directory)
script_path= "$HOME/projects/seasonal_forecasting/code/scripts/"
fields = ["swvl1", "msl", "t2m"]
regions = ["EU", "NA-EU", "EU"]
preprocesses = ["anomalies", "anomalies", "anomalies"]
lrs = [1e-2, 1e-9, 5e-5]
n_min,n_max = 1, 5
for field, region, preprocess, lr in zip(fields, regions, preprocesses, lrs):
for n in range(n_min,n_max+1):
jn = field + '_' + region + '_' + preprocess + '_n' + str(n)
job_file = os.path.join(job_directory,"%s.job" %jn)
with open(job_file, 'w') as fh:
fh.writelines("#!/bin/tcsh\n")
fh.writelines("#SBATCH --job-name=%s.job\n" % jn)
fh.writelines("#SBATCH --partition=pCluster")
fh.writelines("#SBATCH --N=1")
fh.writelines("#SBATCH --n=40")
fh.writelines("#SBATCH --t=30\n")
fh.writelines("#SBATCH --output=jobs/%s.out\n" % jn)
fh.writelines("#SBATCH --error=jobs/%s.err\n" % jn)
fh.writelines("#SBATCH --mail-type=END\n")
fh.writelines("#SBATCH [email protected]\n")
fh.writelines("#SBATCH --account=nonnenma\n")
fh.writelines(f"python {script_path}script_pCluster_run_single.py {n} {field} {region} {preprocess} {lr}\n")
os.system("sbatch %s" %job_file) |
def fibonacci(num):
fibs = [0,1]
for i in range(num-2):
fibs.append(fibs[-2]+fibs[-1])
print(fibs[num-1])
n = int(input())
for i in range(n):
m = int(input())
fibonacci(m+1) |
from __future__ import absolute_import, print_function, unicode_literals
from validator.constants import BUGZILLA_BUG
from ..regex.generic import FILE_REGEXPS
from .jstypes import Global, Hook, Interfaces
# SQL.
SYNCHRONOUS_SQL_DESCRIPTION = (
'The use of synchronous SQL via the storage system leads to severe '
'responsiveness issues, and should be avoided at all costs. Please '
'use asynchronous SQL via Sqlite.jsm (http://mzl.la/sqlite-jsm) or '
'the `executeAsync` method, or otherwise switch to a simpler database '
'such as JSON files or IndexedDB.')
def check_dynamic_sql(this, args, callee):
"""Check for the use of non-static strings when creating/exeucting SQL
statements."""
if len(args) >= 1 and not args[0].is_clean_literal:
this.traverser.warning(
err_id=('js', 'instanceactions', 'executeSimpleSQL_dynamic'),
warning='SQL statements should be static strings',
            description=('Dynamic SQL statements should be constructed via '
'static strings, in combination with dynamic '
'parameter binding via Sqlite.jsm wrappers '
'(http://mzl.la/sqlite-jsm) or '
'`createAsyncStatement` '
'(https://developer.mozilla.org/en-US/docs'
'/Storage#Binding_parameters)'))
@Global.hook('**', 'extend')
class MaybeDBConnection(Hook):
def createStatement(this, args, callee):
"""Handle calls to `createStatement`, returning an object which emits
warnings upon calls to `execute` and `executeStep` rather than
`executeAsync`."""
check_dynamic_sql(this, args, callee)
return this.traverser.wrap().query_interface(
'mozIStorageBaseStatement')
@Hook.on_call
def createAsyncStatement(this, args, callee):
check_dynamic_sql(this, args, callee)
@Hook.on_call
def executeSimpleSQL(this, args, callee):
"""Handle calls to `executeSimpleSQL`, warning that asynchronous
methods should be used instead. """
check_dynamic_sql(this, args, callee)
return {'err_id': ('js', 'instanceactions', 'executeSimpleSQL'),
'warning': 'Synchronous SQL should not be used',
'description': SYNCHRONOUS_SQL_DESCRIPTION}
@Interfaces.hook
class mozIStorageBaseStatement(Hook):
execute = {'on_call': SYNCHRONOUS_SQL_DESCRIPTION}
executeStep = {'on_call': SYNCHRONOUS_SQL_DESCRIPTION}
# XMLHttpRequest.
@Interfaces.hook
class nsIXMLHttpRequest(Hook):
@Hook.on_call
def open(this, args, callee):
"""Check that XMLHttpRequest.open is not called synchronously."""
if len(args) >= 3 and not args[2].as_bool():
return ('Synchronous HTTP requests can cause serious UI '
'performance problems, especially for users with '
'slow network connections.')
@Global.hook('XMLHttpRequest', 'return')
def XMLHttpRequest(this, args, callee):
return this.traverser.wrap().query_interface('nsIXMLHttpRequest')
# Other.
Interfaces.hook(('nsIAccessibleRetrieval',),
on_get=(
'Using the nsIAccessibleRetrieval interface causes significant '
'performance degradation in Gecko. It should only be used in '
'accessibility-related add-ons.'))
Interfaces.hook(('nsIDNSService', 'resolve'),
on_call={
'warning': '`nsIDNSService.resolve()` should not be used.',
'description': 'The `nsIDNSService.resolve` method performs a '
'synchronous DNS lookup, which will freeze the UI. This '
'can result in severe performance issues. '
'`nsIDNSService.asyncResolve()` should be used instead.'})
Interfaces.hook(('nsISound', 'play'),
on_call={
'warning': '`nsISound.play` should not be used.',
'description': 'The `nsISound.play` function is synchronous, and thus '
'freezes the interface while the sound is playing. It '
'should be avoided in favor of the HTML5 audio APIs.'})
FILE_REGEXPS.extend([
# Use of deprecated DOM mutation events.
(r'\b(?:on)?(?:%s)\b' % '|'.join((
'DOMAttrModified', 'DOMAttributeNameChanged',
'DOMCharacterDataModified', 'DOMElementNameChanged',
'DOMNodeInserted', 'DOMNodeInsertedIntoDocument',
'DOMNodeRemoved', 'DOMNodeRemovedFromDocument',
'DOMSubtreeModified')),
{'err_id': ('testcases_regex', 'file', 'mutation-events'),
'warning': 'DOM mutation events are deprecated',
      'description': 'DOM mutation events are officially deprecated, due '
'to their severe performance impact, and should not '
'be used. Please use MutationObserver '
'objects, or other triggers which do not involve '
'directly checking the DOM.'}),
# Use of mouse events with potential performance impacts.
(r'\b(?:on)?mouse(?:move|over|out)\b',
{'err_id': ('testcases_regex', 'file', 'mouse-events'),
'warning': 'Mouse events may cause performance issues.',
'description': (
'The use of `mousemove`, `mouseover`, and `mouseout` is '
'discouraged. These events are dispatched with high frequency '
'and can cause severe performance issues.')}),
])
# Prototype mutation.
WARN_PROTOTYPE_MUTATION = {
'err_id': ('testcases_javascript', 'performance', 'prototype_mutation'),
'warning': 'Mutating the prototypes of existing objects is deprecated',
'description': ('Mutating the prototypes of objects using `__proto__` or '
'`Object.setPrototypeOf` causes severe performance '
'degradation, and is deprecated. You should instead use '
'`Object.create` to create a new object with the given '
'prototype.',
'See bug %s for more information.'
% BUGZILLA_BUG % 948227),
}
Global.hook(('Object', 'setPrototypeOf'), on_call=WARN_PROTOTYPE_MUTATION)
@Global.hook(('**', '__proto__'), 'on_set')
def set__proto__(this, value):
if this.set_by != 'object literal':
# Warn only if this is an assignment. Ignore if it's a property
# present in an object literal.
return WARN_PROTOTYPE_MUTATION
# Event loop.
Global.hook(('**', 'processNextEvent'),
on_call=(
'Spinning the event loop with processNextEvent is a common cause of '
'deadlocks, crashes, and other errors due to unintended reentrancy. '
'Please use asynchronous callbacks instead wherever possible'))
|
from helpers import read_data, get_settings, package_translation
import api
settings = get_settings()
article_map = read_data('article_map')
locales = ['de', 'es', 'fr', 'ja', 'pt-br']
for article in article_map:
url = '{}/articles/{}/translations/missing.json'.format(settings['src_root'], article)
missing_locales = api.get_resource_list(url, list_name='locales', paginate=False)
for locale in locales:
if locale in missing_locales: # if translation missing in src, nothing to move
continue
print('Moving {} translation for article {}'.format(locale, article))
# get translation in src hc
url = '{}/articles/{}/translations/{}.json'.format(settings['src_root'], article, locale)
translation = api.get_resource(url)
# create translation in dest hc
url = '{}/articles/{}/translations.json'.format(settings['dst_root'], article_map[article])
payload = package_translation(translation)
api.post_resource(url, payload)
print('\nFinished moving translations.\n')
|
from unittest.mock import Mock, call
from pytest import fixture, mark
from pyviews.binding import BindingContext
from pyviews.binding.expression import ExpressionBinding, bind_setter_to_expression
from pyviews.binding.tests.common import InnerViewModel, ParentViewModel
from pyviews.expression import Expression, execute
from pyviews.core import InheritedDict, XmlAttr
@fixture
def expression_binding_fixture(request):
inner_vm = InnerViewModel(0, 'inner str')
view_model = ParentViewModel(0, inner_vm)
callback = Mock()
request.cls.view_model = view_model
request.cls.callback = callback
@mark.usefixtures('expression_binding_fixture')
class ExpressionBindingTests:
"""ExpressionBinding tests"""
def _bind(self, expression: Expression, global_vars: InheritedDict) -> ExpressionBinding:
binding = ExpressionBinding(self.callback, expression, global_vars)
binding.bind()
return binding
@mark.parametrize('source, global_dict', [
('vm', {'vm': InnerViewModel(0, '')}),
('vm.int_value', {'vm': InnerViewModel(2, '')}),
('vm.str_value', {'vm': InnerViewModel(2, 'asdf')}),
('(vm.int_value, parent.inner_vm.int_value)', {
'vm': InnerViewModel(2, 'asdf'),
'parent': ParentViewModel(0, InnerViewModel(0, ''))
}),
])
def test_initialize_target(self, source: str, global_dict: dict):
"""Target should be updated with expression value on Binding.bind() call"""
expression = Expression(source)
self._bind(expression, InheritedDict(global_dict))
expected = execute(expression, global_dict)
assert self.callback.call_args == call(expected)
@mark.parametrize('source, global_dict, change', [
('vm.int_value', {'vm': InnerViewModel(0, '')},
lambda gl: setattr(gl['vm'], 'int_value', 3)),
('vm.inner_vm.int_value', {'vm': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['vm'], 'inner_vm', InnerViewModel(5, 'updated'))),
('vm.inner_vm.str_value', {'vm': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['vm'].inner_vm, 'str_value', 'asdf')
),
('(vm.int_value, parent.inner_vm.int_value)',
{'vm': InnerViewModel(0, ''),
'parent': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['parent'].inner_vm, 'int_value', 3)
),
('(vm.str_value, str(parent.inner_vm.int_value))',
{'vm': InnerViewModel(0, ''),
'parent': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['parent'].inner_vm, 'int_value', 3)
),
('vms[0].int_value', {'vms': [InnerViewModel(0, '')]},
lambda gl: setattr(gl['vms'][0], 'int_value', 3)
),
('vms[ivm.int_value].int_value', {
'vms': [InnerViewModel(0, ''), InnerViewModel(1, '')],
'ivm': InnerViewModel(0, '')
},
lambda gl: setattr(gl['vms'][0], 'int_value', 3)
),
('vms[ivm.int_value].int_value', {
'vms': [InnerViewModel(0, ''), InnerViewModel(1, '')],
'ivm': InnerViewModel(0, '')
},
lambda gl: setattr(gl['ivm'], 'int_value', 1)
)
])
def test_expression_changed(self, source: str, global_dict: dict, change):
"""Target should be updated after expression result is changed"""
expression = Expression(source)
global_vars = InheritedDict(global_dict)
self._bind(expression, global_vars)
change(global_vars)
expected = execute(expression, global_vars.to_dictionary())
assert self.callback.call_args_list[1:] == [call(expected)]
@mark.parametrize('source, global_dict, change', [
('vm.int_value', {'vm': InnerViewModel(0, '')},
lambda gl: setattr(gl['vm'], 'int_value', 3)),
('vm.inner_vm.int_value', {'vm': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['vm'], 'inner_vm', InnerViewModel(5, 'updated'))),
('vm.inner_vm.str_value', {'vm': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['vm'].inner_vm, 'str_value', 'asdf')
),
('(vm.int_value, parent.inner_vm.int_value)',
{'vm': InnerViewModel(0, ''),
'parent': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['parent'].inner_vm, 'int_value', 3)
),
('(vm.str_value, str(parent.inner_vm.int_value))',
{'vm': InnerViewModel(0, ''),
'parent': ParentViewModel(0, InnerViewModel(0, ''))},
lambda gl: setattr(gl['parent'].inner_vm, 'int_value', 3)
),
('vms[0].int_value', {'vms': [InnerViewModel(0, '')]},
lambda gl: setattr(gl['vms'][0], 'int_value', 3)
),
('vms[ivm.int_value].int_value', {
'vms': [InnerViewModel(0, ''), InnerViewModel(1, '')],
'ivm': InnerViewModel(0, '')
},
lambda gl: setattr(gl['vms'][0], 'int_value', 3)
),
('vms[ivm.int_value].int_value', {
'vms': [InnerViewModel(0, ''), InnerViewModel(1, '')],
'ivm': InnerViewModel(0, '')
},
lambda gl: setattr(gl['ivm'], 'int_value', 1)
)
])
def test_destroy(self, source: str, global_dict: dict, change):
"""Destroy should stop handling expression changes and update target"""
expression = Expression(source)
global_vars = InheritedDict(global_dict)
binding = self._bind(expression, global_vars)
self.callback.reset_mock()
binding.destroy()
change(global_vars)
assert not self.callback.called
@fixture
def binding_context_fixture(request):
setter, xml_attr = Mock(), XmlAttr('name')
context = BindingContext({
'setter': setter,
'xml_attr': xml_attr,
'expression_body': '1+1',
'node': Mock(node_globals=InheritedDict())
})
request.cls.context = context
@mark.usefixtures('binding_context_fixture')
class BindSetterToExpressionTests:
def test_binds_setter_to_expression_changes(self):
"""should bind setter to expression changes"""
self.context.node = Mock(node_globals=InheritedDict({'value': 1}))
self.context.expression_body = 'value'
bind_setter_to_expression(self.context)
self.context.setter.reset_mock()
self.context.node.node_globals['value'] = 2
assert self.context.setter.call_args == call(self.context.node, self.context.xml_attr.name,
2)
def test_returns_binding(self):
"""should return expression binding"""
actual = bind_setter_to_expression(self.context)
assert isinstance(actual, ExpressionBinding)
|
from django.urls import path
from .views import my_works, my_works_detail, my_post_detail
app_name = 'my_works_blog'
urlpatterns = [
path('', my_works, name='index'),
path("<int:pk>/", my_works_detail, name='project_detail'),
path("<int:pk>/<int:pk1>", my_post_detail, name='post_detail'),
]
|
#!/usr/bin/python -i
import numpy as np
from NuclearNormMinimization import NuclearNormMinimization
from sklearn.metrics import mean_squared_error
U = np.random.random((10,10))
S = np.zeros((10,10))
S[0,0] = 500
S[1,1] = 100
S[2,2] = 50
V = np.random.random((10,20))
matrix = np.matmul(U, np.matmul(S, V))
incomplete_matrix = matrix.copy()
blah = np.random.random(incomplete_matrix.shape)
hidden_entries = (blah >= 0.7)
sampled_entries = np.logical_not(hidden_entries)
incomplete_matrix[ hidden_entries ] = np.nan
solver = NuclearNormMinimization()
completed_matrix = solver.complete(incomplete_matrix)
mse = mean_squared_error(matrix, completed_matrix)
print('mean_squared_error = {0:.6f}'.format(mse))
|
"""
TODO:
*Choose which drink you want out of the top n
*Exception when 0 matches
"""
import sqlite3
def main():
    ingredientList = []
    matchDict = {}
    print("""\nHello, I'm Cocky, and this is Cocky's Cocktail Shack. \nI'll help you make a cocktail.""")
print("Please enter your ingredients and write \"done\" when finished \n")
while True:
ingredientString = input("> ")
if ingredientString.lower() == "done":
break
else:
ingredientList.append(ingredientString.lower())
makeTable()
dbEntries = fetchTable()
for entry in dbEntries:
matchName, matchValue = getMatches(ingredientList, entry[0], entry[1])
matchDict[matchName] = matchValue
bestDrink = max(matchDict, key=matchDict.get)
bestDrinkIngredients = getBestDrink(bestDrink)
bestDrinkInstructions = getBestInstructions(bestDrink)
haveList, haveNotList = getMatchingIngredients(ingredientList, bestDrinkIngredients)
haveString = ", ".join(haveList)
haveNotString = ", ".join(haveNotList)
print("\n")
print("Your most suitable cocktail is: ", bestDrink)
print("The ingredients you have are: ", haveString, "\r")
print("The ingredients you need to get are: ", haveNotString, "\r")
print("\n")
print("Instructions: ", bestDrinkInstructions)
conn.close()
def makeTable():
c.execute("""CREATE TABLE IF NOT EXISTS cocktails
(names text, ingredients text, instructions text)""")
conn.commit()
def fetchTable():
entriesList = []
c.execute("SELECT * FROM cocktails")
for entry in c.fetchall():
entryName, entryIngredients, entryInstructions = entry
entryIngredients = entryIngredients.split(", ")
entriesList.append([entryName, entryIngredients, entryInstructions])
return entriesList
def getMatches(userIngredients, cocktailName, cocktailIngredients):
    matchDict = {}
for item in userIngredients:
if item in cocktailIngredients:
if not cocktailName in matchDict:
matchDict[cocktailName] = 1
else:
matchDict[cocktailName] += 1
elif not cocktailName in matchDict:
matchDict[cocktailName] = 0
return(cocktailName, matchDict[cocktailName])
def getBestDrink(bestDrink):
c.execute("SELECT ingredients FROM cocktails WHERE names = (?)", [bestDrink])
ingredientList = c.fetchone()[0].split(", ")
return(ingredientList)
def getMatchingIngredients(userIngredients, bestIngredients):
haveList = []
haveNotList = []
for item in bestIngredients:
if item in userIngredients:
haveList.append(item)
else:
haveNotList.append(item)
return(haveList, haveNotList)
def getBestInstructions(bestDrink):
c.execute("SELECT instructions FROM cocktails WHERE names = (?)", [bestDrink])
instruction = c.fetchone()[0]
return(instruction)
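# Sketch for the first TODO above ("choose which drink you want out of the top n"):
# rank the match counts and return the n best names. Hypothetical helper, not yet
# called from main().
def getTopMatches(matchDict, n=3):
    """Return up to n cocktail names ordered by number of matching ingredients."""
    ranked = sorted(matchDict.items(), key=lambda pair: pair[1], reverse=True)
    return [name for name, count in ranked[:n] if count > 0]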
conn = sqlite3.connect("cocktails2.db")
c = conn.cursor()
main()
|
"""
Factories for features and geometries
"""
from typing import Mapping, Union
import keytree.compat
from keytree.model import GEOM_TYPES, NSMAP, Feature, Geometry
def feature(element, kmlns: Union[str, Mapping] = NSMAP) -> Feature:
kid = element.attrib.get("id")
name = element.findtext("kml:name", namespaces=kmlns) or element.findtext(
"kml:Name", namespaces=kmlns
)
snippet = element.findtext("kml:Snippet", namespaces=kmlns)
description = element.findtext(
"kml:description", namespaces=kmlns
) or element.findtext("kml:Description", namespaces=kmlns)
for geom_type in GEOM_TYPES:
geom_element = element.find(geom_type, namespaces=kmlns)
if geom_element is not None:
g = geometry(geom_element, kmlns=kmlns)
return Feature(kid, g, name=name, snippet=snippet, description=description)
return Feature(kid, None, name=name, snippet=snippet, description=description)
def geometry(element, kmlns: Mapping = NSMAP) -> Geometry:
tp = element.tag.split("}")
if kmlns is None:
kmlns = {"": tp[0][1:]}
geom_type = tp[1]
# geom_type = element.tag
return geometry_factory[geom_type](element, kmlns)
def geometry_Point(element, kmlns: Mapping = NSMAP):
t = element.findtext("kml:coordinates", namespaces=kmlns)
tv = t.split(",")
return Geometry("Point", tuple([float(v) for v in tv]))
def geometry_LineString(element, kmlns: Mapping = NSMAP):
text = element.findtext("kml:coordinates", namespaces=kmlns)
ts = text.split()
coords = []
for t in ts:
tv = t.split(",")
coords.append(tuple([float(v) for v in tv]))
return Geometry("LineString", tuple(coords))
def geometry_Track(element, kmlns: Mapping = NSMAP):
sourcecoords = element.findall("gx:coord", namespaces=kmlns)
coords = []
for coord in sourcecoords:
tv = coord.text.split()
coords.append(tuple([float(v) for v in tv]))
return Geometry("LineString", tuple(coords))
def geometry_MultiTrack(element, kmlns: Mapping = NSMAP):
geometries = []
for i in [el for el in element if el.tag.split("}")[1] == "Track"]:
geometries.append(geometry(i, kmlns=kmlns))
return Geometry("MultiLineString", geometries)
def geometry_Polygon(element, kmlns: Mapping = NSMAP):
shell = element.find("kml:outerBoundaryIs", namespaces=kmlns)
text = shell.findtext("*/kml:coordinates", namespaces=kmlns)
ts = text.split()
shell_coords = []
for t in ts:
tv = t.split(",")
shell_coords.append(tuple([float(v) for v in tv]))
poly_coords = []
poly_coords.append(tuple(shell_coords))
holes = element.findall("kml:innerBoundaryIs", namespaces=kmlns)
for hole in holes:
text = hole.findtext("*/kml:coordinates", namespaces=kmlns)
ts = text.split()
hole_coords = []
for t in ts:
tv = t.split(",")
hole_coords.append(tuple([float(v) for v in tv]))
poly_coords.append(tuple(hole_coords))
return Geometry("Polygon", tuple(poly_coords))
def geometry_Multi(element, kmlns: Mapping = NSMAP):
geometries = []
geometry_type = multi_geometry[
[
el.tag.split("}")[1]
for el in element
if el.tag.split("}")[1] in multi_geometry.keys()
][0]
]
for i in element:
geometries.append(geometry(i, kmlns=kmlns).coordinates)
return Geometry(geometry_type, geometries)
geometry_factory = {
"Point": geometry_Point,
"LineString": geometry_LineString,
"Track": geometry_Track,
"MultiTrack": geometry_MultiTrack,
"Polygon": geometry_Polygon,
"MultiGeometry": geometry_Multi,
}
multi_geometry = {
"Point": "MultiPoint",
"LineString": "MultiLineString",
"Polygon": "MultiPolygon",
}
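# Usage sketch, not part of the library API: parse a bare KML <Point> with the
# standard library and feed it to the factory above. Assumes keytree.model.NSMAP
# maps the "kml" prefix to the OGC KML 2.2 namespace, which the findtext() calls
# above rely on.
if __name__ == "__main__":
    from xml.etree import ElementTree as etree
    kml_point = (
        '<Point xmlns="http://www.opengis.net/kml/2.2">'
        "<coordinates>-122.08,37.42,0</coordinates>"
        "</Point>"
    )
    element = etree.fromstring(kml_point)
    # geometry() splits the namespaced tag and dispatches via geometry_factory.
    print(geometry(element))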
|
# Copyright © 2020, Oracle and/or its affiliates. All rights reserved.
# In this example, we specify and estimate a Bayes Factor for the 4/5ths (four-fifths) rule
import torch
from relax import *
from relax.interface import BayesFactor
import relax.estimators.importance.KDE_tips as Stan
from data import data2 as data
# Step 0: pre-process the data
candidates_qualified = filter(lambda candidate: candidate['qualified'] == 'y',data)
candidates_unqualified = filter(lambda candidate: candidate['qualified'] == 'n',data)
female_candidates_qualified = filter(lambda candidate: candidate['gender'] == 'female',candidates_qualified)
male_candidates_qualified = filter(lambda candidate: candidate['gender'] == 'male',candidates_qualified)
female_accepted_qualified = filter(lambda candidate: candidate['decision'] == 'accept',female_candidates_qualified)
male_accepted_qualified = filter(lambda candidate: candidate['decision'] == 'accept',male_candidates_qualified)
female_candidates_unqualified = filter(lambda candidate: candidate['gender'] == 'female',candidates_unqualified)
male_candidates_unqualified = filter(lambda candidate: candidate['gender'] == 'male',candidates_unqualified)
female_accepted_unqualified = filter(lambda candidate: candidate['decision'] == 'accept',female_candidates_unqualified)
male_accepted_unqualified = filter(lambda candidate: candidate['decision'] == 'accept',male_candidates_unqualified)
nfcq = len(list(female_candidates_qualified))
nmcq = len(list(male_candidates_qualified))
nfaq = len(list(female_accepted_qualified))
nmaq = len(list(male_accepted_qualified))
nfcu = len(list(female_candidates_unqualified))
nmcu = len(list(male_candidates_unqualified))
nfau = len(list(female_accepted_unqualified))
nmau = len(list(male_accepted_unqualified))
# Step 1: specify the models
def majority(path, const):
def model():
theta = sample('theta' + path, Beta(const['alpha' + path],const['beta' + path]))
X = sample('X' + path, Binomial(const['N' + path],theta))
return [theta,X]
return model
def demographic_parity(path,const):
pro_tip = ('STAN_NUTS', ['theta' + path, 'phi' + path], ['X' + path, 'Y' + path])
def model():
[theta,X] = majority(path,const)()
phi = sample('phi' + path, Uniform(0.8 * theta,1.0))
Y = sample('Y' + path, Binomial(const['M' + path],phi))
pro_tip
return model
def demographic_parity_dual(path,const):
pro_tip = ('STAN_NUTS', ['theta' + path, 'phi' + path], ['X' + path, 'Y' + path])
def model():
[theta,X] = majority(path,const)()
phi = sample('phi' + path, Uniform(0.0,0.8 * theta))
Y = sample('Y' + path, Binomial(const['M' + path],phi))
pro_tip
return model
def equality_odds_template(f,path,const):
pro_tip = ('Block', [path + 'qualified', path + 'unqualified'])
def model():
for j in ['qualified','unqualified']:
f(path + j,const)()
pro_tip
return model
# Step 2: create the Bayes Factor
constants = {'N_qualified': nmcq,
'M_qualified': nfcq,
'N_unqualified': nmcu,
'M_unqualified': nfcu,
'alpha_qualified': 1.0,
'beta_qualified': 1.0,
'alpha_unqualified': 1.0,
'beta_unqualified': 1.0
}
data = {'X_qualified': torch.tensor([float(nmaq)]),
'Y_qualified': torch.tensor([float(nfaq)]),
'X_unqualified': torch.tensor([float(nmau)]),
'Y_unqualified': torch.tensor([float(nfau)])
}
data_stan = {'X_qualified': nmaq,
'Y_qualified': nfaq,
'X_unqualified': nmau,
'Y_unqualified': nfau
}
null_model = equality_odds_template(demographic_parity,'_',constants)
alternative_model = equality_odds_template(demographic_parity_dual,'_',constants)
test_equality_odds = BayesFactor(null_model,alternative_model,data)
# Step 3:
estimator = Stan.Experiment(100,test_equality_odds,constants,data_stan)
estimation = estimator.estimate(100000,test_equality_odds)
print('Estimate of dual Bayes Factor: ', estimation)
|
class UrlManager(object):
def __init__(self):
pass
def get_main_seed_url(self):
return 'https://www.meitulu.com/'
|
"""Library integration with Random123."""
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2016 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pytools import ImmutableRecord
from mako.template import Template
import numpy as np
# {{{ rng metadata
class RNGInfo(ImmutableRecord):
@property
def full_name(self):
return "%s%dx%d" % (self.name, self.width, self.bits)
_philox_base_info = RNGInfo(
name="philox",
pyopencl_header="pyopencl-random123/philox.cl",
generic_header="Random123/philox.h",
key_width=2)
_threefry_base_info = RNGInfo(
name="threefry",
pyopencl_header="pyopencl-random123/threefry.cl",
generic_header="Random123/threefry.h",
key_width=4)
RNG_VARIANTS = [
_philox_base_info.copy(width=2, bits=32),
_philox_base_info.copy(width=2, bits=64),
_philox_base_info.copy(width=4, bits=32),
_philox_base_info.copy(width=4, bits=64),
_threefry_base_info.copy(width=2, bits=32),
_threefry_base_info.copy(width=2, bits=64),
_threefry_base_info.copy(width=4, bits=32),
_threefry_base_info.copy(width=4, bits=64),
]
FUNC_NAMES_TO_RNG = dict(
(v.full_name + suffix, v)
for v in RNG_VARIANTS
for suffix in [
"", "_f32", "_f64",
])
# }}}
# {{{ preamble
PREAMBLE_TEMPLATE = Template("""
%if is_pyopencl_target:
#include <${ rng_variant.pyopencl_header }>
%else:
#include <${ rng_variant.generic_header }>
%endif
<%
name = rng_variant.full_name
width = rng_variant.width
if rng_variant.bits == 32:
counter_type = "uint%d" % width
key_type = "uint%d" % rng_variant.key_width
elif rng_variant.bits == 64:
counter_type = "ulong%d" % width
key_type = "ulong%d" % rng_variant.key_width
else:
assert False
%>
typedef union {
${ counter_type } v;
${ name }_ctr_t c;
} ${ name }_ctr_vec_union;
${ counter_type } ${ name }_bump(${ counter_type } ctr)
{
if (++ctr.x == 0)
if (++ctr.y == 0)
++ctr.z;
return ctr;
}
${ counter_type } ${ name }_gen(
${ counter_type } ctr,
${ key_type } key,
${ counter_type } *new_ctr)
{
${ name }_ctr_vec_union result;
result.c = ${ name }(
*(${ name }_ctr_t *) &ctr,
*(${ name }_key_t *) &key);
*new_ctr = ${ name }_bump(ctr);
return result.v;
}
float${ width } ${ name }_f32(
${ counter_type } ctr,
${ key_type } key,
${ counter_type } *new_ctr)
{
*new_ctr = ctr;
return
convert_float${ width }(${ name }_gen(*new_ctr, key, new_ctr))
* ${ repr(1./2**32) }f;
}
double${ width } ${ name }_f64(
${ counter_type } ctr,
${ key_type } key,
${ counter_type } *new_ctr)
{
*new_ctr = ctr;
%if rng_variant.bits == 32:
return
convert_double${ width }(${ name }_gen(*new_ctr, key, new_ctr))
* ${ repr(1./2**32) }
+
convert_double${ width }(${ name }_gen(*new_ctr, key, new_ctr))
* ${ repr(1./2**64) };
%elif rng_variant.bits == 64:
*new_ctr = ctr;
return
convert_double${ width }(${ name }_gen(*new_ctr, key, new_ctr))
* ${ repr(1./2**64) };
%else:
#error Unrecognized bit width in RNG
%endif
}
""", strict_undefined=True)
# }}}
def random123_preamble_generator(preamble_info):
for f in preamble_info.seen_functions:
try:
rng_variant = FUNC_NAMES_TO_RNG[f.name]
except KeyError:
continue
from loopy.target.pyopencl import PyOpenCLTarget
yield ("90-random123-"+rng_variant.full_name,
PREAMBLE_TEMPLATE.render(
is_pyopencl_target=isinstance(
preamble_info.kernel.target,
PyOpenCLTarget),
rng_variant=rng_variant,
))
def random123_function_mangler(kernel, name, arg_dtypes):
try:
rng_variant = FUNC_NAMES_TO_RNG[name]
except KeyError:
return None
from loopy.types import NumpyType
target = kernel.target
base_dtype = {32: np.uint32, 64: np.uint64}[rng_variant.bits]
ctr_dtype = target.vector_dtype(NumpyType(base_dtype), rng_variant.width)
key_dtype = target.vector_dtype(NumpyType(base_dtype), rng_variant.key_width)
from loopy.kernel.data import CallMangleInfo
fn = rng_variant.full_name
if name == fn:
return CallMangleInfo(
target_name=fn+"_gen",
result_dtypes=(ctr_dtype, ctr_dtype),
arg_dtypes=(ctr_dtype, key_dtype))
elif name == fn + "_f32":
return CallMangleInfo(
target_name=name,
result_dtypes=(
target.vector_dtype(NumpyType(np.float32), rng_variant.width),
ctr_dtype),
arg_dtypes=(ctr_dtype, key_dtype))
elif name == fn + "_f64":
return CallMangleInfo(
target_name=name,
result_dtypes=(
target.vector_dtype(NumpyType(np.float64), rng_variant.width),
ctr_dtype),
arg_dtypes=(ctr_dtype, key_dtype))
else:
return None
# vim: foldmethod=marker
|
import os
import pytest
import renderapi
import json
import mock
import numpy as np
from test_data import (
RAW_STACK_INPUT_JSON,
render_params,
montage_z,
solver_montage_parameters,
test_pointmatch_parameters as pointmatch_example,
test_pointmatch_parameters_qsub as pointmatch_example_qsub)
from asap.solver.solve import Solve_stack
from asap.pointmatch.create_tilepairs import TilePairClientModule
from asap.pointmatch.generate_point_matches_spark import (
PointMatchClientModuleSpark)
from asap.pointmatch.generate_point_matches_qsub import (
PointMatchClientModuleQsub)
@pytest.fixture(scope='module')
def render():
render_params['project'] = solver_montage_parameters[
'input_stack']['project']
render = renderapi.connect(**render_params)
return render
@pytest.fixture(scope='module')
def tspecs_from_json():
tilespecs = [renderapi.tilespec.TileSpec(json=d)
for d in RAW_STACK_INPUT_JSON]
return tilespecs
# raw stack with lens correction and intensity correction done
@pytest.fixture(scope='module')
def raw_stack(render, tspecs_from_json):
test_raw_stack = 'input_raw_stack'
renderapi.stack.create_stack(
test_raw_stack,
render=render)
renderapi.client.import_tilespecs(
test_raw_stack,
tspecs_from_json,
render=render)
renderapi.stack.set_stack_state(
test_raw_stack,
'COMPLETE',
render=render)
yield test_raw_stack
renderapi.stack.delete_stack(
test_raw_stack,
render=render)
@pytest.fixture(scope='module')
def test_create_montage_tile_pairs(render, raw_stack, tmpdir_factory):
output_directory = str(tmpdir_factory.mktemp('Montage'))
params = {
"render": render_params,
"zNeighborDistance": 0,
"xyNeighborFactor": 0.9,
"excludeCornerNeighbors": "true",
"excludeSameLayerNeighbors": "false",
"excludeCompletelyObscuredTiles": "true",
"minZ": montage_z,
"maxZ": montage_z,
"output_dir": output_directory,
"stack": raw_stack,
"output_json": "out.json"
}
mod = TilePairClientModule(input_data=params, args=[])
mod.run()
# check if the file has been created
with open(params['output_json'], 'r') as fp:
out_d = json.load(fp)
tilepair_file = out_d['tile_pair_file']
assert(os.path.exists(tilepair_file) and
os.path.getsize(tilepair_file) > 0)
with open(tilepair_file, 'r') as f:
js = json.load(f)
npairs = js['neighborPairs']
assert(len(npairs) == 4)
yield tilepair_file
def test_create_montage_tile_pairs_no_z(render, raw_stack, tmpdir):
output_directory = str(tmpdir.join('Montage'))
params = {
"render": render_params,
"zNeighborDistance": 0,
"xyNeighborFactor": 0.9,
"excludeCornerNeighbors": "true",
"excludeSameLayerNeighbors": "false",
"excludeCompletelyObscuredTiles": "true",
"output_dir": output_directory,
"stack": raw_stack,
"output_json": "out.json"
}
mod = TilePairClientModule(input_data=params, args=[])
mod.run()
# check if the file has been created
with open(params['output_json'], 'r') as fp:
out_d = json.load(fp)
tilepair_file = out_d['tile_pair_file']
assert(os.path.exists(tilepair_file) and
os.path.getsize(tilepair_file) > 0)
with open(tilepair_file, 'r') as f:
js = json.load(f)
npairs = js['neighborPairs']
assert(len(npairs) == 4)
@pytest.fixture(scope='module')
def test_point_match_generation(
render, test_create_montage_tile_pairs, tmpdir_factory):
output_directory = str(tmpdir_factory.mktemp('output_json'))
pointmatch_example['output_json'] = os.path.join(
output_directory, 'output.json')
pointmatch_example['pairJson'] = test_create_montage_tile_pairs
mod = PointMatchClientModuleSpark(input_data=pointmatch_example, args=[])
mod.run()
with open(pointmatch_example['output_json'], 'r') as fp:
output_d = json.load(fp)
assert (output_d['pairCount'] > 0)
yield pointmatch_example['collection']
class MockSubprocessException(Exception):
pass
def mock_subprocess_qsub_call(cmd):
print(cmd)
raise MockSubprocessException('fake subprocess call')
@mock.patch('subprocess.check_call', side_effect=mock_subprocess_qsub_call)
def test_point_match_generation_qsub(
render, test_create_montage_tile_pairs, tmpdir_factory):
output_directory = str(tmpdir_factory.mktemp('output_json'))
pointmatch_example_qsub['output_json'] = os.path.join(
output_directory, 'output.json')
pointmatch_example_qsub['pairJson'] = test_create_montage_tile_pairs
mod = PointMatchClientModuleQsub(
input_data=pointmatch_example_qsub, args=[])
with pytest.raises(MockSubprocessException):
mod.run()
def test_montage_solver(
render, raw_stack, test_point_match_generation, tmpdir_factory):
output_directory = str(tmpdir_factory.mktemp('output_json'))
parameters = dict(solver_montage_parameters)
parameters['input_stack']['name'] = raw_stack
parameters['pointmatch']['name'] = test_point_match_generation
parameters['pointmatch']['db_interface'] = 'render'
parameters['input_stack']['db_interface'] = 'render'
parameters['output_json'] = os.path.join(
output_directory, "montage_solve.json")
# affine half size
parameters['transformation'] = "AffineModel"
parameters['fullsize_transform'] = False
mod = Solve_stack(input_data=parameters, args=[])
mod.run()
precision = 1e-7
assert np.all(np.array(mod.module.results['precision']) < precision)
    assert np.all(np.array(mod.module.results['error']) < 200)
with open(parameters['output_json'], 'r') as f:
output_d = json.load(f)
assert parameters['output_stack']['name'][0] == output_d['stack']
|
import VIKOR as vikor
import numpy as np
from normalisation_fuc import omri_normalisation
# Step 2*: change the SR function to adapt it to the RIM version
def SR_omri(D, w, AB):
nD = omri_normalisation(D, AB)
S = np.zeros(nD.shape[0])
R = np.zeros(nD.shape[0])
for i in range(nD.shape[0]):
for j in range(nD.shape[1]):
nD[i,j] = nD[i,j] * w[j]
S[i] = sum(nD[i,:])
R[i] = max(nD[i,:])
return S, R
# VIKOR OMRI version
def vikor_omri(D, w, AB):
s, r = SR_omri(D, w, AB)
q = vikor.Q(s, r, len(w))
return s, r, q |
import functools
import re
import pathlib
import timeit
from typing import Union, List, Generator, Type
from net_models.validators import normalize_interface_name
from net_models.models.interfaces.InterfaceModels import InterfaceModel
from net_models.models import VRFModel
from net_models.models.services.ServerModels import *
from net_models.inventory import HostConfig, ConfigDefaults
from net_parser.utils import re_search_lines, re_filter_lines, compile_regex, property_autoparse
from net_parser.config import (
BaseConfigParser, BaseConfigLine, IosConfigLine,
IosConfigParser, IosInterfaceParser, IosAaaParser, IosVrfDefinitionParser, IosLineParser, IosLoggingLine, IosBannerLine
)
class IosConfigParser(BaseConfigParser):
INTERFACE_LINE_CLASS = IosInterfaceParser
CONFIG_LINE_CLS = IosConfigLine
_interface_pattern = r"[A-z]{2,}(?:[A-z\-])?\d+(?:\/\d+)?(?:\:\d+)?(?:\.\d+)?"
_ip_address_pattern = r"(?:\d{1,3}\.){3}\d{1,3}"
_host_pattern = r"[A-z0-9\-\_\.]+"
_source_interface_regex = re.compile(pattern=r"source (?P<src_interface>{0})".format(_interface_pattern))
_source_vrf_regex = re.compile(pattern=r"vrf (?P<vrf>\S+)")
_hostname_regex = re.compile(pattern=r"^hostname (?P<hostname>\S+)\Z")
_ip_arp_proxy_disable_regex = re.compile(pattern=r"^(?:(?P<no>no) )?ip arp proxy disable$", flags=re.MULTILINE)
_service_password_encryption_regex = re.compile(pattern=r"^(?:(?P<no>no) )?service password-encryption$", flags=re.MULTILINE)
_banner_regex = re.compile(pattern=r"^banner (?P<banner_type>\S+)")
    _service_pad_regex = re.compile(pattern=r"^(?:(?P<no>no) )?service pad$", flags=re.MULTILINE)
    _service_tcp_keepalives_in_regex = re.compile(pattern=r"^(?:(?P<no>no) )?service tcp-keepalives-in$", flags=re.MULTILINE)
    _service_tcp_keepalives_out_regex = re.compile(pattern=r"^(?:(?P<no>no) )?service tcp-keepalives-out$", flags=re.MULTILINE)
_ip_finger_regex = re.compile(pattern=r"^(?:(?P<no>no) )?ip finger(?: (?P<rfc_compliant>rfc-compliant))?$", flags=re.MULTILINE)
_ip_source_route_regex = re.compile(pattern=r"^(?:(?P<no>no) )?ip source-route$", flags=re.MULTILINE)
_ntp_server_base_regex = re.compile(pattern=r"^ntp server(?: vrf \S+)? (?P<server>{0}|{1})".format(_ip_address_pattern, _host_pattern), flags=re.MULTILINE)
_ntp_peer_base_regex = re.compile(pattern=r"^ntp peer(?: vrf \S+)? (?P<server>{0}|{1})".format(_ip_address_pattern, _host_pattern), flags=re.MULTILINE)
_ntp_authentication_keys_regex = re.compile(pattern=r"^ntp authentication-key (?P<key_id>\d+) (?P<method>\S+) (?P<value>\S+)(?: (?P<encryption_type>\d+))?", flags=re.MULTILINE)
_ntp_trusted_key_regex = re.compile(pattern=r"^ntp trusted-key (?P<key_id>\d+)", flags=re.MULTILINE)
_ntp_acl_regex = re.compile(pattern=r"^ntp access-group (?P<access_type>\S+) (?P<acl_name>\S+)", flags=re.MULTILINE)
_ntp_src_interface_regex = re.compile(pattern=r"^ntp source (?P<src_interface>{})".format(_interface_pattern), flags=re.MULTILINE)
_logging_source_interface_regex = re.compile(pattern=r"^logging source-interface (?P<src_interface>{0})(?: vrf (?P<vrf>\S+))?".format(_interface_pattern))
_logging_server_base_regex = re.compile(pattern=r"^logging host (?P<server>{0}|{1})".format(_ip_address_pattern, _host_pattern))
_logging_transport_regex = re.compile(pattern=r"transport (?P<protocol>udp|tcp) port (?P<port>\d+)")
def __init__(self,
config: Union[pathlib.Path, List[str], str],
verbosity: int =4,
name: str = "BaseConfigParser",
defaults: Type[ConfigDefaults] = None,
**kwargs):
super().__init__(config=config, verbosity=verbosity, name="IosConfigParser", **kwargs)
self.DEFAULTS = defaults or ConfigDefaults()
@functools.cached_property
def hostname(self):
candidates = self.re_search_lines(regex=self._hostname_regex, group="hostname")
return self.first_candidate_or_none(candidates=candidates)
@property
def interface_lines(self) -> Generator[IosInterfaceParser, None, None]:
return (x for x in self.lines if 'interface' in x.get_type)
@property
def interfaces(self) -> Generator[InterfaceModel, None, None]:
return (x.to_model() for x in self.interface_lines)
@functools.cached_property
def aaa_lines(self):
return (x for x in self.lines if isinstance(x, IosAaaParser))
@functools.cached_property
def management_lines(self):
return (x.to_model() for x in self.lines if isinstance(x, IosLineParser))
@functools.cached_property
def logging_lines(self):
return (x for x in self.lines if isinstance(x, IosLoggingLine))
def get_interface_line(self, interface_name: str) -> Union[IosInterfaceParser, None]:
interface_name = normalize_interface_name(interface_name=interface_name, short=False)
candidates = [x for x in self.interface_lines if x.name == interface_name]
return self.first_candidate_or_none(candidates=candidates)
@property
def vrf_definition_lines(self) -> Generator[IosVrfDefinitionParser, None, None]:
return (x for x in self.lines if isinstance(x, IosVrfDefinitionParser))
@property
def vrfs(self) -> Generator[VRFModel, None, None]:
return (x.model for x in self.vrf_definition_lines)
@functools.cached_property
def ntp(self) -> NtpConfig:
ntp = NtpConfig()
ntp_lines = self.re_search_lines(regex=self.compile_regex(pattern=r"^ntp .*", flags=re.MULTILINE))
if not len(ntp_lines):
return None
# Source Interface
ntp_src_interface = self.first_candidate_or_none(candidates=re_search_lines(lines=ntp_lines, regex=self._ntp_src_interface_regex, group="src_interface"))
if ntp_src_interface is not None:
ntp.src_interface = ntp_src_interface
# Servers Section
candidate_pattern = self._ntp_server_base_regex
regexes = [
self._source_interface_regex,
self._source_vrf_regex,
re.compile("key (?P<key_id>\d+)"),
re.compile("(?P<prefer>prefer)")
]
ntp_servers = property_autoparse(lines=ntp_lines, candidate_pattern=candidate_pattern, regexes=regexes, logger=self.logger, include_candidate=True)
if len(ntp_servers):
ntp_servers = [self._val_to_bool(entry=x, keys=['prefer']) for x in ntp_servers]
ntp_servers = [NtpServer.parse_obj(x) for x in ntp_servers]
if len(ntp_servers):
ntp.servers = ntp_servers
# Peers
candidate_pattern = self._ntp_peer_base_regex
regexes = [
self._source_interface_regex,
self._source_vrf_regex,
re.compile("key (?P<key_id>\d+)"),
re.compile("(?P<prefer>prefer)")
]
ntp_peers = property_autoparse(lines=ntp_lines, candidate_pattern=candidate_pattern, regexes=regexes, logger=self.logger, include_candidate=True)
if len(ntp_peers):
ntp_peers = [self._val_to_bool(entry=x, keys=['prefer']) for x in ntp_peers]
ntp_peers = [NtpServer.parse_obj(x) for x in ntp_peers]
if len(ntp_peers):
ntp.peers = ntp_peers
authenticate = self._globals_check(
regex=re.compile(
pattern=r"^(?:(?P<no>no) )?ntp authenticate$",
flags=re.MULTILINE
),
default=False
)
ntp.authenticate = authenticate
# Keys
ntp_auth_keys, ntp_lines = re_filter_lines(lines=ntp_lines, regex=self._ntp_authentication_keys_regex, group='ALL')
if len(ntp_auth_keys):
ntp_auth_keys = [NtpKey.parse_obj(x) for x in ntp_auth_keys]
ntp_trusted_keys = [int(x) for x in re_search_lines(lines=ntp_lines, regex=self._ntp_trusted_key_regex, group='key_id')]
for ntp_key in ntp_auth_keys:
if ntp_key.key_id in ntp_trusted_keys:
ntp_key.trusted = True
if len(ntp_auth_keys):
ntp.ntp_keys = ntp_auth_keys
# Access Lists
acl_lines, ntp_lines = re_filter_lines(lines=ntp_lines, regex=self._ntp_acl_regex, group="ALL")
if len(acl_lines):
ntp.access_groups = NtpAccessGroups()
for entry in acl_lines:
if entry['access_type'] == 'serve-only':
ntp.access_groups.serve_only = entry['acl_name']
if entry['access_type'] == 'query-only':
ntp.access_groups.query_only = entry['acl_name']
if entry['access_type'] == 'serve':
ntp.access_groups.serve = entry['acl_name']
if entry['access_type'] == 'peer':
ntp.access_groups.peer = entry['acl_name']
return ntp
@functools.cached_property
def logging(self) -> LoggingConfig:
logging_lines = list(self.logging_lines)
if len(logging_lines) == 0:
return None
logging = LoggingConfig()
candidate_pattern = self._logging_server_base_regex
regexes = [
self._source_vrf_regex,
self._source_interface_regex,
self._logging_transport_regex
]
logging_servers = property_autoparse(lines=logging_lines, candidate_pattern=self._logging_server_base_regex, regexes=regexes, logger=self.logger, include_candidate=True)
logging_servers = [{k:v for k,v in x.items() if v is not None} for x in logging_servers]
logging_servers = [LoggingServer.parse_obj(x) for x in logging_servers]
if len(logging_servers):
logging.servers = logging_servers
logging_sources = re_search_lines(lines=logging_lines, regex=self._logging_source_interface_regex, group='ALL')
logging_sources = [LoggingSource.parse_obj(x) for x in logging_sources]
if len(logging_sources):
logging.sources = logging_sources
return logging
@property
def routing(self):
raise NotImplementedError
@functools.cached_property
def proxy_arp_enabled(self) -> bool:
candidates = self.re_search_lines(regex=self._ip_arp_proxy_disable_regex, group='ALL')
candidate = self.first_candidate_or_none(candidates=candidates)
if candidate is not None:
candidate = self._val_to_bool(entry=candidate, keys=['no'])
if candidate['no'] is True:
# no ip arp proxy disable
return True
elif candidate['no'] is False:
# ip arp proxy disable
return False
else:
# Enabled by default
return True
def _globals_check(self, regex: re.Pattern, default: bool) -> bool:
"""
This function looks for given `regex`, if found and prefixed with 'no', returns False. If not prefixed with no,
returns True. Returns 'default' otherwise.
Args:
regex: re.Pattern with 'no' groups specified
default: bool - which value to return if regex did not match
Returns:
"""
candidates = self.re_search_lines(regex=regex, group='ALL')
candidate = self.first_candidate_or_none(candidates=candidates)
if candidate is not None:
candidate = self._val_to_bool(entry=candidate, keys=['no'])
if candidate['no'] is True:
# Negated
return False
elif candidate['no'] is False:
return True
else:
return default
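    # Example (sketch) with self._ip_source_route_regex:
    #   "no ip source-route" present in the config -> False
    #   "ip source-route" present in the config    -> True
    #   neither line present                       -> the supplied default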
@functools.cached_property
def password_encryption_enabled(self) -> bool:
return self._globals_check(regex=self._service_password_encryption_regex, default=False)
@functools.cached_property
def banner(self):
banners = {}
candidates = self.re_search_lines(regex=self._banner_regex)
stop_chars = ['^C', chr(3)]
for candidate in candidates:
banner_type = candidate.re_search(regex=self._banner_regex, group='banner_type')
banner_text = None
# Determine the stopchar
stop_char_occurences = {candidate.text.count(x):x for x in stop_chars}
stop_char = stop_char_occurences[max(stop_char_occurences.keys())]
if max(stop_char_occurences.keys()) == 2: # SingleLine
banner_text = [x for x in candidate.text.split(stop_char) if x != ''][-1]
else: # Multiline
banner_text = []
# First line
first_part_candidates = [x for x in candidate.text.split(stop_char) if x != ''][1:]
if len(first_part_candidates):
banner_text.append(first_part_candidates[0])
for line in self.lines[candidate.number+1:]:
if stop_char in line.text:
last_part_candidate = [x for x in line.text.split(stop_char) if x != ''][:1]
if len(last_part_candidate):
banner_text.append(last_part_candidate[0])
break
else:
banner_text.append(line.text)
if isinstance(banner_text, list):
banner_text = '\n'.join(banner_text)
banners[banner_type] = banner_text
return banners
@functools.cached_property
def ip_source_routing_enabled(self):
return self._globals_check(regex=self._ip_source_route_regex, default=True)
@functools.cached_property
def ip_finger_enabled(self):
return self._globals_check(regex=self._ip_finger_regex, default=True)
@functools.cached_property
def service_pad_enabled(self) -> bool:
"""
Packet Assembler Disassembler service
Returns: bool
"""
return self._globals_check(regex=self._service_pad_regex, default=True)
    @functools.cached_property
    def service_tcp_keepalives_in(self):
        return self._globals_check(regex=self._service_tcp_keepalives_in, default=False)
    @functools.cached_property
    def service_tcp_keepalives_out(self):
        # Assumes a `_service_tcp_keepalives_out` regex attribute exists alongside the `_in` one.
        return self._globals_check(regex=self._service_tcp_keepalives_out, default=False)
def to_model(self):
model = HostConfig(interfaces={x.name: x for x in self.interfaces})
if self.hostname is not None:
model.hostname = self.hostname
vrfs = list(self.vrfs)
if len(vrfs):
model.vrf_definitions = vrfs
management_lines = list(self.management_lines)
if len(management_lines):
model.management_lines = management_lines
if self.ntp is not None:
model.ntp = self.ntp
if self.logging is not None:
model.logging = self.logging
return model
|
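# Give the local CouchDB instance a fresh server UUID (when etc/couchdb.ini is present)
# and then start circusd as a daemon.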
import ConfigParser, os, uuid, subprocess
cur_dir = os.path.dirname(__file__)
couchdb_ini_file_path = os.path.join(cur_dir, 'etc/couchdb.ini')
if os.path.isfile(couchdb_ini_file_path):
config = ConfigParser.ConfigParser()
config.read([couchdb_ini_file_path])
config.set('couchdb', 'uuid', uuid.uuid4().hex)
with open(couchdb_ini_file_path, 'wb') as configfile:
config.write(configfile)
subprocess.check_call([os.path.join(cur_dir, 'bin/circusd'), "--daemon"]) |
from cli.src.helpers.build_io import (get_inventory_path_for_build,
load_inventory, load_manifest,
save_inventory)
from cli.src.helpers.data_loader import load_schema_obj
from cli.src.helpers.data_loader import types as data_types
from cli.src.helpers.doc_list_helpers import select_single
from cli.src.helpers.objdict_helpers import merge_objdict
from cli.src.models.AnsibleHostModel import AnsibleHostModel
from cli.src.models.AnsibleInventoryItem import AnsibleInventoryItem
from cli.src.Step import Step
class AnsibleInventoryUpgrade(Step):
def __init__(self, build_dir, backup_build_dir, config_docs):
super().__init__(__name__)
self.build_dir = build_dir
self.backup_build_dir = backup_build_dir
self.cluster_model = None
self.config_docs = config_docs
self.manifest_docs = []
def __enter__(self):
super().__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def get_role(self, inventory, role_name):
for role in inventory:
if role.role == role_name:
return role
return None
def delete_role(self, inventory, role_name):
for i in range(len(inventory)):
if inventory[i].role == role_name:
del inventory[i]
return
def rename_role(self, inventory, role_name, new_role_name):
role = self.get_role(inventory, role_name)
if role is not None:
role.role = new_role_name
def get_new_config_roles(self):
roles = []
for doc in self.config_docs:
if "configuration/" in doc.kind:
roles.append(doc.kind.replace('configuration/', ''))
return roles
def upgrade(self):
inventory_path = get_inventory_path_for_build(self.backup_build_dir)
self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
loaded_inventory = load_inventory(inventory_path)
# move loaded inventory to templating structure
new_inventory = []
for key in loaded_inventory.groups:
if key != 'all' and key != 'ungrouped':
group_hosts = loaded_inventory.groups[key].hosts
new_hosts = []
for host in group_hosts:
new_hosts.append(AnsibleHostModel(host.address, host.vars['ansible_host']))
new_inventory.append(AnsibleInventoryItem(key, new_hosts))
self.logger.info('Upgrading Ansible inventory')
# load cluster model from manifest
self.manifest_docs = load_manifest(self.backup_build_dir)
self.cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
# Merge manifest cluster config with newer defaults
default_cluster_model = load_schema_obj(data_types.DEFAULT, self.cluster_model.provider, 'epiphany-cluster')
merge_objdict(default_cluster_model, self.cluster_model)
self.cluster_model = default_cluster_model
# repository & image_registry roles added in v0.4.0
repository = self.get_role(new_inventory, 'repository')
if repository is None:
            raise Exception('repository group not found in inventory. '
                            'Your deployment may not be supported by this version of Epiphany. '
                            'You may try to use an older version first.')
# add image_registry if not present
image_registry = self.get_role(new_inventory, 'image_registry')
if image_registry is None:
hosts = [AnsibleHostModel(repository.hosts[0].name, repository.hosts[0].ip)]
new_inventory.append(AnsibleInventoryItem('image_registry', hosts))
# save new inventory
save_inventory(new_inventory, self.cluster_model, self.build_dir)
return 0
|
from flask import current_app
from src.shared.entity import Session
from src.shared.manage_error import ManageErrorUtils, CodeError, TError
from .entities import InputOutput, InputOutputSchema
class InputOutputDBService:
@staticmethod
def check_input_output_exists(input_output_id):
session = None
try:
session = Session()
existing_input_output = session.query(InputOutput).filter_by(id_es=input_output_id).first()
if existing_input_output is None:
msg = "L'entrée sortie n'existe pas"
ManageErrorUtils.exception(CodeError.DB_VALIDATION_ERROR, TError.DATA_NOT_FOUND, msg, 404)
        except ValueError as error:
            current_app.logger.error(f"InputOutputDBService - check_input_output_exists : {error}")
            raise
        except Exception as error:
            current_app.logger.error(f"InputOutputDBService - check_input_output_exists : {error}")
            raise
finally:
if session is not None:
session.close()
@staticmethod
def check_input_output_uniqueness(annee_recette, annee_affectation, input_output_id=None):
session = None
try:
session = Session()
existing_input_output = session.query(InputOutput).filter(InputOutput.annee_recette_es == annee_recette,
InputOutput.annee_affectation_es == annee_affectation)
if input_output_id is not None:
existing_input_output = existing_input_output.filter(InputOutput.id_es != input_output_id)
existing_input_output = existing_input_output.first()
if existing_input_output is not None:
msg = f'L\'entrée sortie ({annee_recette} , {annee_affectation}) existe déjà.'
ManageErrorUtils.exception(CodeError.DB_VALIDATION_ERROR, TError.UNIQUE_CONSTRAINT_ERROR, msg, 400)
        except ValueError as error:
            current_app.logger.error(f"InputOutputDBService - check_input_output_uniqueness : {error}")
            raise
        except Exception as error:
            current_app.logger.error(f"InputOutputDBService - check_input_output_uniqueness : {error}")
            raise
finally:
if session is not None:
session.close()
@staticmethod
def get_input_output_by_id(input_output_id: int):
session = None
try:
session = Session()
input_output_object = session.query(InputOutput).filter_by(id_es=input_output_id).first()
schema = InputOutputSchema()
input_output = schema.dump(input_output_object)
if input_output is None:
ManageErrorUtils.value_error(CodeError.DB_VALIDATION_WARNING, TError.DATA_NOT_FOUND,
'Cette entrée sortie n\'existe pas', 404)
return input_output
        except ValueError as error:
            current_app.logger.error(f"InputOutputDBService - get_input_output_by_id : {error}")
            raise
        except Exception as error:
            current_app.logger.error(f"InputOutputDBService - get_input_output_by_id : {error}")
            raise
finally:
if session is not None:
session.close()
@staticmethod
def get_input_output_by_filter(query_param=None):
session = None
try:
session = Session()
input_outputs = session.query(InputOutput)
if query_param is not None:
annee_recette_es = query_param.get('annee_recette_es', default=None, type=int)
if annee_recette_es is not None:
input_outputs = input_outputs.filter(InputOutput.annee_recette_es == annee_recette_es)
annee_recette_es_sup = query_param.get('annee_recette_es_sup', default=None, type=int)
if annee_recette_es_sup is not None:
input_outputs = input_outputs.filter(InputOutput.annee_recette_es >= annee_recette_es_sup)
annee_recette_es_inf = query_param.get('annee_recette_es_inf', default=None, type=int)
if annee_recette_es_inf is not None:
input_outputs = input_outputs.filter(InputOutput.annee_recette_es <= annee_recette_es_inf)
annee_affectation_es = query_param.get('annee_affectation_es', default=None, type=int)
if annee_affectation_es is not None:
input_outputs = input_outputs.filter(InputOutput.annee_affectation_es == annee_affectation_es)
annee_affectation_es_sup = query_param.get('annee_affectation_es_sup', default=None, type=int)
if annee_affectation_es_sup is not None:
input_outputs = input_outputs.filter(InputOutput.annee_affectation_es >= annee_affectation_es_sup)
annee_affectation_es_inf = query_param.get('annee_affectation_es_inf', default=None, type=int)
if annee_affectation_es_inf is not None:
input_outputs = input_outputs.filter(InputOutput.annee_affectation_es <= annee_affectation_es_inf)
montant_es = query_param.get('montant_es', default=None, type=float)
if montant_es is not None:
input_outputs = input_outputs.filter(InputOutput.montant_es == montant_es)
montant_es_sup = query_param.get('montant_es_sup', default=None, type=float)
if montant_es_sup is not None:
input_outputs = input_outputs.filter(InputOutput.montant_es >= montant_es_sup)
montant_es_inf = query_param.get('montant_es_inf', default=None, type=float)
if montant_es_inf is not None:
input_outputs = input_outputs.filter(InputOutput.montant_es <= montant_es_inf)
input_outputs = input_outputs.all()
input_outputs = InputOutputSchema(many=True).dump(input_outputs)
if input_outputs is None:
ManageErrorUtils.value_error(CodeError.DB_VALIDATION_WARNING, TError.DATA_NOT_FOUND,
'Cette-es entrée-s sortie-s n\'existe-nt pas', 404)
return input_outputs
        except ValueError as error:
            current_app.logger.error(f"InputOutputDBService - get_input_output_by_filter : {error}")
            raise
        except Exception as error:
            current_app.logger.error(f"InputOutputDBService - get_input_output_by_filter : {error}")
            raise
finally:
if session is not None:
session.close()
@staticmethod
def insert(input_output: InputOutput):
session = None
inserted_input_output = None
try:
            session = Session()
            if input_output is None:
                msg = "Une erreur est survenue lors de l'enregistrement de cette entrée sortie"
                ManageErrorUtils.value_error(CodeError.DB_VALIDATION_WARNING, TError.INSERT_ERROR, msg, 500)
            else:
                session.add(input_output)
                session.commit()
                inserted_input_output = InputOutputSchema().dump(input_output)
return inserted_input_output
        except ValueError as error:
            session.rollback()
            current_app.logger.error(f"InputOutputDBService - insert : {error}")
            raise
        except Exception as error:
            session.rollback()
            current_app.logger.error(f"InputOutputDBService - insert : {error}")
            raise
finally:
if session is not None:
session.close()
@staticmethod
def update(input_output: InputOutput):
session = None
try:
session = Session()
session.merge(input_output)
session.commit()
updated_input_output = InputOutputSchema().dump(input_output)
return updated_input_output
        except ValueError as error:
            session.rollback()
            current_app.logger.error(f"InputOutputDBService - update : {error}")
            raise
        except Exception as error:
            session.rollback()
            current_app.logger.error(f"InputOutputDBService - update : {error}")
            raise
finally:
if session is not None:
session.close()
@staticmethod
def delete(input_output_id: int) -> int:
session = None
try:
session = Session()
session.query(InputOutput).filter_by(id_es=input_output_id).delete()
session.commit()
return input_output_id
        except ValueError as error:
            session.rollback()
            current_app.logger.error(f"InputOutputDBService - delete : {error}")
            raise
        except Exception as error:
            session.rollback()
            current_app.logger.error(f"InputOutputDBService - delete : {error}")
            raise
finally:
if session is not None:
session.close()
|
import pandas as pd
from IPython.display import display
dt1 = {
'aa': ['j', 'b', 'e', 'g', 'i', 'c'],
"ab": [4, 2, 5, 6, 1, 7],
}
dt2 = {
'aa': ['b', 'e', 'i', 'j', 'c', 'g'],
"ac": [4, 9, 5, 8, 3, 4],
}
df1 = pd.DataFrame(dt1)
# df1 = df1.set_index('aa')
display(df1)
df2 = pd.DataFrame(dt2)
# df2 = df2.set_index('aa')
display(df2)
# df3 = pd.concat([df1, df2], axis=1, sort=False)
# df3.reset_index(inplace=True)
# df3 = df3.rename(columns = {'index':'aa'})
# display(df3)
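# Left-join df2 onto df1 on the shared 'aa' column: rows keep df1's order and pick up
# the matching 'ac' values from df2.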
df3 = df1.merge(df2, how='left')
df3 = df3.reindex(sorted(df3.columns), axis=1)
df3 = df3[['ac', 'aa', 'ab']]
display(df3)
|
"""
A python program to plot a grade histogram
Usage: python plot_grade_hist.py [filename] [colname] [maxy] ([nscorecols])
Required inputs:
filename - text file containing either two columns (name total) or
three columns (name, score_multiple-choice, score_short-answer)
colname - the name of the column containing the score of interest.
NOTE: for the old-school text files, this will be 'col2'
_unless_ the old-school file also is in 3-column format, in which
case this parameter is ignored and the optional nscorecols
parameter should be set to 2.
If the input file is in CSV format, the colname parameter could
be something like 'Midterm 2 (32620)' or 'MT2' or 'Final Grade'
maxy - maximum value for y axis
Optional input:
nscorecols - number of score columns (1 for 2-column input, 2 for 3-column
input). ONLY set this if the input file is in the old-school
text format AND it is in 3-column format (i.e., with
nscorecols=2). If it is in the old-school text format but is
in the 2-column input, then DO NOT set this keyword, but just
set the colname variable above to 'col2'
"""
import numpy as n
from matplotlib import pyplot as p
import sys
import gradefuncs as gf
if len(sys.argv) < 4:
print('')
print('ERROR: This program requires at least 3 input parameters:')
print(' 1. infile - name of the input file containing scores')
print(' 2. colname - name of column containing the relevant score if the')
print(' input file is in csv format produced by smartsite or canvas or')
print(' if it is in old-school text format with one total-score column')
print(' In the second case (text format with one column of scores) the')
print(' colname parameter should be set to "col2"')
print(' 3. maxy - maximum y value for plot')
print('It may also take an optional fourth parameter, which should ONLY BE')
print(' USED if the file is BOTH in the old-school text format and has')
print(' two columns with scores (one for multiple-choice and one for short')
print(' answer), in which case, this parameter should be used and set to 2.')
print('')
print('Format: python plot_grade_hist.py infile colname maxy')
print(' --- or ---')
print('Format: python plot_grade_hist.py infile colname maxy 2')
print('')
sys.exit()
if len(sys.argv) == 5:
old_3col = True
else:
old_3col = False
infile = sys.argv[1]
colname = sys.argv[2]
maxy = float(sys.argv[3])
if old_3col:
tot = gf.read_text(infile,2)
else:
tot = gf.read_table(infile, colname)
if tot is None:
print('Could not plot histogram')
print('')
sys.exit()
binsize = 3
gf.plot_tothist(infile,tot,maxy,binsize)
|
from flask import jsonify
def badrequest(message):
response = jsonify({'status': 'BAD_REQUEST', 'error_message': message})
response.status_code = 400
return response
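# Example usage inside a Flask view (hypothetical route and field name):
#   if 'name' not in (request.get_json(silent=True) or {}):
#       return badrequest('missing "name" field')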
|
import csv
datosStudents = sc.textFile("/home/leo/Files/studentsPR.csv")
students=datosStudents.map(lambda x: [x]).map(lambda x : list(csv.reader(x))[0])
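# Keep only rows whose sixth field is 'F' and whose third field is '71381'
# (what these columns mean depends on the layout of studentsPR.csv).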
final=students.filter(lambda x: x[5] == 'F').filter(lambda x: x[2] == '71381')
final.foreach(print)
|
# Generated by Django 2.2.12 on 2020-04-30 05:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='event',
name='partner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Partner'),
),
]
|
def get_warnings(connection):
warnings = connection.show_warnings()
if warnings:
print('warnings:', warnings)
def get_script_from_file(filename):
    with open(filename, 'r') as f:
        return f.read()
def get_report(mysql, script):
conn = mysql.connect()
get_warnings(conn)
cursor = conn.cursor()
cursor.execute(script)
results = cursor.fetchall()
cursor.close()
conn.close()
# print('report results:', results)
return results
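# Example usage (hypothetical, assuming a configured flask-mysql `MySQL` object):
#   rows = get_report(mysql, get_script_from_file('reports/daily.sql'))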
|
#
from modules import ilmodule
import requests
import re
import time
from time import mktime
from datetime import datetime
import json
import globals
#
# Try to determine the actual date of the picture. By metadata, filename, etc,
# Assuming all the EXIF data was already extracted previously.
# Place this step of processing - one of the latest (i.e. after EXIF, GPS, etc.)
#
class DateOfPicture(ilmodule.ILModule):
def __init__(self):
super().__init__()
self.getMessageBus().subscribe(self.onMessage, globals.TOPIC_DATE)
def onMessage(self, arg):
self.getLogger().debug("Received topic date request: " + str(arg))
metadata = self.findDateOfImage(arg)
# metadata["text_en"] = ""
# metadata["text_ru"] = ""
# Json pretty print
self.getLogger().info(json.dumps(metadata, indent=4))
self.saveJsonToFile(
json.dumps(metadata, indent=4),
"json_" + arg["hash"] + ".json",
)
self.getMessageBus().sendMessage(globals.TOPIC_SAVEDB, arg=metadata)
def loadJsonFromFile(self, filename):
with open(filename, "r") as f:
return json.load(f)
def stringIsDateTime(self, str):
try:
time.strptime(str, "%Y:%m:%d %H:%M:%S")
return True
except ValueError:
return False
def stringIsDate(self, str):
try:
time.strptime(str, "%Y:%m:%d")
return True
except ValueError:
return False
def stringIsTime(self, str):
try:
time.strptime(str, "%H:%M:%S")
return True
except ValueError:
return False
def findDateOfImage(self, image_data): # Getting the path to the image file.
try:
date_time = ""
# if DateTimeOriginal is not available, use DateTime
if "DateTimeOriginal" in image_data["EXIF"] and self.stringIsDateTime(
str(image_data["EXIF"]["DateTimeOriginal"]).replace(": ", ":")
):
date_time = str(image_data["EXIF"]["DateTimeOriginal"]).replace(
": ", ":"
)
else:
if "DateTime" in image_data["EXIF"] and self.stringIsDateTime(
str(image_data["EXIF"]["DateTime"]).replace(": ", ":")
):
date_time = str(image_data["EXIF"]["DateTime"]).replace(": ", ":")
if date_time != "":
image_data["dateOfImage"] = date_time
return image_data
gps_time = ""
if "GPSTimeStamp" in image_data["gps"] and self.stringIsTime(
str(image_data["gps"]["GPSTimeStamp"]).replace(": ", ":")
):
gps_time = str(image_data["gps"]["GPSTimeStamp"]).replace(": ", ":")
else:
gps_time = "00:00:01"
gps_date = ""
if "GPSDateStamp" in image_data["gps"] and self.stringIsDate(
str(image_data["gps"]["GPSDateStamp"]).replace(": ", ":")
):
gps_date = str(image_data["gps"]["GPSDateStamp"]).replace(": ", ":")
else:
gps_date = ""
# If there's no DATE - we're not interested in TIME.
if gps_date != "":
gps_date_parsed = datetime.fromtimestamp(
mktime(time.strptime(gps_date, "%Y:%m:%d"))
).date()
gps_time_parsed = datetime.strptime(gps_time, "%H:%M:%S").time()
gps_date_parsed = datetime.combine(gps_date_parsed, gps_time_parsed)
image_data["dateOfImage"] = gps_date_parsed.strftime(
"%Y:%m:%d %H:%M:%S"
)
return image_data
# If no date can be found in image_data, let's inspect the filename itself (the last hope)
fileName = image_data["image_path"]
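            # Look for a full YYYY[sep]MM[sep]DD[sep]HH[sep]MM[sep]SS timestamp in the
            # filename, e.g. 'photo_2021-07-04 18.35.59.jpg' (separators are flexible).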
            fullDateRegExp = re.search(
                r"((19|20)\d\d)([\-\._/]*)(0[1-9]|1[012])([\-\._/]*)(0[1-9]|[12][0-9]|3[01])([\-\._/\s]*)([01][0-9]|2[0-3])([\-\._/\s]*)([0-5][0-9])([\-\._/\s]*)([0-5][0-9])",
                fileName,
            )
if fullDateRegExp:
d = datetime(
int(fullDateRegExp.group(1)),
int(fullDateRegExp.group(4)),
int(fullDateRegExp.group(6)),
int(fullDateRegExp.group(8)),
int(fullDateRegExp.group(10)),
int(fullDateRegExp.group(12)),
)
image_data["dateOfImage"] = d.strftime("%Y:%m:%d %H:%M:%S")
return image_data
            dateOnlyRegExp = re.search(
                r"((19|20)\d\d)([\-\._/]*)(0[1-9]|1[012])([\-\._/]*)(0[1-9]|[12][0-9]|3[01])",
                fileName,
            )
if dateOnlyRegExp:
d = datetime(
int(dateOnlyRegExp.group(1)),
int(dateOnlyRegExp.group(4)),
int(dateOnlyRegExp.group(6)),
0,
0,
1,
)
image_data["dateOfImage"] = d.strftime("%Y:%m:%d %H:%M:%S")
return image_data
else:
self.getLogger().warning(
"No DATE information found in picture file: " + str(fileName)
)
return image_data
except Exception as e:
self.getLogger().error(str(e))
finally:
self.cleanupTmp()
|
# noinspection PyUnresolvedReferences
import gc
import ucomputer
import utime
def address():
return ucomputer.get_computer_address()
def tmp_address():
return ucomputer.get_tmp_address()
def free_memory():
return gc.mem_free()
def total_memory():
return gc.mem_alloc() + gc.mem_free()
def uptime():
return utime.time_up()
def shutdown(reboot: bool = False):
if reboot:
ucomputer.reboot()
else:
ucomputer.shutdown()
raise NotImplementedError("invalid behavior")
def reboot():
ucomputer.reboot()
raise NotImplementedError("invalid behavior")
def get_boot_address() -> str:
import component
# noinspection PyUnresolvedReferences
eeprom = component.eeprom
return eeprom.getData().decode()
def set_boot_address(address: str):
import component
# noinspection PyUnresolvedReferences
eeprom = component.eeprom
eeprom.setData(address.encode())
def runlevel():
return 1
def users():
return ucomputer.get_users()
def add_user(user: str):
return ucomputer.add_user(user)
def remove_user(user: str):
return ucomputer.remove_user(user)
def push_signal(name, *args):
ucomputer.push_signal(name, *args)
def pull_signal(seconds):
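    # OpenComputers signals are timed in game ticks; Minecraft runs at 20 ticks per
    # second, so the timeout given in seconds is converted to ticks here.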
signal = ucomputer.pop_signal(int(seconds * 20))
if signal is None:
return None
name, args = signal
return (name,) + args
def beep(frequency=None, duration=None):
return ucomputer.beep(frequency, duration)
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections
import zlib
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Swf(KaitaiStruct):
"""SWF files are used by Adobe Flash (AKA Shockwave Flash, Macromedia
Flash) to encode rich interactive multimedia content and are,
essentially, a container for special bytecode instructions to play
back that content. In early 2000s, it was dominant rich multimedia
web format (.swf files were integrated into web pages and played
back with a browser plugin), but its usage largely declined in
2010s, as HTML5 and performant browser-native solutions
(i.e. JavaScript engines and graphical approaches, such as WebGL)
emerged.
There are a lot of versions of SWF (~36), format is somewhat
documented by Adobe.
.. seealso::
Source - https://www.adobe.com/content/dam/acom/en/devnet/pdf/swf-file-format-spec.pdf
"""
class Compressions(Enum):
zlib = 67
none = 70
lzma = 90
class TagType(Enum):
end_of_file = 0
place_object = 4
remove_object = 5
set_background_color = 9
define_sound = 14
place_object2 = 26
remove_object2 = 28
frame_label = 43
export_assets = 56
script_limits = 65
file_attributes = 69
place_object3 = 70
symbol_class = 76
metadata = 77
define_scaling_grid = 78
do_abc = 82
define_scene_and_frame_label_data = 86
SEQ_FIELDS = ["compression", "signature", "version", "len_file", "plain_body", "zlib_body"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['compression']['start'] = self._io.pos()
self.compression = KaitaiStream.resolve_enum(Swf.Compressions, self._io.read_u1())
self._debug['compression']['end'] = self._io.pos()
self._debug['signature']['start'] = self._io.pos()
self.signature = self._io.read_bytes(2)
self._debug['signature']['end'] = self._io.pos()
if not self.signature == b"\x57\x53":
raise kaitaistruct.ValidationNotEqualError(b"\x57\x53", self.signature, self._io, u"/seq/1")
self._debug['version']['start'] = self._io.pos()
self.version = self._io.read_u1()
self._debug['version']['end'] = self._io.pos()
self._debug['len_file']['start'] = self._io.pos()
self.len_file = self._io.read_u4le()
self._debug['len_file']['end'] = self._io.pos()
if self.compression == Swf.Compressions.none:
self._debug['plain_body']['start'] = self._io.pos()
self._raw_plain_body = self._io.read_bytes_full()
_io__raw_plain_body = KaitaiStream(BytesIO(self._raw_plain_body))
self.plain_body = Swf.SwfBody(_io__raw_plain_body, self, self._root)
self.plain_body._read()
self._debug['plain_body']['end'] = self._io.pos()
if self.compression == Swf.Compressions.zlib:
self._debug['zlib_body']['start'] = self._io.pos()
self._raw__raw_zlib_body = self._io.read_bytes_full()
self._raw_zlib_body = zlib.decompress(self._raw__raw_zlib_body)
_io__raw_zlib_body = KaitaiStream(BytesIO(self._raw_zlib_body))
self.zlib_body = Swf.SwfBody(_io__raw_zlib_body, self, self._root)
self.zlib_body._read()
self._debug['zlib_body']['end'] = self._io.pos()
class Rgb(KaitaiStruct):
SEQ_FIELDS = ["r", "g", "b"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['r']['start'] = self._io.pos()
self.r = self._io.read_u1()
self._debug['r']['end'] = self._io.pos()
self._debug['g']['start'] = self._io.pos()
self.g = self._io.read_u1()
self._debug['g']['end'] = self._io.pos()
self._debug['b']['start'] = self._io.pos()
self.b = self._io.read_u1()
self._debug['b']['end'] = self._io.pos()
class DoAbcBody(KaitaiStruct):
SEQ_FIELDS = ["flags", "name", "abcdata"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['flags']['start'] = self._io.pos()
self.flags = self._io.read_u4le()
self._debug['flags']['end'] = self._io.pos()
self._debug['name']['start'] = self._io.pos()
self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['name']['end'] = self._io.pos()
self._debug['abcdata']['start'] = self._io.pos()
self.abcdata = self._io.read_bytes_full()
self._debug['abcdata']['end'] = self._io.pos()
class SwfBody(KaitaiStruct):
SEQ_FIELDS = ["rect", "frame_rate", "frame_count", "file_attributes_tag", "tags"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['rect']['start'] = self._io.pos()
self.rect = Swf.Rect(self._io, self, self._root)
self.rect._read()
self._debug['rect']['end'] = self._io.pos()
self._debug['frame_rate']['start'] = self._io.pos()
self.frame_rate = self._io.read_u2le()
self._debug['frame_rate']['end'] = self._io.pos()
self._debug['frame_count']['start'] = self._io.pos()
self.frame_count = self._io.read_u2le()
self._debug['frame_count']['end'] = self._io.pos()
if self._root.version >= 8:
self._debug['file_attributes_tag']['start'] = self._io.pos()
self.file_attributes_tag = Swf.Tag(self._io, self, self._root)
self.file_attributes_tag._read()
self._debug['file_attributes_tag']['end'] = self._io.pos()
self._debug['tags']['start'] = self._io.pos()
self.tags = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['tags']:
self._debug['tags']['arr'] = []
self._debug['tags']['arr'].append({'start': self._io.pos()})
_t_tags = Swf.Tag(self._io, self, self._root)
_t_tags._read()
self.tags.append(_t_tags)
self._debug['tags']['arr'][len(self.tags) - 1]['end'] = self._io.pos()
i += 1
self._debug['tags']['end'] = self._io.pos()
class Rect(KaitaiStruct):
SEQ_FIELDS = ["b1", "skip"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['b1']['start'] = self._io.pos()
self.b1 = self._io.read_u1()
self._debug['b1']['end'] = self._io.pos()
self._debug['skip']['start'] = self._io.pos()
self.skip = self._io.read_bytes(self.num_bytes)
self._debug['skip']['end'] = self._io.pos()
@property
def num_bits(self):
if hasattr(self, '_m_num_bits'):
return self._m_num_bits if hasattr(self, '_m_num_bits') else None
self._m_num_bits = (self.b1 >> 3)
return self._m_num_bits if hasattr(self, '_m_num_bits') else None
@property
def num_bytes(self):
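            # A RECT stores a 5-bit size field followed by four signed values of
            # `num_bits` bits each; b1 already consumed 5 + 3 of those bits, so the
            # remainder is ceil((4 * num_bits - 3) / 8) bytes.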
if hasattr(self, '_m_num_bytes'):
return self._m_num_bytes if hasattr(self, '_m_num_bytes') else None
self._m_num_bytes = (((self.num_bits * 4) - 3) + 7) // 8
return self._m_num_bytes if hasattr(self, '_m_num_bytes') else None
class Tag(KaitaiStruct):
SEQ_FIELDS = ["record_header", "tag_body"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['record_header']['start'] = self._io.pos()
self.record_header = Swf.RecordHeader(self._io, self, self._root)
self.record_header._read()
self._debug['record_header']['end'] = self._io.pos()
self._debug['tag_body']['start'] = self._io.pos()
_on = self.record_header.tag_type
if _on == Swf.TagType.define_sound:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.DefineSoundBody(_io__raw_tag_body, self, self._root)
self.tag_body._read()
elif _on == Swf.TagType.set_background_color:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.Rgb(_io__raw_tag_body, self, self._root)
self.tag_body._read()
elif _on == Swf.TagType.script_limits:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.ScriptLimitsBody(_io__raw_tag_body, self, self._root)
self.tag_body._read()
elif _on == Swf.TagType.do_abc:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.DoAbcBody(_io__raw_tag_body, self, self._root)
self.tag_body._read()
elif _on == Swf.TagType.export_assets:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.SymbolClassBody(_io__raw_tag_body, self, self._root)
self.tag_body._read()
elif _on == Swf.TagType.symbol_class:
self._raw_tag_body = self._io.read_bytes(self.record_header.len)
_io__raw_tag_body = KaitaiStream(BytesIO(self._raw_tag_body))
self.tag_body = Swf.SymbolClassBody(_io__raw_tag_body, self, self._root)
self.tag_body._read()
else:
self.tag_body = self._io.read_bytes(self.record_header.len)
self._debug['tag_body']['end'] = self._io.pos()
class SymbolClassBody(KaitaiStruct):
SEQ_FIELDS = ["num_symbols", "symbols"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['num_symbols']['start'] = self._io.pos()
self.num_symbols = self._io.read_u2le()
self._debug['num_symbols']['end'] = self._io.pos()
self._debug['symbols']['start'] = self._io.pos()
self.symbols = [None] * (self.num_symbols)
for i in range(self.num_symbols):
if not 'arr' in self._debug['symbols']:
self._debug['symbols']['arr'] = []
self._debug['symbols']['arr'].append({'start': self._io.pos()})
_t_symbols = Swf.SymbolClassBody.Symbol(self._io, self, self._root)
_t_symbols._read()
self.symbols[i] = _t_symbols
self._debug['symbols']['arr'][i]['end'] = self._io.pos()
self._debug['symbols']['end'] = self._io.pos()
class Symbol(KaitaiStruct):
SEQ_FIELDS = ["tag", "name"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag']['start'] = self._io.pos()
self.tag = self._io.read_u2le()
self._debug['tag']['end'] = self._io.pos()
self._debug['name']['start'] = self._io.pos()
self.name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self._debug['name']['end'] = self._io.pos()
class DefineSoundBody(KaitaiStruct):
class SamplingRates(Enum):
rate_5_5_khz = 0
rate_11_khz = 1
rate_22_khz = 2
rate_44_khz = 3
class Bps(Enum):
sound_8_bit = 0
sound_16_bit = 1
class Channels(Enum):
mono = 0
stereo = 1
SEQ_FIELDS = ["id", "format", "sampling_rate", "bits_per_sample", "num_channels", "num_samples"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['id']['start'] = self._io.pos()
self.id = self._io.read_u2le()
self._debug['id']['end'] = self._io.pos()
self._debug['format']['start'] = self._io.pos()
self.format = self._io.read_bits_int_be(4)
self._debug['format']['end'] = self._io.pos()
self._debug['sampling_rate']['start'] = self._io.pos()
self.sampling_rate = KaitaiStream.resolve_enum(Swf.DefineSoundBody.SamplingRates, self._io.read_bits_int_be(2))
self._debug['sampling_rate']['end'] = self._io.pos()
self._debug['bits_per_sample']['start'] = self._io.pos()
self.bits_per_sample = KaitaiStream.resolve_enum(Swf.DefineSoundBody.Bps, self._io.read_bits_int_be(1))
self._debug['bits_per_sample']['end'] = self._io.pos()
self._debug['num_channels']['start'] = self._io.pos()
self.num_channels = KaitaiStream.resolve_enum(Swf.DefineSoundBody.Channels, self._io.read_bits_int_be(1))
self._debug['num_channels']['end'] = self._io.pos()
self._io.align_to_byte()
self._debug['num_samples']['start'] = self._io.pos()
self.num_samples = self._io.read_u4le()
self._debug['num_samples']['end'] = self._io.pos()
class RecordHeader(KaitaiStruct):
SEQ_FIELDS = ["tag_code_and_length", "big_len"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['tag_code_and_length']['start'] = self._io.pos()
self.tag_code_and_length = self._io.read_u2le()
self._debug['tag_code_and_length']['end'] = self._io.pos()
if self.small_len == 63:
self._debug['big_len']['start'] = self._io.pos()
self.big_len = self._io.read_s4le()
self._debug['big_len']['end'] = self._io.pos()
@property
def tag_type(self):
if hasattr(self, '_m_tag_type'):
return self._m_tag_type if hasattr(self, '_m_tag_type') else None
self._m_tag_type = KaitaiStream.resolve_enum(Swf.TagType, (self.tag_code_and_length >> 6))
return self._m_tag_type if hasattr(self, '_m_tag_type') else None
@property
def small_len(self):
if hasattr(self, '_m_small_len'):
return self._m_small_len if hasattr(self, '_m_small_len') else None
self._m_small_len = (self.tag_code_and_length & 63)
return self._m_small_len if hasattr(self, '_m_small_len') else None
@property
def len(self):
if hasattr(self, '_m_len'):
return self._m_len if hasattr(self, '_m_len') else None
self._m_len = (self.big_len if self.small_len == 63 else self.small_len)
return self._m_len if hasattr(self, '_m_len') else None
class ScriptLimitsBody(KaitaiStruct):
SEQ_FIELDS = ["max_recursion_depth", "script_timeout_seconds"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['max_recursion_depth']['start'] = self._io.pos()
self.max_recursion_depth = self._io.read_u2le()
self._debug['max_recursion_depth']['end'] = self._io.pos()
self._debug['script_timeout_seconds']['start'] = self._io.pos()
self.script_timeout_seconds = self._io.read_u2le()
self._debug['script_timeout_seconds']['end'] = self._io.pos()
|
"""Support for Konnected devices."""
import asyncio
import copy
import hmac
import json
import logging
from aiohttp.hdrs import AUTHORIZATION
from aiohttp.web import Request, Response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from .config_flow import ( # Loading the config flow file will register the flow
CONF_DEFAULT_OPTIONS,
CONF_IO,
CONF_IO_BIN,
CONF_IO_DIG,
CONF_IO_SWI,
OPTIONS_SCHEMA,
)
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_DISCOVERY,
CONF_INVERSE,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
CONF_REPEAT,
DOMAIN,
PIN_TO_ZONE,
STATE_HIGH,
STATE_LOW,
UPDATE_ENDPOINT,
ZONE_TO_PIN,
ZONES,
)
from .errors import CannotConnect
from .handlers import HANDLERS
from .panel import AlarmPanel
_LOGGER = logging.getLogger(__name__)
def ensure_pin(value):
"""Check if valid pin and coerce to string."""
if value is None:
raise vol.Invalid("pin value is None")
if PIN_TO_ZONE.get(str(value)) is None:
raise vol.Invalid("pin not valid")
return str(value)
def ensure_zone(value):
"""Check if valid zone and coerce to string."""
if value is None:
raise vol.Invalid("zone value is None")
    if str(value) not in ZONES:
raise vol.Invalid("zone not valid")
return str(value)
def import_validator(config):
"""Validate zones and reformat for import."""
config = copy.deepcopy(config)
io_cfgs = {}
# Replace pins with zones
for conf_platform, conf_io in (
(CONF_BINARY_SENSORS, CONF_IO_BIN),
(CONF_SENSORS, CONF_IO_DIG),
(CONF_SWITCHES, CONF_IO_SWI),
):
for zone in config.get(conf_platform, []):
if zone.get(CONF_PIN):
zone[CONF_ZONE] = PIN_TO_ZONE[zone[CONF_PIN]]
del zone[CONF_PIN]
io_cfgs[zone[CONF_ZONE]] = conf_io
# Migrate config_entry data into default_options structure
config[CONF_IO] = io_cfgs
config[CONF_DEFAULT_OPTIONS] = OPTIONS_SCHEMA(config)
# clean up fields migrated to options
config.pop(CONF_BINARY_SENSORS, None)
config.pop(CONF_SENSORS, None)
config.pop(CONF_SWITCHES, None)
config.pop(CONF_BLINK, None)
config.pop(CONF_DISCOVERY, None)
config.pop(CONF_IO, None)
return config
# configuration.yaml schemas (legacy)
BINARY_SENSOR_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
vol.Required(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INVERSE, default=False): cv.boolean,
}
),
cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
SENSOR_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
vol.Required(CONF_TYPE): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_POLL_INTERVAL, default=3): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
),
cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
SWITCH_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
vol.Lower, vol.Any(STATE_HIGH, STATE_LOW)
),
vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
}
),
cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
DEVICE_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSOR_SCHEMA_YAML]
),
vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA_YAML]),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA_YAML]),
vol.Inclusive(CONF_HOST, "host_info"): cv.string,
vol.Inclusive(CONF_PORT, "host_info"): cv.port,
vol.Optional(CONF_BLINK, default=True): cv.boolean,
vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
}
),
import_validator,
)
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_API_HOST): vol.Url(),
vol.Optional(CONF_DEVICES): vol.All(
cv.ensure_list, [DEVICE_SCHEMA_YAML]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
YAML_CONFIGS = "yaml_configs"
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Konnected platform."""
cfg = config.get(DOMAIN)
if cfg is None:
cfg = {}
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {
CONF_ACCESS_TOKEN: cfg.get(CONF_ACCESS_TOKEN),
CONF_API_HOST: cfg.get(CONF_API_HOST),
CONF_DEVICES: {},
}
hass.http.register_view(KonnectedView)
# Check if they have yaml configured devices
if CONF_DEVICES not in cfg:
return True
for device in cfg.get(CONF_DEVICES, []):
        # Attempt to import the cfg. Use
        # hass.async_create_task to avoid a deadlock.
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=device,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up panel from a config entry."""
client = AlarmPanel(hass, entry)
# create a data store in hass.data[DOMAIN][CONF_DEVICES]
await client.async_save_data()
try:
await client.async_connect()
except CannotConnect:
# this will trigger a retry in the future
raise config_entries.ConfigEntryNotReady
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
entry.add_update_listener(async_entry_updated)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][CONF_DEVICES].pop(entry.data[CONF_ID])
return unload_ok
async def async_entry_updated(hass: HomeAssistant, entry: ConfigEntry):
"""Reload the config entry when options change."""
await hass.config_entries.async_reload(entry.entry_id)
class KonnectedView(HomeAssistantView):
"""View creates an endpoint to receive push updates from the device."""
url = UPDATE_ENDPOINT
name = "api:konnected"
requires_auth = False # Uses access token from configuration
def __init__(self):
"""Initialize the view."""
@staticmethod
def binary_value(state, activation):
"""Return binary value for GPIO based on state and activation."""
if activation == STATE_HIGH:
return 1 if state == STATE_ON else 0
return 0 if state == STATE_ON else 1
async def update_sensor(self, request: Request, device_id) -> Response:
"""Process a put or post."""
hass = request.app["hass"]
data = hass.data[DOMAIN]
auth = request.headers.get(AUTHORIZATION, None)
tokens = []
if hass.data[DOMAIN].get(CONF_ACCESS_TOKEN):
tokens.extend([hass.data[DOMAIN][CONF_ACCESS_TOKEN]])
tokens.extend(
[
entry.data[CONF_ACCESS_TOKEN]
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_ACCESS_TOKEN)
]
)
if auth is None or not next(
(True for token in tokens if hmac.compare_digest(f"Bearer {token}", auth)),
False,
):
return self.json_message("unauthorized", status_code=HTTP_UNAUTHORIZED)
        try:  # Konnected 2.2.0 and above supports JSON payloads
            payload = await request.json()
        except json.decoder.JSONDecodeError:
            _LOGGER.error(
                (
                    "Your Konnected device software may be out of "
                    "date. Visit https://help.konnected.io for "
                    "updating instructions."
                )
            )
            # Bail out here; without a JSON body there is no payload to process below.
            return self.json_message(
                "payload must be valid JSON", status_code=HTTP_BAD_REQUEST
            )
device = data[CONF_DEVICES].get(device_id)
if device is None:
return self.json_message(
"unregistered device", status_code=HTTP_BAD_REQUEST
)
try:
zone_num = str(payload.get(CONF_ZONE) or PIN_TO_ZONE[payload[CONF_PIN]])
zone_data = device[CONF_BINARY_SENSORS].get(zone_num) or next(
(s for s in device[CONF_SENSORS] if s[CONF_ZONE] == zone_num), None
)
except KeyError:
zone_data = None
if zone_data is None:
return self.json_message(
"unregistered sensor/actuator", status_code=HTTP_BAD_REQUEST
)
zone_data["device_id"] = device_id
for attr in ["state", "temp", "humi", "addr"]:
value = payload.get(attr)
handler = HANDLERS.get(attr)
if value is not None and handler:
hass.async_create_task(handler(hass, zone_data, payload))
return self.json_message("ok")
async def get(self, request: Request, device_id) -> Response:
"""Return the current binary state of a switch."""
hass = request.app["hass"]
data = hass.data[DOMAIN]
device = data[CONF_DEVICES].get(device_id)
if not device:
return self.json_message(
f"Device {device_id} not configured", status_code=HTTP_NOT_FOUND
)
# Our data model is based on zone ids but we convert from/to pin ids
# based on whether they are specified in the request
try:
zone_num = str(
request.query.get(CONF_ZONE) or PIN_TO_ZONE[request.query[CONF_PIN]]
)
zone = next(
(
switch
for switch in device[CONF_SWITCHES]
if switch[CONF_ZONE] == zone_num
)
)
except StopIteration:
zone = None
except KeyError:
zone = None
zone_num = None
if not zone:
target = request.query.get(
CONF_ZONE, request.query.get(CONF_PIN, "unknown")
)
return self.json_message(
f"Switch on zone or pin {target} not configured",
status_code=HTTP_NOT_FOUND,
)
resp = {}
if request.query.get(CONF_ZONE):
resp[CONF_ZONE] = zone_num
else:
resp[CONF_PIN] = ZONE_TO_PIN[zone_num]
# Make sure entity is setup
zone_entity_id = zone.get(ATTR_ENTITY_ID)
if zone_entity_id:
resp["state"] = self.binary_value(
hass.states.get(zone_entity_id).state, zone[CONF_ACTIVATION],
)
return self.json(resp)
_LOGGER.warning("Konnected entity not yet setup, returning default")
resp["state"] = self.binary_value(STATE_OFF, zone[CONF_ACTIVATION])
return self.json(resp)
async def put(self, request: Request, device_id) -> Response:
"""Receive a sensor update via PUT request and async set state."""
return await self.update_sensor(request, device_id)
async def post(self, request: Request, device_id) -> Response:
"""Receive a sensor update via POST request and async set state."""
return await self.update_sensor(request, device_id)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
¹⁷O 2D DAS NMR of Coesite
^^^^^^^^^^^^^^^^^^^^^^^^^
"""
# %%
# Coesite is a high-pressure (2-3 GPa) and high-temperature (700°C) polymorph of silicon
# dioxide :math:`\text{SiO}_2`. Coesite has five crystallographic :math:`^{17}\text{O}`
# sites. The experimental dataset used in this example is published in
# Grandinetti `et al.` [#f1]_
import numpy as np
import csdmpy as cp
import matplotlib.pyplot as plt
from lmfit import Minimizer
from mrsimulator import Simulator
from mrsimulator.methods import Method2D
from mrsimulator import signal_processing as sp
from mrsimulator.utils import spectral_fitting as sf
from mrsimulator.utils import get_spectral_dimensions
from mrsimulator.utils.collection import single_site_system_generator
from mrsimulator.method.event import SpectralEvent
# sphinx_gallery_thumbnail_number = 3
# %%
# Import the dataset
# ------------------
filename = "https://sandbox.zenodo.org/record/814455/files/DASCoesite.csdf"
experiment = cp.load(filename)
# standard deviation of noise from the dataset
sigma = 921.6698
# For spectral fitting, we only focus on the real part of the complex dataset
experiment = experiment.real
# Convert the coordinates along each dimension from Hz to ppm.
_ = [item.to("ppm", "nmr_frequency_ratio") for item in experiment.dimensions]
# plot of the dataset.
max_amp = experiment.max()
levels = (np.arange(14) + 1) * max_amp / 15 # contours are drawn at these levels.
options = dict(levels=levels, alpha=0.75, linewidths=0.5) # plot options
plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.contour(experiment, colors="k", **options)
ax.invert_xaxis()
ax.set_ylim(30, -30)
plt.grid()
plt.tight_layout()
plt.show()
# %%
# Create a fitting model
# ----------------------
# **Guess model**
#
# Create a guess list of spin systems.
shifts = [29, 39, 54.8, 51, 56] # in ppm
Cq = [6.1e6, 5.4e6, 5.5e6, 5.5e6, 5.1e6] # in Hz
eta = [0.1, 0.2, 0.15, 0.15, 0.3]
abundance_ratio = [1, 1, 2, 2, 2]
abundance = np.asarray(abundance_ratio) / 8 * 100 # in %
spin_systems = single_site_system_generator(
isotope="17O",
isotropic_chemical_shift=shifts,
quadrupolar={"Cq": Cq, "eta": eta},
abundance=abundance,
)
# %%
# **Method**
#
# Create the DAS method.
# Get the spectral dimension parameters from the experiment.
spectral_dims = get_spectral_dimensions(experiment)
DAS = Method2D(
channels=["17O"],
magnetic_flux_density=11.744, # in T
spectral_dimensions=[
dict(
**spectral_dims[0],
events=[
SpectralEvent(
fraction=0.5,
rotor_angle=37.38 * 3.14159 / 180,
transition_query=[{"P": [-1], "D": [0]}],
),
SpectralEvent(
fraction=0.5,
rotor_angle=79.19 * 3.14159 / 180,
transition_query=[{"P": [-1], "D": [0]}],
),
],
),
# The last spectral dimension block is the direct-dimension
dict(
**spectral_dims[1],
events=[
SpectralEvent(
rotor_angle=54.735 * 3.14159 / 180,
transition_query=[{"P": [-1], "D": [0]}],
)
],
),
],
experiment=experiment, # also add the measurement to the method.
)
# Optimize the script by pre-setting the transition pathways for each spin system from
# the das method.
for sys in spin_systems:
sys.transition_pathways = DAS.get_transition_pathways(sys)
# %%
# **Guess Spectrum**
# Simulation
# ----------
sim = Simulator(spin_systems=spin_systems, methods=[DAS])
sim.config.number_of_sidebands = 1 # no sidebands are required for this dataset.
sim.run()
# Post Simulation Processing
# --------------------------
processor = sp.SignalProcessor(
operations=[
# Gaussian convolution along both dimensions.
sp.IFFT(dim_index=(0, 1)),
sp.apodization.Gaussian(FWHM="0.15 kHz", dim_index=0),
sp.apodization.Gaussian(FWHM="0.1 kHz", dim_index=1),
sp.FFT(dim_index=(0, 1)),
sp.Scale(factor=4e7),
]
)
processed_data = processor.apply_operations(data=sim.methods[0].simulation).real
# Plot of the guess Spectrum
# --------------------------
plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.contour(experiment, colors="k", **options)
ax.contour(processed_data, colors="r", linestyles="--", **options)
ax.invert_xaxis()
ax.set_ylim(30, -30)
plt.grid()
plt.tight_layout()
plt.show()
# %%
# Least-squares minimization with LMFIT
# -------------------------------------
# Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick
# setup of the fitting parameters.
params = sf.make_LMFIT_params(sim, processor)
print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"]))
# %%
# **Solve the minimizer using LMFIT**
minner = Minimizer(sf.LMFIT_min_function, params, fcn_args=(sim, processor, sigma))
result = minner.minimize(method="powell")
result
# %%
# The best fit solution
# ---------------------
best_fit = sf.bestfit(sim, processor)[0]
# Plot the spectrum
plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.contour(experiment, colors="k", **options)
ax.contour(best_fit, colors="r", linestyles="--", **options)
ax.invert_xaxis()
ax.set_ylim(30, -30)
plt.grid()
plt.tight_layout()
plt.show()
# %%
# The best fit solution
# ---------------------
residuals = sf.residuals(sim, processor)[0]
fig, ax = plt.subplots(
1, 3, sharey=True, figsize=(10, 3.0), subplot_kw={"projection": "csdm"}
)
vmax, vmin = experiment.max(), experiment.min()
for i, dat in enumerate([experiment, best_fit, residuals]):
ax[i].imshow(dat, aspect="auto", vmax=vmax, vmin=vmin)
ax[i].invert_xaxis()
ax[0].set_ylim(30, -30)
plt.tight_layout()
plt.show()
# %%
# .. [#f1] Grandinetti, P. J., Baltisberger, J. H., Farnan, I., Stebbins, J. F.,
# Werner, U. and Pines, A.
# Solid-State :math:`^{17}\text{O}` Magic-Angle and Dynamic-Angle Spinning NMR
# Study of the :math:`\text{SiO}_2` Polymorph Coesite, J. Phys. Chem. 1995,
# **99**, *32*, 12341-12348.
# `DOI: 10.1021/j100032a045 <https://doi.org/10.1021/j100032a045>`_
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPopvar(RPackage):
"""PopVar: Genomic Breeding Tools: Genetic Variance Prediction andCross-
Validation"""
homepage = "https://cloud.r-project.org/package=PopVar"
url = "https://cloud.r-project.org/src/contrib/PopVar_1.2.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/PopVar"
version('1.2.1', sha256='5e3df79634ab63708a431e4b8e6794675972ac6c58d2bc615726aa0f142f5f25')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-bglr', type=('build', 'run'))
depends_on('r-qtl', type=('build', 'run'))
depends_on('r-rrblup', type=('build', 'run'))
|
class Person:
def __init__(self, firstname, lastname):
self.firstname = firstname
self.lastname = lastname
self.address = []
def add_address(self, address):
self.address.append(address)
class Address:
def __init__(self, street, number):
self.street = street
self.number = number
if __name__ == "__main__":
from copy import deepcopy
fred = Person("Fred", "Chaves")
fred_address = Address("Av Principal", "100A")
fred.add_address(fred_address)
darly = deepcopy(fred)
darly.firstname = "Darly"
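    # deepcopy clones the nested list of Address objects as well, so fred and darly
    # no longer share any state:
    assert darly.address[0] is not fred.address[0]
    assert fred.firstname == "Fred"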
|