filename | text
---|---
the-stack_0_4036 | # Import flask and template operators
from flask import Flask, render_template
# Import SQLAlchemy
from flask_sqlalchemy import SQLAlchemy
# Define the WSGI application object
app = Flask(__name__)
# Configurations
app.config.from_object('config')
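# Note: 'config' above refers to a config.py module expected on the import path; a
# minimal sketch (an assumption, not shipped with this snippet) would define values
# such as SQLALCHEMY_DATABASE_URI = 'sqlite:///app.db' and SQLALCHEMY_TRACK_MODIFICATIONS = False.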
# Define the database object which is imported
# by modules and controllers
db = SQLAlchemy(app)
# Sample HTTP error handling
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# Import a module / component using its blueprint handler variable (mod_auth)
from app.mod_auth.controllers import mod_auth as auth_module
# Register blueprint(s)
app.register_blueprint(auth_module)
# app.register_blueprint(xyz_module)
# ..
# Build the database:
# This will create the database file using SQLAlchemy
db.create_all()
|
the-stack_0_4037 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import time
import paddle
import numpy as np
from .visualization import plot_tracking_dict
__all__ = [
'MOTTimer',
'Detection',
'write_mot_results',
'save_vis_results',
'load_det_results',
'preprocess_reid',
'get_crops',
'clip_box',
'scale_coords',
]
class MOTTimer(object):
"""
    This class is used to compute and print the current FPS while evaluating.
"""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.duration = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
self.duration = self.average_time
else:
self.duration = self.diff
return self.duration
def clear(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.duration = 0.
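# Illustrative usage (not part of the original module): time a loop with
# MOTTimer and derive an averaged FPS figure from it.
def _demo_mot_timer():
    timer = MOTTimer()
    for _ in range(10):
        timer.tic()
        time.sleep(0.01)  # stand-in for one tracking step
        timer.toc(average=True)
    return 1.0 / max(timer.average_time, 1e-9)  # frames per second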
class Detection(object):
"""
This class represents a bounding box detection in a single image.
Args:
tlwh (Tensor): Bounding box in format `(top left x, top left y,
width, height)`.
score (Tensor): Bounding box confidence score.
feature (Tensor): A feature vector that describes the object
contained in this image.
cls_id (Tensor): Bounding box category id.
"""
def __init__(self, tlwh, score, feature, cls_id):
self.tlwh = np.asarray(tlwh, dtype=np.float32)
self.score = float(score)
self.feature = np.asarray(feature, dtype=np.float32)
self.cls_id = int(cls_id)
def to_tlbr(self):
"""
Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
def to_xyah(self):
"""
Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = self.tlwh.copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
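# Illustrative usage (not part of the original module): a quick check of the
# box-format conversions above; the numbers are made up.
def _demo_detection_formats():
    det = Detection(tlwh=[10., 20., 50., 100.], score=0.9,
                    feature=np.zeros(4), cls_id=0)
    assert np.allclose(det.to_tlbr(), [10., 20., 60., 120.])  # (x1, y1, x2, y2)
    assert np.allclose(det.to_xyah(), [35., 70., 0.5, 100.])  # (cx, cy, w/h, h)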
def write_mot_results(filename, results, data_type='mot', num_classes=1):
# support single and multi classes
if data_type in ['mot', 'mcmot']:
save_format = '{frame},{id},{x1},{y1},{w},{h},{score},{cls_id},-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} car 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
f = open(filename, 'w')
for cls_id in range(num_classes):
for frame_id, tlwhs, tscores, track_ids in results[cls_id]:
if data_type == 'kitti':
frame_id -= 1
for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):
if track_id < 0: continue
if data_type == 'mot':
cls_id = -1
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(
frame=frame_id,
id=track_id,
x1=x1,
y1=y1,
x2=x2,
y2=y2,
w=w,
h=h,
score=score,
cls_id=cls_id)
f.write(line)
    f.close()
    print('MOT results saved in {}'.format(filename))
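# Illustrative call (not from the original file): 'results' maps each class id to
# (frame_id, tlwhs, scores, track_ids) tuples, and every kept track becomes one
# 'frame,id,x1,y1,w,h,score,cls_id,-1,-1' line of the MOT challenge text format.
def _demo_write_mot_results(out_path='demo_mot.txt'):
    results = {0: [(1, [[10., 20., 50., 100.]], [0.9], [3])]}
    write_mot_results(out_path, results, data_type='mot', num_classes=1)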
def save_vis_results(data,
frame_id,
online_ids,
online_tlwhs,
online_scores,
average_time,
show_image,
save_dir,
num_classes=1):
if show_image or save_dir is not None:
assert 'ori_image' in data
img0 = data['ori_image'].numpy()[0]
online_im = plot_tracking_dict(
img0,
num_classes,
online_tlwhs,
online_ids,
online_scores,
frame_id=frame_id,
fps=1. / average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(
os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
def load_det_results(det_file, num_frames):
assert os.path.exists(det_file) and os.path.isfile(det_file), \
        '{} does not exist or is not a file.'.format(det_file)
labels = np.loadtxt(det_file, dtype='float32', delimiter=',')
assert labels.shape[1] == 7, \
"Each line of {} should have 7 items: '[frame_id],[x0],[y0],[w],[h],[score],[class_id]'.".format(det_file)
results_list = []
for frame_i in range(num_frames):
results = {'bbox': [], 'score': [], 'cls_id': []}
        labels_with_frame = labels[labels[:, 0] == frame_i + 1]
        # each line of labels_with_frame:
        # [frame_id],[x0],[y0],[w],[h],[score],[class_id]
        for l in labels_with_frame:
results['bbox'].append(l[1:5])
results['score'].append(l[5])
results['cls_id'].append(l[6])
results_list.append(results)
return results_list
def scale_coords(coords, input_shape, im_shape, scale_factor):
im_shape = im_shape.numpy()[0]
ratio = scale_factor[0][0]
pad_w = (input_shape[1] - int(im_shape[1])) / 2
pad_h = (input_shape[0] - int(im_shape[0])) / 2
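    # pad_w/pad_h undo the centered padding that was added when the image was
    # resized to input_shape; dividing by ratio maps back to the original image scale.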
coords = paddle.cast(coords, 'float32')
coords[:, 0::2] -= pad_w
coords[:, 1::2] -= pad_h
coords[:, 0:4] /= ratio
coords[:, :4] = paddle.clip(coords[:, :4], min=0, max=coords[:, :4].max())
return coords.round()
def clip_box(xyxy, input_shape, im_shape, scale_factor):
im_shape = im_shape.numpy()[0]
ratio = scale_factor.numpy()[0][0]
img0_shape = [int(im_shape[0] / ratio), int(im_shape[1] / ratio)]
xyxy[:, 0::2] = paddle.clip(xyxy[:, 0::2], min=0, max=img0_shape[1])
xyxy[:, 1::2] = paddle.clip(xyxy[:, 1::2], min=0, max=img0_shape[0])
w = xyxy[:, 2:3] - xyxy[:, 0:1]
h = xyxy[:, 3:4] - xyxy[:, 1:2]
mask = paddle.logical_and(h > 0, w > 0)
keep_idx = paddle.nonzero(mask)
xyxy = paddle.gather_nd(xyxy, keep_idx[:, :1])
return xyxy, keep_idx
def get_crops(xyxy, ori_img, w, h):
crops = []
xyxy = xyxy.numpy().astype(np.int64)
ori_img = ori_img.numpy()
ori_img = np.squeeze(ori_img, axis=0).transpose(1, 0, 2)
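    # after squeeze + transpose the array is laid out as (W, H, C), so the first
    # slice below is x (bbox[0]:bbox[2]) and the second is y (bbox[1]:bbox[3])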
for i, bbox in enumerate(xyxy):
crop = ori_img[bbox[0]:bbox[2], bbox[1]:bbox[3], :]
crops.append(crop)
crops = preprocess_reid(crops, w, h)
return crops
def preprocess_reid(imgs,
w=64,
h=192,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]):
im_batch = []
for img in imgs:
img = cv2.resize(img, (w, h))
img = img[:, :, ::-1].astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
img = np.expand_dims(img, axis=0)
im_batch.append(img)
im_batch = np.concatenate(im_batch, 0)
return im_batch
|
the-stack_0_4038 | from math import floor
def format_(number: int) -> float:
return round(float(number), 2)
def play_for( a_performance: dict, plays: dict) -> dict:
return plays[a_performance["playID"]]
def amount_for(a_performance: dict, plays: dict) -> int:
result: int = 0
if play_for(a_performance, plays)["type"] == "tragedy":
result = 40000
if int(a_performance["audience"]) > 30:
result += 1000 * (a_performance["audience"] - 30)
elif play_for(a_performance, plays)["type"] == "comedy":
result = 30000
if a_performance["audience"] > 20:
            result += 10000 + 500 * (a_performance["audience"] - 20)
result += 300 * a_performance["audience"]
else:
print("Unkonw type: %s" % play_for(a_performance, plays)["type"])
return result
def statement(invoice: dict, plays: dict) -> str:
total_amount: int = 0
volume_credits: int = 0
result: str = "Statement for %s\n" % invoice["customer"]
for perf in invoice["performances"]:
# add volume credits
volume_credits += max(perf["audience"] - 30, 0)
# add extra credit for every ten comedy attendees
if play_for(perf, plays)["type"] == "comedy":
volume_credits += floor(perf["audience"] / 5)
# print line for this order
result += "%s: $%d (%d seats)\n" % (play_for(perf, plays)["name"], format_(amount_for(perf, plays)/100), perf["audience"])
total_amount += amount_for(perf, plays)
result += "Amount owed is %d\n" % format_(total_amount/100)
result += "You earned %d credits\n" % format_(volume_credits)
return result
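# Illustrative data (an assumption mirroring the classic 'statement' refactoring
# example): plays maps playID -> {"name", "type"}, and an invoice carries a
# customer plus a list of performances with "playID" and "audience".
def _demo_statement():
    plays = {"hamlet": {"name": "Hamlet", "type": "tragedy"},
             "as-like": {"name": "As You Like It", "type": "comedy"}}
    invoice = {"customer": "BigCo",
               "performances": [{"playID": "hamlet", "audience": 55},
                                {"playID": "as-like", "audience": 35}]}
    return statement(invoice, plays)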
if __name__ == "__main__":
import json
with open("./invoices.json") as f:
invoices = json.load(f)
print(invoices[0]["customer"])
with open("./plays.json") as f:
plays = json.load(f)
print(statement(invoices[0], plays)) |
the-stack_0_4039 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AlgorithmResult(AbstractModel):
"""每个算法的返回结果
"""
def __init__(self):
r"""
        :param AlgoId: Algorithm ID
        :type AlgoId: str
        :param AlgoName: Algorithm name
        Note: this field may return null, indicating that no valid value could be obtained.
        :type AlgoName: str
        :param Result: Result returned by the algorithm.
        - When the algorithm type is "OCR (1)", the result is a text string
        - When the algorithm type is "text classification (2)", the result string is an array of JSON objects:
        Class: classification result
        Confidence: confidence score
        - When the algorithm type is "sentiment analysis (3)", the result string is a JSON object:
        Positive: probability of positive sentiment
        Negative: probability of negative sentiment
        Neutral: probability of neutral sentiment
        - When the algorithm type is "contract element extraction (4)", the result string is an array of JSON objects:
        NodeName: first-level element name
        ItemName: second-level element name
        Content: element text content
        - When the algorithm type is "entity recognition (5)", the result string is an array of JSON objects:
        - Entity: entity type
        - Content: entity text content
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Result: str
        :param Error: Error message from the algorithm invocation
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Error: str
        :param AlgoType: Algorithm type:
        1: OCR algorithm
        2: text classification algorithm
        3: sentiment analysis algorithm
        4: contract element extraction algorithm
        5: entity recognition algorithm
        Note: this field may return null, indicating that no valid value could be obtained.
        :type AlgoType: int
"""
self.AlgoId = None
self.AlgoName = None
self.Result = None
self.Error = None
self.AlgoType = None
def _deserialize(self, params):
self.AlgoId = params.get("AlgoId")
self.AlgoName = params.get("AlgoName")
self.Result = params.get("Result")
self.Error = params.get("Error")
self.AlgoType = params.get("AlgoType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeInvocationResultRequest(AbstractModel):
"""DescribeInvocationResult请求参数结构体
"""
def __init__(self):
r"""
        :param InvokeId: Invocation ID; this is the RequestId returned by a call to the InvokeService API
:type InvokeId: str
"""
self.InvokeId = None
def _deserialize(self, params):
self.InvokeId = params.get("InvokeId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeInvocationResultResponse(AbstractModel):
"""DescribeInvocationResult返回参数结构体
"""
def __init__(self):
r"""
        :param Results: Invocation results of the service
        :type Results: list of AlgorithmResult
        :param Status: 0: failed to get the result
        1: the result has not been generated yet, keep polling
        2: the result was obtained successfully
        :type Status: int
        :param RequestId: Unique request ID, returned with every request. The RequestId of the request is needed to locate a problem.
:type RequestId: str
"""
self.Results = None
self.Status = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Results") is not None:
self.Results = []
for item in params.get("Results"):
obj = AlgorithmResult()
obj._deserialize(item)
self.Results.append(obj)
self.Status = params.get("Status")
self.RequestId = params.get("RequestId")
class InvokeServiceRequest(AbstractModel):
"""InvokeService请求参数结构体
"""
def __init__(self):
r"""
        :param ServiceId: ID of the service to invoke.
        :type ServiceId: str
        :param ServiceStatus: Status of the service to invoke: 0 means the debug version, 1 means the released version
        :type ServiceStatus: int
        :param FileUrl: URL of the document used for testing.
        :type FileUrl: str
        :param Input: Text used for testing. When this value is not empty, the invocation uses the value of this parameter.
:type Input: str
"""
self.ServiceId = None
self.ServiceStatus = None
self.FileUrl = None
self.Input = None
def _deserialize(self, params):
self.ServiceId = params.get("ServiceId")
self.ServiceStatus = params.get("ServiceStatus")
self.FileUrl = params.get("FileUrl")
self.Input = params.get("Input")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InvokeServiceResponse(AbstractModel):
"""InvokeService返回参数结构体
"""
def __init__(self):
r"""
        :param RequestId: Unique request ID, returned with every request. The RequestId of the request is needed to locate a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId") |
the-stack_0_4041 | from rocketpyBeta import *
import numpy as np
from numpy.random import normal, uniform, choice
from datetime import datetime
IN = {
"impulse": (1240, 100),
"burnOut": (2.84, 0.15),
"nozzleRadius": (30/1000, 0.5/1000),
"throatRadius": (8/1000, 0.5/1000),
"grainSeparation": (1/1000, 0.5/1000),
"grainDensity": (1707, 24),
"grainOuterRadius": (21.05/1000, 0.137/1000),
"grainInitialInnerRadius": (9.63/1000, 0.076/1000),
"grainInitialHeight": (118.38/1000, 0.415/1000),
"m_prop": (1.664, 0.05),
"m_aero": (0.696, 0.02),
"inertiaI": (0.3437,0.01*0.3437),
"inertiaZ": (0.00288,0.01*0.00288),
"radius": (0.0378,0.0001),
"distanceRocketNozzle": (0.467,0.003),
"distanceRocketPropellant": (0.091,0.003),
"powerOffDrag": (1,0.03),
"powerOnDrag": (1,0.03),
"noseLength": (0.151, 0.001),
"noseDistanceToCM": (0.539, 0.003),
"tail1TopRadius": (0.0378, 0.0001),
"tail1BottomRadius": (0.0602/2, 0.0001),
"tail1Length": (0.00765, 0.0001),
"tail1DistanceToCM": (0.168, 0.003),
"tail2Length": (0.00580, 0.0001),
"tail2TopRadius": (0.0602/2,0.0001),
"tail2BottomRadius": (0.0723/2,0.0001),
"tail2DistanceToCM": (-0.3374,0.003),
"tail3Length": (0.005, 0.0005),
"tail3TopRadius": (0.0723/2, 0.0001),
"tail3BottomRadius": (0.0411/2, 0.0001),
"tail3DistanceToCM": (-0.4624, 0.0001),
"finSpan": (0.070, 0.001),
"finRootChord": (0.08, 0.001),
"finTipChord": (0.04, 0.001),
"finDistanceToCM": (-0.344, 0.003),
"inclination": (85, 1),
"heading": (90, 1),
"m_rec": (0.160, 0.024),
"CdS": (0.43, 0.086),
"lag_rec": (1 , 0.5),
"m_se": (0.300, 0.02),
"lag_se": (0.73, 0.16)}
while True:
# Number of simulations
s = 500
print('Initializing new dispersion analysis sequence.')
    print('Euporia I - Plan A - Ballistic')
print('Number of simulations: '+str(s))
print('Estimated time: ' + str(1.5*s/60) + ' mins')
print(datetime.now())
init = datetime.now()
# Initialize output
inputs = []
output = []
    # Environment Variables
envRailLength = normal(2, 0.01, s)
envYear = choice(np.arange(2013, 2017), s)
envDay = choice(np.arange(1, 10), s)
# envHour = choice([18, 12], s, p=[0.5, 0.5])
# Motor Variables
motorBurnOut = normal(*IN['burnOut'], s)
motorTotalImpulse = normal(*IN['impulse'], s)
motornozzleRadius = normal(*IN['nozzleRadius'], s)
motorthroatRadius = normal(*IN['throatRadius'], s)
motorgrainSeparation = normal(*IN['grainSeparation'], s)
motorgrainDensity = normal(*IN['grainDensity'], s)
motorgrainOuterRadius = normal(*IN['grainOuterRadius'], s)
motorgrainInitialInnerRadius = normal(*IN['grainInitialInnerRadius'], s)
motorgrainInitialHeight = normal(*IN['grainInitialHeight'], s)
# Rocket Variables
rMassSE = normal(*IN['m_se'], s)
rMassRec = normal(*IN['m_rec'], s)
rMassProp = normal(*IN['m_prop'], s)
rMassAero = normal(*IN['m_aero'], s)
rInertiaI = normal(*IN['inertiaI'], s)
rInertiaZ = normal(*IN['inertiaZ'], s)
rRadius = normal(*IN['radius'], s)
rDistanceRocketNozzle = normal(*IN['distanceRocketNozzle'], s)
rDistanceRocketPropellant = normal(*IN['distanceRocketPropellant'], s)
rpowerOnDrag = normal(*IN['powerOnDrag'], s)
rpowerOffDrag = normal(*IN['powerOffDrag'], s)
# Nose
rNoseLength = normal(*IN['noseLength'], s)
rNoseDistanceToCM = normal(*IN['noseDistanceToCM'], s)
# Fins
rFinsSpan = normal(*IN['finSpan'], s)
rFinsRootChord = normal(*IN['finRootChord'], s)
rFinsTipChord = normal(*IN['finTipChord'], s)
rFinsDistanceToCM = normal(*IN['finDistanceToCM'], s)
# Tail 1
rTail1TopRadius = normal(*IN['tail1TopRadius'], s)
rTail1BottomRadius = normal(*IN['tail1BottomRadius'], s)
rTail1Length = normal(*IN['tail1Length'], s)
rTail1DistanceToCM = normal(*IN['tail1DistanceToCM'], s)
# Tail 2
rTail2TopRadius = normal(*IN['tail2TopRadius'], s)
rTail2BottomRadius = normal(*IN['tail2BottomRadius'], s)
rTail2Length = normal(*IN['tail2Length'], s)
rTail2DistanceToCM = normal(*IN['tail2DistanceToCM'], s)
# Tail 3
rTail3TopRadius = normal(*IN['tail3TopRadius'], s)
rTail3BottomRadius = normal(*IN['tail3BottomRadius'], s)
rTail3Length = normal(*IN['tail3Length'], s)
rTail3DistanceToCM = normal(*IN['tail3DistanceToCM'], s)
# Parachute
pDrogueCdS = normal(*IN['CdS'], s)
pDrogueLag = normal(*IN['lag_rec'], s)
dSeLag = normal(*IN['lag_se'], s)
# Flight variables
fInclination = normal(*IN['inclination'], s)
fHeading = normal(*IN['heading'], s)
    # Initialize environment and motor
E = Environment(railLength=2,
gravity=9.8,
windData='../data/weather/RioSaoPaulo.nc',
location=(-21.961526, -47.480908),
date=(2016, 2, 4, 12))
for i in range(s):
print('Iteration: ', i, end='\r')
        # Environment Variables
railLength = envRailLength[i]
year = envYear[i]
day = envDay[i]
hour = 12
# Motor Variables
burnOut = motorBurnOut[i]
totalImpulse = motorTotalImpulse[i]
nozzleRadius = motornozzleRadius[i]
throatRadius = motorthroatRadius[i]
grainSeparation = motorgrainSeparation[i]
grainDensity = motorgrainDensity[i]
grainOuterRadius = motorgrainOuterRadius[i]
grainInitialInnerRadius = motorgrainInitialInnerRadius[i]
grainInitialHeight = motorgrainInitialHeight[i]
# Rocket Variables
m_aeroI = rMassAero[i]
m_recI = rMassRec[i]
m_seI = rMassSE[i]
m_propI = rMassProp[i]
mass = m_aeroI + m_recI + m_seI + m_propI
inertiaI = rInertiaI[i]
inertiaZ = rInertiaZ[i]
radius = rRadius[i]
distanceRocketNozzle = rDistanceRocketNozzle[i]
distanceRocketPropellant = rDistanceRocketPropellant[i]
powerOnDrag = rpowerOnDrag[i]
powerOffDrag = rpowerOffDrag[i]
# Nose
noseLength = rNoseLength[i]
noseDistanceToCM = rNoseDistanceToCM[i]
# Fins
finSpan = rFinsSpan[i]
finRootChord = rFinsRootChord[i]
finTipChord = rFinsTipChord[i]
finDistanceToCM = rFinsDistanceToCM[i]
# Tail 1
tail1TopRadius = rTail1TopRadius[i]
tail1BottomRadius = rTail1BottomRadius[i]
tail1Length = rTail1Length[i]
tail1DistanceToCM = rTail1DistanceToCM[i]
# Tail 2
tail2TopRadius = rTail2TopRadius[i]
tail2BottomRadius = rTail2BottomRadius[i]
tail2Length = rTail2Length[i]
tail2DistanceToCM = rTail2DistanceToCM[i]
# Tail 3
tail3TopRadius = rTail3TopRadius[i]
tail3BottomRadius = rTail3BottomRadius[i]
tail3Length = rTail3Length[i]
tail3DistanceToCM = rTail3DistanceToCM[i]
# Parachute
drogueCdS = pDrogueCdS[i]
drogueLag = pDrogueLag[i] + dSeLag[i]
# Flight variables
inclination = fInclination[i]
heading = fHeading[i]
inputs.append([year, day, hour, railLength, burnOut, totalImpulse, mass, inertiaI, inertiaZ, radius, inclination, heading])
E.setDate((year, 2, day, hour))
E.railLength = railLength
Jiboia58 = Motor(thrustSource='../data/jiboia/thrustCurve.csv',
burnOut=2.84,
reshapeThrustCurve=(burnOut, totalImpulse),
interpolationMethod='spline',
nozzleRadius=nozzleRadius,
throatRadius=throatRadius,
grainNumber=5,
grainSeparation=grainSeparation,
grainDensity=grainDensity,
grainOuterRadius=grainOuterRadius,
grainInitialInnerRadius=grainInitialInnerRadius,
grainInitialHeight=grainInitialHeight)
EuporiaI = Rocket(motor=Jiboia58,
mass=m_aeroI+m_propI+m_recI+m_seI,
inertiaI=inertiaI,
inertiaZ=inertiaZ,
radius=radius,
distanceRocketNozzle=distanceRocketNozzle,
distanceRocketPropellant=distanceRocketPropellant,
offCenter=0,
powerOffDrag="../data/euporia/euporiaIDragOff.csv",
powerOnDrag="../data/euporia/euporiaIDragOn.csv",
drogueArea=False,
drogueCd=False,
drogueLag=drogueLag,
mainArea=False,
mainCd=False,
mainAlt=50)
EuporiaI.powerOffDrag = powerOffDrag*EuporiaI.powerOffDrag
EuporiaI.powerOnDrag = powerOnDrag*EuporiaI.powerOnDrag
EuporiaI.addNose(length=noseLength, kind='parabolic', distanceToCM=noseDistanceToCM)
EuporiaI.addTail(topRadius=tail1TopRadius, bottomRadius=tail1BottomRadius, length=tail1Length, distanceToCM=tail1DistanceToCM)
EuporiaI.addTail(topRadius=tail2TopRadius, bottomRadius=tail2BottomRadius, length=tail2Length, distanceToCM=tail2DistanceToCM)
EuporiaI.addFins(n=4, rootChord=finRootChord, tipChord=finTipChord, span=finSpan, distanceToCM=finDistanceToCM)
EuporiaI.addTail(topRadius=tail3TopRadius, bottomRadius=tail3BottomRadius, length=tail3Length, distanceToCM=tail3DistanceToCM)
F = Flight(EuporiaI, E, inclination=inclination, heading=heading, flightPhases=-1, timeStep=[0.01, 0.1])
# Calculate Max Vel
sol = np.array(F.solution)
F.vx = Function(sol[:, [0, 4]], 'Time (s)', 'Vx (m/s)', 'spline', extrapolation="natural")
F.vy = Function(sol[:, [0, 5]], 'Time (s)', 'Vy (m/s)', 'spline', extrapolation="natural")
F.vz = Function(sol[:, [0, 6]], 'Time (s)', 'Vz (m/s)', 'spline', extrapolation="natural")
F.v = (F.vx**2 + F.vy**2 + F.vz**2)**0.5
F.v.setDiscrete(0, burnOut, 100)
F.maxVel = np.amax(F.v.source[:, 1])
# Output
output.append([F.outOfRailTime, F.outOfRailVelocity, F.maxVel, F.apogeeTime, F.apogee, F.apogeeX, F.apogeeY,
F.drogueOpeningTime, F.drogueOpeningVelocity, F.drogueX, F.drogueY, F.drogueZ,
F.tFinal, F.xImpact, F.yImpact, F.impactVelocity, F.rocket.staticMargin])
# Write to file
print('Sequence completed!')
id = str(choice(200000))
np.savetxt('InpDispersion' + id + '.euporia_I_AB', inputs, delimiter=',')
np.savetxt('OutDispersion' + id + '.euporia_I_AB', output, delimiter=',')
print('Results written to file!')
print('End Time:', datetime.now())
print('Total Elapsed Time (min):', (datetime.now() - init).seconds/60)
    print('Average Time (s):', (datetime.now() - init).seconds/s)
print() |
the-stack_0_4045 | from datetime import timedelta
import aiohttp
import aiohttp_client_cache
from sqlalchemy.ext.asyncio import AsyncSession
from Backend.core.errors import CustomException
from Backend.crud import discord_users
from Backend.database.models import DiscordUsers
from Backend.networking.bungieAuth import BungieAuth
from Backend.networking.schemas import WebResponse
from Backend.networking.base import NetworkBase
from settings import BUNGIE_TOKEN
class BungieApi(NetworkBase):
"""Handles all networking to any API. To call an api that is not bungies, change the headers"""
# base bungie headers
normal_headers = {"X-API-Key": BUNGIE_TOKEN, "Accept": "application/json"}
auth_headers = normal_headers.copy()
    # the cache object. Low expire time since players don't want to wait an eternity for their stuff to _update
cache = aiohttp_client_cache.SQLiteBackend(
cache_name="networking/bungie_networking_cache",
expire_after=timedelta(minutes=5),
urls_expire_after={
"platform/app/oauth/token": 0, # do not save token stuff
"Destiny2/Stats/PostGameCarnageReport": 0, # do not save pgcr. We save them anyway and don't look them up more than once
"Destiny2/*/Profile/**components=": timedelta(minutes=15), # profile call
"Destiny2/*/Account/*/Stats": timedelta(minutes=60), # stats
"Destiny2/*/Account/*/Character/*/Stats/Activities": timedelta(minutes=5), # activity history
},
)
def __init__(self, db: AsyncSession, user: DiscordUsers = None, headers: dict = None, i_understand_what_im_doing_and_that_setting_this_to_true_might_break_stuff: bool = False):
assert user or headers or i_understand_what_im_doing_and_that_setting_this_to_true_might_break_stuff, "One argument needs to be defined"
self.user = user
self.discord_id = user.discord_id if user else None
self.db = db
        # allows URLs other than Bungie's to be called (e.g. Steam players)
if headers:
self.normal_headers = headers
self.auth_headers = headers
self.bungie_request = False
async def get(self, route: str, params: dict = None, use_cache: bool = True) -> WebResponse:
"""Grabs JSON from the specified URL (no oauth)"""
# check if the user has a private profile, if so we use oauth
if self.user:
if self.user.private_profile:
# then we use get_with_token()
return await self.get_with_token(route=route, params=params, use_cache=use_cache)
try:
async with aiohttp_client_cache.CachedSession(cache=self.cache) as session:
# use cache for the responses
if use_cache:
return await self._request(
session=session,
method="GET",
route=route,
headers=self.normal_headers,
params=params,
)
# do not use cache
else:
async with session.disabled():
return await self._request(
session=session,
method="GET",
route=route,
headers=self.normal_headers,
params=params,
)
except CustomException as exc:
if exc.error == "BungieDestinyPrivacyRestriction":
# catch the BungieDestinyPrivacyRestriction error to change privacy settings in our db
await discord_users.update(db=self.db, to_update=self.user, has_private_profile=True)
# then call the same endpoint again, this time with a token
return await self.get_with_token(route=route, params=params, use_cache=use_cache)
else:
# otherwise raise error again
raise exc
async def get_with_token(
self, route: str, params: dict = None, use_cache: bool = True
) -> WebResponse:
"""Grabs JSON from the specified URL (oauth)"""
# set the auth headers to a working token
await self.__set_auth_headers()
# ignore cookies
no_jar = aiohttp.DummyCookieJar()
async with aiohttp_client_cache.CachedSession(cache=self.cache, cookie_jar=no_jar) as session:
# use cache for the responses
if use_cache:
return await self._request(
session=session,
method="GET",
route=route,
headers=self.auth_headers,
params=params,
)
# do not use cache
else:
async with session.disabled():
return await self._request(
session=session,
method="GET",
route=route,
headers=self.auth_headers,
params=params,
)
async def post(self, route: str, json: dict, params: dict = None) -> WebResponse:
"""Post data to bungie. self.discord_id must have the authentication for the action"""
# set the auth headers to a working token
await self.__set_auth_headers()
async with aiohttp_client_cache.CachedSession(cache=self.cache) as session:
# do not use cache here
async with session.disabled():
return await self._request(
session=session,
method="POST",
route=route,
json=json,
headers=self.auth_headers,
params=params,
)
async def __set_auth_headers(self):
"""Update the auth headers to include a working token. Raise an error if that doesnt exist"""
# get a working token or abort
auth = BungieAuth(db=self.db, user=self.user)
token = await auth.get_working_token()
# use special token headers if its a bungie request
if self.bungie_request:
self.auth_headers.update(
{
"Authorization": f"Bearer {token}",
}
)
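# Illustrative usage (an assumption, not part of the original file): build the
# client from a db session and the stored DiscordUsers row, then await a GET;
# 'route' is whatever endpoint the caller needs.
async def _demo_bungie_get(db: AsyncSession, user: DiscordUsers, route: str) -> WebResponse:
    api = BungieApi(db=db, user=user)
    return await api.get(route=route, use_cache=True)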
|
the-stack_0_4048 | """empty message
Revision ID: dd7377000e2e
Revises: 3a28d0608e7f
Create Date: 2020-08-18 14:14:59.119395
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd7377000e2e'
down_revision = '3a28d0608e7f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('create_date', sa.DateTime(), nullable=False),
sa.Column('modify_date', sa.DateTime(), nullable=True),
sa.Column('question_id', sa.Integer(), nullable=True),
sa.Column('answer_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['answer_id'], ['answer.id'], name=op.f('fk_comment_answer_id_answer'), ondelete='CASCADE'),
sa.ForeignKeyConstraint(['question_id'], ['question.id'], name=op.f('fk_comment_question_id_question'), ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_comment_user_id_user'), ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_comment'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
# ### end Alembic commands ###
|
the-stack_0_4051 | print('\033[1;31m comparando valores \033[m')
ja_chega = float(input('digite um valor'))
desisto = float(input('digite um valor '))
if ja_chega > desisto:
print('o primeiro valor é maior !!')
elif desisto > ja_chega:
print('o segundo valor é maior !!')
else:
print('os dois valores são iguais')
escolha4 = ''
while escolha4 != 'sim' and escolha4 != 'nao':
escolha4 = str(input('você deseja executar novamente [sim/nao]?')).lower()
if escolha4 == 'sim':
import jogo_do_tio_Dodo
if escolha4 == 'nao':
print('obrigado por ultilizar nossos serviços')
break |
the-stack_0_4052 | import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
print("current num class : {}".format(num_classes))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, num_classes, pretrained, progress, **kwargs):
model = ResNet(block, layers, num_classes, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18(num_classes=1000,pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], num_classes,pretrained, progress,
**kwargs)
'''
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
'''
def test():
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = resnet18(num_classes=6).to(device)
y = net(torch.randn(1,3,224,224).to(device))
print(y.size())
#test() |
the-stack_0_4053 | # Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mlog
import pickle, os, uuid, shlex
import sys
from itertools import chain
from pathlib import PurePath
from collections import OrderedDict
from .mesonlib import (
MesonException, MachineChoice, PerMachine,
default_libdir, default_libexecdir, default_prefix
)
from .wrap import WrapMode
import ast
import argparse
import configparser
from typing import (
Any, Dict, Generic, Iterable, List, Optional, Type, TypeVar, Union
)
import typing
import enum
if typing.TYPE_CHECKING:
from . import dependencies
version = '0.51.999'
backendlist = ['ninja', 'vs', 'vs2010', 'vs2015', 'vs2017', 'vs2019', 'xcode']
default_yielding = False
# Can't bind this near the class method it seems, sadly.
_T = TypeVar('_T')
class UserOption(Generic[_T]):
def __init__(self, description, choices, yielding):
super().__init__()
self.choices = choices
self.description = description
if yielding is None:
yielding = default_yielding
if not isinstance(yielding, bool):
raise MesonException('Value of "yielding" must be a boolean.')
self.yielding = yielding
def printable_value(self):
return self.value
# Check that the input is a valid value and return the
# "cleaned" or "native" version. For example the Boolean
# option could take the string "true" and return True.
def validate_value(self, value: Any) -> _T:
raise RuntimeError('Derived option class did not override validate_value.')
def set_value(self, newvalue):
self.value = self.validate_value(newvalue)
class UserStringOption(UserOption[str]):
def __init__(self, description, value, choices=None, yielding=None):
super().__init__(description, choices, yielding)
self.set_value(value)
def validate_value(self, value):
if not isinstance(value, str):
raise MesonException('Value "%s" for string option is not a string.' % str(value))
return value
class UserBooleanOption(UserOption[bool]):
def __init__(self, description, value, yielding=None):
super().__init__(description, [True, False], yielding)
self.set_value(value)
def __bool__(self) -> bool:
return self.value
def validate_value(self, value) -> bool:
if isinstance(value, bool):
return value
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
raise MesonException('Value %s is not boolean (true or false).' % value)
class UserIntegerOption(UserOption[int]):
def __init__(self, description, min_value, max_value, value, yielding=None):
super().__init__(description, [True, False], yielding)
self.min_value = min_value
self.max_value = max_value
self.set_value(value)
c = []
if min_value is not None:
c.append('>=' + str(min_value))
if max_value is not None:
c.append('<=' + str(max_value))
self.choices = ', '.join(c)
def validate_value(self, value) -> int:
if isinstance(value, str):
value = self.toint(value)
if not isinstance(value, int):
raise MesonException('New value for integer option is not an integer.')
if self.min_value is not None and value < self.min_value:
raise MesonException('New value %d is less than minimum value %d.' % (value, self.min_value))
if self.max_value is not None and value > self.max_value:
raise MesonException('New value %d is more than maximum value %d.' % (value, self.max_value))
return value
def toint(self, valuestring) -> int:
try:
return int(valuestring)
except ValueError:
            raise MesonException('Value string "%s" is not convertible to an integer.' % valuestring)
class UserUmaskOption(UserIntegerOption, UserOption[Union[str, int]]):
def __init__(self, description, value, yielding=None):
super().__init__(description, 0, 0o777, value, yielding)
self.choices = ['preserve', '0000-0777']
def printable_value(self):
if self.value == 'preserve':
return self.value
return format(self.value, '04o')
def validate_value(self, value):
if value is None or value == 'preserve':
return 'preserve'
return super().validate_value(value)
def toint(self, valuestring):
try:
return int(valuestring, 8)
except ValueError as e:
raise MesonException('Invalid mode: {}'.format(e))
class UserComboOption(UserOption[str]):
def __init__(self, description, choices: List[str], value, yielding=None):
super().__init__(description, choices, yielding)
if not isinstance(self.choices, list):
raise MesonException('Combo choices must be an array.')
for i in self.choices:
if not isinstance(i, str):
raise MesonException('Combo choice elements must be strings.')
self.set_value(value)
def validate_value(self, value):
if value not in self.choices:
optionsstring = ', '.join(['"%s"' % (item,) for item in self.choices])
raise MesonException('Value "%s" for combo option is not one of the choices. Possible choices are: %s.' % (value, optionsstring))
return value
class UserArrayOption(UserOption[List[str]]):
def __init__(self, description, value, shlex_split=False, user_input=False, allow_dups=False, **kwargs):
super().__init__(description, kwargs.get('choices', []), yielding=kwargs.get('yielding', None))
self.shlex_split = shlex_split
self.allow_dups = allow_dups
self.value = self.validate_value(value, user_input=user_input)
def validate_value(self, value, user_input=True) -> List[str]:
# User input is for options defined on the command line (via -D
# options). Users can put their input in as a comma separated
# string, but for defining options in meson_options.txt the format
# should match that of a combo
if not user_input and isinstance(value, str) and not value.startswith('['):
raise MesonException('Value does not define an array: ' + value)
if isinstance(value, str):
if value.startswith('['):
newvalue = ast.literal_eval(value)
elif value == '':
newvalue = []
else:
if self.shlex_split:
newvalue = shlex.split(value)
else:
newvalue = [v.strip() for v in value.split(',')]
elif isinstance(value, list):
newvalue = value
else:
raise MesonException('"{0}" should be a string array, but it is not'.format(str(newvalue)))
if not self.allow_dups and len(set(newvalue)) != len(newvalue):
msg = 'Duplicated values in array option is deprecated. ' \
'This will become a hard error in the future.'
mlog.deprecation(msg)
for i in newvalue:
if not isinstance(i, str):
raise MesonException('String array element "{0}" is not a string.'.format(str(newvalue)))
if self.choices:
bad = [x for x in newvalue if x not in self.choices]
if bad:
raise MesonException('Options "{}" are not in allowed choices: "{}"'.format(
', '.join(bad), ', '.join(self.choices)))
return newvalue
class UserFeatureOption(UserComboOption):
static_choices = ['enabled', 'disabled', 'auto']
def __init__(self, description, value, yielding=None):
super().__init__(description, self.static_choices, value, yielding)
def is_enabled(self):
return self.value == 'enabled'
def is_disabled(self):
return self.value == 'disabled'
def is_auto(self):
return self.value == 'auto'
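# Illustrative usage (not part of the original module): the option classes above
# normalize raw user input through validate_value()/set_value().
def _demo_user_options():
    b = UserBooleanOption('enable tests', 'true')                   # parsed to True
    c = UserComboOption('backend choice', ['ninja', 'vs'], 'ninja')
    a = UserArrayOption('extra args', 'one,two', user_input=True)   # -> ['one', 'two']
    return b.value, c.value, a.value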
def load_configs(filenames: List[str]) -> configparser.ConfigParser:
"""Load configuration files from a named subdirectory."""
config = configparser.ConfigParser()
config.read(filenames)
return config
if typing.TYPE_CHECKING:
CacheKeyType = typing.Tuple[typing.Tuple[typing.Any, ...], ...]
SubCacheKeyType = typing.Tuple[typing.Any, ...]
class DependencyCacheType(enum.Enum):
OTHER = 0
PKG_CONFIG = 1
CMAKE = 2
@classmethod
def from_type(cls, dep: 'dependencies.Dependency') -> 'DependencyCacheType':
from . import dependencies
# As more types gain search overrides they'll need to be added here
if isinstance(dep, dependencies.PkgConfigDependency):
return cls.PKG_CONFIG
if isinstance(dep, dependencies.CMakeDependency):
return cls.CMAKE
return cls.OTHER
class DependencySubCache:
def __init__(self, type_: DependencyCacheType):
self.types = [type_]
self.__cache = {} # type: typing.Dict[SubCacheKeyType, dependencies.Dependency]
def __getitem__(self, key: 'SubCacheKeyType') -> 'dependencies.Dependency':
return self.__cache[key]
def __setitem__(self, key: 'SubCacheKeyType', value: 'dependencies.Dependency') -> None:
self.__cache[key] = value
def __contains__(self, key: 'SubCacheKeyType') -> bool:
return key in self.__cache
def values(self) -> typing.Iterable['dependencies.Dependency']:
return self.__cache.values()
class DependencyCache:
"""Class that stores a cache of dependencies.
    This class is meant to encapsulate the fact that we need multiple keys to
    successfully look up a dependency, by providing a simple get/put interface.
"""
def __init__(self, builtins_per_machine: PerMachine[typing.Dict[str, UserOption[typing.Any]]], for_machine: MachineChoice):
self.__cache = OrderedDict() # type: typing.MutableMapping[CacheKeyType, DependencySubCache]
self.__builtins_per_machine = builtins_per_machine
self.__for_machine = for_machine
def __calculate_subkey(self, type_: DependencyCacheType) -> typing.Tuple[typing.Any, ...]:
if type_ is DependencyCacheType.PKG_CONFIG:
return tuple(self.__builtins_per_machine[self.__for_machine]['pkg_config_path'].value)
elif type_ is DependencyCacheType.CMAKE:
return tuple(self.__builtins_per_machine[self.__for_machine]['cmake_prefix_path'].value)
assert type_ is DependencyCacheType.OTHER, 'Someone forgot to update subkey calculations for a new type'
return tuple()
def __iter__(self) -> typing.Iterator['CacheKeyType']:
return self.keys()
def put(self, key: 'CacheKeyType', dep: 'dependencies.Dependency') -> None:
t = DependencyCacheType.from_type(dep)
if key not in self.__cache:
self.__cache[key] = DependencySubCache(t)
subkey = self.__calculate_subkey(t)
self.__cache[key][subkey] = dep
def get(self, key: 'CacheKeyType') -> typing.Optional['dependencies.Dependency']:
"""Get a value from the cache.
If there is no cache entry then None will be returned.
"""
try:
val = self.__cache[key]
except KeyError:
return None
for t in val.types:
subkey = self.__calculate_subkey(t)
try:
return val[subkey]
except KeyError:
pass
return None
def values(self) -> typing.Iterator['dependencies.Dependency']:
for c in self.__cache.values():
yield from c.values()
def keys(self) -> typing.Iterator['CacheKeyType']:
return iter(self.__cache.keys())
def items(self) -> typing.Iterator[typing.Tuple['CacheKeyType', typing.List['dependencies.Dependency']]]:
for k, v in self.__cache.items():
vs = []
for t in v.types:
subkey = self.__calculate_subkey(t)
if subkey in v:
vs.append(v[subkey])
yield k, vs
def clear(self) -> None:
self.__cache.clear()
# Can't bind this near the class method it seems, sadly.
_V = TypeVar('_V')
# This class contains all data that must persist over multiple
# invocations of Meson. It is roughly the same thing as
# cmakecache.
class CoreData:
def __init__(self, options: argparse.Namespace, scratch_dir: str):
self.lang_guids = {
'default': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'c': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'cpp': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'test': '3AC096D0-A1C2-E12C-1390-A8335801FDAB',
'directory': '2150E333-8FDC-42A3-9474-1A3956D46DE8',
}
self.test_guid = str(uuid.uuid4()).upper()
self.regen_guid = str(uuid.uuid4()).upper()
self.install_guid = str(uuid.uuid4()).upper()
self.target_guids = {}
self.version = version
self.init_builtins()
self.backend_options = {} # : Dict[str, UserOption]
self.user_options = {} # : Dict[str, UserOption]
self.compiler_options = PerMachine({}, {})
self.base_options = {} # : Dict[str, UserOption]
self.cross_files = self.__load_config_files(options, scratch_dir, 'cross')
self.compilers = PerMachine(OrderedDict(), OrderedDict())
build_cache = DependencyCache(self.builtins_per_machine, MachineChoice.BUILD)
        host_cache = DependencyCache(self.builtins_per_machine, MachineChoice.HOST)
self.deps = PerMachine(build_cache, host_cache) # type: PerMachine[DependencyCache]
self.compiler_check_cache = OrderedDict()
# Only to print a warning if it changes between Meson invocations.
self.config_files = self.__load_config_files(options, scratch_dir, 'native')
self.libdir_cross_fixup()
@staticmethod
def __load_config_files(options: argparse.Namespace, scratch_dir: str, ftype: str) -> List[str]:
# Need to try and make the passed filenames absolute because when the
# files are parsed later we'll have chdir()d.
if ftype == 'cross':
filenames = options.cross_file
else:
filenames = options.native_file
if not filenames:
return []
real = []
for i, f in enumerate(filenames):
f = os.path.expanduser(os.path.expandvars(f))
if os.path.exists(f):
if os.path.isfile(f):
real.append(os.path.abspath(f))
elif os.path.isdir(f):
raise MesonException('Cross and native files must not be directories')
else:
# in this case we've been passed some kind of pipe, copy
# the contents of that file into the meson private (scratch)
# directory so that it can be re-read when wiping/reconfiguring
copy = os.path.join(scratch_dir, '{}.{}.ini'.format(uuid.uuid4(), ftype))
with open(f, 'r') as rf:
with open(copy, 'w') as wf:
wf.write(rf.read())
real.append(copy)
# Also replace the command line argument, as the pipe
                    # probably won't exist on reconfigure
filenames[i] = copy
continue
elif sys.platform != 'win32':
paths = [
os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share')),
] + os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')
for path in paths:
path_to_try = os.path.join(path, 'meson', ftype, f)
if os.path.isfile(path_to_try):
real.append(path_to_try)
break
else:
raise MesonException('Cannot find specified {} file: {}'.format(ftype, f))
continue
raise MesonException('Cannot find specified {} file: {}'.format(ftype, f))
return real
def libdir_cross_fixup(self):
# By default set libdir to "lib" when cross compiling since
# getting the "system default" is always wrong on multiarch
# platforms as it gets a value like lib/x86_64-linux-gnu.
if self.cross_files:
self.builtins['libdir'].value = 'lib'
def sanitize_prefix(self, prefix):
prefix = os.path.expanduser(prefix)
if not os.path.isabs(prefix):
raise MesonException('prefix value {!r} must be an absolute path'
''.format(prefix))
if prefix.endswith('/') or prefix.endswith('\\'):
# On Windows we need to preserve the trailing slash if the
# string is of type 'C:\' because 'C:' is not an absolute path.
if len(prefix) == 3 and prefix[1] == ':':
pass
# If prefix is a single character, preserve it since it is
# the root directory.
elif len(prefix) == 1:
pass
else:
prefix = prefix[:-1]
return prefix
def sanitize_dir_option_value(self, prefix, option, value):
'''
If the option is an installation directory option and the value is an
absolute path, check that it resides within prefix and return the value
as a path relative to the prefix.
This way everyone can do f.ex, get_option('libdir') and be sure to get
the library directory relative to prefix.
'''
if option.endswith('dir') and os.path.isabs(value) and \
option not in builtin_dir_noprefix_options:
# Value must be a subdir of the prefix
# commonpath will always return a path in the native format, so we
# must use pathlib.PurePath to do the same conversion before
# comparing.
if os.path.commonpath([value, prefix]) != str(PurePath(prefix)):
m = 'The value of the {!r} option is {!r} which must be a ' \
'subdir of the prefix {!r}.\nNote that if you pass a ' \
'relative path, it is assumed to be a subdir of prefix.'
raise MesonException(m.format(option, value, prefix))
# Convert path to be relative to prefix
skip = len(prefix) + 1
value = value[skip:]
return value
def init_builtins(self):
# Create builtin options with default values
self.builtins = {}
for key, opt in builtin_options.items():
self.builtins[key] = opt.init_option()
self.builtins_per_machine = PerMachine({}, {})
for for_machine in iter(MachineChoice):
for key, opt in builtin_options_per_machine.items():
self.builtins_per_machine[for_machine][key] = opt.init_option()
def init_backend_options(self, backend_name):
if backend_name == 'ninja':
self.backend_options['backend_max_links'] = \
UserIntegerOption(
'Maximum number of linker processes to run or 0 for no '
'limit',
0, None, 0)
elif backend_name.startswith('vs'):
self.backend_options['backend_startup_project'] = \
UserStringOption(
'Default project to execute in Visual Studio',
'')
def get_builtin_option(self, optname):
for opts in self._get_all_builtin_options():
v = opts.get(optname)
if v is None:
continue
if optname == 'wrap_mode':
return WrapMode.from_string(v.value)
return v.value
raise RuntimeError('Tried to get unknown builtin option %s.' % optname)
def _try_set_builtin_option(self, optname, value):
for opts in self._get_all_builtin_options():
opt = opts.get(optname)
if opt is None:
continue
if optname == 'prefix':
value = self.sanitize_prefix(value)
else:
prefix = self.builtins['prefix'].value
value = self.sanitize_dir_option_value(prefix, optname, value)
break
else:
return False
opt.set_value(value)
# Make sure that buildtype matches other settings.
if optname == 'buildtype':
self.set_others_from_buildtype(value)
else:
self.set_buildtype_from_others()
return True
def set_builtin_option(self, optname, value):
res = self._try_set_builtin_option(optname, value)
if not res:
raise RuntimeError('Tried to set unknown builtin option %s.' % optname)
def set_others_from_buildtype(self, value):
if value == 'plain':
opt = '0'
debug = False
elif value == 'debug':
opt = '0'
debug = True
elif value == 'debugoptimized':
opt = '2'
debug = True
elif value == 'release':
opt = '3'
debug = False
elif value == 'minsize':
opt = 's'
debug = True
else:
assert(value == 'custom')
return
self.builtins['optimization'].set_value(opt)
self.builtins['debug'].set_value(debug)
def set_buildtype_from_others(self):
opt = self.builtins['optimization'].value
debug = self.builtins['debug'].value
if opt == '0' and not debug:
mode = 'plain'
elif opt == '0' and debug:
mode = 'debug'
elif opt == '2' and debug:
mode = 'debugoptimized'
elif opt == '3' and not debug:
mode = 'release'
elif opt == 's' and debug:
mode = 'minsize'
else:
mode = 'custom'
self.builtins['buildtype'].set_value(mode)
@staticmethod
def get_prefixed_options_per_machine(
        options_per_machine # : PerMachine[Dict[str, _V]]
) -> Iterable[Dict[str, _V]]:
for for_machine in iter(MachineChoice):
prefix = for_machine.get_prefix()
yield {
prefix + k: v
for k, v in options_per_machine[for_machine].items()
}
def _get_all_nonbuiltin_options(self) -> Iterable[Dict[str, UserOption]]:
yield self.backend_options
yield self.user_options
yield from self.get_prefixed_options_per_machine(self.compiler_options)
yield self.base_options
    def _get_all_builtin_options(self) -> Iterable[Dict[str, UserOption]]:
yield from self.get_prefixed_options_per_machine(self.builtins_per_machine)
yield self.builtins
    def get_all_options(self) -> Iterable[Dict[str, UserOption]]:
yield from self._get_all_nonbuiltin_options()
yield from self._get_all_builtin_options()
def validate_option_value(self, option_name, override_value):
for opts in self.get_all_options():
opt = opts.get(option_name)
if opt is not None:
try:
return opt.validate_value(override_value)
except MesonException as e:
raise type(e)(('Validation failed for option %s: ' % option_name) + str(e)) \
                        .with_traceback(sys.exc_info()[2])
else:
raise MesonException('Tried to validate unknown option %s.' % option_name)
def get_external_args(self, for_machine: MachineChoice, lang):
return self.compiler_options[for_machine][lang + '_args'].value
def get_external_link_args(self, for_machine: MachineChoice, lang):
return self.compiler_options[for_machine][lang + '_link_args'].value
def merge_user_options(self, options):
for (name, value) in options.items():
if name not in self.user_options:
self.user_options[name] = value
else:
oldval = self.user_options[name]
if type(oldval) != type(value):
self.user_options[name] = value
def set_options(self, options, subproject='', warn_unknown=True):
# Set prefix first because it's needed to sanitize other options
prefix = self.builtins['prefix'].value
if 'prefix' in options:
prefix = self.sanitize_prefix(options['prefix'])
self.builtins['prefix'].set_value(prefix)
for key in builtin_dir_noprefix_options:
if key not in options:
self.builtins[key].set_value(builtin_options[key].prefixed_default(key, prefix))
unknown_options = []
for k, v in options.items():
if k == 'prefix':
continue
if self._try_set_builtin_option(k, v):
continue
for opts in self._get_all_nonbuiltin_options():
tgt = opts.get(k)
if tgt is None:
continue
tgt.set_value(v)
break
else:
unknown_options.append(k)
if unknown_options and warn_unknown:
unknown_options = ', '.join(sorted(unknown_options))
sub = 'In subproject {}: '.format(subproject) if subproject else ''
mlog.warning('{}Unknown options: "{}"'.format(sub, unknown_options))
def set_default_options(self, default_options, subproject, env):
# Set defaults first from conf files (cross or native), then
        # override them as necessary.
for k, v in env.paths.host:
if v is not None:
env.cmd_line_options.setdefault(k, v)
# Set default options as if they were passed to the command line.
# Subprojects can only define default for user options.
from . import optinterpreter
for k, v in default_options.items():
if subproject:
if optinterpreter.is_invalid_name(k, log=False):
continue
k = subproject + ':' + k
env.cmd_line_options.setdefault(k, v)
# Create a subset of cmd_line_options, keeping only options for this
# subproject. Also take builtin options if it's the main project.
# Language and backend specific options will be set later when adding
# languages and setting the backend (builtin options must be set first
# to know which backend we'll use).
options = {}
# Some options default to environment variables if they are
# unset, set those now. These will either be overwritten
# below, or they won't. These should only be set on the first run.
if env.first_invocation:
p_env = os.environ.get('PKG_CONFIG_PATH')
if p_env:
options['pkg_config_path'] = p_env.split(':')
for k, v in env.cmd_line_options.items():
if subproject:
if not k.startswith(subproject + ':'):
continue
elif k not in builtin_options.keys() \
and 'build.' + k not in builtin_options_per_machine.keys() \
and k not in builtin_options_per_machine.keys():
if ':' in k:
continue
if optinterpreter.is_invalid_name(k, log=False):
continue
options[k] = v
self.set_options(options, subproject)
def process_new_compiler(self, lang: str, comp, env):
from . import compilers
self.compilers[comp.for_machine][lang] = comp
optprefix = lang + '_'
for k, o in comp.get_and_default_options(env.properties[comp.for_machine]).items():
if not k.startswith(optprefix):
raise MesonException('Internal error, %s has incorrect prefix.' % k)
# prefixed compiler options affect just this machine
opt_prefix = comp.for_machine.get_prefix()
if opt_prefix + k in env.cmd_line_options:
o.set_value(env.cmd_line_options[opt_prefix + k])
self.compiler_options[comp.for_machine].setdefault(k, o)
enabled_opts = []
for optname in comp.base_options:
if optname in self.base_options:
continue
oobj = compilers.base_options[optname]
if optname in env.cmd_line_options:
oobj.set_value(env.cmd_line_options[optname])
enabled_opts.append(optname)
self.base_options[optname] = oobj
self.emit_base_options_warnings(enabled_opts)
def emit_base_options_warnings(self, enabled_opts: list):
if 'b_bitcode' in enabled_opts:
            mlog.warning('Base option \'b_bitcode\' is enabled, which is incompatible with many linker options. Incompatible options such as \'b_asneeded\' have been disabled.')
mlog.warning('Please see https://mesonbuild.com/Builtin-options.html#Notes_about_Apple_Bitcode_support for more details.')
class CmdLineFileParser(configparser.ConfigParser):
def __init__(self):
# We don't want ':' as key delimiter, otherwise it would break when
# storing subproject options like "subproject:option=value"
super().__init__(delimiters=['='])
def get_cmd_line_file(build_dir):
return os.path.join(build_dir, 'meson-private', 'cmd_line.txt')
def read_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
config.read(filename)
# Do a copy because config is not really a dict. options.cmd_line_options
# overrides values from the file.
d = dict(config['options'])
d.update(options.cmd_line_options)
options.cmd_line_options = d
properties = config['properties']
if not options.cross_file:
options.cross_file = ast.literal_eval(properties.get('cross_file', '[]'))
if not options.native_file:
# This will be a string in the form: "['first', 'second', ...]", use
# literal_eval to get it into the list of strings.
options.native_file = ast.literal_eval(properties.get('native_file', '[]'))
def write_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
properties = {}
if options.cross_file:
properties['cross_file'] = options.cross_file
if options.native_file:
properties['native_file'] = options.native_file
config['options'] = options.cmd_line_options
config['properties'] = properties
with open(filename, 'w') as f:
config.write(f)
def update_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
config.read(filename)
config['options'].update(options.cmd_line_options)
with open(filename, 'w') as f:
config.write(f)
def major_versions_differ(v1, v2):
return v1.split('.')[0:2] != v2.split('.')[0:2]
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
load_fail_msg = 'Coredata file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except (pickle.UnpicklingError, EOFError):
raise MesonException(load_fail_msg)
except AttributeError:
raise MesonException(
"Coredata file {!r} references functions or classes that don't "
"exist. This probably means that it was generated with an old "
"version of meson.".format(filename))
if not isinstance(obj, CoreData):
raise MesonException(load_fail_msg)
if major_versions_differ(obj.version, version):
raise MesonException('Build directory has been generated with Meson version %s, '
'which is incompatible with current version %s.\n' %
(obj.version, version))
return obj
def save(obj, build_dir):
filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
prev_filename = filename + '.prev'
tempfilename = filename + '~'
if major_versions_differ(obj.version, version):
raise MesonException('Fatal version mismatch corruption.')
if os.path.exists(filename):
import shutil
shutil.copyfile(filename, prev_filename)
with open(tempfilename, 'wb') as f:
pickle.dump(obj, f)
f.flush()
os.fsync(f.fileno())
os.replace(tempfilename, filename)
return filename
def register_builtin_arguments(parser):
for n, b in builtin_options.items():
b.add_to_argparse(n, parser, '', '')
for n, b in builtin_options_per_machine.items():
b.add_to_argparse(n, parser, '', ' (just for host machine)')
b.add_to_argparse(n, parser, 'build.', ' (just for build machine)')
parser.add_argument('-D', action='append', dest='projectoptions', default=[], metavar="option",
help='Set the value of an option, can be used several times to set multiple options.')
def create_options_dict(options):
result = {}
for o in options:
try:
(key, value) = o.split('=', 1)
except ValueError:
raise MesonException('Option {!r} must have a value separated by equals sign.'.format(o))
result[key] = value
return result
def parse_cmd_line_options(args):
args.cmd_line_options = create_options_dict(args.projectoptions)
# Merge builtin options set with --option into the dict.
for name in chain(
builtin_options.keys(),
('build.' + k for k in builtin_options_per_machine.keys()),
builtin_options_per_machine.keys(),
):
value = getattr(args, name, None)
if value is not None:
if name in args.cmd_line_options:
cmdline_name = BuiltinOption.argparse_name_to_arg(name)
raise MesonException(
'Got argument {0} as both -D{0} and {1}. Pick one.'.format(name, cmdline_name))
args.cmd_line_options[name] = value
delattr(args, name)
_U = TypeVar('_U', bound=UserOption[_T])
class BuiltinOption(Generic[_T, _U]):
"""Class for a builtin option type.
Currently doesn't support UserIntegerOption, or a few other cases.
"""
def __init__(self, opt_type: Type[_U], description: str, default: Any, yielding: Optional[bool] = None, *,
choices: Any = None):
self.opt_type = opt_type
self.description = description
self.default = default
self.choices = choices
self.yielding = yielding
def init_option(self) -> _U:
"""Create an instance of opt_type and return it."""
keywords = {'yielding': self.yielding, 'value': self.default}
if self.choices:
keywords['choices'] = self.choices
return self.opt_type(self.description, **keywords)
def _argparse_action(self) -> Optional[str]:
if self.default is True:
return 'store_false'
elif self.default is False:
return 'store_true'
return None
def _argparse_choices(self) -> Any:
if self.opt_type is UserBooleanOption:
return [True, False]
elif self.opt_type is UserFeatureOption:
return UserFeatureOption.static_choices
return self.choices
@staticmethod
def argparse_name_to_arg(name: str) -> str:
if name == 'warning_level':
return '--warnlevel'
else:
return '--' + name.replace('_', '-')
def prefixed_default(self, name: str, prefix: str = '') -> Any:
if self.opt_type in [UserComboOption, UserIntegerOption]:
return self.default
try:
return builtin_dir_noprefix_options[name][prefix]
except KeyError:
pass
return self.default
def add_to_argparse(self, name: str, parser: argparse.ArgumentParser, prefix: str, help_suffix: str) -> None:
kwargs = {}
c = self._argparse_choices()
b = self._argparse_action()
h = self.description
if not b:
h = '{} (default: {}).'.format(h.rstrip('.'), self.prefixed_default(name))
else:
kwargs['action'] = b
if c and not b:
kwargs['choices'] = c
kwargs['default'] = argparse.SUPPRESS
kwargs['dest'] = prefix + name
cmdline_name = self.argparse_name_to_arg(prefix + name)
parser.add_argument(cmdline_name, help=h + help_suffix, **kwargs)
# Update `docs/markdown/Builtin-options.md` after changing the options below
builtin_options = OrderedDict([
# Directories
('prefix', BuiltinOption(UserStringOption, 'Installation prefix', default_prefix())),
('bindir', BuiltinOption(UserStringOption, 'Executable directory', 'bin')),
('datadir', BuiltinOption(UserStringOption, 'Data file directory', 'share')),
('includedir', BuiltinOption(UserStringOption, 'Header file directory', 'include')),
('infodir', BuiltinOption(UserStringOption, 'Info page directory', 'share/info')),
('libdir', BuiltinOption(UserStringOption, 'Library directory', default_libdir())),
('libexecdir', BuiltinOption(UserStringOption, 'Library executable directory', default_libexecdir())),
('localedir', BuiltinOption(UserStringOption, 'Locale data directory', 'share/locale')),
('localstatedir', BuiltinOption(UserStringOption, 'Localstate data directory', 'var')),
('mandir', BuiltinOption(UserStringOption, 'Manual page directory', 'share/man')),
('sbindir', BuiltinOption(UserStringOption, 'System executable directory', 'sbin')),
('sharedstatedir', BuiltinOption(UserStringOption, 'Architecture-independent data directory', 'com')),
('sysconfdir', BuiltinOption(UserStringOption, 'Sysconf data directory', 'etc')),
# Core options
('auto_features', BuiltinOption(UserFeatureOption, "Override value of all 'auto' features", 'auto')),
('backend', BuiltinOption(UserComboOption, 'Backend to use', 'ninja', choices=backendlist)),
('buildtype', BuiltinOption(UserComboOption, 'Build type to use', 'debug',
choices=['plain', 'debug', 'debugoptimized', 'release', 'minsize', 'custom'])),
('debug', BuiltinOption(UserBooleanOption, 'Debug', True)),
('default_library', BuiltinOption(UserComboOption, 'Default library type', 'shared', choices=['shared', 'static', 'both'])),
('errorlogs', BuiltinOption(UserBooleanOption, "Whether to print the logs from failing tests", True)),
('install_umask', BuiltinOption(UserUmaskOption, 'Default umask to apply on permissions of installed files', '022')),
('layout', BuiltinOption(UserComboOption, 'Build directory layout', 'mirror', choices=['mirror', 'flat'])),
('optimization', BuiltinOption(UserComboOption, 'Optimization level', '0', choices=['0', 'g', '1', '2', '3', 's'])),
('stdsplit', BuiltinOption(UserBooleanOption, 'Split stdout and stderr in test logs', True)),
('strip', BuiltinOption(UserBooleanOption, 'Strip targets on install', False)),
('unity', BuiltinOption(UserComboOption, 'Unity build', 'off', choices=['on', 'off', 'subprojects'])),
('warning_level', BuiltinOption(UserComboOption, 'Compiler warning level to use', '1', choices=['0', '1', '2', '3'])),
('werror', BuiltinOption(UserBooleanOption, 'Treat warnings as errors', False)),
('wrap_mode', BuiltinOption(UserComboOption, 'Wrap mode', 'default', choices=['default', 'nofallback', 'nodownload', 'forcefallback'])),
])
builtin_options_per_machine = OrderedDict([
('pkg_config_path', BuiltinOption(UserArrayOption, 'List of additional paths for pkg-config to search', [])),
('cmake_prefix_path', BuiltinOption(UserArrayOption, 'List of additional prefixes for cmake to search', [])),
])
# Special prefix-dependent defaults for installation directories that reside in
# a path outside of the prefix in FHS and common usage.
builtin_dir_noprefix_options = {
'sysconfdir': {'/usr': '/etc'},
'localstatedir': {'/usr': '/var', '/usr/local': '/var/local'},
'sharedstatedir': {'/usr': '/var/lib', '/usr/local': '/var/local/lib'},
}
forbidden_target_names = {'clean': None,
'clean-ctlist': None,
'clean-gcno': None,
'clean-gcda': None,
'coverage': None,
'coverage-text': None,
'coverage-xml': None,
'coverage-html': None,
'phony': None,
'PHONY': None,
'all': None,
'test': None,
'benchmark': None,
'install': None,
'uninstall': None,
'build.ninja': None,
'scan-build': None,
'reconfigure': None,
'dist': None,
'distcheck': None,
}
|
the-stack_0_4054 | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for AWS's Athena EDW service."""
import datetime
import json
import logging
import os
from typing import Dict
from perfkitbenchmarker import data
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import flags
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
AWS_ATHENA_CMD_PREFIX = ['aws', 'athena']
AWS_ATHENA_CMD_POSTFIX = ['--output', 'json']
# TODO(user): Derive the full table set from the TPC suite.
TPC_H_TABLES = [
'customer', 'lineitem', 'nation', 'orders', 'part', 'partsupp', 'region',
'supplier'
]
TPC_DS_TABLES = [
'call_center', 'catalog_page', 'catalog_returns', 'catalog_sales',
'customer', 'customer_address', 'customer_demographics', 'date_dim',
'dbgen_version', 'household_demographics', 'income_band', 'inventory',
'item', 'promotion', 'reason', 'ship_mode', 'store', 'store_returns',
'store_sales', 'time_dim', 'warehouse', 'web_page', 'web_returns',
'web_sales', 'web_site'
]
FLAGS = flags.FLAGS
class AthenaQueryError(RuntimeError):
pass
def GetAthenaClientInterface(
database: str, output_bucket: str) -> edw_service.EdwClientInterface:
"""Builds and Returns the requested Athena client Interface.
Args:
database: Name of the Athena database to execute queries against.
output_bucket: String name of the S3 bucket to store query output.
Returns:
A concrete Client Interface object (subclass of EdwClientInterface)
Raises:
RuntimeError: if an unsupported athena_client_interface is requested
"""
if FLAGS.athena_client_interface == 'CLI':
return CliClientInterface(database, output_bucket)
raise RuntimeError('Unknown Athena Client Interface requested.')
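# Illustrative call (values are hypothetical): the Athena resource below constructs
# its client roughly as
#   GetAthenaClientInterface(database='pkb12345678', output_bucket='pkb-athena-us-east-1-12345678')
# which, with --athena_client_interface=CLI, yields a CliClientInterface bound to
# that database and S3 output location.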
class CliClientInterface(edw_service.EdwClientInterface):
"""Command Line Client Interface class for Athena.
Uses the native Athena client available with the awscli
https://docs.aws.amazon.com/cli/latest/reference/athena/index.html.
Attributes:
database: String name of the Athena database to execute queries against.
output_bucket: String name of the S3 bucket to store query output.
"""
def __init__(self, database: str, output_bucket: str):
super(CliClientInterface, self).__init__()
self.database = database
self.output_bucket = 's3://%s' % output_bucket
def Prepare(self, benchmark_name: str) -> None:
"""Prepares the client vm to execute query.
    Installs the awscli and other dependencies, and pushes the query execution framework to the client vm.
Args:
benchmark_name: String name of the benchmark, to allow extraction and
usage of benchmark specific artifacts (certificates, etc.) during client
vm preparation.
"""
self.client_vm.Install('pip')
self.client_vm.RemoteCommand('sudo pip install absl-py')
for pkg in ('aws_credentials', 'awscli'):
self.client_vm.Install(pkg)
# Push the framework to execute a sql query and gather performance details.
service_specific_dir = os.path.join('edw', Athena.SERVICE_TYPE)
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir, 'script_runner.sh')))
runner_permission_update_cmd = 'chmod 755 {}'.format('script_runner.sh')
self.client_vm.RemoteCommand(runner_permission_update_cmd)
self.client_vm.PushFile(
data.ResourcePath(os.path.join('edw', 'script_driver.py')))
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir,
'provider_specific_script_driver.py')))
def ExecuteQuery(self, query_name) -> (float, Dict[str, str]):
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute
Returns:
A tuple of (execution_time, run_metadata)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
run_metadata: A dictionary of query execution attributes eg. script name
"""
stdout, _ = self.client_vm.RemoteCommand(
'python script_driver.py --script={} --database={} --query_timeout={} '
'--athena_query_output_bucket={}'.format(query_name, self.database,
FLAGS.athena_query_timeout,
self.output_bucket))
script_performance = json.loads(str(stdout))
execution_time = script_performance[query_name]['execution_time']
run_metadata = {'script': query_name}
if 'error_details' in script_performance[query_name]:
run_metadata['error_details'] = script_performance[query_name][
'error_details']
run_metadata.update(self.GetMetadata())
return execution_time, run_metadata
def GetMetadata(self) -> Dict[str, str]:
"""Gets the Metadata attributes for the Client Interface."""
return {'client': FLAGS.athena_client_interface}
def ReadScript(script_uri):
"""Method to read a sql script based on its local path.
Arguments:
script_uri: Local URI of file containing SQL query.
Returns:
Query String contents of the URI location.
Raises:
IOError: If the script cannot be read.
"""
with open(script_uri) as fp:
return fp.read()
def PrepareQueryString(query_string_template, substitutions):
"""Method to read a template Athena script and substitute placeholders.
Args:
query_string_template: Template version of the Athena query.
substitutions: A dictionary of string placeholder keys and corresponding
string values.
Returns:
Materialized Athena query as a string.
"""
  # Start from the template and apply every substitution, not just the last one.
  query_string = query_string_template
  for key, value in substitutions.items():
    query_string = query_string.replace(key, value)
  return query_string
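# Illustrative example (hypothetical values, not executed by the benchmark):
#   PrepareQueryString('create database database_name', {'database_name': 'pkb123'})
# returns 'create database pkb123'; every placeholder key found in the template is
# replaced with its corresponding value.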
def RunScriptCommand(script_command):
"""Method to execute an AWS Athena cli command.
Args:
script_command: Fully compiled AWS Athena cli command.
Returns:
String stdout result of executing the query.
Script Command execution duration in seconds (rounded).
Raises:
AthenaQueryError: If the return code does not indicate success.
"""
start_time = datetime.datetime.now()
stdout, _, retcode = vm_util.IssueCommand(
script_command, raise_on_failure=False)
if retcode:
raise AthenaQueryError
end_time = datetime.datetime.now()
return stdout, int((end_time - start_time).total_seconds())
class Athena(edw_service.EdwService):
"""Object representing a Athena data warehouse."""
CLOUD = providers.AWS
SERVICE_TYPE = 'athena'
def __init__(self, edw_service_spec):
super(Athena, self).__init__(edw_service_spec)
self.region = util.GetRegionFromZone(FLAGS.zones[0])
self.output_bucket = '-'.join(
[FLAGS.athena_output_location_prefix, self.region, FLAGS.run_uri])
self.client_interface = GetAthenaClientInterface(self.cluster_identifier,
self.output_bucket)
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(self.region)
self.s3_service.MakeBucket(self.output_bucket)
if FLAGS.provision_athena:
self.data_bucket = 'pkb' + self.cluster_identifier.replace('_', '')
self.tables = (
TPC_H_TABLES if FLAGS.edw_tpc_dsb_type == 'tpc_h' else TPC_DS_TABLES)
self.athena_db_create_time = 0
self.athena_table_create_time = 0
def BuildAthenaCommand(self, query_string, database=None):
"""Method to compile a AWS Athena cli command.
Arguments:
query_string: A string with the query that needs to be executed on Athena.
database: The Athena database against which the query should be executed.
Returns:
Fully compiled AWS Athena cli command.
"""
cmd = []
cmd.extend(AWS_ATHENA_CMD_PREFIX)
cmd.extend([
'--region', self.region,
'start-query-execution',
'--query-string', query_string
])
if database:
cmd.extend(['--query-execution-context', ('Database=%s' % database)])
cmd.extend([
'--result-configuration',
('OutputLocation=s3://%s' % self.output_bucket)
])
cmd.extend(AWS_ATHENA_CMD_POSTFIX)
return cmd
def _Create(self):
"""Create a Athena data warehouse."""
def _EmptyDatabase():
"""Remove tables, if they exist, so they can be refreshed.
If the database and/or tables don't already exist, the drop commands
will simply fail, which won't raise errors.
"""
drop_script_path = data.ResourcePath('edw/athena/%s/ddl/drop.sql' %
FLAGS.edw_tpc_dsb_type)
drop_script_contents = ReadScript(drop_script_path)
# Drop all tables so the database can be dropped.
for table in self.tables:
# Remove the folder backing each parquet table so they can be refreshed.
vm_util.IssueCommand([
'aws', 's3', 'rm',
's3://%s/%s_parquet' % (self.data_bucket, table), '--recursive'
], raise_on_failure=False)
# The parquet tables don't have the type suffix so that the queries can
# run as written without having to change the table names.
for suffix in ['_csv', '']:
script_contents = PrepareQueryString(drop_script_contents,
{'{table}': table + suffix})
script_command = self.BuildAthenaCommand(
script_contents, database=self.cluster_identifier)
RunScriptCommand(script_command)
drop_database_query_string = PrepareQueryString(
'drop database database_name',
{'database_name': self.cluster_identifier})
script_command = self.BuildAthenaCommand(drop_database_query_string)
RunScriptCommand(script_command)
def _CreateDatabase():
create_database_query_string = PrepareQueryString(
'create database database_name',
{'database_name': self.cluster_identifier})
script_command = self.BuildAthenaCommand(create_database_query_string)
return RunScriptCommand(script_command)
def _CreateTable(table_create_sql_template):
template_script_path = data.ResourcePath(table_create_sql_template)
template_script_contents = ReadScript(template_script_path)
script_contents = PrepareQueryString(template_script_contents,
{'{bucket}': self.data_bucket})
script_command = self.BuildAthenaCommand(
script_contents, database=self.cluster_identifier)
return RunScriptCommand(script_command)
def _CreateAllTables():
"""Create all TPC benchmarking tables."""
cumulative_table_create_time = 0
for table in self.tables:
for suffix in ['_csv', '_parquet']:
script = 'edw/athena/%s/ddl/%s.sql' % (FLAGS.edw_tpc_dsb_type,
table + suffix)
_, table_create_time = _CreateTable(script)
cumulative_table_create_time += table_create_time
return cumulative_table_create_time
_EmptyDatabase()
_, self.athena_db_create_time = _CreateDatabase()
self.athena_table_create_time = _CreateAllTables()
def _Exists(self):
"""Method to validate the existence of a Athena data warehouse.
Returns:
Boolean value indicating the existence of a Athena data warehouse.
"""
raise NotImplementedError
def _Delete(self):
"""Delete a Athena data warehouse."""
if not FLAGS.teardown_athena:
logging.info('The current resource is requested to be long living.')
return
raise NotImplementedError
def Cleanup(self):
# Direct cleanup is used instead of _DeleteDependencies because the Athena
# warehouse resource isn't created/deleted each time.
self.s3_service.DeleteBucket(self.output_bucket)
def GetMetadata(self):
"""Return a dictionary of the metadata for the Athena data warehouse."""
basic_data = super(Athena, self).GetMetadata()
basic_data.update({'database': self.cluster_identifier})
basic_data.update(self.client_interface.GetMetadata())
return basic_data
|
the-stack_0_4056 | import json
from django.db.models import Count
from django.http import HttpResponse
from django.template import loader
from .models import Scent, TestResult
def index(request):
template = loader.get_template('smelltest/index.html')
context = {
}
return HttpResponse(template.render(context, request))
def data(request):
scents = Scent.objects.order_by('id')
test_results = TestResult.objects.values('scent', 'guess').annotate(Count('scent'))
ret = {
'nodes': [{
'name': s.name,
'group': 1,
'testCount': s.tests.count()
} for s in scents],
'links': [{
'source': r['scent'] - 1, # 0-index array vs 1-index table PK
'target': r['guess'] - 1,
'value': r['scent__count']
} for r in test_results]
}
return HttpResponse(json.dumps(ret), content_type="application/json")
|
the-stack_0_4060 | #!/usr/bin/env python
# Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tool to automatically format source code in Nuitka style.
"""
import os
import re
import subprocess
import sys
from logging import warning
from nuitka.tools.quality.Git import (
getFileHashContent,
putFileHashContent,
updateFileIndex,
updateWorkingFile,
)
from nuitka.Tracing import my_print
from nuitka.utils.Execution import getExecutablePath, withEnvironmentPathAdded
from nuitka.utils.FileOperations import (
getFileContents,
renameFile,
withPreserveFileMode,
)
from nuitka.utils.Shebang import getShebangFromFile
from nuitka.utils.Utils import getOS
def _cleanupWindowsNewlines(filename):
""" Remove Windows new-lines from a file.
Simple enough to not depend on external binary.
"""
with open(filename, "rb") as f:
source_code = f.read()
updated_code = source_code.replace(b"\r\n", b"\n")
updated_code = updated_code.replace(b"\n\r", b"\n")
if updated_code != source_code:
with open(filename, "wb") as out_file:
out_file.write(updated_code)
def _cleanupTrailingWhitespace(filename):
""" Remove trailing white spaces from a file.
"""
with open(filename, "r") as f:
source_lines = [line for line in f]
clean_lines = [line.rstrip() for line in source_lines]
if clean_lines != source_lines:
with open(filename, "w") as out_file:
out_file.write("\n".join(clean_lines) + "\n")
def _updateCommentNode(comment_node):
if "pylint:" in str(comment_node.value):
def replacer(part):
def renamer(pylint_token):
# pylint: disable=too-many-branches,too-many-return-statements
if pylint_token == "E0602":
return "undefined-variable"
elif pylint_token in ("E0401", "F0401"):
return "import-error"
elif pylint_token == "E1102":
return "not-callable"
elif pylint_token == "E1133":
return " not-an-iterable"
elif pylint_token == "E1128":
return "assignment-from-none"
# Save line length for this until isort is better at long lines.
elif pylint_token == "useless-suppression":
return "I0021"
# elif pylint_token == "I0021":
# return "useless-suppression"
elif pylint_token == "R0911":
return "too-many-return-statements"
elif pylint_token == "R0201":
return "no-self-use"
elif pylint_token == "R0902":
return "too-many-instance-attributes"
elif pylint_token == "R0912":
return "too-many-branches"
elif pylint_token == "R0914":
return "too-many-locals"
elif pylint_token == "R0915":
return "too-many-statements"
elif pylint_token == "W0123":
return "eval-used"
elif pylint_token == "W0603":
return "global-statement"
elif pylint_token == "W0613":
return "unused-argument"
elif pylint_token == "W0622":
return "redefined-builtin"
elif pylint_token == "W0703":
return "broad-except"
else:
return pylint_token
return part.group(1) + ",".join(
sorted(renamer(token) for token in part.group(2).split(","))
)
new_value = re.sub(
r"(pylint\: disable=)(.*)", replacer, str(comment_node.value), flags=re.M
)
comment_node.value = new_value
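# For example (hypothetical input), a comment node whose value is
#   "# pylint: disable=W0613,E0602"
# is rewritten by _updateCommentNode above to
#   "# pylint: disable=undefined-variable,unused-argument"
# i.e. numeric codes are renamed to their symbolic names and the tokens are sorted.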
def _cleanupPyLintComments(filename, abort):
from baron.parser import ( # pylint: disable=I0021,import-error,no-name-in-module
ParsingError, # @UnresolvedImport
)
from redbaron import ( # pylint: disable=I0021,import-error,no-name-in-module
RedBaron, # @UnresolvedImport
)
old_code = getFileContents(filename)
try:
red = RedBaron(old_code)
# red = RedBaron(old_code.rstrip()+'\n')
except ParsingError:
if abort:
raise
my_print("PARSING ERROR.")
return 2
for node in red.find_all("CommentNode"):
try:
_updateCommentNode(node)
except Exception:
my_print("Problem with", node)
node.help(deep=True, with_formatting=True)
raise
new_code = red.dumps()
if new_code != old_code:
with open(filename, "w") as source_code:
source_code.write(red.dumps())
def _cleanupImportRelative(filename):
package_name = os.path.dirname(filename).replace(os.path.sep, ".")
# Make imports local if possible.
if package_name.startswith("nuitka."):
source_code = getFileContents(filename)
updated_code = re.sub(
r"from %s import" % package_name, "from . import", source_code
)
updated_code = re.sub(r"from %s\." % package_name, "from .", source_code)
if source_code != updated_code:
with open(filename, "w") as out_file:
out_file.write(updated_code)
_binary_calls = {}
def _getPythonBinaryCall(binary_name):
if binary_name not in _binary_calls:
# Try running Python installation.
try:
__import__(binary_name)
_binary_calls[binary_name] = [sys.executable, "-m", binary_name]
return _binary_calls[binary_name]
except ImportError:
pass
binary_path = getExecutablePath(binary_name)
if binary_path:
_binary_calls[binary_name] = [binary_path]
return _binary_calls[binary_name]
sys.exit("Error, cannot find %s, not installed for this Python?" % binary_name)
return _binary_calls[binary_name]
def _cleanupImportSortOrder(filename):
_cleanupImportRelative(filename)
isort_call = _getPythonBinaryCall("isort")
contents = getFileContents(filename)
start_index = None
if "\n# isort:start" in contents:
parts = contents.splitlines()
start_index = parts.index("# isort:start")
contents = "\n".join(parts[start_index + 1 :])
with open(filename, "w") as out_file:
out_file.write(contents)
with open(os.devnull, "w") as devnull:
subprocess.check_call(
isort_call
+ [
"-q", # quiet, but stdout is still garbage
"-ot", # Order imports by type in addition to alphabetically
"-m3", # "vert-hanging"
"-up", # Prefer braces () over \ for line continuation.
"-tc", # Trailing commas
"-ns", # Do not ignore those:
"__init__.py",
filename,
],
stdout=devnull,
)
if start_index is not None:
contents = getFileContents(filename)
contents = "\n".join(parts[: start_index + 1]) + "\n" + contents
with open(filename, "w") as out_file:
out_file.write(contents)
warned_clang_format = False
def cleanupClangFormat(filename):
""" Call clang-format on a given filename to format C code.
Args:
filename: What file to re-format.
"""
# Using global here, as this is really a singleton, in
# the form of a module, pylint: disable=global-statement
global warned_clang_format
clang_format_path = getExecutablePath("clang-format-6.0")
# Extra ball on Windows, check default installation PATH too.
if not clang_format_path and getOS() == "Windows":
with withEnvironmentPathAdded("PATH", r"C:\Program Files\LLVM\bin"):
clang_format_path = getExecutablePath("clang-format")
if clang_format_path:
subprocess.call(
[
clang_format_path,
"-i",
"-style={BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 120}",
filename,
]
)
else:
if not warned_clang_format:
warning("Need to install LLVM for C files format.")
warned_clang_format = True
def _shouldNotFormatCode(filename):
parts = os.path.abspath(filename).split(os.path.sep)
if "inline_copy" in parts:
return True
elif "tests" in parts:
return "run_all.py" not in parts and "compile_itself.py" not in parts
else:
return False
def _isPythonFile(filename):
if filename.endswith((".py", ".pyw", ".scons")):
return True
else:
shebang = getShebangFromFile(filename)
if shebang is not None:
shebang = shebang[2:].lstrip()
if shebang.startswith("/usr/bin/env"):
shebang = shebang[12:].lstrip()
if shebang.startswith("python"):
return True
return False
def autoformat(filename, git_stage, abort):
# This does a lot of distinctions, pylint:disable=too-many-branches
if os.path.isdir(filename):
return
filename = os.path.normpath(filename)
my_print("Consider", filename, end=": ")
is_python = _isPythonFile(filename)
is_c = filename.endswith((".c", ".h"))
is_txt = filename.endswith(
(".txt", ".rst", ".sh", ".in", ".md", ".stylesheet", ".j2")
)
# Some parts of Nuitka must not be re-formatted with black or clang-format
# as they have different intentions.
if not (is_python or is_c or is_txt):
my_print("Ignored file type")
return
# Work on a temporary copy
tmp_filename = filename + ".tmp"
if git_stage:
old_code = getFileHashContent(git_stage["dst_hash"])
else:
old_code = getFileContents(filename, "rb")
with open(tmp_filename, "wb") as output_file:
output_file.write(old_code)
try:
if is_python:
_cleanupWindowsNewlines(tmp_filename)
if not _shouldNotFormatCode(filename):
_cleanupPyLintComments(tmp_filename, abort)
_cleanupImportSortOrder(tmp_filename)
black_call = _getPythonBinaryCall("black")
subprocess.call(black_call + ["-q", tmp_filename])
_cleanupWindowsNewlines(tmp_filename)
elif is_c:
_cleanupWindowsNewlines(tmp_filename)
            cleanupClangFormat(tmp_filename)
_cleanupWindowsNewlines(tmp_filename)
elif is_txt:
_cleanupWindowsNewlines(tmp_filename)
_cleanupTrailingWhitespace(tmp_filename)
_cleanupWindowsNewlines(tmp_filename)
changed = False
if old_code != getFileContents(tmp_filename, "rb"):
my_print("Updated.")
with withPreserveFileMode(filename):
if git_stage:
new_hash_value = putFileHashContent(tmp_filename)
updateFileIndex(git_stage, new_hash_value)
updateWorkingFile(filename, git_stage["dst_hash"], new_hash_value)
else:
renameFile(tmp_filename, filename)
changed = True
else:
my_print("OK.")
return changed
finally:
if os.path.exists(tmp_filename):
os.unlink(tmp_filename)
|
the-stack_0_4061 | import json
import requests
def webhook(event, context):
print(event)
body = json.loads(event['Records'][0]['body'])
print(body)
headers = {
'Authorization': body['token'],
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post('https://notify-api.line.me/api/notify',
headers=headers,
data={'message': body['message']})
print(r)
# AWS record sample
#{'Records': [{'messageId': 'fddc42ba-a122-4581-965e-d0144ac8a5ad', 'receiptHandle': 'AQEBjO32gY5pXOfOrmDR0hD4k1av9KyjbHFpc+rIBPV2Brif7Lo+jqnGevSjfFwlICyGf+BhWwKaxFw8XdB3QTzRbw0vnLURjnQeDSBrJHa/S57SRs9TOLRBq38maycAVg69iZbetg9VhLMBCcLtOtPHTzKkmo+/Sosm51WA5CzXK7A0rteikx6nxS1CUIpq6MAujodupP0Hgr5RjK5nH/nmxA4Db0leWEmLokalZbtlx4W14tp7PZxPOrQOLDaGrH//p4h32tY8IN3MkCqi+gyNT7kCU4KwCGOIrybb07ZWyKBTKw+KOMNr/Ykj4z2N1qxIvTM55UY9d8V29YsH32OjrZTei5P7Nke/51E2tWkmkqoFAlqzxDjQPvpP+Pvvr8aazeeZ6opkr59UefAiiyM71Q==', 'body': 'hi', 'attributes': {'ApproximateReceiveCount': '9', 'SentTimestamp': '1566621263072', 'SenderId': '901588721449', 'ApproximateFirstReceiveTimestamp': '1566621263072'}, 'messageAttributes': {}, 'md5OfBody': '49f68a5c8493ec2c0bf489821c21fc3b', 'eventSource': 'aws:sqs', 'eventSourceARN': 'arn:aws:sqs:us-east-1:901588721449:LINE_notify_consumer', 'awsRegion': 'us-east-1'}]}
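# The SQS message body is expected to be JSON carrying the LINE Notify token and the
# text to forward, e.g. (hypothetical values):
#   {"token": "Bearer <LINE Notify access token>", "message": "deploy finished"}
# since body['token'] is used verbatim as the Authorization header above.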
|
the-stack_0_4063 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Networks with Parallel Concatenations (GoogLeNet)
#
# The Inception block
# %%
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from d2l import torch as d2l
class Inception(nn.Module):
def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
super(Inception, self).__init__(**kwargs)
        # Path 1: a single 1x1 convolution
self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Path 2: 1x1 convolution followed by a 3x3 convolution
self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Path 3: 1x1 convolution followed by a 5x5 convolution
self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Path 4: 3x3 max pooling followed by a 1x1 convolution
self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)
def forward(self, x):
p1 = F.relu(self.p1_1(x))
p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
p4 = F.relu(self.p4_2(self.p4_1(x)))
        # Concatenate the outputs of the four paths along the channel dimension
return torch.cat((p1, p2, p3, p4), dim=1)
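# %%
# Quick sanity check (added sketch, not part of the original notebook): the four
# parallel paths keep the spatial size, and their channel counts add up, so this
# block maps 192 input channels to 64 + 128 + 32 + 32 = 256 output channels.
blk = Inception(192, 64, (96, 128), (16, 32), 32)
print(blk(torch.rand(1, 192, 28, 28)).shape)  # torch.Size([1, 256, 28, 28])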
# %% [markdown]
# The GoogLeNet model
# %%
# GoogLeNet is composed of 5 blocks connected in series
# b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
# nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2,
# padding=1))
# b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
# nn.Conv2d(64, 192, kernel_size=3, padding=1),
# nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
# Inception(256, 128, (128, 192), (32, 96), 64),
# nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
# Inception(512, 160, (112, 224), (24, 64), 64),
# Inception(512, 128, (128, 256), (24, 64), 64),
# Inception(512, 112, (144, 288), (32, 64), 64),
# Inception(528, 256, (160, 320), (32, 128), 128),
# nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
# Inception(832, 384, (192, 384), (48, 128), 128),
# nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
# net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
class GoogLeNet(nn.Module):
def __init__(self):
super().__init__()
self.b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
Inception(256, 128, (128, 192), (32, 96), 64),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
Inception(512, 160, (112, 224), (24, 64), 64),
Inception(512, 128, (128, 256), (24, 64), 64),
Inception(512, 112, (144, 288), (32, 64), 64),
Inception(528, 256, (160, 320), (32, 128), 128),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
Inception(832, 384, (192, 384), (48, 128), 128),
nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
self.fc = nn.Linear(1024, 10)
def forward(self, x):
x = self.b1(x)
x = self.b2(x)
x = self.b3(x)
x = self.b4(x)
x = self.b5(x)
x = self.fc(x)
return x
# %% [markdown]
# To keep training on Fashion-MNIST short, we reduce the input height and width from 224 to 96
# %%
X = torch.rand(size=(1, 1, 96, 96))
# The Sequential `net` above is commented out, so walk the blocks of the
# class-based model instead to check the output shape of each stage.
net = GoogLeNet()
for layer in [net.b1, net.b2, net.b3, net.b4, net.b5, net.fc]:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
# %% [markdown]
# Train the model
# %%
# lr, num_epochs, batch_size = 0.1, 10, 128
# train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
# d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
# %%
model = GoogLeNet()
# Hyperparameters (values follow the commented-out d2l cell above; adjust as needed)
lr, epochs, bs = 0.1, 10, 128
# loss
loss_func = F.cross_entropy
# Optimizer setup
opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
# Data: x_train, y_train, x_valid, y_valid are assumed to be pre-loaded tensors
# (e.g. Fashion-MNIST images resized to 96x96)
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=bs, shuffle=False)
# Training
for epoch in range(epochs):
    # Set the model to training mode
    model.train()
    # Iterate over the training data one batch at a time
    for xb, yb in train_dl:
        # Forward pass
        pred = model(xb)
        # Compute the loss
        loss = loss_func(pred, yb)
        # Backward pass: compute the gradient of the loss w.r.t. each weight parameter
        loss.backward()
        # The optimizer updates (learns) the weight parameters by gradient descent
        opt.step()
        # Zero the accumulated gradients of all weight parameters
        opt.zero_grad()
    # Switch to evaluation (inference) mode, which reconfigures BN, dropout, etc.
    model.eval()
    # Do not track gradients during evaluation
    with torch.no_grad():
        valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)
print(epoch, valid_loss / len(valid_dl)) |
the-stack_0_4065 | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`snmp_v3_account_show`-PyFOS util to show the snmp v3 account information.
*******************************************************************************
The :mod:`snmp_v3_account_show` provides option to display the
snmp v3 account information.
This module can be used to display the snmp v3 account information including
the index, user name, user group, authentication & privacy protocol,
authenticaiton & privacy password and manager engine id.
* inputs:
| Infrastructure options:
| -i,--ipaddr=IPADDR IP address of FOS switch.
| -L,--login=LOGIN login name.
| -P,--password=PASSWORD password.
| -f,--vfid=VFID VFID to which the request is directed to [OPTIONAL].
| -s,--secured=MODE HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose verbose mode[OPTIONAL].
| Util scripts options:
| --index=INDEX Index of SNMPv3 account
* outputs:
* SNMP v3 account configuration details.
.. function:: snmp_v3_account_info(session, v3account)
* Display the snmp v3 account information.
Example usage of the method:
result = snmp_v3_account_info(inputs['session'], v3account)
print (result)
Details::
snmp_v3_acc_obj = v3_account()
result = snmp_v3_acc_obj.get(session)
* inputs:
:param session: session returned by login.
* outputs:
        :rtype: dictionary of the returned SNMP v3 account REST response
*use cases*
1. Retrieve the snmp v3 account details.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.utils import brcd_util
from pyfos.pyfos_brocade_snmp import v3_account
def snmp_v3_account_info(session, v3account):
snmp_v3_acc_obj = v3_account()
if v3account is None:
result = snmp_v3_acc_obj.get(session, None)
else:
result = snmp_v3_acc_obj.get(session, v3account)
return result
def main(argv):
# Print arguments
# print(sys.argv[1:])
filters = ['index']
inputs = brcd_util.parse(argv, v3_account, filters)
v3account_obj = inputs['utilobject']
session = brcd_util.getsession(inputs)
result = snmp_v3_account_info(
inputs['session'], v3account_obj.peek_index())
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_4066 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opentelemetry import trace as trace_api
from opentelemetry.exporter.datadog import constants, propagator
from opentelemetry.sdk import trace
from opentelemetry.trace import get_current_span, set_span_in_context
FORMAT = propagator.DatadogFormat()
def get_as_list(dict_object, key):
value = dict_object.get(key)
return [value] if value is not None else []
class TestDatadogFormat(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.serialized_trace_id = propagator.format_trace_id(
trace.generate_trace_id()
)
cls.serialized_parent_id = propagator.format_span_id(
trace.generate_span_id()
)
cls.serialized_origin = "origin-service"
def test_malformed_headers(self):
"""Test with no Datadog headers"""
malformed_trace_id_key = FORMAT.TRACE_ID_KEY + "-x"
malformed_parent_id_key = FORMAT.PARENT_ID_KEY + "-x"
context = get_current_span(
FORMAT.extract(
get_as_list,
{
malformed_trace_id_key: self.serialized_trace_id,
malformed_parent_id_key: self.serialized_parent_id,
},
)
).get_context()
self.assertNotEqual(context.trace_id, int(self.serialized_trace_id))
self.assertNotEqual(context.span_id, int(self.serialized_parent_id))
self.assertFalse(context.is_remote)
def test_missing_trace_id(self):
"""If a trace id is missing, populate an invalid trace id."""
carrier = {
FORMAT.PARENT_ID_KEY: self.serialized_parent_id,
}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_current_span(ctx).get_context()
self.assertEqual(span_context.trace_id, trace_api.INVALID_TRACE_ID)
def test_missing_parent_id(self):
"""If a parent id is missing, populate an invalid trace id."""
carrier = {
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
}
ctx = FORMAT.extract(get_as_list, carrier)
span_context = get_current_span(ctx).get_context()
self.assertEqual(span_context.span_id, trace_api.INVALID_SPAN_ID)
def test_context_propagation(self):
"""Test the propagation of Datadog headers."""
parent_context = get_current_span(
FORMAT.extract(
get_as_list,
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.PARENT_ID_KEY: self.serialized_parent_id,
FORMAT.SAMPLING_PRIORITY_KEY: str(constants.AUTO_KEEP),
FORMAT.ORIGIN_KEY: self.serialized_origin,
},
)
).get_context()
self.assertEqual(
parent_context.trace_id, int(self.serialized_trace_id)
)
self.assertEqual(
parent_context.span_id, int(self.serialized_parent_id)
)
self.assertEqual(parent_context.trace_flags, constants.AUTO_KEEP)
self.assertEqual(
parent_context.trace_state.get(constants.DD_ORIGIN),
self.serialized_origin,
)
self.assertTrue(parent_context.is_remote)
child = trace.Span(
"child",
trace_api.SpanContext(
parent_context.trace_id,
trace.generate_span_id(),
is_remote=False,
trace_flags=parent_context.trace_flags,
trace_state=parent_context.trace_state,
),
parent=parent_context,
)
child_carrier = {}
child_context = set_span_in_context(child)
FORMAT.inject(dict.__setitem__, child_carrier, context=child_context)
self.assertEqual(
child_carrier[FORMAT.TRACE_ID_KEY], self.serialized_trace_id
)
self.assertEqual(
child_carrier[FORMAT.PARENT_ID_KEY], str(child.context.span_id)
)
self.assertEqual(
child_carrier[FORMAT.SAMPLING_PRIORITY_KEY],
str(constants.AUTO_KEEP),
)
self.assertEqual(
child_carrier.get(FORMAT.ORIGIN_KEY), self.serialized_origin
)
def test_sampling_priority_auto_reject(self):
"""Test sampling priority rejected."""
parent_context = get_current_span(
FORMAT.extract(
get_as_list,
{
FORMAT.TRACE_ID_KEY: self.serialized_trace_id,
FORMAT.PARENT_ID_KEY: self.serialized_parent_id,
FORMAT.SAMPLING_PRIORITY_KEY: str(constants.AUTO_REJECT),
},
)
).get_context()
self.assertEqual(parent_context.trace_flags, constants.AUTO_REJECT)
child = trace.Span(
"child",
trace_api.SpanContext(
parent_context.trace_id,
trace.generate_span_id(),
is_remote=False,
trace_flags=parent_context.trace_flags,
trace_state=parent_context.trace_state,
),
parent=parent_context,
)
child_carrier = {}
child_context = set_span_in_context(child)
FORMAT.inject(dict.__setitem__, child_carrier, context=child_context)
self.assertEqual(
child_carrier[FORMAT.SAMPLING_PRIORITY_KEY],
str(constants.AUTO_REJECT),
)
|
the-stack_0_4068 | """This file contains functions used as part of a user creation pipeline, such as django-social-auth."""
# pylint: disable=W0613
from urllib.parse import urlunparse, urlparse
from .models import TermsAndConditions
from django.http import HttpResponseRedirect, QueryDict
from django.conf import settings
import logging
ACCEPT_TERMS_PATH = getattr(settings, "ACCEPT_TERMS_PATH", "/terms/accept/")
TERMS_RETURNTO_PARAM = getattr(settings, "TERMS_RETURNTO_PARAM", "returnTo")
LOGGER = logging.getLogger(name="termsandconditions")
def user_accept_terms(backend, user, uid, social_user=None, *args, **kwargs):
"""Check if the user has accepted the terms and conditions after creation."""
LOGGER.debug("user_accept_terms")
if TermsAndConditions.get_active_terms_not_agreed_to(user):
return redirect_to_terms_accept("/")
else:
return {"social_user": social_user, "user": user}
def redirect_to_terms_accept(current_path="/", slug="default"):
"""Redirect the user to the terms and conditions accept page."""
redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
if slug != "default":
redirect_url_parts[2] += slug
querystring = QueryDict(redirect_url_parts[4], mutable=True)
querystring[TERMS_RETURNTO_PARAM] = current_path
redirect_url_parts[4] = querystring.urlencode(safe="/")
return HttpResponseRedirect(urlunparse(redirect_url_parts))
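# For example, redirect_to_terms_accept("/dashboard/", "privacy") redirects to
# "/terms/accept/privacy?returnTo=/dashboard/" under the default ACCEPT_TERMS_PATH
# and TERMS_RETURNTO_PARAM settings (the exact query string encoding comes from
# Django's QueryDict.urlencode).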
|
the-stack_0_4071 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class OmPropVo(object):
def __init__(self, uuid=None, omId=None, omName=None, attrName=None, attrText=None, attrType=None, userPin=None, instanceId=None):
"""
:param uuid: (Optional)
:param omId: (Optional)
:param omName: (Optional)
:param attrName: (Optional)
:param attrText: (Optional)
:param attrType: (Optional)
:param userPin: (Optional)
:param instanceId: (Optional)
"""
self.uuid = uuid
self.omId = omId
self.omName = omName
self.attrName = attrName
self.attrText = attrText
self.attrType = attrType
self.userPin = userPin
self.instanceId = instanceId
|
the-stack_0_4072 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
from ifdvsonogramonly import ifdvsonogramonly
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.backends.backend_pdf import PdfPages
# import seaborn as sns; sns.set()
from matplotlib.ticker import FuncFormatter
"""
Load song data
"""
# load in song data
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed_forTOD.csv"
log_song_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
col_to_skip = ['Latitude', 'Longitude', 'RecordingDay',
'RecordingMonth', 'RecordingYear', 'RecordingTime',
'RecordingTimeSeconds']
data_subset = log_song_data.drop(col_to_skip, axis=1)
# load in time data --> before or after sunrise, twilights, and noon (only going to use sunrise and noon)
data_path = "C:/Users/abiga/Box " \
"Sync/Abigail_Nicole/ChippiesTimeOfDay" \
"/FinalChippiesDataReExportedAs44100Hz_LogTransformed" \
"_forTOD_SunriseTwilightNoon.csv"
time_data = pd.DataFrame.from_csv(data_path, header=0, index_col=None)
# must remove duplicates -- have more than one bird from same recording -- duplicate catalog number and time data
time_data = time_data.drop_duplicates()
# combine tables using catalog no
combined_df = pd.merge(data_subset, time_data, on='CatalogNo')
# only keep ones with time data
combined_df = combined_df.drop(combined_df[combined_df.Sunrise ==
'--'].index).copy().reset_index(
drop=True)
song_variables = combined_df.columns[1:5]
"""
Finding example songs for each time of day (the ones closest to the average for the specified song features of interest)
"""
var_of_interest = ['Duration of Song Bout (log(ms))', 'Total Number of '
'Syllables (log(number))']
var_diffs = ['DiffBoutDur', 'DiffSyllDur', 'DiffSilenceDur', 'DiffNumSylls']
example_files = {}
bout_dur = {}
for time in ['before sunrise', 'after sunrise', 'after noon']:
mean_df = pd.DataFrame(columns=['CatalogNo', 'DiffBoutDur', 'DiffSyllDur', 'DiffSilenceDur', 'DiffNumSylls'])
for i in range(0, 2):
tod_data = combined_df.loc[combined_df['Sunrise'] == time]
mean_df['CatalogNo'] = tod_data['CatalogNo']
mean_df['Duration of Song Bout (log(ms))'] = tod_data['Duration of ' \
'Song Bout (' \
'log(ms))']
mean_df[var_diffs[i]] = abs(tod_data[var_of_interest[i]] - tod_data[
var_of_interest[i]].mean())
mean_df['DiffSum'] = mean_df[var_diffs].sum(axis=1)
example_files.update({time: mean_df.loc[mean_df['DiffSum'].idxmin()][
'CatalogNo']})
bout_dur.update({time: mean_df.loc[mean_df['DiffSum'].idxmin()][
'Duration of Song Bout (log(ms))']})
del mean_df
print(example_files)
"""
Load in example songs and make figures
"""
# outputs from before we added data that had been misclassified as Borror Lab but really was ML
# song_names = ['176261_44k_b5of11_beforesunriseExt.wav',
# 'XC76506_b1of2_morningExt.wav',
# '76777_b4of17_afternoonExt.wav']
song_names = ['176261_44k_b5of11_beforesunriseExt.wav',
'73829 s1 bout_morningExt_ampmore.wav', # amplified in Audacity for better visualization in plot
'15435 s1 bout_afternoonExt.wav']
for name in song_names:
song_file = "C:/Users/abiga\Box " \
"Sync\Abigail_Nicole\ChippiesTimeOfDay" \
"\TODExampleSongs_boutDurNumSylls/ExtendedTimeOfRecording/" +\
name
song, rate = sf.read(song_file)
sonogram, timeAxis_conversion, freqAxis_conversion = ifdvsonogramonly(song,
rate,
1024,
1010.0,
2.0)
fig = plt.figure(figsize=(11, 7))
ax = fig.add_subplot(1, 1, 1)
# sns.set(style='white')
[rows, cols] = np.shape(sonogram)
im = plt.imshow(np.log(sonogram+3),
cmap='gray_r',
extent=[0, cols, 0, rows],
aspect='auto')
ax.get_xaxis().set_major_formatter(plt.FuncFormatter(
lambda x, p: "%.2f" % (x*timeAxis_conversion/1000)))
ax.get_yaxis().set_major_formatter(plt.FuncFormatter(
lambda x, p: "%.0f" % (x*freqAxis_conversion/1000)))
plt.tick_params(labelsize=14)
plt.savefig("C:/Users/abiga\Box "
"Sync/Abigail_Nicole/ChippiesTimeOfDay"
"/TODExampleSongs_boutDurNumSylls/ExtendedTimeOfRecording/" +
                name + '_sonogram' + '.pdf', format='pdf',
dpi=fig.dpi, bbox_inches='tight',
transparent=True)
# plt.show()
|
the-stack_0_4073 | class ElementConstructor(type):
    # Give every class created with this metaclass its own instance-cache dict.
    def __new__(cls, name, classes, fields):
        fields["cache"] = dict()
        return super().__new__(cls, name, classes, fields)
# Common abstract classes
class Element(metaclass=ElementConstructor):
def __new__(cls, key, *args, **kwargs):
if not str(key) in cls.cache:
cls.cache[str(key)] = super().__new__(cls, *args, **kwargs)
return cls.cache[str(key)]
def __repr__(self):
return str(self.__getattribute__(self.primary_key))
def delete(self):
key = self.__getattribute__(self.primary_key)
if key in self.cache:
del self.cache[key]
@classmethod
def clear_cache(cls):
cls.cache.clear()
@property
def primary_key(self):
raise NotImplementedError
class UpdatableElement(Element):
@property
def entry_data_path(self):
raise NotImplementedError
@property
def base_url(self):
raise NotImplementedError
def set_data(self, data):
raise NotImplementedError
class HasMediaElement(UpdatableElement):
@property
def media_path(self):
raise NotImplementedError
@property
def media_query_hash(self):
raise NotImplementedError
class Account(HasMediaElement):
primary_key = "username"
entry_data_path = ("ProfilePage", 0, "graphql", "user")
base_url = ""
media_path = ("user", "edge_owner_to_timeline_media")
media_query_hash = "c6809c9c025875ac6f02619eae97a80e"
def __init__(self, username):
# self.id = None
self.username = username
# self.full_name = None
# self.profile_pic_url = None
# self.profile_pic_url_hd = None
# self.fb_page = None
# self.biography = None
# self.follows_count = None
# self.followers_count = None
# self.media_count = None
# self.is_private = None
# self.is_verified = None
# self.country_block = None
self.media = set()
self.follows = set()
self.followers = set()
def set_data(self, data):
self.id = data["id"]
self.full_name = data["full_name"]
self.profile_pic_url = data["profile_pic_url"]
self.profile_pic_url_hd = data["profile_pic_url_hd"]
self.fb_page = data["connected_fb_page"]
self.biography = data["biography"]
self.follows_count = data["edge_follow"]["count"]
self.followers_count = data["edge_followed_by"]["count"]
self.media_count = data["edge_owner_to_timeline_media"]["count"]
self.is_private = data["is_private"]
self.is_verified = data["is_verified"]
self.country_block = data["country_block"]
class Media(UpdatableElement):
primary_key = "code"
entry_data_path = ("PostPage", 0, "graphql", "shortcode_media")
base_url = "p/"
def __init__(self, code):
self.id = None
self.code = code
self.caption = None
self.owner = None
self.date = None
self.location = None
self.likes_count = None
self.comments_count = None
self.comments_disabled = None
self.is_video = None
self.video_url = None
self.is_ad = None
self.display_url = None
self.resources = None
self.is_album = None
self.album = set()
self.likes = set()
self.comments = set()
def set_data(self, data):
self.id = data["id"]
self.code = data["shortcode"]
if data["edge_media_to_caption"]["edges"]:
self.caption = data["edge_media_to_caption"]["edges"][0]["node"]["text"]
else:
self.caption = None
if "username" in data["owner"]:
self.owner = Account(data["owner"]["username"])
self.date = data["taken_at_timestamp"]
if "location" in data and data["location"] and "id" in data["location"]:
self.location = Location(data["location"]["id"])
self.likes_count = data["edge_media_preview_like"]["count"]
if "edge_media_to_comment" in data:
self.comments_count = data["edge_media_to_comment"]["count"]
else:
self.comments_count = data["edge_media_to_parent_comment"]["count"]
self.comments_disabled = data["comments_disabled"]
self.is_video = data["is_video"]
if self.is_video and "video_url" in data:
self.video_url = data["video_url"]
if "is_ad" in data:
self.is_ad = data["is_ad"]
self.display_url = data["display_url"]
if "display_resources" in data:
self.resources = [resource["src"] for resource in data["display_resources"]]
else:
self.resources = [resource["src"] for resource in data["thumbnail_resources"]]
self.album = set()
self.is_album = data.get("__typename") == "GraphSidecar"
if "edge_sidecar_to_children" in data:
for edge in data["edge_sidecar_to_children"]["edges"]:
if edge["node"].get("shortcode", self.code) != self.code:
child = Media(edge["node"]["shortcode"])
child.id = edge["node"]["id"]
child.is_video = edge["node"]["is_video"]
if child.is_video and "video_url" in edge["node"]:
child.video_url = edge["node"]["video_url"]
child.display_url = edge["node"]["display_url"]
if "display_resources" in edge["node"]:
child.resources = [resource["src"] for resource in edge["node"]["display_resources"]]
elif "thumbnail_resources" in edge["node"]:
child.resources = [resource["src"] for resource in edge["node"]["thumbnail_resources"]]
child.is_album = False
self.album.add(child)
class Story(Element):
primary_key = "id"
def __init__(self, id):
self.id = id
class Location(HasMediaElement):
primary_key = "id"
entry_data_path = ("LocationsPage", 0, "graphql", "location")
base_url = "explore/locations/"
media_path = ("location", "edge_location_to_media")
media_query_hash = "ac38b90f0f3981c42092016a37c59bf7"
def __init__(self, id):
self.id = id
self.slug = None
self.name = None
self.has_public_page = None
self.directory = None
self.coordinates = None
self.media_count = None
self.media = set()
self.top_posts = set()
def set_data(self, data):
self.id = data["id"]
self.slug = data["slug"]
self.name = data["name"]
self.has_public_page = data["has_public_page"]
if "directory" in data:
self.directory = data["directory"]
self.coordinates = (data["lat"], data["lng"])
self.media_count = data["edge_location_to_media"]["count"]
for node in data["edge_location_to_top_posts"]["edges"]:
self.top_posts.add(Media(node["node"]["shortcode"]))
class Tag(HasMediaElement):
primary_key = "name"
entry_data_path = ("TagPage", 0, "graphql", "hashtag")
base_url = "explore/tags/"
media_path = ("hashtag", "edge_hashtag_to_media")
media_query_hash = "ded47faa9a1aaded10161a2ff32abb6b"
def __init__(self, name):
self.name = name
self.media_count = None
self.media = set()
self.top_posts = set()
def set_data(self, data):
self.name = data["name"]
self.media_count = data["edge_hashtag_to_media"]["count"]
for node in data["edge_hashtag_to_top_posts"]["edges"]:
self.top_posts.add(Media(node["node"]["shortcode"]))
class Comment(Element):
primary_key = "id"
def __init__(self, id, media, owner, text, created_at):
self.id = id
self.media = media
self.owner = owner
self.text = text
self.created_at = created_at
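# Minimal usage sketch (not part of the original module): the metaclass-provided cache
# means constructing an element twice with the same primary key yields the same object.
# The username below is made up for illustration.
if __name__ == "__main__":
    a1 = Account("example_user")
    a2 = Account("example_user")
    print(a1 is a2)   # True - both names refer to the cached instance
    print(repr(a1))   # prints the primary key, i.e. example_user
    Account.clear_cache()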
|
the-stack_0_4075 | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=
"""Word embedding training datasets."""
__all__ = ['Text8']
import os
import zipfile
from mxnet.gluon.utils import check_sha1, download
from .dataset import CorpusDataset
from .utils import _get_home_dir
###############################################################################
# Datasets
###############################################################################
class Text8(CorpusDataset):
"""Text8 corpus
http://mattmahoney.net/dc/textdata.html
Part of the test data for the Large Text Compression Benchmark
http://mattmahoney.net/dc/text.html. The first 10**8 bytes of the English
Wikipedia dump on Mar. 3, 2006.
License: https://en.wikipedia.org/wiki/Wikipedia:Copyrights
Parameters
----------
root : str, default '$MXNET_HOME/datasets/text8'
Path to temp folder for storing data.
MXNET_HOME defaults to '~/.mxnet'.
"""
archive_file = ('text8.zip', '6c70299b93b7e1f927b42cd8f6ac1a31547c7a2e')
data_file = {
'train': ('text8', '0dc3edebc970dcc96137e7deda4d9995af9d93de')
}
url = 'http://mattmahoney.net/dc/'
def __init__(self, root=os.path.join(_get_home_dir(), 'datasets', 'text8'),
segment='train', max_sentence_length=10000):
root = os.path.expanduser(root)
if not os.path.isdir(root):
os.makedirs(root)
self._root = root
self._segment = segment
self._max_sentence_length = max_sentence_length
super(Text8, self).__init__(self._get_data())
# pylint: disable=access-member-before-definition
if max_sentence_length:
data = []
for sentence in self._data:
for i in range(0, len(sentence), max_sentence_length):
data.append(sentence[i:i + max_sentence_length])
self._data = data
def _get_data(self):
archive_file_name, archive_hash = self.archive_file
data_file_name, data_hash = self.data_file[self._segment]
root = self._root
path = os.path.join(root, data_file_name)
if not os.path.exists(path) or not check_sha1(path, data_hash):
downloaded_file_path = download(self.url + archive_file_name,
path=root, sha1_hash=archive_hash)
with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
zf.extractall(root)
        return path
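# Usage sketch (assumes a working MXNet/GluonNLP environment; the first call downloads
# text8.zip, roughly 31 MB, into ~/.mxnet/datasets/text8):
#     text8 = Text8()          # defaults: segment='train', max_sentence_length=10000
#     print(len(text8))        # number of (possibly split) sentences
#     print(text8[0][:5])      # first five tokens of the first sentence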
|
the-stack_0_4077 | def findSmallest(arr):
    # Return the index of the smallest element in arr.
    smallest = arr[0]
    smallest_index = 0
    for i in range(1, len(arr)):
        if arr[i] < smallest:
            smallest = arr[i]
            smallest_index = i
    return smallest_index
def selectionSort(arr):
    # Repeatedly pop the smallest remaining element into a new list.
    newArr = []
    for i in range(len(arr)):
        smallest = findSmallest(arr)
        newArr.append(arr.pop(smallest))
    return newArr
print(selectionSort([5,3,8,2,6,9,7,5]))
print(selectionSort([53,3345,854,34,622,952,74,15]))
|
the-stack_0_4078 | """ Static order of nodes in dask graph
Dask makes decisions on what tasks to prioritize both
* Dynamically at runtime
* Statically before runtime
Dynamically we prefer to run tasks that were just made available. However when
several tasks become available at the same time we have an opportunity to break
ties in an intelligent way
      d
      |
  b   c
   \ /
    a
For example after we finish ``a`` we can choose to run either ``b`` or ``c``
next. Making small decisions like this can greatly affect our performance,
especially because the order in which we run tasks affects the order in which
we can release memory, which operationally we find to have a large affect on
many computation. We want to run tasks in such a way that we keep only a small
amount of data in memory at any given time.
Static Ordering
---------------
And so we create a total ordering over all nodes to serve as a tie breaker. We
represent this ordering with a dictionary mapping keys to integer values.
Lower scores have higher priority. These scores correspond to the order in
which a sequential scheduler would visit each node.
{'a': 0,
'c': 1,
'd': 2,
'b': 3}
There are several ways in which we might order our keys. This is a nuanced
process that has to take into account many different kinds of workflows, and
operate efficiently in linear time. We strongly recommend that readers look at
the docstrings of tests in dask/tests/test_order.py. These tests usually have
graph types laid out very carefully to show the kinds of situations that often
arise, and the order we would like to be determined.
Policy
------
Work towards *small goals* with *big steps*.
1. **Small goals**: prefer tasks whose final dependents have few dependencies.
We prefer to prioritize those tasks that help branches of computation that
can terminate quickly.
With more detail, we compute the total number of dependencies that each
task depends on (both its own dependencies, and the dependencies of its
dependencies, and so on), and then we choose those tasks that drive towards
results with a low number of total dependencies. We choose to prioritize
tasks that work towards finishing shorter computations first.
2. **Big steps**: prefer tasks with many dependents
However, many tasks work towards the same final dependents. Among those,
we choose those tasks with the most work left to do. We want to finish
the larger portions of a sub-computation before we start on the smaller
ones.
3. **Name comparison**: break ties with key name
Often graphs are made with regular keynames. When no other structural
difference exists between two keys, use the key name to break ties.
This relies on the regularity of graph constructors like dask.array to be a
good proxy for ordering. This is usually a good idea and a sane default.
"""
from __future__ import absolute_import, division, print_function
from .core import get_dependencies, reverse_dict, get_deps # noqa: F401
from .utils_test import add, inc # noqa: F401
def order(dsk, dependencies=None):
""" Order nodes in dask graph
This produces an ordering over our tasks that we use to break ties when
executing. We do this ahead of time to reduce a bit of stress on the
scheduler and also to assist in static analysis.
This currently traverses the graph as a single-threaded scheduler would
traverse it. It breaks ties in the following ways:
    1. Start from root nodes that have the largest subgraphs
    2. When a node has dependencies that are not yet computed prefer
       dependencies with large subtrees (start hard things first)
    3. When we reach a node that can be computed we then traverse up and
prefer dependents that have small super-trees (few total dependents)
(finish existing work quickly)
Examples
--------
>>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
>>> order(dsk)
{'a': 0, 'c': 1, 'b': 2, 'd': 3}
"""
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
for k, deps in dependencies.items():
deps.discard(k)
dependents = reverse_dict(dependencies)
total_dependencies = ndependencies(dependencies, dependents)
total_dependents, min_dependencies = ndependents(dependencies, dependents, total_dependencies)
waiting = {k: set(v) for k, v in dependencies.items()}
def dependencies_key(x):
return total_dependencies.get(x, 0), ReverseStrComparable(x)
def dependents_key(x):
return (min_dependencies[x],
-total_dependents.get(x, 0),
StrComparable(x))
result = dict()
seen = set() # tasks that should not be added again to the stack
i = 0
stack = [k for k, v in dependents.items() if not v]
if len(stack) < 10000:
stack = sorted(stack, key=dependencies_key)
else:
stack = stack[::-1]
while stack:
item = stack.pop()
if item in result:
continue
deps = waiting[item]
if deps:
stack.append(item)
seen.add(item)
if len(deps) < 1000:
deps = sorted(deps, key=dependencies_key)
stack.extend(deps)
continue
result[item] = i
i += 1
for dep in dependents[item]:
waiting[dep].discard(item)
deps = [d for d in dependents[item]
if d not in result and not (d in seen and len(waiting[d]) > 1)]
if len(deps) < 1000:
deps = sorted(deps, key=dependents_key, reverse=True)
stack.extend(deps)
return result
def ndependents(dependencies, dependents, total_dependencies):
""" Number of total data elements that depend on key
For each key we return the number of keys that can only be run after this
key is run. The root nodes have value 1 while deep child nodes will have
larger values.
We also return the minimum value of the maximum number of dependencies of
all final dependencies (see module-level comment for more)
Examples
--------
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> total_dependencies = ndependencies(dependencies, dependents)
>>> total_dependents, min_dependencies = ndependents(dependencies,
... dependents,
... total_dependencies)
>>> sorted(total_dependents.items())
[('a', 3), ('b', 2), ('c', 1)]
Returns
-------
total_dependendents: Dict[key, int]
min_dependencies: Dict[key, int]
"""
result = dict()
min_result = dict()
num_needed = {k: len(v) for k, v in dependents.items()}
current = {k for k, v in num_needed.items() if v == 0}
while current:
key = current.pop()
result[key] = 1 + sum(result[parent] for parent in dependents[key])
try:
min_result[key] = min(min_result[parent] for parent in dependents[key])
except ValueError:
min_result[key] = total_dependencies[key]
for child in dependencies[key]:
num_needed[child] -= 1
if num_needed[child] == 0:
current.add(child)
return result, min_result
def ndependencies(dependencies, dependents):
""" Number of total data elements on which this key depends
For each key we return the number of tasks that must be run for us to run
this task.
Examples
--------
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> sorted(ndependencies(dependencies, dependents).items())
[('a', 1), ('b', 2), ('c', 3)]
"""
result = dict()
num_needed = {k: len(v) for k, v in dependencies.items()}
current = {k for k, v in num_needed.items() if v == 0}
while current:
key = current.pop()
result[key] = 1 + sum(result[child] for child in dependencies[key])
for parent in dependents[key]:
num_needed[parent] -= 1
if num_needed[parent] == 0:
current.add(parent)
return result
class StrComparable(object):
""" Wrap object so that it defaults to string comparison
When comparing two objects of different types Python fails
>>> 'a' < 1 # doctest: +SKIP
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
This class wraps the object so that, when this would occur it instead
compares the string representation
>>> StrComparable('a') < StrComparable(1)
False
"""
__slots__ = ('obj',)
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj < other.obj
except Exception:
return str(self.obj) < str(other.obj)
class ReverseStrComparable(object):
""" Wrap object so that it defaults to string comparison
Used when sorting in reverse direction. See StrComparable for normal
documentation.
"""
__slots__ = ('obj',)
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj > other.obj
except Exception:
return str(self.obj) > str(other.obj)
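# Minimal demonstration (a sketch, not part of the library). Because of the relative
# imports above, run it as a module, e.g. `python -m dask.order`. The graph reuses the
# example from order()'s docstring via the `inc` and `add` helpers imported above.
if __name__ == '__main__':
    dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
    print(order(dsk))  # expected, per the docstring: {'a': 0, 'c': 1, 'b': 2, 'd': 3}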
|
the-stack_0_4081 | import os
import uuid
from datetime import datetime
from django.conf import settings
from django.utils.translation import gettext as _
from rest_framework.pagination import LimitOffsetPagination
from magpie.apps.files.models import File
from magpie.apps.files.versions.v1.serializers.file import (
FileSerializer,
FilesSerializer,
)
from magpie.core import api_exceptions
from magpie.core.cache import Cache
class FileService:
@classmethod
def upload_file(cls, request):
file_keys = dict(request.FILES).keys()
uploaded_files = []
for file_key in file_keys:
file_data = {}
file = request.FILES[file_key]
file_data['file'] = request.data[file_key]
file_data['consumer'] = request.consumer.id
file_data['file_name'] = request.FILES[file_key].name
# Check if file is greater than max upload size
if float(file.size) > float(
settings.MAGPIE['MAX_UPLOAD_SIZE']):
raise api_exceptions.ValidationError400({
'file_size': _('File is larger than expected'),
'max_size': f"{settings.MAGPIE['MAX_UPLOAD_SIZE']} "
f"bytes",
})
file_serializer = FileSerializer(
data=file_data,
)
if file_serializer.is_valid(raise_exception=True):
file_serializer.save()
uploaded_files.append(
file_serializer.data
)
upload_message = _("Count of uploaded files")
done_files_count = len(uploaded_files)
return {
"message": f"{upload_message}: {done_files_count}",
"count": done_files_count,
"files": uploaded_files,
}
@classmethod
def download_file(cls, request, file_id):
try:
if not isinstance(file_id, uuid.UUID):
file_id = uuid.UUID(file_id)
except ValueError:
raise api_exceptions.ValidationError400(
{
'id': _('Not a valid UUID')
}
)
file_object = Cache.get(
str(f"file_id:{file_id}-user_id:{request.user.id}"),
)
if not file_object:
try:
file_object = File.objects.get(
file_id=file_id,
consumer=request.consumer,
)
Cache.set(
key=str(f"file_id:{file_id}-user_id:{request.user.id}"),
store_value=file_object,
expiry_time=settings.MAGPIE['CACHE_EXPIRY'],
)
except File.DoesNotExist:
raise api_exceptions.NotFound404(
_('File does not exists or does not belongs to this user'),
)
path = file_object.file.path
return (
os.path.basename(path),
os.path.dirname(path),
)
@classmethod
def get_files(cls, request):
files_query = File.objects.filter(
consumer=request.consumer,
)
if request.query_params is not None:
if 'created_at_from' in request.query_params:
try:
created_at_from = datetime.fromtimestamp(
float(request.query_params['created_at_from'])
)
files_query = files_query.filter(
created_at__gte=created_at_from
)
except ValueError:
raise api_exceptions.ValidationError400(
detail={
'created_at_from': _("Datetime parsing error")
}
)
if 'created_at_to' in request.query_params:
try:
created_at_to = datetime.fromtimestamp(
float(request.query_params['created_at_to'])
)
files_query = files_query.filter(
created_at__lte=created_at_to
)
except ValueError:
raise api_exceptions.ValidationError400(
detail={
'created_at_to': _("Datetime parsing error")
}
)
# Order by
if 'order_by' in request.query_params:
order_field_error = []
order_by = [
x.strip() for x in request.query_params
['order_by'].split(',')
]
for order in order_by:
if not File.model_field_exists(
order.replace('-', ''),
):
order_field_error.append(order)
if order_field_error:
raise api_exceptions.ValidationError400(
{
'non_fields': _("Invalid choices in order by "
"query"),
'errors': order_field_error,
}
)
files_query = files_query.order_by(
*order_by
)
paginator = LimitOffsetPagination()
files_query = paginator.paginate_queryset(files_query, request)
files_serializer = FilesSerializer(
files_query,
many=True
)
return files_serializer.data, paginator
@classmethod
def delete_file(cls, request, file_id):
try:
if not isinstance(file_id, uuid.UUID):
file_id = uuid.UUID(file_id)
except ValueError:
raise api_exceptions.ValidationError400(
{
'id': _('Not a valid UUID')
}
)
try:
file_object = File.objects.get(
file_id=file_id,
consumer=request.consumer,
)
except File.DoesNotExist:
raise api_exceptions.NotFound404(
_('File does not exists or does not belongs to this consumer'),
)
Cache.delete(
key=str(f"file_id:{file_id}-user_id:{request.user.id}"),
)
file_object.file.delete()
file_object.delete()
return True
|
the-stack_0_4083 | import re
import numpy as np
from specutils import SpectrumList
from specutils.io.registers import data_loader
from .common import get_times
from ..loaders import FITS_FILE_EXTS, no_auto_identify
def calc_aaomega_resolutions(hdr):
# TODO: Add MODE = MOS / IFU
grat = hdr.get("GRATID")
gang = hdr.get("GRATANGL")
cang = hdr.get("CAMANGL")
order = hdr.get("ORDER")
if not (grat and gang and cang and order):
return None, None, None, None
# perhaps I should get this from somewhere in header?
npixels = 2048
# use value for MOS
resolutionpix = 3.4
# check if hdr['INSTRUME'] contains KOALA or SPIRAL ???
# resolutionpix = 2.1
rad = 180.0 / np.pi
flcam = 247
pix = 0.015
hwid = (npixels * pix / flcam) / 2.0
ddisp = np.cos(72.5 / (3.15 * 190))
slant = {
"580V": 0.7,
"385R": 0.6,
"1700B": 0.2,
"1500V": 0.0,
"1000R": 1.2,
"1000I": 1.8,
"3200R": 0.0,
"2500V": 0.0,
"2000R": 0.0,
"1700I": 0.7,
"1700D": 0.2,
}
slantr = slant[grat] / rad
linespmm = int(grat[:-1])
gangr = gang / rad
cangr = cang / rad - gangr
lcen = 1e7 * (np.sin(gangr) + np.sin(cangr)) / (linespmm * order)
lblaze = 1e7 * 2 * np.sin(gangr + slantr) / (linespmm * order)
# Get central and blaze wavelengths
lcen = int(lcen + 0.5)
lblaze = int(lblaze + 0.5)
dispc = 1e7 * pix / flcam * np.cos(cangr) / (order * linespmm)
resolutionpix = resolutionpix * np.cos(gangr) / np.cos(cangr)
resa = resolutionpix * dispc
res = lcen / resa
lcb = 1e7 * (np.sin(gangr) + np.sin(cangr - hwid)) / (order * linespmm)
lcr = 1e7 * (np.sin(gangr) + np.sin(cangr + hwid)) / (order * linespmm)
leb = ddisp * lcb
ler = ddisp * lcr
dcb = 1e7 * pix / flcam * np.cos(cangr - hwid) / (order * linespmm)
dcr = 1e7 * pix / flcam * np.cos(cangr + hwid) / (order * linespmm)
deb = ddisp * dcb
der = ddisp * dcr
racb = resolutionpix * dcb
racr = resolutionpix * dcr
raeb = racb * ddisp
raer = racr * ddisp
rcb = lcb / (resolutionpix * dcb)
rcr = lcr / (resolutionpix * dcr)
reb = rcb / ddisp
rer = rcr / ddisp
dispc = int((1000 * dispc) + 0.5) / 1000
resa = int((1000 * resa) + 0.5) / 1000
res = int(res + 0.5)
lcb = int(lcb + 0.5)
lcr = int(lcr + 0.5)
leb = int(leb + 0.5)
ler = int(ler + 0.5)
dcb = int((1000 * dcb) + 0.5) / 1000
dcr = int((1000 * dcr) + 0.5) / 1000
deb = int((1000 * deb) + 0.5) / 1000
der = int((1000 * der) + 0.5) / 1000
racb = int((1000 * racb) + 0.5) / 1000
racr = int((1000 * racr) + 0.5) / 1000
raeb = int((1000 * raeb) + 0.5) / 1000
raer = int((1000 * raer) + 0.5) / 1000
rcb = int(rcb + 0.5)
rcr = int(rcr + 0.5)
reb = int(reb + 0.5)
rer = int(rer + 0.5)
covc = lcr - lcb
cove = ler - leb
cov = ler - lcb
cen_res = resa
cen_rp = res
cen_rp_min = rcb
cen_rp_max = rcr
# cen_res = FWHM in Angstrom;
# cen_rp = resolving power at central wavelength
# cen_rp_min = min resolving power
# cen_rp_max = max resolving power
return cen_res, cen_rp, cen_rp_min, cen_rp_max
@data_loader(
label="Data Central AAOmega obscore", extensions=FITS_FILE_EXTS,
dtype=SpectrumList, identifier=no_auto_identify,
)
def aaomega_obscore_loader(fname):
spectra = SpectrumList.read(fname, format="Data Central AAOmega")
for spec in spectra:
cen_res, cen_rp, cen_rp_min, cen_rp_max = calc_aaomega_resolutions(
spec.meta["header"]
)
if cen_res is not None:
break
for spec in spectra:
# Don't produce obscore values for sky
if spec.meta["purpose"] == "reduced":
spec.meta["obscore"] = {}
obscore = spec.meta["obscore"]
hdr = spec.meta["header"]
t1, t2 = get_times(hdr, duration_kw="EXPOSED")
if t1 is not None:
obscore["t_min"] = t1.to_value('mjd',subfmt='float')
if t2 is not None:
obscore["t_max"] = t2.to_value('mjd',subfmt='float')
obscore["s_ra"] = hdr["RA"]
obscore["s_dec"] = hdr["DEC"]
obscore["s_fov"] = 2.1 / 3600
obscore["s_seeing"] = hdr.get("SEEING")
obscore["obs_collection"] = "aat_archive"
obscore["facility_name"] = "AAT"
# obscore["dataproduct_type"] = "spectrum"
obscore["dataproduct_subtype"] = "science"
obscore["calib_level"] = 2
obscore["t_exptime"] = hdr.get("EXPOSED")
nspecpix = len(spec.spectral_axis)
obscore["em_xel"] = nspecpix
obscore["em_ucd"] = "em.wl"
obscore["em_unit"] = "angstrom"
obscore["s_xel1"] = nspecpix
obscore["s_xel2"] = 1
obscore["t_xel"] = 1
obscore["em_min"] = spec.spectral_axis[0].meter
obscore["em_max"] = spec.spectral_axis[-1].meter
obscore["em_res_power"] = cen_rp
obscore["em_res_power_min"] = cen_rp_min
obscore["em_res_power_max"] = cen_rp_max
obscore["em_resolution"] = cen_res * 1e-10
obscore["o_ucd"] = "phot.count"
# spectra are calibrated in flux: ROW1 hdr comment: Flux-calibrated
# spectrum in 10^-17 erg/s/cm^2/A
# obscore['o_ucd'] = 'phot.flux'
# obscore['o_unit'] = '1.0E-17 erg/s/cm^2/A'
# obscore['o_calib_status'] = 'absolute'
# or perhaps 'relative' since these are fibre spectra?
obscore["instrument_name"] = "2dF-AAOmega"
obscore["em_calib_status"] = "calibrated"
if "OBJECT" in hdr:
obscore["target_name"] = hdr["OBJECT"]
if "OBJCOM" in hdr:
obscore["alt_target_name"] = hdr["OBJCOM"]
# alternative name: OBJECT
if "OBJPIV" in hdr:
obscore["obs_id"] = "%s-%s" % (
re.sub(".sds", "", hdr["CFG_FILE"]),
hdr["OBJPIV"],
)
return spectra
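# Usage sketch (hypothetical file name): because the loader above is registered with
# specutils under the label "Data Central AAOmega obscore", something like
#     spectra = SpectrumList.read("2dfaaomega_example.fits", format="Data Central AAOmega obscore")
# should return a SpectrumList whose reduced (non-sky) members carry an "obscore" metadata dict.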
|
the-stack_0_4084 | #*****************************************************
# *
# Copyright 2019 Amazon.com, Inc. or its affiliates. *
# All Rights Reserved. *
# *
#*****************************************************
# trigger_app.py
# This application acts as an IoT device in the AWS DeepLens Greengrass group.
# It triggers the camera to take a capture which is then published in a topic.
# The device consumes the message from the topic and shows it in the screen.
# The user then decides whether to keep it (pressing the letter 'y'), stop the app
# by pressing 'q', or drop it by pressing any other key
import logging
import queue
import base64
import json
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import numpy as np
import cv2
import time
GREENGRASS_IP = "<your DeepLens's IP address>"
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Queue to receive the pictures from the DeepLens
thumbnail_queue = queue.Queue()
# For certificate based connection
mqttClient = AWSIoTMQTTClient("trigger")
# Configurations
# For TLS mutual authentication
mqttClient.configureEndpoint(GREENGRASS_IP, 8883)
# Make sure your certificates and key names are the same as below
mqttClient.configureCredentials("./certs/rootca.pem", "./certs/private.pem.key", "./certs/certificate.pem.crt")
mqttClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
mqttClient.configureDrainingFrequency(2) # Draining: 2 Hz
mqttClient.configureConnectDisconnectTimeout(5) # 5 sec
mqttClient.configureMQTTOperationTimeout(5) # 5 sec
def main():
try:
connected = False
logger.debug("Connecting")
mqttClient.connect()
connected = True
mqttClient.subscribe('trigger/thumbnail', 0, process_thumbnail)
logger.debug("Connected!")
except BaseException as e:
logger.error("Error in connect!")
logger.exception(e)
if connected:
cv2.namedWindow("Input")
while True:
# Notify the camera to take a picture
mqttClient.publish('trigger/snap', json.dumps({ 'action': 'capture' }), 0)
            # Wait until there is a thumbnail to show
            payload = None
            try:
                payload = thumbnail_queue.get()
            except Exception:
                pass
if payload:
thumbnail = payload.get('thumbnail')
pic_id = payload.get('id')
if thumbnail:
# Show the picture and wait for user input
pressed_key = str(chr(show_thumbnail(thumbnail) & 255)).lower()
if pressed_key == 'y':
logger.debug('Telling to store into S3')
# Notify the camera to save the picture
mqttClient.publish('trigger/snap', json.dumps({ 'action': 'save', 'id': pic_id }), 0)
elif pressed_key == 'q':
break
else:
time.sleep(5)
cv2.destroyAllWindows()
def show_thumbnail(thumbnail):
logger.debug(len(thumbnail))
nparr = np.frombuffer(base64.b64decode(thumbnail), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
cv2.imshow('image', img)
return(cv2.waitKey(0))
def process_thumbnail(client, userdata, message):
payload = json.loads(message.payload.decode())
logger.debug('New message received: ')
logger.debug(payload.get('id'))
logger.debug("from topic: ")
logger.debug(message.topic)
logger.debug("--------------\n\n")
thumbnail_queue.put(payload)
if __name__ == "__main__":
main()
|
the-stack_0_4087 | """jc - JSON CLI output utility `systemctl` command output parser
Usage (cli):
$ systemctl | jc --systemctl
or
$ jc systemctl
Usage (module):
import jc.parsers.systemctl
result = jc.parsers.systemctl.parse(systemctl_command_output)
Schema:
[
{
"unit": string,
"load": string,
"active": string,
"sub": string,
"description": string
}
]
Examples:
$ systemctl -a | jc --systemctl -p
[
{
"unit": "proc-sys-fs-binfmt_misc.automount",
"load": "loaded",
"active": "active",
"sub": "waiting",
"description": "Arbitrary Executable File Formats File System Automount Point"
},
{
"unit": "dev-block-8:2.device",
"load": "loaded",
"active": "active",
"sub": "plugged",
"description": "LVM PV 3klkIj-w1qk-DkJi-0XBJ-y3o7-i2Ac-vHqWBM on /dev/sda2 2"
},
{
"unit": "dev-cdrom.device",
"load": "loaded",
"active": "active",
"sub": "plugged",
"description": "VMware_Virtual_IDE_CDROM_Drive"
},
...
]
"""
import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.4'
description = '`systemctl` command parser'
author = 'Kelly Brazil'
author_email = '[email protected]'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux']
magic_commands = ['systemctl']
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data to conform to the schema.
"""
# nothing more to process
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of Dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
# Clear any blank lines
linedata = list(filter(None, data.splitlines()))
raw_output = []
if jc.utils.has_data(data):
# clean up non-ascii characters, if any
cleandata = []
for entry in linedata:
cleandata.append(entry.encode('ascii', errors='ignore').decode())
header_text = cleandata[0]
header_list = header_text.lower().split()
raw_output = []
for entry in cleandata[1:]:
if 'LOAD = ' in entry:
break
else:
entry_list = entry.rstrip().split(maxsplit=4)
output_line = dict(zip(header_list, entry_list))
raw_output.append(output_line)
if raw:
return raw_output
else:
return _process(raw_output)
|
the-stack_0_4088 |
# https://practice.geeksforgeeks.org/problems/leaders-in-an-array-1587115620/1/?track=md-arrays&batchId=144#
# https://www.geeksforgeeks.org/leaders-in-an-array/
def leaders(A,N):
    # A leader is an element that is greater than or equal to every element to
    # its right; scan from the right while tracking the running maximum.
    ls = []
    max_from_right = A[-1]
    for i in range(N-2,-1,-1):
        if max_from_right <= A[i]:
            ls.append(A[i])
            max_from_right = A[i]
    ls = ls[::-1]
    ls.append(A[-1])  # the rightmost element is always a leader
    return ls
a= [16,17,4,3,5,2]
n = 6
print(leaders(a,n))
|
the-stack_0_4089 | # Given a list of iterators, implement a FlattenedIterator class which incrementally iterates over the integers from all the iterators in an interleaved fashion.
# Example:
# Iterators[0] = [1,2,3]
# Iterators[1] = [4,5]
# Iterators[2] = [6,7,8]
# FlattenedIterator = [1, 4, 6, 2, 5, 7, 3, 8]
# An iterator implements the next() and hasNext() interface. You're free to use them, and you will implement them on the FlattenedIterator class.
# You're free to initialize FlattenedIterator with any data structure of your choice for the iterators.
class FlattenedIterator:
def __init__(self, subiterators):
self.subiterators = []
self.res_index = 0
self.getValue(subiterators)
    def getValue(self,Subiterators):
        # Keep only the sub-iterators that currently have at least one value.
        for item in Subiterators:
            if item.hasNext():
                self.subiterators.append(item)
    def ridValue(self):
        # Remove the exhausted sub-iterator at the current position.
        self.subiterators.pop(self.res_index)
    def moveNext(self):
        # Advance round-robin: drop the current sub-iterator if it is exhausted,
        # otherwise step to the next one, wrapping back to index 0 at the end.
        res_index = self.res_index
        if not self.subiterators[res_index].hasNext():
            self.ridValue()
        else:
            res_index = self.res_index + 1
        if res_index <= len(self.subiterators) - 1:
            self.res_index = res_index
        else:
            self.res_index = 0
def hasNext(self):
if (self.subiterators):
return True
return False
def next(self):
if self.hasNext():
next_value = self.subiterators[self.res_index].next()
self.moveNext()
return next_value
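# Demonstration sketch: ListIterator is a minimal helper (not part of the original
# problem statement) that provides the assumed next()/hasNext() interface.
class ListIterator:
    def __init__(self, values):
        self.values = values
        self.index = 0
    def hasNext(self):
        return self.index < len(self.values)
    def next(self):
        value = self.values[self.index]
        self.index += 1
        return value
if __name__ == "__main__":
    flattened = FlattenedIterator([ListIterator([1, 2, 3]),
                                   ListIterator([4, 5]),
                                   ListIterator([6, 7, 8])])
    result = []
    while flattened.hasNext():
        result.append(flattened.next())
    print(result)  # [1, 4, 6, 2, 5, 7, 3, 8], matching the example above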
|
the-stack_0_4090 | from django.core.exceptions import ValidationError
from django.test import TestCase
from wagtail.core.models import Page
from wagtail.tests.utils import WagtailPageTests
from bc.standardpages.tests.fixtures import IndexPageFactory, InformationPageFactory
from ...standardpages.models import IndexPage, InformationPage
from ..models import HomePage
from .fixtures import HomePageFactory
class HomepageWagtailPageTests(WagtailPageTests):
"""
Test page creation and infrastructure
"""
def test_can_create_homepage(self):
self.assertCanCreateAt(Page, HomePage)
def test_can_only_create_homepage_under_root(self):
self.assertAllowedParentPageTypes(
HomePage,
{Page},
msg="HomePage should only be added as child of Page (root)",
)
class HomePageModelTests(TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=1)
"""
Create a homepage which satisfies all required fields for positive test.
"""
self.homepage = HomePageFactory.build_with_fk_objs_committed()
self.root_page.add_child(instance=self.homepage)
"""
Set up children pages for TOC
5 IndexPage with 4 children InformationPage each
"""
self.index_pages = []
for i in range(5):
index_page = IndexPageFactory.build()
self.homepage.add_child(instance=index_page)
self.index_pages.append(index_page)
for j in range(4):
information_page = InformationPageFactory.build()
index_page.add_child(instance=information_page)
"""
Set up information page as children of homepage
"""
self.information_page = InformationPageFactory.build()
self.homepage.add_child(instance=self.information_page)
def test_hero_validation_when_no_image(self):
with self.assertRaises(ValidationError):
self.homepage.hero_image.delete()
self.homepage.save()
def test_hero_validation_when_no_strapline(self):
with self.assertRaises(ValidationError):
self.homepage.strapline = None
self.homepage.save()
def test_child_sections_types(self):
# IndexPage can only be created as direct children of homepage, so we don't have to test for nested IndexPage
self.assertEqual(
len(self.homepage.child_sections),
len(self.index_pages),
msg="HomePage.child_sections should get IndexPage pages under the homepage, nothing more.",
)
self.assertTrue(
len(self.homepage.child_sections) < len(self.homepage.get_children()),
msg="Homepage.child_sections should not include pages that are not IndexPage.",
)
def test_child_sections_only_get_published_sections(self):
self.index_pages[0].unpublish()
self.assertEqual(
len(self.homepage.child_sections),
len(self.index_pages) - 1,
msg="HomePage.child_sections should not include unpublished pages.",
)
def test_child_sections_only_get_public_sections(self):
self.index_pages[0].view_restrictions.create(password="test")
self.assertEqual(
len(self.homepage.child_sections),
len(self.index_pages) - 1,
msg="HomePage.child_sections should not include private pages.",
)
def test_child_sections_sortorder(self):
"""
Test that the queryset for IndexPage uses Wagtail explorer sort order
"""
section_page = self.index_pages[0]
original_order = list(
self.homepage.child_sections.values_list("title", flat=True)
)
# Move self.index_pages[0]'s sortoder to last
section_page.path = IndexPage._get_children_path_interval(self.homepage.path)[1]
section_page.save()
self.assertNotEqual(
original_order,
list(self.homepage.child_sections.values_list("title", flat=True)),
msg="HomePage.child_sections should sort by page path (Wagtail explorer custom sort).",
)
"""
Testing IndexPage.featured_pages
This is also covered in IndexPageModelTests(). However we are also testing here
in case someone decides to change how it behaves on IndexPage and doesn't realise
it also affects HomePage.
"""
def test_child_sections_returns_max_3_grandchildren(self):
# We have initially created 4 children under self.index_pages[0]
self.assertNotEqual(
len(self.index_pages[0].featured_pages),
len(self.index_pages[0].get_children().live().public()),
msg="IndexPage.featured_pages should be limited.",
)
self.assertLessEqual(
len(self.index_pages[0].featured_pages),
3,
msg="IndexPage.featured_pages should be limited to max 3.",
)
def test_child_sections_returns_live_grandchildren(self):
# Unpublish 2 of the 4 children
children = self.index_pages[0].featured_pages
children[0].unpublish()
children[1].unpublish()
self.assertNotEqual(
len(self.index_pages[0].featured_pages),
len(self.index_pages[0].get_children().public()[:3]),
msg="IndexPage.featured_pages should not include unpublished pages.",
)
def test_child_sections_returns_public_grandchildren(self):
section_page = self.index_pages[0]
section_page.get_children().first().delete() # delete 1 so we only have 3 to start with
section_page.get_children().last().view_restrictions.create(password="test")
self.assertEqual(
len(section_page.featured_pages),
len(section_page.get_children().live()) - 1,
msg="IndexPage.featured_pages should not include private pages.",
)
def test_child_sections_grandchildren_sortorder(self):
"""
Test that the queryset grandchildren uses Wagtail explorer sort order
"""
section_page = self.index_pages[0]
child_page = section_page.featured_pages.first()
original_order = list(
section_page.featured_pages.values_list("title", flat=True)
)
# Move childpage's sortoder to last
child_page.path = InformationPage._get_children_path_interval(
section_page.path
)[1]
child_page.save()
self.assertNotEqual(
original_order,
list(section_page.featured_pages.values_list("title", flat=True)),
msg="IndexPage.featured_pages should sort by page path (Wagtail explorer custom sort).",
)
|
the-stack_0_4091 | gab = ["A", "C", "E", "B", "D", "B", "B", "C", "A", "E"]
n = []
while True:
p = []
print('-'*50)
    nome = input('CANDIDATE NAME (type N to exit): ').upper()
if nome == "N":
break
else:
ac = 0
p.append(nome)
for i in range(1, 11):
print('-'*50)
            resposta = input('ENTER AN ANSWER FROM A TO E: ').upper()
if resposta == gab[i-1]:
ac += 1
p.append(resposta)
p.append(ac)
n.append(p)
for i in range(0, len(n)):
    print('FINAL SCORE ->', n[i][0], ": ", n[i][11])
|
the-stack_0_4092 | from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.views.generic import TemplateView
from .forms import PostForm, CommentForm
from .models import Post, Comment
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
@login_required
def get_posts(request):
if request.method == 'POST':
if 'post' in request.POST:
postForm = PostForm(request.POST)
if postForm.is_valid():
post = postForm.save(commit=False)
post.author = request.user
post.save()
return redirect('cabPosts:posts')
else:
commentForm=CommentForm(request.POST)
if commentForm.is_valid():
post_id = request.POST['post_id']
post_instance = get_object_or_404(Post, id=post_id)
comment = commentForm.save(commit=False)
comment.name = request.user
comment.post = post_instance
comment.email = request.user.email
comment.save()
return redirect('cabPosts:posts')
else:
return render(request,'500.html',{})
else:
postForm = PostForm()
posts = Post.objects.all()
commentForm = CommentForm()
comments=Comment.objects.all()
args = {'postForm':postForm, 'posts':posts ,'commentForm':commentForm,'comments':comments}
return render(request, 'cabPosts/posts.html', args)
|
the-stack_0_4093 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
"""Describes the format of Error response.
:param code: Error code
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, code=None, message=None):
super(ErrorResponse, self).__init__()
self.code = code
self.message = message
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
|
the-stack_0_4094 | #!/usr/bin/env python3
# -*- coding: utf-8 -*
import sys
sys.path.append('../') # or just install the module
sys.path.append('../../fuzzy-tools') # or just install the module
sys.path.append('../../astro-lightcurves-handler') # or just install the module
sys.path.append('../../astro-lightcurves-fats') # or just install the module
###################################################################################################################################################
import argparse
from fuzzytools.prints import print_big_bar
parser = argparse.ArgumentParser(prefix_chars='--')
parser.add_argument('--method', type=str)
parser.add_argument('--kf', type=str)
parser.add_argument('--mid', type=str, default='0')
parser.add_argument('--mode', type=str, default='all')
parser.add_argument('--classifier_mids', type=int, default=2)
main_args = parser.parse_args()
print_big_bar()
###################################################################################################################################################
import numpy as np
from fuzzytools.files import load_pickle, save_pickle, get_dict_from_filedir
from lcfats.files import load_features
from fuzzytools.progress_bars import ProgressBar
from lcfats.classifiers import train_classifier, evaluate_classifier
import pandas as pd
filedir = f'../../surveys-save/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method={main_args.method}.splcds'
filedict = get_dict_from_filedir(filedir)
rootdir = filedict['_rootdir']
cfilename = filedict['_cfilename']
lcdataset = load_pickle(filedir)
lcset_info = lcdataset['raw'].get_info()
lcdataset.only_keep_kf(main_args.kf) # saves ram
# print(lcdataset)
# for train_config in ['r']:
for train_config in ['r', 's', 'r+s']:
for classifier_mid in range(0, main_args.classifier_mids):
print(f'training brf for train_config={train_config}; kf={main_args.kf}; mode={main_args.mode}; method={main_args.method}; mid={main_args.mid}c{classifier_mid}')
train_df_x_r, train_df_y_r = load_features(f'../save/fats/{cfilename}/{main_args.kf}@train.df', main_args.mode)
if train_config=='r':
k = 1 # 1 s_repeats*2
train_df_x = pd.concat([train_df_x_r]*k, axis='rows')
train_df_y = pd.concat([train_df_y_r]*k, axis='rows')
if train_config=='s':
k = 1 # 1 2
train_df_x = pd.concat([train_df_x_s]*k, axis='rows')
train_df_y = pd.concat([train_df_y_s]*k, axis='rows')
if train_config=='r+s':
train_df_x = pd.concat([train_df_x_r]*s_repeats+[train_df_x_s], axis='rows')
train_df_y = pd.concat([train_df_y_r]*s_repeats+[train_df_y_s], axis='rows')
features = list(train_df_x.columns)
val_df_x, val_df_y = load_features(f'../save/fats/{cfilename}/{main_args.kf}@val.df', main_args.mode)
brf_d = train_classifier(train_df_x, train_df_y, val_df_x, val_df_y, lcset_info,
max_samples=len(train_df_x_r),
)
d = evaluate_classifier(brf_d, f'../save/fats/{cfilename}/{main_args.kf}@test.df', main_args.mode, lcset_info)
save_rootdir = f'../save'
save_filedir = f'{save_rootdir}/exp=rf_eval~train_config={train_config}~mode={main_args.mode}/{cfilename}/{main_args.kf}@test/id={main_args.mid}c{classifier_mid}.d'
save_pickle(save_filedir, d) |
the-stack_0_4096 | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from typing import Any, List, Tuple, Union
import sys
import random
def set_seed(seed=None):
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = True
class Logger(object):
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
def __init__(self, file_name: str = None, file_mode: str = "a", should_flush: bool = True, append=False):
self.file = None
if append:
file_mode = 'a'
else:
file_mode = 'w'
self.file = open(file_name, file_mode)
self.should_flush = should_flush
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __enter__(self) -> "Logger":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def write(self, text: str) -> None:
"""Write text to stdout (and a file) and optionally flush."""
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
return
if self.file is not None:
self.file.write(text)
self.stdout.write(text)
if self.should_flush:
self.flush()
def flush(self) -> None:
"""Flush written text to both stdout and a file, if open."""
if self.file is not None:
self.file.flush()
self.stdout.flush()
def close(self) -> None:
"""Flush, close possible files, and remove stdout/stderr mirroring."""
self.flush()
# if using multiple loggers, prevent closing in wrong order
if sys.stdout is self:
sys.stdout = self.stdout
if sys.stderr is self:
sys.stderr = self.stderr
if self.file is not None:
self.file.close()
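# Usage sketch (hypothetical file name): mirror everything written to stdout/stderr
# into "run.log" for the lifetime of the context manager.
#     with Logger("run.log"):
#         print("this line goes to the console and to run.log")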
def write_loss(iterations, trainer, train_writer, prefix):
members = [attr for attr in dir(trainer) \
if not callable(getattr(trainer, attr)) and not attr.startswith("__") and (
'loss' in attr or 'grad' in attr or 'nwd' in attr ) and 'name' not in attr and 'pool' not in attr]
for m in members:
train_writer.add_scalar(prefix+'/'+m, getattr(trainer, m), iterations + 1)
def format_time(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
else:
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def str2bool(x):
return x.lower() in ('true')
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def get_model_list(dirname, key, exclude='latest'):
if os.path.exists(dirname) is False:
return None
gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f and exclude not in f]
if gen_models is None:
return None
gen_models.sort()
last_model_name = gen_models[-1]
return last_model_name
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
def create_image_grid(images, grid_size=None):
assert images.ndim == 3 or images.ndim == 4
num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]
if grid_size is not None:
grid_w, grid_h = tuple(grid_size)
else:
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
for idx in range(num):
x = (idx % grid_w) * img_w
y = (idx // grid_w) * img_h
grid[..., y : y + img_h, x : x + img_w] = images[idx]
return grid
def convert_to_pil_image(image, drange=[-1,1]):
assert image.ndim == 2 or image.ndim == 3
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0] # grayscale CHW => HW
elif image.shape[1]>image.shape[0]:
image = image.transpose(1, 2, 0) # CHW -> HWC
image = adjust_dynamic_range(image, drange, [0,255])
image = np.rint(image).clip(0, 255).astype(np.uint8)
fmt = 'RGB' if image.ndim == 3 else 'L'
return Image.fromarray(image, fmt)
def save_images(image, filename, drange=[-1,1], quality=95):
img = convert_to_pil_image(image, drange)
if '.jpg' in filename:
img.save(filename, "JPEG", quality=quality, optimize=True)
else:
img.save(filename)
def to_var( x):
"""Converts numpy to variable."""
if torch.cuda.is_available():
x = x.cuda()
return torch.autograd.Variable(x)
def to_data(x):
"""Converts variable to numpy."""
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy()
def save_image_grid(images, filename, drange=[-1,1], grid_size=None):
convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename) |
the-stack_0_4100 | """add project settings
Revision ID: 1bb8cb3abf60
Revises: 393a0cce62c7
Create Date: 2018-08-22 15:20:47.132129
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '1bb8cb3abf60'
down_revision = '393a0cce62c7'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('mm_project_settings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('project', sa.String(), nullable=False),
sa.Column('settings', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('mm_project_settings_user',
sa.Column('mm_project_settings_id', sa.Integer(), nullable=True),
sa.Column('mb_user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['mb_user_id'], ['mb_user.mb_user_id'], ),
sa.ForeignKeyConstraint(['mm_project_settings_id'], ['mm_project_settings.id'], )
)
def downgrade():
op.drop_table('mm_project_settings_user')
op.drop_table('mm_project_settings')
|
the-stack_0_4102 | from __future__ import absolute_import, division, print_function
import os,sys
from iotbx import pdb
from iotbx import reflection_file_reader
from iotbx import file_reader
from mmtbx.refinement.real_space import individual_sites
import mmtbx
import libtbx.phil.command_line
from six.moves import range
master_phil = libtbx.phil.parse("""
flip_base {
pdb_file = None
.type = path
.help = '''input PDB file'''
reflection_file = None
.type = path
.help = '''Reflection file'''
out_pdb_file = None
.type = str
    .help = '''output PDB file'''
chain = None
.type = str
.help = '''Chain of the residue that is to be flipped'''
alt_loc = None
.type = str
.help = '''Alternate location of the residue that is to be flipped'''
res_num = None
.type = int
.help = '''Residue number of the residue that is to be flipped'''
n_refine_cycles = 3
.type = int
.help = '''Number of real-space refinement cycles'''
help = False
.type = bool
.help = '''Show help message'''
}
""", process_includes=True)
def usage(msg='', log=sys.stderr):
s = '''
******************************************************************************
Usage :
python.phenix flipbase.py xxxx.mtz yyyy.pdb chain=A res_num=1
Will flip base of chain A residue 1 of yyyy.pdb and do a real-space
refinement using xxxx.mtz.
Required :
pdb_file input PDB file
reflection_file Reflection file
chain Chain of the residue that is to be flipped
res_num Residue number of the residue that is to be flipped
Options :
out_pdb_file input PDB file
alt_loc Alternate location of the residue that is to be flipped
n_refine_cycles Number of real-space refinement cycles
help Show help message
******************************************************************************
'''
if msg != '' :
s = '*'*79 + '\n\n!!!!! %s !!!!!\n' % msg + s
print(s);sys.exit()
base_rotation_axes = {
"A" : ["C1'", "N9"],
"G" : ["C1'", "N9"],
"C" : ["C1'", "N1"],
"T" : ["C1'", "N1"],
"U" : ["C1'", "N1"],
}
base_rotatable_atoms = {
"A" : ["N1", "C2", "H2", "N3", "C4", "C5", "C6", "N6", "H61", "H62", "N7",
"C8", "H8"],
"G" : ["N1", "H1", "C2", "N2", "H21", "H22", "N3", "C4", "C5", "C6", "O6",
"N7", "C8", "H8"],
"C" : ["C2", "O2", "N3", "C4", "N4", "H41", "H42", "C5", "H5", "C6", "H6"],
"T" : ["C2", "O2", "N3", "H3", "C4", "O4", "C5", "C7", "H71", "H72", "H73",
"C6", "H6"],
"U" : ["C2", "O2", "N3", "H3", "C4", "O4", "C5", "H5", "C6", "H6"],
}
def flip_base(atom_group, angle=180):
import scitbx.matrix
axis_point_1 = axis_point_2 = None
rotateable_atoms = []
base_name = atom_group.resname.strip()
if ("r" in base_name):
    base_name = base_name.replace("r", "")
elif (base_name.startswith("D") and len(base_name) == 2):
base_name = base_name[1]
assert base_name in base_rotation_axes, base_name
for atom in atom_group.atoms():
atom_name = atom.name.strip()
if (atom_name == base_rotation_axes[base_name][0]):
axis_point_1 = atom.xyz
elif (atom_name == base_rotation_axes[base_name][1]):
axis_point_2 = atom.xyz
elif (atom_name in base_rotatable_atoms[base_name]):
rotateable_atoms.append(atom)
if (None in [axis_point_1, axis_point_2]):
raise RuntimeError("Missing atom(s) for rotateable axis.")
elif (len(rotateable_atoms) == 0):
raise RuntimeError("Missing nucleotide base.")
for atom in rotateable_atoms :
atom.xyz = scitbx.matrix.rotate_point_around_axis(
axis_point_1=axis_point_1,
axis_point_2=axis_point_2,
point=atom.xyz,
angle=angle,
deg=True)
def get_target_map(reflection_file_name, log=sys.stderr):
miller_arrays = reflection_file_reader.any_reflection_file(file_name =
reflection_file_name).as_miller_arrays()
ma = miller_arrays[0]
fft_map = ma.fft_map(resolution_factor=0.25)
fft_map.apply_sigma_scaling()
print("\nUsing sigma scaled map.\n", file=log)
target_map = fft_map.real_map_unpadded()
return target_map
def flip_and_refine(pdb_hierarchy,
xray_structure,
target_map,
geometry_restraints_manager,
chain,
res_num,
alt_loc = None,
n_refine_cycles = 3,
log = sys.stdout):
sites_cart = xray_structure.sites_cart()
ero = False
for ch in pdb_hierarchy.chains():
if ch.id.strip() != chain : continue
for rg in ch.residue_groups():
if rg.resseq_as_int() != res_num : continue
if rg.have_conformers() and not alt_loc :
s = 'Specified residue has alternate conformations. Please specify '
raise RuntimeError(s + 'alt_loc on the command line')
for residue in rg.atom_groups():
if alt_loc and alt_loc != residue.altloc.strip():
continue
flip_base(residue, angle=180)
sites_cart.set_selected(residue.atoms().extract_i_seq(),
residue.atoms().extract_xyz())
xray_structure = xray_structure.replace_sites_cart(sites_cart)
sele = residue.atoms().extract_i_seq()
print('real-space refinement BEGIN'.center(79,'*'), file=log)
for i in range(n_refine_cycles):
print('real-space refinement cycle %i...' % (i + 1), file=log)
ero = individual_sites.easy(
map_data = target_map,
xray_structure = xray_structure,
pdb_hierarchy = pdb_hierarchy,
geometry_restraints_manager = geometry_restraints_manager,
selection = sele)
print('real-space refinement FINISHED'.center(79,'*'), file=log)
if not ero : raise RuntimeError('Specified residue not found')
return ero.pdb_hierarchy
def run(args):
# phil parsing----------------------------------------------------------
interpreter = libtbx.phil.command_line.argument_interpreter(master_phil=master_phil)
sources = []
for arg in args:
if os.path.isfile(arg): #Handles loose filenames
input_file = file_reader.any_file(arg)
if (input_file.file_type == "pdb"):
sources.append(interpreter.process(arg="pdb_file=\"%s\"" % arg))
if (input_file.file_type == "hkl"):
sources.append(interpreter.process(arg="reflection_file=\"%s\"" % arg))
elif (input_file.file_type == "phil"):
sources.append(input_file.file_object)
else: #Handles arguments with xxx=yyy formatting
arg_phil = interpreter.process(arg=arg)
sources.append(arg_phil)
work_phil = master_phil.fetch(sources=sources)
work_params = work_phil.extract()
params = work_params.flip_base
if work_params.flip_base.pdb_file == None :
usage('PDB file not provided!')
if work_params.flip_base.reflection_file == None :
usage('Reflection file not provided!')
if work_params.flip_base.chain == None :
usage('chain not provided!')
if work_params.flip_base.res_num == None :
    usage('res_num not provided!')
if work_params.flip_base.out_pdb_file == None :
fn = work_params.flip_base.pdb_file.replace('.pdb','_baseflip.pdb')
work_params.flip_base.out_pdb_file = fn
#usage('out_pdb_file file not provided!')
params = work_params.flip_base
if params.help:
usage()
sys.exit()
# end phil parsing ------------------------------------------------------
pdb_file_name = params.pdb_file
reflection_file_name = params.reflection_file
log = sys.stdout
  print('\ngetting target_map...\n', file=log)
target_map = get_target_map(reflection_file_name, log)
ppf = mmtbx.utils.process_pdb_file_srv(log=False).process_pdb_files(
[pdb_file_name])[0]
grm = mmtbx.restraints.manager(
geometry = ppf.geometry_restraints_manager(show_energies = False),
normalization = True)
pdb_hierarchy = ppf.all_chain_proxies.pdb_hierarchy
pdb_hierarchy.atoms().reset_i_seq()
xray_structure = ppf.xray_structure(show_summary = False)
flip_hierarchy = flip_and_refine(pdb_hierarchy,
xray_structure,
target_map = target_map,
geometry_restraints_manager = grm,
chain = params.chain,
res_num = params.res_num,
alt_loc = params.alt_loc,
n_refine_cycles = params.n_refine_cycles,
log= log)
flip_hierarchy.write_pdb_file(params.out_pdb_file)
  print('\nOutput written to %s' % params.out_pdb_file, file=log)
if __name__ == "__main__":
run(sys.argv[1:])
|
the-stack_0_4105 | import os
import sys
sys.path.insert(0, os.getcwd())
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import Linear
import torch.optim as optim
import networkx as nx
from torch_geometric.data import InMemoryDataset, Data
from torch_geometric.nn import GCNConv
import argparse
import mlflow
from fedgraphconv.prep_mhealth import prep_mhealth
from fedgraphconv.prep_wisdm import prep_wisdm
from fedgraphconv.data_utils import HARData, HARDataCentral
from fedgraphconv.models import GCN_mhealth, GCN_mhealth_Attn, GCN_wisdm, GCN_wisdm_Attn
from fedgraphconv.fed_utils import average_weights
import time
import tqdm
import random
import copy
import datetime as dttm
since = dttm.datetime.now()
since_str = dttm.datetime.strftime(since, '%d-%m-%y %H:%M:%S')
parser = argparse.ArgumentParser()
parser.add_argument('--data',
default= 'wisdm',
help = 'Dataset to use')
parser.add_argument('--num_sample',
default= 32,
type= int,
help = 'Number of samples in each window')
parser.add_argument('--dist_thresh',
default= 0.3,
type = float,
help = 'Minimum euclidean distance to draw an edge')
parser.add_argument('--train_prop',
default= 0.7,
type = float,
help = 'Proportion of data to include in training.')
parser.add_argument('--local_epochs',
default= 10,
type = int,
help = 'Number of local epochs to run')
parser.add_argument('--batch_size',
default= 4,
type = int,
help = 'Batch size in each iteration')
parser.add_argument('--lr',
default= 0.01,
type = float,
help = 'Learning rate')
parser.add_argument('--num_rounds',
default= 10,
type = int,
help = 'Number of federated rounds')
parser.add_argument('--fl_sample',
default= 0.4,
type = float,
help = 'Proportion of agents that participate in each federation round')
parser.add_argument('--attention',
default=False,
help = 'Use graph attention instead of convolution.')
def train(data, criterion):
model.train()
optimizer.zero_grad()
out = model(data.x, data.edge_index)
y = data.y.squeeze().t() - 1
loss = criterion(out[data.train_mask], y[data.train_mask] )
loss.backward()
optimizer.step()
return loss
def evaluate(data):
global_model.eval()
y = data.y.squeeze().t() - 1
with torch.no_grad():
out = global_model(data.x, data.edge_index)
accuracy = torch.mean((torch.argmax(out[~data.train_mask] , 1) == y[~data.train_mask]).float())
return accuracy
if __name__ == '__main__':
args = parser.parse_args()
if args.attention:
mlflow.set_experiment('gnn_federated_attention')
else:
print("setting attention to false")
mlflow.set_experiment('gnn_federated_1')
DATADIR = 'data/processed'
if args.data == 'mhealth':
prep_mhealth(args.num_sample, args.dist_thresh, args.train_prop)
num_class = 12
input_dim = 23
DATADIR = 'data/processed/mhealth'
if args.attention:
global_model = GCN_mhealth_Attn(input_dim, num_class)
else:
global_model = GCN_mhealth(input_dim, num_class)
elif args.data == 'wisdm':
prep_wisdm(args.num_sample, args.dist_thresh, args.train_prop)
num_class = 6
input_dim = 9
DATADIR = 'data/processed/wisdm'
if args.attention:
global_model = GCN_wisdm_Attn(input_dim, num_class)
else:
global_model = GCN_wisdm(input_dim, num_class)
mlflow.set_tag('dataset', args.data)
FL_AGENTS = os.listdir(DATADIR)
NUM_ROUNDS = args.num_rounds
FL_SAMPLE = args.fl_sample
EPOCHS = args.local_epochs
mlflow.log_params({
'num_sample': args.num_sample,
'dist_thresh': args.dist_thresh,
'train_prop' : args.train_prop,
'local_epochs' : EPOCHS,
'lr': args.lr,
'num_rounds': NUM_ROUNDS,
'fl_sample': FL_SAMPLE
})
excel = []
for each_round in tqdm.tqdm(range(NUM_ROUNDS)):
agents_to_train = random.sample(FL_AGENTS, k= int(FL_SAMPLE * len(FL_AGENTS)))
model_list = []
metrics = {}
_n = 0
_a = 0
for each_agent in agents_to_train:
# read the data.
dataset = HARData(os.path.join(DATADIR, str(each_agent)))[0]
loss = nn.CrossEntropyLoss()
model = copy.deepcopy(global_model)
optimizer = optim.Adam(model.parameters(), args.lr)
for epoch in range(EPOCHS):
loss_ = train(dataset, loss)
model_list.append(model.state_dict())
# average weight at end of round.
avg_weights = average_weights(model_list)
global_model.load_state_dict(avg_weights)
# get accuracy at end of round.
dataset = HARDataCentral(DATADIR)
i = 1
for each_data in dataset:
accuracy = evaluate(each_data) # evaluate the global model on each data.
metrics['accuracy-agent_{0}'.format(i)]= accuracy.item()
_n += each_data.x[~each_data.train_mask].size()[0]
_a += each_data.x[~each_data.train_mask].size()[0] * accuracy.item()
i+=1
metrics['accuracy'] = _a / _n
mlflow.log_metrics(metrics, step = each_round)
now = dttm.datetime.now()
        excel.append((each_round, since_str, _a / _n, now.strftime('%y-%m-%d %H:%M:%S'), (now-since).total_seconds()))
df = pd.DataFrame(excel)
df.columns =['epoch', 'time_start', 'accuracy', 'log_time', 'time_elapsed']
df.to_csv('logs_{0}_gnn_federated.csv'.format(args.data), index= None)
|
the-stack_0_4106 | import sys
sys.path.insert(0, '/home/paul/.conda/envs/tensorflow/lib/python3.6/site-packages')
import keras.preprocessing.image
import deepometry.image.iterator_balanced, deepometry.image.iterator
class ImageDataGenerator(keras.preprocessing.image.ImageDataGenerator):
def __init__(self,
height_shift_range=0.0,
horizontal_flip=False,
preprocessing_function=None,
rotation_range=0.0,
vertical_flip=False,
width_shift_range=0.0):
super(ImageDataGenerator, self).__init__(
height_shift_range=height_shift_range,
horizontal_flip=horizontal_flip,
preprocessing_function=preprocessing_function,
rotation_range=rotation_range,
vertical_flip=vertical_flip,
width_shift_range=width_shift_range
)
def flow(self, x,
y=None,
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix="",
save_format="tif",
balance=True,
mixup_alpha=0.0):
if balance:
return deepometry.image.iterator_balanced.NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
mixup_alpha=mixup_alpha
)
else:
return deepometry.image.iterator.NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
mixup_alpha=mixup_alpha
)
def flow_from_directory(self, directory,
target_size=(48, 48),
color_mode="rgb",
classes=None,
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix="",
save_format="tif",
follow_links=False):
raise NotImplementedError()
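if __name__ == "__main__":
    # Hypothetical smoke test, not part of deepometry: the array shapes,
    # label encoding and augmentation parameters below are arbitrary
    # assumptions; running it requires keras plus the deepometry iterator
    # modules imported above.
    import numpy as np

    x = np.random.rand(64, 48, 48, 3).astype(np.float32)  # N, H, W, C images
    y = np.random.randint(0, 4, size=(64,))                # integer class labels

    generator = ImageDataGenerator(horizontal_flip=True, rotation_range=90.0)
    balanced_iterator = generator.flow(x, y, batch_size=8, balance=True)
    print(type(balanced_iterator).__name__)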
|
the-stack_0_4108 | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import os
import multiprocessing as mp
from simple_systolic_array import P, N, make_sdfg
from dace.config import Config
import dace.dtypes
import numpy as np
def run_test(do_async):
Config.set("compiler", "intel_fpga", "launch_async", value=do_async)
name = "async_test"
sdfg = make_sdfg(name)
sdfg.specialize({"P": P.get(), "N": N.get()})
# We don't care about the result, as long as it compiles and runs
sdfg(A=A)
if __name__ == "__main__":
N.set(128)
P.set(4)
Config.set("compiler", "fpga_vendor", value="intel_fpga")
Config.set("compiler", "intel_fpga", "mode", value="emulator")
A = np.empty((N.get()), dtype=np.int32)
for v in [False, True]:
# Has to be a separate process, as the Intel FPGA runtime cannot be
# initialized twice in the same executable
p = mp.Process(target=run_test, args=(v,))
p.start()
p.join()
|
the-stack_0_4110 | from com.bridgelabz.utility.queue import Queue
class Banking:
def run(self):
queue=Queue()
money=0
char ='y'
while(char!='n'):
choice=input("select e for enqueue and d for dequeue")
if(choice=='e'):
name=input("enter name")
queue.enqueue(name)
select=input("enter w for withdraw and d for deposit")
if(select=='w'):
amount=int(input("withdraw amount"))
if(money<amount):
print("insufficient funds")
else:
print("funds added")
money=money+amount
else:
amount = int(input("deposit amount"))
                    money=money+amount
                    print("funds added")
else:
queue.dequeue()
char=input("do you want to continue")
queue.display()
return
Banking().run()
|
the-stack_0_4111 | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Modifications copyright (c) 2021 DocYard Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import cv2
import numpy as np
__all__ = ["SASTProcessTrain"]
class SASTProcessTrain(object):
def __init__(
self,
image_shape=[512, 512],
min_crop_size=24,
min_crop_side_ratio=0.3,
min_text_size=10,
max_text_size=512,
**kwargs
):
self.input_size = image_shape[1]
self.min_crop_size = min_crop_size
self.min_crop_side_ratio = min_crop_side_ratio
self.min_text_size = min_text_size
self.max_text_size = max_text_size
def quad_area(self, poly):
"""
compute area of a polygon
:param poly:
:return:
"""
edge = [
(poly[1][0] - poly[0][0]) * (poly[1][1] + poly[0][1]),
(poly[2][0] - poly[1][0]) * (poly[2][1] + poly[1][1]),
(poly[3][0] - poly[2][0]) * (poly[3][1] + poly[2][1]),
(poly[0][0] - poly[3][0]) * (poly[0][1] + poly[3][1]),
]
return np.sum(edge) / 2.0
def gen_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
if True:
rect = cv2.minAreaRect(
poly.astype(np.int32)
) # (center (x,y), (width, height), angle of rotation)
rect[0]
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = (
np.linalg.norm(box[(i + 0) % 4] - poly[0])
+ np.linalg.norm(
box[(i + 1) % 4] - poly[point_num // 2 - 1]
)
+ np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2])
+ np.linalg.norm(box[(i + 3) % 4] - poly[-1])
)
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad
def check_and_validate_polys(self, polys, tags, xxx_todo_changeme):
"""
check so that the text poly is in the same direction,
and also filter some invalid polygons
:param polys:
:param tags:
:return:
"""
(h, w) = xxx_todo_changeme
if polys.shape[0] == 0:
return polys, np.array([]), np.array([])
polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1)
polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1)
validated_polys = []
validated_tags = []
hv_tags = []
for poly, tag in zip(polys, tags):
quad = self.gen_quad_from_poly(poly)
p_area = self.quad_area(quad)
if abs(p_area) < 1:
print("invalid poly")
continue
if p_area > 0:
if not tag:
print("poly in wrong direction")
tag = True # reversed cases should be ignore
poly = poly[
(0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1), :
]
quad = quad[(0, 3, 2, 1), :]
len_w = np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(
quad[3] - quad[2]
)
len_h = np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(
quad[1] - quad[2]
)
hv_tag = 1
if len_w * 2.0 < len_h:
hv_tag = 0
validated_polys.append(poly)
validated_tags.append(tag)
hv_tags.append(hv_tag)
return (
np.array(validated_polys),
np.array(validated_tags),
np.array(hv_tags),
)
def crop_area(
self, im, polys, tags, hv_tags, crop_background=False, max_tries=25
):
"""
make random crop from the input image
:param im:
:param polys:
:param tags:
:param crop_background:
:param max_tries: 50 -> 25
:return:
"""
h, w, _ = im.shape
pad_h = h // 10
pad_w = w // 10
h_array = np.zeros((h + pad_h * 2), dtype=np.int32)
w_array = np.zeros((w + pad_w * 2), dtype=np.int32)
for poly in polys:
poly = np.round(poly, decimals=0).astype(np.int32)
minx = np.min(poly[:, 0])
maxx = np.max(poly[:, 0])
w_array[minx + pad_w : maxx + pad_w] = 1
miny = np.min(poly[:, 1])
maxy = np.max(poly[:, 1])
h_array[miny + pad_h : maxy + pad_h] = 1
# ensure the cropped area not across a text
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return im, polys, tags, hv_tags
for i in range(max_tries):
xx = np.random.choice(w_axis, size=2)
xmin = np.min(xx) - pad_w
xmax = np.max(xx) - pad_w
xmin = np.clip(xmin, 0, w - 1)
xmax = np.clip(xmax, 0, w - 1)
yy = np.random.choice(h_axis, size=2)
ymin = np.min(yy) - pad_h
ymax = np.max(yy) - pad_h
ymin = np.clip(ymin, 0, h - 1)
ymax = np.clip(ymax, 0, h - 1)
# if xmax - xmin < ARGS.min_crop_side_ratio * w or \
# ymax - ymin < ARGS.min_crop_side_ratio * h:
if (
xmax - xmin < self.min_crop_size
or ymax - ymin < self.min_crop_size
):
# area too small
continue
if polys.shape[0] != 0:
poly_axis_in_area = (
(polys[:, :, 0] >= xmin)
& (polys[:, :, 0] <= xmax)
& (polys[:, :, 1] >= ymin)
& (polys[:, :, 1] <= ymax)
)
selected_polys = np.where(
np.sum(poly_axis_in_area, axis=1) == 4
)[0]
else:
selected_polys = []
if len(selected_polys) == 0:
# no text in this area
if crop_background:
return (
im[ymin : ymax + 1, xmin : xmax + 1, :],
polys[selected_polys],
tags[selected_polys],
hv_tags[selected_polys],
)
else:
continue
im = im[ymin : ymax + 1, xmin : xmax + 1, :]
polys = polys[selected_polys]
tags = tags[selected_polys]
hv_tags = hv_tags[selected_polys]
polys[:, :, 0] -= xmin
polys[:, :, 1] -= ymin
return im, polys, tags, hv_tags
return im, polys, tags, hv_tags
def generate_direction_map(self, poly_quads, direction_map):
""""""
width_list = []
height_list = []
for quad in poly_quads:
quad_w = (
np.linalg.norm(quad[0] - quad[1])
+ np.linalg.norm(quad[2] - quad[3])
) / 2.0
quad_h = (
np.linalg.norm(quad[0] - quad[3])
+ np.linalg.norm(quad[2] - quad[1])
) / 2.0
width_list.append(quad_w)
height_list.append(quad_h)
norm_width = max(sum(width_list) / (len(width_list) + 1e-6), 1.0)
average_height = max(sum(height_list) / (len(height_list) + 1e-6), 1.0)
for quad in poly_quads:
direct_vector_full = (
(quad[1] + quad[2]) - (quad[0] + quad[3])
) / 2.0
direct_vector = (
direct_vector_full
/ (np.linalg.norm(direct_vector_full) + 1e-6)
* norm_width
)
direction_label = tuple(
map(
float,
[
direct_vector[0],
direct_vector[1],
1.0 / (average_height + 1e-6),
],
)
)
cv2.fillPoly(
direction_map,
quad.round().astype(np.int32)[np.newaxis, :, :],
direction_label,
)
return direction_map
def calculate_average_height(self, poly_quads):
""""""
height_list = []
for quad in poly_quads:
quad_h = (
np.linalg.norm(quad[0] - quad[3])
+ np.linalg.norm(quad[2] - quad[1])
) / 2.0
height_list.append(quad_h)
average_height = max(sum(height_list) / len(height_list), 1.0)
return average_height
def generate_tcl_label(
self,
hw,
polys,
tags,
ds_ratio,
tcl_ratio=0.3,
shrink_ratio_of_width=0.15,
):
"""
Generate polygon.
"""
h, w = hw
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
score_map = np.zeros(
(
h,
w,
),
dtype=np.float32,
)
tbo_map = np.zeros((h, w, 5), dtype=np.float32)
training_mask = np.ones(
(
h,
w,
),
dtype=np.float32,
)
_ = np.ones((h, w, 3)) * np.array([0, 0, 1]).reshape([1, 1, 3]).astype(
np.float32
)
for _, poly_tag in enumerate(zip(polys, tags)):
poly = poly_tag[0]
tag = poly_tag[1]
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(
poly
)
min_area_quad_h = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[3])
+ np.linalg.norm(min_area_quad[1] - min_area_quad[2])
)
min_area_quad_w = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[1])
+ np.linalg.norm(min_area_quad[2] - min_area_quad[3])
)
if (
min(min_area_quad_h, min_area_quad_w)
< self.min_text_size * ds_ratio
or min(min_area_quad_h, min_area_quad_w)
> self.max_text_size * ds_ratio
):
continue
if tag:
# continue
cv2.fillPoly(
training_mask,
poly.astype(np.int32)[np.newaxis, :, :],
0.15,
)
else:
tcl_poly = self.poly2tcl(poly, tcl_ratio)
tcl_quads = self.poly2quads(tcl_poly)
poly_quads = self.poly2quads(poly)
# stcl map
stcl_quads, quad_index = self.shrink_poly_along_width(
tcl_quads,
shrink_ratio_of_width=shrink_ratio_of_width,
expand_height_ratio=1.0 / tcl_ratio,
)
# generate tcl map
cv2.fillPoly(
score_map, np.round(stcl_quads).astype(np.int32), 1.0
)
# generate tbo map
for idx, quad in enumerate(stcl_quads):
quad_mask = np.zeros((h, w), dtype=np.float32)
quad_mask = cv2.fillPoly(
quad_mask,
np.round(quad[np.newaxis, :, :]).astype(np.int32),
1.0,
)
tbo_map = self.gen_quad_tbo(
poly_quads[quad_index[idx]], quad_mask, tbo_map
)
return score_map, tbo_map, training_mask
def generate_tvo_and_tco(
self, hw, polys, tags, tcl_ratio=0.3, ds_ratio=0.25
):
"""
Generate tcl map, tvo map and tbo map.
"""
h, w = hw
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
poly_mask = np.zeros((h, w), dtype=np.float32)
tvo_map = np.ones((9, h, w), dtype=np.float32)
tvo_map[0:-1:2] = np.tile(np.arange(0, w), (h, 1))
tvo_map[1:-1:2] = np.tile(np.arange(0, w), (h, 1)).T
poly_tv_xy_map = np.zeros((8, h, w), dtype=np.float32)
# tco map
tco_map = np.ones((3, h, w), dtype=np.float32)
tco_map[0] = np.tile(np.arange(0, w), (h, 1))
tco_map[1] = np.tile(np.arange(0, w), (h, 1)).T
poly_tc_xy_map = np.zeros((2, h, w), dtype=np.float32)
poly_short_edge_map = np.ones((h, w), dtype=np.float32)
for poly, poly_tag in zip(polys, tags):
if poly_tag:
continue
# adjust point order for vertical poly
poly = self.adjust_point(poly)
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(
poly
)
min_area_quad_h = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[3])
+ np.linalg.norm(min_area_quad[1] - min_area_quad[2])
)
min_area_quad_w = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[1])
+ np.linalg.norm(min_area_quad[2] - min_area_quad[3])
)
# generate tcl map and text, 128 * 128
tcl_poly = self.poly2tcl(poly, tcl_ratio)
# generate poly_tv_xy_map
for idx in range(4):
cv2.fillPoly(
poly_tv_xy_map[2 * idx],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(min(max(min_area_quad[idx, 0], 0), w)),
)
cv2.fillPoly(
poly_tv_xy_map[2 * idx + 1],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(min(max(min_area_quad[idx, 1], 0), h)),
)
# generate poly_tc_xy_map
for idx in range(2):
cv2.fillPoly(
poly_tc_xy_map[idx],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(center_point[idx]),
)
# generate poly_short_edge_map
cv2.fillPoly(
poly_short_edge_map,
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(max(min(min_area_quad_h, min_area_quad_w), 1.0)),
)
# generate poly_mask and training_mask
cv2.fillPoly(
poly_mask,
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
1,
)
tvo_map *= poly_mask
tvo_map[:8] -= poly_tv_xy_map
tvo_map[-1] /= poly_short_edge_map
tvo_map = tvo_map.transpose((1, 2, 0))
tco_map *= poly_mask
tco_map[:2] -= poly_tc_xy_map
tco_map[-1] /= poly_short_edge_map
tco_map = tco_map.transpose((1, 2, 0))
return tvo_map, tco_map
def adjust_point(self, poly):
"""
adjust point order.
"""
point_num = poly.shape[0]
if point_num == 4:
len_1 = np.linalg.norm(poly[0] - poly[1])
len_2 = np.linalg.norm(poly[1] - poly[2])
len_3 = np.linalg.norm(poly[2] - poly[3])
len_4 = np.linalg.norm(poly[3] - poly[0])
if (len_1 + len_3) * 1.5 < (len_2 + len_4):
poly = poly[[1, 2, 3, 0], :]
elif point_num > 4:
vector_1 = poly[0] - poly[1]
vector_2 = poly[1] - poly[2]
cos_theta = np.dot(vector_1, vector_2) / (
np.linalg.norm(vector_1) * np.linalg.norm(vector_2) + 1e-6
)
theta = np.arccos(np.round(cos_theta, decimals=4))
if abs(theta) > (70 / 180 * math.pi):
index = list(range(1, point_num)) + [0]
poly = poly[np.array(index), :]
return poly
def gen_min_area_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
if point_num == 4:
min_area_quad = poly
center_point = np.sum(poly, axis=0) / 4
else:
rect = cv2.minAreaRect(
poly.astype(np.int32)
) # (center (x,y), (width, height), angle of rotation)
center_point = rect[0]
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = (
np.linalg.norm(box[(i + 0) % 4] - poly[0])
+ np.linalg.norm(
box[(i + 1) % 4] - poly[point_num // 2 - 1]
)
+ np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2])
+ np.linalg.norm(box[(i + 3) % 4] - poly[-1])
)
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad, center_point
def shrink_quad_along_width(
self, quad, begin_width_ratio=0.0, end_width_ratio=1.0
):
"""
Generate shrink_quad_along_width.
"""
ratio_pair = np.array(
[[begin_width_ratio], [end_width_ratio]], dtype=np.float32
)
p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair
p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair
return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]])
def shrink_poly_along_width(
self, quads, shrink_ratio_of_width, expand_height_ratio=1.0
):
"""
shrink poly with given length.
"""
upper_edge_list = []
def get_cut_info(edge_len_list, cut_len):
for idx, edge_len in enumerate(edge_len_list):
cut_len -= edge_len
if cut_len <= 0.000001:
ratio = (cut_len + edge_len_list[idx]) / edge_len_list[idx]
return idx, ratio
for quad in quads:
upper_edge_len = np.linalg.norm(quad[0] - quad[1])
upper_edge_list.append(upper_edge_len)
# length of left edge and right edge.
left_length = (
np.linalg.norm(quads[0][0] - quads[0][3]) * expand_height_ratio
)
right_length = (
np.linalg.norm(quads[-1][1] - quads[-1][2]) * expand_height_ratio
)
shrink_length = (
min(left_length, right_length, sum(upper_edge_list))
* shrink_ratio_of_width
)
# shrinking length
upper_len_left = shrink_length
upper_len_right = sum(upper_edge_list) - shrink_length
left_idx, left_ratio = get_cut_info(upper_edge_list, upper_len_left)
left_quad = self.shrink_quad_along_width(
quads[left_idx], begin_width_ratio=left_ratio, end_width_ratio=1
)
right_idx, right_ratio = get_cut_info(upper_edge_list, upper_len_right)
right_quad = self.shrink_quad_along_width(
quads[right_idx], begin_width_ratio=0, end_width_ratio=right_ratio
)
out_quad_list = []
if left_idx == right_idx:
out_quad_list.append(
[left_quad[0], right_quad[1], right_quad[2], left_quad[3]]
)
else:
out_quad_list.append(left_quad)
for idx in range(left_idx + 1, right_idx):
out_quad_list.append(quads[idx])
out_quad_list.append(right_quad)
return np.array(out_quad_list), list(range(left_idx, right_idx + 1))
def vector_angle(self, A, B):
"""
Calculate the angle between vector AB and x-axis positive direction.
"""
AB = np.array([B[1] - A[1], B[0] - A[0]])
return np.arctan2(*AB)
def theta_line_cross_point(self, theta, point):
"""
Calculate the line through given point and angle in ax + by + c =0 form.
"""
x, y = point
cos = np.cos(theta)
sin = np.sin(theta)
return [sin, -cos, cos * y - sin * x]
def line_cross_two_point(self, A, B):
"""
Calculate the line through given point A and B in ax + by + c =0 form.
"""
angle = self.vector_angle(A, B)
return self.theta_line_cross_point(angle, A)
def average_angle(self, poly):
"""
Calculate the average angle between left and right edge in given poly.
"""
p0, p1, p2, p3 = poly
angle30 = self.vector_angle(p3, p0)
angle21 = self.vector_angle(p2, p1)
return (angle30 + angle21) / 2
def line_cross_point(self, line1, line2):
"""
line1 and line2 in 0=ax+by+c form, compute the cross point of line1 and line2
"""
a1, b1, c1 = line1
a2, b2, c2 = line2
d = a1 * b2 - a2 * b1
if d == 0:
# print("line1", line1)
# print("line2", line2)
print("Cross point does not exist")
return np.array([0, 0], dtype=np.float32)
else:
x = (b1 * c2 - b2 * c1) / d
y = (a2 * c1 - a1 * c2) / d
return np.array([x, y], dtype=np.float32)
def quad2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point. (4, 2)
"""
ratio_pair = np.array(
[[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32
)
p0_3 = poly[0] + (poly[3] - poly[0]) * ratio_pair
p1_2 = poly[1] + (poly[2] - poly[1]) * ratio_pair
return np.array([p0_3[0], p1_2[0], p1_2[1], p0_3[1]])
def poly2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point.
"""
ratio_pair = np.array(
[[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32
)
tcl_poly = np.zeros_like(poly)
point_num = poly.shape[0]
for idx in range(point_num // 2):
point_pair = (
poly[idx]
+ (poly[point_num - 1 - idx] - poly[idx]) * ratio_pair
)
tcl_poly[idx] = point_pair[0]
tcl_poly[point_num - 1 - idx] = point_pair[1]
return tcl_poly
def gen_quad_tbo(self, quad, tcl_mask, tbo_map):
"""
Generate tbo_map for give quad.
"""
# upper and lower line function: ax + by + c = 0;
up_line = self.line_cross_two_point(quad[0], quad[1])
lower_line = self.line_cross_two_point(quad[3], quad[2])
quad_h = 0.5 * (
np.linalg.norm(quad[0] - quad[3])
+ np.linalg.norm(quad[1] - quad[2])
)
quad_w = 0.5 * (
np.linalg.norm(quad[0] - quad[1])
+ np.linalg.norm(quad[2] - quad[3])
)
# average angle of left and right line.
angle = self.average_angle(quad)
xy_in_poly = np.argwhere(tcl_mask == 1)
for y, x in xy_in_poly:
point = (x, y)
line = self.theta_line_cross_point(angle, point)
cross_point_upper = self.line_cross_point(up_line, line)
cross_point_lower = self.line_cross_point(lower_line, line)
# FIX, offset reverse
upper_offset_x, upper_offset_y = cross_point_upper - point
lower_offset_x, lower_offset_y = cross_point_lower - point
tbo_map[y, x, 0] = upper_offset_y
tbo_map[y, x, 1] = upper_offset_x
tbo_map[y, x, 2] = lower_offset_y
tbo_map[y, x, 3] = lower_offset_x
tbo_map[y, x, 4] = 1.0 / max(min(quad_h, quad_w), 1.0) * 2
return tbo_map
def poly2quads(self, poly):
"""
Split poly into quads.
"""
quad_list = []
point_num = poly.shape[0]
# point pair
point_pair_list = []
for idx in range(point_num // 2):
point_pair = [poly[idx], poly[point_num - 1 - idx]]
point_pair_list.append(point_pair)
quad_num = point_num // 2 - 1
for idx in range(quad_num):
# reshape and adjust to clock-wise
quad_list.append(
(np.array(point_pair_list)[[idx, idx + 1]]).reshape(4, 2)[
[0, 2, 3, 1]
]
)
return np.array(quad_list)
def __call__(self, data):
im = data["image"]
text_polys = data["polys"]
text_tags = data["ignore_tags"]
if im is None:
return None
if text_polys.shape[0] == 0:
return None
h, w, _ = im.shape
text_polys, text_tags, hv_tags = self.check_and_validate_polys(
text_polys, text_tags, (h, w)
)
if text_polys.shape[0] == 0:
return None
# set aspect ratio and keep area fix
asp_scales = np.arange(1.0, 1.55, 0.1)
asp_scale = np.random.choice(asp_scales)
if np.random.rand() < 0.5:
asp_scale = 1.0 / asp_scale
asp_scale = math.sqrt(asp_scale)
asp_wx = asp_scale
asp_hy = 1.0 / asp_scale
im = cv2.resize(im, dsize=None, fx=asp_wx, fy=asp_hy)
text_polys[:, :, 0] *= asp_wx
text_polys[:, :, 1] *= asp_hy
h, w, _ = im.shape
if max(h, w) > 2048:
rd_scale = 2048.0 / max(h, w)
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
text_polys *= rd_scale
h, w, _ = im.shape
if min(h, w) < 16:
return None
# no background
im, text_polys, text_tags, hv_tags = self.crop_area(
im, text_polys, text_tags, hv_tags, crop_background=False
)
if text_polys.shape[0] == 0:
return None
# continue for all ignore case
if np.sum((text_tags * 1.0)) >= text_tags.size:
return None
new_h, new_w, _ = im.shape
if (new_h is None) or (new_w is None):
return None
# resize image
std_ratio = float(self.input_size) / max(new_w, new_h)
rand_scales = np.array(
[0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0]
)
rz_scale = std_ratio * np.random.choice(rand_scales)
im = cv2.resize(im, dsize=None, fx=rz_scale, fy=rz_scale)
text_polys[:, :, 0] *= rz_scale
text_polys[:, :, 1] *= rz_scale
# add gaussian blur
if np.random.rand() < 0.1 * 0.5:
ks = np.random.permutation(5)[0] + 1
ks = int(ks / 2) * 2 + 1
im = cv2.GaussianBlur(im, ksize=(ks, ks), sigmaX=0, sigmaY=0)
# add brighter
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 + np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# add darker
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 - np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# Padding the im to [input_size, input_size]
new_h, new_w, _ = im.shape
if min(new_w, new_h) < self.input_size * 0.5:
return None
im_padded = np.ones(
(self.input_size, self.input_size, 3), dtype=np.float32
)
im_padded[:, :, 2] = 0.485 * 255
im_padded[:, :, 1] = 0.456 * 255
im_padded[:, :, 0] = 0.406 * 255
# Random the start position
del_h = self.input_size - new_h
del_w = self.input_size - new_w
sh, sw = 0, 0
if del_h > 1:
sh = int(np.random.rand() * del_h)
if del_w > 1:
sw = int(np.random.rand() * del_w)
# Padding
im_padded[sh : sh + new_h, sw : sw + new_w, :] = im.copy()
text_polys[:, :, 0] += sw
text_polys[:, :, 1] += sh
score_map, border_map, training_mask = self.generate_tcl_label(
(self.input_size, self.input_size), text_polys, text_tags, 0.25
)
# SAST head
tvo_map, tco_map = self.generate_tvo_and_tco(
(self.input_size, self.input_size),
text_polys,
text_tags,
tcl_ratio=0.3,
ds_ratio=0.25,
)
# print("test--------tvo_map shape:", tvo_map.shape)
im_padded[:, :, 2] -= 0.485 * 255
im_padded[:, :, 1] -= 0.456 * 255
im_padded[:, :, 0] -= 0.406 * 255
im_padded[:, :, 2] /= 255.0 * 0.229
im_padded[:, :, 1] /= 255.0 * 0.224
im_padded[:, :, 0] /= 255.0 * 0.225
im_padded = im_padded.transpose((2, 0, 1))
data["image"] = im_padded[::-1, :, :]
data["score_map"] = score_map[np.newaxis, :, :]
data["border_map"] = border_map.transpose((2, 0, 1))
data["training_mask"] = training_mask[np.newaxis, :, :]
data["tvo_map"] = tvo_map.transpose((2, 0, 1))
data["tco_map"] = tco_map.transpose((2, 0, 1))
return data
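if __name__ == "__main__":
    # Hypothetical smoke test with synthetic data; the image size and polygon
    # below are arbitrary assumptions, and __call__ may legitimately return
    # None when the random crop/scale rejects the sample.
    rng = np.random.RandomState(0)
    sample = {
        "image": rng.randint(0, 255, (768, 768, 3)).astype(np.float32),
        "polys": np.array(
            [[[100, 100], [400, 120], [400, 220], [100, 200]]],
            dtype=np.float32,
        ),
        "ignore_tags": np.array([False]),
    }
    out = SASTProcessTrain(image_shape=[512, 512])(sample)
    print("augmented keys:", None if out is None else sorted(out.keys()))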
|
the-stack_0_4112 | import errno
from collections import defaultdict
from select import select
import socket
from circus import util
from circus import logger
from zmq.eventloop import ioloop
class BaseStatsCollector(ioloop.PeriodicCallback):
def __init__(self, streamer, name, callback_time=1., io_loop=None):
ioloop.PeriodicCallback.__init__(self, self._callback,
callback_time * 1000, io_loop)
self.streamer = streamer
self.name = name
def _callback(self):
logger.debug('Publishing stats about {0}'.format(self.name))
for stats in self.collect_stats():
if stats is None:
continue
self.streamer.publisher.publish(self.name, stats)
def collect_stats(self):
# should be implemented in subclasses
raise NotImplementedError() # PRAGMA: NOCOVER
class WatcherStatsCollector(BaseStatsCollector):
def _aggregate(self, aggregate):
res = {'pid': list(aggregate.keys())}
stats = list(aggregate.values())
# aggregating CPU does not mean anything
# but the average can be a good indicator
cpu = [stat['cpu'] for stat in stats]
if 'N/A' in cpu:
res['cpu'] = 'N/A'
else:
try:
res['cpu'] = sum(cpu) / len(cpu)
except ZeroDivisionError:
res['cpu'] = 0.
# aggregating memory does make sense
mem = [stat['mem'] for stat in stats]
if 'N/A' in mem:
res['mem'] = 'N/A'
else:
res['mem'] = sum(mem)
# finding out the older process
ages = [stat['age'] for stat in stats if stat['age'] != 'N/A']
if len(ages) == 0:
res['age'] = 'N/A'
else:
res['age'] = max(ages)
return res
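    # Illustrative (hypothetical) aggregation, with per-process stats trimmed
    # to the fields used above:
    #   _aggregate({1234: {'cpu': 1.0, 'mem': 10.0, 'age': 5.0},
    #               1235: {'cpu': 3.0, 'mem': 20.0, 'age': 'N/A'}})
    #   -> {'pid': [1234, 1235], 'cpu': 2.0, 'mem': 30.0, 'age': 5.0}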
def collect_stats(self):
aggregate = {}
# sending by pids
for pid in self.streamer.get_pids(self.name):
name = None
if self.name == 'circus':
if pid in self.streamer.circus_pids:
name = self.streamer.circus_pids[pid]
try:
info = util.get_info(pid)
aggregate[pid] = info
info['subtopic'] = pid
info['name'] = name
yield info
except util.NoSuchProcess:
# the process is gone !
pass
except Exception as e:
logger.exception('Failed to get info for %d. %s' % (pid,
str(e)))
# now sending the aggregation
yield self._aggregate(aggregate)
# RESOLUTION is a value in seconds that will be used
# to determine the poller timeout of the sockets stats collector
#
# The PeriodicCallback calls the poller every LOOP_RES ms, and block
# for RESOLUTION seconds unless a read ready event occurs in the
# socket.
#
# This timer is used to limit the number of polls done on the
# socket, so the circusd-stats process don't eat all your CPU
# when you have a high-loaded socket.
#
_RESOLUTION = .1
_LOOP_RES = 10
class SocketStatsCollector(BaseStatsCollector):
def __init__(self, streamer, name, callback_time=1., io_loop=None):
super(SocketStatsCollector, self).__init__(streamer, name,
callback_time, io_loop)
self._rstats = defaultdict(int)
self.sockets = [sock for sock, address, fd in self.streamer.sockets]
self._p = ioloop.PeriodicCallback(self._select, _LOOP_RES,
io_loop=io_loop)
def start(self):
self._p.start()
super(SocketStatsCollector, self).start()
def stop(self):
self._p.stop()
BaseStatsCollector.stop(self)
def _select(self):
try:
rlist, wlist, xlist = select(self.sockets, [], [], .01)
except socket.error as err:
if err.errno == errno.EBADF:
return
raise
if len(rlist) == 0:
return
for sock in rlist:
try:
fileno = sock.fileno()
except socket.error as err:
if err.errno == errno.EBADF:
continue
else:
raise
self._rstats[fileno] += 1
def _aggregate(self, aggregate):
raise NotImplementedError()
def collect_stats(self):
# sending hits by sockets
sockets = self.streamer.sockets
if len(sockets) == 0:
yield None
else:
fds = []
for sock, address, fd in sockets:
try:
fileno = sock.fileno()
except socket.error as err:
if err.errno == errno.EBADF:
continue
else:
raise
fds.append((address, fileno, fd))
total = {'addresses': [], 'reads': 0}
# we might lose a few hits here but it's ok
for address, monitored_fd, fd in fds:
info = {}
info['fd'] = info['subtopic'] = fd
info['reads'] = self._rstats[monitored_fd]
total['reads'] += info['reads']
total['addresses'].append(address)
info['address'] = address
self._rstats[monitored_fd] = 0
yield info
yield total
|
the-stack_0_4114 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright 2018 Michael Still and Aptira
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is utility methods that privsep depends on. Privsep isn't allowed
# to depend on anything outside the privsep directory, so these need to be
# here. That said, other parts of nova can call into these utilities if
# needed.
import errno
import mmap
import os
from oslo_log import log as logging
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
def supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
fd = None
try:
fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
# Check is the write allowed with 4096 byte alignment
align_size = 4096
m = mmap.mmap(-1, align_size)
m.write(b"x" * align_size)
os.write(fd, m)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'",
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
finally:
# ensure unlink(filepath) will actually remove the file by deleting
# the remaining link to it in close(fd)
if fd is not None:
os.close(fd)
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
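if __name__ == "__main__":
    # Hypothetical check, not part of nova: probe a scratch directory for
    # O_DIRECT support. The result depends on the filesystem backing the
    # directory (tmpfs, for example, typically does not support direct I/O).
    import tempfile

    with tempfile.TemporaryDirectory() as scratch_dir:
        print("direct I/O supported:", supports_direct_io(scratch_dir))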
|
the-stack_0_4115 | import errno
import json
import logging
import os
import re
import subprocess
import sys
from distutils.util import strtobool
from urllib.parse import parse_qs
sys.path.insert(0, "lib")
import requests # noqa: E402
def get_database_config(development_mode=False):
if any(
[x.startswith("MXRUNTIME_Database") for x in list(os.environ.keys())]
):
return {}
url = get_database_uri_from_vcap()
if url is None:
url = os.environ["DATABASE_URL"]
patterns = [
r"(?P<type>[a-zA-Z0-9]+)://(?P<user>[^:]+):(?P<password>[^@]+)@(?P<host>[^/]+)/(?P<dbname>[^?]*)(?P<extra>\?.*)?", # noqa: E501
r"jdbc:(?P<type>[a-zA-Z0-9]+)://(?P<host>[^;]+);database=(?P<dbname>[^;]*);user=(?P<user>[^;]+);password=(?P<password>.*)$", # noqa: E501
]
supported_databases = {
"postgres": "PostgreSQL",
"postgresql": "PostgreSQL",
"mysql": "MySQL",
"db2": "Db2",
"sqlserver": "SQLSERVER",
}
for pattern in patterns:
match = re.search(pattern, url)
if match is not None:
break
else:
raise Exception(
"Could not parse database credentials from database uri %s" % url
)
database_type_input = match.group("type")
if database_type_input not in supported_databases:
raise Exception("Unknown database type: %s", database_type_input)
database_type = supported_databases[database_type_input]
config = {
"DatabaseType": database_type,
"DatabaseUserName": match.group("user"),
"DatabasePassword": match.group("password"),
"DatabaseHost": match.group("host"),
"DatabaseName": match.group("dbname"),
}
if "extra" in match.groupdict() and match.group("extra"):
extra = match.group("extra").lstrip("?")
jdbc_params = parse_qs(extra)
if "sslmode" in jdbc_params:
sslmode = jdbc_params["sslmode"]
if sslmode and sslmode[0] == "require":
config.update({"DatabaseUseSsl": True})
if development_mode:
config.update(
{
"ConnectionPoolingMaxIdle": 1,
"ConnectionPoolingMaxActive": 20,
"ConnectionPoolingNumTestsPerEvictionRun": 50,
"ConnectionPoolingSoftMinEvictableIdleTimeMillis": 1000,
"ConnectionPoolingTimeBetweenEvictionRunsMillis": 1000,
}
)
elif database_type_input == "mysql":
config.update(
{
"ConnectionPoolingNumTestsPerEvictionRun": 50,
"ConnectionPoolingSoftMinEvictableIdleTimeMillis": 10000,
"ConnectionPoolingTimeBetweenEvictionRunsMillis": 10000,
}
)
return config
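# Illustrative (hypothetical) input/output, with made-up credentials: given
#   DATABASE_URL=postgres://mendix:secret@db.example.com:5432/appdb?sslmode=require
# and no bound VCAP database service, get_database_config() returns roughly
#   {'DatabaseType': 'PostgreSQL', 'DatabaseUserName': 'mendix',
#    'DatabasePassword': 'secret', 'DatabaseHost': 'db.example.com:5432',
#    'DatabaseName': 'appdb', 'DatabaseUseSsl': True}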
def get_vcap_services_data():
if os.environ.get("VCAP_SERVICES"):
return json.loads(os.environ.get("VCAP_SERVICES"))
else:
return {}
def get_vcap_data():
if os.environ.get("VCAP_APPLICATION"):
return json.loads(os.environ.get("VCAP_APPLICATION"))
else:
return {
"application_uris": ["example.com"],
"application_name": "My App",
}
def get_database_uri_from_vcap():
vcap_services = get_vcap_services_data()
for service_type_name in (
"p-mysql",
"p.mysql",
"elephantsql",
"cleardb",
"PostgreSQL",
"dashDB",
"mariadb",
"postgresql",
"rds",
"postgresql_shared",
):
if vcap_services and service_type_name in vcap_services:
return vcap_services[service_type_name][0]["credentials"]["uri"]
if "azure-sqldb" in vcap_services:
return vcap_services["azure-sqldb"][0]["credentials"]["jdbcUrl"]
for key in vcap_services:
try:
uri = vcap_services[key][0]["credentials"]["uri"]
if key.startswith("rds"):
return uri
if key.startswith("dashDB"):
return uri
if uri.startswith("postgres"):
return uri
if uri.startswith("mysql"):
return uri
except (TypeError, KeyError):
pass
return None
def appdynamics_used():
for k, v in os.environ.items():
if k.startswith("APPDYNAMICS_"):
return True
return False
def get_new_relic_license_key():
vcap_services = get_vcap_services_data()
if vcap_services and "newrelic" in vcap_services:
return vcap_services["newrelic"][0]["credentials"]["licenseKey"]
return None
def is_appmetrics_enabled():
return os.getenv("APPMETRICS_TARGET") is not None
def get_tags():
return json.loads(os.getenv("TAGS", os.getenv("DD_TAGS", "[]")))
def get_hostname():
dd_hostname = os.environ.get("DD_HOSTNAME")
if dd_hostname is None:
domain = get_vcap_data()["application_uris"][0].split("/")[0]
dd_hostname = domain + "-" + os.getenv("CF_INSTANCE_INDEX", "")
return dd_hostname
def get_blobstore_url(filename):
main_url = os.environ.get("BLOBSTORE", "https://cdn.mendix.com")
if main_url[-1] == "/":
main_url = main_url[0:-1]
return main_url + filename
def download_and_unpack(url, destination, cache_dir="/tmp/downloads"):
file_name = url.split("/")[-1]
mkdir_p(cache_dir)
mkdir_p(destination)
cached_location = os.path.join(cache_dir, file_name)
logging.debug(
"Looking for {cached_location}".format(cached_location=cached_location)
)
if not os.path.isfile(cached_location):
download(url, cached_location)
logging.debug(
"downloaded to {cached_location}".format(
cached_location=cached_location
)
)
else:
logging.debug(
"found in cache: {cached_location}".format(
cached_location=cached_location
)
)
logging.debug(
"extracting: {cached_location} to {dest}".format(
cached_location=cached_location, dest=destination
)
)
if file_name.endswith(".tar.gz") or file_name.endswith(".tgz"):
unpack_cmd = ["tar", "xf", cached_location, "-C", destination]
if file_name.startswith(("mono-", "jdk-", "jre-")):
unpack_cmd.extend(("--strip", "1"))
subprocess.check_call(unpack_cmd)
else:
raise Exception(
"do not know how to unpack {cached_location}".format(
cached_location=cached_location
)
)
logging.debug(
"source {file_name} retrieved & unpacked in {destination}".format(
file_name=file_name, destination=destination
)
)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_buildpack_loglevel():
if os.getenv("BUILDPACK_XTRACE", "false") == "true":
return logging.DEBUG
else:
return logging.INFO
def download(url, destination):
logging.debug(
"downloading {url} to {destination}".format(
url=url, destination=destination
)
)
with open(destination, "wb") as file_handle:
response = requests.get(url, stream=True)
if not response.ok:
response.raise_for_status()
for block in response.iter_content(4096):
if not block:
break
file_handle.write(block)
def get_existing_directory_or_raise(dirs, error):
for directory in dirs:
if os.path.isdir(directory):
return directory
raise NotFoundException(error)
class NotFoundException(Exception):
pass
def get_java_version(mx_version):
versions = {"7": "7u80", "8u51": "8u51", "8": "8"}
if mx_version >= 6.6:
default = "8"
elif mx_version >= 5.18:
default = "8u51"
else:
default = "7"
main_java_version = os.getenv("JAVA_VERSION", default)
if main_java_version not in list(versions.keys()):
raise Exception(
"Invalid Java version specified: %s" % main_java_version
)
return versions[main_java_version]
def get_mpr_file_from_dir(directory):
mprs = [x for x in os.listdir(directory) if x.endswith(".mpr")]
if len(mprs) == 1:
return os.path.join(directory, mprs[0])
elif len(mprs) > 1:
raise Exception("More than one .mpr file found, can not continue")
else:
return None
def ensure_mxbuild_in_directory(directory, mx_version, cache_dir):
if os.path.isdir(os.path.join(directory, "modeler")):
return
mkdir_p(directory)
url = os.environ.get("FORCED_MXBUILD_URL")
if url:
# don't ever cache with a FORCED_MXBUILD_URL
download_and_unpack(url, directory, cache_dir="/tmp/downloads")
else:
try:
_checkout_from_git_rootfs(directory, mx_version)
except NotFoundException as e:
logging.debug(str(e))
download_and_unpack(
get_blobstore_url(
"/runtime/mxbuild-%s.tar.gz" % str(mx_version)
),
directory,
cache_dir=cache_dir,
)
def _checkout_from_git_rootfs(directory, mx_version):
mendix_runtimes_path = "/usr/local/share/mendix-runtimes.git"
if not os.path.isdir(mendix_runtimes_path):
raise NotFoundException()
env = dict(os.environ)
env["GIT_WORK_TREE"] = directory
# checkout the runtime version
try:
subprocess.check_call(
("git", "checkout", str(mx_version), "-f"),
cwd=mendix_runtimes_path,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return
except Exception:
try:
subprocess.check_call(
(
"git",
"fetch",
"origin",
"refs/tags/{0}:refs/tags/{0}".format(str(mx_version)),
),
cwd=mendix_runtimes_path,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
subprocess.check_call(
("git", "checkout", str(mx_version), "-f"),
cwd=mendix_runtimes_path,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
logging.debug("found mx version after updating runtimes.git")
return
except Exception:
logging.debug("tried updating git repo, also failed")
raise NotFoundException(
"Could not download mxbuild "
+ str(mx_version)
+ " from updated git repo"
)
def _get_env_with_monolib(mono_dir):
env = dict(os.environ)
env["LD_LIBRARY_PATH"] = mono_dir + "/lib"
env["MONO_STRICT_MS_COMPLIANT"] = "yes"
if not os.path.isfile(os.path.join(mono_dir, "lib", "libgdiplus.so")):
raise Exception("libgdiplus.so not found in dir %s" % mono_dir)
return env
def _detect_mono_version(mx_version):
logging.debug(
"Detecting Mono Runtime using mendix version: " + str(mx_version)
)
if mx_version < 7:
target = "mono-3.10.0"
else:
target = "mono-4.6.2.16"
logging.info("Selecting Mono Runtime: " + target)
return target
def _get_mono_path(directory, mono_version):
return get_existing_directory_or_raise(
[
os.path.join(directory, mono_version),
"/opt/" + mono_version,
"/tmp/" + mono_version,
],
"Mono not found",
)
def lazy_remove_file(filename):
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def ensure_and_get_mono(mx_version, cache_dir):
logging.debug(
"ensuring mono for mendix {mx_version}".format(
mx_version=str(mx_version)
)
)
mono_version = _detect_mono_version(mx_version)
fallback_location = "/tmp/opt"
try:
mono_location = _get_mono_path("/tmp/opt", mono_version)
except NotFoundException:
logging.debug("Mono not found in default locations")
download_and_unpack(
get_blobstore_url("/mx-buildpack/" + mono_version + "-mx.tar.gz"),
os.path.join(fallback_location, mono_version),
cache_dir,
)
mono_location = _get_mono_path(fallback_location, mono_version)
logging.debug("Using {mono_location}".format(mono_location=mono_location))
return mono_location
def ensure_and_get_jvm(
mx_version, cache_dir, dot_local_location, package="jdk"
):
logging.debug("Begin download and install java %s" % package)
java_version = get_java_version(mx_version)
rootfs_java_path = "/usr/lib/jvm/jdk-%s-oracle-x64" % java_version
if not os.path.isdir(rootfs_java_path):
logging.debug("rootfs without java sdk detected")
download_and_unpack(
get_blobstore_url(
"/mx-buildpack/%s-%s-linux-x64.tar.gz"
% (package, java_version)
),
os.path.join(
dot_local_location,
"usr/lib/jvm/%s-%s-oracle-x64" % (package, java_version),
),
cache_dir,
)
else:
logging.debug("rootfs with java sdk detected")
logging.debug("end download and install java %s" % package)
return get_existing_directory_or_raise(
[
"/usr/lib/jvm/jdk-%s-oracle-x64" % java_version,
os.path.join(
dot_local_location,
"usr/lib/jvm/%s-%s-oracle-x64" % (package, java_version),
),
],
"Java not found",
)
def i_am_primary_instance():
return os.getenv("CF_INSTANCE_INDEX", "0") == "0"
def bypass_loggregator_logging():
env_var = os.getenv("BYPASS_LOGGREGATOR", "False")
# Throws a useful message if you put in a nonsensical value.
# Necessary since we store these in cloud portal as strings.
try:
bypass_loggregator = strtobool(env_var)
except ValueError as e:
logging.warning(
"Bypass loggregator has a nonsensical value: %s. "
"Falling back to old loggregator-based metric reporting.",
env_var,
)
return False
if bypass_loggregator:
if os.getenv("TRENDS_STORAGE_URL"):
return True
else:
logging.warning(
"BYPASS_LOGGREGATOR is set to true, but no metrics URL is "
"set. Falling back to old loggregator-based metric reporting."
)
return False
return False
def get_metrics_url():
return os.getenv("TRENDS_STORAGE_URL")
|
the-stack_0_4117 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataloader utils functions."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.dataloaders import utils
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
def test_process_empty_source_id(self):
source_id = tf.constant([], dtype=tf.int64)
source_id = tf.strings.as_string(source_id)
self.assertEqual(-1, utils.process_source_id(source_id=source_id))
@parameterized.parameters(
([128, 256], [128, 256]),
([128, 32, 16], [128, 32, 16]),
)
def test_process_source_id(self, source_id, expected_result):
source_id = tf.constant(source_id, dtype=tf.int64)
source_id = tf.strings.as_string(source_id)
self.assertSequenceAlmostEqual(expected_result,
utils.process_source_id(source_id=source_id))
@parameterized.parameters(
([[10, 20, 30, 40]], [[100]], [[0]], 10, None),
([[0.1, 0.2, 0.5, 0.6]], [[0.5]], [[1]], 2, [[1.0, 2.0]]),
)
def test_pad_groundtruths_to_fixed_size(self, boxes, area, classes, size,
attributes):
groundtruths = {}
groundtruths['boxes'] = tf.constant(boxes)
groundtruths['is_crowds'] = tf.constant([[0]])
groundtruths['areas'] = tf.constant(area)
groundtruths['classes'] = tf.constant(classes)
if attributes:
groundtruths['attributes'] = {'depth': tf.constant(attributes)}
actual_result = utils.pad_groundtruths_to_fixed_size(
groundtruths=groundtruths, size=size)
# Check that the first dimension is padded to the expected size.
for key in actual_result:
if key == 'attributes':
for _, v in actual_result[key].items():
pad_shape = v.shape[0]
self.assertEqual(size, pad_shape)
else:
pad_shape = actual_result[key].shape[0]
self.assertEqual(size, pad_shape)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_4118 | # Code source from Jiayuan Gu: https://github.com/Jiayuan-Gu/torkit3d
import torch
import torch.nn as nn
from ..common.mlp import mlp1d_bn_relu, mlp_bn_relu, mlp_relu, mlp1d_relu
__all__ = ["PointNet"]
class PointNet(nn.Module):
"""PointNet for classification.
Notes:
1. The original implementation includes dropout for global MLPs.
2. The original implementation decays the BN momentum.
"""
def __init__(
self,
in_channels=3,
local_channels=(64, 64, 64, 128, 1024),
global_channels=(512, 256),
):
super().__init__()
self.in_channels = in_channels
self.out_channels = (local_channels + global_channels)[-1]
self.mlp_local = mlp1d_bn_relu(in_channels, local_channels)
self.mlp_global = mlp_bn_relu(local_channels[-1], global_channels)
self.reset_parameters()
def forward(self, points, points_feature=None, points_mask=None) -> dict:
# points: [B, 3, N]; points_feature: [B, C, N], points_mask: [B, N]
if points_feature is not None:
input_feature = torch.cat([points, points_feature], dim=1)
else:
input_feature = points
local_feature = self.mlp_local(input_feature)
if points_mask is not None:
local_feature = torch.where(
points_mask.unsqueeze(1), local_feature, torch.zeros_like(local_feature)
)
global_feature, max_indices = torch.max(local_feature, 2)
output_feature = self.mlp_global(global_feature)
return {"feature": output_feature, "max_indices": max_indices}
def reset_parameters(self):
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d)):
if module.bias is not None:
nn.init.zeros_(module.bias)
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
module.momentum = 0.01
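# A minimal usage sketch (added for illustration, not part of the original
# module): runs a forward pass on a random point cloud to show the expected
# output shapes with the default channel sizes.
if __name__ == "__main__":
    net = PointNet(in_channels=3)
    points = torch.rand(2, 3, 1024)  # batch of 2 clouds with 1024 points each
    out = net(points)
    print(out["feature"].shape)      # torch.Size([2, 256])
    print(out["max_indices"].shape)  # torch.Size([2, 1024])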
|
the-stack_0_4120 | import os
import pymongo
from crawl_terms import make_vocab_to_def_json
# using dotenv to fetch MongoDB Atlas URL environment variable
MONGO_URL = os.getenv("MONGO_URL")
print("Connecting MongoDB Atlas to: " + MONGO_URL)
# accessing MongoDB Atlas with pymongo MongoClient
client = pymongo.MongoClient(MONGO_URL)
# connect to the mongodb atlas database and collection
vocab_db = client.get_database("vocab")
vocab_terms_collection = vocab_db.vocab_terms
# testing database connection
db = client.test
print(db)
# testing collection connection
db = client.get_database("vocab")
vocab_terms_mongodb = db.vocab_terms
all_documents_no = vocab_terms_mongodb.count_documents({})
print(all_documents_no)
# insert a test document at the collection
new_term = {"name": "namaewa?", "url": "localhost"}
vocab_terms_collection.insert_one(new_term)
# insert investopedia data to the mongodb atlas
test_json_document = make_vocab_to_def_json(
"https://www.investopedia.com/terms/b/buyersmarket.asp"
)
vocab_terms_collection.insert_one(test_json_document)
# insert multiple test documents at the collection
new_terms = [
{"name": "namaewa?", "url": "localhost"},
{"name": "taki-kun!", "url": "localhost"},
]
vocab_terms_collection.insert_many(new_terms)
# find individual documents from mongodb atlas
one_doc = vocab_terms_collection.find_one({"vocabulary": "buyersmarket"})
print(one_doc)
# find all documents from mongodb atlas
all_docs = list(vocab_terms_collection.find())
print(all_docs, type(all_docs))
# update a single document
one_update = {"vocabulary": "sellersmarket"}
vocab_terms_collection.update_one({"vocabulary": "buyersmarket"}, {"$set": one_update})
# delete a single document
vocab_terms_collection.delete_one({"vocabulary": "sellersmarket"})
|
the-stack_0_4121 | import logging
from logging import handlers
class logger(object):
level_relations = {
'debug':logging.DEBUG,
'info':logging.INFO,
'warning':logging.WARNING,
'error':logging.ERROR,
'crit':logging.CRITICAL
    }  # mapping of log level names to logging levels
    #def __init__(self,filename,level='info',when='D',backCount=3,fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):
    def __init__(self,filename,level='info',when='D',backCount=3,fmt='%(asctime)s - %(levelname)s: %(message)s'):
        self.logger = logging.getLogger(filename)
        format_str = logging.Formatter(fmt)  # set the log message format
        self.logger.setLevel(self.level_relations.get(level))  # set the log level
        sh = logging.StreamHandler()  # handler that writes to the screen
        sh.setFormatter(format_str)  # set the format shown on the screen
        th = handlers.TimedRotatingFileHandler(filename=filename,when=when,backupCount=backCount,encoding='utf-8')  # handler that writes to a file and rotates it automatically at a fixed interval
        # TimedRotatingFileHandler arguments:
        # interval is the time interval, backupCount is the number of backup files
        # to keep (older files beyond that count are deleted automatically), and
        # when is the unit of the interval, one of:
        # S - seconds
        # M - minutes
        # H - hours
        # D - days
        # W - every week (interval==0 means Monday)
        # midnight - every day at midnight
        th.setFormatter(format_str)  # set the format written to the file
        self.logger.addHandler(sh)  # attach the handlers to the logger
        self.logger.addHandler(th)
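# A minimal usage sketch (added for illustration): writes to the console and to
# a daily-rotated file; the file name and level below are arbitrary choices.
if __name__ == "__main__":
    log = logger('all.log', level='debug')
    log.logger.debug('debug message')
    log.logger.info('application started')
    log.logger.error('something went wrong')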
|
the-stack_0_4122 | from pybullet_utils import pd_controller_stable
from pybullet_envs.deep_mimic.env import humanoid_pose_interpolator
import math
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
jointFrictionForce = 0
class HumanoidStablePD(object):
def __init__(self, pybullet_client, mocap_data, timeStep, useFixedBase=True):
self._pybullet_client = pybullet_client
self._mocap_data = mocap_data
print("LOADING humanoid!")
self._sim_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.889540259, 0],
globalScaling=0.25,
useFixedBase=useFixedBase,
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER)
#self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,-1,collisionFilterGroup=0,collisionFilterMask=0)
#for j in range (self._pybullet_client.getNumJoints(self._sim_model)):
# self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,j,collisionFilterGroup=0,collisionFilterMask=0)
self._end_effectors = [5, 8, 11, 14] #ankle and wrist, both left and right
self._kin_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.85, 0],
globalScaling=0.25,
useFixedBase=True,
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER)
self._pybullet_client.changeDynamics(self._sim_model, -1, lateralFriction=0.9)
for j in range(self._pybullet_client.getNumJoints(self._sim_model)):
self._pybullet_client.changeDynamics(self._sim_model, j, lateralFriction=0.9)
self._pybullet_client.changeDynamics(self._sim_model, -1, linearDamping=0, angularDamping=0)
self._pybullet_client.changeDynamics(self._kin_model, -1, linearDamping=0, angularDamping=0)
#todo: add feature to disable simulation for a particular object. Until then, disable all collisions
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
-1,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
-1,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
alpha = 0.4
self._pybullet_client.changeVisualShape(self._kin_model, -1, rgbaColor=[1, 1, 1, alpha])
for j in range(self._pybullet_client.getNumJoints(self._kin_model)):
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
j,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
j,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
self._pybullet_client.changeVisualShape(self._kin_model, j, rgbaColor=[1, 1, 1, alpha])
self._poseInterpolator = humanoid_pose_interpolator.HumanoidPoseInterpolator()
for i in range(self._mocap_data.NumFrames() - 1):
frameData = self._mocap_data._motion_data['Frames'][i]
self._poseInterpolator.PostProcessMotionData(frameData)
self._stablePD = pd_controller_stable.PDControllerStableMultiDof(self._pybullet_client)
self._timeStep = timeStep
self._kpOrg = [
0, 0, 0, 0, 0, 0, 0, 1000, 1000, 1000, 1000, 100, 100, 100, 100, 500, 500, 500, 500, 500,
400, 400, 400, 400, 400, 400, 400, 400, 300, 500, 500, 500, 500, 500, 400, 400, 400, 400,
400, 400, 400, 400, 300
]
self._kdOrg = [
0, 0, 0, 0, 0, 0, 0, 100, 100, 100, 100, 10, 10, 10, 10, 50, 50, 50, 50, 50, 40, 40, 40,
40, 40, 40, 40, 40, 30, 50, 50, 50, 50, 50, 40, 40, 40, 40, 40, 40, 40, 40, 30
]
self._jointIndicesAll = [
chest, neck, rightHip, rightKnee, rightAnkle, rightShoulder, rightElbow, leftHip, leftKnee,
leftAnkle, leftShoulder, leftElbow
]
for j in self._jointIndicesAll:
#self._pybullet_client.setJointMotorControlMultiDof(self._sim_model, j, self._pybullet_client.POSITION_CONTROL, force=[1,1,1])
self._pybullet_client.setJointMotorControl2(self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=jointFrictionForce)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, jointFrictionForce])
self._pybullet_client.setJointMotorControl2(self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=0)
self._pybullet_client.setJointMotorControlMultiDof(
self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, 0])
self._jointDofCounts = [4, 4, 4, 1, 4, 4, 1, 4, 1, 4, 4, 1]
#only those body parts/links are allowed to touch the ground, otherwise the episode terminates
self._allowed_body_parts = [5, 11]
#[x,y,z] base position and [x,y,z,w] base orientation!
self._totalDofs = 7
for dof in self._jointDofCounts:
self._totalDofs += dof
self.setSimTime(0)
self.resetPose()
def resetPose(self):
#print("resetPose with self._frame=", self._frame, " and self._frameFraction=",self._frameFraction)
pose = self.computePose(self._frameFraction)
self.initializePose(self._poseInterpolator, self._sim_model, initBase=True)
self.initializePose(self._poseInterpolator, self._kin_model, initBase=False)
def initializePose(self, pose, phys_model, initBase, initializeVelocity=True):
if initializeVelocity:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
self._pybullet_client.resetBaseVelocity(phys_model, pose._baseLinVel, pose._baseAngVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, chest, pose._chestRot,
pose._chestVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, neck, pose._neckRot, pose._neckVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightHip, pose._rightHipRot,
pose._rightHipVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightKnee, pose._rightKneeRot,
pose._rightKneeVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightAnkle, pose._rightAnkleRot,
pose._rightAnkleVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightShoulder,
pose._rightShoulderRot, pose._rightShoulderVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightElbow, pose._rightElbowRot,
pose._rightElbowVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftHip, pose._leftHipRot,
pose._leftHipVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftKnee, pose._leftKneeRot,
pose._leftKneeVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftAnkle, pose._leftAnkleRot,
pose._leftAnkleVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftShoulder,
pose._leftShoulderRot, pose._leftShoulderVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftElbow, pose._leftElbowRot,
pose._leftElbowVel)
else:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
self._pybullet_client.resetJointStateMultiDof(phys_model, chest, pose._chestRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, neck, pose._neckRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightHip, pose._rightHipRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightKnee, pose._rightKneeRot, [0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightAnkle, pose._rightAnkleRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightShoulder,
pose._rightShoulderRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightElbow, pose._rightElbowRot,
[0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftHip, pose._leftHipRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftKnee, pose._leftKneeRot, [0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftAnkle, pose._leftAnkleRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftShoulder,
pose._leftShoulderRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftElbow, pose._leftElbowRot, [0])
def calcCycleCount(self, simTime, cycleTime):
phases = simTime / cycleTime
count = math.floor(phases)
loop = True
#count = (loop) ? count : cMathUtil::Clamp(count, 0, 1);
return count
def getCycleTime(self):
keyFrameDuration = self._mocap_data.KeyFrameDuraction()
cycleTime = keyFrameDuration * (self._mocap_data.NumFrames() - 1)
return cycleTime
def setSimTime(self, t):
self._simTime = t
#print("SetTimeTime time =",t)
keyFrameDuration = self._mocap_data.KeyFrameDuraction()
cycleTime = self.getCycleTime()
#print("self._motion_data.NumFrames()=",self._mocap_data.NumFrames())
self._cycleCount = self.calcCycleCount(t, cycleTime)
#print("cycles=",cycles)
frameTime = t - self._cycleCount * cycleTime
if (frameTime < 0):
frameTime += cycleTime
#print("keyFrameDuration=",keyFrameDuration)
#print("frameTime=",frameTime)
self._frame = int(frameTime / keyFrameDuration)
#print("self._frame=",self._frame)
self._frameNext = self._frame + 1
if (self._frameNext >= self._mocap_data.NumFrames()):
self._frameNext = self._frame
self._frameFraction = (frameTime - self._frame * keyFrameDuration) / (keyFrameDuration)
def computeCycleOffset(self):
firstFrame = 0
lastFrame = self._mocap_data.NumFrames() - 1
frameData = self._mocap_data._motion_data['Frames'][0]
frameDataNext = self._mocap_data._motion_data['Frames'][lastFrame]
basePosStart = [frameData[1], frameData[2], frameData[3]]
basePosEnd = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]
self._cycleOffset = [
basePosEnd[0] - basePosStart[0], basePosEnd[1] - basePosStart[1],
basePosEnd[2] - basePosStart[2]
]
return self._cycleOffset
def computePose(self, frameFraction):
frameData = self._mocap_data._motion_data['Frames'][self._frame]
frameDataNext = self._mocap_data._motion_data['Frames'][self._frameNext]
self._poseInterpolator.Slerp(frameFraction, frameData, frameDataNext, self._pybullet_client)
#print("self._poseInterpolator.Slerp(", frameFraction,")=", pose)
self.computeCycleOffset()
oldPos = self._poseInterpolator._basePos
self._poseInterpolator._basePos = [
oldPos[0] + self._cycleCount * self._cycleOffset[0],
oldPos[1] + self._cycleCount * self._cycleOffset[1],
oldPos[2] + self._cycleCount * self._cycleOffset[2]
]
pose = self._poseInterpolator.GetPose()
return pose
def convertActionToPose(self, action):
pose = self._poseInterpolator.ConvertFromAction(self._pybullet_client, action)
return pose
def computePDForces(self, desiredPositions, desiredVelocities, maxForces):
    if desiredVelocities is None:
desiredVelocities = [0] * self._totalDofs
taus = self._stablePD.computePD(bodyUniqueId=self._sim_model,
jointIndices=self._jointIndicesAll,
desiredPositions=desiredPositions,
desiredVelocities=desiredVelocities,
kps=self._kpOrg,
kds=self._kdOrg,
maxForces=maxForces,
timeStep=self._timeStep)
return taus
def applyPDForces(self, taus):
dofIndex = 7
scaling = 1
for index in range(len(self._jointIndicesAll)):
jointIndex = self._jointIndicesAll[index]
if self._jointDofCounts[index] == 4:
force = [
scaling * taus[dofIndex + 0], scaling * taus[dofIndex + 1],
scaling * taus[dofIndex + 2]
]
#print("force[", jointIndex,"]=",force)
self._pybullet_client.setJointMotorControlMultiDof(self._sim_model,
jointIndex,
self._pybullet_client.TORQUE_CONTROL,
force=force)
if self._jointDofCounts[index] == 1:
force = [scaling * taus[dofIndex]]
#print("force[", jointIndex,"]=",force)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
jointIndex,
controlMode=self._pybullet_client.TORQUE_CONTROL,
force=force)
dofIndex += self._jointDofCounts[index]
def setJointMotors(self, desiredPositions, maxForces):
controlMode = self._pybullet_client.POSITION_CONTROL
startIndex = 7
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
kp = 0.2
forceScale = 1
#self._jointDofCounts=[4,4,4,1,4,4,1,4,1,4,4,1]
maxForce = [
forceScale * maxForces[startIndex], forceScale * maxForces[startIndex + 1],
forceScale * maxForces[startIndex + 2], forceScale * maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
chest,
controlMode,
targetPosition=self._poseInterpolator._chestRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
neck,
controlMode,
targetPosition=self._poseInterpolator._neckRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightHip,
controlMode,
targetPosition=self._poseInterpolator._rightHipRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightKnee,
controlMode,
targetPosition=self._poseInterpolator._rightKneeRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightAnkle,
controlMode,
targetPosition=self._poseInterpolator._rightAnkleRot,
positionGain=kp,
force=maxForce)
maxForce = [
forceScale * maxForces[startIndex], forceScale * maxForces[startIndex + 1],
forceScale * maxForces[startIndex + 2], forceScale * maxForces[startIndex + 3]
]
startIndex += 4
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightElbow,
controlMode,
targetPosition=self._poseInterpolator._rightElbowRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftHip,
controlMode,
targetPosition=self._poseInterpolator._leftHipRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftKnee,
controlMode,
targetPosition=self._poseInterpolator._leftKneeRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftAnkle,
controlMode,
targetPosition=self._poseInterpolator._leftAnkleRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftShoulder,
controlMode,
targetPosition=self._poseInterpolator._leftShoulderRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftElbow,
controlMode,
targetPosition=self._poseInterpolator._leftElbowRot,
positionGain=kp,
force=maxForce)
#print("startIndex=",startIndex)
def getPhase(self):
keyFrameDuration = self._mocap_data.KeyFrameDuraction()
cycleTime = keyFrameDuration * (self._mocap_data.NumFrames() - 1)
phase = self._simTime / cycleTime
phase = math.fmod(phase, 1.0)
if (phase < 0):
phase += 1
return phase
def buildHeadingTrans(self, rootOrn):
#align root transform 'forward' with world-space x axis
eul = self._pybullet_client.getEulerFromQuaternion(rootOrn)
refDir = [1, 0, 0]
rotVec = self._pybullet_client.rotateVector(rootOrn, refDir)
heading = math.atan2(-rotVec[2], rotVec[0])
heading2 = eul[1]
#print("heading=",heading)
headingOrn = self._pybullet_client.getQuaternionFromAxisAngle([0, 1, 0], -heading)
return headingOrn
def buildOriginTrans(self):
rootPos, rootOrn = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
#print("rootPos=",rootPos, " rootOrn=",rootOrn)
invRootPos = [-rootPos[0], 0, -rootPos[2]]
#invOrigTransPos, invOrigTransOrn = self._pybullet_client.invertTransform(rootPos,rootOrn)
headingOrn = self.buildHeadingTrans(rootOrn)
#print("headingOrn=",headingOrn)
headingMat = self._pybullet_client.getMatrixFromQuaternion(headingOrn)
#print("headingMat=",headingMat)
#dummy, rootOrnWithoutHeading = self._pybullet_client.multiplyTransforms([0,0,0],headingOrn, [0,0,0], rootOrn)
#dummy, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0,0,0],rootOrnWithoutHeading, invOrigTransPos, invOrigTransOrn)
invOrigTransPos, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0, 0, 0],
headingOrn,
invRootPos,
[0, 0, 0, 1])
#print("invOrigTransPos=",invOrigTransPos)
#print("invOrigTransOrn=",invOrigTransOrn)
invOrigTransMat = self._pybullet_client.getMatrixFromQuaternion(invOrigTransOrn)
#print("invOrigTransMat =",invOrigTransMat )
return invOrigTransPos, invOrigTransOrn
def getState(self):
stateVector = []
phase = self.getPhase()
#print("phase=",phase)
stateVector.append(phase)
rootTransPos, rootTransOrn = self.buildOriginTrans()
basePos, baseOrn = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
rootPosRel, dummy = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn,
basePos, [0, 0, 0, 1])
#print("!!!rootPosRel =",rootPosRel )
#print("rootTransPos=",rootTransPos)
#print("basePos=",basePos)
localPos, localOrn = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn,
basePos, baseOrn)
localPos = [
localPos[0] - rootPosRel[0], localPos[1] - rootPosRel[1], localPos[2] - rootPosRel[2]
]
#print("localPos=",localPos)
stateVector.append(rootPosRel[1])
#self.pb2dmJoints=[0,1,2,9,10,11,3,4,5,12,13,14,6,7,8]
self.pb2dmJoints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
#print("joint order:",j)
ls = self._pybullet_client.getLinkState(self._sim_model, j, computeForwardKinematics=True)
linkPos = ls[0]
linkOrn = ls[1]
linkPosLocal, linkOrnLocal = self._pybullet_client.multiplyTransforms(
rootTransPos, rootTransOrn, linkPos, linkOrn)
if (linkOrnLocal[3] < 0):
linkOrnLocal = [-linkOrnLocal[0], -linkOrnLocal[1], -linkOrnLocal[2], -linkOrnLocal[3]]
linkPosLocal = [
linkPosLocal[0] - rootPosRel[0], linkPosLocal[1] - rootPosRel[1],
linkPosLocal[2] - rootPosRel[2]
]
for l in linkPosLocal:
stateVector.append(l)
#re-order the quaternion, DeepMimic uses w,x,y,z
if (linkOrnLocal[3] < 0):
linkOrnLocal[0] *= -1
linkOrnLocal[1] *= -1
linkOrnLocal[2] *= -1
linkOrnLocal[3] *= -1
stateVector.append(linkOrnLocal[3])
stateVector.append(linkOrnLocal[0])
stateVector.append(linkOrnLocal[1])
stateVector.append(linkOrnLocal[2])
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
ls = self._pybullet_client.getLinkState(self._sim_model, j, computeLinkVelocity=True)
linkLinVel = ls[6]
linkAngVel = ls[7]
linkLinVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkLinVel, [0, 0, 0, 1])
#linkLinVelLocal=[linkLinVelLocal[0]-rootPosRel[0],linkLinVelLocal[1]-rootPosRel[1],linkLinVelLocal[2]-rootPosRel[2]]
linkAngVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkAngVel, [0, 0, 0, 1])
for l in linkLinVelLocal:
stateVector.append(l)
for l in linkAngVelLocal:
stateVector.append(l)
#print("stateVector len=",len(stateVector))
#for st in range (len(stateVector)):
# print("state[",st,"]=",stateVector[st])
return stateVector
def terminates(self):
#check if any non-allowed body part hits the ground
terminates = False
pts = self._pybullet_client.getContactPoints()
for p in pts:
part = -1
#ignore self-collision
if (p[1] == p[2]):
continue
if (p[1] == self._sim_model):
part = p[3]
if (p[2] == self._sim_model):
part = p[4]
if (part >= 0 and part not in self._allowed_body_parts):
#print("terminating part:", part)
terminates = True
return terminates
def quatMul(self, q1, q2):
return [
q1[3] * q2[0] + q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1],
q1[3] * q2[1] + q1[1] * q2[3] + q1[2] * q2[0] - q1[0] * q2[2],
q1[3] * q2[2] + q1[2] * q2[3] + q1[0] * q2[1] - q1[1] * q2[0],
q1[3] * q2[3] - q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2]
]
def calcRootAngVelErr(self, vel0, vel1):
diff = [vel0[0] - vel1[0], vel0[1] - vel1[1], vel0[2] - vel1[2]]
return diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]
def calcRootRotDiff(self, orn0, orn1):
orn0Conj = [-orn0[0], -orn0[1], -orn0[2], orn0[3]]
q_diff = self.quatMul(orn1, orn0Conj)
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(q_diff)
return angle * angle
def getReward(self, pose):
#from DeepMimic double cSceneImitate::CalcRewardImitate
#todo: compensate for ground height in some parts, once we move to non-flat terrain
pose_w = 0.5
vel_w = 0.05
end_eff_w = 0.15
root_w = 0.2
com_w = 0 #0.1
total_w = pose_w + vel_w + end_eff_w + root_w + com_w
pose_w /= total_w
vel_w /= total_w
end_eff_w /= total_w
root_w /= total_w
com_w /= total_w
pose_scale = 2
vel_scale = 0.1
end_eff_scale = 40
root_scale = 5
com_scale = 10
err_scale = 1
reward = 0
pose_err = 0
vel_err = 0
end_eff_err = 0
root_err = 0
com_err = 0
heading_err = 0
#create a mimic reward, comparing the dynamics humanoid with a kinematic one
#pose = self.InitializePoseFromMotionData()
#print("self._kin_model=",self._kin_model)
#print("kinematicHumanoid #joints=",self._pybullet_client.getNumJoints(self._kin_model))
#self.ApplyPose(pose, True, True, self._kin_model, self._pybullet_client)
#const Eigen::VectorXd& pose0 = sim_char.GetPose();
#const Eigen::VectorXd& vel0 = sim_char.GetVel();
#const Eigen::VectorXd& pose1 = kin_char.GetPose();
#const Eigen::VectorXd& vel1 = kin_char.GetVel();
#tMatrix origin_trans = sim_char.BuildOriginTrans();
#tMatrix kin_origin_trans = kin_char.BuildOriginTrans();
#
#tVector com0_world = sim_char.CalcCOM();
#tVector com_vel0_world = sim_char.CalcCOMVel();
#tVector com1_world;
#tVector com_vel1_world;
#cRBDUtil::CalcCoM(joint_mat, body_defs, pose1, vel1, com1_world, com_vel1_world);
#
root_id = 0
#tVector root_pos0 = cKinTree::GetRootPos(joint_mat, pose0);
#tVector root_pos1 = cKinTree::GetRootPos(joint_mat, pose1);
#tQuaternion root_rot0 = cKinTree::GetRootRot(joint_mat, pose0);
#tQuaternion root_rot1 = cKinTree::GetRootRot(joint_mat, pose1);
#tVector root_vel0 = cKinTree::GetRootVel(joint_mat, vel0);
#tVector root_vel1 = cKinTree::GetRootVel(joint_mat, vel1);
#tVector root_ang_vel0 = cKinTree::GetRootAngVel(joint_mat, vel0);
#tVector root_ang_vel1 = cKinTree::GetRootAngVel(joint_mat, vel1);
mJointWeights = [
0.20833, 0.10416, 0.0625, 0.10416, 0.0625, 0.041666666666666671, 0.0625, 0.0416, 0.00,
0.10416, 0.0625, 0.0416, 0.0625, 0.0416, 0.0000
]
num_end_effs = 0
num_joints = 15
root_rot_w = mJointWeights[root_id]
rootPosSim, rootOrnSim = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
rootPosKin, rootOrnKin = self._pybullet_client.getBasePositionAndOrientation(self._kin_model)
linVelSim, angVelSim = self._pybullet_client.getBaseVelocity(self._sim_model)
linVelKin, angVelKin = self._pybullet_client.getBaseVelocity(self._kin_model)
root_rot_err = self.calcRootRotDiff(rootOrnSim, rootOrnKin)
pose_err += root_rot_w * root_rot_err
root_vel_diff = [
linVelSim[0] - linVelKin[0], linVelSim[1] - linVelKin[1], linVelSim[2] - linVelKin[2]
]
root_vel_err = root_vel_diff[0] * root_vel_diff[0] + root_vel_diff[1] * root_vel_diff[
1] + root_vel_diff[2] * root_vel_diff[2]
root_ang_vel_err = self.calcRootAngVelErr(angVelSim, angVelKin)
vel_err += root_rot_w * root_ang_vel_err
for j in range(num_joints):
curr_pose_err = 0
curr_vel_err = 0
w = mJointWeights[j]
simJointInfo = self._pybullet_client.getJointStateMultiDof(self._sim_model, j)
#print("simJointInfo.pos=",simJointInfo[0])
#print("simJointInfo.vel=",simJointInfo[1])
kinJointInfo = self._pybullet_client.getJointStateMultiDof(self._kin_model, j)
#print("kinJointInfo.pos=",kinJointInfo[0])
#print("kinJointInfo.vel=",kinJointInfo[1])
if (len(simJointInfo[0]) == 1):
angle = simJointInfo[0][0] - kinJointInfo[0][0]
curr_pose_err = angle * angle
velDiff = simJointInfo[1][0] - kinJointInfo[1][0]
curr_vel_err = velDiff * velDiff
if (len(simJointInfo[0]) == 4):
#print("quaternion diff")
diffQuat = self._pybullet_client.getDifferenceQuaternion(simJointInfo[0], kinJointInfo[0])
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(diffQuat)
curr_pose_err = angle * angle
diffVel = [
simJointInfo[1][0] - kinJointInfo[1][0], simJointInfo[1][1] - kinJointInfo[1][1],
simJointInfo[1][2] - kinJointInfo[1][2]
]
curr_vel_err = diffVel[0] * diffVel[0] + diffVel[1] * diffVel[1] + diffVel[2] * diffVel[2]
pose_err += w * curr_pose_err
vel_err += w * curr_vel_err
is_end_eff = j in self._end_effectors
if is_end_eff:
linkStateSim = self._pybullet_client.getLinkState(self._sim_model, j)
linkStateKin = self._pybullet_client.getLinkState(self._kin_model, j)
linkPosSim = linkStateSim[0]
linkPosKin = linkStateKin[0]
linkPosDiff = [
linkPosSim[0] - linkPosKin[0], linkPosSim[1] - linkPosKin[1],
linkPosSim[2] - linkPosKin[2]
]
curr_end_err = linkPosDiff[0] * linkPosDiff[0] + linkPosDiff[1] * linkPosDiff[
1] + linkPosDiff[2] * linkPosDiff[2]
end_eff_err += curr_end_err
num_end_effs += 1
if (num_end_effs > 0):
end_eff_err /= num_end_effs
#double root_ground_h0 = mGround->SampleHeight(sim_char.GetRootPos())
#double root_ground_h1 = kin_char.GetOriginPos()[1]
#root_pos0[1] -= root_ground_h0
#root_pos1[1] -= root_ground_h1
root_pos_diff = [
rootPosSim[0] - rootPosKin[0], rootPosSim[1] - rootPosKin[1], rootPosSim[2] - rootPosKin[2]
]
root_pos_err = root_pos_diff[0] * root_pos_diff[0] + root_pos_diff[1] * root_pos_diff[
1] + root_pos_diff[2] * root_pos_diff[2]
#
#root_rot_err = cMathUtil::QuatDiffTheta(root_rot0, root_rot1)
#root_rot_err *= root_rot_err
#root_vel_err = (root_vel1 - root_vel0).squaredNorm()
#root_ang_vel_err = (root_ang_vel1 - root_ang_vel0).squaredNorm()
root_err = root_pos_err + 0.1 * root_rot_err + 0.01 * root_vel_err + 0.001 * root_ang_vel_err
#com_err = 0.1 * (com_vel1_world - com_vel0_world).squaredNorm()
#print("pose_err=",pose_err)
#print("vel_err=",vel_err)
pose_reward = math.exp(-err_scale * pose_scale * pose_err)
vel_reward = math.exp(-err_scale * vel_scale * vel_err)
end_eff_reward = math.exp(-err_scale * end_eff_scale * end_eff_err)
root_reward = math.exp(-err_scale * root_scale * root_err)
com_reward = math.exp(-err_scale * com_scale * com_err)
reward = pose_w * pose_reward + vel_w * vel_reward + end_eff_w * end_eff_reward + root_w * root_reward + com_w * com_reward
# pose_reward,vel_reward,end_eff_reward, root_reward, com_reward);
#print("reward=",reward)
#print("pose_reward=",pose_reward)
#print("vel_reward=",vel_reward)
#print("end_eff_reward=",end_eff_reward)
#print("root_reward=",root_reward)
#print("com_reward=",com_reward)
return reward
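# A minimal, hedged usage sketch (not part of the original file). The motion
# file path and the MotionCaptureData helper are assumptions borrowed from the
# surrounding pybullet_envs.deep_mimic examples, not defined in this module.
#
#   import pybullet, pybullet_data
#   from pybullet_utils import bullet_client
#   from pybullet_envs.deep_mimic.env.motion_capture_data import MotionCaptureData
#
#   pb = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
#   pb.setAdditionalSearchPath(pybullet_data.getDataPath())
#   mocap = MotionCaptureData()
#   mocap.Load(pybullet_data.getDataPath() + "/data/motions/humanoid3d_walk.txt")
#   humanoid = HumanoidStablePD(pb, mocap, timeStep=1. / 240., useFixedBase=False)
#   humanoid.setSimTime(0.5)
#   pose = humanoid.computePose(humanoid._frameFraction)
#   state = humanoid.getState()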
|
the-stack_0_4123 | import sys
from notebook import Notebook
class Menu:
'''display a menu and respond to choices when run.'''
def __init__(self):
self._notebook = Notebook()
self._choices = {"1" : self._show_notes,
"2": self._search_notes,
"3": self._add_note,
"4": self._modify_note,
"5": self._quit}
def run(self):
'''Display the menu and respond to choices'''
while True:
self._display_menu()
choice = input("Enter an option: ")
action = self._choices.get(choice)
if action:
action()
else:
print("{0} is not a valid choice".format(choice))
def _display_menu(self):
print("""Notebook Menu
1. Show all notes
2. Search notes
3. Add note
4. Modify note
5. Quit
""")
def _show_notes(self, notes=None):
if not notes:
notes = self._notebook.notes
for note in notes:
print("{0}: {1}\n{2}".format(note.id, note.tags, note.memo))
def _search_notes(self):
filter = input("Search for: ")
notes = self._notebook.search(filter)
self._show_notes(notes)
def _add_note(self):
memo = input("Enter a memo: ")
self._notebook.new_note(memo)
print("Your note has been added.")
def _modify_note(self):
id = input("Enter a note id: ")
memo = input("Enter a memo: ")
        tags = input("Enter tags: ")
if memo:
self._notebook.modify_memo(id, memo)
if tags:
self._notebook.modify_tags(id, tags)
def _quit(self):
print("Thank you for using your notebook today.")
sys.exit(0)
if __name__ == "__main__":
Menu().run()
|
the-stack_0_4124 | # coding=utf-8
import json
from cStringIO import StringIO
import cv2
import numpy as np
import requests
class InferenceService(object):
"""
Die Klasse ist für das Senden von Bildern zum Inferece Server zuständig.
"""
def __init__(self, interference_server_address):
"""
Konstruktor zum Erzeugen eines neuen InferenceService.
:param interference_server_address: URL des Inference Servers
"""
self.interference_server_address = interference_server_address
def run_interference_on_images(self, images):
"""
Sendet die übergebenen Bilder an den Inference Server und gibt die erkannten ImageNet-Knoten mit Score zurück.
:param images: zu erkennenden Bilder als numpy-Matrix im BGR-Format.
:return: vom Inference Server gelieferten Predictions mit ImageNet-Knoten und Score
"""
converted_images = []
for image in images:
cv2_rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
converted_images.append(cv2_rgb_img)
stacked_images = np.stack(images)
memory_file = StringIO() # erzeugt eine in-memory datei, die für np.save verwendet wird
np.save(memory_file, stacked_images, allow_pickle=False)
memory_file.seek(0)
res = requests.post(self.interference_server_address, data=memory_file)
if res.ok:
predictions = json.loads(res.content)
return predictions
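# A minimal usage sketch (added for illustration; the server URL and image path
# below are placeholders, not values from the original project):
if __name__ == "__main__":
    service = InferenceService("http://localhost:8000/inference")
    img = cv2.imread("example.jpg")  # BGR image as loaded by OpenCV
    if img is not None:
        print(service.run_interference_on_images([img]))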
|
the-stack_0_4125 | """
:copyright: 2010-2015 by Ronny Pfannschmidt
:license: MIT
"""
import os
import warnings
from .config import Configuration
from .utils import function_has_arg, string_types
from .version import format_version, meta
from .discover import iter_matching_entrypoints
PRETEND_KEY = "SETUPTOOLS_SCM_PRETEND_VERSION"
TEMPLATES = {
".py": """\
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = {version!r}
""",
".txt": "{version}",
}
def version_from_scm(root):
warnings.warn(
"version_from_scm is deprecated please use get_version",
category=DeprecationWarning,
)
config = Configuration()
config.root = root
# TODO: Is it API?
return _version_from_entrypoint(config, "setuptools_scm.parse_scm")
def _call_entrypoint_fn(config, fn):
if function_has_arg(fn, "config"):
return fn(config.absolute_root, config=config)
else:
warnings.warn(
"parse functions are required to provide a named argument"
" 'config' in the future.",
category=PendingDeprecationWarning,
stacklevel=2,
)
return fn(config.absolute_root)
def _version_from_entrypoint(config, entrypoint):
for ep in iter_matching_entrypoints(config.absolute_root, entrypoint):
version = _call_entrypoint_fn(config, ep.load())
if version:
return version
def dump_version(root, version, write_to, template=None):
assert isinstance(version, string_types)
if not write_to:
return
target = os.path.normpath(os.path.join(root, write_to))
ext = os.path.splitext(target)[1]
template = template or TEMPLATES.get(ext)
if template is None:
raise ValueError(
"bad file format: '{}' (of {}) \nonly *.txt and *.py are supported".format(
os.path.splitext(target)[1], target
)
)
with open(target, "w") as fp:
fp.write(template.format(version=version))
def _do_parse(config):
pretended = os.environ.get(PRETEND_KEY)
if pretended:
# we use meta here since the pretended version
# must adhere to the pep to begin with
return meta(tag=pretended, preformatted=True, config=config)
if config.parse:
parse_result = _call_entrypoint_fn(config, config.parse)
if isinstance(parse_result, string_types):
raise TypeError(
"version parse result was a string\nplease return a parsed version"
)
version = parse_result or _version_from_entrypoint(
config, "setuptools_scm.parse_scm_fallback"
)
else:
# include fallbacks after dropping them from the main entrypoint
version = _version_from_entrypoint(
config, "setuptools_scm.parse_scm"
) or _version_from_entrypoint(
config, "setuptools_scm.parse_scm_fallback"
)
if version:
return version
raise LookupError(
"setuptools-scm was unable to detect version for %r.\n\n"
"Make sure you're either building from a fully intact git repository "
"or PyPI tarballs. Most other sources (such as GitHub's tarballs, a "
"git checkout without the .git folder) don't contain the necessary "
"metadata and will not work.\n\n"
"For example, if you're using pip, instead of "
"https://github.com/user/proj/archive/master.zip "
"use git+https://github.com/user/proj.git#egg=proj" % config.absolute_root
)
def get_version(
root=".",
version_scheme="guess-next-dev",
local_scheme="node-and-date",
write_to=None,
write_to_template=None,
relative_to=None,
tag_regex=None,
parse=None,
):
"""
If supplied, relative_to should be a file from which root may
be resolved. Typically called by a script or module that is not
in the root of the repository to direct setuptools_scm to the
root of the repository by supplying ``__file__``.
"""
config = Configuration()
config.root = root
config.version_scheme = version_scheme
config.local_scheme = local_scheme
config.write_to = write_to
config.write_to_template = write_to_template
config.relative_to = relative_to
config.tag_regex = tag_regex
config.parse = parse
parsed_version = _do_parse(config)
if parsed_version:
version_string = format_version(
parsed_version, version_scheme=version_scheme, local_scheme=local_scheme
)
dump_version(
root=root,
version=version_string,
write_to=write_to,
template=write_to_template,
)
return version_string
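# A minimal usage sketch (illustrative only; typically called from a project's
# setup.py rather than from inside this module, and the write_to path below is
# a placeholder):
#
#   from setuptools_scm import get_version
#   version = get_version(root=".", relative_to=__file__,
#                         write_to="src/mypkg/_version.py")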
|
the-stack_0_4133 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import os
from typing import Iterable, List, Tuple, Union
import pandas as pd
from pytest_regressions.data_regression import DataRegressionFixture
from pytest_regressions.dataframe_regression import DataFrameRegressionFixture
from pytest_regressions.file_regression import FileRegressionFixture
from parlai.crowdsourcing.utils.tests import AbstractOneTurnCrowdsourcingTest
TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
def get_hashed_combo_path(
root_dir: str,
subdir: str,
task: str,
combos: Iterable[Union[List[str], Tuple[str, str]]],
) -> str:
"""
Return a unique path for the given combinations of models.
:param root_dir: root save directory
:param subdir: immediate subdirectory of root_dir
:param task: the ParlAI task being considered
:param combos: the combinations of models being compared
"""
# Sort the names in each combo, as well as the overall combos
sorted_combos = []
for combo in combos:
assert len(combo) == 2
sorted_combos.append(tuple(sorted(combo)))
sorted_combos = sorted(sorted_combos)
os.makedirs(os.path.join(root_dir, subdir), exist_ok=True)
path = os.path.join(
root_dir,
subdir,
hashlib.sha1(
'___and___'.join(
[f"{m1}vs{m2}.{task.replace(':', '_')}" for m1, m2 in sorted_combos]
).encode('utf-8')
).hexdigest()[:10],
)
return path
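# A minimal usage sketch for get_hashed_combo_path (illustrative values only):
#
#   path = get_hashed_combo_path(
#       root_dir='/tmp/acute_eval',
#       subdir='pairings_files',
#       task='blended_skill_talk',
#       combos=[('model1', 'model2')],
#   )
#   # -> '/tmp/acute_eval/pairings_files/<first 10 chars of a sha1 digest>'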
class AbstractFastAcuteTest(AbstractOneTurnCrowdsourcingTest):
"""
Abstract test class for testing Fast ACUTE code.
"""
TASK_DIRECTORY = TASK_DIRECTORY
MODELS = ['model1', 'model2']
MODEL_STRING = ','.join(MODELS)
TASK_DATA = {
"final_data": [
{"speakerChoice": "human_as_model", "textReason": "Makes more sense"},
{"speakerChoice": "model1", "textReason": "Makes more sense"},
{"speakerChoice": "model2", "textReason": "Makes more sense"},
{"speakerChoice": "model1", "textReason": "Makes more sense"},
{"speakerChoice": "model2", "textReason": "Makes more sense"},
]
}
def _get_common_overrides(self, root_dir: str) -> List[str]:
"""
Return overrides for all subclassed Fast ACUTE test code.
"""
# TODO: clean this up when Hydra has support for recursive defaults
return [
'+mephisto.blueprint.acute_eval_type=engaging',
'mephisto.blueprint.block_on_onboarding_fail=False',
'+mephisto.blueprint.matchups_per_pair=60',
'+mephisto.blueprint.num_self_chats=5',
f'+mephisto.blueprint.onboarding_path={self.TASK_DIRECTORY}/task_config/onboarding.json',
f'+mephisto.blueprint.root_dir={root_dir}',
'+mephisto.blueprint.sufficient_matchups_multiplier=2',
'+mephisto.blueprint.task=blended_skill_talk',
'mephisto.task.task_name=acute_eval_test',
]
def test_agent_state(self, setup_teardown, data_regression: DataRegressionFixture):
outputs = setup_teardown
self._check_agent_state(state=outputs['state'], data_regression=data_regression)
def test_all_convo_pairs_txt(
self, setup_teardown, file_regression: FileRegressionFixture
):
outputs = setup_teardown
self._check_file_contents(
results_folder=outputs['results_folder'],
file_suffix='all_convo_pairs.txt',
file_regression=file_regression,
)
def test_all_html(self, setup_teardown, file_regression: FileRegressionFixture):
outputs = setup_teardown
self._check_file_contents(
results_folder=outputs['results_folder'],
file_suffix='all.html',
file_regression=file_regression,
)
def test_full_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='full.csv',
dataframe_regression=dataframe_regression,
)
def test_grid_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='grid.csv',
dataframe_regression=dataframe_regression,
)
def test_grid_winners_as_rows_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='grid.winners_as_rows.csv',
dataframe_regression=dataframe_regression,
)
def test_ratings_per_worker_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='ratings_per_worker.csv',
dataframe_regression=dataframe_regression,
)
def test_reason_html(self, setup_teardown, file_regression: FileRegressionFixture):
outputs = setup_teardown
self._check_file_contents(
results_folder=outputs['results_folder'],
file_suffix='reason.html',
file_regression=file_regression,
)
def test_significance_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='significance.csv',
dataframe_regression=dataframe_regression,
)
def _check_dataframe(
self,
results_folder: str,
file_suffix: str,
dataframe_regression: DataFrameRegressionFixture,
):
file_path = self._get_matching_file_path(
results_folder=results_folder, file_suffix=file_suffix
)
df = pd.read_csv(file_path)
dataframe_regression.check(data_frame=df)
def _check_file_contents(
self,
results_folder: str,
file_suffix: str,
file_regression: FileRegressionFixture,
):
file_path = self._get_matching_file_path(
results_folder=results_folder, file_suffix=file_suffix
)
with open(file_path) as f:
contents = f.read()
file_regression.check(contents=contents)
def _get_matching_file_path(self, results_folder: str, file_suffix: str) -> str:
matching_files = [
obj for obj in os.listdir(results_folder) if obj.endswith(file_suffix)
]
assert len(matching_files) == 1
return os.path.join(results_folder, matching_files[0])
|
the-stack_0_4134 | """
Module containing helper functions for the JASMIN notifications app.
"""
__author__ = "Matt Pryor"
__copyright__ = "Copyright 2015 UK Science and Technology Facilities Council"
from datetime import date
from django.conf import settings
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import NotificationType, UserNotification, EmailNotification
def notification_context(notification):
"""
Takes a notification and returns a template context dictionary for that notification.
This context dictionary will contain:
* ``notification_type`` - the notification type as a string
* ``level`` - the notification level as a string
* ``email`` - the email that the notification is for
* ``user`` - the user the notification is for, or ``None`` if the notification
is to an email that is not associated with a user
* ``target`` - the target object for the notification
* ``link`` - the *fully qualified* link underlying the notification
* ``follow_link`` - the *fully qualified* link to follow the notification
* ``created_at`` - the datetime at which the notification was created
* ``followed_at`` - the datetime at which the notification was followed, or
``None`` if it has not been followed
* Any variables specified as ``extra_context``
"""
if isinstance(notification, UserNotification):
user = notification.user
email = user.email
else:
# For email notifications, try to find a user with the email address to go
# into the context
email = notification.email
user = get_user_model().objects.filter(email = email).first()
# Create the context
link_prefix = '' if notification.link.startswith('http') else settings.BASE_URL
context = {
'notification_type' : notification.notification_type.name,
'level' : notification.notification_type.level.value,
'email' : email,
'user' : user,
'target' : notification.target,
'link' : link_prefix + notification.link,
'follow_link' : settings.BASE_URL + reverse(
'jasmin_notifications:follow', kwargs = { 'uuid' : notification.uuid }
),
'created_at' : notification.created_at,
'followed_at' : notification.followed_at,
}
context.update(notification.extra_context)
return context
def notify(notification_type, target, link, user = None, email = None, cc = None, **extra_context):
"""
Creates a notification with the given ``notification_type``, ``target`` and ``link``.
``notification_type`` can be given as a string.
If ``user`` is given, a :py:class:`~.models.UserNotification` is created (even
if ``email`` is also given), otherwise an :py:class:``~.models.EmailNotification``
is created.
    Any additional ``kwargs`` are passed as context variables for template rendering,
both for emails and messages (if appropriate).
"""
if not isinstance(notification_type, NotificationType):
notification_type = NotificationType.objects.get(name = notification_type)
if user:
notification = UserNotification(user = user)
elif email:
notification = EmailNotification(email = email, cc = cc)
else:
raise ValueError('One of user or email must be given')
notification.notification_type = notification_type
notification.target = target
notification.link = link
notification.extra_context = extra_context
notification.save()
def notify_if_not_exists(notification_type, target, link, user = None, email = None, **extra_context):
"""
Creates a notification with the given ``notification_type``, ``target`` and
``email``\ /``user`` only if such a notification does not already exist.
See :py:func:`notify` for more details.
"""
if user:
query = UserNotification.objects.filter(user = user)
elif email:
query = EmailNotification.objects.filter(email = email)
else:
raise ValueError('One of user or email must be given')
if not query.filter_type(notification_type).filter_target(target).exists():
notify(notification_type, target, link, user, email, **extra_context)
def notify_pending_deadline(deadline, deltas, notification_type, target,
link, user = None, email = None, **extra_context):
"""
Ensures that a notification of the given type, target and email/user is sent
exactly once for each of the given ``deltas`` before the given ``deadline``.
It is assumed that ``deltas`` are given in descending order, i.e. the longest
delta first.
If ``user`` is present in ``kwargs``, :py:class:`~.models.UserNotification`\ s
are created, otherwise :py:class:``~.models.EmailNotification``\ s are created.
"""
# If the deadline has already passed, there is nothing to do
today = date.today()
if deadline < today:
return
# Work out whether we are using email or user notifications
if user:
query = UserNotification.objects.filter(user = user)
elif email:
query = EmailNotification.objects.filter(email = email)
else:
raise ValueError('One of user or email must be given')
# Find the most recent notification for the type/target/recipient combo
latest = query.filter_type(notification_type) \
.filter_target(target) \
.order_by('-created_at') \
.first()
# Add the deadline and the number of notifications to the context
extra_context.update(deadline = deadline, n = len(deltas))
for i, delta in enumerate(deltas, start = 1):
threshold = deadline - delta
# Deltas should be given longest first, so if we are before the threshold
# for this delta, we are done
if today <= threshold:
return
# Now we know threshold < today <= deadline
# So send a notification unless one has already been sent in the window
if not latest or latest.created_at.date() < threshold:
# Add the number of this notification to the context
extra_context = dict(extra_context, i = i)
notify(notification_type, target, link, user, email, **extra_context)
return
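# A minimal usage sketch (illustrative only: the notification type name, the
# ``grant`` target object and the link are hypothetical placeholders, not taken
# from the real JASMIN configuration):
#
#   from datetime import date, timedelta
#
#   notify_pending_deadline(
#       deadline = date.today() + timedelta(days = 30),
#       deltas = [timedelta(days = 28), timedelta(days = 7)],
#       notification_type = 'grant_expiring',
#       target = grant,
#       link = '/grants/{}/'.format(grant.pk),
#       user = grant.user,
#   )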
|
the-stack_0_4136 | """Doc2Vec sklearn wrapper"""
from pathlib import Path
import multiprocessing
import statistics
import logging
from sklearn.base import BaseEstimator, TransformerMixin
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import numpy as np
logging.getLogger("gensim").setLevel(logging.WARNING)
class Doc2VecVectorizer(BaseEstimator, TransformerMixin):
def __init__(
self,
vector_size=100,
window_size=5,
n_jobs=1,
min_count=2,
negative=5,
sample=1e-5,
epochs=20,
learning_rate=0.025,
model="dm",
pretrained=None,
):
"""
Args:
vector_size: size of vector to represent text
window_size: words left and right of context words used to create representation
min_count: filter words that appear less than min_count. default: 2
negative: number of negative words to be used for training.
if zero hierarchical softmax is used. default: 5
sample: threshold for downsampling high frequency words. default: 1e-5
learning_rate: learning rate used by SGD. default: 0.025
model: underlying model architecture, one of dm or dbow. default: dm
epochs: number of passes over training data. default: 20
n_jobs: number of cores to use (-1 for all). default: 1
pretrained: path to directory containing saved pretrained doc2vec artifacts
"""
self.vector_size = vector_size
self.window_size = window_size
self.epochs = epochs
self.min_count = min_count
self.negative = negative
self.sample = sample
self.n_jobs = n_jobs
self.learning_rate = learning_rate
self.model = model
self.pretrained = pretrained
def _tokenize(self, x):
return x.lower().split()
def _yield_tagged_documents(self, X):
for i, x in enumerate(X):
yield TaggedDocument(self._tokenize(x), [i])
def fit(self, X, *_):
"""
Args:
X: list of texts (strings)
"""
# If pretrained, just load, no need to fit
if self.pretrained:
self.load(self.pretrained)
return self
if self.n_jobs == -1:
workers = multiprocessing.cpu_count()
else:
workers = self.n_jobs
# TODO: Debug streaming implementation below
# atm it gives different result than non streaming
# tagged_documents = self._yield_tagged_documents(X)
# self.model = Doc2Vec(
# vector_size=self.vector_size, window_size=self.window_size,
# workers=workers, min_count=self.min_count, epochs=self.epochs
# )
# self.model.build_vocab(tagged_documents)
# self.model.train(tagged_documents, total_examples=self.model.corpus_count,
# epochs=self.model.epochs)
tagged_documents = list(self._yield_tagged_documents(X))
self.model = Doc2Vec(
tagged_documents,
vector_size=self.vector_size,
window=self.window_size,
workers=workers,
min_count=self.min_count,
epochs=self.epochs,
negative=self.negative,
sample=self.sample,
alpha=self.learning_rate,
dm=1 if self.model == "dm" else 0,
hs=1 if self.negative == 0 else 0,
)
return self
def transform(self, X):
"""
Args:
X: list of texts (strings)
Returns:
docvectors: matrix of size (nb_docs, vector_size)
"""
return np.array([self.model.infer_vector(self._tokenize(x)) for x in X])
def score(self, X):
"""
Args:
X: list of texts (strings). Needs to be the same used for fit.
Returns:
score: percentage of documents that are most similar with themselves
"""
correct = []
docvecs = self.transform(X)
for doc_id, inferred_vector in enumerate(docvecs):
sims = self.model.docvecs.most_similar(
[inferred_vector], topn=len(self.model.docvecs)
)
rank = [docid for docid, sim in sims].index(doc_id)
correct.append(int(rank == 0))
return statistics.mean(correct)
def _get_model_path(self, model_dir):
return "{}/doc2vec".format(model_dir)
def save(self, model_dir):
Path(model_dir).mkdir(parents=True, exist_ok=True)
model_path = self._get_model_path(model_dir)
self.model.save(model_path)
def load(self, model_dir):
model_path = self._get_model_path(model_dir)
self.model = Doc2Vec.load(model_path)
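# A minimal usage sketch (added for illustration): fits on a tiny corpus and
# embeds it; real corpora need far more documents for useful vectors.
if __name__ == "__main__":
    texts = [
        "malaria vaccine trial results",
        "deep learning for text classification",
        "randomised controlled trial of a new vaccine",
    ]
    vectorizer = Doc2VecVectorizer(vector_size=16, min_count=1, epochs=5)
    embeddings = vectorizer.fit(texts).transform(texts)
    print(embeddings.shape)  # (3, 16)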
|
the-stack_0_4137 | """
Bundle Adjustment using GBP.
"""
import numpy as np
import argparse
from gbp import gbp_ba
import vis
parser = argparse.ArgumentParser()
parser.add_argument("--bal_file", required=True,
help="BAL style file with BA data")
parser.add_argument("--n_iters", type=int, default=200,
help="Number of iterations of GBP")
parser.add_argument("--gauss_noise_std", type=int, default=2,
help="Standard deviation of Gaussian noise of measurement model.")
parser.add_argument("--loss", default=None,
help="Loss function: None (squared error), huber or constant.")
parser.add_argument("--Nstds", type=float, default=3.,
help="If loss is not None, number of stds at which point the "
"loss transitions to linear or constant.")
parser.add_argument("--beta", type=float, default=0.01,
help="Threshold for the change in the mean of adjacent beliefs for "
"relinearisation at a factor.")
parser.add_argument("--num_undamped_iters", type=int, default=6,
help="Number of undamped iterations at a factor node after relinearisation.")
parser.add_argument("--min_linear_iters", type=int, default=8,
help="Minimum number of iterations between consecutive relinearisations of a factor.")
parser.add_argument("--eta_damping", type=float, default=0.4,
help="Max damping of information vector of messages.")
parser.add_argument("--prior_std_weaker_factor", type=float, default=50.,
help="Ratio of std of information matrix at measurement factors / "
"std of information matrix at prior factors.")
parser.add_argument("--float_implementation", action='store_true', default=False,
help="Float implementation, so start with strong priors that are weakened")
parser.add_argument("--final_prior_std_weaker_factor", type=float, default=100.,
help="Ratio of information at measurement factors / information at prior factors "
"after the priors are weakened (for floats implementation).")
parser.add_argument("--num_weakening_steps", type=int, default=5,
help="Number of steps over which the priors are weakened (for floats implementation)")
args = parser.parse_args()
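# Example invocation (a sketch; the script and data file names are hypothetical):
#   python ba_gbp.py --bal_file data/problem-49-7776.txt --n_iters 200 --loss huber
#   python ba_gbp.py --bal_file data/problem-49-7776.txt --float_implementation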
print('Configs: \n', args)
configs = dict({
'gauss_noise_std': args.gauss_noise_std,
'loss': args.loss,
'Nstds': args.Nstds,
'beta': args.beta,
'num_undamped_iters': args.num_undamped_iters,
'min_linear_iters': args.min_linear_iters,
'eta_damping': args.eta_damping,
'prior_std_weaker_factor': args.prior_std_weaker_factor,
})
if args.float_implementation:
configs['final_prior_std_weaker_factor'] = args.final_prior_std_weaker_factor
configs['num_weakening_steps'] = args.num_weakening_steps
weakening_factor = np.log10(args.final_prior_std_weaker_factor) / args.num_weakening_steps
graph = gbp_ba.create_ba_graph(args.bal_file, configs)
print(f'\nData: {args.bal_file}\n')
print(f'Number of keyframes: {len(graph.cam_nodes)}')
print(f'Number of landmarks: {len(graph.lmk_nodes)}')
print(f'Number of measurement factors: {len(graph.factors)}\n')
# Sets prior factors automatically to be much weaker than measurement factors.
graph.generate_priors_var(weaker_factor=args.prior_std_weaker_factor)
graph.update_all_beliefs()
# Set up visualisation
scene = vis.ba_vis.create_scene(graph)
viewer = vis.ba_vis.TrimeshSceneViewer(scene=scene, resolution=scene.camera.resolution)
viewer.show()
for i in range(args.n_iters):
# To copy weakening of strong priors as must be done on IPU with float
if args.float_implementation and (i+1) % 2 == 0 and (i < args.num_weakening_steps * 2):
print('Weakening priors')
graph.weaken_priors(weakening_factor)
# At the start, allow a larger number of iterations before linearising
if i == 3 or i == 8:
for factor in graph.factors:
factor.iters_since_relin = 1
are = graph.are()
energy = graph.energy()
n_factor_relins = 0
for factor in graph.factors:
if factor.iters_since_relin == 0:
n_factor_relins += 1
print(f'Iteration {i} // ARE {are:.4f} // Energy {energy:.4f} // Num factors relinearising {n_factor_relins}')
viewer.update(graph)
graph.synchronous_iteration(robustify=True, local_relin=True)
|
the-stack_0_4138 | """
Copyright (c) 2017 Max deGroot, Ellis Brown
Released under the MIT license
https://github.com/amdegroot/ssd.pytorch
Updated by: Takuya Mouri
"""
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
# handbook
# from torch.autograd import Variable
# handbook
from data import coco as cfg
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching,
bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
use_gpu=True):
super(MultiBoxLoss, self).__init__()
self.use_gpu = use_gpu
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = cfg['variance']
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
targets (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
        # split the predictions into location offsets, confidences and prior boxes
loc_data, conf_data, priors = predictions
num = loc_data.size(0)
priors = priors[:loc_data.size(1), :]
num_priors = (priors.size(0))
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
        # allocate tensors for the ground-truth offsets and labels
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
        # loop over the batch and split each target into ground-truth boxes and labels
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
            # match ground-truth boxes against the prior boxes
match(self.threshold, truths, defaults, self.variance, labels,
loc_t, conf_t, idx)
if self.use_gpu:
# handbook
#loc_t = loc_t.cuda()
#conf_t = conf_t.cuda()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
loc_t = loc_t.to(device)
conf_t = conf_t.to(device)
# handbook
# wrap targets
# handbook
#loc_t = Variable(loc_t, requires_grad=False)
#conf_t = Variable(conf_t, requires_grad=False)
# handbook
        # boolean mask of positive boxes (class id > 0)
        pos = conf_t > 0
        # number of positive boxes
        num_pos = pos.sum(dim=1, keepdim=True)
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
        # indices of the positive boxes
        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
        # predicted offsets
        loc_p = loc_data[pos_idx].view(-1, 4)
        # ground-truth offsets
        loc_t = loc_t[pos_idx].view(-1, 4)
        # localization loss
loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
# handbook
#loss_c[pos] = 0 # filter out pos boxes for now
#loss_c = loss_c.view(num, -1)
loss_c = loss_c.view(num, -1)
loss_c[pos] = 0 # filter out pos boxes for now
# handbook
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
        # filter the predicted confidences conf_data with pos_idx + neg_idx
        conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
        # filter the ground-truth labels conf_t with pos and neg
        targets_weighted = conf_t[(pos+neg).gt(0)]
        # class confidence (classification) loss
        loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
# handbook
#N = num_pos.data.sum()
N = num_pos.data.sum().double()
loss_l = loss_l.double()
loss_c = loss_c.double()
# handbook
loss_l /= N
loss_c /= N
return loss_l, loss_c
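# Minimal usage sketch (shapes follow the class docstring; the prediction tensors are
# assumed to come from an SSD head and ``targets`` from the matching data loader):
#
#   criterion = MultiBoxLoss(num_classes=21, overlap_thresh=0.5, prior_for_matching=True,
#                            bkg_label=0, neg_mining=True, neg_pos=3, neg_overlap=0.5,
#                            encode_target=False, use_gpu=torch.cuda.is_available())
#   loss_l, loss_c = criterion((loc_preds, conf_preds, priors), targets)
#   loss = loss_l + loss_c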
|
    the-stack_0_4139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''from: http://www.dongwm.com/archives/pythonban-ge-ren-jian-li/'''
import re
import random
def color(messages):
color = '\x1B[%d;%dm' % (1, random.randint(30, 37))
return '%s %s\x1B[0m' % (color, messages)
def len_zh(data):
temp = re.findall('[^a-zA-Z0-9._ ]+', data)
count = 0
for i in temp:
count += len(i)
return(count)
def colorprint(mes, flag=True):
def _deco(func):
def wrapper(*args):
res = func(*args)
print(color(mes + ':\n'))
if flag:
for k1, v1 in res.items():
zh = len_zh(k1)
if not isinstance(v1, dict):
print('{0}: {1}'.format(k1.ljust(20 + zh), v1))
else:
print('{0}:'.format(k1.ljust(20 + zh)))
for k2, v2 in v1.items():
zh = len_zh(k2.decode('utf-8'))
print(' {0}: {1}'.format(k2.ljust(16 + zh), v2))
else:
for i in res:
if not isinstance(i[1], dict):
print(i)
else:
for k, v in i[1].items():
zh = len_zh(k.decode('utf-8'))
print('{0}[{1}]: {2}'.format(
k.ljust(17 + zh), i[0], v))
print('\n')
return res
return wrapper
return _deco
class Resume(object):
def __str__(self):
return color('董伟明的python简历'.center(400))
@property
@colorprint('个人信息')
def personal_information(self):
return {
'Name': '董伟明',
'Gender': 'Male',
'Born': [1985, 8, 9],
'Education': {
'School Name': '保定科技职业学院',
'Major': '烹饪工艺与营养',
'Degree': 'Three-year college',
'Graduation': 2009
},
'QQ': '6196622X',
'Tel': '13552651XXX',
'Email': '[email protected]',
'Target Positions': re.compile(
"'Python Developer'|DevOps", re.I | re.M).pattern
}
@property
@colorprint('个人特点')
def characteristics(self):
return {
'心里承受能力强': '从非计算机专业-Linux运维-Python开发',
'热衷和喜爱': '正是因为喜欢IT, 我才会放弃大学所学专业',
'自学能力强': '没有大学的计算机基础, 都是自学',
'毅力和耐性': '从不放弃一个解决不了的难题,看过的计算机专业技术多于700页的书籍>30本', # noqa
'is_geek': True
}
@property
@colorprint('个人能力')
def skills(self):
return {
'Language': {
'熟悉': ['Python', 'Ruby', 'Bash'],
'了解': ['Haskell', 'Lisp', 'Erlang']},
'OS': ['Gentoo', 'Debian', 'Centos/Rhel', 'Opensuse'],
'Tool': ['Vim', 'Mercurial', 'Git'],
'Databaseandtools': ['MySQL',
'PostgreSQL', 'MongoDB', 'Redis', 'Memcached', 'SQLAlchemy'],
'WebFramework': {
'熟悉': ['Tornado', 'Django', 'Gae'],
'了解': ['Flask']
},
'OtherFramework': ['Twisted', 'gevent',
'stackless', 'scrapy', 'mechanize'],
'GUI': 'pyqt',
'Network': 'Cisco Certified Security Professional',
'Other': '给Gentoo和Centos提交过bug'
}
@property
@colorprint('工作经验', False)
def work_experience(self):
return enumerate([
{
'Time period': '2011.10-2012.08',
'Company Name': 'XX(北京)科技有限公司',
'Position': '运维开发工程师'
},
{
'Time period': '2009.10-2011.10',
'Company Name': 'XX(北京)科技有限公司',
'Position': '运维工程师'
},
])
@property
@colorprint('项目经验', False)
def project_experience(self):
return enumerate([
{
'Project': 'kvm远程管理系统',
'Description': ('前台(django)接手至其它同事并完成维护,'
'后台独立完成,用来创建,修改,删除kvm,查看状态信息等')
},
{
'Project': 'postfix群发邮件系统',
'Description': ('前台(tornado),为其它部门提供发送邮件的web端, '
'并作为数据收集服务端,前后台独立完成')
},
{
'Project': 'windows个人安全终端系统',
'Description': ('前后台和接收数据的socket服务器独立完成,'
'客户端图形编程使用qt')
},
{
'Project': '地推IDC质量测试系统',
'Description': ('还在代码实现中,前台flask, 数据接收服务准备'
'使用twisted,客户端为windows进程')
}
])
@property
@colorprint('@Where', False)
def findme(self):
return enumerate([
{
'Link': 'http://www.dongwm.com',
'Description': '个人技术博客'},
{
'Link': 'http://www.zhihu.com/people/dongweiming',
'Description': '知乎'},
{
'Link': 'http://youhouer.appspot.com',
'Description': '基于Google App Engine的前端网站'
}
])
def show(self):
prolist = [i for i in dir(self) if not i.startswith('_')
and not i.startswith('personal')]
self.personal_information
for pro in prolist:
getattr(self, pro)
if __name__ == '__main__':
resume = Resume()
resume.show()
|
the-stack_0_4140 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
"""
Parse the startup arguments of docker containers (services).
require: python 3
author: Song yanlin
mail: [email protected]
date: 2021-12-26
"""
import os
from sys import argv
import docker
from docker.errors import APIError
def unit_converter(size: int) -> str or int:
"""存储单位转换
byte 转换 GB、MB、KB
:param size: 字节数
:return:
"""
if size <= 0:
return 0
if (size >> 30) > 0:
return f"{size >> 30}GB"
elif (size >> 20) > 0:
return f"{size >> 20}MB"
elif (size >> 10) > 0:
return f"{size >> 10}KB"
else:
return size
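# Illustrative values (not from the original source):
#   unit_converter(512) -> 512, unit_converter(2048) -> "2KB",
#   unit_converter(3 * 1024**2) -> "3MB", unit_converter(1 << 30) -> "1GB"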
def get_user_methods_by_class_(cls) ->list:
"""获取指定类的用户自定义方法
:param cls: class
类实例
:return: list
用户定义的方法名列表.
返回的list元素为方法(fundtion),而非str型的方法名,是可以调用的方法对象。
"""
methds = []
for method in dir(cls):
if method.startswith('__'):
continue
if method.endswith('__'):
continue
if callable(getattr(cls, method)):
methds.append(getattr(cls, method))
return methds
def camel2connector(s: str):
"""驼峰字符转连接字符格式。
DriverOpts -> driver-opt
:param s:
:return:
"""
if len(s) <= 1:
return s.lower()
s_list = list(s)
for i in range(len(s_list)):
if i != 0 and ('A' <= s_list[i] <= 'Z'):
s_list[i] = s_list[i].lower()
s_list.insert(i, '-')
ss = "".join(s_list).lower()
if ss.endswith("s"):
ss = ss[:-1]
return ss
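# Illustrative values (not from the original source):
#   camel2connector("DriverOpts") -> "driver-opt"
#   camel2connector("PublishMode") -> "publish-mode"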
def file_mode_converter(num: int):
"""
    Convert a decimal mode to user:group:other mode.
    0444 <--> 292
    444 --> 100 100 100(2) --> 292(10)
    :param num: decimal number
    :return: user:group:other mode
        format: 0ugo
"""
    # extract the user/group/other permission bits
user = (num & 0b111000000) >> 6
group = (num & 0b111000) >> 3
other = num & 0b111
return f"0{user}{group}{other}"
def list_or_dict_to_ini(o, key: str):
"""list或dict对象转 initialization file 格式
:return:
"""
ini = ""
try:
if type(o) == list:
if o :
for i in o:
ini += f"{camel2connector(key)}={i},"
elif type(o) == dict:
for k in o:
ini += f"{camel2connector(key)}={k}={o[k]},"
        # strip the trailing ","
if ini and ini.endswith(","):
ini = ini[:-1]
except:
pass
return ini
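# Illustrative values (not from the original source):
#   list_or_dict_to_ini(["a", "b"], "DriverOpts") -> "driver-opt=a,driver-opt=b"
#   list_or_dict_to_ini({"com.example.opt": "1"}, "DriverOpts") -> "driver-opt=com.example.opt=1"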
class MYDOCKER(object):
def __init__(self, service=None):
super(MYDOCKER, self).__init__()
if not os.path.exists("/var/run/docker.sock"):
self.help_msg()
exit(1)
self.client = docker.DockerClient(base_url='unix://var/run/docker.sock')
self.api_client = docker.APIClient(base_url='unix://var/run/docker.sock')
# service name or service id. type is str
self.service = service
self.inspect:dict = {}
self.docker_service_create = ""
self.options = {"kv": [], "k": []}
self.image = None # str
self.args = []
self.entity_info = {'type': None, 'name': None} # Options: container, service, stack
def get_services(self) -> list:
return self.api_client.services()
def _print_command(self):
"""
Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...]
:return: str
运行容器的完整命令
"""
if self.entity_info['type'] == "stack":
print(f"This is a docker stack: {self.entity_info['name']}.")
print("Reverse stack to a compose file reference `https://hub.docker.com/repository/docker/cucker/stack2compose`")
print("docker service create command: ")
if not self.inspect:
return
options_key = ""
# if self.options['k']:
# options_key = "-"
        # flag-style options (key only)
for k in self.options['k']:
options_key += f"{k} "
options_key = options_key.strip(" ")
        # key-value style options
options_kv = ""
is_pretty = len(self.options['kv']) > 2
if is_pretty:
for dic in self.options['kv']:
_k = list(dic.keys())[0]
if _k.endswith('='):
options_kv += f" {_k}{dic[_k]} \\\n"
else:
options_kv += f" {_k} {dic[_k]} \\\n"
if options_key:
options = f"{options_key} \\\n {options_kv.lstrip(' ')}"
else:
options = f"{options_kv}".lstrip(" ")
else:
for dic in self.options['kv']:
_k = list(dic.keys())[0]
if _k.endswith('='):
options_kv += f"{_k}{dic[_k]} "
else:
options_kv += f"{_k} {dic[_k]} "
options = f"{options_key} {options_kv}".strip()
command = ""
if self.args:
# _args = " ".join(self.args[0])
_args = ""
for i in self.args[0]:
                # quote sub-commands that contain spaces or quotes (e.g. the command after sh -c)
if i.__contains__(' "'):
i = f"'{i}'"
elif i.__contains__(" '"):
i = f'"{i}"'
elif i.__contains__(" "):
i = f'"{i}"'
_args += f"{i} "
command = f"docker service creat {options} {self.image} {_args}"
else:
command = f"docker service creat {options} {self.image}"
print(command)
def check_entity_type(self):
"""判断传入的实体类型
:return:
"""
if not self.inspect:
return
if 'Spec' in list(self.inspect.keys()):
is_stack = False
try:
if self.inspect['Spec']['Labels']['com.docker.stack.namespace']:
is_stack = True
except:
pass
if is_stack:
self.entity_info['type'] = "stack"
self.entity_info['name'] = self.inspect['Spec']['Labels']['com.docker.stack.namespace']
else:
self.entity_info['type'] = "service"
def _get_inspect(self):
"""get service inspect
:return:
"""
try:
self.inspect = self.api_client.inspect_service(self.service)
except APIError as e:
print(e)
exit(-1)
def _parse_service_inspect(self):
if not self.entity_info['type']:
self.check_entity_type()
# if self.entity_info['type'] != "service":
# return
if not self.inspect:
return
# image
self.image: str = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Image'].split('@')[0]
if self.image.startswith('sha256:'):
self.image = self.image.split(':')[1]
# # name of service
# self.options['kv'].append(
# {'--name': self.inspect['Spec']['Name']}
# )
# parse options
obj = PARSE_OPTIONS(self.inspect, self.options, self.args)
for m in get_user_methods_by_class_(obj):
try:
m()
except:
pass
def get_network_name_by_id(self, network_id: str):
return self.api_client.inspect_network(network_id)['Name']
@staticmethod
def help_msg():
_MSG = """Usage:
# Command alias
echo "alias get_command_service='docker run --rm -v /var/run/docker.sock:/var/run/docker.sock cucker/get_command_by_service'" >> ~/.bashrc
. ~/.bashrc
    # Execute command
## For all services
get_command_service {all}
## For one or more services
get_command_service <SERVICE> [SERVICE...]
"""
print(_MSG)
def start(self):
self._get_inspect()
self._parse_service_inspect()
self._print_command()
class PARSE_OPTIONS(object):
"""从service inspect信息中解析docker service create命令的options
"""
dock = MYDOCKER()
def __init__(self, inspect: dict, options: dict, args: list):
self.inspect = inspect
self.options = options
self.args = args
# --name
    # the leading underscore makes this method sort first in dir()
def _name(self):
self.options['kv'].append(
{'--name': self.inspect['Spec']['Name']}
)
# --replicas, Number of tasks
def replicas(self):
if "Replicated" in list(self.inspect['Spec']['Mode'].keys()):
if self.inspect['Spec']['Mode']['Replicated']['Replicas'] !=1:
self.options['kv'].append(
{'--replicas': self.inspect['Spec']['Mode']['Replicated']['Replicas']}
)
# --mode, options: replicated, global, replicated-job, or global-job. replicated is the default.
# --max-concurrent
def mode(self):
mode: list = list(self.inspect['Spec']['Mode'].keys())
# global
if "Global" in mode:
self.options['kv'].append(
{'--mode': 'global'}
)
# replicated-job
"""
"Mode": {
"ReplicatedJob": {
"MaxConcurrent": 2,
"TotalCompletions": 10
}
},
"""
if "ReplicatedJob" in mode:
self.options['kv'].append(
{'--mode': 'replicated-job'}
)
# --max-concurrent
if self.inspect['Spec']['Mode']['ReplicatedJob']['MaxConcurrent'] != 1:
self.options['kv'].append({
'--max-concurrent': self.inspect['Spec']['Mode']['ReplicatedJob']['MaxConcurrent']
})
if self.inspect['Spec']['Mode']['ReplicatedJob']['TotalCompletions'] != 1:
self.options['kv'].append(
{'--replicas': self.inspect['Spec']['Mode']['ReplicatedJob']['TotalCompletions']}
)
# global-job
if "GlobalJob" in mode:
self.options['kv'].append({
'--mode': 'global-job'
})
# --publish, -p
def publish(self):
ports:list = self.inspect['Spec']['EndpointSpec']['Ports']
if ports:
for port in ports:
if port['PublishMode'] == "ingress":
if port['Protocol'] == "tcp":
p = f"{port['PublishedPort']}:{port['TargetPort']}"
else:
p = f"{port['PublishedPort']}:{port['TargetPort']}/{port['Protocol']}"
else:
p = f"published={port['PublishedPort']},target={port['TargetPort']},protocol={port['Protocol']},mode=host"
self.options['kv'].append(
{'--publish': p}
)
# --mount
def mount(self):
mounts: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Mounts']
for m in mounts:
try:
readonly = ""
keys_m = list(m.keys())
if "ReadOnly" in keys_m:
if m['ReadOnly']:
readonly = f",readonly=true"
else:
readonly = f",readonly=false"
v = ""
if "VolumeOptions" in keys_m:
if "DriverConfig" in list(m['VolumeOptions'].keys()) and m['VolumeOptions']['DriverConfig']:
v = f"type={m['Type']}{readonly},volume-driver={m['VolumeOptions']['DriverConfig']['Name']},source={m['Source']},destination={m['Target']}"
elif "Labels" in list(m['VolumeOptions'].keys()):
labels: dict = m['VolumeOptions']['Labels']
lab = ""
for _k in labels:
lab += f'volume-label="{_k}={labels[_k]}",'
if lab.endswith(","):
lab = lab[:-1]
v = f"type={m['Type']}{readonly},source={m['Source']},destination={m['Target']},{lab}"
else:
v = f"type={m['Type']}{readonly},source={m['Source']},destination={m['Target']}"
if v:
self.options['kv'].append(
{'--mount': v}
)
except:
pass
# --network
def network(self):
networks: list = self.inspect['Spec']['TaskTemplate']['Networks']
for net in networks:
if len(net.keys()) == 1:
v = PARSE_OPTIONS.dock.get_network_name_by_id(net['Target'])
else:
v = f"name={PARSE_OPTIONS.dock.get_network_name_by_id(net['Target'])}"
for k in net:
if k == "Target":
continue
v += f",{list_or_dict_to_ini(net[k], k)}"
self.options['kv'].append(
{'--network': v}
)
# --env , -e
# --env-file
def environment(self):
env: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Env']
for e in env:
self.options['kv'].append(
{'--env': e}
)
# --workdir, -w
def workdir(self):
dir: str = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Dir']
if dir:
self.options['kv'].append(
{'--workdir': dir}
)
# --constraint
def constraint(self):
constraints = self.inspect['Spec']['TaskTemplate']['Placement']['Constraints']
if not constraints:
return
for c in constraints:
self.options['kv'].append(
{'--constraint': c}
)
# --container-label
def container_label(self):
labels: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Labels']
for k in labels:
self.options['kv'].append(
{'--container-label': f"{k}={labels[k]}"}
)
# --health-cmd
# --health-interval
# --health-retries
# --health-start-period
# --health-timeout
# --no-healthcheck
def health_check(self):
hc: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
# --health-cmd
try:
if hc['Test'][0] == "CMD-SHELL":
self.options['kv'].append(
{'--health-cmd': f'"{hc["Test"][1]}"'}
)
# --no-healthcheck
elif hc['Test'][0] == "NONE":
self.options['k'].append("--no-healthcheck")
except:
pass
# --health-interval
try:
if hc['Interval']:
self.options['kv'].append(
{'--health-interval': f"{int(hc['Interval'] / 10**9)}s"}
)
except:
pass
# --health-retries
try:
if hc['Retries']:
self.options['kv'].append(
{'--health-retries': hc['Retries']}
)
except:
pass
# --health-start-period
try:
if hc['StartPeriod']:
self.options['kv'].append(
{'--health-start-period': f"{int(hc['StartPeriod'] / 10**9)}s"}
)
except:
pass
# --health-timeout
if hc['Timeout']:
self.options['kv'].append(
{'--health-timeout': f"{int(hc['Timeout'] / 10**9)}s"}
)
# --secret
def secret(self):
secrets: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
for sec in secrets:
v = ""
if sec['File']['UID'] == "0" and sec['File']['GID'] == "0":
if sec['File']['Mode'] == 292:
v = f"source={sec['SecretName']},target={sec['File']['Name']}"
else:
v = f"source={sec['SecretName']},target={sec['File']['Name']},mode={file_mode_converter(sec['File']['Mode'])}"
else:
if sec['File']['Mode'] == 292:
v = f"source={sec['SecretName']},target={sec['File']['Name']},uid={sec['File']['UID']}," \
f"gid={sec['File']['GID']}"
else:
v = f"source={sec['SecretName']},target={sec['File']['Name']},uid={sec['File']['UID']}," \
f"gid={sec['File']['GID']},mode={file_mode_converter(sec['File']['Mode'])}"
self.options['kv'].append(
{'--secret': v}
)
# --tty , -t
def tty(self):
tty = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['TTY']
if tty:
self.options['k'].append('-t')
# --cap-add
def cap_add(self):
caps: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['CapabilityAdd']
for cap in caps:
self.options['kv'].append(
{'--cap-add': cap}
)
# --cap-drop
def cap_drop(self):
caps = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['CapabilityDrop']
for cap in caps:
self.options['kv'].append(
{'--cap-drop': cap}
)
# --config
def config(self):
cs: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Configs']
for c in cs:
v = ""
if c['File']['UID'] == "0" and c['File']['GID'] == "0":
if c['File']['Mode'] == 292: # 292 --> mode=0444
                    if c['ConfigName'] == c['File']['Name']:
v = c['ConfigName']
else:
v = f"source={c['ConfigName']},target={c['File']['Name']}"
else:
v = f"source={c['ConfigName']},target={c['File']['Name']},mode={file_mode_converter(c['File']['Mode'])}"
else:
if c['File']['Mode'] == 292:
v = f"source={c['ConfigName']},target={c['File']['Name']},uid={c['File']['UID']},gid={c['File']['GID']}"
else:
v = f"source={c['ConfigName']},target={c['File']['Name']},uid={c['File']['UID']}," \
f"gid={c['File']['GID']},mode={file_mode_converter(c['File']['Mode'])}"
self.options['kv'].append(
{'--config': v}
)
# --detach , -d
# --dns
# --dns-option
# --dns-search
def dns_config(self):
dnsconfig: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig']
if not dnsconfig:
return
keys = list(dnsconfig.keys())
## --dns
if "Nameservers" in keys:
for ns in dnsconfig['Nameservers']:
self.options['kv'].append(
{'--dns': f'"{ns}"'}
)
## --dns-search
if "Search" in keys:
for s in dnsconfig['Search']:
self.options['kv'].append(
{'--dns-search': s}
)
## --dns-option
if "Options" in keys:
for op in dnsconfig['Options']:
self.options['kv'].append(
{'--dns-option': op}
)
# --endpoint-mode, default is vip (vip or dnsrr)
def endpoint_mode(self):
if self.inspect['Spec']['EndpointSpec']['Mode'] != "vip":
self.options['kv'].append(
{'--endpoint-mode': self.inspect['Spec']['EndpointSpec']['Mode']}
)
# --entrypoint
def entrypoint(self):
containerSpec: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']
if "Command" in list(containerSpec.keys()):
ep = " ".join(containerSpec['Command'])
if ep.__contains__(' "'):
v = f"'{ep}'"
elif ep.__contains__(" '"):
v = f'"{ep}"'
elif ep.__contains__(" "):
v = f'"{ep}"'
else:
v = ep
self.options['kv'].append(
{"--entrypoint": v}
)
# --generic-resource
def generic_resource(self):
grs: list = self.inspect['Spec']['TaskTemplate']['Resources']['Reservations']['GenericResources']
for gr in grs:
self.options['kv'].append(
{'--generic-resource': f'"{gr["DiscreteResourceSpec"]["Kind"]}={gr["DiscreteResourceSpec"]["Value"]}"'}
)
    # --group, the group must exist on the host.
def group(self):
gs: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Groups']
for g in gs:
self.options['kv'].append(
{'--group': g}
)
# --host, Set one or more custom host-to-IP mappings (host:ip)
def host(self):
hosts = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Hosts']
for h in hosts:
h_split = h.split(" ")
self.options['kv'].append(
{'--host': f"{h_split[1]}:{h_split[0]}"}
)
# --hostname
def hostname(self):
if "Hostname" not in list(self.inspect['Spec']['TaskTemplate']['ContainerSpec'].keys()):
return
self.options['kv'].append(
{'--hostname': self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Hostname']}
)
# --init, Use an init inside each service container to forward signals and reap processes
def init(self):
if self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Init']:
self.options['k'].append("--init")
# --isolation
def isolation(self):
if self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Isolation'] != "default":
self.options['kv'].append(
{'--isolation': self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Isolation']}
)
# --label , -l
def label(self):
labels = self.inspect['Spec']['Labels']
if labels:
for k in labels:
self.options['kv'].append(
{'--label': f"{k}={labels[k]}"}
)
# --limit-cpu
# --limit-memory
# --limit-pids, Limit maximum number of processes (default 0 = unlimited)
def resources_limits(self):
rl: dict = self.inspect['Spec']['TaskTemplate']['Resources']['Limits']
## --limit-memory
keys = list(rl.keys())
if "MemoryBytes" in keys:
self.options['kv'].append(
{'--limit-memory': unit_converter(rl['MemoryBytes'])}
)
## --limit-cpu
if "NanoCPUs" in keys:
self.options['kv'].append(
{'--limit-cpu': rl['NanoCPUs'] / 10**9}
)
## --limit-pids
if "Pids" in keys:
self.options['kv'].append(
{'--limit-pids': rl['Pids']}
)
# --log-driver
# --log-opt
def log_driver(self):
logdriver: dict = self.inspect['Spec']['TaskTemplate']['LogDriver']
## --log-driver
if "Name" in list(logdriver.keys()):
self.options['kv'].append(
{'--log-driver': logdriver['Name']}
)
## --log-opt
if "Options" in list(logdriver.keys()):
for k in logdriver['Options']:
self.options['kv'].append(
{'--log-opt': f"{k}={logdriver['Options'][k]}"}
)
# --no-resolve-image
def no_resolve_image(self):
image = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Image']
if not image.__contains__("sha256:"):
self.options['k'].append("--no-resolve-image")
# --placement-pref
def placement_pref(self):
preferences: list = self.inspect['Spec']['TaskTemplate']['Placement']['Preferences']
for p in preferences:
v = ""
for k in p:
                # the first key of the p[k] mapping
pk = list(p[k].keys())[0]
v += f"{camel2connector(k)}={p[k][pk]},"
if v.endswith(","):
v = v[:-1]
if not v:
continue
self.options['kv'].append(
{'--placement-pref': f'"{v}"'}
)
    # --quiet, -q
# --read-only
def read_only(self):
if "ReadOnly" in list(self.inspect['Spec']['TaskTemplate']['ContainerSpec'].keys()):
self.options['k'].append("--read-only")
# --replicas-max-per-node, Maximum number of tasks per node (default 0 = unlimited)
def replicas_max_per_node(self):
if self.inspect['Spec']['TaskTemplate']['Placement']['MaxReplicas']:
self.options['kv'].append(
{'--replicas-max-per-node': self.inspect['Spec']['TaskTemplate']['Placement']['MaxReplicas']}
)
# --reserve-cpu
def reserve_cpu(self):
nc = self.inspect['Spec']['TaskTemplate']['Resources']['Reservations']['NanoCPUs']
self.options['kv'].append(
{'--reserve-cpu': nc / 10**9}
)
# --reserve-memory
def reserve_memory(self):
mb = self.inspect['Spec']['TaskTemplate']['Resources']['Reservations']['MemoryBytes']
self.options['kv'].append(
{'--reserve-memory': unit_converter(mb)}
)
# --restart-condition, Restart when condition is met ("none"|"on-failure"|"any") (default "any")
# --restart-delay, Delay between restart attempts (ns|us|ms|s|m|h) (default 5s)
# --restart-max-attempts, Maximum number of restarts before giving up
def restart_policy(self):
rp: dict = self.inspect['Spec']['TaskTemplate']['RestartPolicy']
## --restart-condition
if rp['Condition'] != "any":
self.options['kv'].append(
{'--restart-condition': rp['Condition']}
)
## --restart-delay
if rp['Delay'] != 5000000000:
self.options['kv'].append(
{'--restart-delay': f"{int(rp['Delay'] / 10**9)}s"}
)
## --restart-max-attempts
if rp['MaxAttempts'] != 0:
self.options['kv'].append(
{'--restart-max-attempts': rp['MaxAttempts']}
)
# --rollback-delay, Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)
# --rollback-failure-action, Action on rollback failure ("pause"|"continue") (default "pause")
# --rollback-max-failure-ratio
# --rollback-monitor, Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s)
# --rollback-order, Rollback order ("start-first"|"stop-first") (default "stop-first")
# --rollback-parallelism, Maximum number of tasks rolled back simultaneously (0 to roll back all at once), The default value is 1
def rollback_config(self):
rc: dict = self.inspect['Spec']['RollbackConfig']
## --rollback-parallelism
if rc['Parallelism'] != 1:
self.options['kv'].append(
{'--rollback-parallelism': rc['Parallelism']}
)
## --rollback-failure-action
if rc['FailureAction'] != "pause":
self.options['kv'].append(
{'--rollback-failure-action': rc['FailureAction']}
)
## --rollback-monitor
if rc['Monitor'] != 5000000000:
self.options['kv'].append(
{'--rollback-monitor': f"{int(rc['Monitor'] / 10**9)}s"}
)
## --rollback-max-failure-ratio
if rc['MaxFailureRatio'] != 0:
self.options['kv'].append(
{'--rollback-max-failure-ratio': rc['MaxFailureRatio']}
)
## --rollback-order
if rc['Order'] != "stop-first":
self.options['kv'].append(
{'--rollback-order': rc['Order']}
)
## --rollback-delay
try:
if rc['Delay']:
self.options['kv'].append(
{'--rollback-delay': f"{int(rc['Delay'] / 10 ** 9)}s"}
)
except:
pass
# --stop-grace-period, Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s)
def stop_grace_period(self):
sgp = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['StopGracePeriod']
if sgp != 10000000000:
self.options['kv'].append(
{'--stop-grace-period': f"{int(sgp / 10**6)}ms"}
)
# --stop-signal
def stop_signal(self):
self.options['kv'].append(
{'--stop-signal': self.inspect['Spec']['TaskTemplate']['ContainerSpec']['StopSignal']}
)
# --sysctl
def sysctl(self):
sysctls: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Sysctls']
for k in sysctls:
self.options['kv'].append(
{'--sysctl': f"{k}={sysctls[k]}"}
)
# --ulimit
def ulimit(self):
ulimits: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Ulimits']
for u in ulimits:
if u['Hard'] != u['Soft']:
v = f"{u['Soft']}:{u['Hard']}"
else:
v = u['Soft']
self.options['kv'].append(
{'--ulimit': f"{u['Name']}={v}"}
)
# --update-delay
# --update-parallelism, Maximum number of tasks updated simultaneously (0 to update all at once)
# --update-failure-action, Action on update failure ("pause"|"continue"|"rollback") (default "pause")
# --update-monitor
# --update-max-failure-ratio, Failure rate to tolerate during an update (default 0)
# --update-order, Update order ("start-first"|"stop-first") (default "stop-first")
def update_config(self):
uc: dict = self.inspect['Spec']['UpdateConfig']
## --update-parallelism
if uc['Parallelism'] != 1:
self.options['kv'].append(
{'--update-parallelism': uc['Parallelism']}
)
## --update-failure-action
if uc['FailureAction'] != "pause":
self.options['kv'].append(
{'--update-failure-action': uc['FailureAction']}
)
## --update-monitor
if uc['Monitor'] != 5000000000:
self.options['kv'].append(
{'--rollback-monitor': f"{int(uc['Monitor'] / 10 ** 9)}s"}
)
## --update-max-failure-ratio
if uc['MaxFailureRatio'] != 0:
self.options['kv'].append(
{'--update-max-failure-ratio': uc['MaxFailureRatio']}
)
## --update-order
if uc['Order'] != "stop-first":
self.options['kv'].append(
{'--update-order': uc['Order']}
)
## --update-delay
try:
if uc['Delay']:
self.options['kv'].append(
{'--update-delay': f"{int(uc['Delay'] / 10 ** 9)}s"}
)
except:
pass
# --user, -u, Username or UID (format: <name|uid>[:<group|gid>])
def user(self):
u = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['User']
self.options['kv'].append(
{'--user': u}
)
# --with-registry-auth
# Args, docker service create command args
def arguments(self):
li: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Args']
if li:
self.args.append(li)
if __name__ == '__main__':
if len(argv) < 2 or argv[1] == "--help":
MYDOCKER.help_msg()
exit(1)
    # print the docker service create command for every service
elif argv[1] == "{all}":
for serv in MYDOCKER().get_services():
print(f"=== service: {serv['Spec']['Name']} ===")
try:
MYDOCKER(serv['Spec']['Name']).start()
except:
pass
print("\n")
elif len(argv) > 2:
for s in argv[1:]:
print(f"=== service: {s} ===")
try:
MYDOCKER(s).start()
except:
pass
print("\n")
else:
mydocker = MYDOCKER(argv[1])
ret = mydocker.start() |
the-stack_0_4143 | import numpy as np
import pytest
import sys
from tempfile import TemporaryDirectory
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Callable
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.cd import ChiSquareDrift, KSDrift, MMDDrift, TabularDrift
from alibi_detect.cd.preprocess import UAE
from alibi_detect.models.autoencoder import DecoderLSTM, EncoderLSTM
from alibi_detect.od import (IForest, LLR, Mahalanobis, OutlierAEGMM, OutlierVAE, OutlierVAEGMM,
OutlierProphet, SpectralResidual, OutlierSeq2Seq, OutlierAE)
from alibi_detect.utils.saving import save_detector, load_detector # type: ignore
input_dim = 4
latent_dim = 2
n_gmm = 2
threshold = 10.
samples = 6
seq_len = 10
p_val = .05
X_ref = np.random.rand(samples * input_dim).reshape(samples, input_dim)
X_ref_cat = np.tile(np.array([np.arange(samples)] * input_dim).T, (2, 1))
X_ref_mix = X_ref.copy()
X_ref_mix[:, 0] = np.tile(np.array(np.arange(samples // 2)), (1, 2)).T[:, 0]
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
kwargs = {'encoder_net': encoder_net,
'decoder_net': decoder_net}
preprocess_kwargs = {'model': UAE(encoder_net=encoder_net)}
gmm_density_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim + 2,)),
Dense(10, activation=tf.nn.relu),
Dense(n_gmm, activation=tf.nn.softmax)
]
)
threshold_net = tf.keras.Sequential(
[
InputLayer(input_shape=(seq_len, latent_dim)),
Dense(5, activation=tf.nn.relu)
]
)
# define model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
detector = [
AdversarialAE(threshold=threshold,
model=model,
**kwargs),
ModelDistillation(threshold=threshold,
model=model,
distilled_model=model),
IForest(threshold=threshold),
LLR(threshold=threshold, model=model),
Mahalanobis(threshold=threshold),
OutlierAEGMM(threshold=threshold,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
**kwargs),
OutlierVAE(threshold=threshold,
latent_dim=latent_dim,
samples=samples,
**kwargs),
OutlierAE(threshold=threshold,
**kwargs),
OutlierVAEGMM(threshold=threshold,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
latent_dim=latent_dim,
samples=samples,
**kwargs),
OutlierProphet(threshold=.7,
growth='logistic'),
SpectralResidual(threshold=threshold,
window_amp=10,
window_local=10),
OutlierSeq2Seq(input_dim,
seq_len,
threshold=threshold,
threshold_net=threshold_net,
latent_dim=latent_dim),
KSDrift(p_val=p_val,
X_ref=X_ref,
preprocess_X_ref=False,
preprocess_kwargs=preprocess_kwargs),
MMDDrift(p_val=p_val,
X_ref=X_ref,
preprocess_X_ref=False,
preprocess_kwargs=preprocess_kwargs,
n_permutations=10,
chunk_size=10),
ChiSquareDrift(p_val=p_val,
X_ref=X_ref_cat,
preprocess_X_ref=True),
TabularDrift(p_val=p_val,
X_ref=X_ref_mix,
categories_per_feature={0: None},
preprocess_X_ref=True)
]
n_tests = len(detector)
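# To run only these save/load round-trip tests (the module path below is an assumption
# about the repository layout):
#   pytest -q alibi_detect/utils/tests/test_saving.py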
@pytest.fixture
def select_detector(request):
return detector[request.param]
@pytest.mark.parametrize('select_detector', list(range(n_tests)), indirect=True)
def test_save_load(select_detector):
det = select_detector
det_name = det.meta['name']
# save and load functionality does not work for OutlierProphet and Python 3.6.
# https://github.com/facebook/prophet/issues/1361
if sys.version_info.minor == 6 and isinstance(det, OutlierProphet):
return
with TemporaryDirectory() as temp_dir:
temp_dir += '/'
save_detector(det, temp_dir)
if isinstance(det, (KSDrift, MMDDrift)):
det_load = load_detector(temp_dir, **{'preprocess_kwargs': preprocess_kwargs})
else:
det_load = load_detector(temp_dir)
det_load_name = det_load.meta['name']
assert det_load_name == det_name
if not type(det_load) in [OutlierProphet, ChiSquareDrift, KSDrift, MMDDrift, TabularDrift]:
assert det_load.threshold == det.threshold == threshold
if type(det_load) in [OutlierVAE, OutlierVAEGMM]:
assert det_load.samples == det.samples == samples
if type(det_load) == AdversarialAE or type(det_load) == ModelDistillation:
for layer in det_load.model.layers:
assert not layer.trainable
if type(det_load) == MMDDrift:
assert det_load.infer_sigma
assert isinstance(det_load.permutation_test, Callable)
if type(det_load) == KSDrift:
assert det_load.n_features == latent_dim
if type(det_load) in [ChiSquareDrift, TabularDrift]:
assert isinstance(det_load.categories_per_feature, dict)
assert isinstance(det_load.X_ref_count, dict)
if type(det_load) == OutlierAEGMM:
assert isinstance(det_load.aegmm.encoder, tf.keras.Sequential)
assert isinstance(det_load.aegmm.decoder, tf.keras.Sequential)
assert isinstance(det_load.aegmm.gmm_density, tf.keras.Sequential)
assert isinstance(det_load.aegmm, tf.keras.Model)
assert det_load.aegmm.n_gmm == n_gmm
elif type(det_load) == OutlierVAEGMM:
assert isinstance(det_load.vaegmm.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.vaegmm.decoder, tf.keras.Sequential)
assert isinstance(det_load.vaegmm.gmm_density, tf.keras.Sequential)
assert isinstance(det_load.vaegmm, tf.keras.Model)
assert det_load.vaegmm.latent_dim == latent_dim
assert det_load.vaegmm.n_gmm == n_gmm
elif type(det_load) in [AdversarialAE, OutlierAE]:
assert isinstance(det_load.ae.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.ae.decoder.decoder_net, tf.keras.Sequential)
assert isinstance(det_load.ae, tf.keras.Model)
elif type(det_load) == ModelDistillation:
assert isinstance(det_load.model, tf.keras.Sequential) or isinstance(det_load.model, tf.keras.Model)
assert (isinstance(det_load.distilled_model, tf.keras.Sequential) or
isinstance(det_load.distilled_model, tf.keras.Model))
elif type(det_load) == OutlierVAE:
assert isinstance(det_load.vae.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.vae.decoder.decoder_net, tf.keras.Sequential)
assert isinstance(det_load.vae, tf.keras.Model)
assert det_load.vae.latent_dim == latent_dim
elif type(det_load) == Mahalanobis:
assert det_load.clip is None
assert det_load.mean == det_load.C == det_load.n == 0
assert det_load.meta['detector_type'] == 'online'
elif type(det_load) == OutlierProphet:
assert det_load.model.interval_width == .7
assert det_load.model.growth == 'logistic'
assert det_load.meta['data_type'] == 'time-series'
elif type(det_load) == SpectralResidual:
assert det_load.window_amp == 10
assert det_load.window_local == 10
elif type(det_load) == OutlierSeq2Seq:
assert isinstance(det_load.seq2seq, tf.keras.Model)
assert isinstance(det_load.seq2seq.threshold_net, tf.keras.Sequential)
assert isinstance(det_load.seq2seq.encoder, EncoderLSTM)
assert isinstance(det_load.seq2seq.decoder, DecoderLSTM)
assert det_load.latent_dim == latent_dim
assert det_load.threshold == threshold
assert det_load.shape == (-1, seq_len, input_dim)
elif type(det_load) in [KSDrift, MMDDrift]:
assert det_load.p_val == p_val
assert (det_load.X_ref == X_ref).all()
assert isinstance(det_load.preprocess_fn, Callable)
assert det_load.preprocess_fn.func.__name__ == 'preprocess_drift'
elif type(det_load) in [ChiSquareDrift, TabularDrift]:
assert det_load.p_val == p_val
x = X_ref_cat.copy() if isinstance(det_load, ChiSquareDrift) else X_ref_mix.copy()
assert (det_load.X_ref == x).all()
elif type(det_load) == LLR:
assert isinstance(det_load.dist_s, tf.keras.Model)
assert isinstance(det_load.dist_b, tf.keras.Model)
assert not det_load.sequential
assert not det_load.has_log_prob
|
the-stack_0_4144 | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.managers.platform import PlatformBase
class Espressif8266Platform(PlatformBase):
def configure_default_packages(self, variables, targets):
framework = variables.get("pioframework", [])
if "arduino" not in framework:
self.packages['toolchain-xtensa']['version'] = "~1.40802.0"
if "buildfs" in targets:
self.packages['tool-mkspiffs']['optional'] = False
return PlatformBase.configure_default_packages(
self, variables, targets)
def get_boards(self, id_=None):
result = PlatformBase.get_boards(self, id_)
if not result:
return result
if id_:
return self._add_upload_protocols(result)
else:
for key, value in result.items():
result[key] = self._add_upload_protocols(result[key])
return result
def _add_upload_protocols(self, board):
if not board.get("upload.protocols", []):
board.manifest['upload']['protocols'] = ["esptool", "espota"]
if not board.get("upload.protocol", ""):
board.manifest['upload']['protocol'] = "esptool"
return board
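# Illustrative platformio.ini fragment that this platform class would serve
# (the board name is just an example):
#
#   [env:d1_mini]
#   platform = espressif8266
#   framework = arduino
#   board = d1_mini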
|
the-stack_0_4145 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX wrapper for Pubsub API requests."""
import functools
from google.cloud.gapic.pubsub.v1.publisher_api import PublisherApi
from google.cloud.gapic.pubsub.v1.subscriber_api import SubscriberApi
from google.gax import CallOptions
from google.gax import INITIAL_PAGE
from google.gax.errors import GaxError
from google.gax.grpc import exc_to_code
from google.pubsub.v1.pubsub_pb2 import PubsubMessage
from google.pubsub.v1.pubsub_pb2 import PushConfig
from grpc import insecure_channel
from grpc import StatusCode
from google.cloud._helpers import _to_bytes
from google.cloud._helpers import _pb_timestamp_to_rfc3339
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import NotFound
from google.cloud.iterator import Iterator
from google.cloud.iterator import Page
from google.cloud.pubsub.topic import Topic
_FAKE_ITEMS_KEY = 'not-a-key'
class _PublisherAPI(object):
"""Helper mapping publisher-related APIs.
:type gax_api: :class:`google.pubsub.v1.publisher_api.PublisherApi`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.pubsub.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_topics(self, project, page_size=0, page_token=None):
"""List topics for the project associated with this API.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/list
:type project: str
:param project: project ID
:type page_size: int
:param page_size: maximum number of topics to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of topics. If not
passed, the API will return the first page of
topics.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.pubsub.topic.Topic`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_topics(
path, page_size=page_size, options=options)
page_iter = functools.partial(_recast_page_iterator, page_iter)
return Iterator(client=self._client, path=path,
item_to_value=_item_to_topic,
page_iter=page_iter)
def topic_create(self, topic_path):
"""API call: create a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/create
:type topic_path: str
:param topic_path: fully-qualified path of the new topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:rtype: dict
:returns: ``Topic`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.Conflict` if the topic already
exists
"""
try:
topic_pb = self._gax_api.create_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
raise Conflict(topic_path)
raise
return {'name': topic_pb.name}
def topic_get(self, topic_path):
"""API call: retrieve a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/get
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:rtype: dict
:returns: ``Topic`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
try:
topic_pb = self._gax_api.get_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
return {'name': topic_pb.name}
def topic_delete(self, topic_path):
"""API call: delete a topic
See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/delete
:type topic_path: str
:param topic_path: fully-qualified path of the new topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
"""
try:
self._gax_api.delete_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
def topic_publish(self, topic_path, messages):
"""API call: publish one or more messages to a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type messages: list of dict
:param messages: messages to be published.
:rtype: list of string
:returns: list of opaque IDs for published messages.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
options = CallOptions(is_bundling=False)
message_pbs = [_message_pb_from_mapping(message)
for message in messages]
try:
result = self._gax_api.publish(topic_path, message_pbs,
options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
return result.message_ids
def topic_list_subscriptions(self, topic_path, page_size=0,
page_token=None):
"""API call: list subscriptions bound to a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics.subscriptions/list
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type page_size: int
:param page_size: maximum number of subscriptions to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of subscriptions.
If not passed, the API will return the first page
of subscriptions.
:rtype: list of strings
:returns: fully-qualified names of subscriptions for the supplied
topic.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
try:
page_iter = self._gax_api.list_topic_subscriptions(
topic_path, page_size=page_size, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
subs = page_iter.next()
token = page_iter.page_token or None
return subs, token
class _SubscriberAPI(object):
"""Helper mapping subscriber-related APIs.
:type gax_api: :class:`google.pubsub.v1.publisher_api.SubscriberApi`
:param gax_api: API object used to make GAX requests.
"""
def __init__(self, gax_api):
self._gax_api = gax_api
def list_subscriptions(self, project, page_size=0, page_token=None):
"""List subscriptions for the project associated with this API.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/list
:type project: str
:param project: project ID
:type page_size: int
:param page_size: maximum number of subscriptions to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of subscriptions.
If not passed, the API will return the first page
of subscriptions.
:rtype: tuple, (list, str)
:returns: list of ``Subscription`` resource dicts, plus a
"next page token" string: if not None, indicates that
more topics can be retrieved with another call (pass that
value as ``page_token``).
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_subscriptions(
path, page_size=page_size, options=options)
subscriptions = [_subscription_pb_to_mapping(sub_pb)
for sub_pb in page_iter.next()]
token = page_iter.page_token or None
return subscriptions, token
def subscription_create(self, subscription_path, topic_path,
ack_deadline=None, push_endpoint=None):
"""API call: create a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/create
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type topic_path: str
:param topic_path: the fully-qualified path of the topic being
subscribed, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type ack_deadline: int
:param ack_deadline:
(Optional) the deadline (in seconds) by which messages pulled from
the back-end must be acknowledged.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
:rtype: dict
:returns: ``Subscription`` resource returned from the API.
"""
if push_endpoint is not None:
push_config = PushConfig(push_endpoint=push_endpoint)
else:
push_config = None
if ack_deadline is None:
ack_deadline = 0
try:
sub_pb = self._gax_api.create_subscription(
subscription_path, topic_path,
push_config=push_config, ack_deadline_seconds=ack_deadline)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
raise Conflict(topic_path)
raise
return _subscription_pb_to_mapping(sub_pb)
def subscription_get(self, subscription_path):
"""API call: retrieve a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:rtype: dict
:returns: ``Subscription`` resource returned from the API.
"""
try:
sub_pb = self._gax_api.get_subscription(subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
return _subscription_pb_to_mapping(sub_pb)
def subscription_delete(self, subscription_path):
"""API call: delete a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/delete
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
"""
try:
self._gax_api.delete_subscription(subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_modify_push_config(self, subscription_path,
push_endpoint):
"""API call: update push config of a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
"""
push_config = PushConfig(push_endpoint=push_endpoint)
try:
self._gax_api.modify_push_config(subscription_path, push_config)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_pull(self, subscription_path, return_immediately=False,
max_messages=1):
"""API call: retrieve messages for a subscription
See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type return_immediately: bool
:param return_immediately: if True, the back-end returns even if no
messages are available; if False, the API
call blocks until one or more messages are
available.
:type max_messages: int
:param max_messages: the maximum number of messages to return.
:rtype: list of dict
:returns: the ``receivedMessages`` element of the response.
"""
try:
response_pb = self._gax_api.pull(
subscription_path, max_messages,
return_immediately=return_immediately)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
return [_received_message_pb_to_mapping(rmpb)
for rmpb in response_pb.received_messages]
def subscription_acknowledge(self, subscription_path, ack_ids):
"""API call: acknowledge retrieved messages
See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/acknowledge
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
"""
try:
self._gax_api.acknowledge(subscription_path, ack_ids)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_modify_ack_deadline(self, subscription_path, ack_ids,
ack_deadline):
"""API call: update ack deadline for retrieved messages
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyAckDeadline
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
:type ack_deadline: int
:param ack_deadline: the deadline (in seconds) by which messages pulled
from the back-end must be acknowledged.
"""
try:
self._gax_api.modify_ack_deadline(
subscription_path, ack_ids, ack_deadline)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def _message_pb_from_mapping(message):
"""Helper for :meth:`_PublisherAPI.topic_publish`.
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return PubsubMessage(data=_to_bytes(message['data']),
attributes=message['attributes'])
def _subscription_pb_to_mapping(sub_pb):
"""Helper for :meth:`list_subscriptions`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
mapping = {
'name': sub_pb.name,
'topic': sub_pb.topic,
'ackDeadlineSeconds': sub_pb.ack_deadline_seconds,
}
if sub_pb.push_config.push_endpoint != '':
mapping['pushConfig'] = {
'pushEndpoint': sub_pb.push_config.push_endpoint,
}
return mapping
def _message_pb_to_mapping(message_pb):
"""Helper for :meth:`pull`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return {
'messageId': message_pb.message_id,
'data': message_pb.data,
'attributes': message_pb.attributes,
'publishTime': _pb_timestamp_to_rfc3339(message_pb.publish_time),
}
def _received_message_pb_to_mapping(received_message_pb):
"""Helper for :meth:`pull`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return {
'ackId': received_message_pb.ack_id,
'message': _message_pb_to_mapping(
received_message_pb.message),
}
def make_gax_publisher_api(connection):
"""Create an instance of the GAX Publisher API.
If the ``connection`` is intended for a local emulator, then
an insecure ``channel`` is created pointing at the local
Pub / Sub server.
:type connection: :class:`~google.cloud.pubsub.connection.Connection`
:param connection: The connection that holds configuration details.
:rtype: :class:`~google.cloud.pubsub.v1.publisher_api.PublisherApi`
:returns: A publisher API instance with the proper connection
configuration.
"""
channel = None
if connection.in_emulator:
channel = insecure_channel(connection.host)
return PublisherApi(channel=channel)
def make_gax_subscriber_api(connection):
"""Create an instance of the GAX Subscriber API.
If the ``connection`` is intended for a local emulator, then
an insecure ``channel`` is created pointing at the local
Pub / Sub server.
:type connection: :class:`~google.cloud.pubsub.connection.Connection`
:param connection: The connection that holds configuration details.
:rtype: :class:`~google.cloud.pubsub.v1.subscriber_api.SubscriberApi`
:returns: A subscriber API instance with the proper connection
configuration.
"""
channel = None
if connection.in_emulator:
channel = insecure_channel(connection.host)
return SubscriberApi(channel=channel)
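# Illustrative sketch (not part of the original module): both factory helpers
# take the same ``connection`` object; an insecure channel pointed at the
# local emulator is only used when ``connection.in_emulator`` is true.
#
#     publisher_api = make_gax_publisher_api(connection)
#     subscriber_api = make_gax_subscriber_api(connection)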
def _item_to_topic(iterator, resource):
"""Convert a JSON job to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: :class:`google.pubsub.v1.pubsub_pb2.Topic`
:param resource: A topic returned from the API.
:rtype: :class:`~google.cloud.pubsub.topic.Topic`
:returns: The next topic in the page.
"""
return Topic.from_api_repr(
{'name': resource.name}, iterator.client)
def _recast_page_iterator(page_iter, iterator):
"""Wrap GAX pages generator.
In particular, wrap each page and capture some state from the
GAX iterator.
Yields :class:`~google.cloud.iterator.Page` instances
:type page_iter: :class:`~google.gax.PageIterator`
:param page_iter: The iterator to wrap.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that owns each page.
"""
for items in page_iter:
fake_response = {_FAKE_ITEMS_KEY: items}
page = Page(
iterator, fake_response, _FAKE_ITEMS_KEY, _item_to_topic)
iterator.next_page_token = page_iter.page_token or None
iterator.num_results += page.num_items
yield page
|
the-stack_0_4148 | """This module provides classes that make up an issue report."""
import logging
import json
import operator
from jinja2 import PackageLoader, Environment
from typing import Dict, List
import hashlib
from mythril.solidity.soliditycontract import SolidityContract
from mythril.analysis.swc_data import SWC_TO_TITLE
from mythril.support.source_support import Source
from mythril.support.start_time import StartTime
from mythril.support.support_utils import get_code_hash
from mythril.support.signatures import SignatureDB
from time import time
log = logging.getLogger(__name__)
class Issue:
"""Representation of an issue and its location."""
def __init__(
self,
contract,
function_name,
address,
swc_id,
title,
bytecode,
gas_used=(None, None),
severity=None,
description_head="",
description_tail="",
transaction_sequence=None,
):
"""
:param contract: The contract
:param function_name: Function name where the issue is detected
:param address: The address of the issue
:param swc_id: Issue's corresponding swc-id
:param title: Title
:param bytecode: bytecode of the issue
:param gas_used: amount of gas used
:param severity: The severity of the issue
:param description_head: The top part of description
:param description_tail: The bottom part of the description
        :param transaction_sequence: The transaction sequence
"""
self.title = title
self.contract = contract
self.function = function_name
self.address = address
self.description_head = description_head
self.description_tail = description_tail
self.description = "%s\n%s" % (description_head, description_tail)
self.severity = severity
self.swc_id = swc_id
self.min_gas_used, self.max_gas_used = gas_used
self.filename = None
self.code = None
self.lineno = None
self.source_mapping = None
self.discovery_time = time() - StartTime().global_start_time
self.bytecode_hash = get_code_hash(bytecode)
self.transaction_sequence = transaction_sequence
@property
def transaction_sequence_users(self):
""" Returns the transaction sequence without pre-generated block data"""
return self.transaction_sequence
@property
def transaction_sequence_jsonv2(self):
""" Returns the transaction sequence as a json string with pre-generated block data"""
return (
self.add_block_data(self.transaction_sequence)
if self.transaction_sequence
else None
)
@staticmethod
def add_block_data(transaction_sequence: Dict):
""" Adds sane block data to a transaction_sequence """
for step in transaction_sequence["steps"]:
step["gasLimit"] = "0x7d000"
step["gasPrice"] = "0x773594000"
step["blockCoinbase"] = "0xcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcb"
step["blockDifficulty"] = "0xa7d7343662e26"
step["blockGasLimit"] = "0x7d0000"
step["blockNumber"] = "0x66e393"
step["blockTime"] = "0x5bfa4639"
return transaction_sequence
@property
def as_dict(self):
"""
:return:
"""
issue = {
"title": self.title,
"swc-id": self.swc_id,
"contract": self.contract,
"description": self.description,
"function": self.function,
"severity": self.severity,
"address": self.address,
"tx_sequence": self.transaction_sequence,
"min_gas_used": self.min_gas_used,
"max_gas_used": self.max_gas_used,
"sourceMap": self.source_mapping,
}
if self.filename and self.lineno:
issue["filename"] = self.filename
issue["lineno"] = self.lineno
if self.code:
issue["code"] = self.code
return issue
def _set_internal_compiler_error(self):
"""
Adds the false positive to description and changes severity to low
"""
self.severity = "Low"
self.description_tail += (
" This issue is reported for internal compiler generated code."
)
self.description = "%s\n%s" % (self.description_head, self.description_tail)
self.code = ""
def add_code_info(self, contract):
"""
:param contract:
"""
if self.address and isinstance(contract, SolidityContract):
codeinfo = contract.get_source_info(
self.address, constructor=(self.function == "constructor")
)
self.filename = codeinfo.filename
self.code = codeinfo.code
self.lineno = codeinfo.lineno
if self.lineno is None:
self._set_internal_compiler_error()
self.source_mapping = codeinfo.solc_mapping
else:
self.source_mapping = self.address
def resolve_function_names(self):
""" Resolves function names for each step """
if (
self.transaction_sequence is None
or "steps" not in self.transaction_sequence
):
return
signatures = SignatureDB()
for step in self.transaction_sequence["steps"]:
_hash = step["input"][:10]
try:
sig = signatures.get(_hash)
if len(sig) > 0:
step["name"] = sig[0]
else:
step["name"] = "unknown"
except ValueError:
step["name"] = "unknown"
class Report:
"""A report containing the content of multiple issues."""
environment = Environment(
loader=PackageLoader("mythril.analysis"), trim_blocks=True
)
def __init__(self, contracts=None, exceptions=None):
"""
:param contracts:
:param exceptions:
"""
self.issues = {}
self.solc_version = ""
self.meta = {}
self.source = Source()
self.source.get_source_from_contracts_list(contracts)
self.exceptions = exceptions or []
def sorted_issues(self):
"""
:return:
"""
issue_list = [issue.as_dict for key, issue in self.issues.items()]
return sorted(issue_list, key=operator.itemgetter("address", "title"))
def append_issue(self, issue):
"""
:param issue:
"""
m = hashlib.md5()
m.update((issue.contract + str(issue.address) + issue.title).encode("utf-8"))
issue.resolve_function_names()
self.issues[m.digest()] = issue
def as_text(self):
"""
:return:
"""
name = self._file_name()
template = Report.environment.get_template("report_as_text.jinja2")
return template.render(filename=name, issues=self.sorted_issues())
def as_json(self):
"""
:return:
"""
result = {"success": True, "error": None, "issues": self.sorted_issues()}
return json.dumps(result, sort_keys=True)
def _get_exception_data(self) -> dict:
if not self.exceptions:
return {}
logs = [] # type: List[Dict]
for exception in self.exceptions:
logs += [{"level": "error", "hidden": "true", "msg": exception}]
return {"logs": logs}
def as_swc_standard_format(self):
"""Format defined for integration and correlation.
:return:
"""
_issues = []
for key, issue in self.issues.items():
idx = self.source.get_source_index(issue.bytecode_hash)
try:
title = SWC_TO_TITLE[issue.swc_id]
except KeyError:
title = "Unspecified Security Issue"
extra = {"discoveryTime": int(issue.discovery_time * 10 ** 9)}
if issue.transaction_sequence_jsonv2:
extra["testCases"] = [issue.transaction_sequence_jsonv2]
_issues.append(
{
"swcID": "SWC-" + issue.swc_id,
"swcTitle": title,
"description": {
"head": issue.description_head,
"tail": issue.description_tail,
},
"severity": issue.severity,
"locations": [{"sourceMap": "%d:1:%d" % (issue.address, idx)}],
"extra": extra,
}
)
meta_data = self._get_exception_data()
result = [
{
"issues": _issues,
"sourceType": self.source.source_type,
"sourceFormat": self.source.source_format,
"sourceList": self.source.source_list,
"meta": meta_data,
}
]
return json.dumps(result, sort_keys=True)
def as_markdown(self):
"""
:return:
"""
filename = self._file_name()
template = Report.environment.get_template("report_as_markdown.jinja2")
return template.render(filename=filename, issues=self.sorted_issues())
def _file_name(self):
"""
:return:
"""
if len(self.issues.values()) > 0:
return list(self.issues.values())[0].filename
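# Illustrative sketch (not part of the original module): one way the classes
# above might be wired together. The Issue arguments shown are placeholder
# values, not the output of a real analysis run.
#
#     issue = Issue(contract="Token", function_name="transfer", address=1234,
#                   swc_id="101", title="Integer Overflow", bytecode="0x00",
#                   severity="High", description_head="head",
#                   description_tail="tail")
#     report = Report(contracts=[])
#     report.append_issue(issue)
#     print(report.as_json())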
|
the-stack_0_4149 |
import os
from flask_babel import _
from flask_login import current_user, login_required
from flask import render_template, redirect, url_for, flash, request,abort
from app.main.forms import PostForm, ProfileForm, BeginForm, MiddleForm, FinalForm
from app.main import bp
from app.models import User, Post
from ext import mongo
from werkzeug.urls import url_parse
from werkzeug.utils import secure_filename
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
if current_user.get_admin():
return redirect(url_for('admin.index'))
return render_template('Index.html')
@bp.route('/post', methods=['GET', 'POST'])
def post():
if not current_user.get_admin():
return redirect(url_for('index'))
form = PostForm()
if form.validate_on_submit():
        stage = int(form.stage.data)
users = mongo.db.users
user = users.find_one({'name': form.username.data})
user['projects'][stage]['passed'] = True
users.save(user)
return redirect(url_for('profile', username=form.username.data))
return abort(404)
@bp.route('/profile/<username>', methods=['GET', 'POST'])
@login_required
def profile(username):
if not current_user.get_admin():
return redirect(url_for('index'))
form = ProfileForm()
users = mongo.db.users
user = users.find_one({'name': username})
    if not user or user['post_num'] < 1:
        abort(404)
    if 'pass' not in user.keys():
        user['pass'] = False
        mongo.db.users.save(user)
    post = user['pass']
return render_template(
'Profile.html',
forms=user['posts'],
form=form,
post=post,
username=username,
admin=False)
@bp.route('/info/<page>')
@login_required
def info(page):
if page not in ['college', "class", "money", "preresult", "proccess", "accept", "finish", "eval"]:
abort(404)
    return 'This feature is still under development'
@bp.route('/input_0', methods=['GET', 'POST'])
@login_required
def input_0():
if current_user.get_admin():
return redirect(url_for('admin'))
return render_template('waitting.html')
@bp.route('/input_1', methods=['GET', 'POST'])
@login_required
def input_1():
if current_user.get_admin():
return redirect(url_for('admin'))
if current_user.get_post_num() > 0:
return redirect(url_for('input_0'))
form = BeginForm()
if form.validate_on_submit():
file = form.__class__.__name__ + '-'+secure_filename(
form.upload.data.filename)
file_path = current_user.path
if not os.path.exists(file_path):
os.makedirs(file_path)
filedata = os.listdir(file_path)
if file not in filedata:
filedata.append(file)
form.upload.data.save(file_path + '/' + file)
post = {
'project': form.project.data,
'person': form.person.data,
'money': form.money.data,
'post': form.post.data,
'upload': filedata,
}
p = Post(current_user.name, post_1=post)
p.submit()
current_user.set_post_num(1)
return redirect(url_for('input_0'))
return render_template('BeginForm.html', title='项目申请', form=form)
@bp.route('/input_2', methods=['GET', 'POST'])
@login_required
def input_2():
if current_user.get_admin():
return redirect(url_for('admin'))
if current_user.get_post_num() > 1:
return redirect(url_for('input_0'))
form = MiddleForm()
if form.validate_on_submit():
file = form.__class__.__name__ + '-' + secure_filename(
form.upload.data.filename)
file_path = current_user.path
if not os.path.exists(file_path):
os.makedirs(file_path)
filedata = os.listdir(file_path)
if file not in filedata:
filedata.append(file)
form.upload.data.save(file_path + '/' + file)
post = {
'schedule': form.schedule.data,
'preview': form.preview.data,
'post': form.post.data,
'upload': filedata,
}
p = Post(current_user.name, post_2=post)
p.submit()
current_user.set_post_num(3)
return redirect(url_for('input_0'))
return render_template('MiddleForm.html', title='中期检查', form=form)
@bp.route('/input_3', methods=['GET', 'POST'])
@login_required
def input_3():
if current_user.get_admin():
return redirect(url_for('admin'))
if current_user.get_post_num() > 3:
return redirect(url_for('input_0'))
form = FinalForm()
if form.validate_on_submit():
file = form.__class__.__name__ + '-' + secure_filename(
form.upload.data.filename)
file_path = current_user.path
if not os.path.exists(file_path):
os.makedirs(file_path)
filedata = os.listdir(file_path)
if file not in filedata:
filedata.append(file)
form.upload.data.save(file_path + '/' + file)
post = {
'change': form.change.data,
'achievement': form.achievement.data,
'post': form.post.data,
'upload': filedata,
}
p = Post(current_user.name, post_3=post)
p.submit()
current_user.set_post_num(7)
return redirect(url_for('input_0'))
return render_template('FinalForm.html', title='成果验收', form=form)
|
the-stack_0_4153 | """
This module uses ROSEGRAPHICS to demonstrate:
-- CONSTRUCTING objects,
-- applying METHODS to them, and
-- accessing their DATA via INSTANCE VARIABLES.
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Michael Kuznicki.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
#
# DONE: 2.
# RUN this program. Then answer the following,
# GETTING HELP AS NEED! (Ask questions!!!)
#
# a. For the RoseGraphics coordinate system:
#
# -- Where is the (0, 0) point on the screen?
# It is in the upper left corner.
#
# -- In what direction on the screen
# does the positive X-axis point?
# It points to the right.
#
# -- In what direction on the screen
# does the positive Y-axis point?
# It points down.
#
# b. Write a line of code that constructs a RoseWindow object:
# window = rg.RoseWindow(400,400)
#
#
#
#
#
#
#
#
# e. Use the DOT trick to answer the following:
#
# -- Write the names of two types of graphics objects that
# you can construct OTHER than Circle and Point:
# Arc and Button
#
# -- Write the names of three METHODs that Circle objects have:
# fill_color and attach_to
#
# -- Write the names of three INSTANCE VARIABLEs that Circle
# objects have:
# center and radius
#
# f. What does a RoseWindow RENDER method do?
# It draws the objects.
#
# g. When is a RoseWindow close_on_mouse_click method call
# necessary? Why?
# It is necessary when you want to control when the window
# closes instead of it closing on its own.
#
# ASK QUESTIONS ** NOW ** if you do not understand how the
# RoseGraphics graphics system works.
#
# When you are confident that you have written correct answers
# to the above questions (ASK QUESTIONS AS NEEDED!),
# change the above TODO to DONE.
#
########################################################################
import rosegraphics as rg
def main():
"""
Uses ROSEGRAPHICS to demonstrate:
-- CONSTRUCTING objects,
-- applying METHODS to them, and
-- accessing their DATA via INSTANCE VARIABLES
"""
example1()
example2()
example3()
def example1():
""" Displays an empty window. """
window = rg.RoseWindow(500, 300, 'Example 1: An empty window')
window.close_on_mouse_click()
def example2():
""" Displays two Point objects. """
# ------------------------------------------------------------------
# Construct the window in which objects will be drawn.
# ------------------------------------------------------------------
window = rg.RoseWindow()
# ------------------------------------------------------------------
# Construct some rg.Point objects.
# Note: the y-axis goes DOWN from the TOP.
# ------------------------------------------------------------------
point1 = rg.Point(100, 150)
point2 = rg.Point(200, 50)
# ------------------------------------------------------------------
# A RoseGraphics object is not associated with a window,
# and hence are not drawn, until you ATTACH it to a window.
# ------------------------------------------------------------------
point1.attach_to(window)
point2.attach_to(window)
# ------------------------------------------------------------------
# And they still are not DRAWN until you RENDER the window.
# That will draw ALL the objects on the window.
# This two-step approach is important for animation.
# ------------------------------------------------------------------
window.render()
window.close_on_mouse_click()
def example3():
""" Displays a Circle and a Rectangle. """
# ------------------------------------------------------------------
# RoseWindow: optionally takes its width and height.
# ------------------------------------------------------------------
width = 700
height = 400
window = rg.RoseWindow(width, height)
# ------------------------------------------------------------------
# Circle: needs its center and radius.
# Has fill_color instance variable.
# ------------------------------------------------------------------
center_point = rg.Point(300, 100)
radius = 50
circle = rg.Circle(center_point, radius)
circle.fill_color = 'green'
circle.attach_to(window)
# ------------------------------------------------------------------
# Rectangle: needs two opposite corners.
# ------------------------------------------------------------------
point1 = rg.Point(100, 150)
point2 = rg.Point(200, 50)
rectangle = rg.Rectangle(point1, point2)
rectangle.attach_to(window)
# ------------------------------------------------------------------
# render: Draw ALL the objects attached to this window.
# ------------------------------------------------------------------
window.render()
# ------------------------------------------------------------------
# A Rectangle has instance variables corner_1 and corner2.
# ------------------------------------------------------------------
corner1 = rectangle.corner_1
corner2 = rectangle.corner_2
print(corner1, corner2) # You can also PRINT RoseGraphics objects.
print(rectangle) # See the Console for the output.
# ------------------------------------------------------------------
# close_on_mouse_click: Keeps the window open until user clicks.
# ------------------------------------------------------------------
window.close_on_mouse_click()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
the-stack_0_4154 | #! /usr/bin/python
from __future__ import absolute_import
from __future__ import print_function
from . import rowingdata
from sys import argv
def main():
readFile=argv[1]
try:
rowerFile=argv[2]
except IndexError:
rowerFile="defaultrower.txt"
rower=rowingdata.getrower(rowerFile)
csvoutput=readFile+"_o.CSV"
rp=rowingdata.ErgDataParser(readFile)
rp.write_csv(csvoutput)
res=rowingdata.rowingdata(csvoutput,rowtype="On-water",
rower=rower)
res.plotmeters_erg()
print((res.allstats()))
print(("done "+readFile))
|
the-stack_0_4155 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Kimora Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends KMR to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more KMR to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import KimoraTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(KimoraTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
|
the-stack_0_4156 | import scrapy
from sec.items import SecItem
import re
class SecSpider(scrapy.Spider):
name = 'sec'
allowed_domains = ['sec.gov']
start_urls = [
'https://www.sec.gov/cgi-bin/browse-edgar?CIK=t&owner=exclude&action=getcompany&count=100',
]
def parse(self, response):
for sel in response.xpath('//table[@class="tableFile2"]/tr'):
item = SecItem()
item['filing'] = sel.xpath('td[1]/text()').extract()
item['link'] = sel.xpath('td[2]/a/@href').extract()
item['date'] = sel.xpath('td[4]/text()').extract()
print(item)
yield item
next_page = response.xpath("//input[@type='button']/@onclick")
# print(next_page)
if next_page:
path = re.findall("'((?:.|\n)*?)'", next_page.pop().extract()).pop()
url = 'https://www.sec.gov' + path
yield scrapy.Request(url, self.parse)
|
the-stack_0_4157 | """
Migration script to alter the type of the tool_dependency.version column from TrimmedString(40) to Text.
"""
import logging
from sqlalchemy import (
MetaData,
Table
)
log = logging.getLogger(__name__)
metadata = MetaData()
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
Table("tool_dependency", metadata, autoload=True)
# Change the tool_dependency table's version column from TrimmedString to Text.
if migrate_engine.name in ['postgres', 'postgresql']:
cmd = "ALTER TABLE tool_dependency ALTER COLUMN version TYPE Text;"
elif migrate_engine.name == 'mysql':
cmd = "ALTER TABLE tool_dependency MODIFY COLUMN version Text;"
else:
# We don't have to do anything for sqlite tables. From the sqlite documentation at http://sqlite.org/datatype3.html:
# 1.0 Storage Classes and Datatypes
# Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes:
# NULL. The value is a NULL value.
# INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.
# REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number.
# TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).
# BLOB. The value is a blob of data, stored exactly as it was input.
cmd = None
if cmd:
try:
migrate_engine.execute(cmd)
except Exception:
log.exception("Altering tool_dependency.version column from TrimmedString(40) to Text failed.")
def downgrade(migrate_engine):
# Not necessary to change column type Text to TrimmedString(40).
pass
|
the-stack_0_4158 | from collections import OrderedDict
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from .models import Event, Signup
# Create your views here.
def upcoming(request, page):
return render(request, 'events/list.html', getContext(page, 'Upcoming'))
def previous(request, page):
return render(request, 'events/list.html', getContext(page, 'Previous'))
# putting duplicate code from upcoming/previous in one place - Sorc
def getContext(page, event_type):
if 'Upcoming' == event_type:
events_all = Event.objects.filter(when__gte=timezone.now()).order_by('when')
else:
events_all = Event.objects.filter(when__lte=timezone.now()).order_by('-when')
paginator = Paginator(events_all, 12)
try:
events_page = paginator.page(page)
except InvalidPage:
events_page = paginator.page(1)
    events = events_page
# Solution copied from uwcs-zarya
weeks_dict = OrderedDict()
for event in events:
event_week = event.when.isocalendar()[1]
key = '{year}-{week}'.format(year=event.when.year, week=event_week)
if weeks_dict.get(key):
weeks_dict.get(key).append(event)
else:
weeks_dict[key] = [event]
weeks = list()
for _, week in weeks_dict.items():
weeks.append(week)
return {
'weeks': weeks,
'paginator_page': events_page,
'list_type': event_type,
}
def event_view(request, event_id):
template = 'events/view.html'
event = get_object_or_404(Event, id=event_id)
if not event.signup_required():
context = {
'event': event,
'signup_required': False,
}
return render(request, template, context)
user_is_signed_up = False if not request.user.is_authenticated else event.already_signed_up(request.user.member)
context = {
'event': event,
'signups': Signup.objects.filter(event=event).order_by("-created"),
'signup_required': True,
'user_is_signed_up': user_is_signed_up,
'event_is_full': event.is_full(),
'closed': event.closed(),
'opened': event.opened()
}
return render(request, template, context)
@login_required
def signup(request, event_id):
event = get_object_or_404(Event, id=event_id)
if request.method == 'POST':
if event.is_full():
messages.error(request, 'Event is full')
elif not event.signup_required():
messages.error(request, 'Signups are not required for this event')
elif event.already_signed_up(request.user.member):
messages.error(request, 'You have already signed up for this event')
elif event.closed():
messages.error(request, 'Signups for this event are closed')
elif not event.opened():
messages.error(request, 'Signups for this event are not open yet')
else:
new_signup = Signup(
who=request.user.member,
event=Event.objects.get(id=event_id),
comment=request.POST['signup_comment'],
created=timezone.now()
)
new_signup.save()
messages.success(request, 'Signup for event successful')
return HttpResponseRedirect(reverse('events:view', args=[event.id]))
@login_required
def cancel(request, event_id):
event = get_object_or_404(Event, id=event_id)
if request.method == 'POST':
if event.closed():
messages.error(request, 'Signups for this event are closed')
else:
            Signup.objects.filter(event=event, who=request.user.member).delete()
messages.success(request, 'Canceling successful')
return HttpResponseRedirect(reverse('events:view', args=[event.id]))
|
the-stack_0_4160 | """
This file must not depend on any other CuPy modules.
"""
import ctypes
import json
import os
import os.path
import shutil
import sys
import warnings
# '' for uninitialized, None for non-existing
_cuda_path = ''
_nvcc_path = ''
_rocm_path = ''
_hipcc_path = ''
_cub_path = ''
"""
Library Preloading
------------------
Wheel packages are built against specific versions of CUDA libraries
(cuTENSOR/NCCL/cuDNN).
To avoid loading wrong version, these shared libraries are manually
preloaded.
# TODO(kmaehashi): Support NCCL
Example of `_preload_config` is as follows:
{
# installation source
'packaging': 'pip',
# CUDA version string
'cuda': '11.0',
'cudnn': {
# cuDNN version string
'version': '8.0.0',
# names of the shared library
'filenames': ['libcudnn.so.X.Y.Z'] # or `cudnn64_X.dll` for Windows
}
}
The configuration file is intended solely for internal purposes and
not expected to be parsed by end-users.
"""
_preload_config = None
_preload_libs = {
'cudnn': None,
'nccl': None,
'cutensor': None,
}
_preload_logs = []
def _log(msg):
# TODO(kmaehashi): replace with the standard logging
_preload_logs.append(msg)
def get_cuda_path():
# Returns the CUDA installation path or None if not found.
global _cuda_path
if _cuda_path == '':
_cuda_path = _get_cuda_path()
return _cuda_path
def get_nvcc_path():
# Returns the path to the nvcc command or None if not found.
global _nvcc_path
if _nvcc_path == '':
_nvcc_path = _get_nvcc_path()
return _nvcc_path
def get_rocm_path():
# Returns the ROCm installation path or None if not found.
global _rocm_path
if _rocm_path == '':
_rocm_path = _get_rocm_path()
return _rocm_path
def get_hipcc_path():
# Returns the path to the hipcc command or None if not found.
global _hipcc_path
if _hipcc_path == '':
_hipcc_path = _get_hipcc_path()
return _hipcc_path
def get_cub_path():
# Returns the CUB header path or None if not found.
global _cub_path
if _cub_path == '':
_cub_path = _get_cub_path()
return _cub_path
def _get_cuda_path():
# Use environment variable
cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows
if os.path.exists(cuda_path):
return cuda_path
# Use nvcc path
nvcc_path = shutil.which('nvcc')
if nvcc_path is not None:
return os.path.dirname(os.path.dirname(nvcc_path))
# Use typical path
if os.path.exists('/usr/local/cuda'):
return '/usr/local/cuda'
return None
def _get_nvcc_path():
# Honor the "NVCC" env var
nvcc_path = os.environ.get('NVCC', None)
if nvcc_path is not None:
return nvcc_path
# Lookup <CUDA>/bin
cuda_path = get_cuda_path()
if cuda_path is None:
return None
return shutil.which('nvcc', path=os.path.join(cuda_path, 'bin'))
def _get_rocm_path():
# Use environment variable
rocm_path = os.environ.get('ROCM_HOME', '')
if os.path.exists(rocm_path):
return rocm_path
# Use hipcc path
hipcc_path = shutil.which('hipcc')
if hipcc_path is not None:
return os.path.dirname(os.path.dirname(hipcc_path))
# Use typical path
if os.path.exists('/opt/rocm'):
return '/opt/rocm'
return None
def _get_hipcc_path():
# TODO(leofang): Introduce an env var HIPCC?
# Lookup <ROCM>/bin
rocm_path = get_rocm_path()
if rocm_path is None:
return None
return shutil.which('hipcc', path=os.path.join(rocm_path, 'bin'))
def _get_cub_path():
# runtime discovery of CUB headers
from cupy_backends.cuda.api import runtime
current_dir = os.path.dirname(os.path.abspath(__file__))
if not runtime.is_hip:
cuda_path = get_cuda_path()
if os.path.isdir(os.path.join(current_dir, '_core/include/cupy/cub')):
_cub_path = '<bundle>'
elif cuda_path is not None and os.path.isdir(
os.path.join(cuda_path, 'include/cub')):
# use built-in CUB for CUDA 11+
_cub_path = '<CUDA>'
else:
_cub_path = None
else:
# the bundled CUB does not work in ROCm
rocm_path = get_rocm_path()
if rocm_path is not None and os.path.isdir(
os.path.join(rocm_path, 'include/hipcub')):
# use hipCUB
_cub_path = '<ROCm>'
else:
_cub_path = None
return _cub_path
def _setup_win32_dll_directory():
# Setup DLL directory to load CUDA Toolkit libs and shared libraries
# added during the build process.
if sys.platform.startswith('win32'):
is_conda = ((os.environ.get('CONDA_PREFIX') is not None)
or (os.environ.get('CONDA_BUILD_STATE') is not None))
# Path to the CUDA Toolkit binaries
cuda_path = get_cuda_path()
if cuda_path is not None:
if is_conda:
cuda_bin_path = cuda_path
else:
cuda_bin_path = os.path.join(cuda_path, 'bin')
else:
cuda_bin_path = None
warnings.warn(
'CUDA path could not be detected.'
' Set CUDA_PATH environment variable if CuPy fails to load.')
_log('CUDA_PATH: {}'.format(cuda_path))
# Path to shared libraries in wheel
wheel_libdir = os.path.join(
get_cupy_install_path(), 'cupy', '.data', 'lib')
if os.path.isdir(wheel_libdir):
_log('Wheel shared libraries: {}'.format(wheel_libdir))
else:
_log('Not wheel distribution ({} not found)'.format(
wheel_libdir))
wheel_libdir = None
if (3, 8) <= sys.version_info:
if cuda_bin_path is not None:
_log('Adding DLL search path: {}'.format(cuda_bin_path))
os.add_dll_directory(cuda_bin_path)
if wheel_libdir is not None:
_log('Adding DLL search path: {}'.format(wheel_libdir))
os.add_dll_directory(wheel_libdir)
else:
# Users are responsible for adding `%CUDA_PATH%/bin` to PATH.
if wheel_libdir is not None:
_log('Adding to PATH: {}'.format(wheel_libdir))
path = os.environ.get('PATH', '')
os.environ['PATH'] = wheel_libdir + os.pathsep + path
def get_cupy_install_path():
# Path to the directory where the package is installed.
return os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
def get_cupy_cuda_lib_path():
"""Returns the directory where CUDA external libraries are installed.
This environment variable only affects wheel installations.
Shared libraries are looked up from
`$CUPY_CUDA_LIB_PATH/$CUDA_VER/$LIB_NAME/$LIB_VER/{lib,lib64,bin}`,
e.g., `~/.cupy/cuda_lib/11.2/cudnn/8.1.1/lib64/libcudnn.so.8.1.1`.
The default $CUPY_CUDA_LIB_PATH is `~/.cupy/cuda_lib`.
"""
cupy_cuda_lib_path = os.environ.get('CUPY_CUDA_LIB_PATH', None)
if cupy_cuda_lib_path is None:
return os.path.expanduser('~/.cupy/cuda_lib')
return os.path.abspath(cupy_cuda_lib_path)
def get_preload_config():
global _preload_config
if _preload_config is None:
config_path = os.path.join(
get_cupy_install_path(), 'cupy', '.data', '_wheel.json')
if not os.path.exists(config_path):
return None
with open(config_path) as f:
_preload_config = json.load(f)
return _preload_config
def _can_attempt_preload(lib: str) -> bool:
"""Returns if the preload can be attempted."""
config = get_preload_config()
if (config is None) or (config['packaging'] == 'conda'):
# We don't do preload if CuPy is installed from Conda-Forge, as we
# cannot guarantee the version pinned in _wheel.json, which is
# encoded in config[lib]['filenames'], is always available on
# Conda-Forge. See here for the configuration files used in
# Conda-Forge distributions.
# https://github.com/conda-forge/cupy-feedstock/blob/master/recipe/preload_config/
_log(f'Cannot preload {lib} as this is not a wheel installation')
return False
if lib not in _preload_libs:
raise AssertionError(f'Unknown preload library: {lib}')
if lib not in config:
_log(f'Preload {lib} not configured in wheel')
return False
if _preload_libs[lib] is not None:
_log(f'Preload already attempted: {lib}')
return False
return True
def _preload_library(lib):
"""Preload dependent shared libraries.
The preload configuration file (cupy/.data/_wheel.json) will be added
during the wheel build process.
"""
_log(f'Preloading triggered for library: {lib}')
if not _can_attempt_preload(lib):
return
_preload_libs[lib] = {}
config = get_preload_config()
cuda_version = config['cuda']
_log('CuPy wheel package built for CUDA {}'.format(cuda_version))
cupy_cuda_lib_path = get_cupy_cuda_lib_path()
_log('CuPy CUDA library directory: {}'.format(cupy_cuda_lib_path))
version = config[lib]['version']
filenames = config[lib]['filenames']
for filename in filenames:
_log(f'Looking for {lib} version {version} ({filename})')
# "lib": cuTENSOR (Linux/Windows) / NCCL (Linux)
# "lib64": cuDNN (Linux)
# "bin": cuDNN (Windows)
libpath_cands = [
os.path.join(
cupy_cuda_lib_path, config['cuda'], lib, version, x,
filename)
for x in ['lib', 'lib64', 'bin']]
for libpath in libpath_cands:
if not os.path.exists(libpath):
_log('Rejected candidate (not found): {}'.format(libpath))
continue
try:
_log(f'Trying to load {libpath}')
# Keep reference to the preloaded module.
_preload_libs[lib][libpath] = ctypes.CDLL(libpath)
_log('Loaded')
break
except Exception as e:
e_type = type(e).__name__ # NOQA
msg = (
f'CuPy failed to preload library ({libpath}): '
f'{e_type} ({e})')
_log(msg)
warnings.warn(msg)
else:
_log('File {} could not be found'.format(filename))
# Lookup library with fully-qualified version (e.g.,
# `libcudnn.so.X.Y.Z`).
_log(f'Trying to load {filename} from default search path')
try:
_preload_libs[lib][filename] = ctypes.CDLL(filename)
_log('Loaded')
except Exception as e:
# Fallback to the standard shared library lookup which only
# uses the major version (e.g., `libcudnn.so.X`).
_log(f'Library {lib} could not be preloaded: {e}')
def _get_preload_logs():
return '\n'.join(_preload_logs)
def _preload_warning(lib, exc):
config = get_preload_config()
if config is not None and lib in config:
msg = '''
{lib} library could not be loaded.
Reason: {exc_type} ({exc})
You can install the library by:
'''
if config['packaging'] == 'pip':
msg += '''
$ python -m cupyx.tools.install_library --library {lib} --cuda {cuda}
'''
elif config['packaging'] == 'conda':
msg += '''
$ conda install -c conda-forge {lib}
'''
else:
raise AssertionError
msg = msg.format(
lib=lib, exc_type=type(exc).__name__, exc=str(exc),
cuda=config['cuda'])
warnings.warn(msg)
def _detect_duplicate_installation():
# importlib.metadata only available in Python 3.8+.
if sys.version_info < (3, 8):
return
import importlib.metadata
# List of all CuPy packages, including out-dated ones.
known = [
'cupy',
'cupy-cuda80',
'cupy-cuda90',
'cupy-cuda91',
'cupy-cuda92',
'cupy-cuda100',
'cupy-cuda101',
'cupy-cuda102',
'cupy-cuda110',
'cupy-cuda111',
'cupy-cuda112',
'cupy-cuda113',
'cupy-cuda114',
'cupy-cuda115',
'cupy-cuda116',
'cupy-rocm-4-0',
'cupy-rocm-4-1',
'cupy-rocm-4-2',
'cupy-rocm-4-3',
]
cupy_installed = [
name for name in known
if list(importlib.metadata.distributions(name=name))]
if 1 < len(cupy_installed):
cupy_packages_list = ', '.join(sorted(cupy_installed))
warnings.warn(f'''
--------------------------------------------------------------------------------
CuPy may not function correctly because multiple CuPy packages are installed
in your environment:
{cupy_packages_list}
Follow these steps to resolve this issue:
1. For all packages listed above, run the following command to remove all
existing CuPy installations:
$ pip uninstall <package_name>
If you previously installed CuPy via conda, also run the following:
$ conda uninstall cupy
2. Install the appropriate CuPy package.
Refer to the Installation Guide for detailed instructions.
https://docs.cupy.dev/en/stable/install.html
--------------------------------------------------------------------------------
''')
def _diagnose_import_error() -> str:
# TODO(kmaehashi): provide better diagnostics.
return '''\
Failed to import CuPy.
If you installed CuPy via wheels (cupy-cudaXXX or cupy-rocm-X-X), make sure that the package matches with the version of CUDA or ROCm installed.
On Linux, you may need to set LD_LIBRARY_PATH environment variable depending on how you installed CUDA/ROCm.
On Windows, try setting CUDA_PATH environment variable.
Check the Installation Guide for details:
https://docs.cupy.dev/en/latest/install.html''' # NOQA
|
the-stack_0_4161 | import string
BASE_CHARACTERS = string.ascii_letters + string.digits
SAFE_CHARACTERS = frozenset(BASE_CHARACTERS + '-_.')
KEY_LENGTH = (2, 128)
NONCE_LENGTH = (6, 128)
SECRET_LENGTH = (6, 128)
default_app_config = 'django_lti_login.apps.DjangoLTILoginConfig'
|
the-stack_0_4162 | # -*- coding: utf-8 -*-
import numpy as np
import os
from VDE.VASPMoleculeFeature import VASP_DataExtract
import pickle
from dlmep.DatasetOffer import print_file
class DatasetMaker(object):
def __init__(self,dir_list):
if not isinstance(dir_list,list):
dir_list = [dir_list]
self.in_dir = dir_list
self.vasp_dirs = []
for i in self.in_dir:
self.vasp_dirs.extend(self.get_vasp_dirs(i))
print("Get total %s vasp dirs" % len(self.vasp_dirs))
if len(self.vasp_dirs) == 0:
raise ValueError("No vasp dirs Available")
self.total_info = {}
self.total_info["instance"] = "DatasetMaker"
self.atom_cases = set([])
for i in self.vasp_dirs:
self.total_info[i] = {}
self.total_info[i]["generated"] = 0
self.b_make_dataset = 0
def make_dataset(self):
t = len(self.vasp_dirs)
for i in range(t):
print_file("Process For generating dataset: %s / %s"%(i, t))
#print("Process for %s" % self.vasp_dirs[i])
self.__make_one_dataset(self.vasp_dirs[i])
self.b_make_dataset = 1
def save_dataset(self,pkl_path):
if self.b_make_dataset == 0:
raise ValueError("make dataset before save dataset!")
if os.path.isdir(pkl_path):
pkl_path += "/atom_dataset.pkl"
if not pkl_path.endswith(".pkl"):
pkl_path += '.pkl'
with open(pkl_path, "wb") as f:
pickle.dump(self.total_info,f)
def give_out_dataset(self):
if self.b_make_dataset == 0:
raise ValueError("make dataset before save dataset!")
return self.total_info
def __make_one_dataset(self,vasp_dir):
test = VASP_DataExtract(vasp_dir=vasp_dir)
test.get_atom_and_position_info()
a = test.get_output_as_atom3Dspace()
        if len(a.atoms_pos_info) <= 4:  # not enough samples; this can easily happen
            print_file("Not enough samples for %s, which has %s." % (vasp_dir, len(a.atoms_pos_info)))
del self.total_info[vasp_dir]
return
print_file("vasp_dir %s have sample %s" % (vasp_dir, len(a.atoms_pos_info)))
self.total_info[vasp_dir]["generated"] = 1
        # here 'x' and 'y' are not coordinates: 'x' holds the coordinate data and 'y' the energies
self.total_info[vasp_dir]['x'], self.total_info[vasp_dir]['y'], atom_cases = a.generate_data()
self.atom_cases = self.atom_cases.union(atom_cases)
print("AtomCases",self.atom_cases)
self.total_info[vasp_dir]['atom_cases'] = self.atom_cases
        # the model must be built around this collection of datasets so that it covers all atom types
def get_vasp_dirs(self,dir):
files = os.walk(dir)
vasp_dir = []
for i in files:
if "OUTCAR" in i[2]:
vasp_dir.append(i[0])
return vasp_dir
if __name__ == '__main__':
aim_vasp_path = "C:\\Users\wang\Desktop\运行结果\嵌套运行结果\interm\Pt\AllSurfaceG1"
temp = DatasetMaker(aim_vasp_path)
temp.make_dataset()
print(temp.total_info)
#temp.save_dataset("C:\\Users\wang\Desktop\运行结果")
    # Current issue: OUT.ANI holds twice as many coordinate frames as energy values. Final decision: extract the coordinates directly from OUTCAR, reusing code written earlier.
    # TODO: the dataset must fix the set of atom types up front; atoms that are absent should not contribute to the energy (a bias term exists), so an all-zero feature vector cannot be used for them. |
the-stack_0_4163 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from opencensus.trace import execution_context
# By default the blacklist urls are not tracing, currently just include the
# health check url. The paths are literal string matched instead of regular
# expressions. Do not include the '/' at the beginning of the path.
DEFAULT_BLACKLIST_PATHS = [
'_ah/health',
]
# Pattern for matching the 'https://', 'http://', 'ftp://' part.
URL_PATTERN = '^(https?|ftp):\\/\\/'
def get_func_name(func):
"""Return a name which includes the module name and function name."""
func_name = getattr(func, '__name__', func.__class__.__name__)
module_name = func.__module__
if module_name is not None:
module_name = func.__module__
return '{}.{}'.format(module_name, func_name)
return func_name
def disable_tracing_url(url, blacklist_paths=None):
"""Disable tracing on the provided blacklist paths, by default not tracing
the health check request.
If the url path starts with the blacklisted path, return True.
:type blacklist_paths: list
:param blacklist_paths: Paths that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_paths is None:
blacklist_paths = DEFAULT_BLACKLIST_PATHS
# Remove the 'https?|ftp://' if exists
url = re.sub(URL_PATTERN, '', url)
# Split the url by the first '/' and get the path part
url_path = url.split('/', 1)[1]
for path in blacklist_paths:
if url_path.startswith(path):
return True
return False
def disable_tracing_hostname(url, blacklist_hostnames=None):
"""Disable tracing for the provided blacklist URLs, by default not tracing
the exporter url.
If the url path starts with the blacklisted path, return True.
:type blacklist_hostnames: list
:param blacklist_hostnames: URL that not tracing.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_hostnames is None:
# Exporter host_name are not traced by default
_tracer = execution_context.get_opencensus_tracer()
try:
blacklist_hostnames = [
'{}:{}'.format(
_tracer.exporter.host_name,
_tracer.exporter.port
)
]
except(AttributeError):
blacklist_hostnames = []
return url in blacklist_hostnames
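# Illustrative sketch (not part of the original module): behaviour of the
# disable_tracing_url helper above for a health-check URL versus an ordinary
# path, using the default blacklist.
#
#     disable_tracing_url('https://example.com/_ah/health')    # -> True
#     disable_tracing_url('https://example.com/api/v1/users')  # -> False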
|
the-stack_0_4164 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
from shutil import copyfile
import tensorflow as tf
from datasets import dataset_utils
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 130
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB PNG data.
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1]
def decode_png(self, sess, image_data):
image = sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def copy_evaluting_images_file(filename_list, dataset_dir):
wd_snapshot_root = os.path.join(dataset_dir, 'wd_snapshot_photo')
evaluting_dir = os.path.join(wd_snapshot_root, 'evaluting_dir')
if not tf.gfile.Exists(evaluting_dir):
tf.gfile.MakeDirs(evaluting_dir)
for filename in filename_list:
_str_basename = os.path.basename(filename)
_dst_dir_file = os.path.join(evaluting_dir, _str_basename)
copyfile(filename, _dst_dir_file)
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
wd_snapshot_root = os.path.join(dataset_dir, 'wd_snapshot_photo')
directories = []
class_names = []
for filename in os.listdir(wd_snapshot_root):
if filename == "evaluting_dir" or filename == "bad_case_dir":
continue
path = os.path.join(wd_snapshot_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'wdsnapshot_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'png', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'flower_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
#dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
_validation_num = int(len(photo_filenames)/5)
training_filenames = photo_filenames[_validation_num:]
validation_filenames = photo_filenames[:_validation_num]
copy_evaluting_images_file(validation_filenames, dataset_dir)
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
#_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the wd_snapshot dataset!')
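if __name__ == '__main__':
  # Illustrative entry point (an assumption, not part of the original module): pass the
  # dataset directory on the command line, e.g. `python convert_wd_snapshot.py /data/wd`.
  run(sys.argv[1])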
|
the-stack_0_4165 | from django.db import migrations
from job_board.models.job_type import JobType
from job_board.resources.job_types import JOB_TYPES
class Migration(migrations.Migration):
dependencies = [
('job_board', '0001_initial'),
]
def generate_jobType_data(apps, schema_editor):
for jobType in JOB_TYPES:
JobType(text=jobType).save()
operations = [
migrations.RunPython(generate_jobType_data),
]
|
the-stack_0_4167 | from __future__ import division
from itertools import product, groupby
import numpy as np
import logging
from scipy.special._ufuncs import erfc
from statsmodels.stats.multitest import fdrcorrection
logging.basicConfig(level=logging.DEBUG)
def timelag_by_for_loop (timeseries1, timeseries2):
"""Returns for each event in the first time series the time lags for the event in the second time series
that precedes, succeeds. Both time series must be sorted in increasing values."""
preceding_time_lags = []
succeeding_time_lags = []
for time1 in timeseries1:
preceding_time_lags.append(next((time2 - time1 for time2 in reversed(timeseries2) if time2 < time1), []))
succeeding_time_lags.append(next((time2 - time1 for time2 in timeseries2 if time2 > time1), []))
return np.sort(np.hstack(preceding_time_lags + succeeding_time_lags))
def sawtooth(timeseries, dtype = np.float32):
"""Sawtooth function expressing the time lag to the next event in the timeseries."""
epsilon = np.finfo(dtype).eps
gaps = np.diff(timeseries)
x = np.column_stack((timeseries[0:-1], timeseries[1:] - epsilon)).flatten()
y = np.column_stack((gaps, np.zeros_like(gaps))).flatten()
return [x, y]
def timelag_by_sawtooth (timeseries1, timeseries2):
"""Returns for each event in the first time series the time lags for the event in the second time series
that precedes, succeeds. Both time series must be sorted in increasing values. Faster than timelag_by_for_loop."""
try:
preceding_time_lags = - np.interp(np.flipud(-timeseries1), *sawtooth(-np.flipud(timeseries2)), left=np.nan, right=np.nan)
except ValueError:
preceding_time_lags = []
try:
succeeding_time_lags = np.interp(timeseries1, *sawtooth(timeseries2), left=np.nan, right=np.nan)
except ValueError:
succeeding_time_lags = []
time_lags = np.sort(np.hstack([preceding_time_lags, succeeding_time_lags]))
valid_time_lags = (np.ma.fix_invalid(time_lags))
return np.ma.compressed(valid_time_lags)
timelag = timelag_by_sawtooth
def timelag_hist (timelags, min_timelag=-0.005, max_timelag=0.005, bin_n=100):
bins = np.linspace(min_timelag, max_timelag, bin_n + 1, endpoint=True)
return np.histogram(timelags, bins=bins)
def swap_intervals (timeseries, indicies):
"""Swap intervals between adjacent intervals indicated by indicies"""
intervals = np.diff(timeseries)
for index in indicies:
intervals[index], intervals[index+1] = intervals[index+1], intervals[index]
return np.hstack([timeseries[0], timeseries[0]+np.cumsum(intervals)])
def randomize_intervals_by_swapping (timeseries, factor):
"""Randomize timeseries by randomly swapping adjacent intervals, total factor times the length of timeseries"""
length = len(timeseries)-1
times = round(factor*length,0)
indicies = np.random.randint(0,length-1,int(times))
return swap_intervals(timeseries,indicies)
def randomize_intervals_by_gaussian (timeseries, factor):
"""Randomize timeseries by assuming indicies make a random walk with (+factor,-factor) of equal probability.
Much faster than randomize_intervals_by_swapping."""
gaps = np.diff(timeseries)
length = len(gaps)
new_positions = range(length) + np.random.normal(0, factor, length)
index = np.argsort(new_positions)
return timeseries[0] + np.hstack((0,np.cumsum(gaps[index])))
randomize_intervals = randomize_intervals_by_gaussian
def surrogate_timeseries (timeseries, n=10, factor=2):
return [randomize_intervals(timeseries,factor=factor) for i in range(n)]
def timelag_standardscore(timeseries1, timeseries2, surrogates):
"""Returns timelags (midpoints of bins) and standard score as well as the counts from the orginal timeseries
and mean and standard deviation for the counts from surrogate timeseries"""
timeseries_hist, bins = timelag_hist(timelag(timeseries1, timeseries2))
timelags = (bins[:-1] + bins[1:])/2 * 1000 # ms
surrogates_hist = np.vstack([timelag_hist(timelag(timeseries1, surrogate))[0] for surrogate in surrogates])
surrogates_mean = surrogates_hist.mean(0)
surrogates_std = np.std(surrogates_hist, 0)
try: std_score = (timeseries_hist - surrogates_mean) / surrogates_std
except ZeroDivisionError: pass
return timelags, std_score, timeseries_hist, surrogates_mean, surrogates_std
def timeseries_to_surrogates(timeseries, n=10, factor=2):
"""Generating surrogate timeseries (this can take a while)"""
timeseries_surrogates = dict([(key, surrogate_timeseries(timeseries[key], n=n, factor=factor)) for key in timeseries])
return timeseries_surrogates
def all_timelag_standardscore (timeseries, timeseries_surrogates):
"""Compute standardscore time histograms"""
all_std_score = []
all_timeseries_hist = []
for pair in product(timeseries, repeat=2):
timelags, std_score, timeseries_hist,surrogates_mean, surrogates_std \
= timelag_standardscore(timeseries[pair[0]], timeseries[pair[1]], timeseries_surrogates[pair[1]])
logging.info ( "Timeseries %d->%d" % pair )
all_std_score.append((pair, std_score))
all_timeseries_hist.append((pair, timeseries_hist))
# if logging.getLogger().getEffectiveLevel()==logging.DEBUG:
# plot_pair_func(timelags, timeseries_hist, surrogates_mean, surrogates_std, std_score,
# "Timeseries %d->%d" % pair)
# plt.show()
return timelags, dict(all_std_score), dict(all_timeseries_hist)
def all_peaks (timelags, std_score_dict, structural_delay_dict=None, minimal_synapse_delay=0):
"""
Return the largest standard score peak for each functional connection, rejecting false positives.
    Implemented is the forward direction, that is, looking for peaks at post-synaptic time lags.
    After converting z values into p values by p = erfc(z/sqrt(2)), the Benjamini-Hochberg procedure is applied to
    control the false discovery rate in the multiple comparisons, with the false discovery rate fixed at one false
    discovery across all comparisons (alpha = 1/number of standard scores).
    If provided, only time lags larger than the sum of the axonal and synaptic delays are considered, but the returned
    time lags correspond to the response times to the presynaptic spikes, excluding the axonal delays. This implies
    that only structurally connected neuron pairs are tested.
:param timelags: array with time lags for standard scores
:param std_score_dict: standard scores indexed by neuron pair
:param structural_delay_dict: (optional) axonal delays indexed by neuron pair
:param minimal_synapse_delay: (optional) time lag must be larger than this synapse delay (and axonal delay)
    :return: all_score_max: standard scores indexed by neuron pair
all_timelag_max: time lags indexed by neuron pair
z_thr: threshold for standard score
"""
# TODO Implement reverse directions, that is looking for peaks at pre-synaptic spike time lags
# TODO Implement detection of negative peaks (inhibitory connections)
if structural_delay_dict is None:
pairs = std_score_dict
offset = lambda pair: 0
else: # consider axonal delays
pairs = structural_delay_dict
offset = lambda pair: structural_delay_dict[pair]
# first, collect all z values and determine threshold
z_values = list()
for pair in pairs:
use = timelags > offset(pair)
if pair in std_score_dict:
std_score = std_score_dict[pair]
z_values += list(std_score[use])
z_thr = BH_threshold(z_values)
# second, determine peak z value and check if above threshold
all_score_max, all_timelag_max = [], []
for pair in pairs:
use = timelags > offset(pair) + minimal_synapse_delay
if pair in std_score_dict:
std_score = std_score_dict[pair]
try:
index_max = np.argmax(std_score[use])
timelag_max = timelags[use][index_max]
score_max = std_score[use][index_max]
except ValueError: # ValueError: attempt to get argmax of an empty sequence
score_max = 0
if score_max > z_thr: # looking at positive peaks only
all_score_max.append((pair, score_max))
all_timelag_max.append((pair, timelag_max))
logging.info(("Timeseries %d->%d" % pair) +
(": max z = %f at %f s" % (score_max, timelag_max)))
else:
logging.info(("Timeseries %d->%d" % pair) + ': no peak (above threshold)')
logging.info('FDR correction %d --> %d with z>%f' % (len(std_score_dict), len(all_score_max), z_thr))
return dict(all_score_max), dict(all_timelag_max), z_thr
def BH_threshold(z_values):
"""
Threshold for standard scores by the Benjamini-Hochberg procedure.
:param z_values: standard scores
:return: z_threshold: for absolute value of the standard scores, that is abs(z)>z_threshold
"""
abs_z_values = np.abs(z_values)
p_values = erfc(abs_z_values / np.sqrt(2))
FDR = 1 / p_values.size
rejected, p_values_corrected = fdrcorrection(p_values, alpha=FDR, method='indep', is_sorted=False)
z_thr = min(abs_z_values[rejected == True])
return z_thr
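if __name__ == "__main__":
    # Minimal sketch with synthetic scores (assumed values, not from any experiment):
    # 500 noise-like z values plus one clear outlier; the Benjamini-Hochberg threshold
    # returned by BH_threshold should separate the outlier from the noise.
    np.random.seed(0)
    example_z = np.concatenate([np.random.normal(0, 1, 500), [8.0]])
    print("BH threshold for the synthetic scores: %.2f" % BH_threshold(example_z))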
|
the-stack_0_4169 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Marker classes for indicating which additional features gates support.
For example: some gates are reversible, some have known matrices, etc.
"""
from typing import (
Any, Dict, Optional, Sequence, Tuple, Iterable, TypeVar, Union,
)
import string
from cirq import abc, value
from cirq.ops import op_tree, raw_types
from cirq.study import ParamResolver
class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
"""Indicates operations should be equal under some qubit permutations."""
def qubit_index_to_equivalence_group_key(self, index: int) -> int:
"""Returns a key that differs between non-interchangeable qubits."""
return 0
class ReversibleEffect(metaclass=abc.ABCMeta):
"""A gate whose effect can be undone in a known way."""
@abc.abstractmethod
def inverse(self) -> 'ReversibleEffect':
"""Returns a gate with an exactly opposite effect."""
TSelf_ExtrapolatableEffect = TypeVar('TSelf_ExtrapolatableEffect',
bound='ExtrapolatableEffect')
class ExtrapolatableEffect(ReversibleEffect,
metaclass=abc.ABCMeta):
"""A gate whose effect can be continuously scaled up/down/negated."""
@abc.abstractmethod
def extrapolate_effect(self: TSelf_ExtrapolatableEffect,
factor: Union[float, value.Symbol]
) -> TSelf_ExtrapolatableEffect:
"""Augments, diminishes, or reverses the effect of the receiving gate.
Args:
factor: The amount to scale the gate's effect by.
Returns:
A gate equivalent to applying the receiving gate 'factor' times.
"""
def __pow__(self: TSelf_ExtrapolatableEffect,
power: Union[float, value.Symbol]
) -> TSelf_ExtrapolatableEffect:
"""Extrapolates the effect of the gate.
Note that there are cases where (G**a)**b != G**(a*b). For example,
start with a 90 degree rotation then cube it then raise it to a
non-integer power such as 3/2. Assuming that rotations are always
normalized into the range (-180, 180], note that:
((rot 90)**3)**1.5 = (rot 270)**1.5 = (rot -90)**1.5 = rot -135
but
(rot 90)**(3*1.5) = (rot 90)**4.5 = rot 405 = rot 35
Because normalization discards the winding number.
Args:
power: The extrapolation factor.
Returns:
A gate with the extrapolated effect.
"""
return self.extrapolate_effect(power)
def inverse(self: TSelf_ExtrapolatableEffect) -> TSelf_ExtrapolatableEffect:
return self.extrapolate_effect(-1)
class CompositeOperation(metaclass=abc.ABCMeta):
"""An operation with a known decomposition into simpler operations."""
@abc.abstractmethod
def default_decompose(self) -> op_tree.OP_TREE:
"""Yields simpler operations for performing the receiving operation."""
class CompositeGate(metaclass=abc.ABCMeta):
"""A gate with a known decomposition into simpler gates."""
@abc.abstractmethod
def default_decompose(
self, qubits: Sequence[raw_types.QubitId]) -> op_tree.OP_TREE:
"""Yields operations for performing this gate on the given qubits.
Args:
qubits: The qubits the gate should be applied to.
"""
class TextDiagramInfoArgs:
"""
Attributes:
known_qubits: The qubits the gate is being applied to. None means this
information is not known by the caller.
        known_qubit_count: The number of qubits the gate is being applied to.
            None means this information is not known by the caller.
use_unicode_characters: If true, the wire symbols are permitted to
include unicode characters (as long as they work well in fixed
width fonts). If false, use only ascii characters. ASCII is
preferred in cases where UTF8 support is done poorly, or where
the fixed-width font being used to show the diagrams does not
properly handle unicode characters.
precision: The number of digits after the decimal to show for numbers in
the text diagram. None means use full precision.
qubit_map: The map from qubits to diagram positions.
"""
UNINFORMED_DEFAULT = None # type: TextDiagramInfoArgs
def __init__(self,
known_qubits: Optional[Tuple[raw_types.QubitId, ...]],
known_qubit_count: Optional[int],
use_unicode_characters: bool,
precision: Optional[int],
qubit_map: Optional[Dict[raw_types.QubitId, int]]) -> None:
self.known_qubits = known_qubits
self.known_qubit_count = known_qubit_count
self.use_unicode_characters = use_unicode_characters
self.precision = precision
self.qubit_map = qubit_map
TextDiagramInfoArgs.UNINFORMED_DEFAULT = TextDiagramInfoArgs(
known_qubits=None,
known_qubit_count=None,
use_unicode_characters=True,
precision=3,
qubit_map=None)
class TextDiagramInfo:
def __init__(self,
wire_symbols: Tuple[str, ...],
exponent: Any = 1,
connected: bool = True) -> None:
"""
Args:
wire_symbols: The symbols that should be shown on the qubits
affected by this operation. Must match the number of qubits that
the operation is applied to.
exponent: An optional convenience value that will be appended onto
an operation's final gate symbol with a caret in front
(unless it's equal to 1). For example, the square root of X gate
has a text diagram exponent of 0.5 and symbol of 'X' so it is
drawn as 'X^0.5'.
connected: Whether or not to draw a line connecting the qubits.
"""
self.wire_symbols = wire_symbols
self.exponent = exponent
self.connected = connected
def _eq_tuple(self):
return (TextDiagramInfo, self.wire_symbols,
self.exponent, self.connected)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._eq_tuple() == other._eq_tuple()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._eq_tuple())
def __repr__(self):
return ('cirq.TextDiagramInfo(' +
'wire_symbols={!r}, '.format(self.wire_symbols) +
'exponent={!r}, '.format(self.exponent) +
'connected={!r})'.format(self.connected)
)
class TextDiagrammable(metaclass=abc.ABCMeta):
"""A thing which can be printed in a text diagram."""
@abc.abstractmethod
def text_diagram_info(self, args: TextDiagramInfoArgs) -> TextDiagramInfo:
"""Describes how to draw something in a text diagram.
Args:
args: A TextDiagramInfoArgs instance encapsulating various pieces of
information (e.g. how many qubits are we being applied to) as
well as user options (e.g. whether to avoid unicode characters).
Returns:
A TextDiagramInfo instance describing what to print.
"""
TSelf_PhaseableEffect = TypeVar('TSelf_PhaseableEffect',
bound='PhaseableEffect')
class PhaseableEffect(metaclass=abc.ABCMeta):
"""An effect that can be phased around the Z axis of target qubits."""
@abc.abstractmethod
def phase_by(self: TSelf_PhaseableEffect,
phase_turns: float,
qubit_index: int) -> TSelf_PhaseableEffect:
"""Returns a phased version of the effect.
For example, an X gate phased by 90 degrees would be a Y gate.
Args:
phase_turns: The amount to phase the gate, in fractions of a whole
turn.
qubit_index: The index of the target qubit the phasing applies to.
Returns:
The phased gate or operation.
"""
class BoundedEffect(metaclass=abc.ABCMeta):
"""An effect with known bounds on how easy it is to detect.
Used when deciding whether or not an operation is negligible. For example,
the trace distance between the states before and after a Z**0.00000001
operation is very close to 0, so it would typically be considered
negligible.
"""
@abc.abstractmethod
def trace_distance_bound(self) -> float:
"""A maximum on the trace distance between this effect's input/output.
Generally this method is used when deciding whether to keep gates, so
only the behavior near 0 is important. Approximations that overestimate
the maximum trace distance are permitted. Even ones that exceed 1.
Underestimates are not permitted.
"""
class SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
"""A gate that must be applied to exactly one qubit."""
def validate_args(self, qubits):
if len(qubits) != 1:
raise ValueError(
'Single-qubit gate applied to multiple qubits: {}({})'.
format(self, qubits))
def on_each(self, targets: Iterable[raw_types.QubitId]) -> op_tree.OP_TREE:
"""Returns a list of operations apply this gate to each of the targets.
Args:
targets: The qubits to apply this gate to.
Returns:
Operations applying this gate to the target qubits.
"""
return [self.on(target) for target in targets]
class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
"""A gate that must be applied to exactly two qubits."""
def validate_args(self, qubits):
if len(qubits) != 2:
raise ValueError(
'Two-qubit gate not applied to two qubits: {}({})'.
format(self, qubits))
class ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
"""A gate that must be applied to exactly three qubits."""
def validate_args(self, qubits):
if len(qubits) != 3:
raise ValueError(
'Three-qubit gate not applied to three qubits: {}({})'.
format(self, qubits))
TSelf_ParameterizableEffect = TypeVar('TSelf_ParameterizableEffect',
bound='ParameterizableEffect')
class ParameterizableEffect(metaclass=abc.ABCMeta):
"""An effect that can be parameterized by Symbols."""
@abc.abstractmethod
def is_parameterized(self) -> bool:
"""Whether the effect is parameterized.
Returns True if the gate has any unresolved Symbols and False otherwise.
"""
@abc.abstractmethod
def with_parameters_resolved_by(self: TSelf_ParameterizableEffect,
param_resolver: ParamResolver
) -> TSelf_ParameterizableEffect:
"""Resolve the parameters in the effect.
Returns a gate or operation of the same type, but with all Symbols
replaced with floats according to the given ParamResolver.
"""
class QasmOutputArgs(string.Formatter):
"""
Attributes:
precision: The number of digits after the decimal to show for numbers in
the text diagram.
version: The QASM version to output. QasmConvertibleGate/Operation may
return different text depending on version.
qubit_id_map: A dictionary mapping qubits to qreg QASM identifiers.
meas_key_id_map: A dictionary mapping measurement keys to creg QASM
identifiers.
"""
def __init__(self,
precision: int = 10,
version: str = '2.0',
qubit_id_map: Dict[raw_types.QubitId, str] = None,
meas_key_id_map: Dict[str, str] = None,
) -> None:
self.precision = precision
self.version = version
self.qubit_id_map = {} if qubit_id_map is None else qubit_id_map
self.meas_key_id_map = ({} if meas_key_id_map is None
else meas_key_id_map)
def format_field(self, value: Any, spec: str) -> str:
"""Method of string.Formatter that specifies the output of format()."""
if isinstance(value, float):
value = round(value, self.precision)
if spec == 'half_turns':
value = 'pi*{}'.format(value) if value != 0 else '0'
spec = ''
elif isinstance(value, raw_types.QubitId):
value = self.qubit_id_map[value]
elif isinstance(value, str) and spec == 'meas':
value = self.meas_key_id_map[value]
spec = ''
return super().format_field(value, spec)
def validate_version(self, *supported_versions: str) -> None:
if self.version not in supported_versions:
raise ValueError('QASM version {} output is not supported.'.format(
self.version))
class QasmConvertibleGate(metaclass=abc.ABCMeta):
"""A gate that knows its representation in QASM."""
@abc.abstractmethod
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: QasmOutputArgs) -> Optional[str]:
"""Returns lines of QASM output representing the gate on the given
qubits or None if a simple conversion is not possible.
"""
class QasmConvertibleOperation(metaclass=abc.ABCMeta):
"""An operation that knows its representation in QASM."""
@abc.abstractmethod
def known_qasm_output(self, args: QasmOutputArgs) -> Optional[str]:
"""Returns lines of QASM output representing the operation or None if a
simple conversion is not possible."""
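if __name__ == '__main__':
    # Illustrative check (not part of the cirq API surface): TextDiagramInfo compares and
    # hashes by value, so two infos built from the same wire symbols, exponent and
    # connectivity are interchangeable.
    _a = TextDiagramInfo(wire_symbols=('X',), exponent=0.5)
    _b = TextDiagramInfo(wire_symbols=('X',), exponent=0.5)
    assert _a == _b and hash(_a) == hash(_b)
    print(repr(_a))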
|
the-stack_0_4171 | #!/usr/bin/env python3
import copy
import os
import pickle
import sys
import time
import numpy as np
from scipy import signal
from src.channel_maps import channel_loc_map
from src.data_preprocess import data_1D_to_2D
from src.movement_onset_detect import *  # also expected to provide detectOnset, detectOnsetPCA, alignTrials
db_dir = os.getcwd()
ME_db_fname = "prelim_ME_db_128.pickle"
ME_Kin_db_fname = "noneeg_ME_db_128.pickle"
rej_ME_db_fname = "reject_ME_db_128.pickle"
fs = 128
ME_db = {}
ME_kin_db = {}
rej_ME_db = {}
######## Load databases from files in db_dir
t1 = time.time()
with open(db_dir + "/" + rej_ME_db_fname, "rb") as f:
rej_ME_db = pickle.load(f)
with open(db_dir + "/" + ME_db_fname, "rb") as f:
ME_db = pickle.load(f)
with open(db_dir + "/" + ME_Kin_db_fname, "rb") as f:
ME_kin_db = pickle.load(f)
print("Loaded ME database in %f s" % (time.time() - t1))
######## Baseline subtraction and infs/NaNs rejection
t1 = time.time()
ME_db_norm = copy.deepcopy(ME_db)
for i in range(1, 8):
for j in range(0, 900):
try:
signal.detrend(ME_db_norm[i][j], axis=0, overwrite_data=True)
except ValueError as e: # add trials with infs/NaNs to rejected db
rej_ME_db[i][j] = 1
print("Baseline subtraction and infs/NaNs rejection finished in %f s" % (time.time() - t1))
# map event type to event label
# class 1: 0x600 = 1536 (elbow flexion)
# class 2: 0x601 = 1537 (elbow extension)
# class 3: 0x602 = 1538 (supination)
# class 4: 0x603 = 1539 (pronation)
# class 5: 0x604 = 1540 (hand close)
# class 6: 0x605 = 1541 (hand open)
# class 7: 0x606 = 1542 (rest)
######## Movement onset detection
onsetAll = np.zeros((8, 900))
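# onsetAll[class, trial] holds the detected movement onset (a sample index) for each of the
# 900 trials in classes 1-7; row 0 is unused padding so class labels can index directly.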
chElbow = np.array([87, 88, 89]) - 65 # adjust for offset as indexed in ME_kin_db
chForeArm = np.array([94]) - 65
chHand = np.arange(65, 80) - 65
plot = False
t1 = time.time()
detectOnset(ME_kin_db, onsetAll, 1, chElbow, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnset(ME_kin_db, onsetAll, 2, chElbow, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnset(ME_kin_db, onsetAll, 3, chForeArm, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnset(ME_kin_db, onsetAll, 4, chForeArm, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnsetPCA(ME_kin_db, onsetAll, 5, chHand, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnsetPCA(ME_kin_db, onsetAll, 6, chHand, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
onsetAll[7, :] = np.mean(onsetAll[1:7, :])
onsetAll = onsetAll.astype(int)
print("Found movement onset in %f s" % (time.time() - t1))
######## Movement onset alignment
t1 = time.time()
ME_db_aligned = alignTrials(ME_db_norm, onsetAll, fs)
print("Created ME_db_aligned in %f s" % (time.time() - t1))
######## Removing artifacts
t1 = time.time()
num_good_trials = np.zeros(8, dtype=int) # list storing the number of good trials per class after trial rejection
ME_db_aligned_no_art = {}
for clas in range(1, 8):
ME_db_aligned_no_art[clas] = None
for clas in range(1, 8):
reject_mask = np.array(rej_ME_db[clas])
ME_db_aligned_no_art[clas] = np.delete(ME_db_aligned[clas], np.nonzero(reject_mask == 1), axis=0)
num_good_trials[clas] = ME_db_aligned_no_art[clas].shape[0]
print("Removing artifacts %f s" % (time.time() - t1))
######## Find minimum number of good trials and balance out all classes
min_num_good_trials = np.min(num_good_trials[1:])
for clas in range(1, 8):
ME_db_aligned_no_art[clas] = ME_db_aligned_no_art[clas][0:min_num_good_trials, :, :]
print(ME_db_aligned_no_art[1].shape)
######## Converting 1D to 2D mesh
CLM = channel_loc_map()
# populate the mesh with the electrodes
mesh = [["" for y in range(0, 9)] for x in range(0, 9)]
for chan in range(0, np.shape(CLM)[0]):
mesh[CLM[chan][0]][CLM[chan][1]] = channel_label_map[chan + 1]
# print the 2D mesh of channels
for x in range(0, 9):
print(mesh[x])
t1 = time.time()
ME_db_final_2D_mesh = data_1D_to_2D(ME_db_aligned_no_art, 9, 9, CLM)
print("Converting 1D to 2D mesh takes %f s" % (time.time() - t1))
t1 = time.time()
with open("mesh_ME_db_128.pickle", "wb") as f:
i_str = pickle.dumps(ME_db_final_2D_mesh)
f_size = sys.getsizeof(i_str) / 1048576
f.write(i_str)
print("Finished writing %.2f MB of data to mesh_ME_db_128.pickle in %f s" % (f_size, time.time() - t1))
|
the-stack_0_4174 | import apsw
import logging
import re
import threading
import traceback
from collections import deque
import ob2.config as config
from ob2.database import DbCursor
from ob2.database.helpers import (
assign_grade_batch,
get_repo_owners,
get_users_by_ids,
)
from ob2.dockergrader.job import JobFailedError
from ob2.dockergrader.queue import dockergrader_queue
from ob2.mailer import send_template
from ob2.util.build_constants import QUEUED, IN_PROGRESS, SUCCESS, FAILED
from ob2.util.config_data import get_assignment_by_name
from ob2.util.hooks import get_job
from ob2.util.time import now, now_str, slip_units
class Worker(object):
def __init__(self):
self.lock = threading.Lock()
self.log = deque(maxlen=100)
self.status = None
self.updated = now()
self.identifier = dockergrader_queue.register_worker(self)
def probe(self, with_log=False):
with self.lock:
if with_log:
return self.identifier, self.status, self.updated, list(self.log)
else:
return self.identifier, self.status, self.updated
def _log(self, message, exc=False):
payload = (now(), message)
if exc:
payload += (traceback.format_exc(),)
else:
payload += (None,)
with self.lock:
self.log.append(payload)
def _dequeue_job(self):
with self.lock:
self.status = None
self.updated = now()
self._log("Waiting for a new job to run")
return dockergrader_queue.dequeue()
def _sanitize_name(self, name):
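        # Collapse every run of non-alphanumeric characters to "_", e.g. "proj1-2/solo" -> "proj1_2_solo".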
return re.sub(r'[^a-zA-Z0-9]+', '_', name)
def _process_job(self, job):
build_name = job.build_name
with self.lock:
self.status = build_name
self.updated = now()
# Mark the job as In Progress
while True:
try:
with DbCursor() as c:
c.execute('''SELECT source, `commit`, message, job, started FROM builds
WHERE build_name = ? AND status = ? LIMIT 1''',
[build_name, QUEUED])
row = c.fetchone()
if row is None:
self._log("Build %s was missing from the database. Skipping." % build_name)
return
source, commit, message, job_name, started = row
owners = get_repo_owners(c, source)
owner_emails = {owner: email for owner, (_, _, _, _, _, email)
in get_users_by_ids(c, owners).items()}
c.execute("UPDATE builds SET status = ?, updated = ? WHERE build_name = ?",
[IN_PROGRESS, now_str(), build_name])
break
except apsw.Error:
self._log("Exception raised while setting status to IN_PROGRESS. Retrying...",
exc=True)
logging.exception("Failed to retrieve next dockergrader job")
self._log("Started building %s" % build_name)
try:
# if the job doesn't exist for some reason, the resulting TypeError will be caught
# and logged
assignment = get_assignment_by_name(job_name)
due_date = assignment.due_date
job_handler = get_job(job_name)
log, score = job_handler(source, commit)
# Ignore any special encoding inside the log, and just treat it as a bytes
log = buffer(log)
min_score, max_score = assignment.min_score, assignment.max_score
full_score = assignment.full_score
if score < min_score or score > max_score:
raise ValueError("A score of %s is not in the acceptable range of %f to %f" %
(str(score), min_score, max_score))
except JobFailedError as e:
self._log("Failed %s with JobFailedError" % build_name, exc=True)
with DbCursor() as c:
c.execute('''UPDATE builds SET status = ?, updated = ?, log = ?
WHERE build_name = ?''', [FAILED, now_str(), str(e), build_name])
if config.mailer_enabled:
try:
for owner in owners:
email = owner_emails.get(owner)
if not email:
continue
subject = "%s failed to complete" % build_name
send_template("build_failed", email, subject, build_name=build_name,
job_name=job_name, source=source, commit=commit,
message=message, error_message=str(e))
except Exception:
self._log("Exception raised while reporting JobFailedError", exc=True)
logging.exception("Exception raised while reporting JobFailedError")
else:
self._log("JobFailedError successfully reported via email")
return
except Exception as e:
self._log("Exception raised while building %s" % build_name, exc=True)
logging.exception("Internal error within build %s" % build_name)
with DbCursor() as c:
c.execute('''UPDATE builds SET status = ?, updated = ?, log = ?
WHERE build_name = ?''',
[FAILED, now_str(), "Build failed due to an internal error.", build_name])
return
self._log("Autograder build %s complete (score: %s)" % (build_name, str(score)))
while True:
try:
with DbCursor() as c:
c.execute('''UPDATE builds SET status = ?, score = ?, updated = ?,
log = ? WHERE build_name = ?''',
[SUCCESS, score, now_str(), log, build_name])
slipunits = slip_units(due_date, started)
affected_users = assign_grade_batch(c, owners, job_name, float(score),
slipunits, build_name, "Automatic build.",
"autograder", dont_lower=True)
break
except apsw.Error:
self._log("Exception raised while assigning grades", exc=True)
logging.exception("Failed to update build %s after build completed" % build_name)
return
if config.mailer_enabled:
try:
for owner in owners:
email = owner_emails.get(owner)
if not email:
continue
subject = "%s complete - score %s / %s" % (build_name, str(score),
str(full_score))
if owner not in affected_users:
subject += " (no effect on grade)"
else:
if slipunits == 1:
subject += " (1 %s used)" % config.slip_unit_name_singular
elif slipunits > 0:
subject += " (%s slip %s used)" % (str(slipunits),
config.slip_unit_name_plural)
send_template("build_finished", email, subject, build_name=build_name,
job_name=job_name, score=score, full_score=str(full_score),
slipunits=slipunits, log=log, source=source, commit=commit,
message=message, affected=(owner in affected_users))
except Exception:
self._log("Exception raised while reporting grade", exc=True)
logging.exception("Exception raised while reporting grade")
else:
self._log("Grade successfully reported via email")
def run(self):
while True:
job = self._dequeue_job()
self._process_job(job)
|
the-stack_0_4175 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# test_model.py
#
#
from datetime import datetime
import mock
import pytz
from django.conf import settings
from django.test import TestCase
from qa.mixins import DateMixin
TZ = settings.TIME_ZONE
# This is the function that replaces django.utils.timezone.now()
def mocked_now():
return pytz.timezone(TZ).localize(datetime(2000, 6, 1))
class MixinsTestCase(TestCase):
@mock.patch('django.utils.timezone.now', side_effect=mocked_now)
def test_datemixin(self, mocked):
now = DateMixin()
now.pub_date = pytz.timezone(TZ).localize(datetime(2000, 6, 1))
secs = DateMixin()
secs.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 31, 23, 59, 57))
minutes = DateMixin()
minutes.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 31, 23, 58, 59))
hours = DateMixin()
hours.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 31, 22, 59, 59))
days = DateMixin()
days.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 30, 23, 59, 59))
date = DateMixin()
date.pub_date = pytz.timezone(TZ).localize(datetime(2000, 3, 1))
self.assertEqual(now.pub_date_verbose(), "just now")
self.assertEqual(secs.pub_date_verbose(), "3 seconds ago")
self.assertEqual(minutes.pub_date_verbose(), "1 minutes ago")
self.assertEqual(hours.pub_date_verbose(), "1 hours ago")
self.assertEqual(days.pub_date_verbose(), "1 days ago")
self.assertEqual(date.pub_date_verbose(), "2000/03/01")
|
the-stack_0_4176 | import os
import datetime
import argparse
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from progress.spinner import Spinner
import ntpath
def make_safe(unsafe_string):
return "".join([c for c in unsafe_string if c.isalpha() or c.isdigit()]).rstrip()
parser = argparse.ArgumentParser(description='Extract lightning frames from a video.')
parser.add_argument('video_file_name', type=str,
help='The file with the lightning in it')
parser.add_argument('--threshold', dest='threshold', action='store',
default=10,
                    help='Use a non-default threshold (default: 10) for determining what counts as a lightning flash.')
parser.add_argument('--outfolder', dest='outfolder', action='store',
help='Specify a folder for the frames and data to be saved to.')
args = parser.parse_args()
THRESHOLD = args.threshold
VIDEO_FILE_NAME = args.video_file_name
print(f"Using Threshold: {THRESHOLD}")
print(f"Using Video File Name: {VIDEO_FILE_NAME}")
if __name__ == '__main__':
if not os.path.isfile(VIDEO_FILE_NAME):
print(f"File not found: {VIDEO_FILE_NAME}")
print("Exiting...")
exit(404)
if not args.outfolder:
OUTFOLDER = f"{ntpath.dirname(VIDEO_FILE_NAME)}/{ make_safe(ntpath.basename(VIDEO_FILE_NAME))}__OUTPUT"
else:
OUTFOLDER = args.outfolder
print(f"Output going to folder: {OUTFOLDER}")
print(f"Starting at: {datetime.datetime.now().isoformat()}")
if not os.path.isdir(OUTFOLDER):
os.makedirs(OUTFOLDER, exist_ok=True)
cap = cv2.VideoCapture(VIDEO_FILE_NAME)
frame_num = 0
frame_data = []
spinner = Spinner('Processing ')
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
print(f"Looks like we are done at Frame number: {frame_num}")
break
mean_brightness = np.mean(frame)
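        # Mean over every pixel and colour channel; a lightning flash lifts this well above
        # the dark-sky baseline, so a simple threshold is enough to flag the frame.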
store_frame = mean_brightness > THRESHOLD
if store_frame:
cv2.imwrite(f"{OUTFOLDER}/frame_{str(frame_num)}.jpg", frame)
frame_data.append([frame_num, mean_brightness, store_frame])
frame_num += 1
spinner.next()
cap.release()
cv2.destroyAllWindows()
print(f"Ending at: {datetime.datetime.now().isoformat()}")
if len(frame_data) == 0:
print(f"Looks like no data was found, was this file location ok?:{VIDEO_FILE_NAME}")
exit(400)
df = pd.DataFrame(frame_data, columns=["frame_num", "brightness", "stored_image"] )
print(df)
df.to_csv(f"{OUTFOLDER}/frame_brighness_data.csv", columns=["frame_num", "brightness", "stored_image"], index=False)
df.plot(x="frame_num", y="brightness")
plt.show()
plt.savefig(f"{OUTFOLDER}/frame_brighness_data.pdf")
|
the-stack_0_4177 | import tkinter as tk
from predict import load_model, classify_text
class Todo(tk.Tk):
def __init__(self, tasks=None):
super().__init__()
self.title("Text Language Identifier")
self.geometry("600x210")
self.language_note = tk.Label(self, text="Language Identified", bg="lightgrey", fg="black", pady=10)
self.language_note.pack(side=tk.TOP, fill=tk.X)
self.language_identified = tk.StringVar()
self.language_identified.set("")
self.classification_region = tk.Label(self, textvariable=self.language_identified, bg="grey", fg="white", pady=10)
self.classification_region.pack(side=tk.TOP, fill=tk.X)
self.text_region = tk.Text(self, height=10, bg="white", fg="black")
self.text_region.pack(side=tk.BOTTOM, fill=tk.X)
self.text_region.focus_set()
self.bind("<Return>", self.classify_language)
self.text_region_note = tk.Label(self, text="--- Type or Paste Text Here, and Press Enter ---", bg="lightgrey", fg="black", pady=10)
self.text_region_note.pack(side=tk.BOTTOM, fill=tk.X)
self.colour_schemes = [{"bg": "lightgrey", "fg": "black"}, {"bg": "grey", "fg": "white"}]
def classify_language(self, event=None):
text_input = self.text_region.get(1.0,tk.END).strip()
self.language_identified.set(lc[classify_text(text = text_input, model = model, le = le, n_gram_list = n_gram_list)])
self.text_region.delete(1.0, tk.END)
if __name__ == "__main__":
model, le, lc, n_gram_list = load_model()
todo = Todo()
todo.mainloop()
|
the-stack_0_4178 | graph = {
'0': ['1', '2'],
'1': ['0', '2', '3', '4', '10'],
'2': ['0', '1', '3', '4', '7', '9'],
'3': ['1', '2', '4', '5'],
'4': ['1', '2', '3', '5', '6', '7', '8'],
'5': ['3', '4', '6', '8'],
'6': ['4', '5'],
'7': ['2', '4', '9', '10'],
'8': ['4', '5'],
'9': ['2', '7'],
'10': ['1', '7']
}
def bfs(graph, start, end):
visited = set()
# maintain a queue of paths
queue = []
# push the first path into the queue
queue.append([start])
visited.add(start)
while queue:
# get the first path from the queue
path = queue.pop(0)
# print(path)
# get the last node from the path
node = path[-1]
# print(node)
# path found
if node == end:
return path
# enumerate all adjacent nodes, construct a new path and push it into the queue
for adjacent in graph.get(node, []):
if adjacent not in visited:
new_path = list(path)
new_path.append(adjacent)
queue.append(new_path)
visited.add(adjacent)
print(visited)
print(bfs(graph, '0', '6'))
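# Note (illustrative): because BFS explores the graph level by level, the path printed above
# is a shortest route from '0' to '6', e.g. ['0', '1', '4', '6'] with 4 nodes.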
from queue import PriorityQueue
q = PriorityQueue()
a = ((1, 1), (10,2), (1, 0))
q.put(a)
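# q.queue exposes the underlying heap list, so membership can be checked without dequeuing.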
print(any((1, 1) in item for item in q.queue))
|
the-stack_0_4179 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListEndpoints
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync]
from google.cloud import aiplatform_v1beta1
def sample_list_endpoints():
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListEndpointsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync]
|
the-stack_0_4180 | from __future__ import absolute_import
from tornado import web, testing
from tornado.ioloop import IOLoop
from pyswagger import SwaggerApp
from pyswagger.contrib.client.tornado import TornadoClient
from ...utils import create_pet_db, get_test_data_folder, pet_Mary
import json
import sys
import pytest
import six
sapp = SwaggerApp._create_(get_test_data_folder(version='1.2', which='wordnik'))
received_file = None
received_meta = None
""" refer to pyswagger.tests.data.v1_2.wordnik for details """
class RESTHandler(web.RequestHandler):
""" base implementation of RequestHandler,
accept a db as init paramaeter.
"""
def initialize(self, db):
self.db = db
def prepare(self):
"""
        According to the Tornado FAQ, Tornado does not parse JSON request bodies automatically.
"""
super(RESTHandler, self).prepare()
content_type = self.request.headers.get('Content-Type')
if content_type and content_type.startswith('application/json'):
# handle media-type: json
if content_type.rfind('charset=UTF-8'):
self.json_args = json.loads(self.request.body.decode('utf-8'))
else:
raise web.HTTPError('unsupported application type:' + content_type)
class PetRequestHandler(RESTHandler):
""" refer to /pet """
def put(self):
pet = self.json_args
if not isinstance(pet['id'], int):
self.set_status(400)
if not self.db.update_(**pet):
self.set_status(404)
else:
self.set_status(200)
self.finish()
def post(self):
pet = self.json_args
if self.db.read_(pet['id']) != None:
self.set_status(409)
else:
self.db.create_(**pet)
self.set_status(200)
self.finish()
class PetIdRequestHandler(RESTHandler):
""" refer to /pet/{petId} """
def delete(self, id):
if not self.db.delete_(int(id)):
self.set_status(400)
self.finish()
def get(self, id):
pet = self.db.read_(int(id))
if not pet:
self.set_status(404)
else:
self.write(json.dumps(pet))
self.finish()
class ImageRequestHandler(web.RequestHandler):
""" test for file upload """
def post(self):
""" pass additionalMetadata and file to global
variables.
"""
global received_file
global received_meta
received_file = self.request.files['file'][0].body
received_meta = self.get_argument('additionalMetadata')
""" global variables """
pet_db = create_pet_db()
app = web.Application([
(r'/api/pet', PetRequestHandler, dict(db=pet_db)),
(r'/api/pet/(\d+)', PetIdRequestHandler, dict(db=pet_db)),
(r'/api/pet/uploadImage', ImageRequestHandler)
], debug=True)
@pytest.mark.skipif(sys.version_info[:2] >= (3, 3), reason='httpretty corrupt in python3')
class TornadoTestCase(testing.AsyncHTTPTestCase):
"""
"""
def setUp(self):
global received_file
global received_meta
# reset global
received_file = received_meta = None
super(TornadoTestCase, self).setUp()
self.client = TornadoClient()
def get_new_ioloop(self):
return IOLoop.instance()
def get_app(self):
global app
return app
@testing.gen_test
def test_updatePet(self):
""" updatePet """
global pet_db
resp = yield self.client.request(
sapp.op['updatePet'](body=dict(id=1, name='Tom1')),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(pet_db.read_(1)['name'], 'Tom1')
@testing.gen_test
def test_addPet(self):
""" addPet """
global pet_db
resp = yield self.client.request(
sapp.op['addPet'](body=dict(id=5, name='Mission')),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(pet_db.read_(5)['name'], 'Mission')
@testing.gen_test
def test_deletePet(self):
""" deletePet """
resp = yield self.client.request(
sapp.op['deletePet'](petId=5),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(pet_db.read_(5), None)
@testing.gen_test
def test_getPetById(self):
""" getPetById """
resp = yield self.client.request(
sapp.op['getPetById'](petId=2),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(resp.data, pet_Mary)
@testing.gen_test
def test_uploadFile(self):
""" uploadFile """
global received_file
global received_meta
resp = yield self.client.request(
sapp.op['uploadFile'](
additionalMetadata='a test file', file=dict(data=six.StringIO('a test Content'), filename='test.txt')),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(received_file.decode(), 'a test Content')
self.assertEqual(received_meta, 'a test file')
|
the-stack_0_4181 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20151222_0052'),
]
operations = [
migrations.AlterModelOptions(
name='photo',
options={'permissions': (('view_photo', 'View photo'),)},
),
]
|
the-stack_0_4182 | #!/usr/bin/env python
from time import sleep
from drive import RosAriaDriver
from math import sin, cos
### replace X with the robot number
robot=RosAriaDriver('/PIONIER4')
skan=robot.ReadLaser()
### read and write in json format
import json
with open('data_stereo.json','w') as json_data_file:
json.dump(skan,json_data_file)
## print to stdout
#print(json.dumps(skan))
## read data from file
#json_data = open('skan.json')
#data = json.load(json_data)
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
x = np.arange(0,512)
theta = (np.pi/512 )*(x-256) # angle in rad
#fig2 = plt.figure()
#ax2 = fig2.add_axes([0.1,0.1,0.8,0.8])
#line, = ax2.plot(theta,skan,lw=2.5)
#ax2.set_xlim(-3,3)
#ax2.set_ylim(-3,3) # distance range
#plt.show()
plt.show()
skan=robot.ReadLaser()
a=[]
b=[]
for i in range(0,511):
xx = cos(theta[i])*skan[i]
a.append(xx)
yy = sin(theta[i])*skan[i]
b.append(yy)
fig3 = plt.figure()
ax3 = fig3.add_axes([0.1,0.1,0.8,0.8])
line, = ax3.plot(a,b)
# distance range
while True:
skan=robot.ReadLaser()
aa=[]
bb=[]
for i in range(0,511):
xx = cos(theta[i])*skan[i]
aa.append(xx)
yy = sin(theta[i])*skan[i]
bb.append(yy)
line.set_xdata(aa)
line.set_ydata(bb)
plt.draw()
plt.pause(0.05)
|
the-stack_0_4184 | import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
""" attention pad mask """
def get_attn_pad_mask(seq_q, seq_k, i_pad):
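    # Mark the <pad> positions of seq_k so their attention scores can be masked out later;
    # the returned boolean mask has shape (batch, len_q, len_k).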
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(i_pad).unsqueeze(1).expand(batch_size, len_q, len_k) # <pad>
return pad_attn_mask
""" attention decoder mask """
def get_attn_decoder_mask(seq):
subsequent_mask = torch.ones_like(seq).unsqueeze(-1).expand(seq.size(0), seq.size(1), seq.size(1))
subsequent_mask = subsequent_mask.triu(diagonal=1) # upper triangular part of a matrix(2-D)
return subsequent_mask
""" scale dot product attention """
class ScaledDotProductAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dropout = nn.Dropout(config.dropout)
self.scale = 1 / (self.config.d_head ** 0.5)
def forward(self, Q, K, V, attn_mask):
# (bs, n_head, n_q_seq, n_k_seq)
scores = torch.matmul(Q, K.transpose(-1, -2)).mul_(self.scale)
scores.masked_fill_(attn_mask, -1e9)
# (bs, n_head, n_q_seq, n_k_seq)
attn_prob = nn.Softmax(dim=-1)(scores)
attn_prob = self.dropout(attn_prob)
# (bs, n_head, n_q_seq, d_v)
context = torch.matmul(attn_prob, V)
# (bs, n_head, n_q_seq, d_v), (bs, n_head, n_q_seq, n_v_seq)
return context, attn_prob
""" multi head attention """
class MultiHeadAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.W_Q = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)
self.W_K = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)
self.W_V = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)
self.scaled_dot_attn = ScaledDotProductAttention(self.config)
self.linear = nn.Linear(self.config.n_head * self.config.d_head, self.config.d_hidn)
self.dropout = nn.Dropout(config.dropout)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
# (bs, n_head, n_q_seq, d_head)
q_s = self.W_Q(Q).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)
# (bs, n_head, n_k_seq, d_head)
k_s = self.W_K(K).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)
# (bs, n_head, n_v_seq, d_head)
v_s = self.W_V(V).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)
# (bs, n_head, n_q_seq, n_k_seq)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.config.n_head, 1, 1)
# (bs, n_head, n_q_seq, d_head), (bs, n_head, n_q_seq, n_k_seq)
context, attn_prob = self.scaled_dot_attn(q_s, k_s, v_s, attn_mask)
# (bs, n_head, n_q_seq, h_head * d_head)
context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.config.n_head * self.config.d_head)
# (bs, n_head, n_q_seq, e_embd)
output = self.linear(context)
output = self.dropout(output)
# (bs, n_q_seq, d_hidn), (bs, n_head, n_q_seq, n_k_seq)
return output, attn_prob
""" feed forward """
class PoswiseFeedForwardNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(in_channels=self.config.d_hidn, out_channels=self.config.d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=self.config.d_ff, out_channels=self.config.d_hidn, kernel_size=1)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, inputs):
# (bs, d_ff, n_seq)
output = self.active(self.conv1(inputs.transpose(1, 2)))
# (bs, n_seq, d_hidn)
output = self.conv2(output).transpose(1, 2)
output = self.dropout(output)
# (bs, n_seq, d_hidn)
return output
""" encoder layer """
class EncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.self_attn = MultiHeadAttention(self.config)
self.layer_norm1 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)
self.pos_ffn = PoswiseFeedForwardNet(self.config)
self.layer_norm2 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)
def forward(self, inputs, attn_mask):
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
att_outputs, attn_prob = self.self_attn(inputs, inputs, inputs, attn_mask)
att_outputs = self.layer_norm1(inputs + att_outputs)
# (bs, n_enc_seq, d_hidn)
ffn_outputs = self.pos_ffn(att_outputs)
ffn_outputs = self.layer_norm2(ffn_outputs + att_outputs)
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
return ffn_outputs, attn_prob
""" encoder """
class Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.enc_emb = nn.Embedding(self.config.n_enc_vocab, self.config.d_hidn)
self.pos_emb = nn.Embedding(self.config.n_enc_seq + 1, self.config.d_hidn)
self.seg_emb = nn.Embedding(self.config.n_seg_type, self.config.d_hidn)
self.layers = nn.ModuleList([EncoderLayer(self.config) for _ in range(self.config.n_layer)])
def forward(self, inputs, segments):
positions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).expand(inputs.size(0), inputs.size(1)).contiguous() + 1
pos_mask = inputs.eq(self.config.i_pad)
positions.masked_fill_(pos_mask, 0)
# (bs, n_enc_seq, d_hidn)
outputs = self.enc_emb(inputs) + self.pos_emb(positions) + self.seg_emb(segments)
# (bs, n_enc_seq, n_enc_seq)
attn_mask = get_attn_pad_mask(inputs, inputs, self.config.i_pad)
attn_probs = []
for layer in self.layers:
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
outputs, attn_prob = layer(outputs, attn_mask)
attn_probs.append(attn_prob)
# (bs, n_enc_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
return outputs, attn_probs
""" bert """
class BERT(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.encoder = Encoder(self.config)
self.linear = nn.Linear(config.d_hidn, config.d_hidn)
self.activation = torch.tanh
def forward(self, inputs, segments):
# (bs, n_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
outputs, self_attn_probs = self.encoder(inputs, segments)
# (bs, d_hidn)
outputs_cls = outputs[:, 0].contiguous()
outputs_cls = self.linear(outputs_cls)
outputs_cls = self.activation(outputs_cls)
# (bs, n_enc_seq, n_enc_vocab), (bs, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
return outputs, outputs_cls, self_attn_probs
def save(self, epoch, loss, path):
torch.save({
"epoch": epoch,
"loss": loss,
"state_dict": self.state_dict()
}, path)
def load(self, path, map_location=None):
save = torch.load(path, map_location)
self.load_state_dict(save["state_dict"], )
return save["epoch"], save["loss"]
""" BERT pretrain """
class BERTPretrain(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = BERT(self.config)
        # classifier
self.projection_cls = nn.Linear(self.config.d_hidn, 2, bias=False)
# lm
self.projection_lm = nn.Linear(self.config.d_hidn, self.config.n_enc_vocab, bias=False)
self.projection_lm.weight = self.bert.encoder.enc_emb.weight
def forward(self, inputs, segments):
# (bs, n_enc_seq, d_hidn), (bs, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
outputs, outputs_cls, attn_probs = self.bert(inputs, segments)
# (bs, 2)
logits_cls = self.projection_cls(outputs_cls)
# (bs, n_enc_seq, n_enc_vocab)
logits_lm = self.projection_lm(outputs)
# (bs, n_enc_vocab), (bs, n_enc_seq, n_enc_vocab), [(bs, n_head, n_enc_seq, n_enc_seq)]
return logits_cls, logits_lm, attn_probs
""" naver movie classfication """
class MovieClassification(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = BERT(self.config)
        # classifier
self.projection_cls = nn.Linear(self.config.d_hidn, self.config.n_output, bias=False)
def forward(self, inputs, segments):
# (bs, n_enc_seq, d_hidn), (bs, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
outputs, outputs_cls, attn_probs = self.bert(inputs, segments)
# (bs, n_output)
logits_cls = self.projection_cls(outputs_cls)
# (bs, n_output), [(bs, n_head, n_enc_seq, n_enc_seq)]
return logits_cls, attn_probs
def save(self, epoch, loss, score, path):
torch.save({
"epoch": epoch,
"loss": loss,
"score": score,
"state_dict": self.state_dict()
}, path)
def load(self, path):
save = torch.load(path)
self.load_state_dict(save["state_dict"])
return save["epoch"], save["loss"], save["score"]
|
the-stack_0_4185 | from flask import Blueprint, render_template, url_for, request, redirect
from logzero import logger
from base.utils.auth import admin_required, get_jwt
from base.forms import AdminEditToolContainerVersion
from caendr.services.tool_versions import get_all_containers, get_available_version_tags, get_container, get_version, update_version
admin_tools_bp = Blueprint('admin_tools',
__name__,
template_folder='templates')
@admin_tools_bp.route('/', methods=['GET'])
@admin_required()
def admin_tools():
title = 'Tool Container Versions'
alt_parent_breadcrumb = {"title": "Admin/Tools", "url": url_for('admin_tools.admin_tools')}
containers = get_all_containers()
return render_template('admin/tool/list.html', **locals())
@admin_tools_bp.route('/<id>/edit', methods=["GET", "POST"])
@admin_required()
def edit_tool(id):
if id is None:
raise UnprocessableEntity('Error: No profile id in URL')
title = f'{id}'
alt_parent_breadcrumb = {"title": "Admin/Tools", "url": url_for('admin_tools.admin_tools')}
jwt_csrf_token = (get_jwt() or {}).get("csrf")
tool = get_container(id)
versions = get_available_version_tags(tool)
versions.reverse()
form = AdminEditToolContainerVersion(version=get_version(tool))
form.version.choices = [(ver, ver) for ver in versions]
if request.method == 'POST' and form.validate_on_submit():
update_version(tool, request.form.get('version'))
return redirect(url_for("admin_tools.admin_tools"), code=302)
return render_template('admin/tool/edit.html', **locals())
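# Hedged wiring sketch (not part of this module): how the blueprint above would
# typically be attached in the application factory. The '/admin/tools' prefix is
# an assumption.
def _register_admin_tools_example(app, url_prefix='/admin/tools'):
    app.register_blueprint(admin_tools_bp, url_prefix=url_prefix)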
|
the-stack_0_4186 | '''
Created on Sep 6, 2021
@author: mhindle
'''
import numpy as np
import numbers
from typing import Tuple, List, Dict, Union, Set
import itertools
from collections import defaultdict
import pandas as pd
class JointAllellicDistribution(object):
def __init__(self, snp_ordered, chromosome2snp=None, pseudocount = 1, surround_size=1):
self.pseudocount = pseudocount
self.frequency: Dict[Tuple[str,int],Dict[Tuple[str,int],Dict[Tuple[str,int],int]]] = dict()
self.n_observations: Dict[Tuple[str,str,str]] = defaultdict(int)
self.surround_size = surround_size
self.window_size = (surround_size*2)+1
self.snp_ordered = snp_ordered
self.chromosome2snp = chromosome2snp
def getWindow(self, targetSnp):
'''
        targetSnp is the snp around which to extract the symmetric window of +/- surround_size
'''
targetpos = self.snp_ordered.index(targetSnp)
startpos_snp = targetpos-self.surround_size
if startpos_snp < 0:
startpos_snp = 0
endpos_snp = targetpos+self.surround_size+1
if endpos_snp >= len(self.snp_ordered):
            endpos_snp = len(self.snp_ordered)  # clamp to the list length; a slice end equal to len() keeps the last SNP in the window
snpWindow = self.snp_ordered[startpos_snp:endpos_snp]
if self.chromosome2snp is not None:
targetchr = self.chromosome2snp[targetSnp]
return([snpId for snpId in snpWindow if self.chromosome2snp[snpId] == targetchr])
return(snpWindow)
def getCountTable(self, observedstates: dict, targetSnp):
all_obs = [(snpid,observedstates[snpid]) for snpid in self.getWindow(targetSnp)]
def copypastefunc(x):
return([(snpid,state) if snpid != targetSnp else (targetSnp, x) for snpid,state in all_obs])
for state, query in enumerate(list(map(copypastefunc, [0,1,2]))):
#print("%s == %s" % (state, query))
workinghash = self.frequency
for item in query:
workinghash = workinghash[item]
if "obs" in workinghash:
yield workinghash["obs"] #it should be the result
else:
print("query %s" % query)
print("first %s" % self.frequency[query[0]])
print("workinghash %s" % workinghash)
print("item %s" % "_".join(map(str,item)))
raise Exception("incomplete traversal of nested hash: final %s state %s" % (workinghash, state))
def countJointFrq(self, table, mask, column_names: List[str], conditions_index=[0,1,2,9]):
column_names = np.array(column_names)
subset = table[np.all(mask,axis=1),:]
for values in list(itertools.product(conditions_index, repeat=self.window_size)):
conditions = list(zip(column_names, values))
nine_truth = np.ones((subset.shape[0],1), dtype=bool)
rows_that_meet = np.logical_and.reduce([nine_truth if value == 9 else np.equal(subset[:,column_names == snp],value) for snp,value in conditions])
keys = list(zip(column_names, values))
obs = np.count_nonzero(rows_that_meet)
self.recurse_set_dict(self.frequency, keys, obs)
if 9 not in values: # only count complete real value arrays
                self.n_observations[tuple(column_names)] += (obs+self.pseudocount) # this keeps track of how many observations there have been for these three snps
def recurse_set_dict(self, d, queue, value):
f = queue.pop(0)
if len(queue) > 0:
if f not in d:
d[f] = dict()
self.recurse_set_dict(d[f], queue, value)
else:
if f not in d:
d[f] = dict()
if "obs" not in d[f]:
d[f]["obs"] = value+self.pseudocount # we record the observations for this state combo
elif d[f]["obs"] != value+self.pseudocount:
raise Exception("overwriting value %s with %s " % (d[f]["obs"], value))
def countJointFrqAll(self, table:pd.DataFrame, mask=None):
'''
        table: expects a pandas DataFrame with columns as snp ids and rows as observations
        mask: expects a numpy bool matrix; a pandas bool DataFrame is also accepted and converted
'''
if mask is None:
mask = np.ones(table.shape,dtype=bool)
        elif isinstance(mask, pd.DataFrame):
mask = mask.to_numpy(dtype=bool)
for targetSnp in self.snp_ordered:
snp_window = self.getWindow(targetSnp)
indexofsnps = [x in snp_window for x in table.columns]
self.countJointFrq(table.loc[:,snp_window].to_numpy(dtype=int), mask[:,indexofsnps], snp_window)
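# Hedged usage sketch (not part of the original module): builds the joint counts
# for three ordered SNPs from a tiny genotype table. Genotype codes follow the
# conventions above (0/1/2 observed, 9 missing); the SNP names are placeholders.
def _joint_distribution_example():
    snps = ["snp1", "snp2", "snp3"]
    genotypes = pd.DataFrame([[0, 1, 2],
                              [1, 1, 2],
                              [0, 9, 2]], columns=snps)
    dist = JointAllellicDistribution(snps, chromosome2snp={s: "1" for s in snps})
    dist.countJointFrqAll(genotypes)
    # pseudocounted counts for target "snp2" being 0, 1 or 2, given the flanking states
    return list(dist.getCountTable({"snp1": 0, "snp2": 1, "snp3": 2}, "snp2"))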
|
the-stack_0_4188 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.db.models import OrgGroup
from ppmessage.db.models import OrgGroupUserData
from ppmessage.core.constant import API_LEVEL
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.api.handlers.ppaddorggroupuserhandler import update_group_icon
import json
import logging
class PPRemoveOrgGroupUserHandler(BaseHandler):
"""
"""
def _remove(self, _group_uuid, _user_uuid):
_redis = self.application.redis
_key = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid
if _redis.sismember(_key, _user_uuid) == False:
self.setErrorCode(API_ERR.NOT_GROUP_USER)
logging.error("user: %s not in group:%s" % (_user_uuid, _group_uuid))
return False
_key = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid + \
".user_uuid." + _user_uuid
_data_uuid = _redis.get(_key)
if _data_uuid == None:
self.setErrorCode(API_ERR.NOT_GROUP_USER)
logging.error("user: %s group:%s not bind." % (_user_uuid, _group_uuid))
return False
_row = OrgGroupUserData(uuid=_data_uuid)
_row.async_delete(_redis)
_row.delete_redis_keys(_redis)
return True
def _get(self, _app_uuid, _group_uuid, _user_list):
_redis = self.application.redis
for _user_uuid in _user_list:
_r = self._remove(_group_uuid, _user_uuid)
update_group_icon(_redis, _group_uuid)
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPRemoveOrgGroupUserHandler, self)._Task()
_body = json.loads(self.request.body)
_app_uuid = _body.get("app_uuid")
_group_uuid = _body.get("group_uuid")
_user_list = _body.get("user_list")
if _app_uuid == None or _group_uuid == None or _user_list == None:
self.setErrorCode(API_ERR.NO_PARA)
return
if not isinstance(_user_list, list):
self.setErrorCode(API_ERR.NOT_LIST)
return
self._get(_app_uuid, _group_uuid, _user_list)
return
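# Hedged request sketch (not part of the original handler): the JSON body _Task
# expects. The UUID values below are placeholders.
#
#   {
#       "app_uuid": "<app-uuid>",
#       "group_uuid": "<org-group-uuid>",
#       "user_list": ["<user-uuid-1>", "<user-uuid-2>"]
#   }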
|
the-stack_0_4190 | #!/usr/bin/env python
from _title_word import ngram_line, ngram, run
from collections import Counter, defaultdict
def parse_word(title, txt):
count = defaultdict(int)
# word_set = set()
total = len(title) + len(txt)
for i in ngram_line(title):
if len(i) > 1:
count[i] = 1
for i in ngram_line(txt):
if i in count:
count[i] += 1
for word, n in sorted(count.items(), key=lambda x: len(x[0])):
if n >= 3:
if len(word) > 2:
for i in ngram(word, len(word)):
count[i] -= n
r = []
for word, n in count.items():
len_word = len(word)
if len_word > 2:
for i in ngram(word, len_word):
if n < count[i]:
n = 0
break
if n > 3 and (n * len(word)) / total > 0.005:
r.append(word)
return r
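# Hedged illustration (the exact tokenisation lives in _title_word, so it is an
# assumption that ngram_line/ngram yield character n-grams): a title candidate is
# kept only if it occurs more than 3 times across title+txt and covers more than
# 0.5% of the combined length, i.e. (n * len(word)) / (len(title) + len(txt)) > 0.005,
# after discounting n-grams that only appear inside a longer kept candidate.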
run(__file__, parse_word)
|
the-stack_0_4192 | """Benchmark for merge0.
Trains a small percentage of autonomous vehicles to dissipate shockwaves caused
by merges in an open network. The autonomous penetration rate in this example
is 10%.
- **Action Dimension**: (5, )
- **Observation Dimension**: (25, )
- **Horizon**: 750 steps
"""
from flow.envs import MergePOEnvAvgVel
from flow.networks import MergeNetwork
from copy import deepcopy
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \
InFlows, SumoCarFollowingParams
from flow.networks.merge import ADDITIONAL_NET_PARAMS
from flow.core.params import VehicleParams, SumoLaneChangeParams
from flow.controllers import SimCarFollowingController, RLController,IDMController,SimLaneChangeController
# time horizon of a single rollout
HORIZON = 1500
# inflow rate at the highway
FLOW_RATE = 2000
# percent of autonomous vehicles
RL_PENETRATION = 0.1
# num_rl term (see ADDITIONAL_ENV_PARAMs)
NUM_RL = 20
# We consider a highway network with an upstream merging lane producing
# shockwaves
additional_net_params = deepcopy(ADDITIONAL_NET_PARAMS)
additional_net_params["merge_lanes"] = 1
additional_net_params["highway_lanes"] = 2
additional_net_params["pre_merge_length"] = 500
# RL vehicles constitute 10% of the total number of vehicles
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {
}
),
lane_change_controller=(SimLaneChangeController,{}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
lane_change_params=SumoLaneChangeParams(
#model="SL2015",
lane_change_mode=1621,
#lc_pushy=0,
#lc_assertive=5,
lc_impatience=1e-8,
lc_time_to_impatience=1e12
),
num_vehicles=0)
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
lane_change_controller=(SimLaneChangeController,{}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
lane_change_params=SumoLaneChangeParams(
#model="SL2015",
lane_change_mode=1621,
#lc_pushy=0,
#lc_assertive=5,
lc_impatience=1e-8,
lc_time_to_impatience=1e12
),
num_vehicles=0)
# Vehicles are introduced from both sides of merge, with RL vehicles entering
# from the highway portion as well
inflow = InFlows()
inflow.add(
veh_type="human",
edge="inflow_highway",
vehs_per_hour=(1 - RL_PENETRATION) * FLOW_RATE,
depart_lane=0,#"first",#"free",
depart_speed=10)
inflow.add(
veh_type="rl",
edge="inflow_highway",
vehs_per_hour=RL_PENETRATION * FLOW_RATE,
depart_lane=0,#"free",
depart_speed=10)
inflow.add(
veh_type="human",
edge="inflow_merge",
vehs_per_hour=200,
depart_lane="first",#"free",
depart_speed=7.5)
flow_params = dict(
# name of the experiment
exp_tag="merge_4_Sim_AvgVel_MultiLane",
# name of the flow environment the experiment is running on
env_name=MergePOEnvAvgVel,
# name of the network class the experiment is running on
network=MergeNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
restart_instance=True,
sim_step=0.5,
render=False,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=HORIZON,
sims_per_step=2,
warmup_steps=0,
additional_params={
"max_accel": 9,
"max_decel": 9,
"target_velocity": 30,
"num_rl": NUM_RL,
},
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflow,
additional_params=additional_net_params,
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(),
)
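# Hedged usage sketch (not part of this benchmark file): benchmark modules like
# this are normally consumed by a runner that imports `flow_params`. The
# Experiment API below follows the common flow pattern, but its exact signature
# depends on the installed flow version.
#
#   from flow.core.experiment import Experiment
#   exp = Experiment(flow_params)
#   exp.run(num_runs=1)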
|
the-stack_0_4194 | from collections import OrderedDict
from datetime import timedelta as td
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from cachecow.decorators import cached_function
from django.utils.translation import ugettext, ugettext_lazy as _, pgettext, pgettext_lazy
from django.conf import settings
from django.db.models import *
from django.db.models.signals import post_save
from canvas import bgwork, util
from canvas.cache_patterns import CachedCall
from canvas.models import BaseCanvasModel, Comment, Content, get_mapping_id_from_short_id, Visibility
from canvas.redis_models import redis, RealtimeChannel, RedisSortedSet
from canvas.util import UnixTimestampField
from canvas.notifications.actions import Actions
from drawquest import knobs
from drawquest.apps.drawquest_auth.models import User, AnonymousUser
from drawquest.apps.push_notifications.models import push_notification
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.quests import signals
from drawquest.pagination import Paginator
from drawquest.apps.quest_invites.models import InvitedQuests
from drawquest.apps.quests.top import top_quests_buffer, get_quest_score
from services import Services
class ScheduledQuest(BaseCanvasModel):
quest = ForeignKey('Quest', null=False)
curator = ForeignKey(User, blank=True, null=True, default=None, related_name='scheduled_quests')
timestamp = UnixTimestampField(default=0)
appeared_on = UnixTimestampField(null=True, db_index=True)
sort = IntegerField()
class Meta:
ordering = ['-appeared_on']
@classmethod
def get_or_create(cls, quest):
if quest.parent_comment_id:
quest = quest.parent_comment
try:
return cls.objects.get(quest=quest.id)
except cls.DoesNotExist:
return cls.objects.create(quest=Quest.objects.get(pk=quest.id), sort=1)
@classmethod
def archived(cls, select_quests=False):
qs = cls.objects
if select_quests:
qs = qs.select_related('quest')
current_quest_id = redis.get('dq:current_scheduled_quest')
if current_quest_id:
qs = qs.exclude(id=current_quest_id)
return qs.exclude(appeared_on__isnull=True).order_by('-appeared_on')
@classmethod
def unarchived(cls):
return cls.objects.filter(appeared_on__isnull=True).order_by('sort')
def _publish_quest_of_the_day(self):
signals.current_quest_changed.send(ScheduledQuest, instance=self)
RealtimeChannel('qotd', 1).publish({'quest_id': self.quest_id})
push_notification('quest_of_the_day',
_(u"Today's Quest: %(quest_title)s" % {'quest_title': self.quest.title}),
extra_metadata={'quest_id': self.quest.id},
badge=1)
def set_as_current_quest(self):
redis.set('dq:current_scheduled_quest', self.id)
self.appeared_on = Services.time.time()
self.save()
self.quest.details.force()
self._publish_quest_of_the_day()
@classmethod
def rollover_next_quest(cls):
""" Sets the next scheduled quest as the currently active one / quest of the day. """
try:
cls.unarchived().order_by('sort')[0].set_as_current_quest()
except IndexError:
cls.archived().exclude(quest__title='Give him a smile!').order_by('appeared_on')[0].set_as_current_quest()
@classmethod
def current_scheduled_quest(cls):
""" The `ScheduledQuest` instance representing the current quest of the day. """
scheduled_quest_id = redis.get('dq:current_scheduled_quest')
if scheduled_quest_id:
return cls.objects.get(id=scheduled_quest_id)
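# Hedged scheduling sketch (not part of the original models): a daily cron or
# celery-beat task would typically advance the quest of the day like this.
def _rollover_quest_of_the_day_example():
    ScheduledQuest.rollover_next_quest()
    current = ScheduledQuest.current_scheduled_quest()
    return current.quest_id if current else None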
class QuestManager(Visibility.PublicOnlyManager):
def get_query_set(self):
return super(QuestManager, self).get_query_set().filter(parent_comment__isnull=True)
class QuestAllManager(Manager):
def get_query_set(self):
return super(QuestAllManager, self).get_query_set().filter(parent_comment__isnull=True)
class QuestPublishedManager(Visibility.PublishedOnlyManager):
def get_query_set(self):
return super(QuestPublishedManager, self).get_query_set().filter(parent_comment__isnull=True)
class QuestVisibleOnlyManager(Visibility.PublishedOnlyManager):
def get_query_set(self):
return super(QuestVisibleOnlyManager, self).get_query_set().filter(parent_comment__isnull=True)
class Quest(Comment):
objects = QuestManager()
all_objects = QuestAllManager()
published = QuestPublishedManager()
visible = QuestVisibleOnlyManager()
class Meta:
proxy = True
@property
def comments_url(self):
return settings.API_PREFIX + 'quests/comments'
@property
def comments(self):
return self.replies
@classmethod
def completed_by_user_count(self, user):
""" The number of quests a user has completed. """
return QuestComment.by_author(user).values('parent_comment_id').distinct().count()
def first_appeared_on(self):
if self.ugq:
return self.timestamp
if self.scheduledquest_set.exists():
return self.scheduledquest_set.all()[0].appeared_on
def get_absolute_url(self):
if not slugify(self.title):
return '/q/' + util.base36encode(self.id)
return reverse('quest', args=[util.base36encode(self.id), slugify(self.title)])
def author_count(self):
return self.replies.values_list('author_id', flat=True).distinct().count()
def drawing_count(self):
return self.replies.exclude(reply_content__isnull=True).count()
def schedule(self, ordinal, curator=None):
""" Returns `scheduled_quest` instance. """
scheduled_quest = ScheduledQuest.get_or_create(self)
if not scheduled_quest.curator:
scheduled_quest.curator = curator
scheduled_quest.timestamp = Services.time.time()
scheduled_quest.sort = ordinal
scheduled_quest.save()
return scheduled_quest
def is_currently_scheduled(self):
""" 'currently scheduled' means it's the quest of the day. """
scheduled_quest = ScheduledQuest.objects.get(id=redis.get('dq:current_scheduled_quest'))
return scheduled_quest.quest_id == self.id
def is_onboarding_quest(self):
return str(knobs.ONBOARDING_QUEST_ID) == str(self.id)
def user_has_completed(self, user):
""" Whether `user` has contributed a drawing for this quest. """
return self.replies.filter(author=user).exclude(reply_content__isnull=True).exists()
def attribute_to_user(self, user, attribution_copy):
self.attribution_user = user
self.attribution_copy = attribution_copy
self.save()
self.details.force()
def clear_attribution(self):
self.attribution_user = None
self.attribution_copy = ''
self.save()
self.details.force()
def dismiss(self, dismisser):
dismisser.redis.dismissed_quests.dismiss_quest(self)
def update_score(self):
score = get_quest_score(self)
top_quests_buffer.bump(self.id, score)
return score
@property
def invited_users(self):
from drawquest.apps.quest_invites.models import InvitedUsers
return InvitedUsers(self)
def _details(self):
content_details = self.reply_content.details().to_backend() if self.reply_content else {}
ts = self.timestamp
if self.scheduledquest_set.exists():
ts = self.scheduledquest_set.all().order_by('-appeared_on')[0].appeared_on or ts
ret = {
'id': self.id,
'author_id': self.author_id,
'content': content_details,
'timestamp': ts,
'title': self.title,
'comments_url': self.comments_url,
'author_count': self.author_count(),
'drawing_count': self.drawing_count(),
'visibility': self.visibility,
'attribution_copy': self.attribution_copy,
'ugq': self.ugq,
}
try:
ret['attribution_username'] = self.attribution_user.username
user = User.objects.get(id=self.attribution_user_id)
if user.userinfo.avatar:
ret['attribution_avatar_url'] = user.userinfo.avatar.details().get_absolute_url_for_image_type('archive')
ret['attribution_avatar_urls'] = user.details().avatar_urls['gallery']
except AttributeError:
ret['attribution_username'] = None
return ret
@classmethod
def details_by_id(cls, quest_id, promoter=None):
from drawquest.apps.quests.details_models import QuestDetails
if promoter is None:
promoter = QuestDetails
def inner_call():
return cls.all_objects.get(id=quest_id)._details()
return CachedCall(
'quest:{}:details_v15'.format(quest_id),
inner_call,
24*60*60,
promoter=promoter,
)
@property
def details(self):
return self.details_by_id(self.id)
@classmethod
def _auto_moderation(cls, author):
""" Returns (skip_moderation, curate,) booleans. """
curate = ((author.userinfo.trusted is None and redis.get('dq:auto_curate'))
or author.userinfo.trusted == False)
return False, curate
@classmethod
def create_and_post(cls, request, author, title, content=None, ugq=False):
skip_moderation, curate = cls._auto_moderation(author)
quest = super(Quest, cls).create_and_post(
request,
author,
False,
None,
content,
curate=curate,
skip_moderation=skip_moderation,
ugq=ugq,
title=title,
)
if ugq:
author.redis.ugq_buffer.bump(quest.id)
@bgwork.defer
def followee_created_ugq():
Actions.followee_created_ugq(author, quest)
return quest
def get_share_page_url(self, absolute=False):
slug = slugify(self.title)
if slug:
url = reverse('quest', args=[util.base36encode(self.id), slug])
else:
url = '/q/{}'.format(util.base36encode(self.id))
if absolute:
url = 'http://' + settings.DOMAIN + url
return url
class DismissedQuests(RedisSortedSet):
def __init__(self, user):
self.user_id = getattr(user, 'id', user)
super(DismissedQuests, self).__init__('user:{}:dismissed_quests'.format(self.user_id))
def dismiss_quest(self, quest):
""" `comment` can be a Comment or CommentDetails. """
self.zadd(quest.id, Services.time.time())
def filter_quests(self, quests):
hidden_quest_ids = set(int(id_) for id_ in self.zrange(0, -1))
return [quest for quest in quests if int(quest.id) not in hidden_quest_ids]
def _dedupe_quests(quests):
''' Each quest should be a dict with id and timestamp. Favors recency. '''
quests = sorted(quests, key=lambda quest: quest['timestamp'])
quests = dict((cmt['id'], cmt['timestamp']) for cmt in quests)
quests = [{'id': id_, 'timestamp': timestamp} for id_, timestamp in quests.items()]
return quests
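# Hedged example (not part of the original module): _dedupe_quests keeps a single
# entry per id and favours the most recent timestamp.
#
#   _dedupe_quests([{'id': 7, 'timestamp': 10}, {'id': 7, 'timestamp': 25}])
#   # -> [{'id': 7, 'timestamp': 25}]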
@cached_function(timeout=td(days=30), key=[
'completed_quest_ids_with_timestamps', 'v5',
lambda user: getattr(user, 'id', user),
])
def completed_quest_ids_with_timestamps(user):
from drawquest.apps.quest_comments.models import QuestComment
user_id = getattr(user, 'id', user)
comments = QuestComment.objects.filter(author_id=user_id).exclude(reply_content__isnull=True).values('parent_comment_id', 'timestamp')
quests = [{'id': cmt['parent_comment_id'], 'timestamp': cmt['timestamp']}
for cmt in comments]
quests = _dedupe_quests(quests)
quests = list(sorted(quests, key=lambda cmt: cmt['timestamp']))
return quests
# Cache invalidation for completed_quest_ids.
post_save.connect(
lambda sender, instance, **kwargs: completed_quest_ids_with_timestamps.delete_cache(instance.author_id),
sender=QuestComment, dispatch_uid='post_save_for_completed_quest_ids_with_timestamps_api', weak=False
)
def completed_quest_ids(user):
quests = completed_quest_ids_with_timestamps(user)
quests = sorted(quests, key=lambda quest: quest['timestamp'])
return [quest['id'] for quest in quests]
def archived_quests(offset=None):
""" Returns quest details. """
def get_cached(archived_quests):
return CachedCall.multicall([archived.quest.details for archived in archived_quests])
archived_quests = ScheduledQuest.archived(select_quests=True)
if offset is None:
return get_cached(archived_quests)
pagination = Paginator(archived_quests, knobs.QUESTS_PER_PAGE, offset=offset)
archived_quests = pagination.items
archived_quests = get_cached(archived_quests)
return archived_quests, pagination
def current_quest_details():
try:
quest = ScheduledQuest.current_scheduled_quest().quest
except AttributeError:
return None
return quest.details()
def _followee_quest_ids(user, since_timestamp=None):
buffer_keys = ['ugq_by_user:{}'.format(followee_id)
for followee_id in user.redis.new_following.zrange(0, -1)]
items = redis.zunion(buffer_keys, withscores=True, transaction=False)
if since_timestamp is not None:
items = [item for item in items if item[1] > since_timestamp]
items = sorted(items, key=lambda item: -item[1])
return [int(item[0]) for item in items]
def _current_quest_for_inbox(user):
try:
current_quest = ScheduledQuest.current_scheduled_quest().quest
if current_quest.replies.filter(author=user).exists():
return None
else:
return current_quest.details()
except AttributeError:
return None
def quest_inbox(user):
"""
Returns quest details in a tuple: current_quest, quests.
current_quest may be None.
"""
from drawquest.apps.quests.details_models import QuestDetails
if not user.is_authenticated():
return (current_quest_details(), [])
current_quest = _current_quest_for_inbox(user)
user_completed_quest_ids = completed_quest_ids(user)
followee_quest_ids = _followee_quest_ids(user)
followee_quest_ids = [id_ for id_ in followee_quest_ids
if id_ not in user_completed_quest_ids]
followee_quests = QuestDetails.from_ids(followee_quest_ids[:knobs.QUEST_INBOX_SIZE])
followee_quests = [(quest, quest.timestamp) for quest in followee_quests]
invited_quests = user.redis.quest_invites.uncompleted_invites()
invited_quests = [
(quest, ts)
for quest, ts in invited_quests
if ((current_quest is None or quest.id != current_quest.id)
and quest.id not in followee_quest_ids)
]
quests = followee_quests + invited_quests
quests = [(quest, ts) for quest, ts in quests
if int(quest.id) not in user_completed_quest_ids]
quests = [quest for quest, ts in sorted(quests, key=lambda q: -q[1])]
quests = user.redis.dismissed_quests.filter_quests(quests)
quests = quests[:knobs.QUEST_INBOX_SIZE]
if (current_quest is not None
and (current_quest.id in user_completed_quest_ids
or str(current_quest.id) in user.redis.dismissed_quests)):
current_quest = None
return current_quest, quests
def has_new_inbox_items(user, since_timestamp):
since_timestamp = int(since_timestamp)
if _current_quest_for_inbox(user) is not None:
return True
user_completed_quest_ids = completed_quest_ids(user)
followee_quest_ids = _followee_quest_ids(user, since_timestamp=since_timestamp)
if any(id_ for id_ in followee_quest_ids if id_ not in user_completed_quest_ids):
return True
invited_quests = user.redis.quest_invites.uncompleted_invites()
if any(ts > since_timestamp for quest, ts in invited_quests):
return True
return False
def quest_history(user):
""" Returns quest details. """
from drawquest.apps.quests.details_models import QuestDetails
if not user.is_authenticated():
return []
completed_quests = completed_quest_ids_with_timestamps(user)
completed_quests = sorted(completed_quests, key=lambda q: -q['timestamp'])
completed_quests = completed_quests[:knobs.QUEST_HISTORY_SIZE]
ugq = Quest.objects.filter(author=user).order_by('-id').values('id', 'timestamp')
ugq = list(ugq[:knobs.QUEST_HISTORY_SIZE])
dismissed_quests = user.redis.dismissed_quests.zrevrange(0, knobs.QUEST_HISTORY_SIZE,
withscores=True)
dismissed_quests = [{'id': int(item[0]), 'timestamp': item[1]}
for item in dismissed_quests]
history = completed_quests + ugq + dismissed_quests
history = _dedupe_quests(history)
history = sorted(history, key=lambda quest: -quest['timestamp'])
history = history[:knobs.QUEST_HISTORY_SIZE]
history = [quest['id'] for quest in history]
return QuestDetails.from_ids(history)
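# Hedged API sketch (not part of this module): a view would typically combine the
# helpers above for an authenticated drawquest user.
#
#   current_quest, inbox = quest_inbox(user)
#   payload = {
#       'current_quest': current_quest,
#       'quests': inbox,
#       'history': quest_history(user),
#   }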
|
the-stack_0_4195 | #!/usr/bin/env python
import os
import re
import sys
from setuptools import setup, find_packages
version = re.compile(r'VERSION\s*=\s*\((.*?)\)')
def get_package_version():
"returns package version without importing it"
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, "flower/__init__.py")) as initf:
for line in initf:
m = version.match(line.strip())
if not m:
continue
return ".".join(m.groups()[0].split(", "))
def get_requirements(filename):
return open('requirements/' + filename).read().splitlines()
classes = """
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Topic :: System :: Distributed Computing
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Operating System :: OS Independent
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
install_requires = get_requirements('default.txt')
if sys.version_info < (3, 0):
install_requires.append('futures')
setup(
name='ma-flower',
version=get_package_version(),
description='Celery Flower',
long_description=open('README.rst').read(),
author='Mher Movsisyan',
author_email='[email protected]',
url='https://github.com/mher/flower',
license='BSD',
classifiers=classifiers,
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=install_requires,
test_suite="tests",
tests_require=get_requirements('test.txt'),
package_data={'flower': ['templates/*', 'static/*.*',
'static/**/*.*', 'static/**/**/*.*']},
entry_points={
'console_scripts': [
'flower = flower.__main__:main',
],
'celery.commands': [
'flower = flower.command:FlowerCommand',
],
},
)
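# Hedged usage note (not part of the original setup.py): after installing this
# package (e.g. `pip install .`), the entry points declared above expose the
# standalone `flower` command and, on matching Celery versions, `celery flower`.
# Exact CLI flags depend on the installed Celery/Flower versions.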
|
the-stack_0_4196 | # -*- coding: utf-8 -*-
"""
Trac WebAdmin plugin for administration of custom fields.
License: BSD
(c) 2005-2012 ::: www.CodeResort.com - BV Network AS ([email protected])
(c) 2007-2009 ::: www.Optaros.com (.....)
"""
from pkg_resources import resource_filename
from trac.config import Option
from trac.core import *
from trac.web.chrome import Chrome, ITemplateProvider, add_script, add_warning
from trac.admin.api import IAdminPanelProvider
from customfieldadmin.api import CustomFields, _
class CustomFieldAdminPage(Component):
implements(ITemplateProvider, IAdminPanelProvider)
def __init__(self):
# Init CustomFields so translations work from first request
# FIXME: It actually only works from SECOND request - Trac bug?!
CustomFields(self.env)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TICKET_ADMIN' in req.perm('admin', 'ticket/customfields'):
yield ('ticket', _("Ticket System"),
'customfields', _("Custom Fields"))
def render_admin_panel(self, req, cat, page, customfield):
req.perm('admin', 'ticket/customfields').require('TICKET_ADMIN')
add_script(req, 'customfieldadmin/js/customfieldadmin.js')
def _customfield_from_req(self, req):
cfield = {'name': req.args.get('name','').encode('utf-8'),
'label': req.args.get('label','').encode('utf-8'),
'type': req.args.get('type','').encode('utf-8'),
'value': req.args.get('value','').encode('utf-8'),
'options': [x.strip().encode('utf-8') for x in \
req.args.get('options','').split("\n")],
'cols': req.args.get('cols','').encode('utf-8'),
'rows': req.args.get('rows','').encode('utf-8'),
'order': req.args.get('order', '').encode('utf-8'),
'format': req.args.get('format', '').encode('utf-8')}
return cfield
cf_api = CustomFields(self.env)
cf_admin = {} # Return values for template rendering
# Detail view?
if customfield:
cfield = None
for a_cfield in cf_api.get_custom_fields():
if a_cfield['name'] == customfield:
cfield = a_cfield
break
if not cfield:
raise TracError(_("Custom field %(name)s does not exist.",
name=customfield))
if req.method == 'POST':
if req.args.get('save'):
cfield.update(_customfield_from_req(self, req))
cf_api.update_custom_field(cfield)
req.redirect(req.href.admin(cat, page))
elif req.args.get('cancel'):
req.redirect(req.href.admin(cat, page))
if cfield.has_key('options'):
optional_line = ''
if cfield.get('optional', False):
optional_line = "\n\n"
cfield['options'] = optional_line + "\n".join(cfield['options'])
cf_admin['cfield'] = cfield
cf_admin['cf_display'] = 'detail'
else:
if req.method == 'POST':
# Add Custom Field
if req.args.get('add') and req.args.get('name'):
cfield = _customfield_from_req(self, req)
cf_api.update_custom_field(cfield, create=True)
req.redirect(req.href.admin(cat, page))
# Remove Custom Field
elif req.args.get('remove') and req.args.get('sel'):
sel = req.args.get('sel')
sel = isinstance(sel, list) and sel or [sel]
if not sel:
raise TracError(_("No custom field selected"))
for name in sel:
cfield = {'name': name}
cf_api.delete_custom_field(cfield)
req.redirect(req.href.admin(cat, page))
elif req.args.get('apply'):
# Change order
order = dict([(key[6:], req.args.get(key)) for key
in req.args.keys()
if key.startswith('order_')])
cfields = cf_api.get_custom_fields()
for current_cfield in cfields:
new_order = order.get(current_cfield['name'], 0)
if new_order:
current_cfield['order'] = new_order
cf_api.update_custom_field(current_cfield)
req.redirect(req.href.admin(cat, page))
cfields = []
orders_in_use = []
for item in cf_api.get_custom_fields():
item['href'] = req.href.admin(cat, page, item['name'])
item['registry'] = ('ticket-custom',
item['name']) in Option.registry
cfields.append(item)
orders_in_use.append(int(item.get('order')))
cf_admin['cfields'] = cfields
cf_admin['cf_display'] = 'list'
if sorted(orders_in_use) != range(1, len(cfields)+1):
add_warning(req, _("Custom Fields are not correctly sorted. " \
"This may affect appearance when viewing tickets."))
if hasattr(Chrome(self.env), 'jenv'):
return 'customfieldadmin.html', cf_admin, None
else:
return 'customfieldadmin.html', cf_admin
# ITemplateProvider methods
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return [('customfieldadmin', resource_filename(__name__, 'htdocs'))]
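# Hedged deployment note (not part of the original plugin): a Trac component like
# this is typically enabled via trac.ini, e.g.
#
#   [components]
#   customfieldadmin.* = enabled
#
# after which the panel appears under Admin -> Ticket System -> Custom Fields.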
|
the-stack_0_4197 | from whatsapp_defines import (
WATags,
WASingleByteTokens,
WADoubleByteTokens,
WAWebMessageInfo,
)
class WABinaryReader:
def __init__(self, data):
self.data = data
self.index = 0
def checkEOS(self, length):
if self.index + length > len(self.data):
raise EOFError("end of stream reached")
def readByte(self):
self.checkEOS(1)
ret = ord(self.data[self.index])
self.index += 1
return ret
def readIntN(self, n, littleEndian=False):
self.checkEOS(n)
ret = 0
for i in range(n):
currShift = i if littleEndian else n - 1 - i
ret |= ord(self.data[self.index + i]) << (currShift * 8)
self.index += n
return ret
def readInt16(self, littleEndian=False):
return self.readIntN(2, littleEndian)
def readInt20(self):
self.checkEOS(3)
ret = (
((ord(self.data[self.index]) & 15) << 16)
+ (ord(self.data[self.index + 1]) << 8)
+ ord(self.data[self.index + 2])
)
self.index += 3
return ret
def readInt32(self, littleEndian=False):
return self.readIntN(4, littleEndian)
def readInt64(self, littleEndian=False):
return self.readIntN(8, littleEndian)
def readPacked8(self, tag):
startByte = self.readByte()
ret = ""
for i in range(startByte & 127):
currByte = self.readByte()
ret += self.unpackByte(tag, (currByte & 0xF0) >> 4) + self.unpackByte(
tag, currByte & 0x0F
)
if (startByte >> 7) != 0:
ret = ret[: len(ret) - 1]
return ret
def unpackByte(self, tag, value):
if tag == WATags.NIBBLE_8:
return self.unpackNibble(value)
elif tag == WATags.HEX_8:
return self.unpackHex(value)
def unpackNibble(self, value):
if value >= 0 and value <= 9:
return chr(ord("0") + value)
elif value == 10:
return "-"
elif value == 11:
return "."
elif value == 15:
return "\0"
raise ValueError("invalid nibble to unpack: " + value)
def unpackHex(self, value):
if value < 0 or value > 15:
raise ValueError("invalid hex to unpack: " + str(value))
if value < 10:
return chr(ord("0") + value)
else:
return chr(ord("A") + value - 10)
def readRangedVarInt(self, minVal, maxVal, desc="unknown"):
ret = self.readVarInt()
if ret < minVal or ret >= maxVal:
raise ValueError("varint for " + desc + " is out of bounds: " + str(ret))
return ret
def isListTag(self, tag):
return tag == WATags.LIST_EMPTY or tag == WATags.LIST_8 or tag == WATags.LIST_16
def readListSize(self, tag):
if tag == WATags.LIST_EMPTY:
return 0
elif tag == WATags.LIST_8:
return self.readByte()
elif tag == WATags.LIST_16:
return self.readInt16()
raise ValueError("invalid tag for list size: " + str(tag))
def readString(self, tag):
if tag >= 3 and tag <= 235:
token = self.getToken(tag)
if token == "s.whatsapp.net":
token = "c.us"
return token
if (
tag == WATags.DICTIONARY_0
or tag == WATags.DICTIONARY_1
or tag == WATags.DICTIONARY_2
or tag == WATags.DICTIONARY_3
):
return self.getTokenDouble(tag - WATags.DICTIONARY_0, self.readByte())
elif tag == WATags.LIST_EMPTY:
return
elif tag == WATags.BINARY_8:
return self.readStringFromChars(self.readByte())
elif tag == WATags.BINARY_20:
return self.readStringFromChars(self.readInt20())
elif tag == WATags.BINARY_32:
return self.readStringFromChars(self.readInt32())
elif tag == WATags.JID_PAIR:
i = self.readString(self.readByte())
j = self.readString(self.readByte())
if i is None or j is None:
raise ValueError("invalid jid pair: " + str(i) + ", " + str(j))
return i + "@" + j
elif tag == WATags.NIBBLE_8 or tag == WATags.HEX_8:
return self.readPacked8(tag)
else:
raise ValueError("invalid string with tag " + str(tag))
def readStringFromChars(self, length):
self.checkEOS(length)
ret = self.data[self.index : self.index + length]
self.index += length
return ret
def readAttributes(self, n):
ret = {}
if n == 0:
return
for i in range(n):
index = self.readString(self.readByte())
ret[index] = self.readString(self.readByte())
return ret
def readList(self, tag):
ret = []
for i in range(self.readListSize(tag)):
ret.append(self.readNode())
return ret
def readNode(self):
listSize = self.readListSize(self.readByte())
descrTag = self.readByte()
if descrTag == WATags.STREAM_END:
raise ValueError("unexpected stream end")
descr = self.readString(descrTag)
if listSize == 0 or not descr:
raise ValueError("invalid node")
attrs = self.readAttributes((listSize - 1) >> 1)
if listSize % 2 == 1:
return [descr, attrs, None]
tag = self.readByte()
if self.isListTag(tag):
content = self.readList(tag)
elif tag == WATags.BINARY_8:
content = self.readBytes(self.readByte())
elif tag == WATags.BINARY_20:
content = self.readBytes(self.readInt20())
elif tag == WATags.BINARY_32:
content = self.readBytes(self.readInt32())
else:
content = self.readString(tag)
return [descr, attrs, content]
def readBytes(self, n):
ret = ""
for i in range(n):
ret += chr(self.readByte())
return ret
def getToken(self, index):
if index < 3 or index >= len(WASingleByteTokens):
raise ValueError("invalid token index: " + str(index))
return WASingleByteTokens[index]
def getTokenDouble(self, index1, index2):
n = 256 * index1 + index2
if n < 0 or n >= len(WADoubleByteTokens):
raise ValueError("invalid token index: " + str(n))
return WADoubleByteTokens[n]
def whatsappReadMessageArray(msgs):
if not isinstance(msgs, list):
return msgs
ret = []
for x in msgs:
ret.append(
WAWebMessageInfo.decode(x[2])
if isinstance(x, list) and x[0] == "message"
else x
)
return ret
def whatsappReadBinary(data, withMessages=False):
node = WABinaryReader(data).readNode()
if (
withMessages
and node is not None
and isinstance(node, list)
and node[1] is not None
):
node[2] = whatsappReadMessageArray(node[2])
return node
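# Hedged usage sketch (not part of the original module): `decrypted` is assumed to
# be the already-decrypted binary payload of a WhatsApp Web websocket frame.
#
#   node = whatsappReadBinary(decrypted, withMessages=True)
#   description, attributes, content = node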
|