max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
corehq/messaging/smsbackends/start_enterprise/migrations/0001_initial.py | dimagilg/commcare-hq | 471 | 12705617 | # Generated by Django 1.10.8 on 2017-10-12 10:35
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StartEnterpriseDeliveryReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sms_id', models.CharField(db_index=True, max_length=126)),
('message_id', models.CharField(db_index=True, max_length=126, unique=True)),
('received_on', models.DateTimeField(db_index=True, null=True)),
('info', jsonfield.fields.JSONField(default=dict, null=True)),
],
),
]
|
rojak-analyzer/show_word_frequency.py | pyk/rojak | 107 | 12705621 | import csv
from bs4 import BeautifulSoup
from collections import Counter
import re
csv_file = open('data_detikcom_740.csv')
csv_reader = csv.DictReader(csv_file)
words = []
for row in csv_reader:
title = row['title'].strip().lower()
raw_content = row['raw_content']
clean_content = BeautifulSoup(raw_content, 'lxml').text
# Compile regex to remove non-alphanum char
    nonalpha = re.compile(r'[\W_]+')
for word in title.split(' '):
word = word.lower()
word = nonalpha.sub('', word)
if word != '':
words.append(word)
for word in clean_content.split(' '):
word = word.lower()
word = nonalpha.sub('', word)
if word != '':
words.append(word)
counter = Counter(words)
for word in counter.most_common(len(counter)):
    print('{},{}'.format(word[0], word[1]))
csv_file.close()
|
src/embedding/recognition.py | mykiscool/DeepCamera | 914 | 12705673 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, json, time, sys
import shutil
import time
import os.path
import requests
from uuid import uuid1
import numpy as np
from utilslib.save2gst import save2gst, generate_protocol_string
import classifier_classify_new as classifier
from faces import save_embedding
all_face_index = 0  # incremented each time a face is recognized; used to tell apart two unknown people appearing in the same picture
BASEDIR = os.getenv('RUNTIME_BASEDIR',os.path.abspath(os.path.dirname(__file__)))
TMP_DIR_PATH = os.path.join(BASEDIR, 'data', 'faces', 'tmp_pic_path')
UPLOAD_FOLDER = os.path.join(BASEDIR, 'image')
DATABASE = 'sqlite:///' + os.path.join(BASEDIR, 'data', 'data.sqlite')
face_tmp_objid = None
obje_tmp_objid = None
EN_OBJECT_DETECTION = False
FACE_DETECTION_WITH_DLIB = False # Disable DLIB at this time
EN_SOFTMAX = False
SOFTMAX_ONLY = False
isUpdatingDataSet = False
webShowFace = False
EXT_IMG='png'
DO_NOT_UPLOAD_IMAGE = False
DO_NOT_REPORT_TO_SERVER = False
FOR_ARLO = True
USE_DEFAULT_DATA=True # Enable to use "groupid_default" for SVM training
SVM_CLASSIFIER_ENABLED=True
SVM_SAVE_TEST_DATASET=True
SVM_TRAIN_WITHOUT_CATEGORY=True
SVM_HIGH_SCORE_WITH_DB_CHECK=True
svm_face_dataset=None
svm_face_embedding=None
svm_tmp_dir=None
svm_face_testdataset=None
svm_stranger_testdataset=None
data_collection=None
def init_fs():
global svm_face_dataset
global svm_face_embedding
global svm_tmp_dir
global svm_face_testdataset
global svm_stranger_testdataset
try:
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
# if not os.path.exists(os.path.join(BASEDIR, 'data.sqlite')):
# db.create_all()
if not os.path.exists(os.path.join(BASEDIR, 'data', 'data.sqlite')):
if os.path.exists(os.path.join(BASEDIR, 'data_init')):
shutil.copyfile(os.path.join(BASEDIR, 'data_init'), os.path.join(BASEDIR, 'data', 'data.sqlite'))
if not os.path.exists(TMP_DIR_PATH):
os.makedirs(TMP_DIR_PATH)
if SVM_CLASSIFIER_ENABLED:
svm_face_dataset = os.path.join(BASEDIR, 'data', 'face_dataset')
svm_face_embedding = os.path.join(BASEDIR, 'data', 'face_embedding')
svm_tmp_dir = os.path.join(BASEDIR, 'data', 'faces', 'noname', 'person')
svm_face_testdataset = os.path.join(BASEDIR, 'data', 'face_testdataset')
svm_stranger_testdataset = os.path.join(BASEDIR, 'data', 'stranger_testdataset')
if not os.path.exists(svm_face_dataset):
os.mkdir(svm_face_dataset)
if not os.path.exists(svm_face_embedding):
os.mkdir(svm_face_embedding)
if not os.path.exists(svm_tmp_dir):
os.makedirs(svm_tmp_dir)
if not os.path.exists(svm_face_testdataset):
os.mkdir(svm_face_testdataset)
if not os.path.exists(svm_stranger_testdataset):
os.mkdir(svm_stranger_testdataset)
except Exception as e:
print(e)
def updatePeopleImgURL(ownerid, url, embedding, uuid, objid, img_type, accuracy, fuzziness, sqlId, style, img_ts, tid,
p_ids, waiting):
print('debug updatePeopleImgURL 1')
if len(url) < 1 or len(uuid) < 1 or len(objid) < 1 or len(img_type) < 1:
return
if not DO_NOT_REPORT_TO_SERVER:
print('save2gst')
        save2gst(uuid, objid, url, '', 'face', accuracy, int(fuzziness), int(sqlId), style, img_ts, tid, p_ids, waiting)  # send the request to workai
def upload_forecast_result(key, forecast_result, json_data, num_p):
uuid = forecast_result['uuid']
face_id = forecast_result['face_id']
face_accuracy = forecast_result['face_accuracy']
people_sqlId = forecast_result['people_sqlId']
align_image_path = forecast_result['align_image_path']
img_style_str = forecast_result['img_style_str']
ts = forecast_result['ts']
trackerId = forecast_result['trackerId']
face_fuzziness = forecast_result['face_fuzziness']
people = forecast_result['people']
p_ids = forecast_result['p_ids']
embedding_string = forecast_result['embedding_string']
#embedding_bytes = embedding_string.encode('utf-8')
img_type = forecast_result['img_type']
waiting = forecast_result['waiting']
do_not_report_to_server = DO_NOT_REPORT_TO_SERVER
uploadedimgurl = None
return generate_protocol_string(key, face_id, align_image_path,
embedding='', uuid=uuid,
DO_NOT_REPORT_TO_SERVER=do_not_report_to_server,block=False,
objid=face_id, img_type=img_type,
accuracy=face_accuracy, fuzziness=face_fuzziness, sqlId=people_sqlId,
style=img_style_str, ts=ts, tid=str(trackerId), p_ids=p_ids, waiting = waiting)
def face_recognition_on_embedding(align_image_path, embedding, totalPeople, blury, uuid,
current_groupid, style, trackerId,
timestamp1, ts, embedding_path):
img_objid = trackerId
print("img_objid = {}".format(img_objid))
print("number of people=%d" % (totalPeople))
if totalPeople > 1:
trackerId = str(uuid1())
number_people=totalPeople
img_style_str = style
img_style = img_style_str
json_data = {'detected':False, 'recognized': False, 'style': img_style_str}
forecast_result = {'people':None,
'img_type': 'face',
'people_sqlId': 0,
                       'face_id': None,  # objId/face_id of this particular face image
'trackerId': trackerId,
'uuid': uuid,
'ts': ts,
'img_style_str': img_style_str,
'align_image_path': align_image_path,
'face_accuracy': 0,
'face_fuzziness': 0,
'embedding_string': '',
'waiting': False,
'p_ids': None,
                       }  # holds the data to be sent to uploadImage
forecast_result['face_fuzziness'] = blury
print(">>> blury=%d" %(blury))
json_data['detected'] = True
print("2 %.2f seconds" % (time.time() - timestamp1))
if forecast_result['face_fuzziness'] < int(data_collection.get("blury_threhold")):
json_data, forecast_result = get_empty_faceid(current_groupid, uuid, embedding,
img_style, number_people, img_objid,
forecast_result)
print("Too blurry image, skip it img_objid={}, trackerId={}".format(img_objid, trackerId))
elif SVM_CLASSIFIER_ENABLED is True:
#img_style = 'front'
# embedding = embedding.reshape((1, -1))
forecast_result['waiting'] = False
json_data, forecast_result = SVM_classifier(embedding,align_image_path,
uuid,current_groupid,img_style,number_people,img_objid,json_data,forecast_result, embedding_path)
print("3 %.2f seconds" % (time.time() - timestamp1))
if json_data['recognized'] is True:
if webShowFace is True:
showRecognizedImage(forecast_result['align_image_path'], 1)
forecast_result['trackerId'] = trackerId
    # send the face prediction result
    key = str(<KEY>)  # key value redacted in the source
_,api_url,payload = upload_forecast_result(key, forecast_result, json_data, number_people)
json_data['key'] = key
json_data['face_fuzziness'] = blury
return json_data, {'api_url':api_url,'payload':payload}
def get_empty_faceid(current_groupid, uuid, embedding,
img_style, number_people, img_objid, forecast_result):
"""
    Called when the softmax classifier gives no result (no model, or low prediction confidence); falls back to scanning the database to identify the person.
:param current_groupid:
:param uuid:
:param embedding:
:param img_style:
:param number_people:
:param img_objid:
:return:
"""
json_data = {'detected': True, 'recognized': False}
face_id = img_objid + str(all_face_index).zfill(4)
json_data['recognized'] = False
json_data['face_id'] = face_id
json_data['accuracy'] = 0
json_data['style'] = img_style
forecast_result['face_id'] = face_id
forecast_result['face_accuracy'] = 0
embedding_string = ','.join(str(x) for x in embedding)
forecast_result['embedding_string'] = embedding_string
return json_data, forecast_result
def SVM_classifier(embedding,align_image_path,uuid,current_groupid,img_style,number_people, img_objid,json_data, forecast_result, embedding_path):
#Save image to src/face_dataset_classify/group/person/
if SVM_SAVE_TEST_DATASET is True:
group_path = os.path.join(BASEDIR,svm_face_testdataset, current_groupid)
if not os.path.exists(group_path):
os.mkdir(group_path)
print('test dataset group_path=%s' % group_path)
pkl_path = ""
if SVM_TRAIN_WITHOUT_CATEGORY is True:
pkl_path = '{}/data/faces/{}/{}/classifier_182.pkl'.format(BASEDIR,current_groupid, 'front')
face_dataset_path = '{}/data/faces/{}/{}/face_dataset'.format(BASEDIR,current_groupid, 'front')
else:
pkl_path = '{}/data/faces/{}/{}/classifier_182.pkl'.format(BASEDIR,current_groupid, img_style)
face_dataset_path = '{}/data/faces/{}/{}/face_dataset'.format(BASEDIR,current_groupid, img_style)
svm_detected = False
if os.path.exists(pkl_path):
nrof_classes = 0
if os.path.exists(face_dataset_path):
classes = [path for path in os.listdir(face_dataset_path) \
if os.path.isdir(os.path.join(face_dataset_path, path))]
nrof_classes = len(classes)
print("SVM_classifier: nrof_classes={}".format(nrof_classes))
#tmp_image_path = BASEDIR + '/data/faces/noname/person/face_tmp.'+EXT_IMG
#shutil.copyfile(align_image_path, tmp_image_path)
        # predict directly from the embedding; this is fast
svm_stime = time.time()
_, human_string, score, top_three_name, judge_result = classifier.classify([embedding], pkl_path, embedding_path)
if top_three_name:
top_three_faceid = [name.split(' ')[1] for name in top_three_name]
else:
top_three_faceid = None
print('-> svm classify cost {}s'.format(time.time()-svm_stime))
print("current value of score_1 ", float(data_collection.get("score_1")))
print("current value of score_2 ", float(data_collection.get("score_2")))
print("current value of fuzziness_1 ", float(data_collection.get("fuzziness_1")))
print("current value of fuzziness_2 ", float(data_collection.get("fuzziness_2")))
print("current value of update interval ", float(data_collection.get("_interval")))
if human_string is not None:
message = ""
message2 = ""
face_id = human_string.split(' ')[1]
score = round(score, 2)
fuzziness = forecast_result['face_fuzziness']
if USE_DEFAULT_DATA is True:
if face_id == 'defaultfaceid':
score = 0.0
if not FOR_ARLO:
if score >= float(data_collection.get("score_1")) or (score >= float(data_collection.get("score_2")) and fuzziness >= float(data_collection.get("fuzziness_1")) and fuzziness < float(data_collection.get("fuzziness_2"))):
found, total = check_embedding_on_detected_person_forSVM(current_groupid=current_groupid,
embedding=embedding,style=img_style,classid=face_id, nrof_classes=nrof_classes)
if found > 0:
svm_detected = True
message = "<DB Recognized> Face ID: %s %s/%s, 2nd %s/%s" % (face_id, score, img_style, found, total)
else:
message = "<DB 2nd Score Low> Face ID: %s %s/%s, 2nd %s/%s" % (face_id, score, img_style, found, total)
elif 0.35<score<0.8:
message = "Send this face to Zone Waiting: %s,%s"%(score, fuzziness)
forecast_result['waiting'] = True
else:
message = "<1st Score Low> Face ID: %s %s/%s" % (face_id, score, img_style)
else:
if score > float(data_collection.get("score_2")):#0.40
found, total = check_embedding_on_detected_person_forSVM_ByDir(current_groupid=current_groupid,
embedding=embedding,style=img_style,classid=human_string.replace(' ', '_'),nrof_classes=nrof_classes)
if found > 0:
if score > float(data_collection.get("score_1")) or judge_result != 0:#0.9
svm_detected = True
message = "<1, SVM Recognized> Face ID: %s %s/%s, 2nd %s/%s" % (face_id, score, img_style, found, total)
else:
message = "<2, SVM Recognized> Face ID: %s %s/%s, 2nd %s/%s" % (face_id, score, img_style, found, total)
else:
message = "<3, SVM Recognized Not, found=0> Face ID: %s %s/%s, 2nd %s/%s" % (face_id, score, img_style, found, total)
else:
message = "<4, SVM Recognized Not, Low score> Face ID: %s %s/%s" % (face_id, score, img_style)
print(message)
if (message2 != ""):
print(message2)
if svm_detected is True:
json_data['recognized'] = True
json_data['face_id'] = face_id
json_data['accuracy'] = int(score*100)
json_data['style'] = img_style
forecast_result['face_id'] = face_id
forecast_result['face_accuracy'] = score
embedding_string = ','.join(str(x) for x in embedding)
forecast_result['embedding_string'] = embedding_string
else:
forecast_result['p_ids'] = top_three_faceid
print("Not Recognized %s" % face_id)
#sendDebugLogToGroup(uuid, current_groupid, message)
#Save image to src/face_dataset_classify/group/person/
if SVM_SAVE_TEST_DATASET is True:
if svm_detected is True:
svm_face_testdataset_person_path = os.path.join(group_path, human_string)
else:
svm_face_testdataset_person_path = os.path.join(group_path, 'noname')
if not os.path.exists(svm_face_testdataset_person_path):
os.mkdir(svm_face_testdataset_person_path)
print('test dataset person path=%s' % svm_face_testdataset_person_path)
dir = os.path.basename(align_image_path)
name = os.path.splitext(dir)[0]
save_testdataset_filepath = os.path.join(svm_face_testdataset_person_path, name+'_'+str(int(time.time()))+'.png')
print('save classified image to path: %s' % save_testdataset_filepath)
try:
shutil.copyfile(align_image_path, save_testdataset_filepath)
except IOError:
            print('cannot copy file from {} to {}, needs checking later'.format(align_image_path, save_testdataset_filepath))
pass
if svm_detected is False:
json_data, forecast_result = get_empty_faceid(current_groupid, uuid, embedding,
img_style, number_people, img_objid,
forecast_result)
        print('not among the trained classes, or more training data is needed')
return json_data, forecast_result
def check_embedding_on_detected_person_forSVM(current_groupid, embedding, style, classid, nrof_classes):
total = 0
found = 0
people = None
    # scan the whole database to check who this person is
if SVM_TRAIN_WITHOUT_CATEGORY is True:
people = People.query.filter_by(group_id=current_groupid, classId=classid).all()
else:
people = People.query.filter_by(group_id=current_groupid, style=style, classId=classid).all()
if people:
for person in people:
val = compare2(embedding, person.embed)
total = total+1
face_accuracy = val
print('face_accuracy={}'.format(face_accuracy))
threshold = 0.70
if nrof_classes <= 5 and nrof_classes > 0:
threshold = 0.90
elif nrof_classes <= 10 and nrof_classes > 0:
threshold = 0.82
if face_accuracy >= threshold:
found = found+1
if total >= 500:
break
return found, total
def check_embedding_on_detected_person_forSVM_ByDir(current_groupid, embedding, style, classid, nrof_classes):
total = 0
found = 0
people = None
if SVM_TRAIN_WITHOUT_CATEGORY is True:
facedir = '{}/data/faces/{}/{}/face_dataset/{}'.format(BASEDIR,current_groupid, 'front', classid)
else:
facedir = '{}/data/faces/{}/{}/face_dataset/{}'.format(BASEDIR,current_groupid, style, classid)
print("check embedding: facedir = {}".format(facedir))
embedding_array = []
if os.path.isdir(facedir):
image_paths = []
images = os.listdir(facedir)
if len(images) < 1:
print("Check embedding: Empty directory: facedir={}".format(facedir))
return 0, 0
for img in images:
img_path = os.path.join(facedir, img)
emb_path = save_embedding.get_embedding_path(img_path)
emb = save_embedding.read_embedding_string(emb_path)
emb = np.asarray(emb)
embedding_array.append(emb)
if len(embedding_array) > 0:
for emb in embedding_array:
val = compare2(embedding, emb)
total = total+1
face_accuracy = val
print('face_accuracy={}'.format(face_accuracy))
'''
threshold = 1.0
if nrof_classes <= 5 and nrof_classes > 0:
threshold = 1.0
elif nrof_classes <= 10 and nrof_classes > 0:
threshold = 1.0
'''
threshold = 0.42
if face_accuracy > threshold:
found = found+1
if total >= 500:
break
return found, total
def compare(emb1, emb2):
dist = np.sqrt(np.sum(np.square(np.subtract(emb1, emb2))))
# d = emb1 - emb2
# sqL2 = np.dot(d, d)
# print("+ Squared l2 distance between representations: {:0.3f}, dist is {:0.3f}".format(sqL2,dist))
# print("+ distance between representations: {:0.3f}".format(dist))
# return sqL2
return dist
def compare2(emb1, emb2):
dist = np.sum([emb2]*emb1, axis=1)
return dist
def compare3(emb1, emb2):
return np.sum(np.square(emb1-emb2))
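# Hedged illustration (not from the original source): with 2-D toy embeddings
# a = np.array([0.6, 0.8]) and b = np.array([0.8, 0.6]):
#   compare(a, b)   -> ~0.283        (Euclidean distance)
#   compare2(a, b)  -> array([0.96]) ([b] broadcasts against a, i.e. a dot-product
#                                     style similarity per stored embedding)
#   compare3(a, b)  -> 0.08          (squared Euclidean distance)
# compare2 is the metric used by the check_embedding_* helpers above, which is
# why their thresholds (0.42, 0.70 and up) treat higher values as closer matches.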
class DataCollection(object):
def __init__(self, update_freq=10):
self.url = "http://localhost:5000/api/parameters"
self.pre_time = time.time()
self.update_freq = update_freq
self.items = self.fetch()
def fetch(self):
try:
resp = requests.get(self.url)
assert resp.status_code == 200
r = resp.json()
except Exception as e:
print(e)
#print("status_code: %s" %resp.status_code)
            # if the web server is not reachable, fall back to the default values
r = None
if not FOR_ARLO:
r = {
"blury_threhold": "10",
"fuzziness_1": "40",
"fuzziness_2": "200",
"score_1": "0.75",
"score_2": "0.60",
}
else:
r = {
"blury_threhold": "60",
"fuzziness_1": "40",
"fuzziness_2": "200",
"score_1": "0.90",
"score_2": "0.40",
}
if "_interval" in r:
_freq = int(r["_interval"])
if self.update_freq != _freq:
self.update_freq = _freq
r.update({"_interval": self.update_freq})
return r
def reload(self):
cur_time = time.time()
if cur_time - self.pre_time > self.update_freq:
self.pre_time = cur_time
self.items = self.fetch()
def get(self, key):
key = key.lower()
self.reload()
return self.items.get(key, None)
data_collection = DataCollection()
init_fs()
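# Hedged usage sketch (key names mirror the defaults above; the endpoint is the
# one hard-coded in DataCollection). Values are re-fetched from
# http://localhost:5000/api/parameters at most once per `update_freq` seconds,
# so calling get() per frame is cheap:
#
#   blur_cutoff = int(data_collection.get("blury_threhold"))  # e.g. 60 when FOR_ARLO
#   if some_blur_score < blur_cutoff:          # some_blur_score is illustrative
#       pass  # treat the frame as too blurry, as face_recognition_on_embedding does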
|
Master Script/manual db updater.py | avinashkranjan/PraticalPythonProjects | 930 | 12705696 | from optparse import OptionParser
import json
import sys
import os
usage = """
<Script> [Options]
[Options]
-h, --help Show this help message and exit.
-a, --add Goes straight to the add script phase
"""
# Load args
parser = OptionParser()
parser.add_option("-a", "--add", action="store_true", dest="add", help="Goes straight to the add script phase")
# The database is automatically updated after the PR is merged.
# ONLY Use this function if you were asked to, to manually add projects to the database.
def add_script():
""" Add a Contributor script through a series of inputs """
print("Double check inputs before pressing enter. If one input is incorrect press CTRL-C and re-run the script")
category = input("Enter What category does your script belongs to > ")
name = input("Enter script title > ")
path = input("Enter folder name that contains your script > ")
requirments_path = input("Enter requirements.txt path (else none) > ")
entry = input("Enter name of the file that runs the script > ")
arguments = input("Enter scripts arugments if needed ( '-' seperated + no whitespaces) (else none) > ")
contributor = input("Enter your GitHub username > ")
description = input("Enter a description for your script > ")
new_data = {category: {name: [path, entry, arguments, requirments_path, contributor, description]}}
data_store = read_data()
try:
# If category doesn't exist try will fail and except will ask to add a new category with the project
if data_store[category]: # Check for existing category or a new one
data_store[category].update(new_data[category]) # Add script
    except KeyError:
sure = "Y"
sure = input("A new category is about to be added. You sure? Y/n > ")
if sure.lower() == "y" or sure == "":
data_store.update(new_data) # Add new category
else:
print("Data wasn't added please re-run the script and add the correct inputs.")
sys.exit(1)
with open("datastore.json", "w") as file:
json.dump(data_store, file)
print("Script added to database")
def read_data():
""" Loads datastore.json """
with open("datastore.json", "r") as file:
data = json.load(file)
return data
def check_data():
""" Validates that all projects exists in the datastore and prints out those are not in the DB """
data = read_data()
paths = []
for category in data:
for project in data[category]:
paths.append(data[category][project][0])
i=0
repo_dir = os.listdir("../")
ignore = [".deepsource.toml", ".git", ".github", ".gitignore",
"CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "LICENSE",
"README.md", "SCRIPTS.md", "script_updater.py",
"Template for README.md", "Master Script", ]
for element in repo_dir:
if (not element in paths) and (not element in ignore):
print(element)
i+=1
print(f"Total of {i} non-added projects.")
# Start checkpoint
if __name__ == "__main__":
(options, args) = parser.parse_args()
# Inputs
add = options.add
if add:
add_script()
#add_script()
check_data()
|
templates/powershell/ps_system_time.py | ohoph/Ebowla | 738 | 12705710 | buildcode="""
function Get-SystemTime(){
$time_mask = @()
$the_time = Get-Date
$time_mask += [string]$the_time.Year + "0000"
$time_mask += [string]$the_time.Year + [string]$the_time.Month + "00"
$time_mask += [string]$the_time.Year + [string]$the_time.Month + [string]$the_time.Day
return $time_mask
}
"""
callcode="""
$key_combos += ,(Get-SystemTime)
""" |
examples/generate_embeddings.py | JackHopkins/onnxt5 | 197 | 12705718 | from onnxruntime import InferenceSession
from transformers import T5Tokenizer
from onnxt5.api import get_encoder_decoder_tokenizer, run_embeddings_text, get_sess
# The easiest way is to use the onnxt5 api and load the default pre-trained version of t5
decoder_sess, encoder_sess, tokenizer = get_encoder_decoder_tokenizer()
# You can load pre-exported models with get_sess (do note you need the tokenizer you trained also)
# decoder_sess, encoder_sess = get_sess(output_path)
# You can also load model_data manually:
# decoder_sess = InferenceSession('/home/abel/t5-decoder-with-lm-head.onnx')
# encoder_sess = InferenceSession('/home/abel/t5-encoder.onnx')
# The tokenizer should be the one you trained in the case of fine-tuning
# tokenizer = T5Tokenizer.from_pretrained('t5-base')
prompt = 'Listen, <NAME> has come unstuck in time.'
# To get embeddings you can either use our utility function
encoder_embeddings, decoder_embeddings = run_embeddings_text(encoder_sess, decoder_sess, tokenizer, prompt)
# Or do it manually as follow
input_ids = tokenizer.encode(prompt, return_tensors='pt').numpy()
# To generate the encoder's last hidden state
encoder_output = encoder_sess.run(None, {"input_ids": input_ids})[0]
# To generate the full model's embeddings
decoder_output = decoder_sess.run(None, {
"input_ids": input_ids,
"encoder_hidden_states": encoder_output
})[0] |
raspberryio/project/admin.py | cvautounix/raspberryio | 113 | 12705727 | try:
from PIL import Image
except ImportError:
import Image
from django import forms
from django.contrib import admin
from raspberryio.project.models import (FeaturedProject, Project, ProjectStep,
ProjectCategory)
class FeaturedProjectAdminForm(forms.ModelForm):
class Meta:
model = FeaturedProject
def clean_photo(self):
photo = self.cleaned_data.get('photo', False)
if 'photo' in self.changed_data:
img = Image.open(photo)
if photo.size > 5 * 1024 * 1024:
error = "Photo file too large ( maximum 5MB )"
raise forms.ValidationError(error)
if img.size[0] < 1252 or img.size[1] < 626:
error = "Photo dimensions too small ( minimum 1252x636 pixels )"
raise forms.ValidationError(error)
return photo
class FeaturedProjectAdmin(admin.ModelAdmin):
model = FeaturedProject
form = FeaturedProjectAdminForm
list_display = ('project', 'featured_start_date')
class ProjectAdminForm(forms.ModelForm):
class Meta:
model = Project
fields = (
'title', 'status', 'publish_date', 'user', 'featured_photo',
'featured_video', 'tldr', 'categories'
)
class ProjectStepInline(admin.TabularInline):
model = ProjectStep
extra = 1
class ProjectAdmin(admin.ModelAdmin):
model = Project
form = ProjectAdminForm
list_display = ('title', 'created_datetime', 'admin_thumb')
inlines = (ProjectStepInline,)
raw_id_fields = ('user',)
class ProjectCategoryAdmin(admin.ModelAdmin):
model = ProjectCategory
fields = ('title',)
admin.site.register(FeaturedProject, FeaturedProjectAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(ProjectCategory, ProjectCategoryAdmin)
|
synapse/lib/queue.py | ackroute/synapse | 216 | 12705731 | import asyncio
import collections
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.base as s_base
class AQueue(s_base.Base):
'''
    An async queue with a chunk-optimized, sync-compatible consumer.
'''
async def __anit__(self):
await s_base.Base.__anit__(self)
self.fifo = []
self.event = asyncio.Event()
self.onfini(self.event.set)
def put(self, item):
'''
Add an item to the queue.
'''
if self.isfini:
return False
self.fifo.append(item)
if len(self.fifo) == 1:
self.event.set()
return True
async def slice(self):
# sync interface to the async queue
if len(self.fifo) == 0:
await self.event.wait()
retn = list(self.fifo)
self.fifo.clear()
self.event.clear()
return retn
class Queue:
'''
An asyncio Queue with batch methods and graceful close.
'''
def __init__(self, maxsize=None):
self.q = asyncio.Queue(maxsize=maxsize)
self.closed = False
async def close(self):
await self.q.put(s_common.novalu)
self.closed = True
async def put(self, item):
if self.closed:
mesg = 'The Queue has been closed.'
raise s_exc.BadArg(mesg=mesg)
await self.q.put(item)
async def size(self):
size = self.q.qsize()
if self.closed:
size -= 1
return size
async def puts(self, items):
if self.closed:
mesg = 'The Queue has been closed.'
raise s_exc.BadArg(mesg=mesg)
for item in items:
await self.q.put(item)
async def slice(self, size=1000):
if self.closed and self.q.qsize() == 0:
return None
items = []
item = await self.q.get()
if item is s_common.novalu:
return None
items.append(item)
size -= 1
for i in range(min(size, self.q.qsize())):
item = await self.q.get()
if item is s_common.novalu:
break
items.append(item)
return items
async def slices(self, size=1000):
while True:
items = await self.slice(size=size)
if items is None:
return
yield items
class Window(s_base.Base):
'''
A Queue like object which yields added items. If the queue ever reaches
its maxsize, it will be fini()d. On fini(), the Window will continue to
yield results until empty and then return.
'''
async def __anit__(self, maxsize=None):
await s_base.Base.__anit__(self)
self.maxsize = maxsize
self.event = asyncio.Event()
self.linklist = collections.deque()
async def fini():
self.event.set()
self.onfini(fini)
async def __aiter__(self):
while True:
if self.linklist:
yield self.linklist.popleft()
continue
if self.isfini:
return
self.event.clear()
await self.event.wait()
async def put(self, item):
'''
Add a single item to the Window.
'''
if self.isfini:
return False
self.linklist.append(item)
self.event.set()
if self.maxsize is not None and len(self.linklist) >= self.maxsize:
await self.fini()
return True
async def puts(self, items):
'''
Add multiple items to the window.
'''
if self.isfini:
return False
self.linklist.extend(items)
self.event.set()
if self.maxsize is not None and len(self.linklist) >= self.maxsize:
await self.fini()
return True
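# Hedged usage sketch (not part of the module): Queue.slices() keeps draining
# batches until close() has been observed, which suits producer/consumer tasks.
#
#   async def example():
#       q = Queue(maxsize=10)
#       await q.puts([1, 2, 3])
#       await q.close()
#       async for batch in q.slices(size=2):
#           print(batch)   # prints [1, 2] and then [3]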
|
tests/plugins/test_lyx_filters.py | dsoto/dexy | 136 | 12705739 | from tests.utils import assert_output
def test_lyx():
assert_output("lyxjinja",
"dexy:foo.py|idio:multiply",
"<< d['foo.py|idio']['multiply'] >>",
".tex")
|
data/operator/bbox/spatial/vectorized/torch/utility/normalize.py | zhangzhengde0225/SwinTrack | 143 | 12705764 | import torch
class BoundingBoxNormalizationHelper:
def __init__(self, interval, range_):
assert interval in ('[)', '[]')
self.right_open = (interval == '[)')
assert range_[1] > range_[0]
self.scale = range_[1] - range_[0]
self.offset = range_[0]
def normalize_(self, bbox: torch.Tensor, image_size):
if self.right_open:
bbox[..., ::2] /= image_size[0]
bbox[..., 1::2] /= image_size[1]
else:
bbox[..., ::2] /= (image_size[0] - 1)
bbox[..., 1::2] /= (image_size[1] - 1)
bbox *= self.scale
bbox += self.offset
return bbox
def normalize(self, bbox: torch.Tensor, image_size):
return self.normalize_(bbox.clone(), image_size)
def denormalize_(self, bbox: torch.Tensor, image_size):
bbox -= self.offset
bbox /= self.scale
if self.right_open:
bbox[..., ::2] *= image_size[0]
bbox[..., 1::2] *= image_size[1]
else:
bbox[..., ::2] *= (image_size[0] - 1)
bbox[..., 1::2] *= (image_size[1] - 1)
return bbox
def denormalize(self, bbox: torch.Tensor, image_size):
return self.denormalize_(bbox.clone(), image_size)
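# Hedged usage sketch (interval and range values are illustrative):
#
#   helper = BoundingBoxNormalizationHelper('[)', (-1.0, 1.0))
#   bbox = torch.tensor([0.0, 0.0, 320.0, 240.0])      # pixel coordinates
#   norm = helper.normalize(bbox, (320, 240))          # tensor([-1., -1., 1., 1.])
#   back = helper.denormalize(norm, (320, 240))        # recovers the pixel box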
|
video_funnel/server.py | fakegit/video-funnel | 113 | 12705771 | import asyncio
from pathlib import Path
import aiohttp
from aiohttp import web
from .funnel import Funnel
from .utils import (
HttpRange,
RangeNotSupportedError,
convert_unit,
load_browser_cookies,
retry,
)
async def make_response(request, url, block_size, piece_size, cookies_from,
use_original_url):
session = request.app['session']
if cookies_from:
session.cookie_jar.update_cookies(
load_browser_cookies(cookies_from, url))
@retry
async def get_info():
nonlocal url
async with session.head(url, allow_redirects=True) as resp:
if resp.headers.get('Accept-Ranges') != 'bytes':
raise RangeNotSupportedError
if not use_original_url:
url = resp.url
return resp.content_length, resp.content_type
try:
content_length, content_type = await get_info()
except RangeNotSupportedError as exc:
msg = str(exc)
print(msg)
return web.Response(status=501, text=msg)
except aiohttp.ClientError as exc:
print(exc)
return web.Response(status=exc.status)
range = request.headers.get('Range')
if range is None:
# not a Range request - the whole file
range = HttpRange(0, content_length - 1)
resp = web.StreamResponse(
status=200,
headers={
'Content-Length': str(content_length),
'Content-Type': content_type,
'Accept-Ranges': 'bytes'
})
else:
try:
range = HttpRange.from_str(range, content_length)
except ValueError:
return web.Response(
status=416, headers={'Content-Range': f'*/{content_length}'})
else:
resp = web.StreamResponse(
status=206,
headers={
'Content-Type':
content_type,
'Content-Range':
f'bytes {range.begin}-{range.end}/{content_length}'
})
if request.method == 'HEAD':
return resp
await resp.prepare(request)
async with Funnel(
url,
range,
session,
block_size,
piece_size,
) as funnel:
try:
async for chunk in funnel:
await resp.write(chunk)
return resp
except (aiohttp.ClientError, RangeNotSupportedError) as exc:
print(exc)
return web.Response(status=exc.status)
except asyncio.CancelledError:
raise
ROOT = Path(__file__).parent
async def index(request):
return web.FileResponse(ROOT / 'index.html')
async def cli(request):
args = request.app['args']
url = request.raw_path[1:] or args.url
return await make_response(
request,
url,
convert_unit(args.block_size),
convert_unit(args.piece_size),
args.cookies_from,
args.use_original_url,
)
async def api(request):
args = request.app['args']
query = request.query
block_size = convert_unit(query.get('block_size', args.block_size))
piece_size = convert_unit(query.get('piece_size', args.piece_size))
return await make_response(
request,
query.get('url', args.url),
block_size,
piece_size,
query.get('cookies_from', args.cookies_from),
query.get('use_original_url', args.use_original_url),
)
async def make_app(args):
app = web.Application()
app['args'] = args
if args.url is None:
app.router.add_get('/', index)
# app.router.add_static('/static', ROOT / 'static')
app.router.add_get('/api', api)
app.router.add_get('/{_:https?://.+}', cli)
else:
app.router.add_get('/', cli)
app.router.add_get('/{_:https?://.+}', cli)
async def session(app):
app['session'] = aiohttp.ClientSession(raise_for_status=True)
yield
await app['session'].close()
app.cleanup_ctx.append(session)
return app
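# Hedged run sketch (attribute names mirror how cli()/api() read `args`; the
# values and port are illustrative, and the real CLI entry point lives elsewhere):
#
#   import argparse
#   args = argparse.Namespace(url=None, block_size='4M', piece_size='1M',
#                             cookies_from=None, use_original_url=False)
#   web.run_app(make_app(args), port=8080)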
|
tests/api/test_docker.py | peddamat/home-assistant-supervisor-test | 597 | 12705772 | """Test Docker API."""
import pytest
@pytest.mark.asyncio
async def test_api_docker_info(api_client):
"""Test docker info api."""
resp = await api_client.get("/docker/info")
result = await resp.json()
assert result["data"]["logging"] == "journald"
assert result["data"]["storage"] == "overlay2"
assert result["data"]["version"] == "1.0.0"
|
setup.py | bingqingsuimeng/ctypesgen | 200 | 12705778 | #!/usr/bin/env python3
"""Examples:
setup.py sdist
setup.py bdist_wininst
"""
from setuptools import setup
if __name__ == "__main__":
setup()
|
k8s_handle/__init__.py | jetbrains-infra/k8s-handle | 152 | 12705781 | import argparse
import logging
import os
import sys
from kubernetes import client
from kubernetes.config import list_kube_config_contexts, load_kube_config
from k8s_handle import config
from k8s_handle import settings
from k8s_handle import templating
from k8s_handle.exceptions import ProvisioningError, ResourceNotAvailableError
from k8s_handle.filesystem import InvalidYamlError
from k8s_handle.k8s.deprecation_checker import ApiDeprecationChecker
from k8s_handle.k8s.provisioner import Provisioner
from k8s_handle.k8s.diff import Diff
from k8s_handle.k8s.availability_checker import ResourceAvailabilityChecker, make_resource_getters_list
COMMAND_DEPLOY = 'deploy'
COMMAND_DIFF = 'diff'
COMMAND_DESTROY = 'destroy'
log = logging.getLogger(__name__)
logging.basicConfig(level=settings.LOG_LEVEL, format=settings.LOG_FORMAT, datefmt=settings.LOG_DATE_FORMAT)
def handler_deploy(args):
_handler_deploy_destroy(args, COMMAND_DEPLOY)
def handler_destroy(args):
_handler_deploy_destroy(args, COMMAND_DESTROY)
def handler_apply(args):
_handler_apply_delete(args, COMMAND_DEPLOY)
def handler_delete(args):
_handler_apply_delete(args, COMMAND_DESTROY)
def handler_render(args):
context = config.load_context_section(args.get('section'))
templating.Renderer(
settings.TEMPLATES_DIR,
args.get('tags'),
args.get('skip_tags')
).generate_by_context(context)
def handler_diff(args):
_handler_deploy_destroy(args, COMMAND_DIFF)
def _handler_deploy_destroy(args, command):
context = config.load_context_section(args.get('section'))
resources = templating.Renderer(
settings.TEMPLATES_DIR,
args.get('tags'),
args.get('skip_tags')
).generate_by_context(context)
if args.get('dry_run'):
return
_handler_provision(
command,
resources,
config.PriorityEvaluator(args, context, os.environ),
args.get('use_kubeconfig'),
args.get('sync_mode'),
args.get('show_logs')
)
def _handler_apply_delete(args, command):
_handler_provision(
command,
[os.path.join(settings.TEMP_DIR, args.get('resource'))],
config.PriorityEvaluator(args, {}, os.environ),
args.get('use_kubeconfig'),
args.get('sync_mode'),
args.get('show_logs')
)
def _handler_provision(command, resources, priority_evaluator, use_kubeconfig, sync_mode, show_logs):
kubeconfig_namespace = None
if priority_evaluator.environment_deprecated():
log.warning("K8S_HOST and K8S_CA environment variables support is deprecated "
"and will be discontinued in the future. Use K8S_MASTER_URI and K8S_CA_BASE64 instead.")
# INFO rvadim: https://github.com/kubernetes-client/python/issues/430#issuecomment-359483997
if use_kubeconfig:
try:
load_kube_config()
kubeconfig_namespace = list_kube_config_contexts()[1].get('context').get('namespace')
except Exception as e:
raise RuntimeError(e)
else:
client.Configuration.set_default(priority_evaluator.k8s_client_configuration())
settings.K8S_NAMESPACE = priority_evaluator.k8s_namespace_default(kubeconfig_namespace)
log.info('Default namespace "{}"'.format(settings.K8S_NAMESPACE))
if not settings.K8S_NAMESPACE:
log.info("Default namespace is not set. "
"This may lead to provisioning error, if namespace is not set for each resource.")
try:
deprecation_checker = ApiDeprecationChecker(client.VersionApi().get_code().git_version[1:])
available_checker = ResourceAvailabilityChecker(make_resource_getters_list())
for resource in resources:
deprecation_checker.run(resource)
available_checker.run(resource)
except client.exceptions.ApiException:
log.warning("Error while getting API version, deprecation check will be skipped.")
if command == COMMAND_DIFF:
executor = Diff()
else:
executor = Provisioner(command, sync_mode, show_logs)
for resource in resources:
executor.run(resource)
parser = argparse.ArgumentParser(description='CLI utility generate k8s resources by templates and apply it to cluster')
subparsers = parser.add_subparsers(dest="command")
subparsers.required = True
parser_target_config = argparse.ArgumentParser(add_help=False)
parser_target_config.add_argument('-s', '--section', required=True, type=str, help='Section to deploy from config file')
parser_target_config.add_argument('-c', '--config', required=False, help='Config file, default: config.yaml')
parser_target_config.add_argument('--tags', action='append', required=False,
help='Only use templates tagged with these values')
parser_target_config.add_argument('--skip-tags', action='append', required=False,
help='Only use templates whose tags do not match these values')
parser_target_resource = argparse.ArgumentParser(add_help=False)
parser_target_resource.add_argument('-r', '--resource', required=True, type=str,
help='Resource spec path, absolute (started with slash) or relative from TEMP_DIR')
parser_deprecated = argparse.ArgumentParser(add_help=False)
parser_deprecated.add_argument('--dry-run', required=False, action='store_true',
help='Don\'t run kubectl commands. Deprecated, use "k8s-handle template" instead')
parser_provisioning = argparse.ArgumentParser(add_help=False)
parser_provisioning.add_argument('--sync-mode', action='store_true', required=False, default=False,
help='Turn on sync mode and wait deployment ending')
parser_provisioning.add_argument('--tries', type=int, required=False, default=360,
help='Count of tries to check deployment status')
parser_provisioning.add_argument('--retry-delay', type=int, required=False, default=5,
help='Sleep between tries in seconds')
parser_provisioning.add_argument('--strict', action='store_true', required=False,
help='Check existence of all env variables in config.yaml and stop if var is not set')
parser_provisioning.add_argument('--use-kubeconfig', action='store_true', required=False,
help='Try to use kube config')
parser_provisioning.add_argument('--k8s-handle-debug', action='store_true', required=False,
help='Show K8S client debug messages')
parser_logs = argparse.ArgumentParser(add_help=False)
parser_logs.add_argument('--show-logs', action='store_true', required=False, default=False, help='Show logs for jobs')
parser_logs.add_argument('--tail-lines', type=int, required=False, help='Lines of recent log file to display')
arguments_connection = parser_provisioning.add_argument_group()
arguments_connection.add_argument('--k8s-master-uri', required=False, help='K8S master to connect to')
arguments_connection.add_argument('--k8s-ca-base64', required=False, help='base64-encoded K8S certificate authority')
arguments_connection.add_argument('--k8s-token', required=False, help='K8S token to use')
parser_deploy = subparsers.add_parser(
'deploy',
parents=[parser_provisioning, parser_target_config, parser_logs, parser_deprecated],
help='Do attempt to create specs from templates and deploy K8S resources of the selected section')
parser_deploy.set_defaults(func=handler_deploy)
parser_apply = subparsers.add_parser('apply', parents=[parser_provisioning, parser_target_resource, parser_logs],
help='Do attempt to deploy K8S resource from the existing spec')
parser_apply.set_defaults(func=handler_apply)
parser_destroy = subparsers.add_parser('destroy',
parents=[parser_provisioning, parser_target_config, parser_deprecated],
help='Do attempt to destroy K8S resources of the selected section')
parser_destroy.set_defaults(func=handler_destroy)
parser_delete = subparsers.add_parser('delete', parents=[parser_provisioning, parser_target_resource],
help='Do attempt to destroy K8S resource from the existing spec')
parser_delete.set_defaults(func=handler_delete)
parser_template = subparsers.add_parser('render', parents=[parser_target_config],
help='Make resources from the template and config. '
'Created resources will be placed into the TEMP_DIR')
parser_template.set_defaults(func=handler_render)
parser_diff = subparsers.add_parser('diff', parents=[parser_target_config],
help='Show diff between current rendered yamls and apiserver yamls')
parser_diff.add_argument('--use-kubeconfig', action='store_true', required=False,
help='Try to use kube config')
parser_diff.set_defaults(func=handler_diff)
def main():
# INFO furiousassault: backward compatibility rough attempt
# must be removed later according to https://github.com/2gis/k8s-handle/issues/40
deprecation_warnings = 0
filtered_arguments = []
for argument in sys.argv[1:]:
if argument in ['--sync-mode=true', '--sync-mode=True', '--dry-run=true', '--dry-run=True']:
deprecation_warnings += 1
filtered_arguments.append(argument.split('=')[0])
continue
if argument in ['--sync-mode=false', '--sync-mode=False', '--dry-run=false', '--dry-run=False']:
deprecation_warnings += 1
continue
filtered_arguments.append(argument)
args, unrecognized_args = parser.parse_known_args(filtered_arguments)
if deprecation_warnings or unrecognized_args:
log.warning("Explicit true/false arguments to --sync-mode and --dry-run keys are deprecated "
"and will be discontinued in the future. Use these keys without arguments instead.")
args_dict = vars(args)
settings.CHECK_STATUS_TRIES = args_dict.get('tries')
settings.CHECK_DAEMONSET_STATUS_TRIES = args_dict.get('tries')
settings.CHECK_STATUS_TIMEOUT = args_dict.get('retry_delay')
settings.CHECK_DAEMONSET_STATUS_TIMEOUT = args_dict.get('retry_delay')
settings.GET_ENVIRON_STRICT = args_dict.get('strict')
settings.COUNT_LOG_LINES = args_dict.get('tail_lines')
settings.CONFIG_FILE = args_dict.get('config') or settings.CONFIG_FILE
try:
args.func(args_dict)
except templating.TemplateRenderingError as e:
log.error('Template generation error: {}'.format(e))
sys.exit(1)
except InvalidYamlError as e:
log.error('{}'.format(e))
sys.exit(1)
except RuntimeError as e:
log.error('RuntimeError: {}'.format(e))
sys.exit(1)
except ResourceNotAvailableError as e:
log.error('Resource not available: {}'.format(e))
sys.exit(1)
except ProvisioningError:
sys.exit(1)
print(r'''
_(_)_ wWWWw _
@@@@ (_)@(_) vVVVv _ @@@@ (___) _(_)_
@@()@@ wWWWw (_)\ (___) _(_)_ @@()@@ Y (_)@(_)
@@@@ (___) `|/ Y (_)@(_) @@@@ \|/ (_)
/ Y \| \|/ /(_) \| |/ |
\ | \ |/ | / \ | / \|/ |/ \| \|/
\|// \|/// \|// \|/// \|/// \|// |// \|//
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^''')
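# Hedged CLI sketch (subcommands and flags come from the parsers above; the
# console-script name "k8s-handle" is assumed from the package name):
#
#   k8s-handle render -s staging --tags web
#   k8s-handle deploy -s staging --use-kubeconfig --sync-mode --show-logs
#   k8s-handle diff -s staging --use-kubeconfig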
|
examples/mark_safe_secure.py | mikelolasagasti/bandit | 4,016 | 12705785 | import os
from django.utils import safestring
safestring.mark_safe('<b>secure</b>')
safestring.SafeText('<b>secure</b>')
safestring.SafeUnicode('<b>secure</b>')
safestring.SafeString('<b>secure</b>')
safestring.SafeBytes('<b>secure</b>')
my_secure_str = '<b>Hello World</b>'
safestring.mark_safe(my_secure_str)
my_secure_str, _ = ('<b>Hello World</b>', '')
safestring.mark_safe(my_secure_str)
also_secure_str = my_secure_str
safestring.mark_safe(also_secure_str)
def try_secure():
try:
my_secure_str = 'Secure'
except Exception:
my_secure_str = 'Secure'
else:
my_secure_str = 'Secure'
finally:
my_secure_str = 'Secure'
safestring.mark_safe(my_secure_str)
def format_secure():
safestring.mark_safe('<b>{}</b>'.format('secure'))
my_secure_str = 'secure'
safestring.mark_safe('<b>{}</b>'.format(my_secure_str))
safestring.mark_safe('<b>{} {}</b>'.format(my_secure_str, 'a'))
safestring.mark_safe('<b>{} {}</b>'.format(*[my_secure_str, 'a']))
safestring.mark_safe('<b>{b}</b>'.format(b=my_secure_str)) # nosec TODO
safestring.mark_safe('<b>{b}</b>'.format(**{'b': my_secure_str})) # nosec TODO
my_secure_str = '<b>{}</b>'.format(my_secure_str)
safestring.mark_safe(my_secure_str)
def percent_secure():
safestring.mark_safe('<b>%s</b>' % 'secure')
my_secure_str = 'secure'
safestring.mark_safe('<b>%s</b>' % my_secure_str)
safestring.mark_safe('<b>%s %s</b>' % (my_secure_str, 'a'))
safestring.mark_safe('<b>%(b)s</b>' % {'b': my_secure_str}) # nosec TODO
def with_secure(path):
with open(path) as f:
safestring.mark_safe('Secure')
def loop_secure():
my_secure_str = ''
for i in range(ord(os.urandom(1))):
my_secure_str += ' Secure'
safestring.mark_safe(my_secure_str)
while ord(os.urandom(1)) % 2 == 0:
my_secure_str += ' Secure'
safestring.mark_safe(my_secure_str)
def all_secure_case():
if ord(os.urandom(1)) % 2 == 0:
my_secure_str = 'Secure'
elif ord(os.urandom(1)) % 2 == 0:
my_secure_str = 'Secure'
else:
my_secure_str = 'Secure'
safestring.mark_safe(my_secure_str)
|
hypernets/core/callbacks.py | Enpen/Hypernets | 1,080 | 12705790 | # -*- coding:utf-8 -*-
"""
"""
import datetime
import json
import os
import time
import numpy as np
import pandas as pd
from IPython.display import display, update_display, display_markdown
from tqdm.auto import tqdm
from ..utils import logging, fs, to_repr
logger = logging.get_logger(__name__)
class Callback():
def __init__(self):
pass
def on_search_start(self, hyper_model, X, y, X_eval, y_eval, cv, num_folds, max_trials, dataset_id, trial_store,
**fit_kwargs):
pass
def on_search_end(self, hyper_model):
pass
def on_search_error(self, hyper_model):
pass
def on_build_estimator(self, hyper_model, space, estimator, trial_no):
pass
def on_trial_begin(self, hyper_model, space, trial_no):
pass
def on_trial_end(self, hyper_model, space, trial_no, reward, improved, elapsed):
pass
def on_trial_error(self, hyper_model, space, trial_no):
pass
def on_skip_trial(self, hyper_model, space, trial_no, reason, reward, improved, elapsed):
pass
def __repr__(self):
return to_repr(self)
class EarlyStoppingError(RuntimeError):
def __init__(self, *arg):
self.args = arg
class EarlyStoppingCallback(Callback):
REASON_TRIAL_LIMIT = 'max_no_improvement_trials'
REASON_TIME_LIMIT = 'time_limit'
REASON_EXPECTED_REWARD = 'expected_reward'
def __init__(self, max_no_improvement_trials=0, mode='min', min_delta=0, time_limit=None, expected_reward=None):
super(Callback, self).__init__()
# assert time_limit is None or time_limit > 60, 'If `time_limit` is not None, it must be greater than 60.'
# settings
if mode == 'min':
self.op = np.less
elif mode == 'max':
self.op = np.greater
else:
raise ValueError(f'Unsupported mode:{mode}')
self.max_no_improvement_trials = max_no_improvement_trials
self.mode = mode
self.min_delta = min_delta
self.time_limit = time_limit
self.expected_reward = expected_reward
# running state
self.start_time = None
self.best_reward = None
self.best_trial_no = None
self.counter_no_improvement_trials = 0
self.triggered = None
self.triggered_reason = None
def on_search_start(self, hyper_model, X, y, X_eval, y_eval, cv, num_folds, max_trials, dataset_id, trial_store,
**fit_kwargs):
self.triggered = False
self.triggered_reason = None
def on_trial_begin(self, hyper_model, space, trial_no):
if self.start_time is None:
self.start_time = time.time()
def on_trial_end(self, hyper_model, space, trial_no, reward, improved, elapsed):
if self.start_time is None:
self.start_time = time.time()
time_total = time.time() - self.start_time
if self.time_limit is not None and self.time_limit > 0:
if time_total > self.time_limit:
self.triggered = True
self.triggered_reason = self.REASON_TIME_LIMIT
if self.expected_reward is not None and self.expected_reward != 0.0:
if self.op(reward, self.expected_reward):
self.triggered = True
self.triggered_reason = self.REASON_EXPECTED_REWARD
if self.max_no_improvement_trials is not None and self.max_no_improvement_trials > 0:
if self.best_reward is None:
self.best_reward = reward
self.best_trial_no = trial_no
else:
if self.op(reward, self.best_reward - self.min_delta):
self.best_reward = reward
self.best_trial_no = trial_no
self.counter_no_improvement_trials = 0
else:
self.counter_no_improvement_trials += 1
if self.counter_no_improvement_trials >= self.max_no_improvement_trials:
self.triggered = True
self.triggered_reason = self.REASON_TRIAL_LIMIT
if self.triggered:
msg = f'Early stopping on trial : {trial_no}, reason: {self.triggered_reason}, ' \
f'best reward: {self.best_reward}, best trial: {self.best_trial_no}, ' \
f'elapsed seconds: {time_total}'
if logger.is_info_enabled():
logger.info(msg)
raise EarlyStoppingError(msg)
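# Hedged usage sketch (the way the callback is passed to a search is assumed,
# hence commented out; thresholds are illustrative):
#
#   es = EarlyStoppingCallback(max_no_improvement_trials=10, mode='min',
#                              time_limit=3600, expected_reward=0.05)
#   # hyper_model.search(X, y, X_eval, y_eval, callbacks=[es], max_trials=100)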
class FileLoggingCallback(Callback):
def __init__(self, searcher, output_dir=None):
super(FileLoggingCallback, self).__init__()
self.output_dir = self._prepare_output_dir(output_dir, searcher)
@staticmethod
def open(file_path, mode):
return open(file_path, mode=mode)
@staticmethod
def mkdirs(dir_path, exist_ok):
os.makedirs(dir_path, exist_ok=exist_ok)
def _prepare_output_dir(self, log_dir, searcher):
if log_dir is None:
log_dir = 'log'
if log_dir[-1] == '/':
log_dir = log_dir[:-1]
running_dir = f'exp_{searcher.__class__.__name__}_{datetime.datetime.now().__format__("%m%d-%H%M%S")}'
output_path = os.path.expanduser(f'{log_dir}/{running_dir}')
self.mkdirs(output_path, exist_ok=True)
return output_path
def on_build_estimator(self, hyper_model, space, estimator, trial_no):
pass
def on_trial_begin(self, hyper_model, space, trial_no):
pass
# with open(f'{self.output_dir}/trial_{trial_no}.log', 'w') as f:
# f.write(space.params_summary())
def on_trial_end(self, hyper_model, space, trial_no, reward, improved, elapsed):
with self.open(f'{self.output_dir}/trial_{improved}_{trial_no:04d}_{reward:010.8f}_{elapsed:06.2f}.log',
'w') as f:
f.write(space.params_summary())
f.write('\r\n----------------Summary for Searcher----------------\r\n')
f.write(hyper_model.searcher.summary())
topn = 10
diff = hyper_model.history.diff(hyper_model.history.get_top(topn))
with self.open(f'{self.output_dir}/top_{topn}_diff.txt', 'w') as f:
diff_str = json.dumps(diff, indent=5)
f.write(diff_str)
f.write('\r\n')
f.write(hyper_model.searcher.summary())
with self.open(f'{self.output_dir}/top_{topn}_config.txt', 'w') as f:
trials = hyper_model.history.get_top(topn)
configs = hyper_model.export_configuration(trials)
for trial, conf in zip(trials, configs):
f.write(f'Trial No: {trial.trial_no}, Reward: {trial.reward}\r\n')
f.write(conf)
f.write('\r\n---------------------------------------------------\r\n\r\n')
def on_skip_trial(self, hyper_model, space, trial_no, reason, reward, improved, elapsed):
with self.open(
f'{self.output_dir}/trial_{reason}_{improved}_{trial_no:04d}_{reward:010.8f}_{elapsed:06.2f}.log',
'w') as f:
f.write(space.params_summary())
topn = 5
diff = hyper_model.history.diff(hyper_model.history.get_top(topn))
with self.open(f'{self.output_dir}/top_{topn}_diff.txt', 'w') as f:
diff_str = json.dumps(diff, indent=5)
f.write(diff_str)
class FileStorageLoggingCallback(FileLoggingCallback):
@staticmethod
def open(file_path, mode):
return fs.open(file_path, mode=mode)
@staticmethod
def mkdirs(dir_path, exist_ok):
fs.mkdirs(dir_path, exist_ok=exist_ok)
class SummaryCallback(Callback):
def __init__(self):
super(SummaryCallback, self).__init__()
self.start_search_time = None
def on_search_start(self, hyper_model, X, y, X_eval, y_eval, cv, num_folds, max_trials, dataset_id, trial_store,
**fit_kwargs):
self.start_search_time = time.time()
def on_build_estimator(self, hyper_model, space, estimator, trial_no):
# if logger.is_info_enabled():
# logger.info(f'\nTrial No:{trial_no}')
# logger.info(space.params_summary())
estimator.summary()
def on_trial_begin(self, hyper_model, space, trial_no):
if logger.is_info_enabled():
msg = f'\nTrial No:{trial_no}{space.params_summary()}\ntrial {trial_no} begin'
logger.info(msg)
def on_trial_end(self, hyper_model, space, trial_no, reward, improved, elapsed):
if logger.is_info_enabled():
logger.info(f'trial end. reward:{reward}, improved:{improved}, elapsed:{elapsed}')
logger.info(f'Total elapsed:{time.time() - self.start_search_time}')
def on_skip_trial(self, hyper_model, space, trial_no, reason, reward, improved, elapsed):
if logger.is_info_enabled():
logger.info(f'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
logger.info(f'trial skip. reason:{reason}, reward:{reward}, improved:{improved}, elapsed:{elapsed}')
logger.info(f'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
class NotebookCallback(Callback):
def __init__(self):
super(NotebookCallback, self).__init__()
self.current_trial_display_id = None
self.search_summary_display_id = None
self.best_trial_display_id = None
self.title_display_id = None
self.last_trial_no = 0
self.last_reward = 0
self.start_time = 0
self.max_trials = 0
def on_search_start(self, hyper_model, X, y, X_eval, y_eval, cv, num_folds, max_trials, dataset_id, trial_store,
**fit_kwargs):
self.start_time = time.time()
self.max_trials = max_trials
df_holder = pd.DataFrame()
settings = {'X': X.shape,
'y': y.shape,
'X_eval': X_eval.shape if X_eval is not None else None,
'y_eval': y_eval.shape if y_eval is not None else None,
'cv': cv,
'num_folds': num_folds,
'max_trials': max_trials,
# 'dataset_id': dataset_id,
# 'trail_store': trial_store,
'fit_kwargs': fit_kwargs.keys()
}
df_settings = pd.DataFrame({k: [v] for k, v in settings.items()})
display_markdown('#### Experiment Settings:', raw=True)
display(hyper_model, display_id=False)
display(df_settings, display_id=False)
display_markdown('#### Trials Summary:', raw=True)
handle = display(df_holder, display_id=True)
if handle is not None:
self.search_summary_display_id = handle.display_id
display_markdown('#### Best Trial:', raw=True)
handle = display(df_holder, display_id=True)
if handle is not None:
self.best_trial_display_id = handle.display_id
handle = display({'text/markdown': '#### Current Trial:'}, raw=True, include=['text/markdown'],
display_id=True)
if handle is not None:
self.title_display_id = handle.display_id
handle = display(df_holder, display_id=True)
if handle is not None:
self.current_trial_display_id = handle.display_id
def on_trial_begin(self, hyper_model, space, trial_no):
df_summary = pd.DataFrame([(trial_no, self.last_reward, hyper_model.best_trial_no,
hyper_model.best_reward,
time.time() - self.start_time,
len([t for t in hyper_model.history.trials if t.succeeded]),
self.max_trials)],
columns=['Trial No.', 'Previous reward', 'Best trial', 'Best reward',
'Total elapsed', 'Valid trials',
'Max trials'])
if self.search_summary_display_id is not None:
update_display(df_summary, display_id=self.search_summary_display_id)
if self.current_trial_display_id is not None:
update_display(space, display_id=self.current_trial_display_id)
def on_search_end(self, hyper_model):
df_summary = pd.DataFrame([(self.last_trial_no, self.last_reward, hyper_model.best_trial_no,
hyper_model.best_reward,
time.time() - self.start_time,
len([t for t in hyper_model.history.trials if t.succeeded]),
self.max_trials)],
columns=['Trial No.', 'Previous reward', 'Best trial', 'Best reward',
'Total elapsed', 'Valid trials',
'Max trials'])
if self.search_summary_display_id is not None:
update_display(df_summary, display_id=self.search_summary_display_id)
if self.title_display_id is not None:
update_display({'text/markdown': '#### Top trials:'}, raw=True, include=['text/markdown'],
display_id=self.title_display_id)
df_best_trials = pd.DataFrame([
(t.trial_no, t.reward, t.elapsed, t.space_sample.vectors) for t in hyper_model.get_top_trials(5)],
columns=['Trial No.', 'Reward', 'Elapsed', 'Space Vector'])
if self.current_trial_display_id is not None:
update_display(df_best_trials, display_id=self.current_trial_display_id)
def on_trial_end(self, hyper_model, space, trial_no, reward, improved, elapsed):
self.last_trial_no = trial_no
self.last_reward = reward
best_trial = hyper_model.get_best_trial()
if best_trial is not None and self.best_trial_display_id is not None:
update_display(best_trial.space_sample, display_id=self.best_trial_display_id)
def on_trial_error(self, hyper_model, space, trial_no):
self.last_trial_no = trial_no
self.last_reward = 'ERR'
class ProgressiveCallback(Callback):
def __init__(self):
super(ProgressiveCallback, self).__init__()
self.pbar = None
def on_search_start(self, hyper_model, X, y, X_eval, y_eval, cv, num_folds, max_trials, dataset_id, trial_store,
**fit_kwargs):
self.pbar = tqdm(total=max_trials, leave=False, desc='search')
def on_search_end(self, hyper_model):
self.pbar.update(self.pbar.total)
self.pbar.close()
self.pbar = None
def on_search_error(self, hyper_model):
self.on_search_end(hyper_model)
def on_trial_end(self, hyper_model, space, trial_no, reward, improved, elapsed):
self.pbar.update(1)
def on_trial_error(self, hyper_model, space, trial_no):
self.pbar.update(1)
|
textworld/gym/spaces/text_spaces.py | JohnnySun8/TextWorld | 307 | 12705799 | import re
import string
import numpy as np
import gym
import gym.spaces
class VocabularyHasDuplicateTokens(ValueError):
pass
class Char(gym.spaces.MultiDiscrete):
""" Character observation/action space
This space consists of a series of `gym.spaces.Discrete` objects all with
the same parameters. Each `gym.spaces.Discrete` can take integer values
between 0 and len(self.vocab).
Notes
-----
The following special token will be prepended (if needed) to the vocabulary:
* '#' : Padding token
"""
def __init__(self, max_length, vocab=None, extra_vocab=[]):
"""
Parameters
----------
max_length : int
Maximum number of characters in a text.
vocab : list of char, optional
Vocabulary defining this space. It shouldn't contain any
            duplicate characters. If not provided, the vocabulary will consist
            of the characters [a-z0-9], the punctuation [" ", "-", "'"] and the padding token '#'.
extra_vocab : list of char, optional
Additional tokens to add to the vocabulary.
"""
if vocab is None:
vocab = list(string.ascii_lowercase + string.digits)
vocab += [" ", "-", "'"]
vocab += extra_vocab
if len(vocab) != len(set(vocab)):
raise VocabularyHasDuplicateTokens()
self.max_length = max_length
self.PAD = "#"
special_tokens = [self.PAD]
self.vocab = [t for t in special_tokens if t not in vocab]
self.vocab += list(vocab)
self.vocab_set = set(self.vocab) # For faster lookup.
self.vocab_size = len(self.vocab)
self.id2c = {i: c for i, c in enumerate(self.vocab)}
self.c2id = {c: i for i, c in self.id2c.items()}
self.PAD_id = self.c2id[self.PAD]
super().__init__([len(self.vocab) - 1] * self.max_length)
self.dtype = np.int64 # Overwrite Gym's dtype=int8.
def filter_unknown(self, text):
""" Strip out all characters not in the vocabulary. """
return "".join(c for c in text if c in self.vocab_set)
def tokenize(self, text, padding=False):
""" Tokenize characters found in the vocabulary.
        Note: if `padding` is True, the text will be padded up to `self.max_length`.
"""
text = self.filter_unknown(text.lower())
ids = [self.c2id[c] for c in text]
# Add padding.
if padding:
nb_pads = self.max_length - len(ids)
msg = "Provided `max_length` was not large enough ({} chars).".format(len(ids))
assert nb_pads >= 0, msg
ids += [self.PAD_id] * nb_pads
return np.array(ids)
def __repr__(self):
return "Character({})".format(self.max_length)
class Word(gym.spaces.MultiDiscrete):
""" Word observation/action space
This space consists of a series of `gym.spaces.Discrete` objects all with
the same parameters. Each `gym.spaces.Discrete` can take integer values
between 0 and `len(self.vocab)`.
Notes
-----
The following special tokens will be prepended (if needed) to the vocabulary:
* '<PAD>' : Padding
* '<UNK>' : Unknown word
* '<S>' : Beginning of sentence
* '</S>' : End of sentence
Example
-------
Let's create an action space that can be used with
:py:meth:`textworld.gym.register_game <textworld.gym.utils.register_game>`.
We are going to assume actions are short phrases up to 8 words long.
>>> import textworld
>>> gamefiles = ["/path/to/game.ulx", "/path/to/another/game.z8"]
>>> vocab = textworld.vocab.extract_from(gamefiles)
>>> vocab = sorted(vocab) # Sorting the vocabulary, optional.
>>> action_space = textworld.gym.text_spaces.Word(max_length=8, vocab=vocab)
"""
def __init__(self, max_length, vocab):
"""
Parameters
----------
max_length : int
Maximum number of words in a text.
vocab : list of strings
Vocabulary defining this space. It shouldn't contain any
duplicate words.
"""
if len(vocab) != len(set(vocab)):
raise VocabularyHasDuplicateTokens()
self.max_length = max_length
self.PAD = "<PAD>"
self.UNK = "<UNK>"
self.BOS = "<S>"
self.EOS = "</S>"
self.SEP = "<|>"
special_tokens = [self.PAD, self.UNK, self.EOS, self.BOS, self.SEP]
self.vocab = [w for w in special_tokens if w not in vocab]
self.vocab += list(vocab)
self.vocab_set = set(self.vocab) # For faster lookup.
self.vocab_size = len(self.vocab)
self.id2w = {i: w for i, w in enumerate(self.vocab)}
self.w2id = {w: i for i, w in self.id2w.items()}
self.PAD_id = self.w2id[self.PAD]
self.UNK_id = self.w2id[self.UNK]
self.BOS_id = self.w2id[self.BOS]
self.EOS_id = self.w2id[self.EOS]
self.SEP_id = self.w2id[self.SEP]
super().__init__([len(self.vocab) - 1] * self.max_length)
self.dtype = np.int64 # Overwrite Gym's dtype=int8.
def tokenize(self, text, padding=False):
""" Tokenize words found in the vocabulary.
        Note: if `padding` is True, the text will be padded up to `self.max_length`.
"""
text = text.lower() # Work only with lowercase letters.
# Find beginning and end of sentences.
text = text.replace(".", " </S> <S> ")
text = "<S> " + text + " </S>"
# Strip out all non-alphabetic characters.
text = text.replace("'", "")
text = re.sub("[^a-z0-9 <S>/]", " ", text)
# TODO: convert numbers to text?
# Get words ids and replace unknown words with <UNK>.
words = text.split()
ids = [self.w2id.get(w, self.UNK_id) for w in words]
# Add padding.
if padding:
nb_pads = self.max_length - len(ids)
msg = "Provided `max_length` was not large enough ({} words).".format(len(ids))
assert nb_pads >= 0, msg
ids += [self.PAD_id] * nb_pads
return np.array(ids)
def __repr__(self):
return "Word(L={}, V={})".format(self.max_length, self.vocab_size)
|
tis/client.py | Fudan-Autonomous-Driving-Perception/BiSeNet | 966 | 12705819 |
import numpy as np
import cv2
import grpc
from tritonclient.grpc import service_pb2, service_pb2_grpc
import tritonclient.grpc.model_config_pb2 as mc
np.random.seed(123)
palette = np.random.randint(0, 256, (100, 3))
# url = '10.128.61.7:8001'
url = '127.0.0.1:8001'
model_name = 'bisenetv2'
model_version = '1'
inp_name = 'input_image'
outp_name = 'preds'
inp_dtype = 'FP32'
outp_dtype = np.int64
inp_shape = [1, 3, 1024, 2048]
outp_shape = [1024, 2048]
impth = '../example.png'
mean = [0.3257, 0.3690, 0.3223] # city, rgb
std = [0.2112, 0.2148, 0.2115]
option = [
('grpc.max_receive_message_length', 1073741824),
('grpc.max_send_message_length', 1073741824),
]
channel = grpc.insecure_channel(url, options=option)
grpc_stub = service_pb2_grpc.GRPCInferenceServiceStub(channel)
metadata_request = service_pb2.ModelMetadataRequest(
name=model_name, version=model_version)
metadata_response = grpc_stub.ModelMetadata(metadata_request)
print(metadata_response)
config_request = service_pb2.ModelConfigRequest(
name=model_name,
version=model_version)
config_response = grpc_stub.ModelConfig(config_request)
print(config_response)
request = service_pb2.ModelInferRequest()
request.model_name = model_name
request.model_version = model_version
inp = service_pb2.ModelInferRequest().InferInputTensor()
inp.name = inp_name
inp.datatype = inp_dtype
inp.shape.extend(inp_shape)
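# The preprocessing below mirrors a typical training-time pipeline: convert BGR to RGB,
# resize to the model's input resolution, scale to [0, 1], normalize with the Cityscapes
# mean/std defined above, and reorder to NCHW before serializing as float32 bytes.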
mean = np.array(mean).reshape(1, 1, 3)
std = np.array(std).reshape(1, 1, 3)
im = cv2.imread(impth)[:, :, ::-1]
im = cv2.resize(im, dsize=tuple(inp_shape[-1:-3:-1]))
im = ((im / 255.) - mean) / std
im = im[None, ...].transpose(0, 3, 1, 2)
inp_bytes = im.astype(np.float32).tobytes()
request.ClearField("inputs")
request.ClearField("raw_input_contents")
request.inputs.extend([inp,])
request.raw_input_contents.extend([inp_bytes,])
outp = service_pb2.ModelInferRequest().InferRequestedOutputTensor()
outp.name = outp_name
request.outputs.extend([outp,])
# sync
# resp = grpc_stub.ModelInfer(request).raw_output_contents[0]
# async
resp = grpc_stub.ModelInfer.future(request)
resp = resp.result().raw_output_contents[0]
out = np.frombuffer(resp, dtype=outp_dtype).reshape(*outp_shape)
out = palette[out]
cv2.imwrite('res.png', out)
|
ISMLnextGen/TornadoTestAsync2.py | Ravenclaw-OIer/ISML_auto_voter | 128 | 12705821 | # coding: utf-8
from AsyncIteratorWrapper import AsyncIteratorWrapper
import asyncio
import functools
import aiohttp
import tornado.web
from tornado.platform.asyncio import AsyncIOMainLoop
from threading import Thread
headers={
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch",
"Accept-Language": "zh-CN,zh;q=0.8",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
}
session = aiohttp.ClientSession(headers=headers)
f=open('./tmp.txt','a',encoding='utf-8')
def printer(future):
print(future.result()[0])
f.write(future.result()[1])
print('done')
async def producer():
future=asyncio.run_coroutine_threadsafe(coro(),worker_loop)
future.add_done_callback(functools.partial(printer))
await asyncio.sleep(0)
async def coro():
async with session.get("https://www.internationalsaimoe.com",ssl=False) as res:
text = await res.text()
return [res.status,text]
class MainHandler(tornado.web.RequestHandler):
async def get(self):
async for i in AsyncIteratorWrapper(range(1)):
future=asyncio.run_coroutine_threadsafe(coro(),worker_loop)
future.add_done_callback(functools.partial(printer))
#await producer()
#await asyncio.run_coroutine_threadsafe(coro(), loop)
#text = await asyncio.get_event_loop().create_task(coro())
#self.write(text)
self.finish('It works!')
def post(self):
print(self.request.body)
#data=self.get_argument('data')
#print(data)
        self.write('POST received')
        print('POST received')
def start_loop(loop):
    # Run the event loop; it is passed in as an argument
loop.run_forever()
if __name__ == "__main__":
AsyncIOMainLoop().install()
app = tornado.web.Application([(r"/", MainHandler)])
app.listen(8888)
print('TornadoTestAsync2@localhost:8888')
producer_loop=asyncio.get_event_loop()
    tornadoThread = Thread(target=start_loop, args=(producer_loop,))  # producer thread
    tornadoThread.start()  # start the producer (Tornado) event loop
worker_loop=asyncio.get_event_loop()
|
sumatra/dependency_finder/core.py | usnistgov/corr-sumatra | 143 | 12705845 | <gh_stars>100-1000
"""
Functions for finding the version of a dependency.
Classes
-------
BaseDependency - base class for holding information about a program component.
Functions
---------
find_versions_from_versioncontrol - determines whether each dependency's file is
                                    under version control, and if so, obtains
                                    version information from this.
find_versions()                   - tries to find version information by calling a
                                    series of functions in turn.
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import unicode_literals
from builtins import object
import os
from sumatra import versioncontrol
def find_versions_from_versioncontrol(dependencies):
"""Determine whether a file is under version control, and if so,
obtain version information from this."""
for dependency in dependencies:
if dependency.version == "unknown":
try:
wc = versioncontrol.get_working_copy(dependency.path)
except versioncontrol.VersionControlError:
pass # dependency.version remains "unknown"
else:
if wc.has_changed():
dependency.diff = wc.diff()
dependency.version = wc.current_version()
dependency.source = wc.repository.url
return dependencies
# add support for using packaging systems, e.g. apt, to find versions.
# add support for looking for Subversion $Id:$ tags, etc.
def find_versions(dependencies, heuristics):
"""
Try to find version information by calling a series of functions in turn.
*dependencies*:
a list of Dependency objects.
*heuristics*:
        a list of functions that each take the full list of dependencies as
        their single argument and return a (possibly updated) list of dependencies.
Returns a possibly modified list of dependencies
"""
for heuristic in heuristics:
dependencies = heuristic(dependencies)
return dependencies
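# Illustrative sketch (added for clarity; not part of the upstream module): a custom
# heuristic has the same shape as find_versions_from_versioncontrol above -- it takes
# the full list of dependencies and returns it, filling in versions where it can.
# The environment-variable naming convention used here is purely hypothetical.
def _example_environment_heuristic(dependencies):  # pragma: no cover - documentation only
    for dependency in dependencies:
        if dependency.version == "unknown":
            dependency.version = os.environ.get(
                "%s_VERSION" % dependency.name.upper(), "unknown")
    return dependencies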
def find_file(path, current_directory, search_dirs):
"""
Look for path as an absolute path then relative to the current directory,
then relative to *search_dirs*.
Return the absolute path.
"""
op = os.path
if op.exists(path):
return op.abspath(path)
for dir in [current_directory] + search_dirs:
search_path = op.join(dir, path)
if op.exists(search_path):
return search_path
raise IOError("File %s does not exist" % path)
class BaseDependency(object):
"""
Contains information about a program component, and tries to determine version information.
*name*:
an identifying name, e.g. the module name in Python
*path*:
the location of the dependency file in the local filesystem
*version*:
the version of the dependency, if that can be determined, otherwise
'unknown'. Always a string, even if the version can also be represented
as a number.
*diff*:
if the dependency is under version control and has been modified, the
diff between the actual version and the last-committed version.
*source*:
an identifier for where the dependency came from, if known, e.g. the
url of a version control repository or the name of a Linux package.
"""
def __init__(self, name, path=None, version='unknown', diff='', source=None):
self.name = name
self.path = path
self.diff = diff
self.version = version
self.source = source # e.g. url of (upstream?) repository
def __repr__(self):
return "%s (%s) version=%s%s" % (self.name, self.path, self.version, self.diff and "*" or '')
def __eq__(self, other):
return self.name == other.name and self.path == other.path and \
self.version == other.version and self.diff == other.diff
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name) ^ hash(self.path) ^ hash(self.version) ^ hash(self.diff)
|
tests/serializers.py | chachabooboo/king-phisher | 1,143 | 12705873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/serializers.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import xml.etree.ElementTree as ET
from king_phisher import testing
from king_phisher import serializers
class _SerializerTests(testing.KingPhisherServerTestCase):
serializer = None
serializer_output_type = None
test_objects = ('test', True, 100)
def test_dump_basic_types(self):
for obj in self.test_objects:
self.assertIsInstance(self.serializer.dumps(obj), self.serializer_output_type)
def test_simple_reload(self):
for obj in self.test_objects:
try:
self.serializer.loads(self.serializer.dumps(obj))
except ValueError:
self.fail("Invalid data type for serializer.{0}.loads()".format(self.serializer.name))
def test_special_types(self):
now = datetime.datetime.now()
serialized = self.serializer.dumps(now)
self.assertIsInstance(serialized, self.serializer_output_type)
self.assertNotEqual(now, serialized)
self.assertNotEqual(type(now), type(serialized))
now_loaded = self.serializer.loads(serialized)
self.assertEqual(now, now_loaded)
class _ElementTreeSerializer(object):
# this class defines a pseudo serializer that allows the functions to be
# tested in the same way as the serializers that are implemented as classes
def dumps(self, value):
parent = ET.Element('parent')
return serializers.to_elementtree_subelement(parent, 'child', value)
def loads(self, element):
return serializers.from_elementtree_element(element)
class ElementTreeTests(_SerializerTests):
serializer = _ElementTreeSerializer()
serializer_output_type = ET.Element
class JSONSerializerTests(_SerializerTests):
serializer = serializers.JSON
serializer_output_type = str
def test_loads_invalid(self):
with self.assertRaises(ValueError):
serializers.JSON.loads("'test")
class MsgPackSerializerTests(_SerializerTests):
serializer = serializers.MsgPack
serializer_output_type = bytes
|
cleo/io/buffered_io.py | Ivoz/cleo | 859 | 12705887 | <reponame>Ivoz/cleo
from typing import Optional
from typing import cast
from .inputs.input import Input
from .io import IO
from .outputs.buffered_output import BufferedOutput
class BufferedIO(IO):
def __init__(
self,
input: Optional[Input] = None,
decorated: bool = False,
supports_utf8: bool = True,
) -> None:
super(BufferedIO, self).__init__(
input,
BufferedOutput(decorated=decorated, supports_utf8=supports_utf8),
BufferedOutput(decorated=decorated, supports_utf8=supports_utf8),
)
self._output = cast(BufferedOutput, self._output)
self._error_output = cast(BufferedOutput, self._error_output)
def fetch_output(self) -> str:
return self._output.fetch()
def fetch_error(self) -> str:
return self._error_output.fetch()
def clear(self) -> None:
self._output.clear()
self._error_output.clear()
def clear_output(self) -> None:
self._output.clear()
def clear_error(self) -> None:
self._error_output.clear()
def supports_utf8(self) -> bool:
return self._output.supports_utf8()
def clear_user_input(self) -> None:
self._input.stream.truncate(0)
self._input.stream.seek(0)
def set_user_input(self, user_input: str) -> None:
self.clear_user_input()
self._input.stream.write(user_input)
self._input.stream.seek(0)
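# Illustrative sketch (not part of the upstream file): given an Input whose stream is
# seekable, a BufferedIO lets tests script user input and inspect captured output, e.g.
#
#   io = BufferedIO(some_input)    # `some_input` is a hypothetical Input instance
#   io.set_user_input("yes\n")     # scripted answer for an interactive prompt
#   ...                            # run a command against `io`
#   captured = io.fetch_output()   # everything the command wrote to the output buffer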
|
Python3/988.py | rakhi2001/ecom7 | 854 | 12705902 | <gh_stars>100-1000
__________________________________________________________________________________________________
sample 44 ms submission
class Solution:
def smallestFromLeaf(self, root: TreeNode) -> str:
def convert(num):
return chr(ord('a') + num)
def dfs(node, pval):
cur_val = convert(node.val)
if not node.left and not node.right:
return cur_val
l = r = None
if node.left:
l = dfs(node.left, convert(node.val)) + cur_val
if node.right:
r = dfs(node.right, convert(node.val)) + cur_val
if node.left and node.right:
if l + pval > r + pval:
return r
else:
return l
return l or r
ret = dfs(root, convert(root.val))
return ret
def smallestFromLeaf(self, root: TreeNode) -> str:
def convert(num):
return chr(ord('a') + num)
def dfs(node, pval):
if not node:
return ''
val = convert(node.val)
if not node.left and not node.right:
return val
lval = rval = ''
if node.left:
lval = dfs(node.left, val) + val
if node.right:
rval = dfs(node.right, val) + val
if not node.left or not node.right:
return lval or rval
if lval + pval < rval + pval:
return lval
return rval
return dfs(root, convert(root.val))
__________________________________________________________________________________________________
sample 14560 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def smallestFromLeaf(self, root: TreeNode) -> str:
def dfs(node, path):
if not node:
return
path.append(chr(ord('a') + node.val))
if not node.left and not node.right:
res[0] = min(res[0], ''.join(path)[::-1])
else:
dfs(node.left, path)
dfs(node.right, path)
del path[-1]
res = [str(chr(ord('z') + 1))]
dfs(root, [])
return res[0]
__________________________________________________________________________________________________
|
atlas/foundations_events/src/test/consumers/jobs/queued/test_creation_time.py | DeepLearnI/atlas | 296 | 12705945 |
import unittest
from mock import Mock
from foundations_events.consumers.jobs.queued.creation_time import CreationTime
class TestCreationTime(unittest.TestCase):
def setUp(self):
self._redis = Mock()
self._consumer = CreationTime(self._redis)
def test_call_saves_creation_time(self):
self._consumer.call({'job_id': 'space pinball'}, 34344, None)
self._redis.set.assert_called_with(
'jobs:space pinball:creation_time', '34344')
def test_call_saves_creation_time_different_job_id(self):
self._consumer.call({'job_id': 'dimensional pinball'}, 34344, None)
self._redis.set.assert_called_with(
'jobs:dimensional pinball:creation_time', '34344')
def test_call_saves_creation_time_different_time(self):
self._consumer.call({'job_id': 'space pinball'}, 99999, None)
self._redis.set.assert_called_with(
'jobs:space pinball:creation_time', '99999')
|
Split a string into two elements.py | DazEB2/SimplePyScripts | 117 | 12705969 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def split_by_pair(text):
items = list()
for i in range(0, len(text), 2):
pair = text[i] + text[i + 1]
items.append(pair)
return items
def split_by_pair_1(text):
result = [a + b for a, b in list(zip(text[::2], text[1::2]))]
return result
def split_by_pair_2(text):
import re
return re.findall('..', text)
if __name__ == '__main__':
text = 'a1b2c3d4f5'
items = split_by_pair(text)
print(items) # ['a1', 'b2', 'c3', 'd4', 'f5']
items = split_by_pair_1(text)
print(items) # ['a1', 'b2', 'c3', 'd4', 'f5']
items = split_by_pair_2(text)
print(items) # ['a1', 'b2', 'c3', 'd4', 'f5']
|
venv/lib/python3.8/site-packages/waitress/tests/fixtureapps/runner.py | ayfallen/urler | 978 | 12705976 | def app(): # pragma: no cover
return None
def returns_app(): # pragma: no cover
return app
|
osp/test/common/utils/test_parse_domain.py | davidmcclure/open-syllabus-project | 220 | 12705984 |
import pytest
from osp.common.utils import parse_domain
@pytest.mark.parametrize('url,domain', [
# Unchanged
(
'test.edu',
'test.edu',
),
# Strip protocol
(
'http://test.edu',
'test.edu',
),
(
'https://test.edu',
'test.edu',
),
    # Strip subdomains
(
'www.test.edu',
'test.edu',
),
(
'sub.test.edu',
'test.edu',
),
# Strip path
(
'http://test.edu/syllabus.pdf',
'test.edu',
),
# Strip whitespace
(
' http://test.edu ',
'test.edu',
),
# Downcase
(
'WWW.TEST.EDU',
'test.edu',
),
# Take second domain in embedded URLs
(
'https://web.archive.org/123/http:/test.edu/syllabus.pdf',
'test.edu',
),
(
'https://web.archive.org/123/https:/test.edu/syllabus.pdf',
'test.edu',
),
])
def test_parse_domain(url, domain):
assert parse_domain(url) == domain
|
examples/cicd/sig-mlops-jenkins-classic/models/news_classifier/src/train_model.py | jsreid13/seldon-core | 3,049 | 12706006 | <gh_stars>1000+
from sklearn.datasets import fetch_20newsgroups
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import joblib
def fetch_data():
categories = ["alt.atheism", "soc.religion.christian", "comp.graphics", "sci.med"]
twenty_train = fetch_20newsgroups(
subset="train", categories=categories, shuffle=True, random_state=42
)
twenty_test = fetch_20newsgroups(
subset="test", categories=categories, shuffle=True, random_state=42
)
return twenty_train, twenty_test
def build_train_model(twenty_train):
text_clf = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", MultinomialNB()),
]
)
text_clf.fit(twenty_train.data, twenty_train.target)
return text_clf
def print_accuracy(twenty_test, text_clf):
predicted = text_clf.predict(twenty_test.data)
print(f"Accuracy: {np.mean(predicted == twenty_test.target):.2f}")
def save_model(text_clf):
joblib.dump(text_clf, "src/model.joblib")
if __name__ == "__main__":
twenty_train, twenty_test = fetch_data()
text_clf = build_train_model(twenty_train)
print_accuracy(twenty_test, text_clf)
save_model(text_clf)
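# Illustrative sketch (not part of the upstream script): the persisted pipeline can be
# reloaded elsewhere and used directly on raw text, e.g.
#
#   clf = joblib.load("src/model.joblib")
#   predicted_class = clf.predict(["OpenGL on the GPU is fast"])[0]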
|
hpccm/building_blocks/mkl.py | robertmaynard/hpc-container-maker | 340 | 12706117 | <reponame>robertmaynard/hpc-container-maker
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""MKL building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class mkl(bb_base, hpccm.templates.envvars, hpccm.templates.wget):
"""The `mkl` building block downloads and installs the [Intel Math
Kernel Library](http://software.intel.com/mkl).
You must agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement)
to use this building block.
# Parameters
environment: Boolean flag to specify whether the environment
(`LD_LIBRARY_PATH`, `PATH`, and other variables) should be
modified to include MKL. The default is True.
eula: By setting this value to `True`, you agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement).
The default value is `False`.
mklvars: MKL provides an environment script (`mklvars.sh`) to
    set up the MKL environment. If this value is `True`, the bashrc is
modified to automatically source this environment script.
However, the MKL environment is not automatically available to
subsequent container image build steps; the environment is
available when the container image is run. To set the MKL
environment in subsequent build steps you can explicitly call
`source /opt/intel/mkl/bin/mklvars.sh intel64` in each build step.
    If this value is set to `False`, then the environment is set such
    that it is visible to both subsequent container image
build steps and when the container image is run. However, the
environment may differ slightly from that set by `mklvars.sh`.
The default value is `True`.
ospackages: List of OS packages to install prior to installing
MKL. For Ubuntu, the default values are `apt-transport-https`,
`ca-certificates`, `gnupg`, and `wget`. For RHEL-based Linux
distributions, the default is an empty list.
version: The version of MKL to install. The default value is
`2020.0-088`.
# Examples
```python
mkl(eula=True, version='2018.3-051')
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(mkl, self).__init__(**kwargs)
# By setting this value to True, you agree to the
# corresponding Intel End User License Agreement
# (https://software.intel.com/en-us/articles/end-user-license-agreement)
self.__eula = kwargs.get('eula', False)
self.__mklvars = kwargs.get('mklvars', True)
self.__ospackages = kwargs.get('ospackages', [])
self.__version = kwargs.get('version', '2020.0-088')
self.__year = '2019' # Also used by 2018 and 2020 versions
self.__bashrc = '' # Filled in by __distro()
if hpccm.config.g_cpu_arch != cpu_arch.X86_64: # pragma: no cover
logging.warning('Using mkl on a non-x86_64 processor')
# Set the Linux distribution specific parameters
self.__distro()
# Fill in container instructions
self.__instructions()
def __instructions(self):
"""Fill in container instructions"""
self += comment('MKL version {}'.format(self.__version))
if self.__ospackages:
self += packages(ospackages=self.__ospackages)
if not self.__eula:
raise RuntimeError('Intel EULA was not accepted. To accept, see the documentation for this building block')
self += packages(
apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
apt_repositories=['deb https://apt.repos.intel.com/mkl all main'],
ospackages=['intel-mkl-64bit-{}'.format(self.__version)],
yum_keys=['https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
yum_repositories=['https://yum.repos.intel.com/mkl/setup/intel-mkl.repo'])
# Set the environment
if self.__mklvars:
# Source the mklvars environment script when starting the
            # container, but the variables will not be available for any
# subsequent build steps.
self += shell(commands=['echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> {}'.format(self.__bashrc)])
else:
# Set the environment so that it will be available to
# subsequent build steps and when starting the container,
# but this may miss some things relative to the mklvars
# environment script.
self.environment_variables={
'CPATH': '/opt/intel/mkl/include:$CPATH',
'LD_LIBRARY_PATH': '/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LD_LIBRARY_PATH',
'LIBRARY_PATH': '/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LIBRARY_PATH',
'MKLROOT': '/opt/intel/mkl'}
self += environment(variables=self.environment_step())
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
if not self.__ospackages:
self.__ospackages = ['apt-transport-https', 'ca-certificates',
'gnupg', 'wget']
self.__bashrc = '/etc/bash.bashrc'
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
if not self.__ospackages:
self.__ospackages = []
self.__bashrc = '/etc/bashrc'
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Examples
```python
m = mkl(...)
Stage0 += m
Stage1 += m.runtime()
```
"""
return str(self)
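# Illustrative recipe sketch (not part of the upstream building block), combining the
# documented options: accept the EULA and export the environment directly so that
# later build steps see MKL without sourcing mklvars.sh.
#
#   m = mkl(eula=True, mklvars=False)
#   Stage0 += m
#   Stage1 += m.runtime()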
|
tf3d/losses/box_prediction_losses_test.py | deepneuralmachine/google-research | 23,901 | 12706136 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ...tf3d.losses.box_prediction_losses."""
import tensorflow as tf
from tf3d import standard_fields
from tf3d.losses import box_prediction_losses
class BoxPredictionLossesTest(tf.test.TestCase):
def _get_random_inputs(self):
return {
standard_fields.InputDataFields.object_rotation_matrix_voxels:
tf.random.uniform([1, 100, 3, 3],
minval=-1.0,
maxval=1.0,
dtype=tf.float32),
standard_fields.InputDataFields.object_length_voxels:
tf.random.uniform([1, 100, 1],
minval=0.1,
maxval=2.0,
dtype=tf.float32),
standard_fields.InputDataFields.object_height_voxels:
tf.random.uniform([1, 100, 1],
minval=0.1,
maxval=2.0,
dtype=tf.float32),
standard_fields.InputDataFields.object_width_voxels:
tf.random.uniform([1, 100, 1],
minval=0.1,
maxval=2.0,
dtype=tf.float32),
standard_fields.InputDataFields.object_center_voxels:
tf.random.uniform([1, 100, 3],
minval=-5.0,
maxval=5.0,
dtype=tf.float32),
standard_fields.InputDataFields.object_class_voxels:
tf.random.uniform([1, 100, 1], minval=0, maxval=7, dtype=tf.int32),
standard_fields.InputDataFields.object_instance_id_voxels:
tf.random.uniform([1, 100, 1], minval=0, maxval=20, dtype=tf.int32),
}
def _get_empty_inputs(self):
inputs = self._get_random_inputs()
for key in inputs:
if key in inputs:
tensor_shape = inputs[key].shape.as_list()
tensor_shape[1] = 0
inputs[key] = tf.zeros(tensor_shape, dtype=inputs[key].dtype)
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[0], dtype=tf.int32)
return inputs
def _get_dictionaries_for_distance_loss_relative(self):
gt_box_center = tf.reshape(
tf.constant([10.0, -20.0, 30.0], dtype=tf.float32), [1, 1, 3])
gt_box_length = tf.reshape(
tf.constant([1.0], dtype=tf.float32), [1, 1, 1])
gt_box_height = tf.reshape(
tf.constant([2.0], dtype=tf.float32), [1, 1, 1])
gt_box_width = tf.reshape(
tf.constant([3.0], dtype=tf.float32), [1, 1, 1])
gt_box_r = tf.reshape(tf.eye(3, dtype=tf.float32), [1, 1, 3, 3])
gt_box_class = tf.reshape(tf.constant([1], dtype=tf.int32), [1, 1, 1])
gt_instance_ids = tf.reshape(tf.constant([1], dtype=tf.int32), [1, 1, 1])
pred_box_center1 = tf.reshape(
tf.constant([10.1, -20.1, 30.1], dtype=tf.float32), [1, 1, 3])
pred_box_length1 = tf.reshape(
tf.constant([1.1], dtype=tf.float32), [1, 1, 1])
pred_box_height1 = tf.reshape(
tf.constant([2.1], dtype=tf.float32), [1, 1, 1])
pred_box_width1 = tf.reshape(
tf.constant([3.1], dtype=tf.float32), [1, 1, 1])
pred_box_r1 = tf.reshape(tf.eye(3, dtype=tf.float32), [1, 1, 3, 3])
pred_box_center2 = tf.reshape(
tf.constant([10.1, -20.2, 30.2], dtype=tf.float32), [1, 1, 3])
pred_box_length2 = tf.reshape(
tf.constant([1.11], dtype=tf.float32), [1, 1, 1])
pred_box_height2 = tf.reshape(
tf.constant([2.11], dtype=tf.float32), [1, 1, 1])
pred_box_width2 = tf.reshape(
tf.constant([3.11], dtype=tf.float32), [1, 1, 1])
pred_box_r2 = tf.reshape(tf.eye(3, dtype=tf.float32), [1, 1, 3, 3])
inputs = {
standard_fields.InputDataFields.object_rotation_matrix_voxels:
gt_box_r,
standard_fields.InputDataFields.object_length_voxels:
gt_box_length,
standard_fields.InputDataFields.object_height_voxels:
gt_box_height,
standard_fields.InputDataFields.object_width_voxels:
gt_box_width,
standard_fields.InputDataFields.object_center_voxels:
gt_box_center,
standard_fields.InputDataFields.object_class_voxels:
gt_box_class,
standard_fields.InputDataFields.object_instance_id_voxels:
gt_instance_ids,
}
outputs1 = {
standard_fields.DetectionResultFields.object_rotation_matrix_voxels:
pred_box_r1,
standard_fields.DetectionResultFields.object_length_voxels:
pred_box_length1,
standard_fields.DetectionResultFields.object_height_voxels:
pred_box_height1,
standard_fields.DetectionResultFields.object_width_voxels:
pred_box_width1,
standard_fields.DetectionResultFields.object_center_voxels:
pred_box_center1,
}
outputs2 = {
standard_fields.DetectionResultFields.object_rotation_matrix_voxels:
pred_box_r2,
standard_fields.DetectionResultFields.object_length_voxels:
pred_box_length2,
standard_fields.DetectionResultFields.object_height_voxels:
pred_box_height2,
standard_fields.DetectionResultFields.object_width_voxels:
pred_box_width2,
standard_fields.DetectionResultFields.object_center_voxels:
pred_box_center2,
}
return inputs, outputs1, outputs2
def test_box_size_regression_loss_on_voxel_tensors_empty_inputs(self):
inputs = self._get_empty_inputs()
outputs = {
standard_fields.DetectionResultFields.object_length_voxels:
tf.zeros([1, 0, 3], dtype=tf.float32),
standard_fields.DetectionResultFields.object_height_voxels:
tf.zeros([1, 0, 3], dtype=tf.float32),
standard_fields.DetectionResultFields.object_width_voxels:
tf.zeros([1, 0, 3], dtype=tf.float32),
}
loss = box_prediction_losses.box_size_regression_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs,
loss_type='huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_size_regression_loss_on_voxel_tensors_correct_prediction(self):
inputs = self._get_random_inputs()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[100], dtype=tf.int32)
outputs = {
standard_fields.DetectionResultFields.object_length_voxels:
inputs[standard_fields.InputDataFields.object_length_voxels],
standard_fields.DetectionResultFields.object_height_voxels:
inputs[standard_fields.InputDataFields.object_height_voxels],
standard_fields.DetectionResultFields.object_width_voxels:
inputs[standard_fields.InputDataFields.object_width_voxels],
}
loss = box_prediction_losses.box_size_regression_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs,
loss_type='huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_size_regression_loss_on_voxel_tensors_relative(self):
(inputs, outputs1,
outputs2) = self._get_dictionaries_for_distance_loss_relative()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[1], dtype=tf.int32)
loss1 = box_prediction_losses.box_size_regression_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs1,
loss_type='huber')
loss2 = box_prediction_losses.box_size_regression_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs2,
loss_type='huber')
self.assertGreater(loss2.numpy(), loss1.numpy())
def test_box_center_distance_loss_on_voxel_tensors_empty_inputs(self):
inputs = self._get_empty_inputs()
outputs = {
standard_fields.DetectionResultFields.object_center_voxels:
tf.zeros([1, 0, 3], dtype=tf.float32),
}
loss = box_prediction_losses.box_center_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs,
loss_type='huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_center_distance_loss_on_voxel_tensors_correct_prediction(self):
inputs = self._get_random_inputs()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[100], dtype=tf.int32)
outputs = {
standard_fields.DetectionResultFields.object_center_voxels:
inputs[standard_fields.InputDataFields.object_center_voxels],
}
loss = box_prediction_losses.box_center_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs,
loss_type='huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_center_distance_loss_on_voxel_tensors_relative(self):
(inputs, outputs1,
outputs2) = self._get_dictionaries_for_distance_loss_relative()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[1], dtype=tf.int32)
loss1 = box_prediction_losses.box_center_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs1,
loss_type='huber')
loss2 = box_prediction_losses.box_center_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs2,
loss_type='huber')
self.assertGreater(loss2.numpy(), loss1.numpy())
def test_box_corner_distance_loss_on_voxel_tensors_empty_inputs(self):
inputs = self._get_empty_inputs()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[0], dtype=tf.int32)
outputs = {
standard_fields.DetectionResultFields.object_rotation_matrix_voxels:
tf.zeros([1, 0, 3, 3], dtype=tf.float32),
standard_fields.DetectionResultFields.object_length_voxels:
tf.zeros([1, 0, 1], dtype=tf.float32),
standard_fields.DetectionResultFields.object_height_voxels:
tf.zeros([1, 0, 1], dtype=tf.float32),
standard_fields.DetectionResultFields.object_width_voxels:
tf.zeros([1, 0, 1], dtype=tf.float32),
standard_fields.DetectionResultFields.object_center_voxels:
tf.zeros([1, 0, 3], dtype=tf.float32),
}
loss = box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs,
loss_type='normalized_huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_corner_distance_loss_on_voxel_tensors_correct_prediction(self):
inputs = self._get_random_inputs()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[100], dtype=tf.int32)
outputs = {
standard_fields.DetectionResultFields.object_rotation_matrix_voxels:
inputs[standard_fields.InputDataFields.object_rotation_matrix_voxels
],
standard_fields.DetectionResultFields.object_length_voxels:
inputs[standard_fields.InputDataFields.object_length_voxels],
standard_fields.DetectionResultFields.object_height_voxels:
inputs[standard_fields.InputDataFields.object_height_voxels],
standard_fields.DetectionResultFields.object_width_voxels:
inputs[standard_fields.InputDataFields.object_width_voxels],
standard_fields.DetectionResultFields.object_center_voxels:
inputs[standard_fields.InputDataFields.object_center_voxels],
}
loss = box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs,
loss_type='normalized_huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_corner_distance_loss_on_voxel_tensors_relative(self):
(inputs, outputs1,
outputs2) = self._get_dictionaries_for_distance_loss_relative()
inputs[standard_fields.InputDataFields.num_valid_voxels] = tf.constant(
[1], dtype=tf.int32)
loss1 = box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs1,
loss_type='normalized_huber')
loss2 = box_prediction_losses.box_corner_distance_loss_on_voxel_tensors(
inputs=inputs,
outputs=outputs2,
loss_type='normalized_huber')
self.assertGreater(loss2.numpy(), loss1.numpy())
def test_box_corner_distance_loss_on_object_tensors_correct_prediction(self):
voxel_inputs = self._get_random_inputs()
inputs = {}
for key, value in standard_fields.get_input_voxel_to_object_field_mapping(
).items():
if key in voxel_inputs:
inputs[value] = [voxel_inputs[key][0, Ellipsis]]
outputs = {
standard_fields.DetectionResultFields.objects_rotation_matrix:
inputs[standard_fields.InputDataFields.objects_rotation_matrix],
standard_fields.DetectionResultFields.objects_length:
inputs[standard_fields.InputDataFields.objects_length],
standard_fields.DetectionResultFields.objects_height:
inputs[standard_fields.InputDataFields.objects_height],
standard_fields.DetectionResultFields.objects_width:
inputs[standard_fields.InputDataFields.objects_width],
standard_fields.DetectionResultFields.objects_center:
inputs[standard_fields.InputDataFields.objects_center],
}
loss = box_prediction_losses.box_corner_distance_loss_on_object_tensors(
inputs=inputs,
outputs=outputs,
loss_type='normalized_huber')
self.assertAllClose(loss.numpy(), 0.0)
def test_box_corner_distance_loss_on_object_tensors_relative(self):
(voxel_inputs, voxel_outputs1,
voxel_outputs2) = self._get_dictionaries_for_distance_loss_relative()
inputs = {}
outputs1 = {}
outputs2 = {}
for key, value in standard_fields.get_input_voxel_to_object_field_mapping(
).items():
if key in voxel_inputs:
inputs[value] = [voxel_inputs[key][0, Ellipsis]]
for key, value in standard_fields.get_output_voxel_to_object_field_mapping(
).items():
if key in voxel_outputs1:
outputs1[value] = [voxel_outputs1[key][0, Ellipsis]]
for key, value in standard_fields.get_output_voxel_to_object_field_mapping(
).items():
if key in voxel_outputs2:
outputs2[value] = [voxel_outputs2[key][0, Ellipsis]]
loss1 = box_prediction_losses.box_corner_distance_loss_on_object_tensors(
inputs=inputs,
outputs=outputs1,
loss_type='normalized_huber')
loss2 = box_prediction_losses.box_corner_distance_loss_on_object_tensors(
inputs=inputs,
outputs=outputs2,
loss_type='normalized_huber')
self.assertGreater(loss2.numpy(), loss1.numpy())
if __name__ == '__main__':
tf.test.main()
|
src/python/pants/jvm/util_rules_test.py | rcuza/pants | 1,806 | 12706148 | <reponame>rcuza/pants
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import hashlib
import pytest
from pants.engine.fs import EMPTY_FILE_DIGEST, CreateDigest, Digest, FileContent, FileDigest
from pants.jvm.util_rules import ExtractFileDigest
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
preserve_tmpdirs=True,
rules=[
*util_rules(),
QueryRule(FileDigest, (ExtractFileDigest,)),
],
)
def get_digest(rule_runner: RuleRunner, source_files: dict[str, str]) -> Digest:
files = [FileContent(path, content.encode()) for path, content in source_files.items()]
return rule_runner.request(Digest, [CreateDigest(files)])
def test_extract_empty_file(rule_runner: RuleRunner) -> None:
digest = get_digest(rule_runner, {"foo.txt": ""})
file_digest = rule_runner.request(
FileDigest,
[ExtractFileDigest(digest=digest, file_path="foo.txt")],
)
assert file_digest == EMPTY_FILE_DIGEST
def test_extract_nonempty_file(rule_runner: RuleRunner) -> None:
digest = get_digest(rule_runner, {"foo.txt": "bar"})
file_digest = rule_runner.request(
FileDigest,
[ExtractFileDigest(digest=digest, file_path="foo.txt")],
)
hasher = hashlib.sha256()
hasher.update(b"bar")
assert file_digest == FileDigest(fingerprint=hasher.hexdigest(), serialized_bytes_length=3)
def test_extract_missing_file(rule_runner: RuleRunner) -> None:
digest = get_digest(rule_runner, {"foo.txt": ""})
with pytest.raises(Exception, match=r".*?not found in.*?"):
rule_runner.request(
FileDigest,
[ExtractFileDigest(digest=digest, file_path="missing")],
)
def test_subset_with_multiple_files(rule_runner: RuleRunner) -> None:
digest = get_digest(rule_runner, {"foo.txt": "", "bar.txt": ""})
with pytest.raises(Exception, match=r".*?found multiple times.*?"):
rule_runner.request(
FileDigest,
[ExtractFileDigest(digest=digest, file_path="*")],
)
|
tools/bin/pythonSrc/pychecker-0.8.18/test/test_module.py | YangHao666666/hawq | 450 | 12706153 | <filename>tools/bin/pythonSrc/pychecker-0.8.18/test/test_module.py
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
'''
Tests related to modules.
'''
import unittest
import common
class SameModuleNameTestCase(common.TestCase):
'''
    Test that modules with the same name do not shadow each other.
'''
def test_getmodule(self):
self.checkMultiple('test_getmodule', [
'getmodule/A/C.py',
'getmodule/B/C.py',
])
if __name__ == '__main__':
unittest.main()
|
pdfarchiver.py | jordanwildon/Telepathy | 213 | 12706156 | <gh_stars>100-1000
#!/usr/bin/env python
"""Telepathy file archiver module:
A tool for archiving files in a chat which may contain metadata.
"""
from telethon.sync import TelegramClient
from telethon.tl.functions.messages import GetDialogsRequest
from telethon.errors import SessionPasswordNeededError
from telethon.tl.types import InputPeerEmpty
from telethon.utils import get_display_name
import pandas as pd
import details as ds
import getpass, csv, os
__author__ = "<NAME> (@jordanwildon)"
__license__ = "MIT License"
__version__ = "1.0.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
#Login details#
api_id = ds.apiID
api_hash = ds.apiHash
phone = ds.number
client = TelegramClient(phone, api_id, api_hash)
#Check authorisation#
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
client.sign_in(phone)
try:
client.sign_in(code=input('Enter code: '))
except SessionPasswordNeededError:
        client.sign_in(password=getpass.getpass(prompt='Password: ', stream=None))
chats = []
last_date = None
chunk_size = 200
groups=[]
result = client(GetDialogsRequest(
offset_date=last_date,
offset_id=0,
offset_peer=InputPeerEmpty(),
limit=chunk_size,
hash = 0
))
chats.extend(result.chats)
for chat in chats:
groups.append(chat)
print('Archiving PDFs...')
async def main():
df = pd.read_csv('to_archive.csv', sep=';')
df = df.To.unique()
for i in df:
print("Working on ", i, "...")
l = []
try:
async for message in client.iter_messages(i):
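                # Keep only original (non-forwarded) document attachments; videos,
                # stickers, audio and voice notes are skipped so that only files
                # likely to carry document metadata (e.g. PDFs) are downloaded.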
if message.forward is None:
if message.video is None:
if message.sticker is None:
if message.audio is None:
if message.voice is None:
if message.document:
i_clean = i
alphanumeric = ""
for character in i_clean:
if character.isalnum():
alphanumeric += character
directory = './' + alphanumeric
try:
os.makedirs(directory)
except FileExistsError:
pass
media_directory = directory + '/media'
try:
os.makedirs(media_directory)
except FileExistsError:
pass
path = await message.download_media(file=media_directory)
print('File saved to', path)
i_clean = i
alphanumeric = ""
print("Scrape completed for", i,", file saved")
except Exception as e:
print("An exception occurred: ", e)
with client:
client.loop.run_until_complete(main())
print('List archived successfully')
again = input('Do you want to archive more chats? (y/n)')
if again == 'y':
print('Restarting...')
exec(open("archiver.py").read())
else:
pass
launcher = input('Do you want to return to the launcher? (y/n)')
if launcher == 'y':
print('Restarting...')
exec(open("launcher.py").read())
else:
print('Thank you for using Telepathy.')
|
docs/source/tutorials/code/serialization.py | PuzeLiu/mushroom-rl | 344 | 12706208 | from mushroom_rl.core import Serializable
import torch
import numpy as np
from mushroom_rl.utils.parameters import Parameter
class TestClass(Serializable):
def __init__(self, value):
# Create some different types of variables
self._primitive_variable = value # Primitive python variable
self._numpy_vector = np.array([1, 2, 3]*value) # Numpy array
self._dictionary = dict(some='random', keywords=2, fill='the dictionary') # A dictionary
# Building a torch object
data_array = np.ones(3)*value
data_tensor = torch.from_numpy(data_array)
self._torch_object = torch.nn.Parameter(data_tensor)
# Some variables that implement the Serializable interface
self._mushroom_parameter = Parameter(2.0*value)
self._list_of_objects = [Parameter(i) for i in range(value)] # This is a list!
# A variable that is not important e.g. a buffer
self.not_important = np.zeros(10000)
# A variable that contains a reference to another variable
self._list_reference = [self._dictionary]
# Superclass constructor
super().__init__()
# Here we specify how to save each component
self._add_save_attr(
_primitive_variable='primitive',
_numpy_vector='numpy',
_dictionary='pickle',
_torch_object='torch',
_mushroom_parameter='mushroom',
# List of mushroom objects can also be saved with the 'mushroom' mode
_list_of_objects='mushroom',
# The '!' is to specify that we save the variable only if full_save is True
not_important='numpy!',
)
def _post_load(self):
if self.not_important is None:
self.not_important = np.zeros(10000)
self._list_reference = [self._dictionary]
def print_variables(obj):
for label, var in vars(obj).items():
if label != '_save_attributes':
if isinstance(var, Parameter):
print(f'{label}: Parameter({var()})')
elif isinstance(var, list) and isinstance(var[0], Parameter):
new_list = [f'Parameter({item()})' for item in var]
print(f'{label}: {new_list}')
else:
print(label, ': ', var)
if __name__ == '__main__':
# Create test object and print its variables
test_object = TestClass(1)
print('###########################################################################################################')
print('The test object contains the following:')
print('-----------------------------------------------------------------------------------------------------------')
print_variables(test_object)
# Changing the buffer
test_object.not_important[0] = 1
# Save the object on disk
test_object.save('test.msh')
# Create another test object
test_object = TestClass(2)
print('###########################################################################################################')
print('After overwriting the test object:')
print('-----------------------------------------------------------------------------------------------------------')
print_variables(test_object)
# Changing the buffer again
test_object.not_important[0] = 1
# Save the other test object, this time remember buffer
test_object.save('test_full.msh', full_save=True)
# Load first test object and print its variables
print('###########################################################################################################')
test_object = TestClass.load('test.msh')
print('Loading previous test object:')
print('-----------------------------------------------------------------------------------------------------------')
print_variables(test_object)
# Load second test object and print its variables
print('###########################################################################################################')
test_object = TestClass.load('test_full.msh')
print('Loading previous test object:')
print('-----------------------------------------------------------------------------------------------------------')
print_variables(test_object)
|
example_training(without_GPUTaskScheduler)/main.py | lipikaramaswamy/DoppelGANger | 157 | 12706217 | <reponame>lipikaramaswamy/DoppelGANger<filename>example_training(without_GPUTaskScheduler)/main.py
import sys
sys.path.append("..")
from gan import output
sys.modules["output"] = output
from gan.doppelganger import DoppelGANger
from gan.util import add_gen_flag, normalize_per_sample
from gan.load_data import load_data
from gan.network import DoppelGANgerGenerator, Discriminator, AttrDiscriminator
import os
import tensorflow as tf
if __name__ == "__main__":
sample_len = 10
(data_feature, data_attribute,
data_gen_flag,
data_feature_outputs, data_attribute_outputs) = \
load_data("../data/web")
print(data_feature.shape)
print(data_attribute.shape)
print(data_gen_flag.shape)
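    # normalize_per_sample rescales each feature to a per-sample range and appends the
    # per-sample min/max as additional attributes; real_attribute_mask records which
    # attribute outputs came from the original data and which were added here.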
(data_feature, data_attribute, data_attribute_outputs,
real_attribute_mask) = \
normalize_per_sample(
data_feature, data_attribute, data_feature_outputs,
data_attribute_outputs)
print(real_attribute_mask)
print(data_feature.shape)
print(data_attribute.shape)
print(len(data_attribute_outputs))
data_feature, data_feature_outputs = add_gen_flag(
data_feature, data_gen_flag, data_feature_outputs, sample_len)
print(data_feature.shape)
print(len(data_feature_outputs))
generator = DoppelGANgerGenerator(
feed_back=False,
noise=True,
feature_outputs=data_feature_outputs,
attribute_outputs=data_attribute_outputs,
real_attribute_mask=real_attribute_mask,
sample_len=sample_len)
discriminator = Discriminator()
attr_discriminator = AttrDiscriminator()
checkpoint_dir = "./test/checkpoint"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sample_dir = "./test/sample"
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
time_path = "./test/time.txt"
epoch = 400
batch_size = 100
vis_freq = 200
vis_num_sample = 5
d_rounds = 1
g_rounds = 1
d_gp_coe = 10.0
attr_d_gp_coe = 10.0
g_attr_d_coe = 1.0
extra_checkpoint_freq = 5
num_packing = 1
run_config = tf.ConfigProto()
with tf.Session(config=run_config) as sess:
gan = DoppelGANger(
sess=sess,
checkpoint_dir=checkpoint_dir,
sample_dir=sample_dir,
time_path=time_path,
epoch=epoch,
batch_size=batch_size,
data_feature=data_feature,
data_attribute=data_attribute,
real_attribute_mask=real_attribute_mask,
data_gen_flag=data_gen_flag,
sample_len=sample_len,
data_feature_outputs=data_feature_outputs,
data_attribute_outputs=data_attribute_outputs,
vis_freq=vis_freq,
vis_num_sample=vis_num_sample,
generator=generator,
discriminator=discriminator,
attr_discriminator=attr_discriminator,
d_gp_coe=d_gp_coe,
attr_d_gp_coe=attr_d_gp_coe,
g_attr_d_coe=g_attr_d_coe,
d_rounds=d_rounds,
g_rounds=g_rounds,
num_packing=num_packing,
extra_checkpoint_freq=extra_checkpoint_freq)
gan.build()
gan.train()
|
src/backend/common/queries/exceptions.py | guineawheek/ftc-data-take-2 | 266 | 12706264 | class DoesNotExistException(Exception):
pass
|
infrabox/test/pyinfraboxutils/test.py | agu3rra/InfraBox | 265 | 12706283 | <gh_stars>100-1000
#! /usr/bin/env python
import unittest
import sys
import xmlrunner
from pyinfraboxutils.coverage import *
class TestCoverageMethods(unittest.TestCase):
def test_jacoco(self):
parser = Parser("data/report_test.xml")
parser.parse(None, create_markup=False)
self.assertTrue(parser.files[0].functions_found == 2)
self.assertTrue(parser.files[0].functions_hit == 0)
self.assertTrue(parser.files[0].branches_found == 2)
self.assertTrue(parser.files[0].branches_hit == 0)
self.assertTrue(parser.files[0].lines_hit == 0)
self.assertTrue(parser.files[0].lines_found == 3)
self.assertTrue(parser.files[0].name == "HelloWorld.java")
def test_parse_dir(self):
parser = Parser("data/")
parser.parse(None, create_markup=False)
hello = 0
hello2 = 1
if parser.files[0].name == "HelloWorld2.java":
hello = 1
hello2 = 0
self.assertTrue(parser.files[hello].functions_found == 2*2)
self.assertTrue(parser.files[hello].functions_hit == 0*2)
self.assertTrue(parser.files[hello].branches_found == 2*2)
self.assertTrue(parser.files[hello].branches_hit == 0*2)
self.assertTrue(parser.files[hello].lines_hit == 0*2)
self.assertTrue(parser.files[hello].lines_found == 3*2)
self.assertTrue(parser.files[hello].name == "HelloWorld.java")
self.assertTrue(parser.files[hello2].functions_found == 2)
self.assertTrue(parser.files[hello2].functions_hit == 0)
self.assertTrue(parser.files[hello2].branches_found == 2)
self.assertTrue(parser.files[hello2].branches_hit == 0)
self.assertTrue(parser.files[hello2].lines_hit == 0)
self.assertTrue(parser.files[hello2].lines_found == 3)
self.assertTrue(parser.files[hello2].name == "HelloWorld2.java")
if __name__ == '__main__':
s = unittest.defaultTestLoader.discover('.')
r = xmlrunner.XMLTestRunner(output='/infrabox/upload/testresult/').run(s)
sys.exit(not r.wasSuccessful())
|
sacrebleu/__init__.py | bricksdont/sacreBLEU | 373 | 12706302 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = '2.0.0'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores'
from .utils import smart_open, SACREBLEU_DIR, download_test_set # noqa: F401
from .utils import get_source_file, get_reference_files # noqa: F401
from .utils import get_available_testsets, get_langpairs_for_testset # noqa: F401
from .metrics.helpers import extract_word_ngrams, extract_char_ngrams # noqa: F401
from .dataset import DATASETS # noqa: F401
from .metrics import BLEU, CHRF, TER # noqa: F401
# Backward compatibility functions for old style API access (<= 1.4.10)
from .compat import corpus_bleu, raw_corpus_bleu, sentence_bleu # noqa: F401
from .compat import corpus_chrf, sentence_chrf # noqa: F401
from .compat import corpus_ter, sentence_ter # noqa: F401
|
cmd/gencudnn/parse.py | xkey-/cu | 275 | 12706314 | from bs4 import BeautifulSoup
import requests
import re
import sys
import os
inputs = {}
outputs = {}
ios = {}
docs = {}
def get():
if os.path.isfile("cache/docs.html"):
with open("cache/docs.html", 'r') as f:
print("Using cache", file=sys.stderr)
return f.read()
r = requests.get("https://docs.nvidia.com/deeplearning/cudnn/api/index.html")
with open("cache/docs.html", 'w') as f:
f.write(r.text)
return r.text
def main():
txt = get()
soup = BeautifulSoup(txt, "html5lib")
contents = soup.find_all(id="contents")
ids = ["cudnn-ops-infer-so-library", "cudnn-ops-train-so-library", "cudnn-cnn-infer-so-library", "cudnn-cnn-train-so-library", "cudnn-adv-infer-so-library", "cudnn-adv-train-so-library", "cudnn-backend-api"]
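    # Each id above is a top-level cuDNN sub-library section; drill into its "-api"
    # subsections, then flatten the per-function topics ("topic concept nested2") into one list.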
topics = [contents[0].find_all(id=i)[0].find_all(id=re.compile("-api")) for i in ids]
topics = [t.find_all(class_="topic concept nested2") for topic in topics for t in topic]
topics = [t for ts in topics for t in ts]
#print(topics[0])
for i, topic in enumerate(topics):
rawFnName = topic.find_all(class_='title topictitle2')[0].text
rawFnName = rawFnName.rstrip('()')
try:
fnName = re.search('cudnn.+$', rawFnName).group(0)
except AttributeError as e:
print("rawFnName: {}".format(rawFnName), file=sys.stderr)
continue
try:
paramsDL = topic.find_all(class_='dl')[0] # first definition list is params
except IndexError:
print("rawFnName: {} - topic has no dl class".format(fnName), file=sys.stderr)
continue
#print(paramsDL)
# check previous
#if paramsDL.previous_sibling.previous_sibling.text != "Parameters":
# print("rawFnName: {} has no params::: {}".format(fnName, paramsDL.previous_sibling), file=sys.stderr)
# continue
params = paramsDL.find_all(class_='dt dlterm') # name
paramsDesc = paramsDL.find_all(class_='dd') # use type
paramUse = []
for d in paramsDesc:
try:
use = d.find_all(class_='ph i')[0].text
except IndexError as e:
use = "Input"
paramUse.append(use)
if len(params) != len(paramUse):
print("rawFnName: {} - differing params and use cases".format(fnName), file=sys.stderr)
continue
inputParams = [p.text.strip() for i, p in enumerate(params) if (paramUse[i].strip()=='Input') or (paramUse[i].strip()=="Inputs")]
outputParams = [p.text.strip() for i, p in enumerate(params) if (paramUse[i].strip()=='Output') or (paramUse[i].strip()=="Outputs")]
ioParams = [p.text.strip() for i, p in enumerate(params) if paramUse[i].strip()=='Input/Output']
inputs[fnName] = inputParams
outputs[fnName] = outputParams
ios[fnName] = ioParams
# extract docs
try:
docbody = topic.find_all(class_='body conbody')[0]
except IndexError:
print("fnName: {} - no body".format(fnName), file=sys.stderr)
continue
# clear is better than clever.
try:
doc = docbody.find_all("p")[0].text
except:
print("fnName: {} - no p".format(fnName), file=sys.stderr)
continue
doc = doc.replace("\n", "")
doc = re.sub("\t+", " ", doc)
doc = re.sub("\s+", " ", doc)
doc = doc.replace('"', '`')
doc = doc.replace("This function", fnName)
doc = doc.replace("This routine", fnName)
doc = doc.replace("This", fnName)
doc = doc.strip()
docs[fnName] = doc
# write the go file
print("package main")
print("/* generated by parse.py. DO NOT EDIT */")
print("var inputParams = map[string][]string{")
for k, v in inputs.items():
if len(v) == 0: continue
print('"{}": {{ '.format(k), end="")
for inp in v :
split = inp.split(",")
for s in split:
print('"{}", '.format(s.strip()), end="")
print("},")
print("}")
print("var outputParams = map[string][]string{")
for k, v in outputs.items():
if len(v) == 0: continue
print('"{}": {{ '.format(k), end="")
for inp in v :
split = inp.split(",")
for s in split:
print('"{}", '.format(s.strip()), end="")
print("},")
print("}")
print("var ioParams = map[string][]string{")
for k, v in ios.items():
if len(v) == 0: continue
print('"{}": {{ '.format(k), end="")
for inp in v :
split = inp.split(",")
for s in split:
print('"{}", '.format(s.strip()), end="")
print("},")
print("}")
print("var docs = map[string]string{")
for k, v in docs.items():
print('"{}": "{}",'.format(k, v.strip()))
print("}")
main()
|
module/Probe_Packet.py | Yiidiir/SniffAir | 1,173 | 12706349 | #!/usr/bin/python
import sys
import signal
import argparse
import logging
logging.getLogger ( "scapy.runtime" ).setLevel ( logging.CRITICAL )
from scapy.all import *
# Setup signal handler to catch CTRL-C
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Get arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--interface', metavar='interface', help='wireless interface to use', required=True)
parser.add_argument('-d', '--delay', metavar='delay', help='seconds to delay (default=.3)', default=.3, type=float)
parser.add_argument('-c', '--count', metavar='count', \
help='number of packets to send per SSID per iteration (default=10)', default=10, type=int)
parser.add_argument('-m', '--mac', metavar='mac', help='last 3 octets of source mac address (default=00:11:22)', default='00:11:22')
ssid_group = parser.add_mutually_exclusive_group(required=True)
ssid_group.add_argument('-s', '--ssid', metavar='ssid', help='ssid name')
ssid_group.add_argument('-f', '--file', metavar='file', help='ssid file')
args = parser.parse_args()
# Create ssid list
ssids = []
if args.file == None:
ssids.extend([args.ssid])
else:
with open(args.file) as f:
content = f.readlines()
ssids = [x.strip() for x in content]
# Setup probe request packet
param = Dot11ProbeReq()
ratestr = '03\x12\x96\x18\x24\x30\x48\x60'
rates = Dot11Elt(ID='Rates',info=ratestr)
dst = 'ff:ff:ff:ff:ff:ff'
# Loop until CTRL-C
while True:
for ssid in ssids:
essid = Dot11Elt(ID='SSID',info=ssid)
#dsset = Dot11Elt(ID='DSset',info='\x01')
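        # addr2 (source MAC): random first three octets from RandMAC(), user-supplied last three (args.mac)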
pkt = RadioTap()\
/Dot11(type=0,subtype=4,addr1=dst,addr2=RandMAC()[0:9]+args.mac,addr3=dst)\
/param/essid/rates
print '[*] 802.11 Probe Request: SSID=[%s], count=%d' % (ssid,args.count)
try:
sendp(pkt,count=args.count,inter=args.delay,verbose=0,iface=args.interface)
except:
raise |
DQM/Physics/test/qcdPhotonsDQM_cfg.py | ckamtsikis/cmssw | 852 | 12706354 | import FWCore.ParameterSet.Config as cms
process = cms.Process("QcdPhotonsDQM")
process.load("DQM.Physics.qcdPhotonsDQM_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.DQM.collectorHost = ''
process.dqmSaver.workflow = cms.untracked.string('/My/Test/DataSet')
## Geometry and Detector Conditions (needed for spike removal code)
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string('START38_V9::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/B81EF896-9AAF-DF11-B31B-001A92971BCA.root',
'/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/886F398A-B8AF-DF11-91A8-003048678FC6.root',
'/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/7830F828-87AF-DF11-9DE0-003048678FD6.root',
'/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0018/26CC6A78-A8AF-DF11-97A5-003048678F78.root',
'/store/relval/CMSSW_3_8_2/RelValPhotonJets_Pt_10/GEN-SIM-RECO/START38_V9-v1/0017/3E226F93-7FAF-DF11-A908-001A92810AF4.root'
)
)
process.p = cms.Path(process.qcdPhotonsDQM+process.dqmSaver)
|
galaxy/api/fields.py | bmclaughlin/galaxy | 904 | 12706369 | <reponame>bmclaughlin/galaxy<gh_stars>100-1000
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import datetime as dt
from rest_framework import serializers
from rest_framework.reverse import reverse
class NativeTimestampField(serializers.DateTimeField):
"""Represents internal timestamp value as date time string."""
def to_representation(self, value):
if value is None:
return None
value = dt.datetime.utcfromtimestamp(value).replace(
tzinfo=dt.timezone.utc)
return super().to_representation(value)
def to_internal_value(self, value):
if value is None:
return None
value = super().to_internal_value(value)
return value.astimezone(dt.timezone.utc).timestamp()
class NamespaceObjectField(serializers.Field):
"""Return namespace object for a serializer field."""
def to_representation(self, value):
return {
'id': value.pk,
'href': reverse(
'api:namespace_detail',
kwargs={'pk': value.pk},
request=self.context.get('request'),
),
'name': value.name,
}
class VersionUrlField(serializers.Field):
"""Return version detail url under collection namespace and name."""
def to_representation(self, value):
return reverse(
'api:v2:version-detail',
kwargs={
'namespace': value.collection.namespace.name,
'name': value.collection.name,
'version': value.version,
},
request=self.context.get('request'),
)
|
webwaybooks/utils/meta.py | bysorry/telegram_media_downloader | 401 | 12706374 | <gh_stars>100-1000
"""Utility module to manage meta info."""
import platform
from . import __version__, __copyright__, __license__
APP_VERSION = f"Telegram Media Downloader {__version__}"
DEVICE_MODEL = (
f"{platform.python_implementation()} {platform.python_version()}"
)
SYSTEM_VERSION = f"{platform.system()} {platform.release()}"
LANG_CODE = "en"
def print_meta(logger):
"""Prints meta-data of the downloader script."""
print(f"Telegram Media Downloader v{__version__}, {__copyright__}")
print(f"Licensed under the terms of the {__license__}", end="\n\n")
logger.info(f"Device: {DEVICE_MODEL} - {APP_VERSION}")
logger.info(f"System: {SYSTEM_VERSION} ({LANG_CODE.upper()})")
|
circus/commands/dstats.py | BradleyKirton/circus | 820 | 12706385 | <filename>circus/commands/dstats.py
from circus.exc import ArgumentError
from circus.commands.base import Command
from circus.util import get_info
_INFOLINE = ("%(pid)s %(cmdline)s %(username)s %(nice)s %(mem_info1)s "
"%(mem_info2)s %(cpu)s %(mem)s %(ctime)s")
class Daemontats(Command):
"""\
Get circusd stats
=================
    You can get some statistics about circusd at any time
    with the dstats command.
ZMQ Message
-----------
To get the circusd stats, simply run::
{
"command": "dstats"
}
    The response returns a mapping with the property "info"
    containing some process information::
{
"info": {
"children": [],
"cmdline": "python",
"cpu": 0.1,
"ctime": "0:00.41",
"mem": 0.1,
"mem_info1": "3M",
"mem_info2": "2G",
"nice": 0,
"pid": 47864,
"username": "root"
},
"status": "ok",
"time": 1332265655.897085
}
Command Line
------------
::
$ circusctl dstats
"""
name = "dstats"
def message(self, *args, **opts):
if len(args) > 0:
raise ArgumentError("Invalid message")
return self.make_message()
def execute(self, arbiter, props):
return {'info': get_info(interval=0.01)}
def _to_str(self, info):
children = info.pop("children", [])
ret = ['Main Process:', ' ' + _INFOLINE % info]
if len(children) > 0:
ret.append('Children:')
for child in children:
ret.append(' ' + _INFOLINE % child)
return "\n".join(ret)
def console_msg(self, msg):
if msg['status'] == "ok":
return self._to_str(msg['info'])
else:
return self.console_error(msg)
|
car/scripts/lidarEvasion.py | GNSS523/Cherry-Autonomous-Racecar | 330 | 12706396 | #!/usr/bin/env python
"""
Copyright (c) 2017 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import rospy
from geometry_msgs.msg import Twist, TwistStamped
from std_msgs.msg import String
from sensor_msgs.msg import Image
from sensor_msgs.msg import Joy
from cv_bridge import CvBridge, CvBridgeError
from Xbox360 import XBox360
import sys, os
import cv2
print cv2.__version__
import time
from threading import Lock
"""
This node subscribes to the lidargrid image topic, which is basically a bird's-eye view of the Neato XV-11 LIDAR's /scan topic.
It then checks the image for white pixels (points) in predefined regions.
If a region contains a white pixel, it publishes a twist message with a steering angle or throttle value to try to avoid the object or to stop.
This node is intended as a primitive layer to help prevent the car from crashing into a wall while under the Neural Net's control.
Future plans:
This node is currently very simple, but it could be expanded with fusion of IMU data. The regions checked for collisions could be modeled on the current speed and steering angle as a basic look-ahead-for-obstacles function.
Note:
The Region function is defined using TF coordinates, which form a right-handed coordinate system; see http://www.pirobot.org/blog/0018/
Toggle on or off with the select button on the Xbox 360 controller.
"""
lidarRadius = 5.0 ##meters
class evasion(object):
def __init__(self):
self.evadeSet = False
self.controller = XBox360()
self.bridge = CvBridge()
self.throttle = 0
self.grid_img = None
##self.throttleLock = Lock()
print "evasion"
rospy.Subscriber("/lidargrid", Image, self.gridCB, queue_size=1)
rospy.Subscriber("/cmd_vel", TwistStamped , self.twistCB , queue_size = 1)
rospy.Subscriber("/joy", Joy, self.joyCB, queue_size=5)
self.pub_img = rospy.Publisher("/steering_img", Image)
self.pub_twist = rospy.Publisher("/lidar_twist", TwistStamped, queue_size=1)
self.sound = rospy.Publisher("/sound_server/speech_synth", String, queue_size=1)
rospy.init_node ('lidar_cmd',anonymous=True)
rospy.spin()
def twistCB(self, cmd_vel):
if self.evadeSet == True:
try:
##rospy.loginfo("cmd_vel Recieved")
self.throttle = cmd_vel.twist.linear.x
normed_throttle = (self.throttle*2.0)-1.0
front_max = 0.3 + 4.5*(normed_throttle**2.5) ##front region scales with throttle value
rospy.loginfo('normed_throttle: '+str(normed_throttle) + ' front_max: '+str(front_max))
front = self.Occupancy(self.grid_img, 0.1, front_max, -0.2, 0.2) ##(2,0.2) to (0.5,-0.2)
right = self.Occupancy(self.grid_img, 0.0, 1, -0.7, -0.2) ##(2,-0.2) to (0,-0.7)
left = self.Occupancy(self.grid_img, 0.0, 1, 0.2, 0.7) ##(2,0.7) to (0,0.2)
everywhere = self.Occupancy(self.grid_img, -4.0, 4.0, -4.0, 4.0)
cmd = TwistStamped()
#rospy.loginfo(self.throttle)
cmd.twist.angular.z = 0.5
cmd.twist.linear.x = -1.0
if front:
cmd.twist.linear.x = 0.5 ##stop
self.pub_twist.publish(cmd)
self.sound.publish("Forward collision detected")
elif left:
cmd.twist.angular.z = 0.7 ##turn right
self.pub_twist.publish(cmd)
self.sound.publish("Left collision detected")
elif right:
cmd.twist.angular.z = 0.3 ##turn left
self.pub_twist.publish(cmd)
self.sound.publish("Right collision detected")
else:
#self.pub_twist.publish(cmd)
pass
except Exception as f:
print(f)
else:
pass
##rospy.loginfo("Not using Evasion")
def gridCB(self, grid):
rospy.loginfo("Grid Recieved")
try:
self.grid_img = self.bridge.imgmsg_to_cv2(grid)
except CvBridgeError as e:
print(e)
"""
    Toggle LIDAR evasion with the select button on the Xbox 360 controller
"""
def joyCB(self, joy):
self.controller.update(joy)
events = self.controller.buttonEvents()
if 'back_pressed' in events:
self.evadeSet = not self.evadeSet
rospy.loginfo(self.evadeSet)
"""
    Converts a TF-coordinate bounding box to pixel indices (startx, starty, endx, endy)
"""
def Region(self, grid, xmin, xmax, ymin, ymax):
pixelwidth = grid.shape[0]
tfwidth = lidarRadius*2.0
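        # The grid spans [-5, +5] m in TF coordinates across pixelwidth pixels; TF axes point
        # forward/left while image indices grow down/right, hence the sign flip in
        # (value*-1.0+5.0)/tfwidth before scaling by pixelwidth.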
endx = int( pixelwidth * ((xmin*-1.0+5.0)/tfwidth) )
endy = int( pixelwidth * ((ymin*-1.0+5.0)/tfwidth) )
startx = int( pixelwidth * ((xmax*-1.0+5.0)/tfwidth) )
starty = int( pixelwidth * ((ymax*-1.0+5.0)/tfwidth) )
startx, starty = max(0, startx), max(0, starty)
endx, endy = min(endx, pixelwidth), min(endy, pixelwidth)
return (startx, starty, endx, endy)
"""
    Checks for occupancy in the specified region, then returns True or False based on the sum of its pixels
"""
def Occupancy(self, grid, xmin, xmax, ymin, ymax):
(startx, starty, endx, endy) = self.Region(grid, xmin, xmax, ymin, ymax)
#rospy.loginfo(str(startx)+" "+str(starty)+" "+str(endx)+" "+str(endy))
region = grid[startx:endx,starty:endy]
#rospy.loginfo(str(region.shape))
sum_ = region.sum()
#rospy.loginfo(str(sum_))
if sum_ > 0:
return True
else:
return False
if __name__ == '__main__':
try:
evasion()
except rospy.ROSInterruptException:
pass
|
testing/buildbot/scripts/upload_test_result_artifacts_unittest.py | zealoussnow/chromium | 14,668 | 12706415 | <reponame>zealoussnow/chromium
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for upload_test_result_artifacts."""
from __future__ import print_function
import json
import mock
import os
import random
import string
import tempfile
import unittest
import upload_test_result_artifacts
class UploadTestResultArtifactsTest(unittest.TestCase):
def setUp(self):
# Used for load tests
self._temp_files = []
def tearDown(self):
# Used for load tests
for fname in self._temp_files:
os.unlink(fname)
### These are load tests useful for seeing how long it takes to upload
### different kinds of test results files. They won't be run as part of
### presubmit testing, since they take a while and talk to the network,
### but the code will stay here in case anyone wants to edit the code
### and wants to check performance. Change the test names from 'loadTestBlah'
### to 'testBlah' to get them to run.
def makeTemp(self, size):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(random.choice(string.ascii_letters) * size)
self._temp_files.append(fname)
return os.path.basename(fname)
def makeTestJson(self, num_tests, artifact_size):
return {
'tests': {
'suite': {
'test%d' % i: {
'artifacts': {
'artifact': self.makeTemp(artifact_size),
},
'expected': 'PASS',
'actual': 'PASS',
} for i in range(num_tests)
}
},
'artifact_type_info': {
'artifact': 'text/plain'
}
}
def _loadTest(self, json_data, upload):
return upload_test_result_artifacts.upload_artifacts(
json_data, '/tmp', upload, 'test-bucket')
def loadTestEndToEndSimple(self):
test_data = self.makeTestJson(1, 10)
print(self._loadTest(test_data, False))
def loadTestEndToEndManySmall(self):
test_data = self.makeTestJson(1000, 10)
self._loadTest(test_data, False)
def loadTestEndToEndSomeBig(self):
test_data = self.makeTestJson(100, 10000000)
self._loadTest(test_data, False)
def loadTestEndToEndVeryBig(self):
test_data = self.makeTestJson(2, 1000000000)
self._loadTest(test_data, False)
### End load test section.
def testGetTestsSimple(self):
self.assertEqual(upload_test_result_artifacts.get_tests({
'foo': {
'expected': 'PASS',
'actual': 'PASS',
},
}), {
('foo',): {
'actual': 'PASS',
'expected': 'PASS',
}
})
def testGetTestsNested(self):
self.assertEqual(upload_test_result_artifacts.get_tests({
'foo': {
'bar': {
'baz': {
'actual': 'PASS',
'expected': 'PASS',
},
'bam': {
'actual': 'PASS',
'expected': 'PASS',
},
},
},
}), {
('foo', 'bar', 'baz'): {
'actual': 'PASS',
'expected': 'PASS',
},
('foo', 'bar', 'bam'): {
'actual': 'PASS',
'expected': 'PASS',
}
})
def testGetTestsError(self):
with self.assertRaises(ValueError):
upload_test_result_artifacts.get_tests([])
def testUploadArtifactsMissingType(self):
"""Tests that the type information is used for validation."""
data = {
'artifact_type_info': {
'log': 'text/plain'
},
'tests': {
'foo': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'screenshot': 'foo.png',
}
}
}
}
with self.assertRaises(ValueError):
upload_test_result_artifacts.upload_artifacts(
data, '/tmp', True, 'test-bucket')
@mock.patch('upload_test_result_artifacts.get_file_digest')
@mock.patch('upload_test_result_artifacts.tempfile.mkdtemp')
@mock.patch('upload_test_result_artifacts.shutil.rmtree')
@mock.patch('upload_test_result_artifacts.shutil.copyfile')
def testUploadArtifactsNoUpload(
self, copy_patch, rmtree_patch, mkd_patch, digest_patch):
"""Simple test; no artifacts, so data shouldn't change."""
mkd_patch.return_value = 'foo_dir'
data = {
'artifact_type_info': {
'log': 'text/plain'
},
'tests': {
'foo': {
'actual': 'PASS',
'expected': 'PASS',
}
}
}
self.assertEqual(upload_test_result_artifacts.upload_artifacts(
data, '/tmp', True, 'test-bucket'), data)
mkd_patch.assert_called_once_with(prefix='upload_test_artifacts')
digest_patch.assert_not_called()
copy_patch.assert_not_called()
rmtree_patch.assert_called_once_with('foo_dir')
@mock.patch('upload_test_result_artifacts.get_file_digest')
@mock.patch('upload_test_result_artifacts.tempfile.mkdtemp')
@mock.patch('upload_test_result_artifacts.shutil.rmtree')
@mock.patch('upload_test_result_artifacts.shutil.copyfile')
@mock.patch('upload_test_result_artifacts.os.path.exists')
def testUploadArtifactsBasic(
self, exists_patch, copy_patch, rmtree_patch, mkd_patch, digest_patch):
"""Upload a single artifact."""
mkd_patch.return_value = 'foo_dir'
exists_patch.return_value = False
digest_patch.return_value = 'deadbeef'
data = {
'artifact_type_info': {
'log': 'text/plain'
},
'tests': {
'foo': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'log': 'foo.txt',
}
}
}
}
self.assertEqual(upload_test_result_artifacts.upload_artifacts(
data, '/tmp', True, 'test-bucket'), {
'artifact_type_info': {
'log': 'text/plain'
},
'tests': {
'foo': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'log': 'deadbeef',
}
}
},
'artifact_permanent_location': 'gs://chromium-test-artifacts/sha1',
})
mkd_patch.assert_called_once_with(prefix='upload_test_artifacts')
digest_patch.assert_called_once_with('/tmp/foo.txt')
copy_patch.assert_called_once_with('/tmp/foo.txt', 'foo_dir/deadbeef')
rmtree_patch.assert_called_once_with('foo_dir')
@mock.patch('upload_test_result_artifacts.get_file_digest')
@mock.patch('upload_test_result_artifacts.tempfile.mkdtemp')
@mock.patch('upload_test_result_artifacts.shutil.rmtree')
@mock.patch('upload_test_result_artifacts.shutil.copyfile')
@mock.patch('upload_test_result_artifacts.os.path.exists')
def testUploadArtifactsComplex(
self, exists_patch, copy_patch, rmtree_patch, mkd_patch, digest_patch):
"""Upload multiple artifacts."""
mkd_patch.return_value = 'foo_dir'
exists_patch.return_value = False
digest_patch.side_effect = [
'deadbeef1', 'deadbeef2', 'deadbeef3', 'deadbeef4']
data = {
'artifact_type_info': {
'log': 'text/plain',
'screenshot': 'image/png',
},
'tests': {
'bar': {
'baz': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'log': 'baz.log.txt',
'screenshot': 'baz.png',
}
}
},
'foo': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'log': 'foo.log.txt',
'screenshot': 'foo.png',
}
},
}
}
self.assertEqual(upload_test_result_artifacts.upload_artifacts(
data, '/tmp', True, 'test-bucket'), {
'artifact_type_info': {
'log': 'text/plain',
'screenshot': 'image/png',
},
'tests': {
'bar': {
'baz': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'log': 'deadbeef1',
'screenshot': 'deadbeef2',
}
}
},
'foo': {
'actual': 'PASS',
'expected': 'PASS',
'artifacts': {
'log': 'deadbeef3',
'screenshot': 'deadbeef4',
}
},
},
'artifact_permanent_location': 'gs://chromium-test-artifacts/sha1',
})
mkd_patch.assert_called_once_with(prefix='upload_test_artifacts')
digest_patch.assert_has_calls([
mock.call('/tmp/baz.log.txt'), mock.call('/tmp/baz.png'),
mock.call('/tmp/foo.log.txt'), mock.call('/tmp/foo.png')])
copy_patch.assert_has_calls([
mock.call('/tmp/baz.log.txt', 'foo_dir/deadbeef1'),
mock.call('/tmp/baz.png', 'foo_dir/deadbeef2'),
mock.call('/tmp/foo.log.txt', 'foo_dir/deadbeef3'),
mock.call('/tmp/foo.png', 'foo_dir/deadbeef4'),
])
rmtree_patch.assert_called_once_with('foo_dir')
def testFileDigest(self):
_, path = tempfile.mkstemp(prefix='file_digest_test')
with open(path, 'w') as f:
f.write('a')
self.assertEqual(
upload_test_result_artifacts.get_file_digest(path),
'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8')
if __name__ == '__main__':
unittest.main()
|
applications/cli/commands/workflow/tests/test_cancel.py | nparkstar/nauta | 390 | 12706428 | <filename>applications/cli/commands/workflow/tests/test_cancel.py
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest.mock import MagicMock
import pytest
from click.testing import CliRunner
from kubernetes.client import CustomObjectsApi
from commands.workflow.cancel import cancel
from cli_text_consts import WorkflowDeleteTexts as Texts
from platform_resources.workflow import ArgoWorkflow
FAKE_WORKFLOW = ArgoWorkflow(name='fake-workflow', namespace='fake-namespace',
k8s_custom_object_api=MagicMock(spec=CustomObjectsApi))
class WorkflowCancelMocks:
def __init__(self, mocker):
self.get_namespace = mocker.patch('commands.workflow.cancel.get_kubectl_current_context_namespace',
return_value='fake-namespace')
self.get_workflow = mocker.patch('commands.workflow.cancel.ArgoWorkflow.get',
return_value=FAKE_WORKFLOW)
self.delete_workflow = mocker.patch.object(self.get_workflow.return_value, 'delete')
@pytest.fixture()
def cancel_mocks(mocker) -> WorkflowCancelMocks:
return WorkflowCancelMocks(mocker=mocker)
def test_cancel(cancel_mocks: WorkflowCancelMocks):
result = CliRunner().invoke(cancel, [FAKE_WORKFLOW.name], catch_exceptions=False)
assert result.exit_code == 0
assert Texts.SUCCESS_MSG.format(workflow_name=FAKE_WORKFLOW.name) in result.output
def test_cancel_not_found(cancel_mocks: WorkflowCancelMocks):
cancel_mocks.get_workflow.return_value = None
result = CliRunner().invoke(cancel, [FAKE_WORKFLOW.name])
assert result.exit_code == 0
assert Texts.NOT_FOUND_MSG.format(workflow_name=FAKE_WORKFLOW.name) in result.output
def test_cancel_other_error(cancel_mocks: WorkflowCancelMocks):
cancel_mocks.delete_workflow.side_effect = RuntimeError
result = CliRunner().invoke(cancel, [FAKE_WORKFLOW.name])
assert result.exit_code == 1
assert Texts.OTHER_ERROR_MSG in result.output
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/configuration/configured_named_op_example.py | dagster-io/dagster | 4,606 | 12706436 | from dagster import Field, In, Int, List, configured, job, op
# start_configured_named
@op(
config_schema={
"is_sample": Field(bool, is_required=False, default_value=False),
},
ins={"xs": In(List[Int])},
)
def get_dataset(context, xs):
if context.op_config["is_sample"]:
return xs[:5]
else:
return xs
# If we want to use the same op configured in multiple ways in the same pipeline,
# we have to specify unique names when configuring them:
sample_dataset = configured(get_dataset, name="sample_dataset")({"is_sample": True})
full_dataset = configured(get_dataset, name="full_dataset")({"is_sample": False})
@job
def datasets():
sample_dataset()
full_dataset()
# end_configured_named
|
test/test_format_utils.py | toskachin/athenacli | 175 | 12706441 | # -*- coding: utf-8 -*-
from collections import namedtuple
from athenacli.packages.format_utils import format_status, humanize_size
def test_format_status_plural():
assert format_status(rows_length=1) == "1 row in set"
assert format_status(rows_length=2) == "2 rows in set"
def test_format_status_no_results():
assert format_status(rows_length=None) == "Query OK"
def test_format_status_with_stats():
FakeCursor = namedtuple("FakeCursor", ["engine_execution_time_in_millis", "data_scanned_in_bytes"])
assert format_status(rows_length=1, cursor=FakeCursor(10, 12345678900)) == "1 row in set\nExecution time: 10 ms, Data scanned: 11.5 GB, Approximate cost: $0.06"
assert format_status(rows_length=2, cursor=FakeCursor(1000, 1234)) == "2 rows in set\nExecution time: 1000 ms, Data scanned: 1.21 KB, Approximate cost: $0.00"
def test_humanize_size():
assert humanize_size(20) == "20 B"
assert humanize_size(2000) == "1.95 KB"
assert humanize_size(200000) == "195.31 KB"
assert humanize_size(20000000) == "19.07 MB"
assert humanize_size(200000000000) == "186.26 GB"
|
kafkashell/__init__.py | amzyang/kafka-shell | 109 | 12706472 | # -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from kafkashell import completer
from kafkashell import config
from kafkashell import constants
from kafkashell import executor
from kafkashell import helpers
from kafkashell import main
from kafkashell import settings
from kafkashell import style
from kafkashell import toolbar
from kafkashell import version
name = "kafkashell"
__all__ = [
"bindings",
"completer",
"config",
"constants",
"executor",
"helpers",
"main",
"settings",
"style",
"toolbar",
"version"
]
|
738 Monotone Increasing Digits.py | krishna13052001/LeetCode | 872 | 12706485 | <gh_stars>100-1000
#!/usr/bin/python3
"""
Given a non-negative integer N, find the largest number that is less than or
equal to N with monotone increasing digits.
(Recall that an integer has monotone increasing digits if and only if each pair
of adjacent digits x and y satisfy x <= y.)
Example 1:
Input: N = 10
Output: 9
Example 2:
Input: N = 1234
Output: 1234
Example 3:
Input: N = 332
Output: 299
Note: N is an integer in the range [0, 10^9].
"""
class Solution:
def monotoneIncreasingDigits(self, N: int) -> int:
"""
332
322
222
fill 9
299
"""
digits = [int(e) for e in str(N)]
pointer = len(digits)
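        # Scan right-to-left: whenever a digit exceeds its right neighbour, decrement it and
        # remember the position; every digit from the last such position onward is filled
        # with 9 afterwards.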
for i in range(len(digits) - 1, 0, -1):
if digits[i - 1] > digits[i]:
pointer = i
digits[i - 1] -= 1
for i in range(pointer, len(digits)):
digits[i] = 9
return int("".join(map(str, digits)))
if __name__ == "__main__":
assert Solution().monotoneIncreasingDigits(10) == 9
assert Solution().monotoneIncreasingDigits(332) == 299
|
exec.py | fredcallaway/SendCode | 177 | 12706495 | import sublime_plugin
class SendCodeExecCommand(sublime_plugin.WindowCommand):
def run(self, code=None, prog=None):
self.window.active_view().run_command(
"send_code",
{"code": code, "prog": prog}
)
# backward compatibility
class SendCodeBuildCommand(SendCodeExecCommand):
pass
|
Chapter09/list_comp_read_file.py | 4n3i5v74/Python-3-Object-Oriented-Programming-Third-Edition | 393 | 12706531 | <filename>Chapter09/list_comp_read_file.py
import sys
filename = sys.argv[1]
with open(filename) as file:
header = file.readline().strip().split("\t")
contacts = [
dict(zip(header, line.strip().split("\t"))) for line in file
]
for contact in contacts:
print("email: {email} -- {last}, {first}".format(**contact))
|
Validation/RecoEgamma/python/egammaPostValidationMiniAOD_cff.py | ckamtsikis/cmssw | 852 | 12706559 | import FWCore.ParameterSet.Config as cms
from Validation.RecoEgamma.electronPostValidationSequenceMiniAOD_cff import *
egammaPostValidationMiniAOD = cms.Sequence( electronPostValidationSequenceMiniAOD )
|
nets/helloworld.py | postBG/netadapt | 158 | 12706576 | import torch
import torch.nn as nn
__all__ = ['HelloWorld', 'helloworld']
class HelloWorld(nn.Module):
def __init__(self, num_classes=10):
super(HelloWorld, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(64, num_classes, kernel_size=3, stride=1, padding=1, bias=False)
)
self.avgpool = nn.AvgPool2d(32, 32)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.shape[0], -1)
return x
def helloworld(num_classes=10):
return HelloWorld()
|
chaos_genius/utils/datetime_helper.py | rsohlot/chaos_genius | 320 | 12706595 | """Provides helper functions related to datetime operations."""
from datetime import date, datetime, timedelta, timezone
import pandas as pd
import pytz
from chaos_genius.core.utils.constants import SUPPORTED_TIMEZONES
from chaos_genius.settings import TIMEZONE
def get_server_timezone():
"""Get server timezone."""
return datetime.now(timezone.utc).astimezone().tzname()
def get_rca_date_from_string(date_value):
"""Get RCA date from string."""
return datetime.strptime(date_value, "%Y/%m/%d %H:%M:%S").date()
def get_datetime_string_with_tz(date_value, hourly=False) -> str:
"""Get date string with timezone."""
if hourly:
main_str = date_value.strftime("%d %b %Y %H:%M") + f" ({TIMEZONE})"
else:
main_str = date_value.strftime("%d %b %Y") + f" ({TIMEZONE})"
return main_str
def _get_tz_from_offset_str(utc_offset_str):
# TODO: update code when tz implementation is complete
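    # Parse the trailing "+HH:MM"/"-HH:MM" of the offset string and return the first pytz
    # timezone whose most recent UTC offset matches; falls back to GMT+00:00 if none does.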
sign = -1 if utc_offset_str[-6] == "-" else 1
utc_offset_mins = int(utc_offset_str[-2:]) * sign
utc_offset_hrs = int(utc_offset_str[-5:-3]) * sign
utc_offset = timedelta(hours=utc_offset_hrs, minutes=utc_offset_mins)
timezones = pytz.all_timezones
for tz_name in timezones:
try:
tz = pytz.timezone(tz_name)
tz_offset = tz._transition_info[-1][0]
if utc_offset == tz_offset:
return tz
except AttributeError:
pass
return _get_tz_from_offset_str("GMT+00:00")
def get_lastscan_string_with_tz(datetime_value_str) -> str:
"""Get last scan time in reporting timezone."""
server_tz_offset = timezone(datetime.now().astimezone().utcoffset())
datetime_value = pd.Timestamp(
datetime.strptime(datetime_value_str, "%Y-%m-%dT%H:%M:%S.%f")
).tz_localize(tz=server_tz_offset)
# TODO : Deprecate SUPPORTED_TIMEZONES over releases.
if TIMEZONE in SUPPORTED_TIMEZONES:
timezone_info = _get_tz_from_offset_str(SUPPORTED_TIMEZONES[TIMEZONE])
else:
timezone_info = TIMEZONE
datetime_value = datetime_value.tz_convert(tz=timezone_info)
main_str = datetime_value.strftime("%d %b %Y %H:%M") + f" ({TIMEZONE})"
return main_str
def convert_datetime_to_timestamp(date_value) -> int:
"""Convert datetime to timestamp."""
if isinstance(date_value, date):
date_value = datetime(
year=date_value.year, month=date_value.month, day=date_value.day
)
return int(date_value.timestamp()) * 1000
|
alipay/aop/api/response/AlipayFundJointaccountFundallocCountQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12706599 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundJointaccountFundallocCountQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFundJointaccountFundallocCountQueryResponse, self).__init__()
self._alloc_amount = None
self._alloc_times = None
@property
def alloc_amount(self):
return self._alloc_amount
@alloc_amount.setter
def alloc_amount(self, value):
self._alloc_amount = value
@property
def alloc_times(self):
return self._alloc_times
@alloc_times.setter
def alloc_times(self, value):
self._alloc_times = value
def parse_response_content(self, response_content):
response = super(AlipayFundJointaccountFundallocCountQueryResponse, self).parse_response_content(response_content)
if 'alloc_amount' in response:
self.alloc_amount = response['alloc_amount']
if 'alloc_times' in response:
self.alloc_times = response['alloc_times']
|
armada_backend/api_images.py | firesoft/armada | 281 | 12706616 | <reponame>firesoft/armada<filename>armada_backend/api_images.py
from armada_backend import api_base, docker_client
from armada_command.docker_utils.images import LocalArmadaImage
from armada_command.scripts.compat import json
class Images(api_base.ApiCommand):
def on_get(self, req, resp, image_name_or_address, image_name=None):
if image_name is None:
dockyard_address = None
image_name = image_name_or_address
else:
dockyard_address = image_name_or_address
image = LocalArmadaImage(dockyard_address, image_name)
try:
docker_api = docker_client.api()
image_info = json.dumps(docker_api.images(image.image_path))
return self.status_ok(resp, {'image_info': '{image_info}'.format(**locals())})
except Exception as e:
return self.status_exception(resp, "Cannot get info about image.", e)
|
sonnet/src/conformance/descriptors.py | ScriptBox99/deepmind-sonnet | 10,287 | 12706622 | <reponame>ScriptBox99/deepmind-sonnet
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module descriptors programatically describe how to use modules."""
import collections
from typing import Callable, Union
import sonnet as snt
import tensorflow as tf
class Wrapped(snt.Module):
@snt.no_name_scope
def __init__(self, wrapped: snt.Module):
super().__init__()
self.wrapped = wrapped
class Training(Wrapped):
@snt.no_name_scope
def __call__(self, x: tf.Tensor):
return self.wrapped(x, is_training=True)
class Recurrent(Wrapped):
"""Unrolls a recurrent module."""
def __init__(self,
module: Union[snt.RNNCore, snt.UnrolledRNN],
unroller=None):
super().__init__(module)
self.unroller = unroller
@snt.no_name_scope
def __call__(self, x: tf.Tensor):
initial_state = self.wrapped.initial_state(batch_size=tf.shape(x)[0])
if isinstance(self.wrapped, snt.UnrolledRNN):
assert self.unroller is None
# The module expects TB...-shaped input as opposed to BT...
x = tf.transpose(x, [1, 0] + list(range(2, x.shape.rank)))
return self.wrapped(x, initial_state)
else:
x = tf.expand_dims(x, axis=0)
return self.unroller(self.wrapped, x, initial_state)
def unwrap(module: snt.Module) -> snt.Module:
while isinstance(module, Wrapped):
module = module.wrapped
return module
# TODO(tomhennigan) De-duplicate this, BATCH_MODULES and goldens.py.
ModuleDescriptor = collections.namedtuple("ModuleDescriptor",
["name", "create", "shape", "dtype"])
ModuleDescriptor.__new__.__defaults__ = (None, None, None, tf.float32)
BATCH_SIZE = 8
# pylint: disable=unnecessary-lambda
BATCH_MODULES = (
ModuleDescriptor(
name="BatchNorm",
create=lambda: Training(snt.BatchNorm(True, True)),
shape=(BATCH_SIZE, 2, 2, 3)),
ModuleDescriptor(
name="Bias", create=lambda: snt.Bias(), shape=(BATCH_SIZE, 3, 3, 3)),
ModuleDescriptor(
name="Conv1D",
create=lambda: snt.Conv1D(3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv1DTranspose",
create=lambda: snt.Conv1DTranspose(3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv2D",
create=lambda: snt.Conv2D(3, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Conv2DTranspose",
create=lambda: snt.Conv2DTranspose(3, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Conv3D",
create=lambda: snt.Conv3D(3, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
ModuleDescriptor(
name="Conv3DTranspose",
create=lambda: snt.Conv3DTranspose(3, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
ModuleDescriptor(
name="CrossReplicaBatchNorm",
create=lambda: Training(snt.distribute.CrossReplicaBatchNorm( # pylint: disable=g-long-lambda
True, True,
snt.ExponentialMovingAverage(0.9),
snt.ExponentialMovingAverage(0.9))),
shape=(BATCH_SIZE, 2, 2, 3)),
ModuleDescriptor(
name="DepthwiseConv2D",
create=lambda: snt.DepthwiseConv2D(3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Dropout",
create=lambda: Training(snt.Dropout(0.5)),
shape=(BATCH_SIZE, 3, 3)),
ModuleDescriptor(
name="Embed",
create=lambda: snt.Embed(10),
shape=(BATCH_SIZE,),
dtype=tf.int32),
ModuleDescriptor(
name="Flatten",
create=lambda: snt.Flatten(),
shape=(BATCH_SIZE, 3, 3, 3)),
ModuleDescriptor(
name="GroupNorm",
create=lambda: snt.GroupNorm(2, True, True),
shape=(BATCH_SIZE, 3, 4)),
ModuleDescriptor(
name="InstanceNorm",
create=lambda: snt.InstanceNorm(True, True),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="LayerNorm",
create=lambda: snt.LayerNorm(1, True, True),
shape=(BATCH_SIZE, 3, 2)),
ModuleDescriptor(
name="Linear", create=lambda: snt.Linear(10), shape=(BATCH_SIZE, 1)),
ModuleDescriptor(
name="Sequential",
create=lambda: snt.Sequential([lambda x: x]),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="nets.VectorQuantizer",
create=lambda: Training(snt.nets.VectorQuantizer(4, 6, 0.25)),
shape=(BATCH_SIZE, 3, 4)),
ModuleDescriptor(
name="nets.VectorQuantizerEMA",
create=lambda: Training(snt.nets.VectorQuantizerEMA(5, 7, 0.5, 0.9)),
shape=(BATCH_SIZE, 5)),
ModuleDescriptor(
name="nets.Cifar10ConvNet",
create=lambda: Training(snt.nets.Cifar10ConvNet()),
shape=(BATCH_SIZE, 3, 3, 2)),
ModuleDescriptor(
name="nets.ResNet50",
create=lambda: Training(snt.nets.ResNet([1, 1, 1, 1], 4)),
shape=(BATCH_SIZE, 3, 3, 2)),
ModuleDescriptor(
name="nets.MLP",
create=lambda: snt.nets.MLP([3, 4, 5]),
shape=(BATCH_SIZE, 3)),
)
RNN_CORES = (
ModuleDescriptor(
name="Conv1DLSTM",
create=lambda: snt.Conv1DLSTM((2, 2), 3, 3),
shape=(BATCH_SIZE, 2, 2)),
ModuleDescriptor(
name="Conv2DLSTM",
create=lambda: snt.Conv2DLSTM((2, 2, 2), 3, 3),
shape=(BATCH_SIZE, 2, 2, 2)),
ModuleDescriptor(
name="Conv3DLSTM",
create=lambda: snt.Conv3DLSTM((2, 2, 2, 2), 3, 3),
shape=(BATCH_SIZE, 2, 2, 2, 2)),
ModuleDescriptor(
name="GRU",
create=lambda: snt.GRU(1),
shape=(BATCH_SIZE, 128)),
ModuleDescriptor(
name="LSTM",
create=lambda: snt.LSTM(1),
shape=(BATCH_SIZE, 128)),
ModuleDescriptor(
name="VanillaRNN",
create=lambda: snt.VanillaRNN(8),
shape=(BATCH_SIZE, 128)),
)
UNROLLED_RNN_CORES = (
ModuleDescriptor(
name="UnrolledLSTM",
create=lambda: snt.UnrolledLSTM(1),
shape=(BATCH_SIZE, 1, 128)),
)
def recurrent_factory(
create_core: Callable[[], snt.RNNCore],
unroller,
) -> Callable[[], Recurrent]:
return lambda: Recurrent(create_core(), unroller)
def unroll_descriptors(descriptors, unroller=None):
"""Returns `Recurrent` wrapped descriptors with the given unroller applied."""
out = []
for name, create, shape, dtype in descriptors:
if unroller is None:
name = "Recurrent({})".format(name)
else:
name = "Recurrent({}, {})".format(name, unroller.__name__)
out.append(
ModuleDescriptor(name=name,
create=recurrent_factory(create, unroller),
shape=shape,
dtype=dtype))
return tuple(out)
RECURRENT_MODULES = (
unroll_descriptors(RNN_CORES, snt.dynamic_unroll) +
unroll_descriptors(RNN_CORES, snt.static_unroll) +
unroll_descriptors(UNROLLED_RNN_CORES))
OPTIMIZER_MODULES = (
ModuleDescriptor(
name="optimizers.Adam",
create=lambda: snt.optimizers.Adam(learning_rate=0.1)),
ModuleDescriptor(
name="optimizers.Momentum",
create=lambda: snt.optimizers.Momentum(learning_rate=0.1, momentum=.9)),
ModuleDescriptor(
name="optimizers.RMSProp",
create=lambda: snt.optimizers.RMSProp(learning_rate=0.1)),
ModuleDescriptor(
name="optimizers.SGD",
create=lambda: snt.optimizers.SGD(learning_rate=0.1)),
)
IGNORED_MODULES = {
# Stateless or abstract.
snt.BatchApply,
snt.Deferred,
snt.Module,
snt.Optimizer,
snt.Reshape,
# Metrics.
snt.ExponentialMovingAverage,
snt.Mean,
snt.Metric,
snt.Sum,
# Normalization.
snt.BaseBatchNorm, # Tested via `snt.BatchNorm`.
# Recurrent.
snt.DeepRNN,
snt.RNNCore,
snt.TrainableState,
snt.UnrolledRNN,
# Tested via `snt.nets.ResNet`.
snt.nets.ResNet50,
snt.nets.resnet.BottleNeckBlockV1,
snt.nets.resnet.BottleNeckBlockV2,
snt.nets.resnet.BlockGroup,
}
|
pipeline/oia/oiaTimingPlot.py | konradotto/TS | 125 | 12706629 | # /* Copyright (C) 2016 Ion Torrent Systems, Inc. All Rights Reserved */
import pandas as pd
import datetime
import dateutil
import matplotlib.dates as dates
from matplotlib import pyplot as plt
import numpy as np
from time import strptime
import os
# put the date on the same line with the cpu data
os.system("awk 'NR%2{printf \"%s \",$0;next;}1' cpu_util.log > cpu_data.log")
df = pd.read_csv(
"cpu_data.log",
names=[
"dow",
"mon",
"day",
"time",
"tz",
"year",
"lcpu",
"us",
"lus",
"sy",
"lsy",
"ni",
"lni",
"id",
"lid",
"wa",
"lwa",
"hi",
"lhi",
"si",
"lsi",
"st",
"lst",
],
delim_whitespace=True,
header=None,
)
data = list(df.T.to_dict().values()) # export the data frame to a python dictionary
x_axis = np.zeros(len(data), dtype="datetime64[s]")
y_axis_idle = np.zeros(len(data))
y_axis_idle_smoothed = np.zeros(len(data))
y_axis_usr = np.zeros(len(data))
y_axis_usr_smoothed = np.zeros(len(data))
y_axis_nice = np.zeros(len(data))
y_axis_nice_smoothed = np.zeros(len(data))
y_axis_sys = np.zeros(len(data))
y_axis_sys_smoothed = np.zeros(len(data))
span = 5
span_gpu = 10
for key in range(0, len(data)):
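    # Rebuild an ISO-8601 timestamp (YYYY-MM-DDTHH:MM:SS) from the separate
    # day/month/year/time fields on the merged date line.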
month = str(strptime(data[key]["mon"], "%b").tm_mon).zfill(2)
datekey = (
str(data[key]["year"])
+ "-"
+ month
+ "-"
+ str(data[key]["day"])
+ "T"
+ data[key]["time"]
)
x_axis[key] = np.datetime64(datekey)
y_axis_idle[key] = int(data[key]["id"])
y_axis_usr[key] = int(data[key]["us"])
y_axis_nice[key] = int(data[key]["ni"])
y_axis_sys[key] = int(data[key]["sy"])
# now, read in the gpu data
df = pd.read_csv(
"gpu_util.log", names=["systemtime", "percent"], sep=",", parse_dates=[0]
) # or:, infer_datetime_format=True)
data2 = list(df.T.to_dict().values()) # export the data frame to a python dictionary
x_axis_gpu = np.zeros(len(data2), dtype="datetime64[s]")
y_axis_gpu = np.zeros(len(data))
y_axis_gpu_smoothed = np.zeros(len(data))
for key in range(0, len(data)):
    if key < len(data2):
        x_axis_gpu[key] = np.datetime64(data2[key]["systemtime"])
        y_axis_gpu[key] = int(data2[key]["percent"].replace(" ", "").replace("%", ""))
    else:
        y_axis_gpu[key] = 0
# print x_axis[0]
# print x_axis_gpu[0]
# print x_axis[len(x_axis)-1]
# print x_axis_gpu[len(x_axis_gpu)-1]
# smooth the data
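# Centred moving average over a +/- span-sample window (span_gpu is defined above but unused).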
if len(data) > span:
for key in range(span, len(data) - span):
sum_gpu = 0
for key2 in range(key - span, key + span):
sum_gpu += y_axis_gpu[key2]
y_axis_gpu_smoothed[key] = sum_gpu / (2 * span)
for key in range(span, len(data) - span):
sum_idle = sum_usr = sum_nice = sum_sys = 0
for key2 in range(key - span, key + span):
sum_idle += y_axis_idle[key2]
sum_usr += y_axis_usr[key2]
sum_nice += y_axis_nice[key2]
sum_sys += y_axis_sys[key2]
y_axis_idle_smoothed[key] = sum_idle / (2 * span)
y_axis_usr_smoothed[key] = sum_usr / (2 * span)
y_axis_nice_smoothed[key] = sum_nice / (2 * span)
y_axis_sys_smoothed[key] = sum_sys / (2 * span)
s = data
wl = 0.6
fsz = 8
fig = plt.figure(figsize=(15, 5))
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(fsz)
# xstart, xend = ax.get_xlim()
# xtickvals =
# ax.xaxis.set_ticks(xtickvals)
plt.plot(x_axis, y_axis_usr, "#be4b48", linewidth=wl, label="% usr")
plt.plot(x_axis, y_axis_nice, "#98b954", linewidth=wl, label="% nice")
plt.plot(x_axis, y_axis_sys, "#7d60a0", linewidth=wl, label="% sys")
plt.plot(x_axis, y_axis_idle, "#46aac5", linewidth=wl, label="% idle")
plt.plot(x_axis, y_axis_gpu, "#000000", linewidth=wl, label="% gpu")
plt.legend(loc="right", bbox_to_anchor=(1.25, 0.5), fontsize=fsz)
plt.savefig("oiaTimingRaw.png")
plt.clf()
wl = 1.0
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.85, box.height])
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(fsz)
plt.plot(x_axis, y_axis_usr_smoothed, "#be4b48", linewidth=wl, label="% usr")
plt.plot(x_axis, y_axis_nice_smoothed, "#98b954", linewidth=wl, label="% nice")
plt.plot(x_axis, y_axis_sys_smoothed, "#7d60a0", linewidth=wl, label="% sys")
plt.plot(x_axis, y_axis_idle_smoothed, "#46aac5", linewidth=wl, label="% idle")
plt.plot(x_axis, y_axis_gpu_smoothed, "#000000", linewidth=0.4, label="% gpu")
plt.legend(loc="right", bbox_to_anchor=(1.25, 0.5), fontsize=fsz)
plt.savefig("oiaTiming.png")
os.remove("cpu_data.log")
|
3-2.Lattice_LSTM/eval.py | techthiyanes/nlp-notebook | 136 | 12706632 | # -*- coding: utf-8 -*-
import torch
from model import LatticeLSTM
from load_data import char2idx, idx2char, label2idx, idx2label, word2idx, data_generator
character_size = len(char2idx)
word_size = len(word2idx)
embed_dim = 300
hidden_dim = 128
TEST_DATA_PATH = "./data/test_data" # 测试数据
device = "cuda" if torch.cuda.is_available() else 'cpu'
model = LatticeLSTM(character_size, word_size, label2idx, embed_dim, hidden_dim).to(device)
model.load_state_dict(torch.load("./saved_model/model_lattice.pth", map_location=device))
model.eval()
def extract(chars, tags):
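    """Decode a BIO tag sequence into [entity_text, entity_type] pairs."""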
result = []
pre = ''
w = []
for idx, tag in enumerate(tags):
if not pre:
if tag.startswith('B'):
pre = tag.split('-')[1]
w.append(chars[idx])
else:
if tag == f'I-{pre}':
w.append(chars[idx])
else:
result.append([w, pre])
w = []
pre = ''
if tag.startswith('B'):
pre = tag.split('-')[1]
w.append(chars[idx])
    if w:
        # flush an entity that runs to the end of the tag sequence
        result.append([w, pre])
    return [[''.join(x[0]), x[1]] for x in result]
gold_num = 0
predict_num = 0
correct_num = 0
for sent, input_ids, input_words, labels_idx in data_generator(TEST_DATA_PATH, char2idx, word2idx, label2idx):
print(f"Sent: {sent}")
chars = [idx2char[ix] for ix in input_ids]
labels = [idx2label[ix] for ix in labels_idx]
entities = extract(chars, labels)
gold_num += len(entities)
print (f'NER: {entities}')
res = model(input_ids, input_words)
pred_labels = [idx2label[ix] for ix in res[1]]
pred_entities = extract(chars, pred_labels)
predict_num += len(pred_entities)
print (f'Predicted NER: {pred_entities}')
print ('---------------\n')
for pred in pred_entities:
if pred in entities:
correct_num += 1
print(f'gold_num = {gold_num}')
print(f'predict_num = {predict_num}')
print(f'correct_num = {correct_num}')
precision = correct_num/predict_num
print(f'precision = {precision}')
recall = correct_num/gold_num
print(f'recall = {recall}')
print(f'f1-score = {2*precision*recall/(precision+recall)}') |
siphon/tests/test_catalog.py | DanielWatkins/siphon | 164 | 12706640 | <gh_stars>100-1000
# Copyright (c) 2013-2019 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the catalog access API."""
from datetime import datetime
import logging
import pytest
from siphon.catalog import get_latest_access_url, TDSCatalog
from siphon.testing import get_recorder
log = logging.getLogger('siphon.catalog')
log.setLevel(logging.WARNING)
recorder = get_recorder(__file__)
@recorder.use_cassette('thredds-test-toplevel-catalog')
def test_basic():
"""Test of parsing a basic catalog."""
url = 'http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalog_refs
@recorder.use_cassette('thredds-test-toplevel-catalog')
def test_catalog_representation():
"""Test string representation of the catalog object."""
url = 'http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert str(cat) == 'Unidata THREDDS Data Server'
@recorder.use_cassette('thredds-test-toplevel-catalog')
def test_catalog_session():
"""Test of catalog session."""
url = 'http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalog_refs
# nothing is returned from the session close nor can you check it
# but the ability to close is what is desired
cat.session.close()
@recorder.use_cassette('thredds-test-latest-gfs-0p5')
def test_access():
"""Test catalog parsing of access methods."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/grib/'
'NCEP/GFS/Global_0p5deg/latest.xml')
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.access_urls
@recorder.use_cassette('thredds-test-default-5-0')
def test_access_default_catalog():
"""Test case-insensitive parsing of access methods in default catalog."""
url = ('http://localhost:8081/thredds/catalog/catalog.xml')
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.access_urls
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_virtual_access():
"""Test access of virtual datasets."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
# find the 2D time coordinate "full collection" dataset
for dataset in list(cat.datasets.values()):
if 'Full Collection' in dataset.name:
ds = dataset
break
assert 'OPENDAP' in ds.access_urls
# TwoD is a virtual dataset, so HTTPServer
# should not be listed here
assert 'HTTPServer' not in ds.access_urls
@recorder.use_cassette('latest_rap_catalog')
def test_get_latest():
"""Test latest dataset helper function."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/'
'grib/NCEP/RAP/CONUS_13km/catalog.xml')
latest_url = get_latest_access_url(url, 'OPENDAP')
assert latest_url
@recorder.use_cassette('latest_rap_catalog')
def test_latest_attribute():
"""Test using the catalog latest attribute."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/'
'grib/NCEP/RAP/CONUS_13km/catalog.xml')
cat = TDSCatalog(url)
assert cat.latest.name == 'RR_CONUS_13km_20150527_0100.grib2'
@recorder.use_cassette('top_level_cat')
def test_tds_top_catalog():
"""Test parsing top-level catalog."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert cat
@recorder.use_cassette('radar_dataset_cat')
def test_simple_radar_cat():
"""Test parsing of radar server catalog."""
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/dataset.xml'
cat = TDSCatalog(url)
assert cat
@recorder.use_cassette('point_feature_dataset_xml')
def test_simple_point_feature_collection_xml():
"""Test accessing point feature top-level catalog."""
url = ('http://thredds.ucar.edu/thredds/catalog/nws/metar/ncdecoded/catalog.xml'
'?dataset=nws/metar/ncdecoded/Metar_Station_Data_fc.cdmr')
cat = TDSCatalog(url)
assert cat
@recorder.use_cassette('html_then_xml_catalog')
def test_html_link(recwarn):
"""Test that we fall-back when given an HTML catalog page."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/'
'grib/NCEP/RAP/CONUS_13km/catalog.html')
TDSCatalog(url)
assert 'Changing' in str(recwarn.pop(UserWarning).message)
@recorder.use_cassette('follow_cat')
def test_catalog_follow():
"""Test catalog reference following."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
ref_name = 'Forecast Model Data'
cat = TDSCatalog(url).catalog_refs[ref_name].follow()
assert cat
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_order():
"""Test that we properly order datasets parsed from the catalog."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert list(cat.datasets) == ['Full Collection (Reference / Forecast Time) Dataset',
'Best NAM CONUS 20km Time Series',
'Latest Collection for NAM CONUS 20km']
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_get_by_index():
"""Test that datasets can be accessed by index."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert cat.datasets[0].name == 'Full Collection (Reference / Forecast Time) Dataset'
assert cat.datasets[1].name == 'Best NAM CONUS 20km Time Series'
assert cat.datasets[2].name == 'Latest Collection for NAM CONUS 20km'
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_str():
"""Test that datasets are printed as expected."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert str(cat.datasets) == ("['Full Collection (Reference / Forecast Time) Dataset', "
"'Best NAM CONUS 20km Time Series', "
"'Latest Collection for NAM CONUS 20km']")
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_sliced_str():
"""Test that datasets are printed as expected when sliced."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert str(cat.datasets[-2:]) == ('[Best NAM CONUS 20km Time Series, '
'Latest Collection for NAM CONUS 20km]')
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_nearest_time():
"""Test getting dataset by time using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
nearest = cat.catalog_refs.filter_time_nearest(datetime(2015, 5, 28, 17))
assert nearest.title == 'NAM_CONUS_20km_noaaport_20150528_1800.grib1'
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_nearest_time_30():
"""Test getting dataset by time; check for a day in the 30s (#gh-173)."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
nearest = cat.catalog_refs.filter_time_nearest(datetime(2015, 5, 30, 11))
assert nearest.title == 'NAM_CONUS_20km_noaaport_20150530_1200.grib1'
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_nearest_time_raises():
"""Test getting dataset by time using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
    # The datasets collection doesn't have any timed datasets
with pytest.raises(ValueError):
cat.datasets.filter_time_nearest(datetime(2015, 5, 28, 17))
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_time_range():
"""Test getting datasets by time range using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
in_range = cat.catalog_refs.filter_time_range(datetime(2015, 5, 28, 0),
datetime(2015, 5, 29, 0))
titles = [item.title for item in in_range]
assert titles == ['NAM_CONUS_20km_noaaport_20150528_0000.grib1',
'NAM_CONUS_20km_noaaport_20150528_0600.grib1',
'NAM_CONUS_20km_noaaport_20150528_1200.grib1',
'NAM_CONUS_20km_noaaport_20150528_1800.grib1',
'NAM_CONUS_20km_noaaport_20150529_0000.grib1']
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_bad_time_range():
"""Test warning message for bad time range."""
with pytest.warns(UserWarning):
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
in_range = cat.catalog_refs.filter_time_range(datetime(2015, 5, 29, 0),
datetime(2015, 5, 28, 0))
assert in_range == []
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_time_range_regex():
"""Test getting datasets by time range using filenames, with manual regex."""
# This is DatasetCollection.default_regex, but tests passing it explicitly
regex = (r'(?P<year>\d{4})(?P<month>[01]\d)(?P<day>[0123]\d)_'
r'(?P<hour>[012]\d)(?P<minute>[0-5]\d)')
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
in_range = cat.catalog_refs.filter_time_range(datetime(2015, 5, 28, 0),
datetime(2015, 5, 29, 0),
regex=regex)
titles = [item.title for item in in_range]
assert titles == ['NAM_CONUS_20km_noaaport_20150528_0000.grib1',
'NAM_CONUS_20km_noaaport_20150528_0600.grib1',
'NAM_CONUS_20km_noaaport_20150528_1200.grib1',
'NAM_CONUS_20km_noaaport_20150528_1800.grib1',
'NAM_CONUS_20km_noaaport_20150529_0000.grib1']
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_time_range_strptime():
"""Test getting datasets by time range using filenames, with strptime."""
regex = r'noaaport_(?P<strptime>\d{8}_\d{4})'
strptime = '%Y%m%d_%H%M'
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
in_range = cat.catalog_refs.filter_time_range(datetime(2015, 5, 28, 0),
datetime(2015, 5, 29, 0),
regex=regex, strptime=strptime)
titles = [item.title for item in in_range]
assert titles == ['NAM_CONUS_20km_noaaport_20150528_0000.grib1',
'NAM_CONUS_20km_noaaport_20150528_0600.grib1',
'NAM_CONUS_20km_noaaport_20150528_1200.grib1',
'NAM_CONUS_20km_noaaport_20150528_1800.grib1',
'NAM_CONUS_20km_noaaport_20150529_0000.grib1']
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_time_range_raises():
"""Test getting datasets by time range using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
# No time-based dataset names
with pytest.raises(ValueError):
cat.datasets.filter_time_range(datetime(2015, 5, 28, 0), datetime(2015, 5, 29, 0))
@recorder.use_cassette('top_level_cat')
def test_catalog_ref_order():
"""Test that catalog references are properly ordered."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert list(cat.catalog_refs) == ['Forecast Model Data', 'Forecast Products and Analyses',
'Observation Data', 'Radar Data', 'Satellite Data',
'Unidata case studies']
@recorder.use_cassette('cat_non_standard_context_path')
def test_non_standard_context_path():
"""Test accessing TDS with non-standard Context Path."""
url = 'http://ereeftds.bom.gov.au/ereefs/tds/catalog/ereef/mwq/P1A/catalog.xml'
cat = TDSCatalog(url)
ds = cat.datasets['A20020101.P1A.ANN_MIM_RMP.nc']
expected = ('http://ereeftds.bom.gov.au/ereefs/tds/dodsC/ereef/mwq/'
'P1A/A20020101.P1A.ANN_MIM_RMP.nc')
assert ds.access_urls['OPENDAP'] == expected
@recorder.use_cassette('cat_access_elements')
def test_access_elements():
"""Test parsing access elements in TDS client catalog."""
url = 'http://oceandata.sci.gsfc.nasa.gov/opendap/SeaWiFS/L3SMI/2001/001/catalog.xml'
cat = TDSCatalog(url)
assert len(list(cat.datasets)) != 0
@recorder.use_cassette('cat_only_http')
def test_simple_service_within_compound():
"""Test parsing of a catalog that asks for a single service within a compound one."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/noaaport/text/'
'tropical/atlantic/hdob/catalog.xml')
cat = TDSCatalog(url)
assert (cat.datasets[0].access_urls
== {'HTTPServer': 'http://thredds-test.unidata.ucar.edu/thredds/'
'fileServer/noaaport/text/tropical/atlantic/hdob/'
'High_density_obs_20170824.txt'})
@recorder.use_cassette('rsmas_ramadda')
def test_ramadda_catalog():
"""Test parsing a catalog from RAMADDA."""
url = 'http://weather.rsmas.miami.edu/repository?output=thredds.catalog'
cat = TDSCatalog(url)
assert len(cat.catalog_refs) == 12
@recorder.use_cassette('rsmas_ramadda_datasets')
def test_ramadda_access_urls():
"""Test creating access urls from a catalog from RAMADDA."""
url = 'http://weather.rsmas.miami.edu/repository?output=thredds.catalog'
# Walk down a few levels to where we can get a dataset
cat = (TDSCatalog(url).catalog_refs[0].follow().catalog_refs[0].follow()
.catalog_refs[0].follow())
ds = cat.datasets[3]
assert ds.access_urls['opendap'] == ('http://weather.rsmas.miami.edu/repository/opendap/'
'synth:a43c1cc4-1cf2-4365-97b9-6768b8201407:L3YyYl91c'
'2VzRUNPQS9keW5hbW9fYmFzaWNfdjJiXzIwMTFhbGwubmM='
'/entry.das')
@recorder.use_cassette('tds50_catalogref_follow')
def test_tds50_catalogref_follow():
"""Test following a catalog ref url on TDS 5."""
cat = TDSCatalog('http://thredds-test.unidata.ucar.edu/thredds/catalog.xml')
assert len(cat.catalog_refs[0].follow().catalog_refs) == 59
@recorder.use_cassette('top_level_cat')
def test_catalog_ref_str():
"""Test that catalog references are properly represented as strings."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert str(cat.catalog_refs[0]) == 'Forecast Model Data'
@recorder.use_cassette('ncei_embedded_metadata')
def test_catalog_with_embedded_metadata_elements():
"""Test catalog with embedded metadata elements."""
url = 'https://www.ncei.noaa.gov/thredds/catalog/namanl/201802/20180220/catalog.xml'
cat = TDSCatalog(url)
md = cat.metadata
assert 'external_metadata' in md
assert 'serviceName' in md
@recorder.use_cassette('latest_resolver_on_latest_dataset')
def test_latest_resolver_fail():
"""Test getting latest on catalog that does not have a resolver."""
cat = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/'
'Global_0p25deg_ana/latest.xml')
latest = ''
with pytest.raises(AttributeError) as excinfo:
latest = cat.latest
assert latest == ''
assert '"latest" not available for this catalog' in str(excinfo.value)
|
challenge_1/python/jcpattison/src/reverse.py | rchicoli/2017-challenges | 271 | 12706642 |
a = input("Enter your string to reverse: \n")
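# a[::-1] is an extended slice with step -1: it walks the string from the last
# character back to the first, producing a new, reversed string.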
print(a[::-1])
|
projects/DensePose/tests/test_video_keyframe_dataset.py | mmabrouk/detectron2 | 21,274 | 12706671 |
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import os
import random
import tempfile
import unittest
import torch
import torchvision.io as io
from densepose.data.transform import ImageResizeTransform
from densepose.data.video import RandomKFramesSelector, VideoKeyframeDataset
try:
import av
except ImportError:
av = None
# copied from torchvision test/test_io.py
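# Produces a (num_frames, height, width, 3) uint8 tensor in which a bright
# Gaussian spot drifts across the frame, so every synthetic frame is distinct.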
def _create_video_frames(num_frames, height, width):
y, x = torch.meshgrid(torch.linspace(-2, 2, height), torch.linspace(-2, 2, width))
data = []
for i in range(num_frames):
xc = float(i) / num_frames
yc = 1 - float(i) / (2 * num_frames)
d = torch.exp(-((x - xc) ** 2 + (y - yc) ** 2) / 2) * 255
data.append(d.unsqueeze(2).repeat(1, 1, 3).byte())
return torch.stack(data, 0)
# adapted from torchvision test/test_io.py
@contextlib.contextmanager
def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None, options=None):
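    # Writes a synthetic MP4 to a temporary file, yields (file name, generated
    # frame tensor) to the caller, and removes the file when the block exits.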
if lossless:
if video_codec is not None:
raise ValueError("video_codec can't be specified together with lossless")
if options is not None:
raise ValueError("options can't be specified together with lossless")
video_codec = "libx264rgb"
options = {"crf": "0"}
if video_codec is None:
video_codec = "libx264"
if options is None:
options = {}
data = _create_video_frames(num_frames, height, width)
with tempfile.NamedTemporaryFile(suffix=".mp4") as f:
f.close()
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
yield f.name, data
os.unlink(f.name)
@unittest.skipIf(av is None, "PyAV unavailable")
class TestVideoKeyframeDataset(unittest.TestCase):
def test_read_keyframes_all(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
dataset = VideoKeyframeDataset(video_list, category_list)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((5, 3, 300, 300)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
def test_read_keyframes_with_selector(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
random.seed(0)
frame_selector = RandomKFramesSelector(3)
dataset = VideoKeyframeDataset(video_list, category_list, frame_selector)
self.assertEqual(len(dataset), 1)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(data1.shape, torch.Size((3, 3, 300, 300)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
def test_read_keyframes_with_selector_with_transform(self):
with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
video_list = [fname]
category_list = [None]
random.seed(0)
frame_selector = RandomKFramesSelector(1)
transform = ImageResizeTransform()
dataset = VideoKeyframeDataset(video_list, category_list, frame_selector, transform)
data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
self.assertEqual(len(dataset), 1)
self.assertEqual(data1.shape, torch.Size((1, 3, 800, 800)))
self.assertEqual(data1.dtype, torch.float32)
self.assertIsNone(categories1[0])
return
self.assertTrue(False)
|
qcengine/procedures/nwchem_opt/__init__.py | MolSSI/dqm_compute | 105 | 12706703 | from typing import Union, Dict, Any
from qcelemental.models import OptimizationInput, AtomicInput, OptimizationResult, Provenance
from qcengine.config import TaskConfig
from qcengine.exceptions import UnknownError, InputError
from qcengine.procedures.nwchem_opt.harvester import harvest_as_atomic_result
from qcengine.programs.nwchem.runner import NWChemHarness
from qcengine.procedures.model import ProcedureHarness
class NWChemDriverProcedure(ProcedureHarness):
"""Structural relaxation using NWChem's optimizer"""
_defaults = {"name": "NWChemDriver", "procedure": "optimization"}
class Config(ProcedureHarness.Config):
pass
def found(self, raise_error: bool = False) -> bool:
nwc_harness = NWChemHarness()
return nwc_harness.found(raise_error)
def get_version(self) -> str:
nwc_harness = NWChemHarness()
return nwc_harness.get_version()
def build_input_model(self, data: Union[Dict[str, Any], "OptimizationInput"]) -> OptimizationInput:
return self._build_model(data, OptimizationInput)
def compute(self, input_data: OptimizationInput, config: TaskConfig) -> "BaseModel":
nwc_harness = NWChemHarness()
self.found(raise_error=True)
# Unify the keywords from the OptimizationInput and QCInputSpecification
# Optimization input will override, but don't tell users this as it seems unnecessary
keywords = input_data.keywords.copy()
keywords.update(input_data.input_specification.keywords)
if keywords.get("program", "nwchem").lower() != "nwchem":
raise InputError("NWChemDriver procedure only works with NWChem")
# Make an atomic input
atomic_input = AtomicInput(
molecule=input_data.initial_molecule,
driver="energy",
keywords=keywords,
**input_data.input_specification.dict(exclude={"driver", "keywords"}),
)
# Build the inputs for the job
job_inputs = nwc_harness.build_input(atomic_input, config)
# Replace the last line with a "task {} optimize"
input_file: str = job_inputs["infiles"]["nwchem.nw"].strip()
beginning, last_line = input_file.rsplit("\n", 1)
assert last_line.startswith("task")
last_line = f"task {last_line.split(' ')[1]} optimize"
job_inputs["infiles"]["nwchem.nw"] = f"{beginning}\n{last_line}"
# Run it!
success, dexe = nwc_harness.execute(job_inputs)
# Check for common errors
if "There is an error in the input file" in dexe["stdout"]:
raise InputError(dexe["stdout"])
if "not compiled" in dexe["stdout"]:
# recoverable with a different compilation with optional modules
raise InputError(dexe["stdout"])
# Parse it
if success:
dexe["outfiles"]["stdout"] = dexe["stdout"]
dexe["outfiles"]["stderr"] = dexe["stderr"]
return self.parse_output(dexe["outfiles"], input_data)
else:
raise UnknownError(dexe["stdout"])
def parse_output(self, outfiles: Dict[str, str], input_model: OptimizationInput) -> OptimizationResult:
# Get the stdout from the calculation (required)
stdout = outfiles.pop("stdout")
stderr = outfiles.pop("stderr")
# Parse out the atomic results from the file
atomic_results = harvest_as_atomic_result(input_model, stdout)
# Isolate the converged result
final_step = atomic_results[-1]
return OptimizationResult(
initial_molecule=input_model.initial_molecule,
input_specification=input_model.input_specification,
final_molecule=final_step.molecule,
trajectory=atomic_results,
energies=[float(r.extras["qcvars"]["CURRENT ENERGY"]) for r in atomic_results],
stdout=stdout,
stderr=stderr,
success=True,
provenance=Provenance(creator="NWChemRelax", version=self.get_version(), routine="nwchem_opt"),
)
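# Illustrative usage sketch (not part of the original module): assuming NWChem is
# installed and this harness is registered with qcengine under "nwchemdriver", a
# geometry optimization could be requested roughly like this:
#
#   import qcengine
#   from qcelemental.models import Molecule, OptimizationInput
#
#   mol = Molecule(symbols=["H", "H"], geometry=[0, 0, 0, 0, 0, 1.4])
#   opt_in = OptimizationInput(
#       initial_molecule=mol,
#       input_specification={"driver": "energy",
#                            "model": {"method": "scf", "basis": "sto-3g"}},
#   )
#   result = qcengine.compute_procedure(opt_in, "nwchemdriver")  # OptimizationResult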
|
examples/render/dimension_linear.py | mozman/ezdxf | 515 | 12706709 |
# Purpose: using DIMENSION horizontal, vertical and rotated
# Copyright (c) 2018-2021, <NAME>
# License: MIT License
from typing import TYPE_CHECKING
import sys
import math
import pathlib
import random
import ezdxf
from ezdxf.tools.standards import setup_dimstyle
from ezdxf.math import Vec3, UCS
import logging
if TYPE_CHECKING:
from ezdxf.eztypes import DimStyle, DimStyleOverride
# ========================================
# IMPORTANT:
# this script uses f-strings (Python 3.6)
# ========================================
if sys.version_info < (3, 6):
print("This script requires Python 3.6 (f-strings)")
sys.exit()
# ========================================
# Setup logging
# ========================================
logging.basicConfig(level="WARNING")
# ========================================
# Setup your preferred output directory
# ========================================
OUTDIR = pathlib.Path("~/Desktop/Outbox").expanduser()
if not OUTDIR.exists():
OUTDIR = pathlib.Path()
# ========================================
# Default text attributes
# ========================================
TEXT_ATTRIBS = {
"height": 0.25,
"style": ezdxf.options.default_dimension_text_style,
}
DIM_TEXT_STYLE = ezdxf.options.default_dimension_text_style
# =======================================================
# Discarding dimension rendering is possible
# for BricsCAD, but is incompatible to AutoCAD -> error
# =======================================================
BRICSCAD = False
def set_text_style(doc, textstyle=DIM_TEXT_STYLE, name="EZDXF"):
if doc.dxfversion == "AC1009":
return
dimstyle = doc.dimstyles.get(name) # type: DimStyle
dimstyle.dxf.dimtxsty = textstyle
def linear_tutorial(dxfversion="R12"):
doc = ezdxf.new(dxfversion, setup=True)
msp = doc.modelspace()
msp.add_line((0, 0), (3, 0))
msp.add_line((0, 7), (10, 0))
# horizontal DIMENSION
# Default DimStyle EZDXF: 1 drawing unit == 1m; scale 1: 100; length_factor=100 -> measurement in cm
#
# base: defines the dimension line, ezdxf accepts any point on the dimension line
# p1: defines the start point of the first extension line, which also defines the first point to measure
# p2: defines the start point of the second extension line, which also defines the second point to measure
dim = msp.add_linear_dim(
base=(3, 2),
p1=(0, 0),
p2=(3, 0),
dimstyle="EZDXF",
override={"dimtxsty": "OpenSans"},
)
# Necessary second step, to create the BLOCK entity with the DIMENSION geometry.
# ezdxf supports DXF R2000 attributes for DXF R12 rendering, but they have to be applied by the DIMSTYLE override
    # feature; these additional attributes are not stored in the XDATA section of the DIMENSION entity, they are just
# used to render the DIMENSION entity.
# The return value `dim` is not a DIMENSION entity, instead a DimStyleOverride object is returned, the DIMENSION
# entity is stored as dim.dimension, see also ezdxf.override.DimStyleOverride class.
dim.render()
# rotated DIMENSION without `override` uses ezdxf.options.default_dimension_text_style (OpenSansCondensed-Light)
# angle: defines the angle of the dimension line in relation to the x-axis of the WCS or UCS, measurement is the
# distance between first and second measurement point in direction of `angle`
dim2 = msp.add_linear_dim(
base=(10, 2),
p1=(7, 0),
p2=(10, 0),
angle=-30,
dimstyle="EZDXF",
override={
"dimdle": 0,
"dimdec": 2,
"dimtfill": 2, # custom text fill
"dimtfillclr": 4, # cyan
},
) # type: DimStyleOverride
    # Some properties have setter methods for convenience; this is also the reason for not calling dim2.render()
    # automatically.
dim2.set_arrows(blk=ezdxf.ARROWS.closed_filled, size=0.25)
dim2.set_text_align(halign="right")
dim2.render()
doc.set_modelspace_vport(height=5, center=(5, 0))
doc.saveas(OUTDIR / f"dim_linear_{dxfversion}_tutorial.dxf")
def example_background_fill(dxfversion="R12"):
"""
    This example shows the background fill feature; ezdxf uses MTEXT for this feature, so it has no effect in DXF R12.
"""
doc = ezdxf.new(dxfversion, setup=True)
msp = doc.modelspace()
msp.add_line((0, 2.2), (10, 2.2))
dim = msp.add_linear_dim(
base=(0, 2),
p1=(0, 0),
p2=(3, 0),
dimstyle="EZDXF",
override={
"dimtfill": 1, # background color
},
) # type: DimStyleOverride
dim.set_text("bgcolor")
dim.render()
dim = msp.add_linear_dim(
base=(0, 2),
p1=(5, 0),
p2=(8, 0),
dimstyle="EZDXF",
override={
"dimtfill": 2, # custom text fill
"dimtfillclr": 4, # cyan
},
) # type: DimStyleOverride
dim.set_text("cyan")
dim.render()
doc.saveas(OUTDIR / f"background_fill_example_{dxfversion}.dxf")
def example_for_all_text_placings_R12():
doc = ezdxf.new("R12", setup=True)
example_for_all_text_placings(doc, "dim_linear_text_placing_R12.dxf")
def example_for_all_text_placings_ucs_R12():
ucs = UCS(origin=(10, 10, 0), ux=(3, 1, 0), uz=(0, 0, 1))
doc = ezdxf.new("R12", setup=True)
example_for_all_text_placings(
doc, "dim_linear_text_placing_ucs_R12.dxf", ucs
)
def example_for_all_text_placings_in_space_R12():
ucs = UCS(ux=(1, 1, 0), uy=(0, 0, 1))
doc = ezdxf.new("R12", setup=True)
example_for_all_text_placings(
doc, "dim_linear_text_placing_in_space_R12.dxf", ucs
)
def example_for_all_text_placings_R2007():
doc = ezdxf.new("R2007", setup=True)
set_text_style(doc)
example_for_all_text_placings(doc, "dim_linear_text_placing_R2007.dxf")
def example_for_all_text_placings_ucs_R2007():
ucs = UCS(origin=(10, 10, 0), ux=(3, 1, 0), uz=(0, 0, 1))
doc = ezdxf.new("R2007", setup=True)
set_text_style(doc)
example_for_all_text_placings(
doc, "dim_linear_text_placing_ucs_R2007.dxf", ucs
)
def example_for_all_text_placings_in_space_R2007():
ucs = (
UCS(origin=(20, 20, 0))
.rotate_local_x(math.radians(45))
.rotate_local_z(math.radians(45))
)
doc = ezdxf.new("R2007", setup=True)
set_text_style(doc)
example_for_all_text_placings(
doc, "dim_linear_text_placing_in_space_R2007.dxf", ucs
)
def example_for_all_text_placings(doc, filename, ucs=None):
"""
This example shows many combinations of dimension text placing by `halign`, `valign` and user defined location
override.
Args:
doc: DXF drawing
filename: file name for saving
ucs: user defined coordinate system
"""
def add_text(lines, insert):
insert += (0.2, 0)
attribs = dict(TEXT_ATTRIBS)
line_space = 0.4
delta = Vec3(0, line_space, 0)
for line in lines:
text = msp.add_text(line, dxfattribs=attribs).set_pos(insert)
if ucs:
text.transform(ucs.matrix)
insert -= delta
msp = doc.modelspace()
setup_dimstyle(
doc,
name="TICK",
fmt="EZ_M_100_H25_CM",
style=DIM_TEXT_STYLE,
)
setup_dimstyle(
doc,
name="ARCHTICK",
fmt="EZ_M_100_H25_CM",
blk=ezdxf.ARROWS.architectural_tick,
style=DIM_TEXT_STYLE,
)
setup_dimstyle(
doc,
name="CLOSEDBLANK",
fmt="EZ_M_100_H25_CM",
blk=ezdxf.ARROWS.closed_blank,
style=DIM_TEXT_STYLE,
)
def text(dimstyle, x, y, halign, valign, oblique=0):
"""
Default dimension text placing
Args:
dimstyle: dimstyle to use
x: start point x
y: start point y
halign: horizontal text alignment - `left`, `right`, `center`, `above1`, `above2`, requires DXF R2000+
valign: vertical text alignment `above`, `center`, `below`
oblique: angle of oblique extension line, 0 = orthogonal to dimension line
"""
dimattr = {}
if oblique:
dimattr["oblique_angle"] = oblique
base = (x, y + 2)
# wide
dim = msp.add_linear_dim(
base=base,
p1=(x, y),
p2=(x + 5, y),
dimstyle=dimstyle,
dxfattribs=dimattr,
) # type: DimStyleOverride
dim.set_text_align(halign=halign, valign=valign)
dim.render(ucs=ucs, discard=BRICSCAD)
add_text(
[f"halign={halign}", f"valign={valign}", f"oblique={oblique}"],
insert=Vec3(x, y),
)
# narrow
dim = msp.add_linear_dim(
base=base,
p1=(x + 7, y),
p2=(x + 7.3, y),
dimstyle=dimstyle,
dxfattribs=dimattr,
) # type: DimStyleOverride
dim.set_text_align(halign=halign, valign=valign)
dim.render(ucs=ucs, discard=BRICSCAD)
# arrows inside, text outside
dim = msp.add_linear_dim(
base=base,
p1=(x + 10, y),
p2=(x + 10.9999, y),
dimstyle=dimstyle,
override={"dimdec": 2},
dxfattribs=dimattr,
) # type: DimStyleOverride
dim.set_text_align(halign=halign, valign=valign)
dim.render(ucs=ucs, discard=BRICSCAD)
# narrow and force text inside
dim = msp.add_linear_dim(
base=base,
p1=(x + 14, y),
p2=(x + 14.3, y),
dimstyle=dimstyle,
override={"dimtix": 1},
dxfattribs=dimattr,
) # type: DimStyleOverride
dim.set_text_align(halign=halign, valign=valign)
dim.render(ucs=ucs, discard=BRICSCAD)
def user_text_free(dimstyle, x=0, y=0, leader=False):
"""
User defined dimension text placing.
Args:
dimstyle: dimstyle to use
x: start point x
y: start point y
leader: use leader line if True
"""
override = {
"dimdle": 0.0,
"dimexe": 0.5, # length of extension line above dimension line
"dimexo": 0.5, # extension line offset
"dimtfill": 2, # custom text fill
"dimtfillclr": 4, # cyan
}
base = (x, y + 2)
dim = msp.add_linear_dim(
base=base,
p1=(x, y),
p2=(x + 3, y),
dimstyle=dimstyle,
override=override,
) # type: DimStyleOverride
location = Vec3(x + 3, y + 3, 0)
dim.set_location(location, leader=leader)
dim.render(ucs=ucs, discard=BRICSCAD)
add_text(
[f"usr absolute={location}", f"leader={leader}"], insert=Vec3(x, y)
)
x += 4
dim = msp.add_linear_dim(
base=base,
p1=(x, y),
p2=(x + 3, y),
dimstyle=dimstyle,
override=override,
) # type: DimStyleOverride
relative = Vec3(-1, +1) # relative to dimline center
dim.set_location(relative, leader=leader, relative=True)
dim.render(ucs=ucs, discard=BRICSCAD)
add_text(
[f"usr relative={relative}", f"leader={leader}"], insert=Vec3(x, y)
)
x += 4
dim = msp.add_linear_dim(
base=base,
p1=(x, y),
p2=(x + 3, y),
dimstyle=dimstyle,
override=override,
) # type: DimStyleOverride
dh = -0.7
dv = 1.5
dim.shift_text(dh, dv)
dim.render(ucs=ucs, discard=BRICSCAD)
add_text(
[
f"shift text=({dh}, {dv})",
],
insert=Vec3(x, y),
)
override["dimtix"] = 1 # force text inside
x += 4
dim = msp.add_linear_dim(
base=base,
p1=(x, y),
p2=(x + 0.3, y),
dimstyle=dimstyle,
override=override,
) # type: DimStyleOverride
dh = 0
dv = 1
dim.shift_text(dh, dv)
dim.render(ucs=ucs, discard=BRICSCAD)
add_text(
[
f"shift text=({dh}, {dv})",
],
insert=Vec3(x, y),
)
dimstyles = ["TICK", "ARCHTICK", "CLOSEDBLANK"]
xoffset = 17
yoffset = 5
for col, dimstyle in enumerate(dimstyles):
row = 0
for halign in ("center", "left", "right"):
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign=halign,
valign="above",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign=halign,
valign="center",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign=halign,
valign="below",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="above1",
valign="above",
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="above2",
valign="above",
)
row += 1
user_text_free(dimstyle, x=col * xoffset, y=row * yoffset)
row += 1
user_text_free(dimstyle, x=col * xoffset, y=row * yoffset, leader=True)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="center",
valign="above",
oblique=70,
)
row += 1
text(
dimstyle,
x=col * xoffset,
y=row * yoffset,
halign="above1",
valign="above",
oblique=80,
)
row += 1
doc.saveas(OUTDIR / filename)
def example_multi_point_linear_dimension():
"""
Example for using the ezdxf "multi-point linear dimension" feature, which generates dimension entities for multiple
    points at once and tries to move the dimension text to a readable location.
This feature works best with DXF R2007+.
"""
doc = ezdxf.new("R2007", setup=True)
msp = doc.modelspace()
points = [(0, 0), (5, 1), (5.2, 1), (5.4, 0), (7, 0), (10, 3)]
msp.add_lwpolyline(points)
    # quickly create a new DIMSTYLE as an alternative to overriding DIMSTYLE attributes
dimstyle = doc.dimstyles.duplicate_entry(
"EZDXF", "WITHTFILL"
) # type: DimStyle
dimstyle.dxf.dimtfill = 1
msp.add_multi_point_linear_dim(
base=(0, 5), points=points, dimstyle="WITHTFILL"
)
    doc.saveas(OUTDIR / "multi_point_linear_dim_R2007.dxf")
def random_point(start, end):
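    # Returns a random Vec3 with x and y drawn independently from [start, end), z = 0.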
dist = end - start
return Vec3(start + random.random() * dist, start + random.random() * dist)
def example_random_multi_point_linear_dimension(
count=10, length=20, discard=BRICSCAD
):
"""
Example for using the ezdxf "multi-point linear dimension" feature, which generates dimension entities for multiple
    points at once and tries to move the dimension text to a readable location.
This feature works best with DXF R2007+.
"""
doc = ezdxf.new("R2007", setup=True)
msp = doc.modelspace()
# create a random polyline.
points = [random_point(0, length) for _ in range(count)]
msp.add_lwpolyline(points, dxfattribs={"color": 1})
    # quickly create a new DIMSTYLE as an alternative to overriding DIMSTYLE attributes
dimstyle = doc.dimstyles.duplicate_entry(
"EZDXF", "WITHTFILL"
) # type: DimStyle
dimstyle.dxf.dimtfill = 1
dimstyle.dxf.dimdec = 2
dimstyle = doc.dimstyles.duplicate_entry(
"WITHTFILL", "WITHTXT"
) # type: DimStyle
dimstyle.dxf.dimblk = ezdxf.ARROWS.closed
dimstyle.dxf.dimtxsty = "STANDARD"
dimstyle.dxf.dimrnd = 0.5
dimstyle.set_text_align(valign="center")
msp.add_multi_point_linear_dim(
base=(0, length + 2),
points=points,
dimstyle="WITHTFILL",
discard=discard,
)
msp.add_multi_point_linear_dim(
base=(-2, 0),
points=points,
angle=90,
dimstyle="WITHTFILL",
discard=discard,
)
msp.add_multi_point_linear_dim(
base=(10, -10),
points=points,
angle=45,
dimstyle="WITHTXT",
discard=discard,
)
    doc.saveas(OUTDIR / "multi_random_point_linear_dim_R2007.dxf")
def linear_all_arrow_style(
version="R12", dimltype=None, dimltex1=None, dimltex2=None, filename=""
):
"""
Show all AutoCAD standard arrows on a linear dimension.
Args:
version: DXF version
dimltype: dimension linetype
dimltex1: linetype for first extension line
dimltex2: linetype for second extension line
filename: filename for saving
"""
doc = ezdxf.new(version, setup=True)
msp = doc.modelspace()
ezdxf_dimstyle = doc.dimstyles.get("EZDXF") # type: DimStyle
ezdxf_dimstyle.copy_to_header(doc)
for index, name in enumerate(sorted(ezdxf.ARROWS.__all_arrows__)):
y = index * 4
attributes = {
"dimtxsty": "LiberationMono",
"dimdle": 0.5,
}
if dimltype:
attributes["dimltype"] = dimltype
if dimltex1:
attributes["dimltex1"] = dimltex1
if dimltex2:
attributes["dimltex2"] = dimltex2
dim = msp.add_linear_dim(
base=(3, y + 2),
p1=(0, y),
p2=(3, y),
dimstyle="EZDXF",
override=attributes,
) # type: DimStyleOverride
dim.set_arrows(blk=name, size=0.25)
dim.render()
if not filename:
filename = "all_arrow_styles_dim_{}.dxf".format(version)
doc.saveas(OUTDIR / filename)
def linear_tutorial_using_tolerances(version="R2000"):
"""
Shows usage of tolerances for the dimension text.
    ezdxf uses MTEXT features for tolerance rendering and therefore requires DXF R2000+, but if you are using a
    friendly CAD application like BricsCAD, you can let the CAD application do the rendering job; be aware these
    files are not AutoCAD compatible.
Args:
version: DXF version
"""
doc = ezdxf.new(version, setup=True)
msp = doc.modelspace()
# DO NOT RENDER BY EZDXF for DXF R12
discard = version == "R12"
tol_style = doc.dimstyles.duplicate_entry(
"EZDXF", "TOLERANCE"
) # type: DimStyle
# not all features are supported by DXF R12:
# zero suppression (DIMTZIN), align (DIMTOLJ) and dec (DIMTDEC) require DXF R2000+
tol_style.set_tolerance(0.1, hfactor=0.5, align="top", dec=2)
msp.add_linear_dim(
base=(0, 3), p1=(0, 0), p2=(10, 0), dimstyle="tolerance"
).render(discard=discard)
dim = msp.add_linear_dim(
base=(0, 3), p1=(15, 0), p2=(15.5, 0), dimstyle="tolerance"
)
# set tolerance attributes by dim style override
dim.set_tolerance(0.1, 0.15, hfactor=0.4, align="middle", dec=2)
dim.render(discard=discard)
doc.saveas(OUTDIR / f"dimensions_with_tolerance_{version}.dxf")
def linear_tutorial_using_limits(version="R2000"):
"""
    Shows usage of limits for the dimension text; limits are the lower and upper limit for the measured distance, the
    measurement itself is not shown.
    ezdxf uses MTEXT features for limits rendering and therefore requires DXF R2000+, but if you are using a
    friendly CAD application like BricsCAD, you can let the CAD application do the rendering job; be aware these
    files are not AutoCAD compatible.
Args:
version: DXF version
"""
doc = ezdxf.new(version, setup=True)
msp = doc.modelspace()
# DO NOT RENDER BY EZDXF for DXF R12
discard = version == "R12"
tol_style = doc.dimstyles.duplicate_entry(
"EZDXF", "LIMITS"
) # type: DimStyle
# not all features are supported by DXF R12:
# zero suppression (DIMTZIN), align (DIMTOLJ) and dec (DIMTDEC) require DXF R2000+
tol_style.set_limits(upper=0.1, lower=0.1, hfactor=0.5, dec=2)
msp.add_linear_dim(
base=(0, 3), p1=(0, 0), p2=(10, 0), dimstyle="limits"
).render(discard=discard)
msp.add_linear_dim(
base=(0, 3), p1=(15, 0), p2=(15.5, 0), dimstyle="limits"
).render(discard=discard)
doc.saveas(OUTDIR / f"dimensions_with_limits_{version}.dxf")
def linear_tutorial_using_tvp():
"""
    For the vertical text alignment `center`, an additional DXF feature exists to move the dimension text vertically
    up and down (DIMTVP). Vertical distance from the dimension line to the text center = text_height * vshift (DIMTVP).
"""
doc = ezdxf.new("R2000", setup=True)
msp = doc.modelspace()
style = doc.dimstyles.duplicate_entry("EZDXF", "TVP") # type: DimStyle
# shift text upwards
style.set_text_align(valign="center", vshift=2.0)
msp.add_linear_dim(
base=(0, 3), p1=(0, 0), p2=(10, 0), dimstyle="TVP"
).render()
msp.add_linear_dim(
base=(0, 3), p1=(15, 0), p2=(15.5, 0), dimstyle="TVP"
).render()
style = doc.dimstyles.duplicate_entry("EZDXF", "TVP2") # type: DimStyle
# shift text downwards
style.set_text_align(valign="center", vshift=-2.0)
msp.add_linear_dim(
base=(0, 7), p1=(0, 5), p2=(10, 5), dimstyle="TVP2"
).render()
msp.add_linear_dim(
base=(0, 7), p1=(15, 5), p2=(15.5, 5), dimstyle="TVP2"
).render()
doc.saveas(OUTDIR / "dimensions_with_dimtvp.dxf")
def linear_tutorial_ext_lines():
doc = ezdxf.new("R12", setup=True)
msp = doc.modelspace()
msp.add_line((0, 0), (3, 0))
attributes = {
"dimexo": 0.5,
"dimexe": 0.5,
"dimdle": 0.5,
"dimblk": ezdxf.ARROWS.none,
"dimclrt": 3,
}
msp.add_linear_dim(
base=(3, 2), p1=(0, 0), p2=(3, 0), dimstyle="EZDXF", override=attributes
).render()
attributes = {
"dimtad": 4,
"dimclrd": 2,
"dimclrt": 4,
}
msp.add_linear_dim(
base=(10, 2),
p1=(7, 0),
p2=(10, 0),
angle=-30,
dimstyle="EZDXF",
override=attributes,
).render()
msp.add_linear_dim(
base=(3, 5),
p1=(0, 10),
p2=(3, 10),
dimstyle="EZDXF",
override=attributes,
).render()
doc.saveas(OUTDIR / "dim_linear_R12_ext_lines.dxf")
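# Naming convention of the dimstyle format strings used below (see the
# "Default DimStyle EZDXF" note in linear_tutorial() above): the unit token
# (M/CM/MM) is the drawing unit, the number is the scale (e.g. 100 for 1:100),
# H25 encodes the dimension text height, and the trailing token is the unit
# shown in the measurement text.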
def linear_EZ_M(fmt):
doc = ezdxf.new("R12", setup=("linetypes", "styles"))
msp = doc.modelspace()
ezdxf.setup_dimstyle(doc, fmt)
msp.add_line((0, 0), (1, 0))
msp.add_linear_dim(base=(0, 1), p1=(0, 0), p2=(1, 0), dimstyle=fmt).render()
doc.saveas(OUTDIR / f"dim_linear_R12_{fmt}.dxf")
def linear_EZ_CM(fmt):
doc = ezdxf.new("R12", setup=("linetypes", "styles"))
msp = doc.modelspace()
ezdxf.setup_dimstyle(doc, fmt)
msp.add_line((0, 0), (100, 0))
msp.add_linear_dim(
base=(0, 100), p1=(0, 0), p2=(100, 0), dimstyle=fmt
).render()
doc.saveas(OUTDIR / f"dim_linear_R12_{fmt}.dxf")
def linear_EZ_MM(fmt):
doc = ezdxf.new("R12", setup=("linetypes", "styles"))
msp = doc.modelspace()
ezdxf.setup_dimstyle(doc, fmt)
msp.add_line((0, 0), (1000, 0))
msp.add_linear_dim(
base=(0, 1000), p1=(0, 0), p2=(1000, 0), dimstyle=fmt
).render()
doc.saveas(OUTDIR / f"dim_linear_R12_{fmt}.dxf")
ALL = True
if __name__ == "__main__":
example_for_all_text_placings_ucs_R12()
example_for_all_text_placings_in_space_R12()
example_for_all_text_placings_ucs_R2007()
example_for_all_text_placings_in_space_R2007()
if ALL:
linear_tutorial("R2007")
linear_tutorial_using_tvp()
linear_tutorial_using_limits("R2000")
linear_tutorial_using_limits("R12")
linear_tutorial_using_tolerances("R2000")
linear_tutorial_using_tolerances("R12")
linear_tutorial("R2007")
linear_tutorial("R12")
example_background_fill("R2007")
example_for_all_text_placings_R12()
example_for_all_text_placings_R2007()
example_multi_point_linear_dimension()
example_random_multi_point_linear_dimension(count=10, length=20)
linear_all_arrow_style("R12")
linear_all_arrow_style(
"R12",
dimltex1="DOT2",
dimltex2="DOT2",
filename="dotted_extension_lines_R12.dxf",
)
linear_all_arrow_style("R2000")
linear_all_arrow_style(
"R2007",
dimltex1="DOT2",
dimltex2="DOT2",
filename="dotted_extension_lines_R2007.dxf",
)
linear_tutorial_ext_lines()
linear_EZ_M("EZ_M_100_H25_CM")
linear_EZ_M("EZ_M_1_H25_CM")
linear_EZ_CM("EZ_CM_100_H25_CM")
linear_EZ_CM("EZ_CM_1_H25_CM")
linear_EZ_MM("EZ_MM_100_H25_MM")
linear_EZ_MM("EZ_MM_1_H25_MM")
|
stable_nalu/network/number_translation.py | wlm2019/Neural-Arithmetic-Units | 147 | 12706716 |
import torch
from ..abstract import ExtendedTorchModule
from ..layer import GeneralizedLayer, GeneralizedCell
class NumberTranslationNetwork(ExtendedTorchModule):
UNIT_NAMES = GeneralizedCell.UNIT_NAMES
def __init__(self, unit_name,
embedding_size=2, # 1 for the number, 1 for the gate ?
hidden_size=2, # 1 for the number, 1 for the gate ?
dictionary_size=30,
writer=None,
**kwags):
super().__init__('network', writer=writer, **kwags)
self.unit_name = unit_name
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.dictionary_size = dictionary_size
self.register_buffer('lstm_zero_state_h', torch.Tensor(hidden_size))
self.register_buffer('lstm_zero_state_c', torch.Tensor(hidden_size))
self.register_buffer('output_zero_state', torch.Tensor(1))
self.embedding = torch.nn.Embedding(dictionary_size, embedding_size)
self.lstm_cell = torch.nn.LSTMCell(embedding_size, hidden_size)
self.output_cell = GeneralizedCell(hidden_size, 1,
unit_name,
writer=self.writer,
name='recurrent_output',
**kwags)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.zeros_(self.lstm_zero_state_h)
torch.nn.init.zeros_(self.lstm_zero_state_c)
torch.nn.init.zeros_(self.output_zero_state)
self.embedding.reset_parameters()
self.lstm_cell.reset_parameters()
self.output_cell.reset_parameters()
def forward(self, x):
"""Performs recurrent iterations over the input.
Arguments:
            x: Expected to have the shape [obs, time]
"""
# Perform recurrent iterations over the input
h_1_tm1 = self.lstm_zero_state_h.repeat(x.size(0), 1)
c_1_tm1 = self.lstm_zero_state_c.repeat(x.size(0), 1)
h_2_tm1 = self.output_zero_state.repeat(x.size(0), 1)
for t in range(x.size(1)):
x_t = x[:, t]
h_0_t = self.embedding(x_t)
h_1_t, c_1_t = self.lstm_cell(h_0_t, (h_1_tm1, c_1_tm1))
h_2_t = self.output_cell(h_1_t, h_2_tm1)
            # Just reuse the previous results if x is a <pad> token
h_2_t = torch.where(x[:, t].view(-1, 1) == 0, h_2_tm1, h_2_t)
            # Prepare for the next iteration
h_1_tm1 = h_1_t
c_1_tm1 = c_1_t
h_2_tm1 = h_2_t
return h_2_t
def extra_repr(self):
return 'unit_name={}, embedding_size={}, hidden_size={}, dictionary_size={}'.format(
self.unit_name, self.embedding_size, self.hidden_size, self.dictionary_size
)
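# Illustrative usage sketch (not from the original repository): assuming 'NALU'
# is one of NumberTranslationNetwork.UNIT_NAMES and that omitting the writer is
# acceptable, a forward pass over a batch of padded token sequences would be:
#
#   net = NumberTranslationNetwork('NALU', dictionary_size=30)
#   tokens = torch.randint(1, 30, (8, 6))   # shape [obs, time]; index 0 is <pad>
#   prediction = net(tokens)                # shape [obs, 1], one number per sequence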
|
sdk/python/pulumi_gcp/monitoring/alert_policy.py | sisisin/pulumi-gcp | 121 | 12706727 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['AlertPolicyArgs', 'AlertPolicy']
@pulumi.input_type
class AlertPolicyArgs:
def __init__(__self__, *,
combiner: pulumi.Input[str],
conditions: pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]],
display_name: pulumi.Input[str],
documentation: Optional[pulumi.Input['AlertPolicyDocumentationArgs']] = None,
enabled: Optional[pulumi.Input[bool]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a AlertPolicy resource.
:param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
:param pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
:param pulumi.Input[str] display_name: A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
:param pulumi.Input['AlertPolicyDocumentationArgs'] documentation: Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
:param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
pulumi.set(__self__, "combiner", combiner)
pulumi.set(__self__, "conditions", conditions)
pulumi.set(__self__, "display_name", display_name)
if documentation is not None:
pulumi.set(__self__, "documentation", documentation)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if notification_channels is not None:
pulumi.set(__self__, "notification_channels", notification_channels)
if project is not None:
pulumi.set(__self__, "project", project)
if user_labels is not None:
pulumi.set(__self__, "user_labels", user_labels)
@property
@pulumi.getter
def combiner(self) -> pulumi.Input[str]:
"""
How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
"""
return pulumi.get(self, "combiner")
@combiner.setter
def combiner(self, value: pulumi.Input[str]):
pulumi.set(self, "combiner", value)
@property
@pulumi.getter
def conditions(self) -> pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]:
"""
A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def documentation(self) -> Optional[pulumi.Input['AlertPolicyDocumentationArgs']]:
"""
Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
"""
return pulumi.get(self, "documentation")
@documentation.setter
def documentation(self, value: Optional[pulumi.Input['AlertPolicyDocumentationArgs']]):
pulumi.set(self, "documentation", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not the policy is enabled. The default is true.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="notificationChannels")
def notification_channels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
"""
return pulumi.get(self, "notification_channels")
@notification_channels.setter
def notification_channels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notification_channels", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
return pulumi.get(self, "user_labels")
@user_labels.setter
def user_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "user_labels", value)
@pulumi.input_type
class _AlertPolicyState:
def __init__(__self__, *,
combiner: Optional[pulumi.Input[str]] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]] = None,
creation_records: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input['AlertPolicyDocumentationArgs']] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering AlertPolicy resources.
:param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
:param pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]] conditions: A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]] creation_records: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
ignored.
:param pulumi.Input[str] display_name: A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
:param pulumi.Input['AlertPolicyDocumentationArgs'] documentation: Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
:param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
:param pulumi.Input[str] name: -
The unique resource name for this condition.
Its syntax is:
projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
[CONDITION_ID] is assigned by Stackdriver Monitoring when
the condition is created as part of a new or updated alerting
policy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
if combiner is not None:
pulumi.set(__self__, "combiner", combiner)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if creation_records is not None:
pulumi.set(__self__, "creation_records", creation_records)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if documentation is not None:
pulumi.set(__self__, "documentation", documentation)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if notification_channels is not None:
pulumi.set(__self__, "notification_channels", notification_channels)
if project is not None:
pulumi.set(__self__, "project", project)
if user_labels is not None:
pulumi.set(__self__, "user_labels", user_labels)
@property
@pulumi.getter
def combiner(self) -> Optional[pulumi.Input[str]]:
"""
How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
"""
return pulumi.get(self, "combiner")
@combiner.setter
def combiner(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "combiner", value)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]:
"""
A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyConditionArgs']]]]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter(name="creationRecords")
def creation_records(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]:
"""
A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
ignored.
"""
return pulumi.get(self, "creation_records")
@creation_records.setter
def creation_records(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AlertPolicyCreationRecordArgs']]]]):
pulumi.set(self, "creation_records", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def documentation(self) -> Optional[pulumi.Input['AlertPolicyDocumentationArgs']]:
"""
Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
"""
return pulumi.get(self, "documentation")
@documentation.setter
def documentation(self, value: Optional[pulumi.Input['AlertPolicyDocumentationArgs']]):
pulumi.set(self, "documentation", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not the policy is enabled. The default is true.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
-
The unique resource name for this condition.
Its syntax is:
projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
[CONDITION_ID] is assigned by Stackdriver Monitoring when
the condition is created as part of a new or updated alerting
policy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notificationChannels")
def notification_channels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
"""
return pulumi.get(self, "notification_channels")
@notification_channels.setter
def notification_channels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notification_channels", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
return pulumi.get(self, "user_labels")
@user_labels.setter
def user_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "user_labels", value)
class AlertPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
combiner: Optional[pulumi.Input[str]] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
A description of the conditions under which some aspect of your system is
considered to be "unhealthy" and the ways to notify people or services
about this state.
To get more information about AlertPolicy, see:
* [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies)
* How-to Guides
* [Official Documentation](https://cloud.google.com/monitoring/alerts/)
## Example Usage
### Monitoring Alert Policy Basic
```python
import pulumi
import pulumi_gcp as gcp
alert_policy = gcp.monitoring.AlertPolicy("alertPolicy",
combiner="OR",
conditions=[gcp.monitoring.AlertPolicyConditionArgs(
condition_threshold=gcp.monitoring.AlertPolicyConditionConditionThresholdArgs(
aggregations=[gcp.monitoring.AlertPolicyConditionConditionThresholdAggregationArgs(
alignment_period="60s",
per_series_aligner="ALIGN_RATE",
)],
comparison="COMPARISON_GT",
duration="60s",
filter="metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
),
display_name="test condition",
)],
display_name="My Alert Policy",
user_labels={
"foo": "bar",
})
```
## Import
AlertPolicy can be imported using any of these accepted formats
```sh
$ pulumi import gcp:monitoring/alertPolicy:AlertPolicy default {{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
:param pulumi.Input[str] display_name: A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
:param pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']] documentation: Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
:param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AlertPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A description of the conditions under which some aspect of your system is
considered to be "unhealthy" and the ways to notify people or services
about this state.
To get more information about AlertPolicy, see:
* [API documentation](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies)
* How-to Guides
* [Official Documentation](https://cloud.google.com/monitoring/alerts/)
## Example Usage
### Monitoring Alert Policy Basic
```python
import pulumi
import pulumi_gcp as gcp
alert_policy = gcp.monitoring.AlertPolicy("alertPolicy",
combiner="OR",
conditions=[gcp.monitoring.AlertPolicyConditionArgs(
condition_threshold=gcp.monitoring.AlertPolicyConditionConditionThresholdArgs(
aggregations=[gcp.monitoring.AlertPolicyConditionConditionThresholdAggregationArgs(
alignment_period="60s",
per_series_aligner="ALIGN_RATE",
)],
comparison="COMPARISON_GT",
duration="60s",
filter="metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"",
),
display_name="test condition",
)],
display_name="My Alert Policy",
user_labels={
"foo": "bar",
})
```
## Import
AlertPolicy can be imported using any of these accepted formats
```sh
$ pulumi import gcp:monitoring/alertPolicy:AlertPolicy default {{name}}
```
:param str resource_name: The name of the resource.
:param AlertPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AlertPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
combiner: Optional[pulumi.Input[str]] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AlertPolicyArgs.__new__(AlertPolicyArgs)
if combiner is None and not opts.urn:
raise TypeError("Missing required property 'combiner'")
__props__.__dict__["combiner"] = combiner
if conditions is None and not opts.urn:
raise TypeError("Missing required property 'conditions'")
__props__.__dict__["conditions"] = conditions
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["documentation"] = documentation
__props__.__dict__["enabled"] = enabled
__props__.__dict__["notification_channels"] = notification_channels
__props__.__dict__["project"] = project
__props__.__dict__["user_labels"] = user_labels
__props__.__dict__["creation_records"] = None
__props__.__dict__["name"] = None
super(AlertPolicy, __self__).__init__(
'gcp:monitoring/alertPolicy:AlertPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
combiner: Optional[pulumi.Input[str]] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]]] = None,
creation_records: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyCreationRecordArgs']]]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
documentation: Optional[pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_channels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
user_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'AlertPolicy':
"""
Get an existing AlertPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] combiner: How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyConditionArgs']]]] conditions: A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AlertPolicyCreationRecordArgs']]]] creation_records: A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
ignored.
:param pulumi.Input[str] display_name: A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
:param pulumi.Input[pulumi.InputType['AlertPolicyDocumentationArgs']] documentation: Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
:param pulumi.Input[bool] enabled: Whether or not the policy is enabled. The default is true.
:param pulumi.Input[str] name: -
The unique resource name for this condition.
Its syntax is:
projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
[CONDITION_ID] is assigned by Stackdriver Monitoring when
the condition is created as part of a new or updated alerting
policy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notification_channels: Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AlertPolicyState.__new__(_AlertPolicyState)
__props__.__dict__["combiner"] = combiner
__props__.__dict__["conditions"] = conditions
__props__.__dict__["creation_records"] = creation_records
__props__.__dict__["display_name"] = display_name
__props__.__dict__["documentation"] = documentation
__props__.__dict__["enabled"] = enabled
__props__.__dict__["name"] = name
__props__.__dict__["notification_channels"] = notification_channels
__props__.__dict__["project"] = project
__props__.__dict__["user_labels"] = user_labels
return AlertPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def combiner(self) -> pulumi.Output[str]:
"""
How to combine the results of multiple conditions to
determine if an incident should be opened.
Possible values are `AND`, `OR`, and `AND_WITH_MATCHING_RESOURCE`.
"""
return pulumi.get(self, "combiner")
@property
@pulumi.getter
def conditions(self) -> pulumi.Output[Sequence['outputs.AlertPolicyCondition']]:
"""
A list of conditions for the policy. The conditions are combined by
AND or OR according to the combiner field. If the combined conditions
evaluate to true, then an incident is created. A policy can have from
one to six conditions.
Structure is documented below.
"""
return pulumi.get(self, "conditions")
@property
@pulumi.getter(name="creationRecords")
def creation_records(self) -> pulumi.Output[Sequence['outputs.AlertPolicyCreationRecord']]:
"""
A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be
ignored.
"""
return pulumi.get(self, "creation_records")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
A short name or phrase used to identify the
condition in dashboards, notifications, and
incidents. To avoid confusion, don't use the same
display name for multiple conditions in the same
policy.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def documentation(self) -> pulumi.Output[Optional['outputs.AlertPolicyDocumentation']]:
"""
Documentation that is included with notifications and incidents related
to this policy. Best practice is for the documentation to include information
to help responders understand, mitigate, escalate, and correct the underlying
problems detected by the alerting policy. Notification channels that have
limited capacity might not show this documentation.
Structure is documented below.
"""
return pulumi.get(self, "documentation")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether or not the policy is enabled. The default is true.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
-
The unique resource name for this condition.
Its syntax is:
projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
[CONDITION_ID] is assigned by Stackdriver Monitoring when
the condition is created as part of a new or updated alerting
policy.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationChannels")
def notification_channels(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Identifies the notification channels to which notifications should be
sent when incidents are opened or closed or when new violations occur
on an already opened incident. Each element of this array corresponds
to the name field in each of the NotificationChannel objects that are
returned from the notificationChannels.list method. The syntax of the
entries in this field is
`projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`
"""
return pulumi.get(self, "notification_channels")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
This field is intended to be used for organizing and identifying the AlertPolicy
objects.The field can contain up to 64 entries. Each key and value is limited
to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values
can contain only lowercase letters, numerals, underscores, and dashes. Keys
must begin with a letter.
"""
return pulumi.get(self, "user_labels")
|
pyltr/metrics/tests/test_roc.py | Haiga/pyltr | 432 | 12706749 | """
Testing for AUC ROC metric.
"""
from . import helpers
import itertools
import numpy as np
import pyltr
import sklearn.metrics
class TestAUCROC(helpers.TestMetric):
def get_metric(self):
return pyltr.metrics.AUCROC()
def get_queries_with_values(self):
for i in range(0, 7):
for tup in itertools.product(*([(0, 1)] * i)):
if any(e != tup[0] for e in tup):
yield (np.array(tup),
sklearn.metrics.roc_auc_score(tup, range(i, 0, -1)))
else:
yield np.array(tup), 0.0
def get_queries(self):
for i in range(0, 7):
for tup in itertools.product(*([(0, 1)] * i)):
yield np.array(tup)
|
beartype/_util/hint/pep/proposal/pep484/utilpep484ref.py | posita/beartype | 1,056 | 12706753 | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`484`-compliant **forward reference type hint utilities**
(i.e., callables specifically applicable to :pep:`484`-compliant forward
reference type hints).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
import typing
from beartype.roar import BeartypeDecorHintForwardRefException
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_7
from beartype._util.cache.utilcachecall import callable_cached
from typing import Any
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ HINTS }....................
# Conditionally define the "typing.ForwardRef" superclass depending on the
# current Python version. This superclass was thankfully publicized under
# Python >= 3.7 after its initial privatization under Python <= 3.6.
HINT_PEP484_FORWARDREF_TYPE: Any = (
typing.ForwardRef if IS_PYTHON_AT_LEAST_3_7 else
typing._ForwardRef # type: ignore [attr-defined]
)
'''
:pep:`484`-compliant **forward reference type** (i.e., class of all forward
reference objects implicitly created by all :mod:`typing` type hint factories
when subscripted by a string).
'''
# ....................{ TESTERS }....................
def is_hint_pep484_forwardref(hint: object) -> bool:
'''
``True`` only if the passed object is a :pep:`484`-compliant **forward
reference type hint** (i.e., instance of the :class:`typing.ForwardRef`
class implicitly replacing all string arguments subscripting :mod:`typing`
objects).
The :mod:`typing` module implicitly replaces all strings subscripting
:mod:`typing` objects (e.g., the ``MuhType`` in ``List['MuhType']``) with
:class:`typing.ForwardRef` instances containing those strings as instance
variables, for nebulous reasons that make little justifiable sense.
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a :pep:`484`-compliant forward
reference type hint.
'''
# Return true only if this hint is an instance of the PEP 484-compliant
# forward reference superclass.
return isinstance(hint, HINT_PEP484_FORWARDREF_TYPE)
# ....................{ GETTERS }....................
@callable_cached
def get_hint_pep484_forwardref_type_basename(hint: Any) -> str:
'''
**Unqualified classname** (i.e., name of a class *not* containing a ``.``
delimiter and thus relative to the fully-qualified name of the lexical
scope declaring that class) referred to by the passed :pep:`484`-compliant
**forward reference type hint** (i.e., instance of the
:class:`typing.ForwardRef` class implicitly replacing all string arguments
subscripting :mod:`typing` objects).
    This getter is memoized by the :func:`callable_cached` decorator for
    efficiency.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
str
Unqualified classname referred to by this :pep:`484`-compliant forward
reference type hint.
Raises
----------
BeartypeDecorHintForwardRefException
If this object is *not* a :pep:`484`-compliant forward reference.
See Also
----------
:func:`is_hint_pep484_forwardref`
Further commentary.
'''
# If this object is *NOT* a PEP 484-compliant forward reference, raise an
# exception.
if not is_hint_pep484_forwardref(hint):
raise BeartypeDecorHintForwardRefException(
f'Type hint {repr(hint)} not forward reference.')
# Else, this object is a PEP 484-compliant forward reference.
# Return the unqualified classname referred to by this reference. Note
# that:
# * This requires violating privacy encapsulation by accessing a dunder
# instance variable unique to the "typing.ForwardRef" class.
# * This object defines a significant number of other "__forward_"-prefixed
# dunder instance variables, which exist *ONLY* to enable the blatantly
# useless typing.get_type_hints() function to avoid repeatedly (and thus
# inefficiently) reevaluating the same forward reference. *sigh*
return hint.__forward_arg__
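# ....................{ EXAMPLE                           }....................
# Illustrative sketch (editor's addition): subscripting a typing factory with a
# string yields an implicit "typing.ForwardRef" instance, which the utilities
# above detect and unwrap. "MuhType" is a hypothetical classname.
def _example_forwardref_roundtrip() -> str:
    # The ForwardRef implicitly created by the string subscript below.
    hint = typing.List['MuhType'].__args__[0]
    assert is_hint_pep484_forwardref(hint)
    # Returns the unqualified classname 'MuhType'.
    return get_hint_pep484_forwardref_type_basename(hint)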
|
precise/stats.py | Tony763/mycroft-precise | 626 | 12706783 | #!/usr/bin/env python3
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""
Module handles computing and formatting basic statistics
about a dataset like false negatives and false positives
"""
import numpy as np
counts_str = '''
=== Counts ===
False Positives: {false_pos}
True Negatives: {true_neg}
False Negatives: {false_neg}
True Positives: {true_pos}
'''.strip()
summary_str = '''
=== Summary ===
{num_correct} out of {total}
{accuracy_ratio:.2%}
{false_pos_ratio:.2%} false positives
{false_neg_ratio:.2%} false negatives
'''
class Stats:
"""Represents a set of statistics from a model run on a dataset"""
def __init__(self, outputs, targets, filenames):
self.outputs = np.array(outputs)
self.targets = np.array(targets)
self.filenames = filenames
self.num_positives = int((self.targets > 0.5).sum())
self.num_negatives = int((self.targets < 0.5).sum())
# Methods
self.false_positives = lambda threshold=0.5: self.calc_metric(False, True, threshold) / max(1,
self.num_negatives)
self.false_negatives = lambda threshold=0.5: self.calc_metric(False, False, threshold) / max(1,
self.num_positives)
self.num_correct = lambda threshold=0.5: (
(self.outputs >= threshold) == self.targets.astype(bool)
).sum()
self.num_incorrect = lambda threshold=0.5: len(self) - self.num_correct(threshold)
self.accuracy = lambda threshold=0.5: self.num_correct(threshold) / max(1, len(self))
def __len__(self):
return len(self.outputs)
def to_np_dict(self):
import numpy as np
return {
'outputs': self.outputs,
'targets': self.targets,
'filenames': np.array(self.filenames)
}
@staticmethod
def from_np_dict(data) -> 'Stats':
return Stats(data['outputs'], data['targets'], data['filenames'])
def to_dict(self, threshold=0.5):
return {
'true_pos': self.calc_metric(True, True, threshold),
'true_neg': self.calc_metric(True, False, threshold),
'false_pos': self.calc_metric(False, True, threshold),
'false_neg': self.calc_metric(False, False, threshold),
}
def counts_str(self, threshold=0.5):
return counts_str.format(**self.to_dict(threshold))
def summary_str(self, threshold=0.5):
return summary_str.format(
num_correct=self.num_correct(threshold), total=len(self),
accuracy_ratio=self.accuracy(threshold),
false_pos_ratio=self.false_positives(threshold),
false_neg_ratio=self.false_negatives(threshold)
)
def calc_filenames(self, is_correct: bool, actual_output: bool, threshold=0.5) -> list:
"""Find a list of files with the given classification"""
return [
filename
for output, target, filename in zip(self.outputs, self.targets, self.filenames)
if ((output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold)
]
def calc_metric(self, is_correct: bool, actual_output: bool, threshold=0.5) -> int:
"""For example, calc_metric(False, True) calculates false positives"""
return int(
((((self.outputs > threshold) == self.targets.astype(bool)) == is_correct) &
((self.outputs > threshold) == actual_output)).sum()
)
@staticmethod
def matches_sample(output, target, threshold, is_correct, actual_output):
"""
Check if a sample with the given network output, target output, and threshold
is the classification (is_correct, actual_output) like true positive or false negative
"""
return (bool(output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold)
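# Illustrative sketch (editor's addition): building a Stats object from raw model
# outputs and printing the formatted reports defined above. The activations,
# labels, and filenames are hypothetical sample data.
def _example_stats_report():
    stats = Stats(
        outputs=[0.9, 0.2, 0.6],                 # network activations per clip
        targets=[1.0, 0.0, 0.0],                 # ground-truth labels per clip
        filenames=['a.wav', 'b.wav', 'c.wav']
    )
    print(stats.counts_str())
    print(stats.summary_str())
    print('False positive rate:', stats.false_positives())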
|
rex/exploit/cgc/__init__.py | tiedaoxiaotubie/rex | 471 | 12706800 | <filename>rex/exploit/cgc/__init__.py<gh_stars>100-1000
from .cgc_type1_exploit import CGCType1Exploit
from .cgc_type2_exploit import CGCType2Exploit
from .cgc_exploit import CGCExploit
from .type1 import CGCType1RopExploit, CGCType1ShellcodeExploit, CGCType1CircumstantialExploit
from .type2 import CGCType2RopExploit, CGCType2ShellcodeExploit
|
models/mli/fairness/reweighing_recipe.py | pragnesh-ai/driverlessai-recipes | 194 | 12706819 | """Debiasing using reweighing"""
"""
This data recipe performs reweighing debiasing using the AIF360 package.
https://github.com/Trusted-AI/AIF360
Kamiran, F., Calders, T. Data preprocessing techniques for classification without discrimination.
Knowl Inf Syst 33, 1–33 (2012). https://doi.org/10.1007/s10115-011-0463-8
The transformer splits the original data as specified and returns training, validation, and test sets
with weights added.
1. Update the folder_path and data_file variables to indicate the location of the dataset(s).
2. validation_test_files lists additional validation or test files that need to be updated with weights.
3. validation_split indicates the percentiles at which the original data should be split to create a
validation and test set. If it's empty, no validation or test set is created. [0.7] would create
a 70/30 training/validation split. [0.7, 0.9] would create a 70/20/10 training, validation, and test split.
4. target is the name of the target column.
5. favorable_label and unfavorable_label are the socially positive and negative target value respectively.
6. protected_group_info is a list of lists, where each sublist contains the name of a protected column,
the privileged level, and the unprivileged level (in that order, matching how the code below reads them).
Each of the protected columns must be binary.
7. From the Datasets section of driverless, click on ADD DATASET and then UPLOAD DATA RECIPE to upload this file.
Be sure to use the specified validation set to be used for validation when a model is trained. The weights
can cause leakage if the validation or test data is used for determining the weights.
"""
import datatable as dt
import numpy as np
import os
from h2oaicore.data import CustomData
from h2oaicore.systemutils import config
class MyReweightingData(CustomData):
_modules_needed_by_name = ['datetime', 'fairlearn', 'aif360', 'sklearn']
@staticmethod
def create_data():
import pandas as pd
from h2oaicore.models_utils import import_tensorflow
tf = import_tensorflow()
# above is because aif360 requires tensorflow
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms.preprocessing.reweighing import Reweighing
"""
Update the below as needed
"""
#########
#########
#########
# Path to the data
folder_path = 'tmp/'
# Data file
data_file = 'housing_train_proc.csv'
full_data_file = folder_path + data_file
if not os.path.isfile(full_data_file):
# for testing, just return something
if config.hard_asserts:
return dt.Frame(np.array([[1, 2, 3], [4, 5, 6]]))
else:
return []
train = pd.read_csv(full_data_file)
validation_test_files = ['housing_test_proc.csv']
validation_split = [0.6, 0.8]
# Target column
target = 'high_priced'
favorable_label = 0
unfavorable_label = 1
        # protected_group_info = [[protected group name 1, privileged level, unprivileged level], [protected group name 2, privileged level, unprivileged level]]
# The protected group columns need to be binary
protected_group_info = [['hispanic', 0, 1], ['black', 0, 1]]
#########
#########
#########
# Set up protected group info
protected_groups = [group_info[0] for group_info in protected_group_info]
dataset_orig = BinaryLabelDataset(df=train, label_names=[target], favorable_label=favorable_label,
unfavorable_label=unfavorable_label,
protected_attribute_names=protected_groups)
privileged_groups = []
unprivileged_groups = []
for protected_group in protected_group_info:
privileged_groups_dict = {}
unprivileged_groups_dict = {}
privileged_groups_dict[protected_group[0]] = protected_group[1]
unprivileged_groups_dict[protected_group[0]] = protected_group[2]
privileged_groups.append(privileged_groups_dict)
unprivileged_groups.append(unprivileged_groups_dict)
# Fit weights on the full dataset to be used on the external test set, if given
RW_full = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW_full.fit(dataset_orig)
# Split the original data into train, validation, and test if applicable
if len(validation_split) == 1:
dataset_orig_train, dataset_orig_valid = dataset_orig.split(validation_split, shuffle=True)
elif len(validation_split) == 2:
dataset_orig_train_valid, dataset_orig_test = dataset_orig.split([validation_split[1]], shuffle=True)
# Fit the weights on both the validation and test set for the test set split
RW_train_valid = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW_train_valid.fit(dataset_orig_train_valid)
dataset_orig_train, dataset_orig_valid = dataset_orig_train_valid.split(
[validation_split[0] / (validation_split[1])], shuffle=True)
else:
dataset_orig_train = dataset_orig
# Fit weights on the training set only
RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW.fit(dataset_orig_train)
dataset_transf_train = RW.transform(dataset_orig_train)
        # Add the weights to the training set
train_df = pd.DataFrame(dataset_transf_train.features, columns=dataset_transf_train.feature_names)
train_df[target] = dataset_transf_train.labels.ravel()
train_df['weights'] = dataset_transf_train.instance_weights.ravel()
        # Collect the output datasets, keyed by output file name
dataset_dict = {}
dataset_dict[data_file.split('.')[0] + "_rw_train.csv"] = train_df
# Add weights to the validation split (if a validation split was specified)
if len(validation_split) >= 1:
dataset_transf_valid = RW.transform(dataset_orig_valid)
valid_df = pd.DataFrame(dataset_transf_valid.features, columns=dataset_transf_valid.feature_names)
valid_df[target] = dataset_transf_valid.labels.ravel()
valid_df['weights'] = dataset_transf_valid.instance_weights.ravel()
dataset_dict[data_file.split('.')[0] + "_rw_validation.csv"] = valid_df
# Add weights to the test split (if a test split was specified)
if len(validation_split) >= 2:
dataset_transf_test = RW_train_valid.transform(dataset_orig_test)
test_df = pd.DataFrame(dataset_transf_test.features, columns=dataset_transf_test.feature_names)
test_df[target] = dataset_transf_test.labels.ravel()
test_df['weights'] = dataset_transf_test.instance_weights.ravel()
dataset_dict[data_file.split('.')[0] + "_rw_test.csv"] = test_df
# Add weights to the test files (If provided)
for valid_file in validation_test_files:
valid = pd.read_csv(folder_path + valid_file)
dataset_valid_orig = BinaryLabelDataset(df=valid, label_names=[target], favorable_label=favorable_label,
unfavorable_label=unfavorable_label,
protected_attribute_names=protected_groups)
dataset_transf_valid = RW_full.transform(dataset_valid_orig)
valid_df = pd.DataFrame(dataset_transf_valid.features, columns=dataset_transf_valid.feature_names)
valid_df[target] = dataset_transf_valid.labels.ravel()
valid_df['weights'] = dataset_transf_valid.instance_weights.ravel()
dataset_dict[valid_file.split('.')[0] + "_rw_transformed.csv"] = valid_df
return dataset_dict
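# Illustrative standalone sketch (editor's addition, not invoked by Driverless AI):
# the same AIF360 reweighing calls used above, applied to a tiny in-memory frame.
# The column names mirror the recipe's defaults and the frame is far too small for
# meaningful debiasing; it only demonstrates the API shape.
def _example_reweighing_weights():
    import pandas as pd
    from aif360.datasets import BinaryLabelDataset
    from aif360.algorithms.preprocessing.reweighing import Reweighing
    df = pd.DataFrame({'high_priced': [1, 0, 1, 0],
                       'black': [1, 1, 0, 0],
                       'income': [1.0, 2.0, 3.0, 4.0]})
    dataset = BinaryLabelDataset(df=df, label_names=['high_priced'],
                                 favorable_label=0, unfavorable_label=1,
                                 protected_attribute_names=['black'])
    rw = Reweighing(unprivileged_groups=[{'black': 1}],
                    privileged_groups=[{'black': 0}])
    rw.fit(dataset)
    transformed = rw.transform(dataset)
    return transformed.instance_weights  # one weight per row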
|
tests/test_defs.py | jayvdb/ipynb | 208 | 12706839 | import pytest
import importlib
@pytest.fixture(
scope='module',
params=[
'ipynb.fs.defs.pure_ipynb.foo',
'ipynb.fs.defs.mixed_ipynb.foo'
]
)
def foo(request):
return importlib.import_module(request.param)
@pytest.fixture(
scope='module',
params=[
'ipynb.fs.defs.pure_ipynb',
'ipynb.fs.defs.mixed_ipynb'
]
)
def init(request):
return importlib.import_module(request.param)
def test_execute(foo):
assert foo.foo() == 'foo'
rawr = foo.RAWR()
assert rawr.rawr() == 'rawr'
def test_no_execute(foo):
assert not hasattr(foo, 'bar')
assert not hasattr(foo, 'r')
def test_allcaps_execute(foo):
assert foo.WAT == 'boo'
def test_all(init):
r = init.RAWR()
assert r.rawr() == 'rawr'
def test_bogus_ipynb():
with pytest.raises(ImportError):
import ipynb.fs.defs.bogus_ipynb as bogus_ipynb
def test_r_notebook():
with pytest.raises(ImportError):
import ipynb.fs.defs.r_notebook
def test_nbformat_2():
with pytest.raises(ImportError):
import ipynb.fs.defs.older_nbformat
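# Illustrative sketch (editor's addition): the import pattern exercised by the tests
# above, as it would appear in downstream code. Only definitions (functions, classes,
# and ALL_CAPS assignments) are pulled out of the notebook; other cells never run.
def _example_defs_import():
    from ipynb.fs.defs.pure_ipynb import foo as foo_defs
    return foo_defs.foo()  # -> 'foo', per test_execute above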
|
mealpy/human_based/CA.py | thieu1995/mealpy | 162 | 12706851 | <filename>mealpy/human_based/CA.py
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 12:09, 02/03/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class OriginalCA(Optimizer):
"""
    The original version of: Cultural Algorithm (CA)
    Based on the Ruby version in the book: Clever Algorithms (Jason Brownlee)
"""
def __init__(self, problem, epoch=10000, pop_size=100, accepted_rate=0.15, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
            pop_size (int): population size, default = 100
            accepted_rate (float): fraction of the population accepted into the belief space, default = 0.15
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.accepted_rate = accepted_rate
## Dynamic variables
self.dyn_belief_space = {
"lb": self.problem.lb,
"ub": self.problem.ub,
}
self.dyn_accepted_num = int(self.accepted_rate * self.pop_size)
    # update situational knowledge (g_best here is an element inside the belief space)
def binary_tournament(self, population):
id1, id2 = np.random.choice(list(range(0, len(population))), 2, replace=False)
return population[id1] if (population[id1][self.ID_FIT] < population[id2][self.ID_FIT]) else population[id2]
def create_faithful(self, lb, ub):
position = np.random.uniform(lb, ub)
fitness = self.get_fitness_position(position=position)
return [position, fitness]
def update_belief_space(self, belief_space, pop_accepted):
pos_list = np.array([solution[self.ID_POS] for solution in pop_accepted])
belief_space["lb"] = np.min(pos_list, axis=0)
belief_space["ub"] = np.max(pos_list, axis=0)
return belief_space
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
            the updated population, a list of [position, fitness] solutions
"""
if mode != "sequential":
print("CA optimizer is support sequential mode only!")
exit(0)
# create next generation
pop_child = [self.create_faithful(self.dyn_belief_space["lb"], self.dyn_belief_space["ub"]) for _ in range(0, self.pop_size)]
# select next generation
pop = [self.binary_tournament(pop + pop_child) for _ in range(0, self.pop_size)]
if self.problem.minmax == "min":
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR])
else:
pop = sorted(pop, key=lambda agent: agent[self.ID_FIT][self.ID_TAR], reverse=True)
# Get accepted faithful
accepted = pop[:self.dyn_accepted_num]
# Update belief_space
self.dyn_belief_space = self.update_belief_space(self.dyn_belief_space, accepted)
return pop |
davarocr/davarocr/davar_common/core/__init__.py | icedream2/DAVAR-Lab-OCR | 387 | 12706872 | <reponame>icedream2/DAVAR-Lab-OCR<gh_stars>100-1000
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : __init__.py
# Abstract :
# Current Version: 1.0.0
# Date : 2021-05-20
##################################################################################################
"""
from .builder import POSTPROCESS, build_postprocess
from .evaluation import DavarDistEvalHook, DavarEvalHook
__all__ = ['POSTPROCESS',
'build_postprocess',
"DavarEvalHook",
"DavarDistEvalHook",
]
|
gui/custom.py | AXeL-dev/Dindo-Bot | 102 | 12706893 | <reponame>AXeL-dev/Dindo-Bot<gh_stars>100-1000
# <NAME>
# Copyright (c) 2018 - 2019 AXeL
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Pango
from lib.tools import fit_position_to_destination
from lib.parser import parse_color
import math
class CustomComboBox(Gtk.ComboBoxText):
def __init__(self, data_list=[], sort=False):
Gtk.ComboBoxText.__init__(self)
# set max chars width
for renderer in self.get_cells():
renderer.props.max_width_chars = 10
renderer.props.ellipsize = Pango.EllipsizeMode.END
# append data
self.append_list(data_list, sort)
def append_list(self, data_list, sort=False, clear=False):
# clear combobox
if clear:
self.remove_all()
# sort data
if sort:
data_list = sorted(data_list)
# append data
for text in data_list:
self.append_text(text)
def sync_with_combo(self, combo, use_contains=False):
if self.get_active() != -1 and combo.get_active() != -1:
# do not allow same text at same time
self_text = self.get_active_text()
combo_text = combo.get_active_text()
if (use_contains and (self_text in combo_text or combo_text in self_text)) or self_text == combo_text:
combo.set_active(-1)
class TextValueComboBox(Gtk.ComboBox):
def __init__(self, data_list=[], model=None, text_key=None, value_key=None, sort_key=None):
Gtk.ComboBox.__init__(self)
# set max chars width
renderer_text = Gtk.CellRendererText()
renderer_text.props.max_width_chars = 10
renderer_text.props.ellipsize = Pango.EllipsizeMode.END
# append data
if model is None:
self.model = Gtk.ListStore(str, str)
else:
self.model = model
self.append_list(data_list, text_key, value_key, sort_key)
self.set_model(self.model)
self.pack_start(renderer_text, True)
self.add_attribute(renderer_text, 'text', 0)
# save data list values (for further use)
self.values = [ item[value_key] for item in data_list ]
def append_list(self, data_list, text_key, value_key, sort_key=None, clear=False):
# clear combobox
if clear:
self.remove_all()
# sort data
if sort_key is not None:
data_list = sorted(data_list, key=lambda item: item[sort_key])
# append data
if text_key is not None and value_key is not None:
for data in data_list:
self.model.append([data[text_key], data[value_key]])
def _get_active(self, index):
active = self.get_active()
if active != -1:
return self.model[active][index]
else:
return None
def get_active_text(self):
return self._get_active(0)
def get_active_value(self):
return self._get_active(1)
def set_active_value(self, value):
self.set_active(self.values.index(value))
def remove_all(self):
self.model.clear()
class CustomTreeView(Gtk.Frame):
def __init__(self, model, columns):
Gtk.Frame.__init__(self)
self.perform_scroll = False
self.model = model
self.columns = columns
self.vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.add(self.vbox)
## ScrolledWindow
scrolled_window = Gtk.ScrolledWindow()
self.vbox.pack_start(scrolled_window, True, True, 0)
# TreeView
self.tree_view = Gtk.TreeView(model)
scrolled_window.add(self.tree_view)
for column in columns:
self.tree_view.append_column(column)
self.tree_view.connect('size-allocate', self.scroll_tree_view)
self.selection = self.tree_view.get_selection()
self.selection.set_mode(Gtk.SelectionMode.SINGLE)
def is_empty(self):
return len(self.model) == 0
def connect(self, event_name, event_callback):
if event_name == 'selection-changed':
self.selection.connect('changed', event_callback)
else:
self.tree_view.connect(event_name, event_callback)
def append_row(self, row, select=True, scroll_to=True):
# append row
self.model.append(row)
# scroll to row
if scroll_to:
self.perform_scroll = True
# select row
if select:
index = len(self.model) - 1
self.select_row(index)
def select_row(self, index):
path = Gtk.TreePath(index)
self.selection.select_path(path)
def get_row_index(self, row_iter):
return self.model.get_path(row_iter).get_indices()[0]
def get_rows_count(self):
return len(self.model)
def get_selected_row(self):
model, tree_iter = self.selection.get_selected()
if tree_iter:
row = []
for i in range(len(self.columns)):
column_value = model.get_value(tree_iter, i)
row.append(column_value)
return row
else:
return None
def remove_selected_row(self):
# remove selected row
model, tree_iter = self.selection.get_selected()
if tree_iter:
model.remove(tree_iter)
def scroll_tree_view(self, widget, event):
if self.perform_scroll:
adj = widget.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
self.perform_scroll = False
class CustomListBox(Gtk.Frame):
def __init__(self, parent=None, allow_moving=True):
Gtk.Frame.__init__(self)
self.parent = parent
self.allow_moving = allow_moving
self.perform_scroll = False
self.add_callback = None
self.delete_callback = None
self.activate_callback = None
## ListBox
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.add(vbox)
self.listbox = Gtk.ListBox()
self.listbox.set_selection_mode(Gtk.SelectionMode.SINGLE)
self.listbox.connect('size-allocate', self.on_size_allocate)
self.listbox.connect('row-activated', self.on_row_activated)
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.add(self.listbox)
vbox.pack_start(scrolled_window, True, True, 0)
## ActionBar
actionbar = Gtk.ActionBar()
vbox.pack_end(actionbar, False, False, 0)
default_buttons_box = ButtonBox(linked=True)
actionbar.pack_start(default_buttons_box)
if allow_moving:
# Move up
self.move_up_button = Gtk.Button()
self.move_up_button.set_tooltip_text('Move up')
self.move_up_button.set_image(Gtk.Image(icon_name='go-up-symbolic'))
self.move_up_button.connect('clicked', self.on_move_up_button_clicked)
default_buttons_box.add(self.move_up_button)
# Move down
self.move_down_button = Gtk.Button()
self.move_down_button.set_tooltip_text('Move down')
self.move_down_button.set_image(Gtk.Image(icon_name='go-down-symbolic'))
self.move_down_button.connect('clicked', self.on_move_down_button_clicked)
default_buttons_box.add(self.move_down_button)
# Delete
self.delete_button = Gtk.Button()
self.delete_button.set_tooltip_text('Delete')
self.delete_button.set_image(Gtk.Image(icon_name='edit-delete-symbolic'))
self.delete_button.connect('clicked', self.on_delete_button_clicked)
default_buttons_box.add(self.delete_button)
# Clear all
self.clear_all_button = Gtk.Button()
self.clear_all_button.set_tooltip_text('Clear all')
self.clear_all_button.set_image(Gtk.Image(icon_name='edit-clear-all-symbolic'))
self.clear_all_button.connect('clicked', self.on_clear_all_button_clicked)
default_buttons_box.add(self.clear_all_button)
# Initialise default buttons status
self.reset_buttons()
# Buttons box
self.buttons_box = ButtonBox(linked=True)
actionbar.pack_end(self.buttons_box)
def on_add(self, callback):
self.add_callback = callback
def on_delete(self, callback):
self.delete_callback = callback
def on_activate(self, callback):
self.activate_callback = callback
def on_row_activated(self, listbox, row):
if self.allow_moving:
rows_count = len(self.get_rows())
index = row.get_index()
# Move up
enable_move_up = True if index > 0 else False
self.move_up_button.set_sensitive(enable_move_up)
# Move down
enable_move_down = True if index < rows_count - 1 else False
self.move_down_button.set_sensitive(enable_move_down)
# Delete
self.delete_button.set_sensitive(True)
# Clear all
self.clear_all_button.set_sensitive(True)
# Invoke activate callback
if self.activate_callback is not None:
self.activate_callback()
def on_size_allocate(self, listbox, event):
if self.perform_scroll:
adj = listbox.get_adjustment()
adj.set_value(adj.get_upper() - adj.get_page_size())
self.perform_scroll = False
def add_button(self, button):
self.buttons_box.add(button)
def append_text(self, text):
# add new row
row = Gtk.ListBoxRow()
label = Gtk.Label(text, xalign=0, margin=5)
row.add(label)
self.listbox.add(row)
self.listbox.show_all()
self.perform_scroll = True
self.select_row(row)
if self.add_callback is not None:
self.add_callback()
def select_row(self, row):
self.listbox.select_row(row)
self.on_row_activated(self.listbox, row)
def get_rows(self):
return self.listbox.get_children()
def is_empty(self):
return len(self.get_rows()) == 0
def get_row_text(self, row):
label = row.get_children()[0]
return label.get_text()
def reset_buttons(self):
if self.allow_moving:
self.move_up_button.set_sensitive(False)
self.move_down_button.set_sensitive(False)
self.delete_button.set_sensitive(False)
if self.is_empty():
self.clear_all_button.set_sensitive(False)
def remove_row(self, row, reset=True):
row_index = row.get_index()
self.listbox.remove(row)
if reset:
self.reset_buttons()
if self.delete_callback is not None:
self.delete_callback(row_index)
def on_delete_button_clicked(self, button):
row = self.listbox.get_selected_row()
self.remove_row(row)
def move_row(self, row, new_index):
self.listbox.select_row(None) # remove selection
self.listbox.remove(row)
self.listbox.insert(row, new_index)
self.select_row(row)
def on_move_up_button_clicked(self, button):
row = self.listbox.get_selected_row()
if row:
index = row.get_index()
self.move_row(row, index - 1)
def on_move_down_button_clicked(self, button):
row = self.listbox.get_selected_row()
if row:
index = row.get_index()
self.move_row(row, index + 1)
def clear(self):
for row in self.get_rows():
self.remove_row(row, False)
self.reset_buttons()
def on_clear_all_button_clicked(self, button):
dialog = Gtk.MessageDialog(text='Confirm clear all?', transient_for=self.parent, buttons=Gtk.ButtonsType.OK_CANCEL, message_type=Gtk.MessageType.QUESTION)
response = dialog.run()
dialog.destroy()
# We only clear when the user presses the OK button
if response == Gtk.ResponseType.OK:
self.clear()
class SpinButton(Gtk.SpinButton):
def __init__(self, min=0, max=100, value=0, step=1, page_step=5):
adjustment = Gtk.Adjustment(value=value, lower=min, upper=max, step_increment=step, page_increment=page_step, page_size=0)
Gtk.SpinButton.__init__(self, adjustment=adjustment)
class ImageLabel(Gtk.Box):
def __init__(self, image, text, padding=0):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=3)
self.set_border_width(padding)
self.add(image)
self.label = Gtk.Label(text, ellipsize=Pango.EllipsizeMode.END)
self.label.set_tooltip_text(text)
self.add(self.label)
def get_text(self):
return self.label.get_text()
class StackListBox(Gtk.Box):
def __init__(self):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
self.count = 0
frame = Gtk.Frame()
scrolled_window = Gtk.ScrolledWindow(hscrollbar_policy=Gtk.PolicyType.NEVER)
self.listbox = Gtk.ListBox()
self.listbox.set_selection_mode(Gtk.SelectionMode.SINGLE)
self.listbox.connect('row-activated', self.on_row_activated)
scrolled_window.add(self.listbox)
frame.add(scrolled_window)
self.pack_start(frame, True, True, 0)
self.stack = Gtk.Stack()
self.pack_end(self.stack, False, False, 0)
def on_row_activated(self, listbox, row):
name = row.get_children()[0].get_text()
self.stack.set_visible_child_name(name)
def append(self, label, widget):
# add listbox label
self.listbox.add(label)
if self.count == 0: # select first row
self.listbox.select_row(self.listbox.get_row_at_index(self.count))
# add stack widget
self.stack.add_named(widget, label.get_text())
self.count += 1
class ButtonBox(Gtk.Box):
def __init__(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=5, centered=False, linked=False):
Gtk.Box.__init__(self, orientation=orientation)
self.buttons_container = Gtk.Box(orientation=orientation)
self.orientation = orientation
self.linked = linked
# set centered
if centered:
self.pack_start(self.buttons_container, True, False, 0)
else:
self.pack_start(self.buttons_container, False, False, 0)
# set linked
if linked:
Gtk.StyleContext.add_class(self.buttons_container.get_style_context(), Gtk.STYLE_CLASS_LINKED)
else:
self.buttons_container.set_spacing(spacing)
def add(self, button):
if self.orientation == Gtk.Orientation.VERTICAL and not self.linked:
hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
hbox.pack_start(button, True, False, 0)
self.buttons_container.add(hbox)
else:
self.buttons_container.add(button)
class MessageBox(Gtk.Box):
def __init__(self, text=None, color='black', enable_buttons=False):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
self.enable_buttons = enable_buttons
# label
self.label = Gtk.Label(text)
self.label.modify_fg(Gtk.StateType.NORMAL, Gdk.color_parse(color))
self.add(self.label)
# question buttons
if enable_buttons:
self.button_box = ButtonBox(linked=True)
self.pack_end(self.button_box, False, False, 0)
# yes
self.yes_button = Gtk.Button()
self.yes_button.set_tooltip_text('Yes')
self.yes_button.set_image(Gtk.Image(icon_name='emblem-ok-symbolic'))
self.button_box.add(self.yes_button)
# no
self.no_button = Gtk.Button()
self.no_button.set_tooltip_text('No')
self.no_button.set_image(Gtk.Image(icon_name='window-close-symbolic'))
self.button_box.add(self.no_button)
def print_message(self, text, is_question=False):
self.label.set_text(text)
if self.enable_buttons:
if is_question:
self.button_box.show()
else:
self.button_box.hide()
self.show()
class MenuButton(Gtk.Button):
def __init__(self, text=None, position=Gtk.PositionType.BOTTOM, icon_name=None, padding=2):
Gtk.Button.__init__(self, text)
if icon_name is not None:
self.set_image(Gtk.Image(icon_name=icon_name))
self.connect('clicked', self.on_clicked)
# popover
self.popover = Gtk.Popover(relative_to=self, position=position)
self.popover.set_border_width(padding)
def on_clicked(self, button):
self.popover.show_all()
def add(self, widget):
self.popover.add(widget)
class MenuImage(Gtk.EventBox):
def __init__(self, icon_name='pan-down-symbolic', pixel_size=13, position=Gtk.PositionType.BOTTOM, padding=2):
Gtk.EventBox.__init__(self)
self.add(Gtk.Image(icon_name=icon_name, pixel_size=pixel_size))
self.connect('button-press-event', self.on_button_press)
self.connect('enter-notify-event', self.on_enter_notify)
# popover
self.popover = Gtk.Popover(relative_to=self, position=position)
self.popover.set_border_width(padding)
def on_button_press(self, widget, event):
self.popover.show_all()
def on_enter_notify(self, widget, event):
window = self.get_window()
window.set_cursor(Gdk.Cursor(Gdk.CursorType.HAND1))
def set_widget(self, widget):
self.popover.add(widget)
class FileChooserButton(Gtk.FileChooserButton):
def __init__(self, title, filter=None):
Gtk.FileChooserButton.__init__(self, title=title)
if filter is not None and len(filter) > 1:
name, pattern = filter
file_filter = Gtk.FileFilter()
file_filter.set_name('%s (%s)' % (name, pattern))
file_filter.add_pattern(pattern)
self.add_filter(file_filter)
class MiniMap(Gtk.Frame):
point_colors = {
'Monster': 'red',
'Resource': 'green',
'NPC': 'blue',
'None': 'black'
}
def __init__(self, background_color='#CECECE', show_grid=True, grid_color='#DDDDDD', grid_size=(15, 15), point_radius=3):
Gtk.Frame.__init__(self)
self.points = []
self.point_opacity = 0.7
self.point_radius = point_radius
self.show_grid = show_grid
self.grid_color = grid_color
self.grid_size = grid_size
self.background_color = background_color
self.use_origin_colors = False
self.add_borders = False
self.drawing_area = Gtk.DrawingArea()
self.drawing_area.set_has_tooltip(True)
self.drawing_area.connect('draw', self.on_draw)
self.drawing_area.connect('query-tooltip', self.on_query_tooltip)
self.add(self.drawing_area)
def set_use_origin_colors(self, value):
self.use_origin_colors = value
if self.points:
self.drawing_area.queue_draw()
def set_add_borders(self, value):
self.add_borders = value
if self.points:
self.drawing_area.queue_draw()
def get_color_key(self):
return 'origin_color' if self.use_origin_colors else 'color'
def add_point(self, point, name=None, color=None, redraw=True):
# set point coordinates
new_point = {
'x': point['x'],
'y': point['y'],
'width': point['width'],
'height': point['height']
}
# set point name
if name is not None:
new_point['name'] = name
elif 'name' in point:
new_point['name'] = point['name']
else:
new_point['name'] = None
# set point color
new_point['color'] = color
new_point['origin_color'] = parse_color(point['color'], as_hex=True) if 'color' in point else None
# add point
self.points.append(new_point)
if redraw:
self.drawing_area.queue_draw()
def add_points(self, points, name=None, color=None):
for point in points:
self.add_point(point, name, color, False)
self.drawing_area.queue_draw()
def remove_point(self, index):
if 0 <= index < len(self.points):
del self.points[index]
self.drawing_area.queue_draw()
def clear(self):
if self.points:
self.points = []
self.drawing_area.queue_draw()
def on_draw(self, widget, cr):
drawing_area = widget.get_allocation()
square_width, square_height = self.grid_size
cr.set_line_width(1)
# set color function
def set_color(value, opacity=1.0):
color = Gdk.color_parse(value)
cr.set_source_rgba(float(color.red) / 65535, float(color.green) / 65535, float(color.blue) / 65535, opacity)
# fill background with color
if self.background_color:
cr.rectangle(0, 0, drawing_area.width, drawing_area.height)
set_color(self.background_color)
cr.fill()
# draw grid lines
if self.show_grid:
set_color(self.grid_color)
# draw vertical lines
for x in range(square_width, drawing_area.width, square_width + 1): # +1 for line width
cr.move_to(x + 0.5, 0) # +0.5 for smooth line
cr.line_to(x + 0.5, drawing_area.height)
# draw horizontal lines
for y in range(square_height, drawing_area.height, square_height + 1):
cr.move_to(0, y + 0.5)
cr.line_to(drawing_area.width, y + 0.5)
cr.stroke()
# draw points
for point in self.points:
            # fit the point to the drawing area (keep this here: it matters when the drawing area gets resized)
x, y = fit_position_to_destination(point['x'], point['y'], point['width'], point['height'], drawing_area.width, drawing_area.height)
if self.add_borders:
set_color('black')
cr.arc(x, y, self.point_radius, 0, 2*math.pi)
if self.add_borders:
cr.stroke_preserve()
color_key = self.get_color_key()
color = self.point_colors['None'] if point[color_key] is None else point[color_key]
set_color(color, self.point_opacity)
cr.fill()
def get_tooltip_widget(self, point):
# on draw function
def on_draw(widget, cr):
cr.set_line_width(1)
# draw point
color_key = self.get_color_key()
color = Gdk.color_parse(point[color_key])
cr.set_source_rgba(float(color.red) / 65535, float(color.green) / 65535, float(color.blue) / 65535, self.point_opacity)
cr.arc(self.point_radius, self.point_radius, self.point_radius, 0, 2*math.pi)
cr.fill()
# tooltip widget
if point['name'] is not None:
widget = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=3)
color_key = self.get_color_key()
if point[color_key] is not None:
drawing_area = Gtk.DrawingArea()
point_diameter = self.point_radius*2
drawing_area.set_size_request(point_diameter, point_diameter)
drawing_area.connect('draw', on_draw)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
box.pack_start(drawing_area, True, False, 0)
widget.add(box)
widget.add(Gtk.Label(point['name']))
widget.show_all()
else:
widget = None
return widget
def on_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
drawing_area = self.drawing_area.get_allocation()
tooltip_widget = None
# check if a point is hovered
for point in self.points:
# fit point to drawing area
point_x, point_y = fit_position_to_destination(point['x'], point['y'], point['width'], point['height'], drawing_area.width, drawing_area.height)
# TODO: the check below should be circular, not rectangular
if point_x - self.point_radius <= x <= point_x + self.point_radius and point_y - self.point_radius <= y <= point_y + self.point_radius:
tooltip_widget = self.get_tooltip_widget(point)
break
# if so
if tooltip_widget is not None:
# set tooltip widget
tooltip.set_custom(tooltip_widget)
# show the tooltip
return True
else:
return False
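# Illustrative usage sketch (assumes a running Gtk application and the helper
# functions fit_position_to_destination/parse_color defined elsewhere in this module):
#     minimap = MiniMap(grid_size=(15, 15), point_radius=3)
#     minimap.set_size_request(200, 200)
#     minimap.add_points([{'x': 10, 'y': 20, 'width': 100, 'height': 100}],
#                        name='Monster', color=MiniMap.point_colors['Monster'])
#     window.add(minimap)
#     window.show_all()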
|
tools/accuracy_checker/accuracy_checker/annotation_converters/lmdb.py | APrigarina/open_model_zoo | 1,031 | 12706912 | """
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
from .format_converter import DirectoryBasedAnnotationConverter, ConverterReturn
from ..utils import UnsupportedPackage
from ..representation import CharacterRecognitionAnnotation
from ..config import BoolField
try:
import lmdb
except ImportError as import_error:
lmdb = UnsupportedPackage("lmdb", import_error.msg)
class LMDBConverter(DirectoryBasedAnnotationConverter):
__provider__ = 'lmdb_text_recognition_database'
annotation_types = (CharacterRecognitionAnnotation, )
supported_symbols = '0123456789abcdefghijklmnopqrstuvwxyz'
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'lower_case': BoolField(description='Convert GT text to lowercase.', optional=True)
})
return configuration_parameters
def configure(self):
super().configure()
self.lower_case = self.get_value_from_config('lower_case')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""Reads data from disk and returns dataset in converted for AC format
Args:
check_content (bool, optional): Check if content is valid. Defaults to False.
progress_callback (bool, optional): Display progress. Defaults to None.
progress_interval (int, optional): Units to display progress. Defaults to 100 (percent).
Returns:
[type]: Converted dataset
"""
annotations = []
content_errors = None if not check_content else []
lmdb_env = lmdb.open(bytes(self.data_dir), readonly=True)
with lmdb_env.begin(write=False) as txn:
num_iterations = int(txn.get('num-samples'.encode()))
for index in range(1, num_iterations + 1):
label_key = f'label-{index:09d}'.encode()
text = txn.get(label_key).decode('utf-8')
if self.lower_case:
text = text.lower()
if progress_callback is not None and index % progress_interval == 0:
progress_callback(index / num_iterations * 100)
if check_content:
                    img_key = f'image-{index:09d}'.encode()
image_bytes = txn.get(img_key)
image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_ANYCOLOR)
if image is None:
                        content_errors.append(f'image-{index:09d}: does not exist')
annotations.append(CharacterRecognitionAnnotation(index, text))
label_map = {ind: str(key) for ind, key in enumerate(self.supported_symbols)}
meta = {'label_map': label_map, 'blank_label': len(label_map)}
return ConverterReturn(annotations, meta, content_errors)
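# Illustrative accuracy_checker dataset entry for this converter (field names
# follow the parameters declared above; the exact schema may differ between AC versions):
#     annotation_conversion:
#         converter: lmdb_text_recognition_database
#         data_dir: <path to the LMDB database>
#         lower_case: True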
|
setup.py | nigoroll/django-rest-framework-jwt | 165 | 12706918 | <filename>setup.py
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
setup(
package_dir={"": "src"},
packages=find_packages(where="src"),
)
|
organize/filters/created.py | tank0226/organize | 1,231 | 12706941 | import sys
from typing import Dict, Optional, SupportsFloat
import pendulum # type: ignore
from pathlib import Path
from organize.utils import DotDict
from .filter import Filter
class Created(Filter):
"""
Matches files by created date
:param int years:
specify number of years
:param int months:
specify number of months
:param float weeks:
specify number of weeks
:param float days:
specify number of days
:param float hours:
specify number of hours
:param float minutes:
specify number of minutes
:param float seconds:
specify number of seconds
:param str mode:
either 'older' or 'newer'. 'older' matches all files created before the given
time, 'newer' matches all files created within the given time.
(default = 'older')
:param str timezone:
specify timezone
:returns:
- ``{created.year}`` -- the year the file was created
- ``{created.month}`` -- the month the file was created
- ``{created.day}`` -- the day the file was created
- ``{created.hour}`` -- the hour the file was created
- ``{created.minute}`` -- the minute the file was created
- ``{created.second}`` -- the second the file was created
Examples:
- Show all files on your desktop created at least 10 days ago:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Desktop'
filters:
- created:
days: 10
actions:
- echo: 'Was created at least 10 days ago'
- Show all files on your desktop which were created within the last 5 hours:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Desktop'
filters:
- created:
hours: 5
mode: newer
actions:
- echo: 'Was created within the last 5 hours'
- Sort pdfs by year of creation:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Documents'
filters:
- extension: pdf
- created
actions:
- move: '~/Documents/PDF/{created.year}/'
- Use specific timezone when processing files
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Documents'
filters:
- extension: pdf
- created:
timezone: "Europe/Moscow"
actions:
- move: '~/Documents/PDF/{created.day}/{created.hour}/'
"""
def __init__(
self,
years=0,
months=0,
weeks=0,
days=0,
hours=0,
minutes=0,
seconds=0,
mode="older",
timezone=pendulum.tz.local_timezone(),
) -> None:
self._mode = mode.strip().lower()
if self._mode not in ("older", "newer"):
raise ValueError("Unknown option for 'mode': must be 'older' or 'newer'.")
self.is_older = self._mode == "older"
self.timezone = timezone
self.timedelta = pendulum.duration(
years=years,
months=months,
weeks=weeks,
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
)
print(bool(self.timedelta))
def pipeline(self, args: DotDict) -> Optional[Dict[str, pendulum.DateTime]]:
created_date = self._created(args.path)
# Pendulum bug: https://github.com/sdispater/pendulum/issues/387
# in_words() is a workaround: total_seconds() returns 0 if years are given
if self.timedelta.in_words():
is_past = (created_date + self.timedelta).is_past()
match = self.is_older == is_past
else:
match = True
if match:
return {"created": created_date}
return None
def _created(self, path: Path) -> pendulum.DateTime:
# see https://stackoverflow.com/a/39501288/300783
stat = path.stat()
time = 0 # type: SupportsFloat
if sys.platform.startswith("win"):
time = stat.st_ctime
else:
try:
time = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
time = stat.st_mtime
return pendulum.from_timestamp(float(time), tz=self.timezone)
def __str__(self):
return "[Created] All files %s than %s" % (
self._mode,
self.timedelta.in_words(),
)
|
test/bibliopixel/util/image/extract_gif_lines_test.py | rec/leds | 253 | 12706943 | import unittest
from unittest.mock import patch
from bibliopixel.util.image import extract_gif_lines
class ExtractGifLinesTest(unittest.TestCase):
def test_extract(self):
actual = list(extract_gif_lines._extract(GIF_LINES))
self.assertEqual(actual, EXPECTED1)
def test_extract_gif_lines(self):
actual = list(extract_gif_lines.extract_gif_lines(GIF_LINES))
self.assertEqual(actual, EXPECTED2)
def test_errors(self):
actual = list(extract_gif_lines.extract_gif_lines(BAD_LINES))
self.assertEqual(actual, EXPECTED2)
GIF_LINES = """
# Here's some stuff.
# now code
.. code-block:: yaml
math.frog(23)
print('glog')
# But there's no GIF file.
# More code:
.. code-block:: yaml
animation: BiblioPixelAnimations.matrix.MatrixRain
shape: [2, 2]
.. code-block:: yaml
animation: BiblioPixelAnimations.matrix.MatrixRain
shape: [32, 32]
.. image:: https://raw.githubusercontent.com/ManiacalLabs/DocsFiles/master/\
BiblioPixel/doc/bibliopixel/animations/something.gif
.. code-block:: yaml
animation: .split
shape: 128
.. image:: https://raw.githubusercontent.com/ManiacalLabs/DocsFiles/master/\
BiblioPixel/doc/bibliopixel/animations/minimal.gif
""".splitlines()
BAD_LINES = GIF_LINES + """
.. code-block:: json
}}}
... image: blah.gif
""".splitlines()
YAML_LINES_1 = """\
animation: BiblioPixelAnimations.matrix.MatrixRain
shape: [32, 32]
""".splitlines()
YAML_LINES_2 = """\
animation: .split
shape: 128
""".splitlines()
EXPECTED1 = [
('doc/bibliopixel/animations/something.gif', YAML_LINES_1),
('doc/bibliopixel/animations/minimal.gif', YAML_LINES_2)]
DATA1 = {
'animation': 'BiblioPixelAnimations.matrix.MatrixRain',
'shape': [32, 32]}
DATA2 = {
'animation': '.split',
'shape': 128}
EXPECTED2 = [
('doc/bibliopixel/animations/something.gif', DATA1),
('doc/bibliopixel/animations/minimal.gif', DATA2)]
|
kitti360scripts/devkits/convertOxtsPose/python/testPoseToOxts.py | carloradice/kitti360Scripts | 214 | 12706958 | <reponame>carloradice/kitti360Scripts<gh_stars>100-1000
# Test script for loading Oxts data and convert to Mercator coordinate
import os
from data import loadPoses
from utils import postprocessPoses
from convertPoseToOxts import convertPoseToOxts
if __name__=="__main__":
# root dir of KITTI-360
if 'KITTI360_DATASET' in os.environ:
kitti360_dir = os.environ['KITTI360_DATASET']
else:
kitti360_dir = os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..', '..')
# load poses
seq_id = 0
pose_file = os.path.join(kitti360_dir, 'data_poses', '2013_05_28_drive_%04d_sync'%seq_id, 'poses.txt')
if not os.path.isfile(pose_file):
raise ValueError('%s does not exist! \nPlease specify KITTI360_DATASET in your system path.\nPlease check if you have downloaded system poses (data_poses.zip) and unzipped them under KITTI360_DATASET' % pose_file)
[ts, poses] = loadPoses(pose_file)
print('Loaded pose file %s' % pose_file)
# convert coordinate system from
# x=forward, y=left, z=up
# to
# x=forward, y=right, z=down
poses = postprocessPoses(poses)
# convert to lat/lon coordinate
oxts = convertPoseToOxts(poses)
# write to file
output_dir = 'output'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
output_file = '%s/2013_05_28_drive_%04d_sync_pose2oxts.txt'% (output_dir, seq_id)
with open(output_file, 'w') as f:
for oxts_ in oxts:
oxts_ = ' '.join(['%.6f'%x for x in oxts_])
f.write('%s\n'%oxts_)
print('Output written to %s' % output_file)
|
st3/mdpopups/pygments/lexers/agile.py | evandroforks/mdpopups | 109 | 12706992 | <reponame>evandroforks/mdpopups
# -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from .lisp import SchemeLexer
from .jvm import IokeLexer, ClojureLexer
from .python import PythonLexer, PythonConsoleLexer, \
PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
from .ruby import RubyLexer, RubyConsoleLexer, FancyLexer
from .perl import PerlLexer, Perl6Lexer
from .d import CrocLexer, MiniDLexer
from .iolang import IoLexer
from .tcl import TclLexer
from .factor import FactorLexer
from .scripting import LuaLexer, MoonScriptLexer
__all__ = []
|
models/multi_speech_model.py | gaoyiyeah/KWS-CTC | 340 | 12706997 | from __future__ import division
import tensorflow as tf
import speech_model
class MultiSpeechModel(object):
def __init__(self, num_gpus):
self.num_gpus = num_gpus
self._init_inference = False
self._init_cost = False
self._init_train = False
def init_inference(self, config):
batch_size = config['batch_size']
assert batch_size % self.num_gpus == 0, \
"Batch size must be divisible by the number of GPUs."
batch_per_gpu = batch_size // self.num_gpus
self._models = []
for i in range(self.num_gpus):
with tf.device('/gpu:{}'.format(i)):
model = speech_model.SpeechModel()
config['batch_size'] = batch_per_gpu
model.init_inference(config)
tf.get_variable_scope().reuse_variables()
self._models.append(model)
self._init_inference = True
def init_cost(self):
assert self._init_inference, "Must init inference before cost."
for i in range(self.num_gpus):
with tf.device('/gpu:{}'.format(i)):
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
self._models[i].init_cost()
costs = [model.cost for model in self._models]
zero = tf.constant(0.0)
finite_costs = [tf.where(tf.is_finite(c), c, zero) for c in costs]
self._cost = tf.div(tf.add_n(finite_costs),
self.num_gpus)
self._init_cost = True
def init_train(self, config):
assert self._init_inference, "Must init inference before train."
assert self._init_cost, "Must init cost before train."
learning_rate = config['learning_rate']
self._momentum_val = config['momentum']
max_grad_norm = config['max_grad_norm']
decay_steps = config['lr_decay_steps']
decay_rate = config['lr_decay_rate']
self._momentum = tf.Variable(0.5, trainable=False)
self._global_step = step = tf.Variable(0, trainable=False)
self.lr = tf.train.exponential_decay(learning_rate, step,
decay_steps, decay_rate, staircase=True)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
ema = tf.train.ExponentialMovingAverage(0.99, name="avg")
avg_cost_op = ema.apply([self.cost])
self._avg_cost = ema.average(self.cost)
grads = []
for i in range(self.num_gpus):
with tf.device('/gpu:{}'.format(i)):
tvars = tf.trainable_variables()
grads.append(tf.gradients(self._models[i].cost, tvars))
average_grads = _average_gradients(grads)
scaled_grads, norm = tf.clip_by_global_norm(average_grads, max_grad_norm)
self._grad_norm = norm
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
optimizer = tf.train.MomentumOptimizer(self.lr, self._momentum)
with tf.control_dependencies([avg_cost_op]):
self._train_op = optimizer.apply_gradients(zip(scaled_grads, tvars),
global_step=step)
self._init_train = True
def feed_dict(self, inputs, labels=None):
"""
Constructs the feed dictionary from given inputs necessary to run
an operations for the model.
Args:
inputs : List of 2D numpy array input spectrograms. Should be
of shape [input_dim x time]
labels : List of labels for each item in the batch. Each label
should be a list of integers. If label=None does not feed the
label placeholder (for e.g. inference only).
Returns:
A dictionary of placeholder keys and feed values.
"""
feed_dict = {}
batches = _split_batch(self.num_gpus, inputs, labels)
for model, (i, l) in zip(self._models, batches):
feed_dict.update(model.feed_dict(i, labels=l))
return feed_dict
def start_momentum(self, session):
m = self._momentum.assign(self._momentum_val)
session.run([m])
def set_mean_std(self, mean, std, session):
self._models[0].set_mean_std(mean, std, session)
@property
def cost(self):
assert self._init_cost, "Must init cost."
return self._cost
@property
def avg_cost(self):
assert self._init_train, "Must init train."
return self._avg_cost
@property
def grad_norm(self):
assert self._init_train, "Must init train."
return self._grad_norm
@property
def global_step(self):
assert self._init_train, "Must init train."
return self._global_step
@property
def input_dim(self):
assert self._init_inference, "Must init inference."
return self._models[0].input_dim
@property
def output_dim(self):
assert self._init_inference, "Must init inference."
return self._models[0].output_dim
@property
def train_op(self):
assert self._init_train, "Must init train."
return self._train_op
def _average_gradients(model_grads):
"""
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
        model_grads: List of lists of gradients, one list per model.
Returns:
List of gradients where each gradient has been averaged
across all models.
"""
average_grads = []
for grads in zip(*model_grads):
grads = [tf.expand_dims(g, 0) for g in grads]
# Average over the 'model' dimension.
grad = tf.concat(grads, axis=0)
grad = tf.reduce_mean(grad, 0)
average_grads.append(grad)
return average_grads
def _split_batch(num_gpus, data, labels=None):
"""
Split a set of data into batch_size // num_gpus batches.
Args:
inputs : List of 2D numpy array input spectrograms. Should be
of shape [input_dim x time]
labels : List of labels for each item in the batch. Each label
should be a list of integers. If labels=None the corresponding
labels item for each batch will also be None.
Returns:
A num_gpus length list of (inputs, labels) of
the same types as above but with batch_size // num_gpus
entries in each.
"""
batch_size = len(data)
n = batch_size // num_gpus
batches = []
for i in range(0, batch_size, n):
batch = [data[i:i + n], None]
if labels:
batch[1] = labels[i:i + n]
batches.append(batch)
return batches
|
tests/test_simulation.py | SebastianoF/pyro2 | 151 | 12707010 | <filename>tests/test_simulation.py
import simulation_null as sn
from util import runparams
import mesh.patch as patch
import mesh.boundary as bnd
class TestSimulation(object):
@classmethod
def setup_class(cls):
""" this is run once for each class before any tests """
pass
@classmethod
def teardown_class(cls):
""" this is run once for each class after all tests """
pass
def setup_method(self):
""" this is run before each test """
self.rp = runparams.RuntimeParameters()
self.rp.params["driver.tmax"] = 1.0
self.rp.params["driver.max_steps"] = 100
self.rp.params["driver.init_tstep_factor"] = 0.5
self.rp.params["driver.max_dt_change"] = 1.2
self.rp.params["driver.fix_dt"] = -1.0
self.sim = sn.NullSimulation("test", "test", self.rp)
myg = patch.Grid2d(8, 16)
myd = patch.CellCenterData2d(myg)
bc = bnd.BC()
myd.register_var("a", bc)
myd.create()
self.sim.cc_data = myd
def teardown_method(self):
""" this is run after each test """
self.rp = None
self.sim = None
def test_finished_n(self):
self.sim.n = 1000
assert self.sim.finished()
def test_finished_t(self):
self.sim.cc_data.t = 2.0
assert self.sim.finished()
def test_compute_timestep(self):
# set a dt and n = 0, then init_tstep_factor should kick in
self.sim.dt = 2.0
self.sim.n = 0
self.sim.compute_timestep()
assert self.sim.dt == 1.0
# now set dt_old and a new dt and see if the max_dt_change kicks in
self.sim.n = 1.0
self.sim.dt_old = 1.0
self.sim.dt = 2.0
self.sim.compute_timestep()
assert self.sim.dt == 1.2
# now test what happens if we go over tmax
self.sim.cc_data.t = 0.75
        self.sim.dt = 0.5
self.sim.compute_timestep()
assert self.sim.dt == 0.25
def test_grid_setup():
rp = runparams.RuntimeParameters()
rp.params["mesh.nx"] = 8
rp.params["mesh.ny"] = 16
rp.params["mesh.xmin"] = 0.0
rp.params["mesh.xmax"] = 1.0
rp.params["mesh.ymin"] = 0.0
rp.params["mesh.ymax"] = 2.0
g = sn.grid_setup(rp)
assert g.nx == 8
assert g.ny == 16
assert g.dx == 1.0/8
assert g.dy == 1.0/8
|
applications/DEMApplication/tests/test_glued_particles.py | lkusch/Kratos | 778 | 12707035 | import os
import KratosMultiphysics as Kratos
from KratosMultiphysics import Logger
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage as DEM_analysis_stage
import auxiliary_functions_for_tests
this_working_dir_backup = os.getcwd()
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class GluedParticlesTestSolution(DEM_analysis_stage.DEMAnalysisStage, KratosUnittest.TestCase):
@classmethod
def GetMainPath(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "glued_particles_tests_files")
def GetProblemNameWithPath(self):
return os.path.join(self.main_path, self.DEM_parameters["problem_name"].GetString())
def FinalizeSolutionStep(self):
super().FinalizeSolutionStep()
tolerance = 1e-4
for node in self.spheres_model_part.Nodes:
angular_velocity = node.GetSolutionStepValue(Kratos.ANGULAR_VELOCITY)
if node.Id == 1:
if self.time > 0.01:
self.assertAlmostEqual(angular_velocity[0], 2.0, delta=tolerance)
if self.time > 0.499999 and self.time < 0.5000001:
self.assertAlmostEqual(node.X, -1.0, delta=tolerance)
self.assertAlmostEqual(node.Y, 0.6634116060768411, delta=tolerance)
self.assertAlmostEqual(node.Z, 0.21612092234725555, delta=tolerance)
if self.time > 0.999999 and self.time < 1.0000001:
                    self.assertAlmostEqual(node.X, -1.0, delta=tolerance)
self.assertAlmostEqual(node.Y, 0.6362810292697275, delta=tolerance)
self.assertAlmostEqual(node.Z, -0.16645873461885752, delta=tolerance)
def Finalize(self):
self.procedures.RemoveFoldersWithResults(str(self.main_path), str(self.problem_name), '')
super().Finalize()
class TestGluedParticles(KratosUnittest.TestCase):
def setUp(self):
pass
@classmethod
def test_Glued_Particles_1(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "glued_particles_tests_files")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
model = Kratos.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(GluedParticlesTestSolution, model, parameters_file_name, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
if __name__ == "__main__":
Kratos.Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
KratosUnittest.main()
|
Algorithms/Longest_Consecutive_Sequence.py | abdzitter/Daily-Coding-DS-ALGO-Practice | 289 | 12707045 | <gh_stars>100-1000
# lcs = longest_consecutive_series
# ccn = count_of_consecutive_numbers
class Solution(object):  # solution class
    def longestConsecutive(self, values):  # returns the length of the longest consecutive run
        lcs = 0  # initialize the best run length
        for i in values:  # iterate over the given values
            if i-1 not in values:  # i starts a new consecutive run
value = i
ccn = 0
while i in values:
                    i += 1  # increment to the next consecutive value
                    ccn += 1  # increment the current run length
                lcs = max(lcs, ccn)  # keep the longest run found so far
return lcs
print(" Length of LCS is ",Solution().longestConsecutive({13,15,19,16,21,17,18,23,1,4})) #Calling two function Solution and longestConsecutive
'''
longestConsecutive is called with the given set of values.
longest_consecutive_series (lcs) is initialised to 0.
The outer loop then iterates over the values in the set:
for i = 1 (True ):-
if i-1 not in values ( True ):-
value = 1
count_of_consecutive_numbers = 0
for i =1 (True )
i +=1 , i=2
count_of_consecutive_numbers = 1
max will be find between longest_consecutive_series,count_of_consecutive_numbers
for i =2 ( False )
back to main loop.
for i = 4 ( True ) :-
if i-1 not in values ( True ):-
value = 4
count_of_consecutive_numbers = 0
for i = 4 (True )
i +=1 , i = 5
count_of_consecutive_numbers = 1
longest_consecutive_series =1
for i = 5 ( False )
back to main loop
    i = 5 is not in the set, so the outer loop never visits it
for i = 13 (True ) :-
if i-1 not in values ( True ):-
value = 13
count_of_consecutive_numbers = 0
for i = 13 ( True )
            i +=1 , i = 14
            count_of_consecutive_numbers = 1
            longest_consecutive_series = 1
        for i = 14 ( False )
        back to main loop
    (14 is not in the set, so the outer loop never visits it)
for i = 15 (True ):-
if i-1 not in values ( True ):-
value = 15
count_of_consecutive_numbers = 0
for i = 15 ( True )
i +=1 , i = 16
count_of_consecutive_numbers = 1
longest_consecutive_series =1
for i = 16 ( True )
i +=1 , i = 17
count_of_consecutive_numbers = 2
longest_consecutive_series =2
for i = 17 ( True )
i +=1 , i = 18
count_of_consecutive_numbers = 3
longest_consecutive_series =3
for i = 18 ( True )
i +=1 , i = 19
count_of_consecutive_numbers = 4
longest_consecutive_series =4
for i = 19 ( True )
i +=1 , i = 20
count_of_consecutive_numbers = 5
longest_consecutive_series =5
for i = 20 ( False )
back to main loop
for i = 16 ( True ):-
if i-1 not in values ( False ):-
back to main loop
for i = 17 ( True ):-
if i-1 not in values ( False ):-
back to main loop
for i = 18 ( True ):-
if i-1 not in values ( False ):-
            back to main loop
for i = 19 ( True ):-
if i-1 not in values ( False ):-
            back to main loop
for i = 21 ( True ):-
if i-1 not in values ( True ):-
value = 21
count_of_consecutive_numbers = 0
for i = 21 ( True )
i +=1 , i = 22
count_of_consecutive_numbers = 1
longest_consecutive_series =1
for i = 22 ( False ):-
back to main loop
for i = 23 ( True ):-
if i-1 not in values ( True ):-
value = 23
count_of_consecutive_numbers = 0
for i = 23 ( True )
i +=1 , i = 24
count_of_consecutive_numbers = 1
longest_consecutive_series = 1
for i = 24 (False ):-
back to main loop
'''
|
DaPy/methods/regressors/lr.py | huihui7987/DaPy | 552 | 12707078 | <gh_stars>100-1000
from DaPy.methods.core import BaseLinearModel
class LinearRegressor(BaseLinearModel):
def __init__(self, engine='numpy', learn_rate=0.05, l1_penalty=0, l2_penalty=0, fit_intercept=True):
BaseLinearModel.__init__(self, engine, learn_rate, l1_penalty, l2_penalty, fit_intercept)
def _forecast(self, X):
return X.dot(self._weight) + self._bias
def fit(self, X, Y, epoch=500, early_stop=True, verbose=False):
self._fit(X, Y, epoch, early_stop, verbose)
return self
def predict(self, X):
X = self._engine.mat(X)
return self._forecast(X)
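# Illustrative usage (hypothetical data; X is a 2-D feature matrix and Y the
# matching column of targets, following DaPy's matrix conventions):
#     model = LinearRegressor(learn_rate=0.05, fit_intercept=True)
#     model.fit(X, Y, epoch=500, verbose=False)
#     Y_hat = model.predict(X)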
|
owtf/lib/exceptions.py | Udbhavbisarya23/owtf | 1,514 | 12707151 | <gh_stars>1000+
"""
owtf.lib.exceptions
~~~~~~~~~~~~~~~~~~~
Declares the framework exceptions and HTTP errors
"""
try:
from http.client import responses
except ImportError:
from httplib import responses
import tornado.web
class FrameworkException(Exception):
def __init__(self, value):
self.parameter = value
def __repr__(self):
return self.parameter
class APIError(tornado.web.HTTPError):
"""Equivalent to ``RequestHandler.HTTPError`` except for in name"""
def api_assert(condition, *args, **kwargs):
"""Assertion to fail with if not ``condition``
Asserts that ``condition`` is ``True``, else raises an ``APIError``
with the provided ``args`` and ``kwargs``
:type condition: bool
"""
if not condition:
raise APIError(*args, **kwargs)
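# Illustrative use from a request handler (hypothetical values; APIError accepts
# the same arguments as tornado.web.HTTPError):
#     api_assert(target_id is not None, 400, reason="target_id is required")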
class FrameworkAbortException(FrameworkException):
pass
class PluginAbortException(FrameworkException):
pass
class UnreachableTargetException(FrameworkException):
pass
class UnresolvableTargetException(FrameworkException):
pass
class DBIntegrityException(FrameworkException):
pass
class InvalidTargetReference(FrameworkException):
pass
class InvalidSessionReference(FrameworkException):
pass
class InvalidTransactionReference(FrameworkException):
pass
class InvalidParameterType(FrameworkException):
pass
class InvalidWorkerReference(FrameworkException):
pass
class InvalidErrorReference(FrameworkException):
pass
class InvalidWorkReference(FrameworkException):
pass
class InvalidConfigurationReference(FrameworkException):
pass
class InvalidUrlReference(FrameworkException):
pass
class InvalidActionReference(FrameworkException):
pass
class InvalidMessageReference(FrameworkException):
pass
class InvalidMappingReference(FrameworkException):
pass
class DatabaseNotRunningException(Exception):
pass
class PluginException(Exception):
pass
class PluginsDirectoryDoesNotExist(PluginException):
"""The specified plugin directory does not exist."""
class PluginsAlreadyLoaded(PluginException):
"""`load_plugins()` called twice."""
|
openmdao/docs/openmdao_book/other/disable_snopt_cells.py | friedenhe/OpenMDAO | 451 | 12707154 | import json
def disable_snopt_cells(fname):
"""
    Once the first SNOPT cell is found, delete it and every subsequent code cell.
Parameters
----------
fname : str
Name of the notebook file, from openmdao_book.
"""
fname = f'openmdao_book/{fname}'
with open(fname) as f:
dct = json.load(f)
changed = False
newcells = []
found_snopt = False
for cell in dct['cells']:
if cell['cell_type'] == 'code':
if cell['source']: # cell is not empty
code = ''.join(cell['source'])
if found_snopt or 'SNOPT' in code:
found_snopt = True
else:
newcells.append(cell)
else:
newcells.append(cell)
    dct['cells'] = newcells
    changed = found_snopt
with open(fname, 'w') as f:
json.dump(dct, f, indent=1, ensure_ascii=False)
return changed
if __name__ == '__main__':
notebooks = ['features/building_blocks/drivers/pyoptsparse_driver.ipynb']
for notebook in notebooks:
disable_snopt_cells(notebook)
|
AutotestWebD/apps/myadmin/service/UserLogService.py | yangjourney/sosotest | 422 | 12707216 | <filename>AutotestWebD/apps/myadmin/service/UserLogService.py
from all_models.models import TbUserLog
class UserLogService(object):
@staticmethod
def updateUserLogService(permissionData):
tbModel = TbUserLog.objects.filter(id=permissionData["id"])
tbModel.update(**permissionData)
|
plenario/apiary/views.py | vforgione/plenario | 109 | 12707224 | <filename>plenario/apiary/views.py<gh_stars>100-1000
from json import dumps, loads
from flask import Blueprint
from redis import Redis
from sqlalchemy import desc, select
from plenario.database import redshift_base as rshift_base
from plenario.models.SensorNetwork import SensorMeta
from plenario.settings import REDIS_HOST
blueprint = Blueprint('apiary', __name__)
redis = Redis(REDIS_HOST)
def index() -> list:
"""Generate the information necessary for displaying unknown features on the
admin index page.
"""
rshift_base.metadata.reflect()
unknown_features = rshift_base.metadata.tables['unknown_feature']
query = select([unknown_features]) \
.order_by(desc(unknown_features.c.datetime)) \
.limit(5)
rp = query.execute()
results = []
for row in rp:
sensor = SensorMeta.query.get(row.sensor)
if sensor is None:
expected = 'No metadata exists for this sensor!'
else:
expected = dumps(sensor.observed_properties, indent=2, sort_keys=True)
result = {
'sensor': row.sensor,
'datetime': row.datetime,
'incoming': dumps(loads(row.data), indent=2, sort_keys=True, default=str),
'expected': expected
}
results.append(result)
return results
|
longclaw/shipping/serializers/rates.py | al-bezd/longclaw | 351 | 12707229 | from rest_framework import serializers
from longclaw.shipping.models.rates import ShippingRate
class ShippingRateSerializer(serializers.ModelSerializer):
class Meta:
model = ShippingRate
fields = "__all__"
|
configs/_base_/schedules/schedule_1x.py | kazakh-shai/kaggle-global-wheat-detection | 136 | 12707257 | # optimizer
optimizer = dict(type="SGD", lr=0.04, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy="step", warmup="linear", warmup_iters=100, warmup_ratio=0.001, step=[7, 11])
total_epochs = 12
|
ufora/FORA/Compiler/Compiler_test.py | ufora/ufora | 571 | 12707265 | <gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import ufora.FORA.python.Runtime as Runtime
import ufora.FORA.python.FORA as FORA
import ufora.native.FORA as ForaNative
class TestCompiler(unittest.TestCase):
def setUp(self):
self.runtime = Runtime.getMainRuntime()
self.axioms = self.runtime.getAxioms()
self.compiler = self.runtime.getTypedForaCompiler()
def test_resolveAxiomDirectly_smallStrings(self):
instance = ForaNative.ImplValContainer(
("s1", ForaNative.makeSymbol("Operator"), ForaNative.makeSymbol("+"), "s2")
)
jov = ForaNative.implValToJOV(instance)
joa = self.axioms.resolveAxiomDirectly(self.compiler, jov.getTuple())
self.assertEqual(len(joa.throwPart()),0)
self.assertEqual(len(joa.resultPart()),1)
result = joa.resultPart()[0]
self.assertEqual(result, ForaNative.parseStringToJOV('"s1s2"'))
def test_resolveAxiomDirectly_Vector(self):
vectorIVC = FORA.extractImplValContainer(FORA.eval("[]"))
jov = ForaNative.parseStringToJOV(("({Vector([])}, `append, 2)"))
joa = self.axioms.resolveAxiomDirectly(self.compiler, jov.getTuple())
self.assertEqual(len(joa.throwPart()),0)
self.assertEqual(len(joa.resultPart()),1)
result = joa.resultPart()[0]
self.assertEqual(result, ForaNative.parseStringToJOV("{Vector([{Int64}])}"))
def test_resolveAxiomDirectly_VeryLongComputation(self):
vectorIVC = FORA.extractImplValContainer(FORA.eval("[]"))
jov = ForaNative.parseStringToJOV(("({Vector([])}, `append, 2)"))
joa = self.axioms.resolveAxiomDirectly(self.compiler, jov.getTuple())
self.assertEqual(len(joa.throwPart()),0)
self.assertEqual(len(joa.resultPart()),1)
result = joa.resultPart()[0]
self.assertEqual(result, ForaNative.parseStringToJOV("{Vector([{Int64}])}"))
|
tests/test_serialisation.py | gaganchhabra/appkernel | 156 | 12707270 | from .utils import *
def setup_function(function):
""" executed before each method call
"""
print('\n\nSETUP ==> ')
def teardown_function(function):
""" teardown any state that was previously setup with a setup_method
call.
"""
print("\nTEAR DOWN <==")
def test_basic_serialisation():
p = create_rich_project()
p.finalise_and_validate()
print(('\n> serialized project: {}'.format(p.dumps(pretty_print=True))))
deserialised_proj = Project.loads(p.dumps())
print(('> deserialized project: {}'.format(deserialised_proj)))
assert type(deserialised_proj.created) == datetime
assert deserialised_proj.created == p.created, '!!! the deserialized project has created field w. type {} while it should be {}'.format(
type(deserialised_proj.created), type(p.created))
|
src/plugins/nat/extras/nat_ses_open.py | yasics/vpp | 751 | 12707271 | from trex_stl_lib.api import *
class STLS1:
def __init__ (self):
self.ip_range = {'local': {'start': "10.0.0.3", 'end': "10.1.255.255"},
'external': {'start': "172.16.1.3", 'end': "172.16.1.3"},
'remote': {'start': "2.2.0.1", 'end': "2.2.0.1"}}
self.port_range = {'local': {'start': 1025, 'end': 65535},
'remote': {'start': 12, 'end': 12}}
def create_stream (self, vm):
base_pkt = Ether()/IP()/UDP()
if len(base_pkt) < 64:
pad_len = 64 - len(base_pkt)
pad = Padding()
pad.load = '\x00' * pad_len
base_pkt = base_pkt/pad
pkt = STLPktBuilder(pkt=base_pkt, vm=vm)
return STLStream(packet=pkt, mode=STLTXCont())
def get_streams (self, direction = 0, **kwargs):
if direction == 0:
ip_src = self.ip_range['remote']
ip_dst = self.ip_range['external']
src_port = self.port_range['remote']
dst_port = self.port_range['local']
else:
ip_src = self.ip_range['local']
ip_dst = self.ip_range['remote']
src_port = self.port_range['local']
dst_port = self.port_range['remote']
vm = STLVM()
vm.var(name="ip_src", min_value=ip_src['start'], max_value=ip_src['end'], size=4, op="random")
vm.var(name="ip_dst", min_value=ip_dst['start'], max_value=ip_dst['end'], size=4, op="random")
vm.var(name="src_port", min_value=src_port['start'], max_value=src_port['end'], size=2, op="random")
vm.var(name="dst_port", min_value=dst_port['start'], max_value=dst_port['end'], size=2, op="random")
vm.write(fv_name="ip_src", pkt_offset="IP.src")
vm.write(fv_name="ip_dst", pkt_offset="IP.dst")
vm.write(fv_name="src_port", pkt_offset="UDP.sport")
vm.write(fv_name="dst_port", pkt_offset="UDP.dport")
vm.fix_chksum()
return [ self.create_stream(vm) ]
# dynamic load - used for trex console or simulator
def register():
return STLS1()
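# Illustrative invocation from the TRex console (hypothetical port and rate;
# check the TRex STL documentation for the exact options available):
#     trex> start -f nat_ses_open.py -m 1mpps -p 0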
|
src/app/tests/tests_s3_client.py | iNerV/education-backend | 151 | 12707297 | import pytest
from app.integrations.s3 import AppS3
pytestmark = [pytest.mark.django_db]
def test_client_init():
client = AppS3().client
assert 'botocore.client.S3' in str(client.__class__)
|
sublime_jedi/go_to.py | zjzh/SublimeJEDI | 641 | 12707312 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
try:
from typing import Set, List, Tuple, Any
except Exception:
pass
import sublime
import sublime_plugin
from functools import partial
import re
from .utils import to_relative_path, PythonCommandMixin, get_settings, is_python_scope, debounce
from .daemon import ask_daemon
from .settings import get_settings_param
class BaseLookUpJediCommand(PythonCommandMixin):
def _jump_to_in_window(self, filename, line_number=None, column_number=None, transient=False):
""" Opens a new window and jumps to declaration if possible
:param filename: string or int
:param line_number: int
:param column_number: int
:param transient: bool
If transient is True, opens a transient view
"""
active_window = self.view.window()
# restore saved location
try:
if self.view.sel()[0] != self.point:
self.view.sel().clear()
self.view.sel().add(self.point)
except AttributeError:
# called without setting self.point
pass
# If the file was selected from a drop down list
if isinstance(filename, int):
if filename == -1: # cancelled
# restore view
active_window.focus_view(self.view)
self.view.show(self.point)
return
filename, line_number, column_number = self.options[filename]
flags = self.prepare_layout(active_window, transient, filename)
active_window.open_file('%s:%s:%s' % (filename, line_number or 0,
column_number or 0), flags)
def prepare_layout(self, window, transient, filename):
"""
prepares the layout of the window to configured and returns flags
for opening the file
"""
flags = sublime.ENCODED_POSITION
if transient:
flags |= sublime.TRANSIENT
            # Sublime can't show a quick panel with options on one panel and
            # the file's content in transient mode on another panel,
            # so don't do anything if it's a request to show just the options.
return flags
goto_layout = get_settings_param(self.view, 'sublime_goto_layout')
if goto_layout == 'single-panel-transient' and not transient:
flags |= sublime.TRANSIENT
elif goto_layout == 'two-panel':
self.switch_to_two_panel_layout(window, filename)
elif goto_layout == 'two-panel-transient':
self.switch_to_two_panel_layout(window, filename)
if not transient:
flags |= sublime.TRANSIENT
return flags
def switch_to_two_panel_layout(self, window, filename):
curr_group = window.active_group()
layout = window.get_layout()
if len(layout['cells']) == 1:
# currently a single panel layout so switch to two panels
window.set_layout({
'cols': [0.0, 0.5, 1.0],
'rows': [0.0, 1.0],
'cells': [[0, 0, 1, 1], [1, 0, 2, 1]],
})
# select non current group(panel)
selected_group = None
for group in range(window.num_groups()):
if group != curr_group:
selected_group = group
window.focus_group(group)
break
# if the file is already opened and is in current group
# move it to another panel.
files_in_curr_group = dict([
(i.file_name(), i) for i in
window.views_in_group(curr_group)
])
if filename and filename in files_in_curr_group:
if files_in_curr_group[filename].view_id != self.view.view_id:
window.set_view_index(files_in_curr_group[filename], selected_group, 0)
def _window_quick_panel_open_window(self, view, options):
""" Shows the active `sublime.Window` quickpanel (dropdown) for
user selection.
:param option: list of `jedi.api_classes.BasDefinition`
"""
active_window = view.window()
# remember filenames
self.options = options
# remember current file location
self.point = self.view.sel()[0]
# Show the user a selection of filenames
active_window.show_quick_panel(
[self.prepare_option(o) for o in options],
self._jump_to_in_window,
on_highlight=partial(self._jump_to_in_window, transient=True))
def prepare_option(self, option):
""" prepare option to display out in quick panel """
raise NotImplementedError(
"{} require `prepare_option` definition".format(self.__class__)
)
class SublimeJediGoto(BaseLookUpJediCommand, sublime_plugin.TextCommand):
"""
Go to object definition
"""
def run(self, edit):
follow_imports = get_settings(self.view)['follow_imports']
ask_daemon(
self.view,
self.handle_definitions,
'goto',
ask_kwargs={
'follow_imports': follow_imports
},
)
def handle_definitions(self, view, defns):
if not defns:
return False
if len(defns) == 1:
defn = defns[0]
self._jump_to_in_window(*defn)
else:
self._window_quick_panel_open_window(view, defns)
def prepare_option(self, option):
return to_relative_path(option[0])
class SublimeJediFindUsages(BaseLookUpJediCommand, sublime_plugin.TextCommand):
"""
Find object usages, and optionally rename objects.
"""
def run(self, edit):
self.edit = edit
ask_daemon(self.view, self.handle_usages, 'usages')
def handle_usages(self, view, options) -> None:
if not options:
return
active_window = view.window()
# remember filenames
self.options = options
# remember current file location
self.point = self.view.sel()[0]
# expands selection to all of "focused" symbol
name = expand_selection(self.view, self.point)
def handle_rename(new_name: str) -> None:
groups = [] # type: List[List[Tuple[str, int, int]]]
files = set() # type: Set[str]
for option in options:
file = option[0]
if not file: # can't replace text (or even show usages) in unsaved file
continue
if file in files:
groups[-1].append(option)
else:
groups.append([option])
files.add(file)
for group in groups:
rename_in_file(group, group[0][0], new_name)
def rename_in_file(group, file_, new_name):
# type: (List[Tuple[str, int, int]], str, str) -> None
with open(file_) as f:
text = f.read()
original_text = text
offset = 0
for option in group:
assert text and name
_, row, col = option
point = text_point(original_text, row-1, col-1)
text = text[:point + offset] + new_name + text[point + offset + len(name):]
offset += len(new_name) - len(name)
with open(file_, "w") as f:
f.write(text)
def handle_choose(idx):
if not name:
return
if idx == 0:
view.window().show_input_panel("New name:", name, handle_rename, None, None)
return
self._jump_to_in_window(idx - 1 if idx != -1 else idx)
def handle_highlight(idx):
if idx == 0:
return
self._jump_to_in_window(idx - 1 if idx != -1 else idx, transient=True)
# Show the user a selection of filenames
files = {option[0] for option in options} # type: Set[str]
first_option = [[
'rename "{}"'.format(name),
"{} occurrence{} in {} file{}".format(
len(options), 's' if len(options) != 1 else '', len(files), 's' if len(files) != 1 else '')
]]
active_window.show_quick_panel(
first_option + [self.prepare_option(o) for o in options],
handle_choose,
on_highlight=handle_highlight)
def prepare_option(self, option):
return [to_relative_path(option[0]),
"line: %d column: %d" % (option[1], option[2])]
def expand_selection(view, point):
# type: (Any, Any) -> str
name = ""
_, col = view.rowcol(point.begin())
for match in re.finditer(r"[A-Za-z0-9_]+", view.substr(view.line(point.begin()))):
if match.start() <= col and match.end() >= col:
name = match.group()
return name
def text_point(text: str, row: int, col: int) -> int:
"""
Return the integer offset for the char at 0-indexed row and col in text.
Similar to View.text_point, but doesn't require loading the view first.
"""
chars = 0
for line in text.splitlines()[:row]:
chars += len(line) + 1
return chars + col
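# Example: in the text "ab\ncd", row=1, col=1 addresses the character "d", so
# text_point("ab\ncd", 1, 1) == 4 (2 chars of "ab" + 1 newline + 1 column offset).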
class SublimeJediEventListener(sublime_plugin.EventListener):
def on_selection_modified_async(self, view) -> None:
should_highlight = get_settings_param(view, 'highlight_usages_on_select')
if not view.file_name() or not is_python_scope(view, view.sel()[0].begin()) or not should_highlight:
return
highlight_usages(view)
@debounce(0.35)
def highlight_usages(view) -> None:
ask_daemon(view, handle_highlight_usages, 'usages')
def handle_highlight_usages(view, options):
# type: (Any, List[Tuple[str, int, int]]) -> None
name = expand_selection(view, view.sel()[0])
file_name = view.file_name()
def get_region(o):
# type: (Tuple[str, int, int]) -> Any
_, row, col = o
point = view.text_point(row-1, col-1)
return sublime.Region(point, point + len(name))
regions = [get_region(o) for o in options if o[0] == file_name]
if not regions:
view.erase_regions('sublime-jedi-usages')
return
highlight_color = get_settings_param(view, 'highlight_usages_color')
view.add_regions("sublime-jedi-usages", regions, highlight_color or "region.bluish",
flags=sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE)
|
grow/extensions/hooks/deployment_register_hook_test.py | tabulon-ext/grow | 335 | 12707320 | <reponame>tabulon-ext/grow<gh_stars>100-1000
"""Tests for deployment destination registration hook."""
import unittest
from grow.extensions.hooks import deployment_register_hook
class DeploymentRegisterHookTestCase(unittest.TestCase):
"""Test the deployment destination registration hook."""
def test_something(self):
"""?"""
pass
|