id | content
---|---
475799
|
import pytest
from ceph_deploy.cli import get_parser
from ceph_deploy.tests.util import assert_too_few_arguments
class TestParserRepo(object):
def setup(self):
self.parser = get_parser()
def test_repo_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('repo --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy repo' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_repo_name_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('repo'.split())
out, err = capsys.readouterr()
assert_too_few_arguments(err)
def test_repo_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('repo ceph'.split())
out, err = capsys.readouterr()
assert_too_few_arguments(err)
def test_repo_one_host(self):
args = self.parser.parse_args('repo ceph host1'.split())
assert args.host == ['host1']
def test_repo_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['repo', 'ceph'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
def test_repo_name(self):
args = self.parser.parse_args('repo ceph host1'.split())
assert args.repo_name == 'ceph'
def test_repo_remove_default_is_false(self):
args = self.parser.parse_args('repo ceph host1'.split())
assert not args.remove
def test_repo_remove_set_true(self):
args = self.parser.parse_args('repo ceph --remove host1'.split())
assert args.remove
def test_repo_remove_delete_alias(self):
args = self.parser.parse_args('repo ceph --delete host1'.split())
assert args.remove
def test_repo_url_default_is_none(self):
args = self.parser.parse_args('repo ceph host1'.split())
assert args.repo_url is None
def test_repo_url_custom_path(self):
args = self.parser.parse_args('repo ceph --repo-url https://ceph.com host1'.split())
assert args.repo_url == "https://ceph.com"
def test_repo_gpg_url_default_is_none(self):
args = self.parser.parse_args('repo ceph host1'.split())
assert args.gpg_url is None
def test_repo_gpg_url_custom_path(self):
args = self.parser.parse_args('repo ceph --gpg-url https://ceph.com/key host1'.split())
assert args.gpg_url == "https://ceph.com/key"
|
475816
|
import glob
import os
def _path(*args):
return os.path.join(os.path.dirname(__file__), *args)
# pyxbld for pyximport (from cython):
pyxbld = open(_path('template.pyxbld')).read()
# Sundials:
sundials_templates_dir = _path('sundials_templates')
sundials = {
os.path.basename(pth): open(pth).read() for pth in glob.glob(
os.path.join(sundials_templates_dir, '*.*'))
}
def render_pyxbld(pyxbasename, **kwargs):
    # Fill in empty defaults for every template field that was not provided.
    for key in ('sources', 'include_dirs', 'library_dirs', 'libraries',
                'extra_compile_args', 'extra_link_args'):
        kwargs.setdefault(key, [])
    # Render the pyxbld template and write it next to the .pyx file.
    with open(pyxbasename + '.pyxbld', 'wt') as fh:
        fh.write(pyxbld % kwargs)
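# Hedged usage sketch (not part of the original module): how render_pyxbld
# might be invoked for a hypothetical "mymodule.pyx" before pyximport builds
# it. All names and paths below are illustrative assumptions.
if __name__ == '__main__':
    render_pyxbld(
        'mymodule',                       # writes mymodule.pyxbld
        include_dirs=['/usr/local/include'],
        library_dirs=['/usr/local/lib'],
        libraries=['sundials_cvode'],     # hypothetical library name
        extra_compile_args=['-O2'],
    )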
|
475846
|
import graphics as gfx
import common
import vector
import apple as appleFuncs
SPEED = 12
def testForAppleProjectileCollision(projectile, apples):
for apple in apples[:]:
appleCenter = apple.getCenter()
projCenter = projectile.getCenter()
if vector.distanceBetween(appleCenter, projCenter) < appleFuncs.DIAMETER:
appleFuncs.removeApple(apples, apple)
def moveProjectile(direction, projectile):
dx = direction.getX()
dy = direction.getY()
#direction.y += 0.05 #Apply gravity
projectile.move(dx, dy)
def isOutOfBounds(centre):
x = centre.getX()
y = centre.getY()
d = appleFuncs.DIAMETER
return x - d > common.WINDOW_WIDTH or x + d < 0 or \
y - d > common.WINDOW_HEIGHT or y + d < 0
def update(projectiles, projectileDirections, apples):
'''Updates the player's projectiles'''
removeMe = []
for i in range(len(projectiles)):
moveProjectile(projectileDirections[i], projectiles[i])
testForAppleProjectileCollision(projectiles[i], apples)
if isOutOfBounds(projectiles[i].getCenter()):
removeMe.append(i)
    '''
    for x in sorted(removeMe, reverse=True):
        projectiles[x].undraw()
        projectileDirections.pop(x)
        projectiles.pop(x)
    '''
def create(playerPoint, target, window):
'''Creates a projectile'''
dx, dy = vector.getPointDifference(playerPoint, target)
proj = appleFuncs.makeDefaultApple(playerPoint.getX(), playerPoint.getY(), window)
dirVector = vector.normalise(gfx.Point(dx, dy))
dx = dirVector.getX() * SPEED
dy = dirVector.getY() * SPEED
velocity = gfx.Point(dx, dy)
return proj, velocity
|
475857
|
from django.urls import path
from users import views as users_views
from django.contrib.auth import views as auth_views
from django.conf.urls import url
urlpatterns = [
path('', users_views.profile, name='profile'),
path('home/', users_views.home, name='home'),
path('register/', users_views.register, name='register'),
path('about/', users_views.about, name='about'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
path('profile/', users_views.profile, name='profile'),
path('profile/changeInfo/', users_views.change_info, name='change_info'),
path('profile/<int:id>/', users_views.other_profile, name='profile_with_id'),
# url(r'^profile/(?P<pk>\d+)/$', users_views.profile, name='profile_with_pk'),
path('profile/friends/<int:id>/', users_views.friends_profile, name='friend_profile'),
]
|
475871
|
from utils_io_folder import get_immediate_childfile_paths
from utils_json import read_json_from_file, write_json_to_file
def merge_posetrack_jsons():
posetrack_annotation_folder = "../data/Data_2017/posetrack_data/annotations/train"
save_json_path = "posetrack_merged_train.json"
gt_json_paths = get_immediate_childfile_paths(posetrack_annotation_folder, "json")
merge_json(gt_json_paths, save_json_path)
posetrack_annotation_folder = "../data/Data_2017/posetrack_data/annotations/val"
save_json_path = "posetrack_merged_val.json"
gt_json_paths = get_immediate_childfile_paths(posetrack_annotation_folder, "json")
merge_json(gt_json_paths, save_json_path)
return
def merge_json(gt_json_paths, save_json_path):
python_data_merged = {"annolist": []}
for gt_json_path in gt_json_paths:
python_data = read_json_from_file(gt_json_path)
python_data_merged["annolist"].extend(python_data["annolist"])
write_json_to_file(python_data_merged, save_json_path, flag_verbose=False)
if __name__ == "__main__":
merge_posetrack_jsons()
|
475872
|
import os
import glob
from flask import Flask
from flask import jsonify
from flask import request, render_template
from webapp import app
#from model.util import *
from SigNet import main1, getpredictions
valid_mimetypes = ['image/jpeg', 'image/png', 'image/tiff']
global model
# def get_predictions(img_name):
# #TODO
# return {
# "bboxes":
# [
# {"x1": 10, "x2": 50, "y1": 10, "y2": 50}
# ],
# }
@app.route('/home')
def home1():
    # Keep the loaded model in the module-level global; without this
    # declaration the assignment only created a function-local name.
    global model
    model = main1([])
    return render_template('welcome.html')
@app.route('/load')
def index():
#model = main1([])
return render_template('index.html')
# return render_template('index.html')
from PIL import Image
import numpy as np
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
custid = request.form['customer ID']
#print ('action' ,request.form['usr'])
if 'file' not in request.files:
return jsonify({'error': 'no file'}), 400
# Image info
img_file = request.files.get('file')
img_name = img_file.filename
mimetype = img_file.content_type
# Return an error if not a valid mimetype
print (img_file)
if mimetype not in valid_mimetypes:
return jsonify({'error': 'bad-type'})
# Write image to static directory
#print (app.config['UPLOAD_FOLDER'])
img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
#img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
# Run Prediction on the model
results = getpredictions(img_name, custid)
if(results == 1):
results = " Original "
if(results == 0):
results = " Forgery "
# Delete image when done with analysis
#os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
return jsonify(results)
@app.route('/upload', methods=['POST','GET'])
def upload():
if request.method == 'POST':
custid = request.form['customer ID']
#print ('action' ,request.form['usr'])
if 'file' not in request.files:
return jsonify({'error': 'no file'}), 400
# Image info
img_file = request.files.get('file')
img_name = img_file.filename
mimetype = img_file.content_type
# Return an error if not a valid mimetype
print (img_file)
if mimetype not in valid_mimetypes:
return jsonify({'error': 'bad-type'})
# Write image to static directory
#print (app.config['UPLOAD_FOLDER'])
img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
#img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
# Run Prediction on the model
results = insertTable(custid,img_name,os.path.join(app.config['UPLOAD_FOLDER'], img_name))
if(results == 1):
results = "Upload Successfully"
if(results == 0):
results = "Not "
results = "Upload Successfully"
# Delete image when done with analysis
#os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
return jsonify(results)
else:
return render_template('upload.html')
import sqlite3
def createconnection():
con = sqlite3.connect('test8.db')
cursor = con.cursor()
return cursor
def insertTable(Signatureid, filename,picture_file):
insert_query = """INSERT INTO dataset (ID, fileName,file) VALUES(?,?, ?)"""
c = createconnection()
with open(picture_file, 'rb') as picture_file:
ablob = picture_file.read()
c.execute(insert_query, (Signatureid, filename, ablob))
c.connection.commit()
def get_file_from_db(customer_id):
cursor = createconnection()
select_fname = """SELECT file,fileName from dataset where ID = ?"""
cursor.execute(select_fname, (customer_id,))
item = cursor.fetchall()
cursor.connection.commit()
return item
CREATE_TABLE = """CREATE TABLE IF NOT EXISTS dataset (ID TEXT,fileName TEXT, file BLOB)"""
cursor = createconnection()
cursor.execute(CREATE_TABLE)
cursor.connection.commit()
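# Hedged usage sketch (not part of the original app): reading stored signature
# blobs back out of the dataset table for a given customer ID. The helper name
# and the output directory are illustrative assumptions.
def dump_files_for_customer(customer_id, out_dir='.'):
    for blob, filename in get_file_from_db(customer_id):
        with open(os.path.join(out_dir, filename), 'wb') as fh:
            fh.write(blob)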
|
475888
|
from pathlib import Path
import pytest
from sqlalchemy import Column, String
from flask_filealchemy.loaders import (
BaseLoader,
loader_for,
MarkdownFrontmatterDirectoryLoader,
YAMLDirectoryLoader,
YAMLFileLoader,
)
def test_base_loader_does_not_validate():
with pytest.raises(NotImplementedError):
BaseLoader(None, None)
def test_yaml_file_loader(db, tmpdir):
authors = tmpdir.mkdir('authors')
authors.join('_all.yml').write('does-not-matter')
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
assert len(db.metadata.sorted_tables) == 1
assert db.metadata.sorted_tables[0].name == 'authors'
author_table = db.metadata.sorted_tables[0]
assert isinstance(
loader_for(Path(tmpdir.strpath), author_table), YAMLFileLoader
)
def test_no_loader_found(db, tmpdir):
authors = tmpdir.mkdir('authors')
authors.join('invalid.md').write('does-not-matter')
authors.join('valid.yml').write('does-not-matter')
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
assert len(db.metadata.sorted_tables) == 1
assert db.metadata.sorted_tables[0].name == 'authors'
author_table = db.metadata.sorted_tables[0]
assert not loader_for(Path(tmpdir.strpath), author_table)
def test_yaml_directory_loader(db, tmpdir):
authors = tmpdir.mkdir('authors')
authors.join('first.yml').write('does-not-matter')
authors.join('second.yml').write('does-not-matter')
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
assert len(db.metadata.sorted_tables) == 1
assert db.metadata.sorted_tables[0].name == 'authors'
author_table = db.metadata.sorted_tables[0]
assert isinstance(
loader_for(Path(tmpdir.strpath), author_table), YAMLDirectoryLoader
)
def test_yaml_directory_loader_with_extra_extensions(db, tmpdir):
authors = tmpdir.mkdir('authors')
for index, extension in enumerate(YAMLDirectoryLoader.extensions):
authors.join('authors-{}.{}'.format(index, extension)).write(
'does-not-matter'
)
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
assert len(db.metadata.sorted_tables) == 1
assert db.metadata.sorted_tables[0].name == 'authors'
author_table = db.metadata.sorted_tables[0]
assert isinstance(
loader_for(Path(tmpdir.strpath), author_table), YAMLDirectoryLoader
)
def test_markdown_frontmatter_loader(db, tmpdir):
authors = tmpdir.mkdir('authors')
authors.join('first.md').write('does-not-matter')
authors.join('second.md').write('does-not-matter')
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
assert len(db.metadata.sorted_tables) == 1
assert db.metadata.sorted_tables[0].name == 'authors'
author_table = db.metadata.sorted_tables[0]
assert isinstance(
loader_for(Path(tmpdir.strpath), author_table),
MarkdownFrontmatterDirectoryLoader,
)
def test_markdown_frontmatter_loader_with_extra_extensions(db, tmpdir):
authors = tmpdir.mkdir('authors')
for index, extension in enumerate(
MarkdownFrontmatterDirectoryLoader.extensions
):
authors.join('authors-{}.{}'.format(index, extension)).write(
'does-not-matter'
)
class Author(db.Model):
__tablename__ = 'authors'
slug = Column(String(255), primary_key=True)
name = Column(String(255), nullable=False)
assert len(db.metadata.sorted_tables) == 1
assert db.metadata.sorted_tables[0].name == 'authors'
author_table = db.metadata.sorted_tables[0]
assert isinstance(
loader_for(Path(tmpdir.strpath), author_table),
MarkdownFrontmatterDirectoryLoader,
)
|
475910
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class SVDEmbedding(nn.Module):
def __init__(
self,
num,
rank,
output_dim,
padding_idx=None,
embedding_initializer=nn.init.normal_,
transformer_initializer=nn.init.xavier_uniform_):
super(SVDEmbedding, self).__init__()
if padding_idx == -1:
padding_idx = None
self.embedding = nn.Embedding(int(num), int(rank), padding_idx=padding_idx)
self.transformer = nn.Linear(int(rank), output_dim, bias=False)
self.embedding_initializer = embedding_initializer
self.transformer_initializer = transformer_initializer
def init_weights(self):
# init weights
self.embedding_initializer(self.embedding.weight.data)
self.transformer_initializer(self.transformer.weight.data)
def forward(self, src):
return self.transformer(self.embedding(src))
class SVDEmbeddingClassifier(nn.Module):
def __init__(
self,
num,
rank,
input_dim,
embedding_initializer=nn.init.normal_,
transformer_initializer=nn.init.xavier_uniform_,
bias_initializer=nn.init.uniform_):
super(SVDEmbeddingClassifier, self).__init__()
self.embedding = nn.Linear(int(rank), int(num))
self.transformer = nn.Linear(input_dim, int(rank), bias=False)
self.bias = nn.Parameter(torch.FloatTensor(int(num),))
self.embedding_initializer = embedding_initializer
self.transformer_initializer = transformer_initializer
self.bias_initializer = bias_initializer
def init_weights(self):
# init weights
self.embedding_initializer(self.embedding.weight.data)
self.transformer_initializer(self.transformer.weight.data)
self.bias_initializer(self.bias.data)
def forward(self, src):
return self.embedding(self.transformer(src)) + self.bias
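# Hedged usage sketch (not part of the original module): shape check only,
# assuming a 1000-token vocabulary factorised through rank 16 to 128 dims.
if __name__ == '__main__':
    emb = SVDEmbedding(num=1000, rank=16, output_dim=128)
    emb.init_weights()
    clf = SVDEmbeddingClassifier(num=1000, rank=16, input_dim=128)
    clf.init_weights()
    tokens = torch.randint(0, 1000, (4, 32))   # (batch, sequence)
    hidden = emb(tokens)                        # -> (4, 32, 128)
    logits = clf(hidden)                        # -> (4, 32, 1000)
    print(hidden.shape, logits.shape)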
|
475931
|
class Winratio:
def __init__(self, **kwargs):
self.losses = kwargs.get("Losses", kwargs.get("losses", 0)) or 0
self.wins = kwargs.get("Wins", kwargs.get("wins", 0)) or 0
@property
def winratio(self):
        _w = self.wins / (self.matches_played if self.matches_played > 1 else 1) * 100.0
        # Whole-number percentages come back as int, otherwise rounded to 2 dp.
        return int(_w) if _w % 1 == 0 else round(_w, 2)
@property
def matches_played(self):
return self.wins + self.losses
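# Hedged usage sketch (not in the original file): a quick check of the ratio
# maths using keyword-style stats as the constructor expects.
if __name__ == '__main__':
    stats = Winratio(Wins=7, Losses=3)
    print(stats.matches_played)  # 10
    print(stats.winratio)        # 70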
|
475961
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('validator', '0003_celery_task_20190402_1445'),
]
operations = [
migrations.CreateModel(
name='DatasetConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='dataset_configurations', to='validator.Dataset')),
('filters', models.ManyToManyField(related_name='dataset_configurations', to='validator.DataFilter')),
],
),
migrations.AddField(
model_name='datasetconfiguration',
name='validation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dataset_configurations', to='validator.ValidationRun'),
),
migrations.AddField(
model_name='datasetconfiguration',
name='variable',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='dataset_configurations', to='validator.DataVariable'),
),
migrations.AddField(
model_name='datasetconfiguration',
name='version',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='dataset_configurations', to='validator.DatasetVersion'),
),
migrations.RenameField(
model_name='validationrun',
old_name='scaling_ref',
new_name='old_scaling_ref'
),
migrations.AddField(
model_name='validationrun',
name='scaling_ref',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='scaling_ref_validation_run', to='validator.DatasetConfiguration'),
),
migrations.AddField(
model_name='validationrun',
name='reference_configuration',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ref_validation_run', to='validator.DatasetConfiguration'),
),
]
|
475983
|
data = (
'Ruo ', # 0x00
'Bei ', # 0x01
'E ', # 0x02
'Yu ', # 0x03
'Juan ', # 0x04
'Yu ', # 0x05
'Yun ', # 0x06
'Hou ', # 0x07
'Kui ', # 0x08
'Xiang ', # 0x09
'Xiang ', # 0x0a
'Sou ', # 0x0b
'Tang ', # 0x0c
'Ming ', # 0x0d
'Xi ', # 0x0e
'Ru ', # 0x0f
'Chu ', # 0x10
'Zi ', # 0x11
'Zou ', # 0x12
'Ju ', # 0x13
'Wu ', # 0x14
'Xiang ', # 0x15
'Yun ', # 0x16
'Hao ', # 0x17
'Yong ', # 0x18
'Bi ', # 0x19
'Mo ', # 0x1a
'Chao ', # 0x1b
'Fu ', # 0x1c
'Liao ', # 0x1d
'Yin ', # 0x1e
'Zhuan ', # 0x1f
'Hu ', # 0x20
'Qiao ', # 0x21
'Yan ', # 0x22
'Zhang ', # 0x23
'Fan ', # 0x24
'Qiao ', # 0x25
'Xu ', # 0x26
'Deng ', # 0x27
'Bi ', # 0x28
'Xin ', # 0x29
'Bi ', # 0x2a
'Ceng ', # 0x2b
'Wei ', # 0x2c
'Zheng ', # 0x2d
'Mao ', # 0x2e
'Shan ', # 0x2f
'Lin ', # 0x30
'Po ', # 0x31
'Dan ', # 0x32
'Meng ', # 0x33
'Ye ', # 0x34
'Cao ', # 0x35
'Kuai ', # 0x36
'Feng ', # 0x37
'Meng ', # 0x38
'Zou ', # 0x39
'Kuang ', # 0x3a
'Lian ', # 0x3b
'Zan ', # 0x3c
'Chan ', # 0x3d
'You ', # 0x3e
'Qi ', # 0x3f
'Yan ', # 0x40
'Chan ', # 0x41
'Zan ', # 0x42
'Ling ', # 0x43
'Huan ', # 0x44
'Xi ', # 0x45
'Feng ', # 0x46
'Zan ', # 0x47
'Li ', # 0x48
'You ', # 0x49
'Ding ', # 0x4a
'Qiu ', # 0x4b
'Zhuo ', # 0x4c
'Pei ', # 0x4d
'Zhou ', # 0x4e
'Yi ', # 0x4f
'Hang ', # 0x50
'Yu ', # 0x51
'Jiu ', # 0x52
'Yan ', # 0x53
'Zui ', # 0x54
'Mao ', # 0x55
'Dan ', # 0x56
'Xu ', # 0x57
'Tou ', # 0x58
'Zhen ', # 0x59
'Fen ', # 0x5a
'Sakenomoto ', # 0x5b
None, # 0x5c
'Yun ', # 0x5d
'Tai ', # 0x5e
'Tian ', # 0x5f
'Qia ', # 0x60
'Tuo ', # 0x61
'Zuo ', # 0x62
'Han ', # 0x63
'Gu ', # 0x64
'Su ', # 0x65
'Po ', # 0x66
'Chou ', # 0x67
'Zai ', # 0x68
'Ming ', # 0x69
'Luo ', # 0x6a
'Chuo ', # 0x6b
'Chou ', # 0x6c
'You ', # 0x6d
'Tong ', # 0x6e
'Zhi ', # 0x6f
'Xian ', # 0x70
'Jiang ', # 0x71
'Cheng ', # 0x72
'Yin ', # 0x73
'Tu ', # 0x74
'Xiao ', # 0x75
'Mei ', # 0x76
'Ku ', # 0x77
'Suan ', # 0x78
'Lei ', # 0x79
'Pu ', # 0x7a
'Zui ', # 0x7b
'Hai ', # 0x7c
'Yan ', # 0x7d
'Xi ', # 0x7e
'Niang ', # 0x7f
'Wei ', # 0x80
'Lu ', # 0x81
'Lan ', # 0x82
'Yan ', # 0x83
'Tao ', # 0x84
'Pei ', # 0x85
'Zhan ', # 0x86
'Chun ', # 0x87
'Tan ', # 0x88
'Zui ', # 0x89
'Chuo ', # 0x8a
'Cu ', # 0x8b
'Kun ', # 0x8c
'Ti ', # 0x8d
'Mian ', # 0x8e
'Du ', # 0x8f
'Hu ', # 0x90
'Xu ', # 0x91
'Xing ', # 0x92
'Tan ', # 0x93
'Jiu ', # 0x94
'Chun ', # 0x95
'Yun ', # 0x96
'Po ', # 0x97
'Ke ', # 0x98
'Sou ', # 0x99
'Mi ', # 0x9a
'Quan ', # 0x9b
'Chou ', # 0x9c
'Cuo ', # 0x9d
'Yun ', # 0x9e
'Yong ', # 0x9f
'Ang ', # 0xa0
'Zha ', # 0xa1
'Hai ', # 0xa2
'Tang ', # 0xa3
'Jiang ', # 0xa4
'Piao ', # 0xa5
'Shan ', # 0xa6
'Yu ', # 0xa7
'Li ', # 0xa8
'Zao ', # 0xa9
'Lao ', # 0xaa
'Yi ', # 0xab
'Jiang ', # 0xac
'Pu ', # 0xad
'Jiao ', # 0xae
'Xi ', # 0xaf
'Tan ', # 0xb0
'Po ', # 0xb1
'Nong ', # 0xb2
'Yi ', # 0xb3
'Li ', # 0xb4
'Ju ', # 0xb5
'Jiao ', # 0xb6
'Yi ', # 0xb7
'Niang ', # 0xb8
'Ru ', # 0xb9
'Xun ', # 0xba
'Chou ', # 0xbb
'Yan ', # 0xbc
'Ling ', # 0xbd
'Mi ', # 0xbe
'Mi ', # 0xbf
'Niang ', # 0xc0
'Xin ', # 0xc1
'Jiao ', # 0xc2
'Xi ', # 0xc3
'Mi ', # 0xc4
'Yan ', # 0xc5
'Bian ', # 0xc6
'Cai ', # 0xc7
'Shi ', # 0xc8
'You ', # 0xc9
'Shi ', # 0xca
'Shi ', # 0xcb
'Li ', # 0xcc
'Zhong ', # 0xcd
'Ye ', # 0xce
'Liang ', # 0xcf
'Li ', # 0xd0
'Jin ', # 0xd1
'Jin ', # 0xd2
'Qiu ', # 0xd3
'Yi ', # 0xd4
'Diao ', # 0xd5
'Dao ', # 0xd6
'Zhao ', # 0xd7
'Ding ', # 0xd8
'Po ', # 0xd9
'Qiu ', # 0xda
'He ', # 0xdb
'Fu ', # 0xdc
'Zhen ', # 0xdd
'Zhi ', # 0xde
'Ba ', # 0xdf
'Luan ', # 0xe0
'Fu ', # 0xe1
'Nai ', # 0xe2
'Diao ', # 0xe3
'Shan ', # 0xe4
'Qiao ', # 0xe5
'Kou ', # 0xe6
'Chuan ', # 0xe7
'Zi ', # 0xe8
'Fan ', # 0xe9
'Yu ', # 0xea
'Hua ', # 0xeb
'Han ', # 0xec
'Gong ', # 0xed
'Qi ', # 0xee
'Mang ', # 0xef
'Ri ', # 0xf0
'Di ', # 0xf1
'Si ', # 0xf2
'Xi ', # 0xf3
'Yi ', # 0xf4
'Chai ', # 0xf5
'Shi ', # 0xf6
'Tu ', # 0xf7
'Xi ', # 0xf8
'Nu ', # 0xf9
'Qian ', # 0xfa
'Ishiyumi ', # 0xfb
'Jian ', # 0xfc
'Pi ', # 0xfd
'Ye ', # 0xfe
'Yin ', # 0xff
)
|
475996
|
import pkg_resources
import warnings
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import torch
from .. import setting
from . import activation
from . import concatenator
from . import array2diagmat
from . import array2symmat
from . import deepsets
from . import gcn
from . import grad_gcn
from . import identity
from . import integration
from . import iso_gcn
from . import laplace_net
from . import lstm
from . import mlp
from . import message_passing
from . import reducer
from . import reshape
from . import siml_module
from . import symmat2array
from . import tcn
from . import tensor_operations
from . import time_norm
from . import translator
class BlockInformation():
def __init__(self, block, use_support=False, trainable=True):
if not issubclass(block, siml_module.SimlModule):
raise ValueError(f"{block} should be a subclass of SimlModule")
self.block = block
self.use_support = use_support
self.trainable = trainable
return
class Network(torch.nn.Module):
dict_block_info = {
# Layers without weights
'activation': BlockInformation(activation.Activation, trainable=False),
'array2diagmat': BlockInformation(
array2diagmat.Array2Diagmat, trainable=False),
'array2symmat': BlockInformation(
array2symmat.Array2Symmat, trainable=False),
'concatenator': BlockInformation(
concatenator.Concatenator, trainable=False),
'contraction': BlockInformation(
tensor_operations.Contraction, trainable=False),
'distributor': BlockInformation(
reducer.Reducer, trainable=False), # For backward compatibility
'identity': BlockInformation(identity.Identity, trainable=False),
'integration': BlockInformation(
integration.Integration, trainable=False),
'reducer': BlockInformation(reducer.Reducer, trainable=False),
'reshape': BlockInformation(reshape.Reshape, trainable=False),
'symmat2array': BlockInformation(
symmat2array.Symmat2Array, trainable=False),
'time_norm': BlockInformation(time_norm.TimeNorm, trainable=False),
'translator': BlockInformation(translator.Translator, trainable=False),
# Layers with weights
'adjustable_mlp': BlockInformation(mlp.MLP),
'deepsets': BlockInformation(deepsets.DeepSets),
'gcn': BlockInformation(gcn.GCN, use_support=True),
'grad_gcn': BlockInformation(grad_gcn.GradGCN, use_support=True),
'iso_gcn': BlockInformation(iso_gcn.IsoGCN, use_support=True),
'laplace_net': BlockInformation(
laplace_net.LaplaceNet, use_support=True),
'lstm': BlockInformation(lstm.LSTM),
'mlp': BlockInformation(mlp.MLP),
'message_passing': BlockInformation(
message_passing.MessagePassing, use_support=True),
'tcn': BlockInformation(tcn.TCN),
}
INPUT_LAYER_NAME = 'Input'
OUTPUT_LAYER_NAME = 'Output'
def __init__(self, model_setting, trainer_setting):
super().__init__()
self.model_setting = model_setting
self.trainer_setting = trainer_setting
self.y_dict_mode = isinstance(self.trainer_setting.outputs, dict)
for block in self.model_setting.blocks:
if 'distributor' == block.type:
warnings.warn(
'distributor type is deprecated. Use reducer',
DeprecationWarning)
self.dict_block_setting = {
block_setting.name: block_setting
for block_setting in self.model_setting.blocks}
self.call_graph = self._create_call_graph()
self.sorted_graph_nodes = list(nx.topological_sort(self.call_graph))
self._update_dict_block_setting()
self.dict_block_information = {
block_name: self.dict_block_info[block_setting.type]
for block_name, block_setting in self.dict_block_setting.items()}
self.dict_block = self._create_dict_block()
self.use_support = np.any([
block_information.use_support
for block_information in self.dict_block_information.values()])
self.merge_sparses = False
# self.merge_sparses = np.any([
# isinstance(v, iso_gcn.IsoGCN) for v in self.dict_block.values()])
if self.merge_sparses:
print('Sparse matrices are merged for IsoGCN')
return
def _create_call_graph(self):
call_graph = nx.DiGraph()
block_names = [
block_setting.name for block_setting
in self.model_setting.blocks] + [
self.INPUT_LAYER_NAME, self.OUTPUT_LAYER_NAME]
for block_setting in self.model_setting.blocks:
if block_setting.name == self.INPUT_LAYER_NAME:
raise ValueError(
f"Do not use block names: {self.INPUT_LAYER_NAME}")
if block_setting.name == self.OUTPUT_LAYER_NAME:
raise ValueError(
f"Do not use block names: {self.OUTPUT_LAYER_NAME}")
for destination in block_setting.destinations:
if destination not in block_names:
raise ValueError(f"{destination} does not exist")
call_graph.add_edge(block_setting.name, destination)
if block_setting.is_first:
call_graph.add_edge(self.INPUT_LAYER_NAME, block_setting.name)
if block_setting.is_last:
call_graph.add_edge(block_setting.name, self.OUTPUT_LAYER_NAME)
# Validate call graph
if not nx.is_directed_acyclic_graph(call_graph):
cycle = nx.find_cycle(call_graph)
raise ValueError(
f"Cycle found in the network: {cycle}")
for graph_node in call_graph.nodes():
predecessors = tuple(call_graph.predecessors(graph_node))
successors = tuple(call_graph.successors(graph_node))
if len(predecessors) == 0 and graph_node != self.INPUT_LAYER_NAME:
raise ValueError(f"{graph_node} has no predecessors")
if len(successors) == 0 and graph_node != self.OUTPUT_LAYER_NAME:
raise ValueError(f"{graph_node} has no successors")
return call_graph
def _update_dict_block_setting(self):
self.dict_block_setting.update({
self.INPUT_LAYER_NAME: setting.BlockSetting(
name=self.INPUT_LAYER_NAME, type='identity'),
self.OUTPUT_LAYER_NAME: setting.BlockSetting(
name=self.OUTPUT_LAYER_NAME, type='identity')})
for graph_node in self.sorted_graph_nodes:
predecessors = tuple(self.call_graph.predecessors(graph_node))
block_setting = self.dict_block_setting[graph_node]
block_type = block_setting.type
if graph_node == self.INPUT_LAYER_NAME:
first_node = self.trainer_setting.input_length
last_node = self.trainer_setting.input_length
elif graph_node == self.OUTPUT_LAYER_NAME:
first_node = self.trainer_setting.output_length
last_node = self.trainer_setting.output_length
elif block_type == 'array2symmat':
                first_node = 6
last_node = 1
elif block_type == 'symmat2array':
max_first_node = np.sum([
self.dict_block_setting[predecessor].nodes[-1]
for predecessor in predecessors])
first_node = max(len(np.arange(max_first_node)[
block_setting.input_selection]), 1)
last_node = first_node * 6
elif block_type == 'concatenator':
max_first_node = np.sum([
self.dict_block_setting[predecessor].nodes[-1]
for predecessor in predecessors])
first_node = len(np.arange(max_first_node)[
block_setting.input_selection])
last_node = first_node
elif block_type in ['reducer', 'contraction']:
max_first_node = np.sum([
self.dict_block_setting[predecessor].nodes[-1]
for predecessor in predecessors])
first_node = len(np.arange(max_first_node)[
block_setting.input_selection])
last_node = np.max([
self.dict_block_setting[predecessor].nodes[-1]
for predecessor in predecessors])
elif block_type == 'reshape':
max_first_node = np.sum([
self.dict_block_setting[predecessor].nodes[-1]
for predecessor in predecessors])
first_node = len(np.arange(max_first_node)[
block_setting.input_selection])
last_node = block_setting.optional['new_shape'][1]
elif block_type == 'integration':
max_first_node = self.dict_block_setting[
predecessors[0]].nodes[-1]
first_node = len(np.arange(max_first_node)[
block_setting.input_selection])
last_node = first_node - 1
else:
if len(predecessors) != 1:
raise ValueError(
f"{graph_node} has {len(predecessors)} "
f"predecessors: {predecessors}")
if block_setting.is_first:
if isinstance(self.trainer_setting.input_length, dict):
input_keys = block_setting.input_keys
if input_keys is None:
raise ValueError(
                                'Input is dict. Please specify input_keys to '
f"the first nodes: {block_setting}")
input_length = self.trainer_setting.input_length
max_first_node = int(
np.sum([
input_length[input_key] for input_key
in input_keys]))
else:
max_first_node = self.trainer_setting.input_length
else:
max_first_node = self.dict_block_setting[
tuple(predecessors)[0]].nodes[-1]
first_node = len(np.arange(max_first_node)[
block_setting.input_selection])
if self.dict_block_info[block_type].trainable:
last_node = self.trainer_setting.output_length
else:
last_node = first_node
if graph_node not in [
self.INPUT_LAYER_NAME, self.OUTPUT_LAYER_NAME] \
and block_setting.nodes[0] == -1:
block_setting.nodes[0] = int(first_node)
if graph_node not in [
self.INPUT_LAYER_NAME, self.OUTPUT_LAYER_NAME] \
and block_setting.nodes[-1] == -1:
output_key = block_setting.output_key
if output_key is None:
if isinstance(last_node, dict):
raise ValueError(
                            'Output is dict. Please specify output_key to the '
f"last nodes: {block_setting}")
block_setting.nodes[-1] = int(
last_node)
else:
if block_setting.is_last:
output_length = self.trainer_setting.output_length
block_setting.nodes[-1] = int(
output_length[output_key])
else:
raise ValueError(
'Cannot put output_key when is_last is False: '
f"{block_setting}")
return
def _create_dict_block(self):
dict_block = torch.nn.ModuleDict({
block_name:
self.dict_block_information[block_name].block(block_setting).to(
block_setting.device)
for block_name, block_setting in self.dict_block_setting.items()})
return dict_block
def forward(self, x_):
x = x_['x']
supports = np.asarray(x_.get('supports', None))
original_shapes = x_.get('original_shapes', None)
dict_hidden = {
block_name: None for block_name in self.call_graph.nodes}
for graph_node in self.sorted_graph_nodes:
block_setting = self.dict_block_setting[graph_node]
if graph_node == self.INPUT_LAYER_NAME:
dict_hidden[graph_node] = x
else:
device = block_setting.device
if block_setting.input_keys is None:
inputs = [
self._select_dimension(
dict_hidden[predecessor],
block_setting.input_selection, device)
for predecessor
in self.call_graph.predecessors(graph_node)]
else:
inputs = [
torch.cat([
dict_hidden[predecessor][input_key][
..., block_setting.input_selection].to(device)
for input_key in block_setting.input_keys], dim=-1)
for predecessor
in self.call_graph.predecessors(graph_node)]
if self.dict_block_information[graph_node].use_support:
if self.merge_sparses:
# NOTE: support_input_indices are ignored
selected_supports = supports
else:
if len(supports.shape) == 1:
selected_supports = supports[
block_setting.support_input_indices]
else:
selected_supports = [
[s.to(device) for s in sp] for sp
in supports[
:, block_setting.support_input_indices]]
hidden = self.dict_block[graph_node](
*inputs, supports=selected_supports,
original_shapes=original_shapes)
else:
hidden = self.dict_block[graph_node](
*inputs, original_shapes=original_shapes)
if block_setting.coeff is not None:
hidden = hidden * block_setting.coeff
if block_setting.output_key is None:
dict_hidden[graph_node] = hidden
else:
dict_hidden[graph_node] = {
block_setting.output_key: hidden}
if self.y_dict_mode:
return_dict = {}
if isinstance(dict_hidden[self.OUTPUT_LAYER_NAME], dict):
return_dict.update(dict_hidden[self.OUTPUT_LAYER_NAME])
else:
for h in dict_hidden[self.OUTPUT_LAYER_NAME]:
return_dict.update(h)
return return_dict
else:
return dict_hidden[self.OUTPUT_LAYER_NAME]
def _select_dimension(self, x, input_selection, device):
if isinstance(x, dict):
if input_selection != slice(0, None, 1):
raise ValueError(
f"Cannot set input_selection after dict_output")
return {key: value.to(device) for key, value in x.items()}
else:
if input_selection == slice(0, None, 1):
return x
else:
return x[..., input_selection].to(device)
def draw(self, output_file_name):
figure = plt.figure(dpi=1000)
mapping = {
graph_node:
f"{graph_node}\n"
f"{self.dict_block_setting[graph_node].type}\n"
f"{self.dict_block_setting[graph_node].nodes}"
for graph_node in self.sorted_graph_nodes}
d = nx.drawing.nx_pydot.to_pydot(nx.relabel.relabel_nodes(
self.call_graph, mapping=mapping, copy=True))
if self.trainer_setting.figure_format == 'pdf':
d.write_pdf(output_file_name)
elif self.trainer_setting.figure_format == 'png':
d.write_png(output_file_name)
plt.close(figure)
return
def add_block(name, block, *, trainable=True, use_support=False):
"""Add block definition to siml.
Parameters
----------
name: str
Name of the block.
block: siml.network.SimlModule-like
User defined block.
trainable: bool, optional
If True, the block is considered as a trainable block. The default is
True.
use_support: bool, optional
If True, use sparse matrix for the second input. The default is False.
"""
Network.dict_block_info.update(
{name: BlockInformation(
block, trainable=trainable, use_support=use_support)})
return
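# Hedged usage sketch (not part of the original module): add_block only
# extends Network.dict_block_info, so an existing SimlModule subclass such as
# mlp.MLP could be registered under an alias, and a user-defined block would
# be passed the same way. Kept as a comment to avoid touching the registry at
# import time.
# add_block(name='mlp_alias', block=mlp.MLP, trainable=True, use_support=False)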
# Load torch_geometric blocks when torch_geometric is installed
if 'torch-geometric' in [
p.key for p in pkg_resources.working_set]: # pylint: disable=E1133
from . import pyg
add_block(
name='cluster_gcn', block=pyg.ClusterGCN, trainable=True,
use_support=True)
add_block(
name='gin', block=pyg.GIN, trainable=True, use_support=True)
add_block(
name='gcnii', block=pyg.GCNII, trainable=True, use_support=True)
|
476080
|
import pytest
from data.test_data import generate_unstructured_test_data
from lofo import Dataset
def test_dataset():
df = generate_unstructured_test_data(1000, text=True)
features = ["A", "B", "C", "D", "D2", "E"]
# Exception: feature group row count is not equal to the features' row count
feature_groups = {"interactions": df[["A", "B"]].values[:10]*df[["C", "D"]].values[:10]}
with pytest.raises(Exception):
assert Dataset(df=df, target="binary_target", features=features, feature_groups=feature_groups)
# Exception: Feature group name A is in use by other features
feature_groups = {"A": df[["A", "B"]].values*df[["C", "D"]].values}
with pytest.raises(Exception):
assert Dataset(df=df, target="binary_target", features=features, feature_groups=feature_groups)
# Exception: Feature group type is not numpy.ndarray or scipy.csr.csr_matrix
feature_groups = {"F": df[["A", "B"]]}
with pytest.raises(Exception):
assert Dataset(df=df, target="binary_target", features=features, feature_groups=feature_groups)
d = Dataset(df=df, target="binary_target", features=features, feature_groups={"F": df[["A", "B"]].values},
auto_group_threshold=0.5)
assert "D" not in d.feature_names and "D2" not in d.feature_names
assert "D & D2" in d.feature_names and "F" in d.feature_groups.keys()
|
476140
|
from Utils import Attribute
class View:
    def __init__(self, name):
        self.mName = name
        self.attributes = []
        self.mChildren = []
    @classmethod
    def fromELement(cls, element):
        # Build a View from an ElementTree element: copy its tag and
        # attributes, then recurse over its direct children only
        # (element.iter() would also yield the element itself).
        view = cls(element.tag)
        for name, value in element.attrib.items():
            view.attributes.append(Attribute(name, value))
        for child in list(element):
            view.mChildren.append(View.fromELement(child))
        return view
def addAtribute(self,attribute):
self.attributes.append(attribute)
|
476141
|
import os
dataset_path = r"D:\free_corpus\base"
transformed_path = r"D:\free_corpus\processed"
packed_path = r"D:\free_corpus\packed"
# Each dataset directory contains metadata.csv and mels/;
# transformed_path and packed_path additionally hold lang_to_id.json and spk_id.json.
include_corpus = ['caito_de_de', 'caito_en_uk', 'caito_en_us', 'caito_es_es', 'caito_fr_fr',
'caito_it_it', 'caito_pl_pl', 'caito_ru_ru', 'caito_uk_ua',
'css10_de', 'css10_el', 'css10_es', 'css10_fi', 'css10_fr', 'css10_hu', 'css10_ja',
'css10_nl', 'css10_zh', 'css10_ru', 'databaker', 'enbible',
'google_bn_bd', 'google_bn_in', 'google_ca_es', 'google_eu_es', 'google_gl_es', 'google_gu_in',
'google_jv_id', 'google_km_kh', 'google_kn_in', 'google_ml_in', 'google_mr_in', 'google_my_mm',
'google_ne_np', 'google_si_lk', 'google_su_id', 'google_ta_in', 'google_te_in', 'google_yo_ng',
'jsut', 'kss', 'ljspeech', 'lsru', 'nst_da', 'nst_nb', 'pt_br', 'siwis', 'thorsten',
'hifi_us', 'hifi_uk', 'rss']
dataset_language = {'css10_de': 'de-de', 'css10_el': 'el-gr', 'css10_es': 'es-es', 'css10_fi': 'fi-fi',
'css10_fr': 'fr-fr', 'css10_hu': 'hu-hu', 'css10_ja': 'ja-jp', 'css10_nl': 'nl-nl',
'css10_zh': 'zh-cn', 'css10_ru': 'ru-ru', 'lsru': 'ru-ru',
'jsut': 'ja-jp', 'kss': 'ko-kr', 'ljspeech': 'en-us', 'pt_br': 'pt-br', 'siwis': 'fr-fr',
'thorsten': 'de-de', 'databaker': 'zh-cn', 'enbible': 'en-us', 'nst_da': 'da-dk', 'nst_nb': 'nb-no',
'hifi_us': 'en-us', 'hifi_uk': 'en-uk', 'rss': 'ro-ro'}
def get_dataset_language(dataset_name):
if dataset_name.startswith('google') or dataset_name.startswith('caito'):
return dataset_name[-5:].replace('_', '-')
return dataset_language[dataset_name]
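# Hedged usage sketch (not in the original file): google/caito corpora encode
# their locale in the final five characters of the name; everything else is
# resolved through dataset_language.
if __name__ == '__main__':
    print(get_dataset_language('google_bn_bd'))  # bn-bd
    print(get_dataset_language('css10_ja'))      # ja-jp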
|
476173
|
import socket
import threading
import docker
import time
import iptc
from enum import Enum
from src.logger import logger
from src.OneWayThread import OneWayThread
class ContainerThread(threading.Thread):
def __init__(self, source, connection, config):
super().__init__()
self.source = source
logger.debug(self.source)
self.connection = connection
self.config = config
self.container_ip = self.container_port = self.container_protocol = None
self.dest = self.thread1 = self.thread2 = self.container = None
'''
Need to make a different one for macos as docker desktop for macos
doesn't allow connecting to a docker-defined network. I'm thinking of
using 127.0.0.1 and mapping the internal port to one in the range
25000-25999 as those don't appear to be claimed in
https://support.apple.com/en-us/HT202944
I believe client sockets start in the 40000's
'''
def connect_to_container(self):
nwsettings = self.container.attrs['NetworkSettings']
self.container_ip = nwsettings['Networks']['bridge']['IPAddress']
logger.debug(self.container_ip)
ports = nwsettings['Ports']
assert len(ports) == 1
for p in ports.keys():
self.container_port = int(p.split('/')[0])
self.container_protocol = p.split('/')[1]
logger.debug(self.container_port)
logger.debug(self.container_protocol)
        last_err = None
        for _ in range(9):
            try:
                self.dest = socket.create_connection(
                    (self.container_ip, self.container_port), timeout=2)
                self.dest.settimeout(self.config.get('connection_timeout', 10))
                logger.debug(self.dest)
                return
            except Exception as err:
                # Keep the last error around: the except-block variable is
                # cleared by Python once the block ends.
                last_err = err
                logger.debug(err)
                time.sleep(2)
        logger.info('Unable to connect to %s:%s',
                    self.container_ip, self.container_port)
        logger.info(last_err)
        raise last_err
def create_rules(self):
proto = self.container_protocol.lower()
srcIP = self.source.getpeername()[0]
dstIP = self.container_ip
srcport = str(self.source.getpeername()[1])
dstport = str(self.container_port)
self.to_rule = { \
'src': srcIP, \
'dst': dstIP, \
'target': 'ACCEPT', \
'protocol': proto, \
proto: {'sport': srcport, 'dport': dstport} \
}
logger.debug(self.to_rule)
iptc.easy.add_rule('filter', 'FORWARD', self.to_rule)
self.from_rule = { \
'src': dstIP, \
'dst': srcIP, \
'target': 'ACCEPT', \
'protocol': proto, \
proto: {'sport': dstport, 'dport': srcport} \
}
logger.debug(self.from_rule)
iptc.easy.add_rule('filter', 'FORWARD', self.from_rule)
self.drop_rule = { \
'src': dstIP, \
'dst': '!' + srcIP, \
'target': 'DROP' \
}
logger.debug(self.drop_rule)
iptc.easy.add_rule('filter', 'FORWARD', self.drop_rule)
def remove_rules(self):
logger.debug('Removing rules')
iptc.easy.delete_rule('filter', "FORWARD", self.to_rule)
iptc.easy.delete_rule('filter', "FORWARD", self.from_rule)
iptc.easy.delete_rule('filter', "FORWARD", self.drop_rule)
def start_and_join_threads(self):
logger.debug('Starting thread1')
self.thread1 = OneWayThread(self.source, self.dest, self.connection, self.config, 'request')
self.thread1.start()
logger.debug('Starting thread2')
self.thread2 = OneWayThread(self.dest, self.source, self.connection, self.config, 'response')
self.thread2.start()
logger.debug('Joining thread1')
self.thread1.join()
logger.debug('Joining thread2')
self.thread2.join()
def run(self):
try:
client = docker.from_env()
self.container = client.containers.run(self.config['container'], detach=True)
logger.info('Started: %s', self.container)
self.container.reload()
except Exception as err:
logger.info(err)
return
try:
self.connect_to_container()
except Exception as err:
logger.info(err)
self.stop_and_remove()
return
self.create_rules()
self.start_and_join_threads()
self.remove_rules()
self.dest.close()
self.stop_and_remove()
def stop_and_remove(self):
logger.debug(str(self.container.logs()))
logger.info('Stopping: %s', self.container)
self.container.stop()
logger.info('Removing: %s', self.container)
self.container.remove()
def shutdown(self):
self.thread1.shutdown()
self.thread2.shutdown()
self.dest.close()
self.stop_and_remove()
|
476199
|
from zipf import make_zipf, is_zipf
generated = make_zipf(5)
print('generated distribution: {}'.format(generated))
generated[-1] *= 2
print('passes test with default tolerance: {}'.format(is_zipf(generated)))
print('passes test with tolerance of 1.0: {}'.format(is_zipf(generated, rel=1.0)))
|
476223
|
from numpy import arcsin, exp
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW29
A SlotW29 object
Returns
-------
point_dict: dict
A dict of the slot point coordinates
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z12|| = W0
alpha = arcsin(self.W0 / (2 * Rbo))
Z0 = Rbo * exp(1j * 0)
Z1 = Z0 * exp(-1j * alpha)
if self.is_outwards():
Z2 = Z1 + self.H0
Z3 = Z2 - (self.W1 - self.W0) * 1j / 2.0
Z4 = Z3 + self.H1
Z5 = Z4 - (self.W2 - self.W1) * 1j / 2.0
Z6 = Z5 + self.H2
else:
Z2 = Z1 - self.H0
Z3 = Z2 - (self.W1 - self.W0) * 1j / 2.0
Z4 = Z3 - self.H1
Z5 = Z4 - (self.W2 - self.W1) * 1j / 2.0
Z6 = Z5 - self.H2
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
point_dict["Z5"] = Z5
point_dict["Z6"] = Z6
    # symmetry
point_dict["Z7"] = Z6.conjugate()
point_dict["Z8"] = Z5.conjugate()
point_dict["Z9"] = Z4.conjugate()
point_dict["Z10"] = Z3.conjugate()
point_dict["Z11"] = Z2.conjugate()
point_dict["Z12"] = Z1.conjugate()
return point_dict
|
476225
|
import torchvision.ops # noqa: F401
from torch2trt_dynamic.plugins import create_roipool_plugin
from torch2trt_dynamic.torch2trt_dynamic import (get_arg, tensorrt_converter,
trt_)
@tensorrt_converter('torchvision.ops.roi_pool')
def convert_roi_pool(ctx):
input = get_arg(ctx, 'input', pos=0, default=None)
boxes = get_arg(ctx, 'boxes', pos=1, default=None)
output_size = get_arg(ctx, 'output_size', pos=2, default=7)
spatial_scale = get_arg(ctx, 'spatial_scale', pos=3, default=1.)
output = ctx.method_return
input_trt = trt_(ctx.network, input)
boxes_trt = trt_(ctx.network, boxes)
plugin = create_roipool_plugin(
'roi_pool_' + str(id(boxes)),
out_size=output_size,
featmap_strides=[1. / spatial_scale],
roi_scale_factor=-1,
finest_scale=56)
custom_layer = ctx.network.add_plugin_v2(
inputs=[boxes_trt, input_trt], plugin=plugin)
output._trt = custom_layer.get_output(0)
@tensorrt_converter('torchvision.ops.RoIPool.forward')
def convert_RoIPool(ctx):
module = ctx.method_args[0]
input = get_arg(ctx, 'input', pos=1, default=None)
boxes = get_arg(ctx, 'boxes', pos=2, default=None)
output_size = module.output_size
spatial_scale = module.spatial_scale
old_method_args = ctx.method_args
old_method_kwargs = ctx.method_kwargs
new_method_args = [input, boxes, output_size, spatial_scale]
new_method_kwargs = {}
ctx.method_args = new_method_args
ctx.method_kwargs = new_method_kwargs
convert_roi_pool(ctx)
ctx.method_args = old_method_args
ctx.method_kwargs = old_method_kwargs
|
476269
|
import pyaudio
import socket
import select
from pynput.keyboard import Key, Listener, Controller
import threading
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
CHUNK = 4096
HOST = socket.gethostname()
PORT = 8082
streaming = True
audio = pyaudio.PyAudio()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True, frames_per_buffer=CHUNK)
def get_keyboard_input():
global streaming
def on_press(key):
global streaming
if key == Key.esc:
streaming = False
return False
with Listener(on_press=on_press) as listener:
listener.join()
keypresses = threading.Thread(target=get_keyboard_input)
keypresses.start()
try:
while streaming:
data = conn.recv(CHUNK)
stream.write(data)
except KeyboardInterrupt:
print("Killing connection")
s.close()
stream.close()
audio.terminate()
|
476282
|
import re
def get_errors(query: str, offset_field: str):
errors = []
if not is_valid_timestamp(query, offset_field):
errors.append(f'The query should have ascending ordering by {offset_field}')
if not is_valid_offset(query):
errors.append('Please use ${OFFSET} with a gt condition (not gte)')
return errors
def is_valid_timestamp(query: str, offset_field: str) -> bool:
regexp = re.compile(rf'"sort"[\s\S]*"{offset_field}"[\s\S]*"order"[\s\S]*"asc"')
if regexp.search(query):
return True
return False
def is_valid_offset(query: str) -> bool:
regexp = re.compile(r'"gt"[\s\S]*\${OFFSET}')
if regexp.search(query):
return True
return False
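# Hedged usage sketch (not part of the original module): the validators only
# scan the raw query text, so a minimal Elasticsearch-style body is enough to
# exercise them. The field name is an illustrative assumption.
if __name__ == '__main__':
    query = ('{"sort": [{"@timestamp": {"order": "asc"}}], '
             '"query": {"range": {"@timestamp": {"gt": "${OFFSET}"}}}}')
    print(get_errors(query, offset_field='@timestamp'))  # [] -> no complaints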
|
476296
|
from .variables import get_variable, set_variable, delete_variable, Attributes
from .variables import GLOBAL_NAMESPACE, DEFAULT_ATTRIBUTES
from .privileges import privileges, patch_current_process_privileges
from .boot import get_boot_order, get_boot_entry, set_boot_entry, set_boot_order
from .boot import get_parsed_boot_entry, set_parsed_boot_entry
from .load_option import LoadOption
|
476299
|
import geoip2.database
import geoip2.webservice
import os
from pydantic import BaseModel
from tracardi.domain.named_entity import NamedEntity
from tracardi.service.singleton import Singleton
class PluginConfiguration(BaseModel):
source: NamedEntity
ip: str
class GeoLiteCredentials(BaseModel):
accountId: int
license: str
host: str = 'geolite.info'
class GeoIpConfiguration(BaseModel):
webservice: GeoLiteCredentials = None
database: str = None
def get_local_database(self):
return os.path.join(self.database)
def is_local(self):
return self.database is not None
def is_remote(self):
return self.webservice is not None
class MaxMindGeoLite2Client:
def __init__(self, credentials: GeoLiteCredentials):
self.client = geoip2.webservice.AsyncClient(credentials.accountId, credentials.license, host=credentials.host)
async def read(self, ip: str):
return await self.client.city(ip)
async def close(self):
await self.client.close()
class MaxMindGeoLite2Reader(metaclass=Singleton):
def __init__(self, database):
self.reader = geoip2.database.Reader(database)
def read(self, ip: str):
return self.reader.city(ip)
def __del__(self):
if self.reader:
self.reader.close()
class MaxMindGeoLite2:
def __init__(self, config: GeoIpConfiguration):
self.config = config
if self.config.is_local():
self.client = MaxMindGeoLite2Reader(database=self.config.get_local_database())
elif self.config.is_remote():
self.client = MaxMindGeoLite2Client(credentials=self.config.webservice)
else:
raise ValueError("Misconfiguration of MaxMindGeoLite2.")
async def fetch(self, ip):
result = self.client.read(ip)
if isinstance(self.client, MaxMindGeoLite2Client):
result = await result
await self.client.close()
return result
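# Hedged usage sketch (not part of the original plugin): resolving an IP with
# the local-database variant. The .mmdb path is an illustrative assumption and
# the call is left commented because it needs a real GeoLite2 database file.
# import asyncio
# geo = MaxMindGeoLite2(GeoIpConfiguration(database="GeoLite2-City.mmdb"))
# city = asyncio.run(geo.fetch("8.8.8.8"))
# print(city.city.name, city.country.iso_code)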
|
476309
|
import os
import sys
from distutils.core import setup
from pathlib import Path
import numpy as np
from transonic.dist import make_backend_files, init_transonic_extensions
path_here = Path(__file__).parent.absolute()
include_dirs = [np.get_include()]
pack_name = "future"
paths = tuple((path_here / pack_name).glob("*.py"))
for backend in ("pythran", "cython", "numba"):
make_backend_files(paths, backend=backend)
extensions = []
if "egg_info" not in sys.argv:
extensions = init_transonic_extensions(
pack_name,
backend="pythran",
include_dirs=[np.get_include()],
compile_args=("-O3", "-DUSE_XSIMD"),
inplace=True,
)
extensions.extend(
init_transonic_extensions(
pack_name, backend="cython", inplace=True, annotate=True
)
)
init_transonic_extensions(pack_name, backend="numba")
setup(
name=pack_name,
ext_modules=extensions,
# script_name="setup.py",
script_args=["build_ext", "--inplace"],
# cmdclass=dict(build_ext=ParallelBuildExt),
)
|
476384
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
badge = html.Div(
[
dbc.Badge("Primary", color="primary", className="me-1"),
dbc.Badge("Secondary", color="secondary", className="me-1"),
dbc.Badge("Success", color="success", className="me-1"),
dbc.Badge("Warning", color="warning", className="me-1"),
dbc.Badge("Danger", color="danger", className="me-1"),
dbc.Badge("Info", color="info", className="me-1"),
dbc.Badge("Light", color="light", className="me-1"),
dbc.Badge("Dark", color="dark"),
],
className="mb-2",
)
badge_pills = html.Div(
[
dbc.Badge("Primary", color="primary", className="me-1", pill=True),
dbc.Badge("Secondary", color="secondary", className="me-1", pill=True),
dbc.Badge("Success", color="success", className="me-1", pill=True),
dbc.Badge("Warning", color="warning", className="me-1", pill=True),
dbc.Badge("Danger", color="danger", className="me-1", pill=True),
dbc.Badge("Info", color="info", className="me-1", pill=True),
dbc.Badge("Light", color="light", className="me-1", pill=True),
dbc.Badge("Dark", color="dark", pill=True),
],
className="mb-2",
)
badge_text = html.Div(
[
dbc.Badge(
"Primary",
color="white",
text_color="primary",
className="border me-1",
),
dbc.Badge(
"Secondary",
color="white",
text_color="secondary",
className="border me-1",
),
dbc.Badge(
"Success",
color="white",
text_color="success",
className="border me-1",
),
dbc.Badge(
"Warning",
color="white",
text_color="warning",
className="border me-1",
),
dbc.Badge(
"Danger",
color="white",
text_color="danger",
className="border me-1",
),
dbc.Badge(
"Info", color="white", text_color="info", className="border me-1"
),
dbc.Badge(
"Dark", color="white", text_color="dark", className="border me-1"
),
dbc.Badge(
"Black",
color="white",
text_color="black",
className="border me-1",
),
dbc.Badge(
"Muted",
color="white",
text_color="muted",
className="border me-1",
),
dbc.Badge(
"Light",
color="secondary",
text_color="light",
className="border me-1",
),
dbc.Badge(
"White", color="secondary", text_color="white", className="border"
),
],
className="mb-2",
)
badge_sizes = html.Div(
[
html.H1(["Example heading", dbc.Badge("New", className="ms-1")]),
html.H2(["Example heading", dbc.Badge("New", className="ms-1")]),
html.H3(["Example heading", dbc.Badge("New", className="ms-1")]),
html.H4(["Example heading", dbc.Badge("New", className="ms-1")]),
html.H5(["Example heading", dbc.Badge("New", className="ms-1")]),
html.H6(["Example heading", dbc.Badge("New", className="ms-1")]),
]
)
badges = html.Div(
[
make_subheading("Badge", "badge"),
badge,
badge_pills,
badge_text,
badge_sizes,
],
className="mb-4",
)
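# Hedged usage sketch (not part of the original module): the assembled
# `badges` Div can be dropped into a small Dash app for a visual check.
# from dash import Dash
# app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
# app.layout = badges
# app.run_server(debug=True)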
|
476400
|
import random
import asyncio
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from proxy_rax import ProxyRaxBot
class SlowBot(ProxyRaxBot):
async def on_step(self, iteration):
await asyncio.sleep(random.random())
await super().on_step(iteration)
def main():
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Bot(Race.Terran, SlowBot()),
Computer(Race.Protoss, Difficulty.Medium)
], realtime=False, step_time_limit=0.2)
if __name__ == '__main__':
main()
|
476434
|
import numpy as np
import matplotlib.pyplot as plt
RANDOM_SEED = 42
NUM_VIDEOS = 1000
MAX_NUM_BITRATES = 10
MIN_NUM_BITRATES = 3
MAX_NUM_CHUNKS = 100
MIN_NUM_CHUNKS = 20
# bit rate candidates
# [200, 300, 450, 750, 1200, 1850, 2850, 4300, 6000, 8000] # Kbps
MEAN_VIDEO_SIZE = [0.1, 0.15, 0.38, 0.6, 0.93, 1.43, 2.15, 3.25, 4.5, 6] # MB
STD_VIDEO_SIZE_NOISE = 0.1
VIDEO_FOLDER = './videos/'
np.random.seed(RANDOM_SEED)
all_bitrate_idx = np.array(range(MAX_NUM_BITRATES))
mask_bitrate_idx_to_shuffle = np.array(range(MAX_NUM_BITRATES))
for video_idx in range(NUM_VIDEOS):
num_bitrates = np.random.randint(MIN_NUM_BITRATES, MAX_NUM_BITRATES + 1)
num_chunks = np.random.randint(MIN_NUM_CHUNKS, MAX_NUM_CHUNKS + 1)
np.random.shuffle(mask_bitrate_idx_to_shuffle)
mask_bitrate_idx = mask_bitrate_idx_to_shuffle[:num_bitrates]
mask_bitrate_idx.sort()
if np.all(mask_bitrate_idx == [1, 3, 4, 5, 6, 7]):
# avoid using the same bitrates as the ones we do testing
np.random.shuffle(mask_bitrate_idx_to_shuffle)
mask_bitrate_idx = mask_bitrate_idx_to_shuffle[:num_bitrates]
mask_bitrate_idx.sort()
    with open(VIDEO_FOLDER + str(video_idx), 'w') as f:
        f.write(str(num_bitrates) + '\t' + str(num_chunks) + '\n')
        for i in range(MAX_NUM_BITRATES):
if i in mask_bitrate_idx:
f.write('1' + '\t')
else:
f.write('0' + '\t')
f.write('\n')
        for _ in range(num_chunks):
            for i in range(num_bitrates):
mean = MEAN_VIDEO_SIZE[mask_bitrate_idx[i]]
noise = np.random.normal(1, STD_VIDEO_SIZE_NOISE)
f.write(str(mean * noise) + '\t')
f.write('\n')
|
476439
|
class UserModel(Tower):
@model_property
def inference(self):
assert self.input_shape[0]==224, 'Input shape should be 224 pixels'
assert self.input_shape[1]==224, 'Input shape should be 224 pixels'
# Create some wrappers for simplicity
def conv2d(x, W, b, s, padding='SAME'):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, s, s, 1], padding=padding)
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k, s, padding='VALID'):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding=padding)
# Create model
def conv_net(x, weights, biases):
conv1 = conv2d(x, weights['wc1'], biases['bc1'], s=4, padding='SAME')
#conv1 = tf.nn.local_response_normalization(conv1, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0)
pool1 = maxpool2d(conv1, k=3, s=2)
conv2 = conv2d(pool1, weights['wc2'], biases['bc2'], s=1, padding='SAME')
#conv2 = tf.nn.local_response_normalization(conv2, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0)
pool2 = maxpool2d(conv2, k=3, s=2)
conv3 = conv2d(pool2, weights['wc3'], biases['bc3'], s=1, padding='SAME')
conv4 = conv2d(conv3, weights['wc4'], biases['bc4'], s=1, padding='SAME')
conv5 = conv2d(conv4, weights['wc5'], biases['bc5'], s=1, padding='SAME')
pool5 = maxpool2d(conv5, k=3, s=2)
# Flatten
flatten = tf.reshape(pool5, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(flatten, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
if self.is_training:
fc1 = tf.nn.dropout(fc1, 0.5)
fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
fc2 = tf.nn.relu(fc2)
if self.is_training:
fc2 = tf.nn.dropout(fc2, 0.5)
# Output, class prediction
out = tf.add(tf.matmul(fc2, weights['out']), biases['out'])
return out
# Initialize W using stddev 1/sqrt(n), with n the input dimension size.
# Store layers weight & bias
weights = {
# 11x11 conv, #channels input, 96 outputs
'wc1': tf.get_variable('wc1', [11, 11, self.input_shape[2], 96], initializer=tf.contrib.layers.xavier_initializer()),
# 5x5 conv, 96 inputs, 256 outputs
'wc2': tf.get_variable('wc2', [5, 5, 96, 256], initializer=tf.contrib.layers.xavier_initializer()),
# 3x3 conv, 256 inputs, 384 outputs
'wc3': tf.get_variable('wc3', [3, 3, 256, 384], initializer=tf.contrib.layers.xavier_initializer()),
# 3x3 conv, 384 inputs, 384 outputs
'wc4': tf.get_variable('wc4', [3, 3, 384, 384], initializer=tf.contrib.layers.xavier_initializer()),
# 3x3 conv, 384 inputs, 256 outputs
'wc5': tf.get_variable('wc5', [3, 3, 384, 256], initializer=tf.contrib.layers.xavier_initializer()),
# fully connected, 6*6*256=9216 inputs, 4096 outputs
'wd1': tf.get_variable('wd1', [6*6*256, 4096], initializer=tf.contrib.layers.xavier_initializer()),
# fully connected, 4096 in, 4096 out
'wd2': tf.get_variable('wd2', [4096, 4096], initializer=tf.contrib.layers.xavier_initializer()),
# 4096 inputs, #classes outputs (class prediction)
'out': tf.get_variable('wout', [4096, self.nclasses], initializer=tf.contrib.layers.xavier_initializer()),
}
        # Leave the initial biases at zero
biases = {
'bc1': tf.get_variable('bc1', [96], initializer=tf.constant_initializer(0.0)),
'bc2': tf.get_variable('bc2', [256], initializer=tf.constant_initializer(0.0)),
'bc3': tf.get_variable('bc3', [384], initializer=tf.constant_initializer(0.0)),
'bc4': tf.get_variable('bc4', [384], initializer=tf.constant_initializer(0.0)),
'bc5': tf.get_variable('bc5', [256], initializer=tf.constant_initializer(0.0)),
'bd1': tf.get_variable('bd1', [4096], initializer=tf.constant_initializer(0.0)),
'bd2': tf.get_variable('bd2', [4096], initializer=tf.constant_initializer(0.0)),
'out': tf.get_variable('bout', [self.nclasses], initializer=tf.constant_initializer(0.0))
}
model = conv_net(self.x, weights, biases)
return model
@model_property
def loss(self):
loss = digits.classification_loss(self.inference, self.y)
accuracy = digits.classification_accuracy(self.inference, self.y)
self.summaries.append(tf.summary.scalar(accuracy.op.name, accuracy))
return loss
|
476466
|
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPool2D, ZeroPadding2D, Flatten
from tensorflow import keras
import tensorflow as tf
import numpy as np
class BaseModel():
"""
Base class used to implement a model.
    This class itself should not be instantiated.
"""
def __init__(self, comm, controller, rank, n_acts, obs_shape, sess_config=None):
if sess_config is None:
self.sess = tf.Session()
else:
self.sess = tf.Session(config=sess_config)
### Initialize MPI variables ###
self.comm = comm
self.controller = controller
self.rank = rank
### Initialize placeholder and other network ops ###
self.sess_config = sess_config
self.obs_shape = obs_shape
self.n_acts = n_acts
self.update_policy = self.train_policy
self.create_phs(obs_shape=self.obs_shape)
self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
self.create_policy_ops()
self.sess.run(tf.global_variables_initializer())
# Sync the weights of models on all processes
self.sync_weights()
def create_phs(self, obs_shape):
"""
Creates placeholders (input ops) for the model.
"""
self.obs_ph = tf.placeholder(
dtype=tf.float32, shape=(None, *list(obs_shape)))
self.next_obs_ph = tf.placeholder(
dtype=tf.float32, shape=(None, *list(obs_shape)))
self.act_ph = tf.placeholder(dtype=tf.int32, shape=(None,))
self.rew_ph = tf.placeholder(dtype=tf.float32, shape=(None,))
self.gae_ph = tf.placeholder(dtype=tf.float32, shape=(None,))
def create_policy_ops(self):
"""
Creates the ops for the policy and value network.
Additionally creates the ops for updating the network.
This function should be overridden by subclasses.
"""
self.act_out = 0
self.value_op = 0
self.policy_update = 0
self.value_update = 0
def gen_actions(self, obs):
"""
Generates actions (1 to 1) for each observation passed
into the function.
"""
if type(obs) == list:
obs = np.asarray(obs)
assert (type(obs) == np.ndarray), \
"Observations must be a numpy array!"
assert (list(obs.shape)[1:] == list(self.obs_shape)), \
"Observations must have the shape, (batch_size, dimensions..., 1)!"
return self.sess.run(self.act_out, feed_dict={self.obs_ph: obs})[0]
def gen_actions_and_values(self, obs):
"""
Generates actions and values (1 to 1) for each observation
passed into the function.
"""
if type(obs) == list:
obs = np.asarray(obs)
assert (type(obs) == np.ndarray), \
"Observations must be a numpy array!"
assert (list(obs.shape)[1:] == list(self.obs_shape)), \
"Observations must have the shape, (batch_size, dimensions..., 1)!"
acts, vals = self.sess.run(
[self.act_out, self.value_op], feed_dict={self.obs_ph: obs})
return acts[0], vals[0]
def train_policy(self, states, actions, rewards, gaes):
"""
Updates the policy given training data from
environment simulation.
"""
self.sess.run([self.policy_update, self.value_update],
feed_dict={self.obs_ph: states,
self.act_ph: actions,
self.rew_ph: rewards,
self.gae_ph: gaes})
def sync_weights(self):
"""
Sync the weights between model on all processes
using MPI.
"""
if self.rank == self.controller:
self.comm.bcast(self.sess.run(
tf.trainable_variables()), self.controller)
else:
sync_vars = self.comm.bcast(None, self.controller)
t_vars = tf.trainable_variables()
for pair in zip(t_vars, sync_vars):
self.sess.run(tf.assign(pair[0], pair[1]))
class OneDimModel(BaseModel):
"""
Vanilla Policy Gradient implemented for an
environment with 1-dimensional states.
"""
def __init__(self, comm, controller, rank, n_acts, obs_shape, sess_config=None):
super().__init__(comm, controller, rank, n_acts, obs_shape, sess_config)
self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
def create_policy_ops(self):
"""
Creates the ops for the policy and value network.
Additionally creates the ops for updating the network.
"""
with tf.variable_scope('policy'):
# Creating a fully connected net for the policy and value estimator
dense_1 = Dense(64, activation='relu')(self.obs_ph)
act_dense_1 = Dense(64, activation='relu')(dense_1)
act_dense_2 = Dense(64, activation='relu')(act_dense_1)
val_dense_1 = Dense(64, activation='relu')(dense_1)
val_dense_2 = Dense(64, activation='relu')(val_dense_1)
# Output probability distribution over possible actions
self.act_probs_op = Dense(
self.n_acts, activation='softmax', name='act_probs')(act_dense_2)
self.act_out = tf.random.categorical(tf.log(self.act_probs_op), 1)
# Output value of observed state
self.value_op = Dense(1, activation='linear')(val_dense_2)
self.act_masks = tf.one_hot(
self.act_ph, self.n_acts, dtype=tf.float32)
self.log_probs = tf.log(self.act_probs_op)
self.resp_acts = tf.reduce_sum(
self.act_masks * self.log_probs, axis=1)
self.policy_loss = \
-tf.reduce_mean(self.resp_acts * self.gae_ph)
self.policy_update = self.optimizer.minimize(self.policy_loss)
# Add dependency on policy update to make sure the value network
# only gets updated after the policy
with tf.control_dependencies([self.policy_update]):
self.value_loss = tf.reduce_mean(
tf.square(self.rew_ph - tf.squeeze(self.value_op)))
self.value_update = self.optimizer.minimize(self.value_loss)
class TwoDimModel(BaseModel):
"""
Vanilla Policy Gradient implemented for an
environment with 2-dimensional states.
"""
def __init__(self, comm, controller, rank, n_acts, obs_shape, sess_config=None):
super().__init__(comm, controller, rank, n_acts, obs_shape, sess_config)
def create_policy_ops(self):
"""
Creates the ops for the policy and value network.
Additionally creates the ops for updating the network.
"""
with tf.variable_scope('policy'):
# Creating a conv net for the policy and value estimator
conv_1 = Conv2D(16, 5, 3, activation='relu')(self.obs_ph)
pooling_1 = MaxPool2D(2)(conv_1)
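            # NOTE: pooling_1 above is computed but never used; both the policy
            # and value branches below read from conv_1 directly.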
act_conv_1 = Conv2D(32, 3, 2, activation='relu')(conv_1)
act_pool_1 = MaxPool2D(2)(act_conv_1)
act_flat = Flatten()(act_pool_1)
val_conv_1 = Conv2D(32, 3, 2, activation='relu')(conv_1)
val_pool_1 = MaxPool2D(2)(val_conv_1)
val_flat = Flatten()(val_pool_1)
# Output probability distribution over possible actions
self.act_probs_op = Dense(
self.n_acts, activation='softmax', name='act_probs')(act_flat)
self.act_out = tf.random.categorical(tf.log(self.act_probs_op), 1)
# Output value of observed state
self.value_op = Dense(1, activation='linear')(val_flat)
self.act_masks = tf.one_hot(
self.act_ph, self.n_acts, dtype=tf.float32)
self.log_probs = tf.log(self.act_probs_op)
self.resp_acts = tf.reduce_sum(
self.act_masks * self.log_probs, axis=1)
self.policy_loss = \
-tf.reduce_mean(self.resp_acts * self.gae_ph)
self.policy_update = self.optimizer.minimize(self.policy_loss)
# Add dependency on policy update to make sure the value network
# only gets updated after the policy
with tf.control_dependencies([self.policy_update]):
self.value_loss = tf.reduce_mean(
tf.square(self.rew_ph - tf.squeeze(self.value_op)))
self.value_update = self.optimizer.minimize(self.value_loss)
|
476485
|
from flask_socketio import SocketIO
socketio = SocketIO(cors_allowed_origins='*')
def initSocket(app):
socketio.init_app(app)
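# Usage sketch (illustrative only; the Flask `app` object is whatever the project
# builds elsewhere, e.g. in an app factory):
#   app = Flask(__name__)
#   initSocket(app)
#   socketio.run(app)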
|
476515
|
from __future__ import absolute_import
import os
from django.conf import settings
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'troupon.settings')
app = Celery('troupon')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
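# Usage sketch (requires a running broker; the call site below is hypothetical):
#   debug_task.delay()  # enqueue the task; a worker prints its request info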
|
476529
|
from ctypes import c_int, c_size_t, c_void_p
from .dll import _bind, SDLFunc, AttributeDict
from .stdinc import SDL_bool
__all__ = [
# Defines
"SDL_CACHELINE_SIZE",
]
# Constants & enums
SDL_CACHELINE_SIZE = 128
# Raw ctypes function definitions
_funcdefs = [
SDLFunc("SDL_GetCPUCount", None, c_int),
SDLFunc("SDL_GetCPUCacheLineSize", None, c_int),
SDLFunc("SDL_HasRDTSC", None, SDL_bool),
SDLFunc("SDL_HasAltiVec", None, SDL_bool),
SDLFunc("SDL_HasMMX", None, SDL_bool),
SDLFunc("SDL_Has3DNow", None, SDL_bool),
SDLFunc("SDL_HasSSE", None, SDL_bool),
SDLFunc("SDL_HasSSE2", None, SDL_bool),
SDLFunc("SDL_HasSSE3", None, SDL_bool),
SDLFunc("SDL_HasSSE41", None, SDL_bool),
SDLFunc("SDL_HasSSE42", None, SDL_bool),
SDLFunc("SDL_GetSystemRAM", None, c_int),
SDLFunc("SDL_HasAVX", None, SDL_bool),
SDLFunc("SDL_HasAVX2", None, SDL_bool, added='2.0.4'),
SDLFunc("SDL_HasAVX512F", None, SDL_bool, added='2.0.9'),
SDLFunc("SDL_HasARMSIMD", None, SDL_bool, added='2.0.12'),
SDLFunc("SDL_HasNEON", None, SDL_bool, added='2.0.6'),
SDLFunc("SDL_SIMDGetAlignment", None, c_size_t, added='2.0.10'),
SDLFunc("SDL_SIMDAlloc", [c_size_t], c_void_p, added='2.0.10'),
SDLFunc("SDL_SIMDRealloc", [c_void_p, c_size_t], c_void_p, added='2.0.14'),
SDLFunc("SDL_SIMDFree", [c_void_p], None, added='2.0.10'),
]
_ctypes = AttributeDict()
for f in _funcdefs:
_ctypes[f.name] = _bind(f.name, f.args, f.returns, f.added)
__all__.append(f.name) # Add all bound functions to module namespace
# Aliases for ctypes bindings
SDL_GetCPUCount = _ctypes["SDL_GetCPUCount"]
SDL_GetCPUCacheLineSize = _ctypes["SDL_GetCPUCacheLineSize"]
SDL_HasRDTSC = _ctypes["SDL_HasRDTSC"]
SDL_HasAltiVec = _ctypes["SDL_HasAltiVec"]
SDL_HasMMX = _ctypes["SDL_HasMMX"]
SDL_Has3DNow = _ctypes["SDL_Has3DNow"]
SDL_HasSSE = _ctypes["SDL_HasSSE"]
SDL_HasSSE2 = _ctypes["SDL_HasSSE2"]
SDL_HasSSE3 = _ctypes["SDL_HasSSE3"]
SDL_HasSSE41 = _ctypes["SDL_HasSSE41"]
SDL_HasSSE42 = _ctypes["SDL_HasSSE42"]
SDL_GetSystemRAM = _ctypes["SDL_GetSystemRAM"]
SDL_HasAVX = _ctypes["SDL_HasAVX"]
SDL_HasAVX2 = _ctypes["SDL_HasAVX2"]
SDL_HasAVX512F = _ctypes["SDL_HasAVX512F"]
SDL_HasARMSIMD = _ctypes["SDL_HasARMSIMD"]
SDL_HasNEON = _ctypes["SDL_HasNEON"]
SDL_SIMDGetAlignment = _ctypes["SDL_SIMDGetAlignment"]
SDL_SIMDAlloc = _ctypes["SDL_SIMDAlloc"]
SDL_SIMDRealloc = _ctypes["SDL_SIMDRealloc"]
SDL_SIMDFree = _ctypes["SDL_SIMDFree"]
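# Usage sketch (assumes the SDL2 shared library was found by the dll bindings):
#   print(SDL_GetCPUCount(), "logical cores /",
#         SDL_GetCPUCacheLineSize(), "byte cache lines /",
#         SDL_GetSystemRAM(), "MB of RAM")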
|
476530
|
import argparse
import os
import math
import ruamel.yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_captioning import XVLM
import utils
from utils.checkpointer import Checkpointer
from utils.hdfs_io import hmkdir, hexists
from dataset.utils import collect_result, coco_caption_eval
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer
def train(model, data_loader, optimizer, epoch, device, scheduler, config):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.5f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
for i, (image, caption, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device, non_blocking=True)
loss = model(image, caption)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
metric_logger.update(loss=loss.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.5f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluation(model, data_loader, device, config):
# test
model.eval()
model_without_ddp = model
if hasattr(model, 'module'):
model_without_ddp = model.module
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Caption generation:'
print_freq = 50
result = []
for image, image_id in metric_logger.log_every(data_loader, print_freq, header):
image = image.to(device, non_blocking=True)
captions = model_without_ddp.generate(image, sample=False, num_beams=config['num_beams'], max_length=config['max_length'],
min_length=config['min_length'])
for caption, img_id in zip(captions, image_id):
result.append({"image_id": img_id.item(), "caption": caption})
return result
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
world_size = utils.get_world_size()
if world_size > 8:
assert hexists(args.output_hdfs) and args.output_hdfs.startswith('hdfs'), "for collect_result among nodes"
if args.bs > 0:
config['batch_size_train'] = args.bs // world_size
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
start_epoch = 0
max_epoch = config['schedular']['epochs']
print("Creating captioning dataset")
train_dataset, val_dataset, test_dataset = create_dataset('caption_coco', config)
datasets = [train_dataset, val_dataset, test_dataset]
train_dataset_size = len(train_dataset)
world_size = utils.get_world_size()
if utils.is_main_process():
print(f"### data {train_dataset_size}, batch size, {config['batch_size_train']} x {world_size}")
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
else:
samplers = [None, None, None]
train_loader, val_loader, test_loader = create_loader(datasets, samplers,
batch_size=[config['batch_size_train'], config['batch_size_test'], config['batch_size_test']],
num_workers=[4, 4, 4], is_trains=[True, False, False],
collate_fns=[None, None, None])
print("Creating model")
model = XVLM(config=config)
model.load_pretrained(args.checkpoint, config, is_eval=args.evaluate, load_capt_pretrain=args.load_capt_pretrain)
model = model.to(device)
print("### Total Params: ", sum(p.numel() for p in model.parameters() if p.requires_grad))
start_time = time.time()
print("### output_dir, ", args.output_dir, flush=True)
print("### output_hdfs, ", args.output_hdfs, flush=True)
if args.evaluate:
print("Start evaluating")
test_result = evaluation(model, test_loader, device, config)
test_result_file = collect_result(test_result, 'test_eval', local_wdir=args.result_dir,
hdfs_wdir=args.output_hdfs,
write_to_hdfs=world_size > 8, save_result=True, remove_duplicate='image_id')
if utils.is_main_process():
coco_test = coco_caption_eval(config['test_gt_file'], test_result_file)
log_stats = {**{f'test_{k}': v for k, v in coco_test.eval.items()}}
print(log_stats, flush=True)
dist.barrier()
else:
arg_opt = utils.AttrDict(config['optimizer'])
optimizer = create_optimizer(arg_opt, model)
arg_sche = utils.AttrDict(config['schedular'])
arg_sche['step_per_epoch'] = math.ceil(train_dataset_size / (config['batch_size_train'] * world_size))
lr_scheduler = create_scheduler(arg_sche, optimizer)
checkpointer = Checkpointer(args.output_hdfs if hexists(args.output_hdfs) else args.output_dir)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
# print("Start evaluating (zero shot)")
# test_result = evaluation(model, test_loader, device, config)
# test_result_file = collect_result(test_result, 'test_eval_zeroshot', local_wdir=args.result_dir,
# hdfs_wdir=args.output_hdfs,
# write_to_hdfs=world_size > 8, save_result=True, remove_duplicate='image_id')
#
# if utils.is_main_process():
# coco_test = coco_caption_eval(config['test_gt_file'], test_result_file)
# log_stats = {**{f'test_{k}': v for k, v in coco_test.eval.items()}}
# print(log_stats, flush=True)
#
# dist.barrier()
for epoch in range(start_epoch, max_epoch):
if args.distributed:
train_loader.sampler.set_epoch(epoch)
train_stats = train(model, train_loader, optimizer, epoch, device, lr_scheduler, config)
if utils.is_main_process():
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch}
model_without_ddp = model
if hasattr(model, 'module'):
model_without_ddp = model.module
save_obj = {
'model': model_without_ddp.state_dict(),
# 'optimizer': optimizer.state_dict(),
# 'lr_scheduler': lr_scheduler.state_dict(),
'config': config,
# 'epoch': epoch,
}
checkpointer.save_checkpoint(model_state=save_obj,
epoch=epoch,
training_states=optimizer.state_dict())
if epoch >= config['start_eval']:
# val_result = evaluation(model, val_loader, device, config)
# val_result_file = collect_result(val_result, 'val_epoch%d' % epoch, local_wdir=args.result_dir, hdfs_wdir=args.output_hdfs,
# write_to_hdfs=world_size > 8, save_result=True, remove_duplicate='image_id')
test_result = evaluation(model, test_loader, device, config)
test_result_file = collect_result(test_result, 'test_epoch%d' % epoch, local_wdir=args.result_dir, hdfs_wdir=args.output_hdfs,
write_to_hdfs=world_size > 8, save_result=True, remove_duplicate='image_id')
if utils.is_main_process():
# coco_val = coco_caption_eval(config['val_gt_file'], val_result_file)
coco_test = coco_caption_eval(config['test_gt_file'], test_result_file)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
# **{f'val_{k}': v for k, v in coco_val.eval.items()},
**{f'test_{k}': v for k, v in coco_test.eval.items()},
'epoch': epoch}
dist.barrier()
if utils.is_main_process():
with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
f.write(json.dumps(log_stats) + "\n")
dist.barrier()
os.system(f"cat {args.output_dir}/log.txt")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('### Time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, required=True)
parser.add_argument('--config', default='./configs/VQA.yaml')
parser.add_argument('--output_dir', default='output/vqa')
parser.add_argument('--output_hdfs', type=str, default='', help="to collect eval results among nodes")
parser.add_argument('--device', default='cuda')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--distributed', action='store_false')
parser.add_argument('--load_capt_pretrain', action='store_true')
parser.add_argument('--bs', default=-1, type=int)
parser.add_argument('--evaluate', action='store_true')
# for self-critical sequence training
parser.add_argument('--scst', action='store_true', help='Self-critical sequence training')
args = parser.parse_args()
config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
args.result_dir = os.path.join(args.output_dir, 'result')
hmkdir(args.output_dir)
hmkdir(args.result_dir)
yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
if len(args.output_hdfs):
hmkdir(args.output_hdfs)
main(args, config)
|
476566
|
from .scp_policy_resource import ScpPolicyResource
from .scp_attachment_resource import ScpAttachmentResource
|
476618
|
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture
# Fixtures must be visible for lazy_fixture() calls.
from .fixtures import * # noqa
@pytest.fixture(
params=(
lazy_fixture('cof1'),
lazy_fixture('cof2'),
lazy_fixture('cof3'),
),
)
def case_data(request):
return request.param
@pytest.fixture(params=(0, ))
def aligner_edge(request):
return request.param
@pytest.fixture(params=(0, ))
def id(request):
return request.param
@pytest.fixture(
params=(
np.array([0, 0, 0]),
np.array([-20, 1, 21]),
),
)
def cell(request):
return np.array(request.param)
@pytest.fixture(
params=(
[0, 0, 0],
[1, 2, -20],
),
)
def position(request):
return np.array(request.param, dtype=np.float64)
|
476625
|
import os
import sys
from typing import Dict, Set
import setuptools
is_py37_or_newer = sys.version_info >= (3, 7)
package_metadata: dict = {}
with open("./src/datahub_airflow_plugin/__init__.py") as fp:
exec(fp.read(), package_metadata)
def get_long_description():
root = os.path.dirname(__file__)
with open(os.path.join(root, "README.md")) as f:
description = f.read()
return description
base_requirements = {
    # Compatibility.
"dataclasses>=0.6; python_version < '3.7'",
"typing_extensions>=3.10.0.2",
"mypy_extensions>=0.4.3",
# Actual dependencies.
"typing-inspect",
"pydantic>=1.5.1",
"apache-airflow >= 1.10.2",
"acryl-datahub[airflow] >= 0.8.36",
}
mypy_stubs = {
"types-dataclasses",
"sqlalchemy-stubs",
"types-pkg_resources",
"types-six",
"types-python-dateutil",
"types-requests",
"types-toml",
"types-PyYAML",
"types-freezegun",
"types-cachetools",
# versions 0.1.13 and 0.1.14 seem to have issues
"types-click==0.1.12",
"types-tabulate",
# avrogen package requires this
"types-pytz",
}
base_dev_requirements = {
*base_requirements,
*mypy_stubs,
"black>=21.12b0",
"coverage>=5.1",
"flake8>=3.8.3",
"flake8-tidy-imports>=4.3.0",
"isort>=5.7.0",
"mypy>=0.920",
# pydantic 1.8.2 is incompatible with mypy 0.910.
# See https://github.com/samuelcolvin/pydantic/pull/3175#issuecomment-995382910.
"pydantic>=1.9.0",
"pytest>=6.2.2",
"pytest-asyncio>=0.16.0",
"pytest-cov>=2.8.1",
"pytest-docker>=0.10.3,<0.12",
"tox",
"deepdiff",
"requests-mock",
"freezegun",
"jsonpickle",
"build",
"twine",
"packaging",
}
base_dev_requirements_airflow_1 = base_dev_requirements.copy()
dev_requirements = {
*base_dev_requirements,
}
dev_requirements_airflow_1_base = {
"apache-airflow==1.10.15",
"apache-airflow-backport-providers-snowflake",
}
dev_requirements_airflow_1 = {
*base_dev_requirements_airflow_1,
*dev_requirements_airflow_1_base,
}
entry_points = {
"airflow.plugins": "acryl-datahub-airflow-plugin = datahub_airflow_plugin.datahub_plugin:DatahubPlugin"
}
setuptools.setup(
# Package metadata.
name=package_metadata["__package_name__"],
version=package_metadata["__version__"],
url="https://datahubproject.io/",
project_urls={
"Documentation": "https://datahubproject.io/docs/",
"Source": "https://github.com/datahub-project/datahub",
"Changelog": "https://github.com/datahub-project/datahub/releases",
},
license="Apache License 2.0",
description="Datahub Airflow plugin to capture executions and send to Datahub",
long_description=get_long_description(),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved",
"License :: OSI Approved :: Apache Software License",
"Operating System :: Unix",
"Operating System :: POSIX :: Linux",
"Environment :: Console",
"Environment :: MacOS X",
"Topic :: Software Development",
],
# Package info.
zip_safe=False,
python_requires=">=3.6",
package_dir={"": "src"},
packages=setuptools.find_namespace_packages(where="./src"),
entry_points=entry_points,
# Dependencies.
install_requires=list(base_requirements),
extras_require={
"dev": list(dev_requirements),
"dev-airflow1-base": list(dev_requirements_airflow_1_base),
"dev-airflow1": list(dev_requirements_airflow_1),
},
)
|
476626
|
from typing import TYPE_CHECKING
from PySide2.QtGui import QKeySequence, Qt
from .menu import Menu, MenuEntry, MenuSeparator
if TYPE_CHECKING:
from angrmanagement.ui.widgets.qlog_widget import QLogWidget
class LogMenu(Menu):
def __init__(self, log_widget: 'QLogWidget'):
super().__init__("", parent=log_widget)
self.entries.extend([
MenuEntry('&Copy selected content', log_widget.copy_selected_messages,
shortcut=QKeySequence(Qt.CTRL + Qt.Key_C)),
MenuEntry('Copy selected message', log_widget.copy_selected),
MenuEntry('Copy all content', log_widget.copy_all_messages),
MenuEntry('Copy all messages', log_widget.copy_all),
MenuSeparator(),
MenuEntry('C&lear log', log_widget.clear_log),
])
|
476635
|
from decimal import Decimal as D
from django.test import TestCase
from parameterized import parameterized
from capone.api.actions import create_transaction
from capone.api.actions import credit
from capone.api.actions import debit
from capone.models import LedgerEntry
from capone.models import MatchType
from capone.models import Transaction
from capone.tests.factories import LedgerFactory
from capone.tests.factories import OrderFactory
from capone.tests.factories import UserFactory
class TestFilterByRelatedObjects(TestCase):
"""
Test Transaction.objects.filter_by_related_objects.
"""
AMOUNT = D('100')
@classmethod
def _create_transaction_with_evidence(cls, evidence):
return create_transaction(
cls.create_user,
evidence=evidence,
ledger_entries=[
LedgerEntry(
ledger=cls.ledger,
amount=credit(cls.AMOUNT)),
LedgerEntry(
ledger=cls.ledger,
amount=debit(cls.AMOUNT)),
]
)
@classmethod
def setUpTestData(cls):
cls.create_user = UserFactory()
cls.order_1 = OrderFactory()
cls.order_2 = OrderFactory()
cls.ledger = LedgerFactory()
cls.transaction_with_both_orders = (
cls._create_transaction_with_evidence([
cls.order_1,
cls.order_2,
])
)
cls.transaction_with_only_order_1 = (
cls._create_transaction_with_evidence([
cls.order_1,
])
)
cls.transaction_with_only_order_2 = (
cls._create_transaction_with_evidence([
cls.order_1,
])
)
cls.transaction_with_neither_order = (
cls._create_transaction_with_evidence([
OrderFactory(),
])
)
cls.transaction_with_three_orders = (
cls._create_transaction_with_evidence([
cls.order_1,
cls.order_2,
OrderFactory(),
])
)
@parameterized.expand([
(MatchType.ANY, 'all'),
(MatchType.ALL, 'all'),
(MatchType.NONE, 'all'),
(MatchType.EXACT, 'none'),
])
def test_filter_with_no_evidence(self, match_type, queryset_function_name):
"""
Method returns correct Transactions with no evidence given.
"""
result_queryset = getattr(
Transaction.objects, queryset_function_name)().values_list('id')
self.assertEqual(
set(result_queryset),
set(Transaction.objects.filter_by_related_objects(
[], match_type=match_type).values_list('id'))
)
@parameterized.expand([
(MatchType.ANY, [True, True, True, False, True]),
(MatchType.ALL, [True, False, False, False, True]),
(MatchType.NONE, [False, False, False, True, False]),
(MatchType.EXACT, [True, False, False, False, False]),
])
def test_filters(self, match_type, results):
"""
Method returns correct Transactions with various evidence given.
This test uses the differing groups of transactions from
`setUpTestData` to test that different `MatchTypes` give the right
results. Note that the list of booleans in the `parameterized.expand`
decorator maps to the querysets in `query_list`.
"""
query_list = [
self.transaction_with_both_orders,
self.transaction_with_only_order_1,
self.transaction_with_only_order_2,
self.transaction_with_neither_order,
self.transaction_with_three_orders,
]
for query, query_should_be_in_result in zip(query_list, results):
if query_should_be_in_result:
self.assertIn(
query,
Transaction.objects.filter_by_related_objects(
[self.order_1, self.order_2],
match_type=match_type
)
)
else:
self.assertNotIn(
query,
Transaction.objects.filter_by_related_objects([
self.order_1, self.order_2,
], match_type=match_type)
)
@parameterized.expand([
(MatchType.ANY, 1),
(MatchType.ALL, 1),
(MatchType.NONE, 1),
(MatchType.EXACT, 4),
])
def test_query_counts(self, match_type, query_counts):
"""
`filter_by_related_objects` should use a constant number of queries.
"""
with self.assertNumQueries(query_counts):
list(Transaction.objects.filter_by_related_objects(
[self.order_1],
match_type=match_type
))
with self.assertNumQueries(query_counts):
list(Transaction.objects.filter_by_related_objects(
[self.order_1, self.order_2],
match_type=match_type
))
def test_invalid_match_type(self):
"""
Invalid MatchTypes are not allowed.
"""
with self.assertRaises(ValueError):
Transaction.objects.filter_by_related_objects(match_type='foo')
def test_chaining_filter_to_existing_queryset(self):
"""
`filter_by_related_objects` can be used like any other queryset filter.
"""
self.assertEqual(Transaction.objects.count(), 5)
self.assertEqual(
Transaction.objects.filter_by_related_objects(
[self.order_1]).count(), 4)
transactions_restricted_by_ledger = (
Transaction.objects.filter(ledgers__in=[self.ledger])
)
self.assertEqual(
transactions_restricted_by_ledger.filter_by_related_objects(
[self.order_1]).distinct().count(), 4)
|
476637
|
import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GymEnv
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestContinuousMLPQFunction(TfGraphTestCase):
@pytest.mark.parametrize('hidden_sizes', [(1, ), (2, ), (3, ), (1, 1),
(2, 2)])
def test_q_vals(self, hidden_sizes):
env = GymEnv(DummyBoxEnv())
obs_dim = env.spec.observation_space.flat_dim
act_dim = env.spec.action_space.flat_dim
qf = ContinuousMLPQFunction(env_spec=env.spec,
action_merge_layer=0,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=None,
hidden_w_init=tf.ones_initializer(),
output_w_init=tf.ones_initializer())
obs = np.full(obs_dim, 1).flatten()
act = np.full(act_dim, 1).flatten()
expected_output = np.full((1, ),
(obs_dim + act_dim) * np.prod(hidden_sizes))
outputs = qf.get_qval([obs], [act])
assert np.array_equal(outputs[0], expected_output)
outputs = qf.get_qval([obs, obs, obs], [act, act, act])
for output in outputs:
assert np.array_equal(output, expected_output)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), (1, )),
((2, ), (2, )),
((1, 1), (1, )),
((2, 2), (2, )),
])
def test_output_shape(self, obs_dim, action_dim):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec)
env.reset()
obs = env.step(1).observation
obs = obs.flatten()
act = np.full(action_dim, 0.5).flatten()
outputs = qf.get_qval([obs], [act])
assert outputs.shape == (1, 1)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), (1, )),
((2, ), (2, )),
((1, 1), (1, )),
((2, 2), (2, )),
])
def test_build(self, obs_dim, action_dim):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec,
action_merge_layer=0,
hidden_sizes=(1, ),
hidden_nonlinearity=None,
hidden_w_init=tf.ones_initializer(),
output_w_init=tf.ones_initializer())
obs = np.full(obs_dim, 1).flatten()
act = np.full(action_dim, 1).flatten()
output1 = qf.get_qval([obs], [act])
input_var1 = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs.shape[0]))
input_var2 = tf.compat.v1.placeholder(tf.float32,
shape=(None, act.shape[0]))
q_vals = qf.build(input_var1, input_var2, 'another')
output2 = self.sess.run(q_vals,
feed_dict={
input_var1: [obs],
input_var2: [act]
})
expected_output = np.full((1, ),
np.prod(obs_dim) + np.prod(action_dim))
assert np.array_equal(output1, output2)
assert np.array_equal(output2[0], expected_output)
@pytest.mark.parametrize('obs_dim, action_dim', [
((1, ), (1, )),
((2, ), (2, )),
((1, 1), (1, )),
((2, 2), (2, )),
])
def test_is_pickleable(self, obs_dim, action_dim):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec)
env.reset()
obs = env.step(1).observation
obs = obs.flatten()
act = np.full(action_dim, 0.5).flatten()
with tf.compat.v1.variable_scope('ContinuousMLPQFunction', reuse=True):
bias = tf.compat.v1.get_variable('mlp_concat/hidden_0/bias')
# assign it to all one
bias.load(tf.ones_like(bias).eval())
output1 = qf.get_qval([obs], [act])
h_data = pickle.dumps(qf)
with tf.compat.v1.Session(graph=tf.Graph()):
qf_pickled = pickle.loads(h_data)
output2 = qf_pickled.get_qval([obs], [act])
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('obs_dim, action_dim, hidden_sizes', [
((1, ), (1, ), (3, )),
((2, ), (2, ), (32, )),
((1, 1), (1, ), (3, 3)),
((2, 2), (2, ), (32, 32)),
])
def test_clone(self, obs_dim, action_dim, hidden_sizes):
env = GymEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=hidden_sizes)
qf_clone = qf.clone('another_qf')
assert qf_clone._hidden_sizes == qf._hidden_sizes
for cloned_param, param in zip(qf_clone.parameters.values(),
qf.parameters.values()):
assert np.array_equal(cloned_param, param)
|
476642
|
import pytest
import torch
from utils import seq_len_to_mask
from module import DotAttention, MultiHeadAttention
torch.manual_seed(1)
q = torch.randn(4, 6, 20) # [B, L, H]
k = v = torch.randn(4, 5, 20) # [B, S, H]
key_padding_mask = seq_len_to_mask([5, 4, 3, 2], max_len=5)
attention_mask = torch.tensor([1, 0, 0, 1, 0])  # positions equal to 1 are masked out
head_mask = torch.tensor([0, 1, 0, 0])  # heads equal to 1 are masked out
# m = DotAttention(dropout=0.0)
# ao,aw = m(q,k,v,key_padding_mask)
# print(ao.shape,aw.shape)
# print(aw)
def test_DotAttention():
m = DotAttention(dropout=0.0)
ao, aw = m(q, k, v, mask_out=key_padding_mask)
assert ao.shape == torch.Size([4, 6, 20])
assert aw.shape == torch.Size([4, 6, 5])
assert torch.all(aw[1, :, -1:].eq(0)) == torch.all(aw[2, :, -2:].eq(0)) == torch.all(aw[3, :, -3:].eq(0)) == True
def test_MultiHeadAttention():
m = MultiHeadAttention(embed_dim=20, num_heads=4, dropout=0.0)
    ao, aw = m(q, k, v, key_padding_mask=key_padding_mask, attention_mask=attention_mask, head_mask=head_mask)
assert ao.shape == torch.Size([4, 6, 20])
assert aw.shape == torch.Size([4, 4, 6, 5])
assert aw.unbind(dim=1)[1].bool().any() == False
if __name__ == '__main__':
pytest.main()
|
476661
|
import torch
class Net(torch.jit.ScriptModule):
def __init__(self, n: int):
super().__init__()
self._conv1 = torch.nn.Conv2d(
1, 10, 4, stride=1, padding=0, dilation=1, groups=1, bias=True
)
self._conv2 = torch.nn.Conv2d(
10, 10, 3, stride=1, padding=0, dilation=1, groups=1, bias=True
)
self._conv3 = torch.nn.Conv2d(
10, 10, 2, stride=1, padding=0, dilation=1, groups=1, bias=True
)
self._dense6 = torch.nn.Linear(10, 1, bias=False)
@torch.jit.script_method
def forward(self, x):
x = x.view([x.shape[0], 1, 4, 4])
x = torch.cat([x, x[:, :, :3, :]], dim=2)
x = torch.cat([x, x[:, :, :, :3]], dim=3)
x = self._conv1(x)
x = torch.nn.functional.relu(x)
x = torch.cat([x, x[:, :, :2, :]], dim=2)
x = torch.cat([x, x[:, :, :, :2]], dim=3)
x = self._conv2(x)
x = torch.nn.functional.relu(x)
x = torch.cat([x, x[:, :, :1, :]], dim=2)
x = torch.cat([x, x[:, :, :, :1]], dim=3)
x = self._conv3(x)
x = torch.nn.functional.relu(x)
x = x.view([x.shape[0], 10, -1])
x = x.mean(dim=2)
x = self._dense6(x)
# x = torch.nn.functional.softplus(x)
x = torch.exp(x)
return x
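if __name__ == "__main__":
    # Minimal smoke test (illustrative only; the constructor's `n` is not used by
    # forward(), so any value works here).
    net = Net(n=0)
    batch = torch.rand(2, 16)  # forward() reshapes each row into a 1x4x4 board
    print(net(batch).shape)    # -> torch.Size([2, 1])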
|
476675
|
from abc import ABC, abstractmethod
from typing import Dict, Callable, Any, Optional, List
from dataclasses import dataclass
@dataclass
class BaseParams:
cpus_per_worker: int = 1
use_gpu: bool = False
gpus_per_worker: Optional[int] = None
def __post_init__(self):
if self.gpus_per_worker and not self.use_gpu:
raise ValueError("gpus_per_worker is set, but use_gpu is False. "
"use_gpu must be True if gpus_per_worker is "
"set. ")
if self.use_gpu and isinstance(self.gpus_per_worker,
int) and self.gpus_per_worker < 1:
raise ValueError(
f"gpus_per_worker must be >= 1: Got {self.gpus_per_worker}.")
self.gpus_per_worker = self.gpus_per_worker or int(self.use_gpu)
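# Illustration of the validation above (not part of the original module):
#   BaseParams(use_gpu=True)                     -> gpus_per_worker defaults to 1
#   BaseParams(use_gpu=True, gpus_per_worker=2)  -> accepted as-is
#   BaseParams(gpus_per_worker=2)                -> ValueError (use_gpu is False)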
class Adapter(ABC):
"""Adapter for executing Ray calls for various types(e.g. static and elastic)
Horovod jobs.
"""
@abstractmethod
def start(self,
executable_cls: type = None,
executable_args: Optional[List] = None,
executable_kwargs: Optional[Dict] = None,
extra_env_vars: Optional[Dict] = None):
"""Starts the Adapter
Args:
executable_cls (type): The class that will be created within
an actor (BaseHorovodWorker). This will allow Horovod
to establish its connections and set env vars.
executable_args (List): Arguments to be passed into the
worker class upon initialization.
executable_kwargs (Dict): Keyword arguments to be passed into the
worker class upon initialization.
extra_env_vars (Dict): Environment variables to be set
on the actors (worker processes) before initialization.
"""
raise NotImplementedError("Method must be implemented in a subclass")
@abstractmethod
def execute(self, fn: Callable[["executable_cls"], Any],
callbacks: Optional[List[Callable]] = None) -> List[Any]:
"""Executes the provided function on all workers.
Args:
fn: Target function to be invoked on every object.
callbacks: List of callables. Each callback must either
be a callable function or a class that implements __call__.
Every callback will be invoked on every value logged
by the rank 0 worker.
Returns:
Deserialized return values from the target function.
"""
raise NotImplementedError("Method must be implemented in a subclass")
@abstractmethod
def run(self,
fn: Callable[[Any], Any],
args: Optional[List] = None,
kwargs: Optional[Dict] = None,
callbacks: Optional[List[Callable]] = None) -> List[Any]:
"""Executes the provided function on all workers.
Args:
fn: Target function that can be executed with arbitrary
args and keyword arguments.
args: List of arguments to be passed into the target function.
kwargs: Dictionary of keyword arguments to be
passed into the target function.
callbacks: List of callables. Each callback must either
be a callable function or a class that implements __call__.
Every callback will be invoked on every value logged
by the rank 0 worker.
Returns:
Deserialized return values from the target function.
"""
raise NotImplementedError("Method must be implemented in a subclass")
@abstractmethod
def run_remote(self,
fn: Callable[[Any], Any],
args: Optional[List] = None,
kwargs: Optional[Dict] = None,
callbacks: Optional[List[Callable]] = None):
"""Executes the provided function on all workers.
Args:
fn: Target function that can be executed with arbitrary
args and keyword arguments.
args: List of arguments to be passed into the target function.
kwargs: Dictionary of keyword arguments to be
passed into the target function.
Returns:
list: List of ObjectRefs that you can run `ray.get` on to
retrieve values.
"""
raise NotImplementedError("Method must be implemented in a subclass")
@abstractmethod
def execute_single(self,
fn: Callable[["executable_cls"], Any]) -> List[Any]:
"""Executes the provided function on the rank 0 worker (chief).
Args:
fn: Target function to be invoked on the chief object.
Returns:
Deserialized return values from the target function.
"""
raise NotImplementedError("Method must be implemented in a subclass")
@abstractmethod
def shutdown(self):
"""Destroys the adapter."""
raise NotImplementedError("Method must be implemented in a subclass")
|
476711
|
from __future__ import unicode_literals
import six
import abc
import logging
from jsonpointer import resolve_pointer, JsonPointerException
logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseTransformer(object):
@abc.abstractmethod
def _transform_string(self, string, doc):
raise NotImplementedError
@abc.abstractproperty
def schema(self):
raise NotImplementedError
def transform(self, doc, fail=False):
return self._transform_dict(self.schema, doc, fail=fail)
def _transform_dict(self, d, doc, fail=False):
return {
key: self._maybe_transform_value(value, doc, fail=fail)
for key, value in d.items()
}
def _transform_list(self, l, doc, fail=False):
return [
self._maybe_transform_value(item, doc, fail=fail)
for item in l
]
def _maybe_transform_value(self, value, doc, fail=False):
try:
return self._transform_value(value, doc, fail=fail)
except Exception as e:
if fail:
raise
logger.exception(e)
return None
def _transform_value(self, value, doc, fail=False):
if isinstance(value, dict):
return self._transform_dict(value, doc, fail=fail)
elif isinstance(value, list):
return self._transform_list(value, doc, fail=fail)
elif isinstance(value, tuple) and len(value) == 2 and isinstance(value[0], tuple):
return self._transform_args_kwargs(value, doc)
elif isinstance(value, tuple):
return self._transform_tuple(value, doc)
elif isinstance(value, six.string_types):
return self._transform_string(value, doc)
elif callable(value):
return value(doc)
def _transform_tuple(self, l, doc):
fn, values = l[-1], l[:-1]
args = []
for value in values:
if isinstance(value, six.string_types):
args.append(self._transform_string(value, doc))
elif callable(value):
args.append(value(doc))
return fn(*args)
def _transform_args_kwargs(self, l, doc):
fn = l[1]
return fn(
*self._transform_args(l[0], doc),
**self._transform_kwargs(l[0], doc)
)
def _transform_args(self, t, doc):
return [self._transform_string(arg, doc) for arg in t[0]]
def _transform_kwargs(self, t, doc):
return {
k: self._transform_string(v, doc) for k, v in t[1].items()
} if len(t) == 2 else {}
@six.add_metaclass(abc.ABCMeta)
class XMLTransformer(BaseTransformer):
namespaces = {}
def _transform_string(self, string, doc):
return doc.xpath(string, namespaces=self.namespaces)
@six.add_metaclass(abc.ABCMeta)
class JSONTransformer(BaseTransformer):
def _transform_string(self, val, doc):
try:
return resolve_pointer(doc, val)
except JsonPointerException as e:
# This is because of jsonpointer's exception structure
if 'not found in' in e.args[0] or 'is not a valid list index' in e.args[0]:
return None
raise e
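# Illustrative subclass (a sketch, not part of the original module): the schema maps
# output keys to JSON pointers or callables, which _transform_value resolves above.
class ExampleJSONTransformer(JSONTransformer):
    schema = {
        'title': '/title',
        'first_author': '/authors/0',
        'source': lambda doc: 'example',
    }
# ExampleJSONTransformer().transform({'title': 'A', 'authors': ['B']})
# -> {'title': 'A', 'first_author': 'B', 'source': 'example'}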
|
476727
|
import time
from datetime import datetime
from smtplib import SMTPRecipientsRefused
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset, password_reset_confirm
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.timezone import activate
from django_celery_results.models import TaskResult
from rest_framework.authtoken.models import Token
from cropwatch.apps.metrics.forms import *
from cropwatch.apps.metrics.models import *
from cropwatch.settings import DEFAULT_FROM_EMAIL
def status():
stat = '{ "status": "OK" }'
return HttpResponse(stat, content_type='application/json')
@login_required
def base(request, form_class=ioTankForm):
object_list = []
bot_forms = {}
account_settings = AccountSettings.objects.get(user=request.user)
activate(account_settings.timezone)
if not ioTank.objects.filter(owner=request.user):
bots = None
else:
bots = ioTank.objects.filter(owner=request.user)
for bot in bots:
form = form_class(request.POST or None, instance=bot)
bot_forms[bot] = form
return render(request, 'metrics.html',
{'cal_json': object_list, 'bots': bots, 'tz': account_settings.timezone, 'bot_forms': bot_forms})
@login_required
def iotank_edit(request, uuid=None, form_class=ioTankForm, ):
bot = ioTank.objects.get(id=uuid)
if request.method == "POST":
if request.POST["action"] == "Save":
bot_form = form_class(request.POST, instance=bot)
if bot_form.is_valid():
form = bot_form.save(commit=False)
form.save()
return HttpResponseRedirect('/devices/')
else:
return HttpResponseRedirect('/devices/')
else:
return HttpResponseRedirect('/devices/')
return HttpResponseRedirect('/devices/')
@login_required
def notice(request):
last_month = datetime.today() - timedelta(days=15)
notices = Notice.objects.filter(owner=request.user).filter(timestamp__gte=last_month).order_by('-timestamp')
final = {}
settings = AccountSettings.objects.get(user=request.user)
activate(settings.timezone)
for notice in notices:
qs = TaskResult.objects.get(task_id=notice.taskid)
final[notice] = qs
return render(request, 'notice.html', {'final': final, 'settings': settings})
@login_required
def flot_ajax(request, start, end, uuid):
data = ""
    # check that the incoming values are integers
    try:
        int(start) and int(end)
        # are they 13-digit integers (i.e. millisecond timestamps)?
if len(start) == 13 and len(end) == 13:
t1 = []
t2 = []
h = []
uv = []
l = []
# does the user have an iotank?
if not ioTank.objects.filter(owner=request.user):
bots = None
else:
bots = ioTank.objects.get(owner=request.user, id=uuid)
# Z for UTC %z for localtz
start = time.strftime('%Y-%m-%d %H:%M:%SZ', time.gmtime(int(start) / 1000.))
end = time.strftime('%Y-%m-%d %H:%M:%SZ', time.gmtime(int(end) / 1000.))
readings = SensorReading.objects.filter(bot=bots, timestamp__range=(start, end)).order_by('timestamp')
for i in readings:
if bots.u == 'F':
i.t1 = 9.0 / 5.0 * int(i.t1) + 32
i.t2 = 9.0 / 5.0 * int(i.t2) + 32
t1_list = []
t2_list = []
h_list = []
uv_list = []
l_list = []
unixtime = int(time.mktime(i.timestamp.timetuple()) * 1000)
t1_list.append(unixtime)
t1_list.append(float(i.t1))
t1.append(t1_list)
t2_list.append(unixtime)
t2_list.append(int(i.t2))
t2.append(t2_list)
h_list.append(unixtime)
h_list.append(int(i.h))
h.append(h_list)
uv_list.append(unixtime)
uv_list.append(float(i.uv))
uv.append(uv_list)
l_list.append(unixtime)
l_list.append(float(i.l))
l.append(l_list)
data = '{"data":{"t1":' + str(t1) + ',"t2":' + str(t2) + ',"h":' + str(h) + ',"uv":' + str(
uv) + ',"l":' + str(l) + '}}'
except ValueError:
return HttpResponse(start + end, content_type='application/json')
return HttpResponse(data, content_type='application/json')
# List all devices
@login_required
def devices(request, form_class=ioTankForm, ):
account_settings = AccountSettings.objects.get(user=request.user)
activate(account_settings.timezone)
bot_forms = {}
if not ioTank.objects.filter(owner=request.user):
messages.error(request, "You need to add an ioTank", extra_tags='safe')
else:
bots = ioTank.objects.filter(owner=request.user)
for bot in bots:
form = form_class(request.POST or None, instance=bot)
bot.token = Token.objects.get(user=bot.bot_user)
bot_forms[bot] = form
return render(request, "devices.html", {'bot_forms': bot_forms})
# Add new ioTank
@login_required
def bot_add(request):
newio = ioTank(owner=request.user)
user = User.objects.create_user(str(newio)[:30])
newio.bot_user_id = user.id
newio.save()
Token.objects.get_or_create(user=user)
return HttpResponseRedirect('/devices/')
# Delete ioTank and all sensor data for said unit.
@login_required
def bot_delete(request, uuid=None):
bot = ioTank.objects.get(id=uuid)
bot.bot_user.delete()
return HttpResponseRedirect('/devices/')
# Regen Token
@login_required
def bot_regen(request, uuid=None):
bot = ioTank.objects.get(id=uuid)
token = Token.objects.get(user=bot.bot_user)
token.delete()
Token.objects.get_or_create(user=bot.bot_user)
return HttpResponseRedirect('/devices/')
def reset(request):
# Wrap the built-in password reset view and pass it the arguments
# like the template name, email template name, subject template name
# and the url to redirect after the password reset is initiated.
if request.user.is_authenticated():
return HttpResponseRedirect('/metrics/')
else:
return password_reset(request, template_name='reset.html',
email_template_name='reset_email.html',
subject_template_name='reset_subject.txt',
post_reset_redirect=reverse('resetsent'))
# This view handles password reset confirmation links. See urls.py file for the mapping.
# This view is not used here because the password reset emails with confirmation links
# cannot be sent from this application.
def reset_confirm(request, uidb64=None, token=None):
# Wrap the built-in reset confirmation view and pass to it all the captured parameters like uidb64, token
# and template name, url to redirect after password reset is confirmed.
if request.user.is_authenticated():
return HttpResponseRedirect('/metrics/')
else:
return password_reset_confirm(request, template_name='reset_confirm.html',
uidb64=uidb64, token=token, post_reset_redirect=reverse('success'))
# This view renders a page with success message.
def success(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/metrics/')
else:
return render(request, "success.html")
# This view renders a page with success message.
def resetsent(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/metrics/')
else:
return render(request, "resetsent.html")
@login_required
def settings(request):
errors = "NONE"
setting = AccountSettings.objects.get(user=request.user)
user = User.objects.get(username=request.user)
if request.method == 'POST':
upf = UserProfileForm(request.POST, instance=setting)
emailf = EmailForm(request.POST, instance=user)
if upf.is_valid() and emailf.is_valid():
try:
emailf.save(commit=False)
upf.save()
emailf.save()
except ValidationError as e:
errors = str(e.message)
upf = UserProfileForm(instance=setting)
emailf = EmailForm(instance=user)
else:
upf = UserProfileForm(instance=setting)
emailf = EmailForm(instance=user)
else:
upf = UserProfileForm(instance=setting)
emailf = EmailForm(instance=user)
return render(request, 'settings.html', {'userprofileform': upf, 'email_form': emailf, 'errors': errors})
def register(request):
errors = "NONE"
if request.user.is_authenticated():
return HttpResponseRedirect('/metrics/')
else:
if request.method == 'POST':
uf = UserForm(request.POST, prefix='user')
upf = AccountSettingsForm(request.POST, prefix='userprofile')
print(uf.errors)
print(upf.errors)
if uf.is_valid() and upf.is_valid():
# this is a cheap way to verify e-mails are unique on signup.
# We can't reasonably edit the django user model now
try:
user = uf.save(commit=False)
try:
send_mail(
'ObjectSyndicate.com registration.',
'Your registration is complete. Your username is ' + str(user) + '.',
DEFAULT_FROM_EMAIL,
[uf.cleaned_data['email']],
fail_silently=False,
)
except SMTPRecipientsRefused:
#
errors = "EMAILFAIL"
uf = UserForm(prefix='user')
upf = AccountSettingsForm(prefix='userprofile')
return render(request, 'register.html',
{'userform': uf, 'userprofileform': upf, 'errors': errors})
try:
userprofile = upf.save(commit=False)
user = uf.save()
userprofile.user = user
userprofile.tier = '1'
userprofile.save()
user = authenticate(username=uf.cleaned_data['username'],
                                            password=uf.cleaned_data['password'],
)
login(request, user)
return HttpResponseRedirect('/metrics/')
except ValidationError as e:
#
errors = str(e.message)
uf = UserForm(prefix='user')
upf = AccountSettingsForm(prefix='userprofile')
except ValidationError as e:
#
errors = str(e.message)
uf = UserForm(prefix='user')
upf = AccountSettingsForm(prefix='userprofile')
else:
uf = UserForm(prefix='user')
upf = AccountSettingsForm(prefix='userprofile')
else:
uf = UserForm(prefix='user')
upf = AccountSettingsForm(prefix='userprofile')
return render(request, 'register.html', {'userform': uf, 'userprofileform': upf, 'errors': errors})
|
476729
|
import numpy as np
import argparse
import json
import os
# Function copied from video2tfrecords.py
def split_equal(ids: list, duration: list, num: int, min_duration: int = 256):
sort = sorted(zip(duration, ids))[::-1]
ids_split = [[] for i in range(num)]
    duration_split = [[] for i in range(num)]
    duration_sum = [0] * num
    for d, i in sort:
        if np.sum(d) > min_duration or min_duration <= 0:
            pos = np.argmin(duration_sum)
            ids_split[pos].append(i)
            duration_split[pos].append(d)
            duration_sum[pos] = duration_sum[pos] + np.sum(d)
    return ids_split, duration_split
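# Illustration of split_equal (made-up values, not from the original script):
#   split_equal(['a', 'b', 'c'], [10, 5, 3], num=2, min_duration=0)
#   -> ids:       [['a'], ['b', 'c']]
#      durations: [[10], [5, 3]]
#   Clips are taken longest-first and greedily assigned to the split with the
#   smallest running total duration.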
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('load_path', type=str,
help='The path to a json file containing video information, or a path to a folder containing '
'json files with video information.')
parser.add_argument('split', type=int, help='The number of equal splits.')
    parser.add_argument('-prefix', type=str, default='', help='A save file prefix.')
args = parser.parse_args()
load_path = args.load_path
split = args.split
prefix = args.prefix
if os.path.isdir(load_path):
load_path = [os.path.join(load_path, p) for p in os.listdir(load_path)]
else:
load_path = [load_path]
ids = []
duration = []
for l in load_path:
json_load = json.load(open(l))
ids = ids + json_load['id']
duration = duration + json_load['duration']
if duration[0] is not list:
ids = [[id] for id in ids]
else:
duration = [np.sum(_duration) for _duration in duration]
ids, duration = split_equal(ids, duration, split, -1)
split_chunk_count = 0
split_video_count = 0
split_video_duration = 0
for i in range(len(ids)):
buffer_chunk_count = len(ids[i])
buffer_video_count = int(np.sum([np.sum([len(___ids) for ___ids in __ids]) for __ids in ids[i]]))
buffer_video_duration = int(np.sum([np.sum(d) for d in duration[i]]))
print('split:', i, 'chunks:', buffer_chunk_count, 'videos:',
buffer_video_count, 'duration:', buffer_video_duration)
split_chunk_count += buffer_chunk_count
split_video_count += buffer_video_count
split_video_duration += buffer_video_duration
print('')
print('total num of chunks:', split_chunk_count, 'total num of videos:',
split_video_count, 'total video duration:', split_video_duration)
print('')
for idx, (i, d) in enumerate(zip(ids, duration)):
path = "{}work_split_{}.json".format(prefix, idx)
dump = {'id': i, 'duration': d}
json.dump(dump, open(path, 'w'))
|
476779
|
import sj
import sys
status = 0
def test(prefix, file, rest):
sj.create_and_cd_jalangi_tmp()
ana = sj.execute_return_np(sj.JALANGI_SCRIPT+' --inlineIID --inlineSource --analysis ../src/js/sample_analyses/ChainedAnalyses.js --analysis ../src/js/runtime/SMemory.js --analysis ../src/js/sample_analyses/pldi16/TraceAll.js '+prefix+file+'.js '+rest, savestderr=True)
if 'analysis exception!!!' in ana:
print ana
print "{} failed".format(file)
else:
print "{} passed".format(file)
sj.cd_parent()
with open('tests/unit/unitTests.txt') as fp:
for line in fp:
args = line.split()
if len(args) == 2:
rest = args[1]
else:
rest = ''
test('../tests/unit/',args[0], rest)
exit(status)
|
476840
|
from tests.utils import W3CTestCase
class TestFlexbox_Inline(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_inline'))
|
476896
|
def omp_sections_private():
sum = 7
sum0 = 0
if 'omp parallel':
if 'omp sections private(sum0)':
if 'omp section':
sum0 = 0
for i in range(0, 400):
sum0 += i
if 'omp critical':
sum += sum0
if 'omp section':
sum0 = 0
for i in range(400, 700):
sum0 += i
if 'omp critical':
sum += sum0
if 'omp section':
sum0 = 0
for i in range(700, 1000):
sum0 += i
if 'omp critical':
sum += sum0
    known_sum = (999 * 1000) / 2 + 7
return known_sum == sum
|
476903
|
import logging
import unittest
from clarifai.rest import ApiError, ClarifaiApp
class TestAuth(unittest.TestCase):
"""
unit test for api auth
"""
def test_auth_with_invalid_key(self):
""" instantiate with key """
api_preset_key = 'abc'
with self.assertRaises(ApiError):
ClarifaiApp(api_key=api_preset_key, log_level=logging.WARN)
def test_auth_with_id_and_secret(self):
with self.assertRaises(DeprecationWarning):
ClarifaiApp(app_id='aa', app_secret='bb', log_level=logging.WARN)
|
476967
|
import numpy as np
import matplotlib.pyplot as plt # For plotting
import pdb
import pandas as pd
import seaborn as sns
from scipy import stats
data1 = np.load("train_nu.npz")
#data1 = np.load('pipe_test_1dnu.npz')
nu = data1['nu_1d']
#nu = np.sort(nu)
print('nu is',nu)
############################
# viscosity profile
data = np.load('pred_poiseuille_para.npz')
mesh = data['mesh']
print('shape of mesh is',mesh.shape)
u = data['u']
v = data['v']
p = data['p']
ut = data['ut']
uMaxP=data['uMaxP']
uMaxA=data['uMaxA']
print("shape of uMaxA",uMaxA.shape)
Ny, Nx, Np = u.shape
print ("mesh shape = ", mesh.shape)
print ("u shape", u.shape)
idxP = 28
# plt.figure()
# plt.contourf(mesh[0,:,:, idxP], mesh[1,:,:,idxP], u[:, :, idxP])
# plt.axis('equal')
# plt.colorbar()
#plt.savefig('pipe1.png')
#idxP = np.array([0,28,49])
idxP = [3]
plot_x = 0.8
plot_y = 0.07
fontsize = 16
# y = np.linspace(-0.05,0.05,50)
# ii = 0
# for idxPi in idxP:
# plt.figure()
# for i in range(Nx):
# pP, = plt.plot(y,u[:, i, idxPi])
# pT, = plt.plot(y,ut[:, i, idxPi], 'r--')
# ii = ii+1
#plt.close('all')
# plt.legend([pP, pT], ['NN Surrogate,nu = 2.1e-4', 'Truth'],fontsize = fontsize)
#print ('max u = ', np.max(u[:, :, idxP]))
#print ('max ut = ', np.max(ut[:, :, idxP]))
#plt.savefig('pipe2.png')
#plt.text(0, 0.1, r'$\delta$',
#{'color': 'k', 'fontsize': 24, 'ha': 'center', 'va': 'center',
#'bbox': dict(boxstyle="round", fc="w", ec="k", pad=0.2)})
d = 0.1
#plot spanwise u profile along y, looping from nu_small to nu_large
#u = data['u']
idx_X = int(round(Nx/2))
y = np.linspace(-0.05,0.05,50)
can = [3,6,14,49]
#for idxP in range(len(nu)):
xtext= [0,0.5,1]
ytext = [0.45,0.28,0.1,0.01]
plt.figure(1)
plt.clf()
Re = []
for idxP in range(len(can)):
#plt.figure(1)
#plt.clf()
ax1 = plt.subplot(111)
pT, = plt.plot(y,ut[:, idx_X, can[idxP]], color='darkblue', linestyle='-', lw=3.0, alpha=1.0)
pP, = plt.plot(y,u[:, idx_X, can[idxP]], color='red', linestyle='--', dashes=(5, 5), lw=2.0, alpha=1.0)
tmpRe = np.max(u[:, idx_X, can[idxP]])*d/nu[can[idxP]]
Re.append(tmpRe)
#print("Re is",Re)
nu_current = float("{0:.5f}".format(nu[can[idxP]]))
#plt.title(r'$\nu = $' + str(nu_current))
plt.text(-0.012,ytext[idxP],r'$\nu = $' + str(nu_current),{'color': 'k', 'fontsize': 16})
#plt.legend([pT, pP], ['Analytical', 'NN surrogate'], fontsize = 16,loc = 10)
plt.ylabel(r'$u(y)$', fontsize=16)
plt.xlabel(r'$y$', fontsize=16)
ax1.tick_params(axis='x', labelsize=16)
ax1.tick_params(axis='y', labelsize=16)
ax1.set_xlim([-0.05, 0.05])
ax1.set_ylim([0.0, 0.62])
figureName = 'pipe_uProfiles_nuIdx_.png'
plt.savefig(figureName, bbox_inches='tight')
print('Re is',Re)
np.savez('test_Re',Re = Re)
plt.figure(2)
plt.clf()
ax1 = plt.subplot(111)
sns.kdeplot(uMaxA[0, :], shade=True, label='Analytical', linestyle="-", linewidth=3)
sns.kdeplot(uMaxP[0, :], shade=False, label='DNN', linestyle="--", linewidth=3.5, color='darkred')
plt.legend(prop={'size': 16})
plt.xlabel(r'$u_c$', fontsize=16)
plt.ylabel(r'PDF', fontsize=16)
ax1.tick_params(axis='x', labelsize=16)
ax1.tick_params(axis='y', labelsize=16)
figureName = 'pipe_unformUQ.png'
plt.savefig(figureName, bbox_inches='tight')
plt.show()
|
476970
|
class StorageNotFound(Exception):
pass
class StorageSettedError(Exception):
pass
class MismatchedVersionError(Exception):
pass
class InvalidPassword(Exception):
pass
|
476980
|
from pymtl import *
from lizard.util.rtl.interface import UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.drop_unit import DropUnit, DropUnitInterface
from lizard.util.rtl.register import Register, RegisterInterface
from lizard.mem.rtl.memory_bus import MemMsgType, MemMsgStatus
from lizard.core.rtl.messages import FetchMsg, PipelineMsgStatus
from lizard.msg.codes import ExceptionCode
from lizard.config.general import *
from lizard.util.rtl.pipeline_stage import PipelineStageInterface
def FetchInterface():
return PipelineStageInterface(FetchMsg(), None)
class Fetch(Model):
def __init__(s, fetch_interface, MemMsg, enable_btb):
UseInterface(s, fetch_interface)
s.MemMsg = MemMsg
xlen = XLEN
ilen = ILEN
ilen_bytes = ilen / 8
s.require(
MethodSpec(
'mem_recv',
args=None,
rets={'msg': s.MemMsg.resp},
call=True,
rdy=True,
),
MethodSpec(
'mem_send',
args={'msg': s.MemMsg.req},
rets=None,
call=True,
rdy=True,
),
MethodSpec(
'check_redirect',
args={},
rets={
'redirect': Bits(1),
'target': Bits(xlen),
},
call=False,
rdy=False,
),
MethodSpec(
'btb_read',
args={
'key': XLEN,
},
rets={
'value': XLEN,
'valid': Bits(1)
},
call=False,
rdy=False,
),
)
s.drop_unit = DropUnit(DropUnitInterface(s.MemMsg.resp))
s.connect_m(s.drop_unit.input, s.mem_recv, {
'msg': 'data',
})
# PYMTL_BROKEN
s.drop_unit_output_data_data = Wire(s.drop_unit.output_data.data.nbits)
s.connect(s.drop_unit_output_data_data, s.drop_unit.output_data.data)
s.inst_from_mem = Wire(ILEN)
# PYMTL_BROKEN
@s.combinational
def pymtl_is_broken_connect_does_not_work():
s.inst_from_mem.v = s.drop_unit_output_data_data[0:ilen]
s.fetch_val = Register(
RegisterInterface(Bits(1), True, False), reset_value=0)
s.fetch_msg = Register(RegisterInterface(FetchMsg(), True, False))
s.in_flight = Register(
RegisterInterface(Bits(1), True, False), reset_value=0)
s.pc = Register(RegisterInterface(Bits(xlen), True, False), reset_value=0)
s.advance_f1 = Wire(1)
s.advance_f0 = Wire(1)
@s.combinational
def handle_advance():
s.advance_f1.v = s.drop_unit.output_rdy and (not s.fetch_val.read_data or
s.take_call)
s.advance_f0.v = not s.in_flight.read_data or s.drop_unit.drop_status_occurred or s.advance_f1
@s.combinational
def handle_redirect():
# Insert BTB here!
s.btb_read_key.v = s.pc.read_data
if s.check_redirect_redirect:
# drop if in flight
s.drop_unit.drop_call.v = s.in_flight.read_data
# the new PC is the target
s.pc.write_data.v = s.check_redirect_target
s.pc.write_call.v = 1
else:
s.drop_unit.drop_call.v = 0
# if we are issuing now, the new PC is just ilen_bytes more than the last one
if s.btb_read_valid and enable_btb:
s.pc.write_data.v = s.btb_read_value
else:
s.pc.write_data.v = s.pc.read_data + ilen_bytes
s.pc.write_call.v = s.advance_f0
s.connect(s.in_flight.write_data, 1)
s.connect(s.in_flight.write_call, s.advance_f0)
s.connect(s.peek_msg, s.fetch_msg.read_data)
@s.combinational
def handle_f1():
s.fetch_val.write_call.v = 0
s.fetch_val.write_data.v = 0
s.fetch_msg.write_call.v = 0
s.fetch_msg.write_data.v = 0
s.drop_unit.output_call.v = 0
if s.check_redirect_redirect:
# invalidate the output
s.peek_rdy.v = 0
# write a 0 into the valid register
s.fetch_val.write_call.v = 1
else:
s.peek_rdy.v = s.fetch_val.read_data
if s.drop_unit.output_rdy and (not s.fetch_val.read_data or
s.take_call):
s.fetch_val.write_call.v = 1
s.fetch_val.write_data.v = 1
s.fetch_msg.write_call.v = 1
s.drop_unit.output_call.v = 1
s.fetch_msg.write_data.hdr_pc.v = s.pc.read_data
if s.drop_unit.output_data.stat != MemMsgStatus.OK:
s.fetch_msg.write_data.hdr_status.v = PipelineMsgStatus.PIPELINE_MSG_STATUS_EXCEPTION_RAISED
if s.drop_unit.output_data.stat == MemMsgStatus.ADDRESS_MISALIGNED:
s.fetch_msg.write_data.exception_info_mcause.v = ExceptionCode.INSTRUCTION_ADDRESS_MISALIGNED
elif s.drop_unit.output_data.stat == MemMsgStatus.ACCESS_FAULT:
s.fetch_msg.write_data.exception_info_mcause.v = ExceptionCode.INSTRUCTION_ACCESS_FAULT
# save the faulting PC as mtval
s.fetch_msg.write_data.exception_info_mtval.v = s.pc.read_data
else:
s.fetch_msg.write_data.hdr_status.v = PipelineMsgStatus.PIPELINE_MSG_STATUS_VALID
s.fetch_msg.write_data.inst.v = s.inst_from_mem
s.fetch_msg.write_data.pc_succ.v = s.pc.write_data
elif s.take_call:
# someone is calling, but we are stalled, so give them output but
# unset valid
s.fetch_val.write_call.v = 1
s.fetch_val.write_data.v = 0
# handle_f0
s.connect(s.mem_send_msg.type_, int(MemMsgType.READ))
@s.combinational
def write_addr():
s.mem_send_msg.addr.v = s.pc.write_data
s.connect(s.mem_send_msg.len_, ilen_bytes)
# can only send it if advancing
s.connect(s.mem_send_call, s.advance_f0)
def line_trace(s):
pc = s.pc.read_data.hex()[2:]
empty = ' ' * len(pc)
if s.advance_f1:
trace = pc
elif s.peek_rdy:
trace = '#{}'.format(empty[1:])
else:
trace = empty
return trace
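# Added summary (not in the original source): Fetch behaves as two coupled stages.
# F0 issues a memory read for the next PC over mem_send whenever it may advance
# (advance_f0); F1 receives the response through the drop unit, drops any in-flight
# response on a redirect, and publishes a FetchMsg -- recording misaligned-address
# or access-fault exceptions -- once the consumer takes it via take_call.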
|
476985
|
import FWCore.ParameterSet.Config as cms
source = cms.Source("EmptySource")
from GeneratorInterface.Hydjet2Interface.hydjet2DefaultParameters_cff import *
generator = cms.EDFilter("Hydjet2GeneratorFilter",
collisionParameters5020GeV,
qgpParameters,
hydjet2Parameters,
fNhsel = cms.int32(2), # Flag to include jet (J)/jet quenching (JQ) and hydro (H) state production, fNhsel (0 H on & J off, 1 H/J on & JQ off, 2 H/J/HQ on, 3 J on & H/JQ off, 4 H off & J/JQ on)
PythiaParameters = cms.PSet(PythiaDefaultBlock,
parameterSets = cms.vstring(
#'pythiaUESettings',
'ProQ2Otune',
'hydjet2PythiaDefault',
'pythiaJets',
'pythiaPromptPhotons',
'myParameters',
'pythiaZjets',
'pythiaBottomoniumNRQCD',
'pythiaCharmoniumNRQCD',
'pythiaQuarkoniaSettings',
'pythiaWeakBosons',
'TDB'
)
),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
fIfb = cms.int32(1), # Flag of type of centrality generation, fBfix (=0 is fixed by fBfix, >0 distributed [fBfmin, fBmax])
fBmin = cms.double(0.), # Minimum impact parameter, fBmin
fBmax = cms.double(30.), # Maximum impact parameter, fBmax
fBfix = cms.double(0.), # Fixed impact parameter, fBfix
)
'''
RA(Pb) ~= 6.813740957 fm
% cent b/RA
0 0
5 0.51
6 0.57
10 0.74
12 0.81
15 0.91
20 1.05
25 1.18
30 1.29
35 1.39
40 1.49
45 1.58
50 1.67
55 1.75
60 1.83
65 1.90
70 1.97
75 2.06
'''
|
477004
|
import urllib.request as request
import os, datetime, json
url = 'https://ladsweb.modaps.eosdis.nasa.gov/archive/allData'
def get_lev_pro_year(level, product, year):
ref = url + '/%s/%s/%s.json'%(level, product, year)
req = request.Request(ref)
response = request.urlopen(req)
return json.loads(response.read().decode('utf-8'))
def get_list(level, product, year, day):
ref = url + '/%s/%s/%s/%s.json'%(level, product, year, day)
req = request.Request(ref)
response = request.urlopen(req)
return json.loads(response.read().decode('utf-8'))
def get_all(level, product, areas, year, days = (0,365)):
print('searching... %s level %s %s'%(product, level, year))
if isinstance(days, list): days = ['%.3d'%i for i in days]
else:
buf = [i['name'] for i in get_lev_pro_year(level, product, year)]
days = [i for i in buf if int(i)>=days[0] and int(i)<=days[1]]
files = []
for day in days:
print('\t%s day ...'%day, end=' ')
lst = [i['name'] for i in get_list(level, product, year, day)]
files.extend([(level, product, year, day), i] for i in lst)
print('done')
print()
files = [i for i in files if sum([a in i[1] for a in areas])]
return files
def download_one(pre, name, des):
ref = url + '/%s/%s/%s/%s/%s'%(pre+(name,))
req = request.Request(ref)
response = request.urlopen(req)
html = response.read()
f = open('%s/%s'%(des,name), 'wb')
f.write(html)
f.close()
def search(product, level, areas, terms):
files = []
for pro in product:
for year, days in terms:
files.extend(get_all(level, pro, areas, year, days))
print('%s new files found!'%len(files))
return files
def download(files, des):
succ = 0
for pre, name in files:
try:
print('\t', name, end=' ... ')
download_one(pre, name, des)
print('done')
succ += 1
        except Exception:
            print('failed!')
    print('download completed, succ %s, failed %s' % (succ, len(files)-succ))
if __name__ == '__main__':
files = search(['MOD09Q1', 'MOD11A2'], level=6, areas=['h25v05', 'h27v05', 'h28v05'], terms=[(2019, (0,30))])
download(files, '')
|
477027
|
from followthemoney.rdf import URIRef, Identifier
from followthemoney.types.common import PropertyType
from followthemoney.util import defer as _
class ChecksumType(PropertyType):
"""Content hashes calculated using SHA1. Checksum references are used by
document-typed entities in Aleph to refer to raw data in the archive
(e.g. the document from which the entity is extracted).
Unfortunately, this has some security implications: in order to avoid people
getting access to documents for which they know the checksum, properties
of this type are scrubbed when submitted via the normal API. Checksums can only
be defined by uploading a document to be ingested."""
name = "checksum"
group = "checksums"
label = _("Checksum")
plural = _("Checksums")
matchable = True
pivot = True
def rdf(self, value: str) -> Identifier:
return URIRef(f"hash:{value}")
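# Illustrative behaviour (added note, not part of the original module): rdf() simply
# prefixes the stored hex digest with the "hash:" URI scheme, e.g.
# rdf("da39a3ee5e6b4b0d3255bfef95601890afd80709") returns
# URIRef("hash:da39a3ee5e6b4b0d3255bfef95601890afd80709").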
|
477050
|
def palin_perm(s):
    """Return True if some permutation of s (letters only, case-insensitive) can form a palindrome."""
    bitvec = [0] * 26
    s = s.lower()
    for char in s:
charCode = ord(char) - ord("a")
if charCode >= 0 and charCode < 26:
if bitvec[charCode] == 0:
bitvec[charCode] = 1
else:
bitvec[charCode] = 0
one = False
for i in range(26):
if not one and bitvec[i] == 1:
one = True
elif one and bitvec[i] == 1:
return False
return True
print(palin_perm("tact coa"))
print(palin_perm("racecara"))
print(palin_perm("tactcoapapa"))
print(palin_perm(""))
print(palin_perm("loolo"))
|
477057
|
class RhinoDeselectAllObjectsEventArgs(EventArgs):
# no doc
Document=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Document(self: RhinoDeselectAllObjectsEventArgs) -> RhinoDoc
"""
ObjectCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ObjectCount(self: RhinoDeselectAllObjectsEventArgs) -> int
"""
|
477085
|
from __future__ import print_function,division
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
class Evaluator(object):
def __init__(self,loss=nn.MSELoss(),batch_size=64,delay=36):
self.loss=loss
self.batch_size=batch_size
self.delay=delay
def evaluate(self,model,data,device=torch.device('cpu')):
model.eval()
loss=self.loss
dataloader= DataLoader(dataset=data,batch_size=self.batch_size,shuffle=True,num_workers=0)
_loss_val=0.0
rmse=0
with torch.no_grad():
model=model.to(device)
for batch in dataloader:
input_vars=batch['X'].to(device)
#decoder_inputs=batch['decoder_inputs'].to(device)
#decoder_inputs=batch['X'][:,:,:3].to(device)
target_vars=batch['y'].to(device)
day_ago_data=None
if model.use_day_ago_info:
day_ago_data=batch['one_day_ago'].to(device)
seq2seq_outputs=model(input_vars,day_ago_data)
_loss_val+=loss(seq2seq_outputs,target_vars).item()
return _loss_val/(len(dataloader)),rmse
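# Usage sketch (assumption, not part of the original file):
#   evaluator = Evaluator(loss=nn.MSELoss(), batch_size=64)
#   val_loss, rmse = evaluator.evaluate(model, val_dataset, device=torch.device('cpu'))
# Note that rmse is currently a placeholder and is always returned as 0.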
|
477099
|
import string # forced import by CodeWars kata
from string import ascii_uppercase as up, ascii_lowercase as low, \
ascii_letters as az, maketrans
ROT13 = maketrans(low[13:] + low[:13] + up[13:] + up[:13], az)
def rot13(message):
return message.translate(ROT13)
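# Example (added note): rot13("Hello, World!") returns "Uryyb, Jbeyq!"; applying
# rot13 twice yields the original message, since ROT13 is its own inverse.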
|
477127
|
class TestCRUD:
def test_add_one(self, db):
result = db.add_one(
{'name': 'alice', 'url': 'alice.com', 'method': 'get'})
errors = result.get('errors')
success = result.get('success')
assert errors is None
assert success is True
def test_fetch_one(self, db, db_dummy_data):
result = db.fetch_one(name='alice')
assert result.name == 'alice'
assert result.url == 'alice.com'
assert result.method == 'get'
def test_update_one(self, db, db_dummy_data):
result = db.fetch_one(name='alice')
db.update_one(rowid=result.rowid, values={
'name': 'eve', 'method': 'post', 'url': 'example.com'})
result = db.fetch_one(name='eve')
assert result.name == 'eve'
assert result.method == 'post'
def test_fetch_all(self, db, db_dummy_data):
rows = db.fetch_all()
assert len(rows) == 2
def test_delete_one(self, db, db_dummy_data):
before_delete = db.fetch_all()
db.delete_one(name='alice')
after_delete = db.fetch_all()
assert len(before_delete) == len(after_delete) + 1
class TestErrors:
def test_add_one_missing_column(self, db):
result = db.add_one({})
errors = result.get('errors')
assert errors == 'missing column error: name'
result = db.add_one(
{'name': 'alice'})
errors = result.get('errors')
assert errors == 'missing column error: url'
result = db.add_one(
{'name': 'alice', 'url': 'alice.com'})
errors = result.get('errors')
assert errors == 'missing column error: method'
result = db.add_one(
{'name': 'alice', 'url': 'alice.com', 'method': 'get'})
errors = result.get('errors')
assert errors is None
def test_constraint_violation(self, db, db_dummy_data):
result = db.add_one(
{'name': 'alice', 'url': 'alice.com', 'method': 'get'})
errors = result.get('errors')
assert errors == '{}'.format(
'sqlite error: UNIQUE constraint failed: requests.name')
class TestSort:
def test_sort(self, db, db_dummy_data):
name_asc = db.fetch_all(sort_by='name', order='asc')
assert name_asc[0].name == 'alice'
assert name_asc[1].name == 'bob'
name_desc = db.fetch_all(sort_by='name', order='desc')
assert name_desc[0].name == 'bob'
assert name_desc[1].name == 'alice'
time_asc = db.fetch_all(sort_by='timestamp', order='asc')
assert time_asc[0].name == 'alice'
assert time_asc[1].name == 'bob'
time_desc = db.fetch_all(sort_by='timestamp', order='desc')
assert time_desc[0].name == 'bob'
assert time_desc[1].name == 'alice'
|
477154
|
import json
import unittest
import mock
from src.access_token_common import AccessTokenCommon
class AccessTokenCommonTest(unittest.TestCase):
mock_os_environ_access_token = {"ACCESS_TOKEN_FROM_ENV": "access token 42"}
@mock.patch.dict("os.environ", mock_os_environ_access_token, clear=True)
def test_access_token_from_environment(self):
mock_api_config = mock.Mock()
mock_api_config.app_id = "test-app-id"
mock_api_config.app_secret = "test-app-secret"
mock_api_config.oauth_token_dir = "test-token-dir"
# The name of the access token is converted to upper case to find
# it in the process environment.
access_token = AccessTokenCommon(mock_api_config, name="access_token_from_env")
access_token.fetch()
# echo -n 'access token 42' | shasum -a 256
self.assertEqual(
access_token.hashed(),
"<KEY>",
)
with self.assertRaisesRegex(
RuntimeError, "AccessToken does not have a refresh token"
):
access_token.hashed_refresh_token()
mock_os_environ_empty = {}
@mock.patch.dict("os.environ", mock_os_environ_empty, clear=True)
@mock.patch("os.chmod")
@mock.patch("builtins.open")
def test_access_token_from_json(self, mock_open, mock_chmod):
mock_api_config = mock.Mock()
mock_api_config.app_id = "test-app-id"
mock_api_config.app_secret = "test-app-secret"
mock_api_config.oauth_token_dir = "test-token-dir"
access_token_dict = {
"name": "access_token_from_file",
"access_token": "test access token from json",
"refresh_token": "test refresh token from json",
"scopes": "test-scope-1,test-scope-2",
}
# Test access token JSON file read
mock_json_file = mock.Mock()
mock_json_file.read.return_value = json.dumps(access_token_dict)
mock_json_file.fileno.return_value = 42
# mocking __enter__ is required because open is used in a context manager
mock_open.return_value.__enter__.return_value = mock_json_file
access_token = AccessTokenCommon(mock_api_config, name="access_token_from_file")
access_token.fetch()
# echo -n 'test access token from json' | shasum -a 256
self.assertEqual(
access_token.hashed(),
"<PASSWORD>ff<PASSWORD>c<PASSWORD>22f<PASSWORD>4<PASSWORD>cf6",
)
# echo -n 'test refresh token from json' | shasum -a 256
self.assertEqual(
access_token.hashed_refresh_token(),
"<PASSWORD>",
)
# Test access token JSON file write
self.mock_write_text = ""
def mock_write_json(text): # write to a string instead of a file
self.mock_write_text += text
mock_json_file.write = mock_write_json
access_token.write()
mock_chmod.assert_called_once_with(42, 0o600)
self.assertEqual(json.loads(self.mock_write_text), access_token_dict)
|
477181
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
import datetime
date_types = (
pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
_isdate = lambda x: isinstance(x, date_types)
SPAN = 2 / 3.
ALPHA = 0.05 # significance level for confidence interval
def _snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def _plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(_snakify, summary_names))
# TODO: indexing w/ data frame is messing everything up
fittedvalues = df['predicted_value'].values
predict_mean_ci_low = df['mean_ci_95%_low'].values
predict_mean_ci_upp = df['mean_ci_95%_upp'].values
predict_ci_low = df['predict_ci_95%_low'].values
predict_ci_upp = df['predict_ci_95%_upp'].values
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, fittedvalues, predict_mean_ci_low, predict_mean_ci_upp
def lowess(x, y, span=SPAN):
"returns y-values estimated using the lowess function in statsmodels."
"""
for more see
statsmodels.nonparametric.smoothers_lowess.lowess
"""
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y)
y2 = pd.Series(upper * std + y)
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, y, y1, y2
def mavg(x,y, window):
"compute moving average"
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
    std_err = y.rolling(window).std()
    y = y.rolling(window).mean()
y1 = y - std_err
y2 = y + std_err
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, y, y1, y2
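# Minimal usage sketch (added example, not part of the original module). It assumes a
# statsmodels version that still exposes wls_prediction_std and summary_table, and
# exercises lm() on synthetic numeric data.
if __name__ == "__main__":
    _x = pd.Series(np.linspace(0, 10, 50))
    _y = 2 * _x + np.random.normal(0, 1, 50)
    _xs, _fit, _ci_low, _ci_upp = lm(_x, _y)
    print(len(_fit), float(_fit[0]), float(_fit[-1]))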
|
477193
|
import torch
from torch import nn
class LiteConv3x3(nn.Module):
"""Lite 3x3 convolution"""
def __init__(self):
super(LiteConv3x3, self).__init__()
def forward(self, input):
return input
class AG(nn.Module):
"""Aggregation gate"""
def __init__(self):
super(AG, self).__init__()
def forward(self, input):
return input
class OSBlock(nn.Module):
"""Omni-scale block"""
def __init__(self):
super(OSBlock, self).__init__()
def forward(self, input):
return input
if __name__ == '__main__':
print('test OSBlock')
|
477196
|
import os
import copy
import click
import uuid
from zipfile import ZipFile, ZIP_DEFLATED
from io import BytesIO
from tcfcli.libs.utils.yaml_parser import yaml_dump
from tcfcli.common.template import Template
from tcfcli.libs.utils.cos_client import CosClient
from tcfcli.common.user_exceptions import TemplateNotFoundException, \
InvalidTemplateException, ContextException
from tcfcli.common.tcsam.tcsam_macro import TcSamMacro as tsmacro
from tcfcli.common import tcsam
_DEFAULT_OUT_TEMPLATE_FILE = "deploy.yaml"
_CURRENT_DIR = '.'
_BUILD_DIR = './.tcf_build'
@click.command()
@click.option('--template-file', '-t', type=click.Path(exists=True), help="FAM template file or path about function config")
@click.option('--cos-bucket', type=str, help="COS bucket name")
@click.option('--output-template-file', '-o',
type=click.Path(),
help="FAM output template file or path",
default=_DEFAULT_OUT_TEMPLATE_FILE,
show_default=True)
def package(template_file, cos_bucket, output_template_file):
'''
    Package the SCF function code and upload the package to COS
'''
package = Package(template_file, cos_bucket, output_template_file)
package.do_package()
class Package(object):
def __init__(self, template_file, cos_bucket, output_template_file):
self.template_file = template_file
self.cos_bucket = cos_bucket
self.output_template_file = output_template_file
self.check_params()
# self.resource = Resources(Template.get_template_data(self.template_file))
template_data = tcsam.tcsam_validate(Template.get_template_data(self.template_file))
self.resource = template_data.get(tsmacro.Resources, {})
def do_package(self):
for ns in self.resource:
for func in self.resource[ns]:
if func == tsmacro.Type:
continue
code_url = self._do_package_core(
self.resource[ns][func][tsmacro.Properties].get(tsmacro.CodeUri, "")
)
if "cos_bucket_name" in code_url:
self.resource[ns][func][tsmacro.Properties]["CosBucketName"] = code_url["cos_bucket_name"]
self.resource[ns][func][tsmacro.Properties]["CosObjectName"] = code_url["CosObjectName"]
click.secho("Upload function zip file '{}' to COS bucket '{}' success".
format(os.path.basename(code_url["cos_object_name"]),
code_url["cos_bucket_name"]), fg="green")
elif "zip_file" in code_url:
self.resource[ns][func][tsmacro.Properties]["LocalZipFile"] = code_url["zip_file"]
yaml_dump({tsmacro.Resources: self.resource}, self.output_template_file)
click.secho("Generate deploy file '{}' success".format(self.output_template_file), fg="green")
def check_params(self):
if not self.template_file:
click.secho("FAM Template Not Found", fg="red")
raise TemplateNotFoundException("Missing option --template-file")
def _do_package_core(self, func_path):
zipfile, zip_file_name = self._zip_func(func_path)
code_url = dict()
if self.cos_bucket:
CosClient().upload_file2cos(bucket=self.cos_bucket, file=zipfile.read(),
key=zip_file_name)
code_url["cos_bucket_name"] = self.cos_bucket
code_url["cos_object_name"] = "/" + zip_file_name
else:
code_url["zip_file"] = os.path.join(os.getcwd(), zip_file_name)
return code_url
def _zip_func(self, func_path):
buff = BytesIO()
if not os.path.exists(func_path):
raise ContextException("Function file or path not found by CodeUri '{}'".format(func_path))
zip_file_name = str(uuid.uuid1()) + '.zip'
cwd = os.getcwd()
os.chdir(func_path)
with ZipFile(buff, mode='w', compression=ZIP_DEFLATED) as zip_object:
for current_path, sub_folders, files_name in os.walk(_CURRENT_DIR):
if current_path == _BUILD_DIR:
continue
for file in files_name:
zip_object.write(os.path.join(current_path, file))
os.chdir(cwd)
buff.seek(0)
buff.name = zip_file_name
if not os.path.exists(_BUILD_DIR):
os.mkdir(_BUILD_DIR)
zip_file_name = os.path.join(_BUILD_DIR, zip_file_name)
# a temporary support for upload func from local zipfile
with open(zip_file_name, 'wb') as f:
f.write(buff.read())
buff.seek(0)
# click.secho("Compress function '{}' to zipfile '{}' success".format(func_config.name, zip_file_name))
return buff, zip_file_name
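# Usage sketch (added note; the console entry-point name is an assumption about how
# this click command is typically exposed):
#   scf package --template-file template.yaml --cos-bucket my-bucket -o deploy.yaml
# When --cos-bucket is omitted, the zipped code stays local and is referenced through
# the LocalZipFile property in the generated deploy file.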
|
477230
|
import pytest
import time
from common.exceptions import PlenumTypeError, PlenumValueError
from stp_core.ratchet import Ratchet
from plenum.common.throttler import Throttler
def test_throttler_init_invalid_args():
for windowSize in (None, '5', [4]):
with pytest.raises(PlenumTypeError):
Throttler(windowSize)
for windowSize in (-1, 0):
with pytest.raises(PlenumValueError):
Throttler(windowSize)
def test_throttler_case1():
"""
Tests throttler with default delay function
"""
windowSize = 3
throttler = Throttler(windowSize)
testIterations = windowSize * 5
for i in range(testIterations):
hasAcquired, timeToWait = throttler.acquire()
if i % windowSize == 0:
assert hasAcquired
assert round(timeToWait) == 0
else:
assert not hasAcquired
assert windowSize - i % windowSize == round(timeToWait)
time.sleep(1)
def test_throttler_case2():
"""
Tests throttler with custom delay function
"""
windowSize = 10
testIterations = windowSize - 2
ratchet = Ratchet(a=2, b=0.05, c=1, base=2, peak=windowSize)
throttler = Throttler(windowSize, ratchet.get)
cooldowns = [time.sleep(1) or throttler.acquire()[1]
for i in range(testIterations)]
middle = len(cooldowns) // 2
firstIteration, secondIteration = cooldowns[:middle], cooldowns[middle:]
for a, b in zip(firstIteration, secondIteration):
if not a == b == 0:
assert b > a
|
477234
|
import sys
from bisect import bisect_left
def bin_number(n, size=4):
return bin(n)[2:].zfill(size)
def iar():
return list(map(int, input().split()))
def ini():
return int(input())
def isp():
return map(int, input().split())
def sti():
return str(input())
def par(a):
print(" ".join(list(map(str, a))))
def tdl(outerListSize, innerListSize, defaultValue=0):
return [[defaultValue] * innerListSize for i in range(outerListSize)]
def sts(s):
s = list(s)
s.sort()
return "".join(s)
def bis(a, x):
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return [i, True]
else:
return [-1, False]
class pair:
def __init__(self, f, s):
self.fi = f
self.se = s
def __lt__(self, other):
return (self.fi, self.se) < (other.fi, other.se)
# Getting a list of integers space seperated
def int_list():
return list(map(int, input().split()))
def read_till_end():
return sys.stdin.read().split()
def just_read():
return sys.stdin.readline()
if __name__ == "__main__":
values = int_list()
print(*values)
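# Quick reference (added note): bis(a, x) wraps bisect_left and returns [index, True]
# when x is present in the sorted list a, otherwise [-1, False]; e.g.
# bis([1, 3, 5], 3) -> [1, True] and bis([1, 3, 5], 4) -> [-1, False].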
|
477237
|
import os
from abc import ABC
import datetime
import logging
from typing import Callable, Iterator, Union, Optional, List, Type
from enum import Enum, IntEnum
import numpy
import h5py
from s100py.s1xx import s1xx_sequence, S1xxObject, S1xxCollection, S1xxDatasetBase, S1xxGridsBase, S1XXFile, h5py_string_dtype
from s100py.s100 import S100Root, S100Exception, FeatureContainerDCF2, FeatureInstanceDCF2, FeatureInformation, FeatureInformationDataset, GroupFBase
WATER_LEVEL = "WaterLevel"
FILLVALUE_HEIGHT = -9999.0
FILLVALUE_TREND = 0
TYPE_OF_WATER_LEVEL_DATA = Enum(value="TYPE_OF_WATER_LEVEL_DATA",
names=[("Observation", 1),
("Astronomical prediction", 2),
("Analysis or hybrid method", 3),
("Hydrodynamic model hindcast", 4),
("Hydrodynamic model forecast", 5),
("Observed minus predicted", 6),
("Observed minus analysis", 7),
("Observed minus hindcast", 8),
("Observed minus forecast", 9),
("Forecast minus predicted", 10),
]
)
VERTICAL_COORDINATE_BASE = Enum(value="VERTICAL_COORDINATE_BASE",
names=[("Sea Surface", 1),
("Vertical Datum", 2),
("Sea Bottom", 3)
]
)
VERTICAL_DATUM_REFERENCE = Enum(value="VERTICAL_DATUM_REFERENCE",
names=[("S-100 vertical datum", 1),
("EPSG", 2)
]
)
class S104Exception(S100Exception):
"""Raised when input is not S104 compliant"""
pass
class WaterLevelTrend(IntEnum):
"""Water level trend enumerated constant and returns an int object"""
Decreasing = 1
Increasing = 2
Steady = 3
# noinspection PyAbstractClass
class S104MetadataListBase(S1xxCollection):
"""Define group name format"""
write_format_str = ".%02d"
class WaterLevelUncertaintyInformation(S1xxObject):
"""S100 code and uncertainty of data values"""
__name_hdf_name__ = "name" #: HDF5 naming
__value_hdf_name__ = "value" #: HDF5 naming
@property
def __version__(self) -> int:
return 1
@property
def name(self) -> str:
""" The plain text name of the data
Returns:
str: Name of the data ("waterLevelHeight" or "waterLevelTrend")
"""
return self._attributes[self.__name_hdf_name__]
@name.setter
def name(self, val: str):
"""Incoming value datatype validation"""
self._attributes[self.__name_hdf_name__] = val
@property
def __name_type__(self):
"""Uncertainty name datatype"""
return str
def name_create(self):
"""Create empty object"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.name = self.__name_type__()
@property
def value(self) -> str:
""" The uncertainty value"""
return self._attributes[self.__value_hdf_name__]
@value.setter
    def value(self, val: str):
"""Incoming value datatype validation"""
self._attributes[self.__value_hdf_name__] = val
@property
def __value_type__(self):
"""Uncertainty value datatype"""
return str
def value_create(self):
"""Create empty object"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.value = self.__value_type__()
class WaterLevelUncertaintyDataset(S1xxDatasetBase):
"""Create uncertainty dataset"""
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
""" The plain text name of the dataset"""
return "uncertainty"
@property
def metadata_type(self) -> Type[WaterLevelUncertaintyInformation]:
"""S104 datatype"""
return WaterLevelUncertaintyInformation
class GeometryValuesDataset(S1xxGridsBase):
__longitude_hdf_name__ = "longitude"
__latitude_hdf_name__ = "latitude"
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
""" The plain text name of the dataset"""
return "geometryValues"
@property
def longitude(self) -> s1xx_sequence:
"""Get the data"""
return self._attributes[self.__longitude_hdf_name__]
@longitude.setter
def longitude(self, val: s1xx_sequence):
"""Potential validation or other checks/changes to incoming data"""
self._attributes[self.__longitude_hdf_name__] = val
@property
def __longitude_type__(self) -> s1xx_sequence:
"""S100 Datatype"""
return numpy.ndarray
@property
def longitude_dtype(self) -> Type[float]:
"""S100 Datatype"""
return numpy.float32
def longitude_create(self):
""" Creates a blank, empty or zero value for longitude"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.longitude = self.__longitude_type__([], self.longitude_dtype)
@property
def latitude(self) -> s1xx_sequence:
"""Get the data"""
return self._attributes[self.__latitude_hdf_name__]
@latitude.setter
def latitude(self, val: s1xx_sequence):
"""Potential validation or other checks/changes to incoming data"""
self._attributes[self.__latitude_hdf_name__] = val
@property
def __latitude_type__(self) -> s1xx_sequence:
"""S100 Datatype"""
return numpy.ndarray
@property
def latitude_dtype(self) -> Type[float]:
"""S100 Datatype"""
return numpy.float32
def latitude_create(self):
""" Creates a blank, empty or zero value for latitude"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.latitude = self.__latitude_type__([], self.latitude_dtype)
def get_write_order(self):
"""Specify order of attributes for ordered dict"""
return [self.__longitude_hdf_name__, self.__latitude_hdf_name__]
def get_compound_dtype(self):
return [self.longitude_dtype, self.latitude_dtype]
class PositioningGroup(S1xxObject):
__geometry_values_hdf_name__ = "geometry_values"
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
""" The plain text name of the group
Returns:
str: Name of the group
"""
return "Positioning"
@property
def metadata_type(self) -> type:
return GeometryValuesDataset
@property
def geometry_values(self) -> GeometryValuesDataset:
"""Get the data"""
return self._attributes[self.__geometry_values_hdf_name__]
@geometry_values.setter
def geometry_values(self, val: GeometryValuesDataset):
self._attributes[self.__geometry_values_hdf_name__] = val
@property
def __geometry_values_type__(self) -> Type[GeometryValuesDataset]:
"""S100 Datatype"""
return GeometryValuesDataset
def geometry_values_create(self):
""" Creates a blank, empty or zero value for geometry_values"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.geometry_values = self.__geometry_values_type__()
class WaterLevelValues(S1xxGridsBase):
"""NNN Group Datasets"""
__water_level_height_hdf_name__ = "waterLevelHeight"
__water_level_trend_hdf_name__ = "waterLevelTrend"
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
""" The plain text name of the dataset
Returns:
str: Name of the dataset
"""
return "values"
@property
def water_level_height(self) -> s1xx_sequence:
"""Defines the conversion from python naming to HDF5 (S104) naming"""
return self._attributes[self.__water_level_height_hdf_name__]
@water_level_height.setter
def water_level_height(self, val: s1xx_sequence):
self._attributes[self.__water_level_height_hdf_name__] = val
@property
def __water_level_height_type__(self) -> s1xx_sequence:
"""Define array datatype"""
return numpy.ndarray
@property
def water_level_height_dtype(self) -> Type[float]:
"""Define array datatype"""
return numpy.float32
def water_level_height_create(self):
""" Creates a blank, empty or zero value for water_level_height"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level_height = self.__water_level_height_type__([], self.water_level_height_dtype)
@property
def water_level_trend(self) -> WaterLevelTrend:
"""Defines the conversion from python naming to HDF5 (S104) naming"""
return self._attributes[self.__water_level_trend_hdf_name__]
@water_level_trend.setter
def water_level_trend(self, val: Union[int, str, WaterLevelTrend]):
self.set_enum_attribute(val, self.__water_level_trend_hdf_name__, self.__water_level_trend_type__)
@property
def __water_level_trend_type__(self) -> s1xx_sequence:
"""Define datatype"""
return numpy.ndarray
@property
def water_level_trend_dtype(self) -> Type[WaterLevelTrend]:
"""Define array datatype"""
return h5py.enum_dtype(dict([(water_level_trend.name, water_level_trend.value) for water_level_trend in WaterLevelTrend]))
def water_level_trend_create(self):
""" Creates a blank, empty or zero value for water_level_trend"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level_trend = self.__water_level_trend_type__([], self.water_level_trend_dtype)
def get_write_order(self):
return [self.__water_level_height_hdf_name__, self.__water_level_trend_hdf_name__]
def get_compound_dtype(self):
return [self.water_level_height_dtype, self.water_level_trend_dtype]
class WaterLevelGroup(S1xxObject):
__values_hdf_name__ = "values"
__time_point_hdf_name__ = "timePoint"
@property
def values(self) -> WaterLevelValues:
"""Plain text name of the dataset (e.g values)"""
return self._attributes[self.__values_hdf_name__]
@values.setter
def values(self, val: WaterLevelValues):
self._attributes[self.__values_hdf_name__] = val
@property
def __values_type__(self) -> Type[WaterLevelValues]:
return WaterLevelValues
def values_create(self):
""" Creates a blank, empty or zero value for values"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.values = self.__values_type__()
@property
def time_point(self) -> S1xxObject:
"""Defines the conversion from python naming to HDF5 (S100) naming"""
return self._attributes[self.__time_point_hdf_name__]
@time_point.setter
def time_point(self, val: S1xxObject):
self._attributes[self.__time_point_hdf_name__] = val
@property
def __time_point_type__(self) -> Type[str]:
"""Attribute datatype"""
return str
def time_point_create(self):
""" Creates a blank, empty or zero value for time_point"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.time_point = self.__time_point_type__()
@property
def __version__(self) -> int:
return 1
class WaterLevelGroupList(S1xxCollection):
""" This is the list of Group.NNN that are held as a list.
Each Group.NNN has a dataset of depth and uncertainty.
"""
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
return "Group"
@property
def metadata_type(self) -> type:
return WaterLevelGroup
class WaterLevelFeatureInstance(FeatureInstanceDCF2):
""" Basic template for the name of the attribute
Attribute name will be automatically determined based on the array position
of the S104_MetadataList
"""
__water_level_group_hdf_name__ = "Group" + r"[\._]\d+"
__uncertainty_dataset_hdf_name__ = "uncertainty"
__number_of_nodes_hdf_name__ = "numberOfNodes"
__type_of_water_level_data_hdf_name__ = "typeOfWaterLevelData"
@property
def __water_level_group_type__(self):
return WaterLevelGroupList
def water_level_group_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level_group = self.__water_level_group_type__()
@property
def water_level_group(self) -> S1xxCollection:
"""Defines the conversion from python naming to HDF5 (S104) naming"""
return self._attributes[self.__water_level_group_hdf_name__]
@water_level_group.setter
def water_level_group(self, val: S1xxCollection):
self._attributes[self.__water_level_group_hdf_name__] = val
@property
def number_of_nodes(self) -> S1xxObject:
return self._attributes[self.__number_of_nodes_hdf_name__]
@number_of_nodes.setter
def number_of_nodes(self, val: S1xxObject):
self._attributes[self.__number_of_nodes_hdf_name__] = val
@property
def __number_of_nodes_type__(self) -> Type[numpy.int32]:
return numpy.int32
def number_of_nodes_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.number_of_nodes = self.__number_of_nodes_type__()
@property
def uncertainty_dataset(self) -> S1xxDatasetBase:
"""Defines the conversion from python naming to HDF5 (S104) naming"""
return self._attributes[self.__uncertainty_dataset_hdf_name__]
@uncertainty_dataset.setter
def uncertainty_dataset(self, val: S1xxDatasetBase):
self._attributes[self.__uncertainty_dataset_hdf_name__] = val
@property
def __uncertainty_dataset_type__(self) -> Type[WaterLevelUncertaintyDataset]:
return WaterLevelUncertaintyDataset
def uncertainty_dataset_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.uncertainty_dataset = self.__uncertainty_dataset_type__()
@property
def __positioning_group_hdf_name__(self) -> str:
return "Positioning"
@property
def positioning_group(self) -> S1xxObject:
"""Defines the conversion from python naming to HDF5 (S104) naming"""
return self._attributes[self.__positioning_group_hdf_name__]
@positioning_group.setter
def positioning_group(self, val: S1xxObject):
self._attributes[self.__positioning_group_hdf_name__] = val
@property
def __positioning_group_type__(self):
"""Defines datatype"""
return PositioningGroup
def positioning_group_create(self):
""" Creates a blank, empty or zero value for positioning_group"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.positioning_group = self.__positioning_group_type__()
@property
def type_of_water_level_data(self) -> TYPE_OF_WATER_LEVEL_DATA:
return self._attributes[self.__type_of_water_level_data_hdf_name__]
@type_of_water_level_data.setter
def type_of_water_level_data(self, val: Union[int, str, TYPE_OF_WATER_LEVEL_DATA]):
self.set_enum_attribute(val, self.__type_of_water_level_data_hdf_name__, self.__type_of_water_level_data_type__)
@property
def __type_of_water_level_data_type__(self) -> Type[TYPE_OF_WATER_LEVEL_DATA]:
return TYPE_OF_WATER_LEVEL_DATA
def type_of_water_level_data_create(self):
""" Creates a value using the first item in the enumeration of type_of_water_level_data"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.type_of_water_level_data = list(self.__type_of_water_level_data_type__)[0]
class WaterLevelList(S104MetadataListBase):
"""
This is the set of WaterLevel.NN that act like a list here.
They will contain a list of Groups.NNN as well as other attributes etc.
"""
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
return WATER_LEVEL
@property
def metadata_type(self) -> Type[WaterLevelFeatureInstance]:
return WaterLevelFeatureInstance
class WaterLevelContainer(FeatureContainerDCF2):
""" This is the WaterLevel right off the root of the HDF5 which has possible attributes from S100 spec table 10c-10
This will hold child groups named WaterLevel.NN
"""
#: Basic template for the name of the attribute
#: Attribute name will be automatically determined based on the containing list's index
__water_level_hdf_name__ = WATER_LEVEL + r"[\._]\d+"
__min_dataset_height_hdf_name__ = "minDatasetHeight"
__max_dataset_height_hdf_name__ = "maxDatasetHeight"
__method_water_level_product_hdf_name__ = "methodWaterLevelProduct"
@property
def __version__(self) -> int:
return 1
@property
def __water_level_type__(self):
return WaterLevelList
def water_level_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level = self.__water_level_type__()
@property
def water_level(self) -> S104MetadataListBase:
""" The water level data, a list of WaterLevel
Returns:
S104_MetadataList_base: Contains a list of WaterLevel objects
via the WaterLevel_List class
"""
return self._attributes[self.__water_level_hdf_name__]
@water_level.setter
def water_level(self, val: S104MetadataListBase):
self._attributes[self.__water_level_hdf_name__] = val
@property
def min_dataset_height(self) -> S1xxObject:
"""Defines the conversion from python naming to HDF5 (S104) naming"""
return self._attributes[self.__min_dataset_height_hdf_name__]
@min_dataset_height.setter
def min_dataset_height(self, val: S1xxObject):
self._attributes[self.__min_dataset_height_hdf_name__] = val
@property
def __min_dataset_height_type__(self) -> Type[float]:
"""Defines datatype"""
return float
def min_dataset_height_create(self):
""" Creates a blank, empty or zero value for min_dataset_height"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.min_dataset_height = self.__min_dataset_height_type__()
@property
def max_dataset_height(self) -> S1xxObject:
return self._attributes[self.__max_dataset_height_hdf_name__]
@max_dataset_height.setter
def max_dataset_height(self, val: S1xxObject):
self._attributes[self.__max_dataset_height_hdf_name__] = val
@property
def __max_dataset_height_type__(self) -> Type[numpy.float32]:
"""Defines datatype"""
return numpy.float32
def max_dataset_height_create(self):
""" Creates a blank, empty or zero value for max_dataset_height"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.max_dataset_height = self.__max_dataset_height_type__()
@property
def method_water_level_product(self) -> S1xxObject:
return self._attributes[self.__method_water_level_product_hdf_name__]
@method_water_level_product.setter
def method_water_level_product(self, val: S1xxObject):
self._attributes[self.__method_water_level_product_hdf_name__] = val
@property
def __method_water_level_product_type__(self) -> Type[str]:
"""Defines datatype"""
return str
def method_water_level_product_create(self):
""" Creates a blank, empty or zero value for method_water_level_product"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.method_water_level_product = self.__method_water_level_product_type__()
class WaterLevelFeatureDataset(FeatureInformationDataset):
"""Create group_f feature dataset"""
@property
def __version__(self) -> int:
return 1
@property
def metadata_name(self) -> str:
"""S104 feature information dataset name"""
return WATER_LEVEL
@property
def metadata_type(self) -> Type[FeatureInformation]:
"""Feature information base class"""
return FeatureInformation
class GroupF(GroupFBase):
"""From S100 Table 10c-8 – Components of feature information group"""
__water_level_feature_dataset_hdf_name__ = WATER_LEVEL
@property
def __version__(self) -> int:
return 1
def feature_code_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.feature_code = self.__feature_code_type__([WATER_LEVEL], dtype=h5py_string_dtype)
@property
def __water_level_feature_dataset_type__(self):
return WaterLevelFeatureDataset
def water_level_feature_dataset_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level_feature_dataset = self.__water_level_feature_dataset_type__()
@property
def water_level_feature_dataset(self) -> WaterLevelFeatureDataset:
return self._attributes[self.__water_level_feature_dataset_hdf_name__]
@water_level_feature_dataset.setter
def water_level_feature_dataset(self, val: WaterLevelFeatureDataset):
self._attributes[self.__water_level_feature_dataset_hdf_name__] = val
class S104Root(S100Root):
"""The root group contains a feature information group and N feature containers.
In S104 there is one feature container 'water level'.
The coverage names are determined from the matching CoveragesAttributes
Table 3 of v0.0.7
"""
__water_level_hdf_name__ = WATER_LEVEL
__water_level_trend_threshold_hdf_name__ = "waterLevelTrendThreshold"
__vertical_coordinate_system_hdf_name__ = "verticalCS"
__vertical_coordinate_base_hdf_name__ = "verticalCoordinateBase"
__vertical_datum_reference_hdf_name__ = "verticalDatumReference"
__vertical_datum_epsg_hdf_name__ = "verticalDatum"
__horizontal_crs_hdf_name__ = "horizontalCRS"
@property
def __version__(self) -> int:
return 1
@property
def __feature_information_type__(self):
return GroupF
@property
def water_level(self) -> S1xxObject:
return self._attributes[self.__water_level_hdf_name__]
@property
def __water_level_type__(self):
return WaterLevelContainer
def water_level_create(self):
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level = self.__water_level_type__()
@water_level.setter
def water_level(self, val: S1xxObject):
self._attributes[self.__water_level_hdf_name__] = val
@property
def water_level_trend_threshold(self) -> S1xxObject:
return self._attributes[self.__water_level_trend_threshold_hdf_name__]
@water_level_trend_threshold.setter
def water_level_trend_threshold(self, val: S1xxObject):
self._attributes[self.__water_level_trend_threshold_hdf_name__] = val
@property
def __water_level_trend_threshold_type__(self) -> Type[numpy.float32]:
return numpy.float32
def water_level_trend_threshold_create(self):
""" Creates a blank, empty or zero value for water level trend threshold"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.water_level_trend_threshold = self.__water_level_trend_threshold_type__()
@property
def vertical_coordinate_system(self) -> S1xxObject:
return self._attributes[self.__vertical_coordinate_system_hdf_name__]
@vertical_coordinate_system.setter
def vertical_coordinate_system(self, val: S1xxObject):
self._attributes[self.__vertical_coordinate_system_hdf_name__] = val
@property
def __vertical_coordinate_system_type__(self) -> Type[numpy.int32]:
"""Define S104 datatype"""
return numpy.int32
def vertical_coordinate_system_create(self):
""" Creates a blank, empty or zero value for vertical coordinate system"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.vertical_coordinate_system = self.__vertical_coordinate_system_type__()
@property
def vertical_coordinate_base(self) -> VERTICAL_COORDINATE_BASE:
return self._attributes[self.__vertical_coordinate_base_hdf_name__]
@vertical_coordinate_base.setter
def vertical_coordinate_base(self, val: Union[int, str, VERTICAL_COORDINATE_BASE]):
self.set_enum_attribute(val, self.__vertical_coordinate_base_hdf_name__, self.__vertical_coordinate_base_type__)
@property
def __vertical_coordinate_base_type__(self) -> Type[VERTICAL_COORDINATE_BASE]:
"""Enumeration data type"""
return VERTICAL_COORDINATE_BASE
def vertical_coordinate_base_create(self):
""" Creates a value using the first item in the enumeration of vertical_coordinate_base"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.vertical_coordinate_base = list(self.__vertical_coordinate_base_type__)[0]
@property
def vertical_datum_reference(self) -> VERTICAL_DATUM_REFERENCE:
return self._attributes[self.__vertical_datum_reference_hdf_name__]
@vertical_datum_reference.setter
def vertical_datum_reference(self, val: Union[int, str, VERTICAL_DATUM_REFERENCE]):
self.set_enum_attribute(val, self.__vertical_datum_reference_hdf_name__, self.__vertical_datum_reference_type__)
@property
def __vertical_datum_reference_type__(self) -> Type[VERTICAL_DATUM_REFERENCE]:
"""Defines enumeration datatype"""
return VERTICAL_DATUM_REFERENCE
def vertical_datum_reference_create(self):
""" Creates a value using the first item in the enumeration of vertical_datum_reference"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.vertical_datum_reference = list(self.__vertical_datum_reference_type__)[0]
@property
def vertical_datum_epsg(self) -> S1xxObject:
"""EPSG code for vertical datum for verticalDatumReference = 2"""
        return self._attributes[self.__vertical_datum_epsg_hdf_name__]
    @vertical_datum_epsg.setter
    def vertical_datum_epsg(self, val: S1xxObject):
        self._attributes[self.__vertical_datum_epsg_hdf_name__] = val
@property
def __vertical_datum_epsg_type__(self) -> Type[numpy.int32]:
"""Define datatype"""
return numpy.int32
def vertical_datum_epsg_create(self):
""" Creates a blank, empty or zero value for vertical_datum_epsg"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.vertical_datum_epsg = self.__vertical_datum_epsg_type__()
@property
def horizontal_crs(self) -> S1xxObject:
return self._attributes[self.__horizontal_crs_hdf_name__]
@horizontal_crs.setter
def horizontal_crs(self, val: S1xxObject):
self._attributes[self.__horizontal_crs_hdf_name__] = val
@property
def __horizontal_crs_type__(self) -> Type[numpy.int32]:
"""Define S104 datatype"""
return numpy.int32
def horizontal_crs_create(self):
""" Creates a blank, empty or zero value for horizontal crs"""
# noinspection PyAttributeOutsideInit
# pylint: disable=attribute-defined-outside-init
self.horizontal_crs = self.__horizontal_crs_type__()
class DiscoveryMetadata(S1xxObject):
""" 12.2.6 of v1.0.1"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise NotImplementedError()
class S104File(S1XXFile):
""" HDF5 file object"""
PRODUCT_SPECIFICATION = 'INT.IHO.S-104.0.0'
def __init__(self, *args, **kywrds):
super().__init__(*args, root=S104Root, **kywrds)
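# Usage sketch (assumption, not from the original file): S1XXFile wraps h5py.File,
# so a new S-104 dataset can typically be started with
#   f = S104File("water_level.h5", "w")
# after which the S104Root structure defined above is available via the file's root.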
|
477285
|
import torch
import torch.nn as nn
from torch.nn import init
from torch.optim import lr_scheduler
import torchvision.models as torch_models
'''
Helper functions for model
Borrow tons of code from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
'''
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], backend_pretrain=False):
if len(gpu_ids) > 0:
# print("gpu_ids,", gpu_ids)
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids)
if not backend_pretrain:
init_weights(net, init_type, gain=init_gain)
return net
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def custom_resnet(model_name='resnet18', pretrained=False, **kwargs):
if pretrained and 'num_classes' in kwargs and kwargs['num_classes'] != 1000:
# patch n_classes params
n_classes = kwargs['num_classes']
kwargs['num_classes'] = 1000
model = getattr(torch_models, model_name)(**kwargs)
pretrained_state_dict = torch.utils.model_zoo.load_url(model_urls[model_name])
# load only existing feature
pretrained_dict = {k: v for k, v in pretrained_state_dict.items() if k in model.state_dict()}
model.load_state_dict(pretrained_dict)
print("[Info] Successfully load ImageNet pretrained parameters for %s." % model_name)
# update fc layer
print("Predict Class Number:", n_classes)
model.fc = nn.Linear(pretrained_state_dict['fc.weight'].size(1), n_classes)
init.xavier_normal_(model.fc.weight.data, 0.02)
init.constant_(model.fc.bias.data, 0.0)
else:
model = getattr(torch_models, model_name)(pretrained, **kwargs)
return model
def define_recog(input_nc, n_classes, which_model_netR, image_size, init_type='normal', init_gain=0.02, gpu_ids=[], backend_pretrain=False):
net_recog = None
if which_model_netR == 'default':
net_recog = RecognitionNet(input_nc, n_classes, image_size)
elif 'resnet' in which_model_netR:
# input size 224
net_recog = custom_resnet(which_model_netR, pretrained=backend_pretrain, num_classes=n_classes)
else:
raise NotImplementedError('Recognition model [%s] is not implemented' % which_model_netR)
return init_net(net_recog, init_type, init_gain, gpu_ids, backend_pretrain)
def define_multi_dis(input_nc, n_dis, hidden_nc_list=None, init_type='normal', init_gain=0.02, gpu_ids=[]):
dis_list = []
for idx in range(n_dis):
cur_dis = ThreeLayerDisNet(input_nc, hidden_nc_list)
cur_dis = init_net(cur_dis, init_type, init_gain, gpu_ids)
dis_list.append(cur_dis)
return dis_list
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, gan_type='wgan-gp', target_real_label=1.0, target_fake_label=0.0):
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_type = gan_type
if self.gan_type == 'wgan-gp':
self.loss = lambda x, y: -torch.mean(x) if y else torch.mean(x)
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'gan':
self.loss = nn.BCELoss()
else:
raise NotImplementedError('GAN loss type [%s] is not found' % gan_type)
def get_target_tensor(self, input, target_is_real):
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(input)
def __call__(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
target_tensor = target_is_real
else:
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
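# Illustrative sketch (added for clarity, not part of the original module): how
# GANLoss hides the real/fake target bookkeeping. The discriminator scores below
# are hypothetical random values.
def _demo_gan_loss():
    criterion = GANLoss(gan_type='lsgan')
    d_out = torch.rand(4, 1)             # pretend discriminator outputs
    loss_real = criterion(d_out, True)   # MSE against a tensor of ones
    loss_fake = criterion(d_out, False)  # MSE against a tensor of zeros
    return loss_real, loss_fake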
class RecognitionNet(nn.Module):
""" Use linear function for recognition model as illustrated in the original paper.
image_size: int"""
def __init__(self, input_nc, n_classes, image_size):
super(RecognitionNet, self).__init__()
n_input = input_nc * image_size * image_size
model = [nn.Linear(n_input, n_classes, bias=True),
nn.Sigmoid()]
self.model = nn.Sequential(*model)
def forward(self, img):
img = img.view(img.size(0), -1)
pred_aus = self.model(img)
return pred_aus
class ThreeLayerDisNet(nn.Module):
"""A three-layer feedforward net for discriminator
hidden_nc_list: with 2 items"""
def __init__(self, input_nc, hidden_nc_list=None):
super(ThreeLayerDisNet, self).__init__()
if hidden_nc_list is None:
hidden_nc_list = [240, 240] # set default as the vanilla GAN
model = [nn.Linear(input_nc, hidden_nc_list[0]),
nn.ReLU(),
nn.Linear(hidden_nc_list[0], hidden_nc_list[1]),
nn.ReLU(),
nn.Linear(hidden_nc_list[1], 1),
nn.Sigmoid()]
self.model = nn.Sequential(*model)
def forward(self, aus):
out = self.model(aus)
return out
|
477320
|
import torch
import os
import sys
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from ..utils import tqdm, HiddenPrints
class Evaluator():
"""
Evaluation class.
    Performs evaluation on a given model and DataHandler.
    Two main functions can be used:
    - eval: runs evaluation on the DataHandler query set.
    - eval_all: uses a DataHandler instantiated with eval_all=True and
      runs eval on all the loaders provided by the DataHandler.
Arguments:
- model: pytorch model to be evaluated.
- cfg: cfg object of the model.
- data_handler: DataHandler object of the dataset.
"""
def __init__(self,
model,
cfg,
data_handler,
output_folder='./Experiments_FSFCOS/Results'):
self.model = model
self.cfg = cfg
self.data_handler = data_handler
self.device = cfg.MODEL.DEVICE
self.output_folder = output_folder
self.categories = None
self.current_classes = None
def eval(self, verbose=True, all_classes=True, verbose_classes=True, loaders=None, seed=None):
"""
        Eval function on a single data loader (or a query/support loader pair).
        Arguments:
            - verbose: print eval results at the end of computation.
            - all_classes: if True, also print the per-class results in the summary.
"""
if seed is not None:
self.data_handler.rng_handler.update_seeds(seed)
if self.cfg.FEWSHOT.ENABLED:
if loaders is None:
query_loader, support_loader, classes = self.data_handler.get_dataloader(
seed=seed
)
else:
query_loader, support_loader, classes = loaders
self.current_classes = classes
if verbose_classes:
print('Evaluation on classes: {}'.format(str(classes)))
self.categories = {
idx: v['name']
for idx, v in query_loader.dataset.coco.cats.items()
}
self.contiguous_label_map = query_loader.dataset.contiguous_category_id_to_json_id
predictions = self.compute_pred_fs(query_loader, support_loader,
classes)
else:
query_loader = self.data_handler.get_dataloader(seed=seed)
classes = np.array(list(query_loader.dataset.coco.cats.keys())) + 1
predictions = self.compute_pred(query_loader)
if self.has_pred(predictions):
for pred in predictions:
pred.add_field("objectness",
torch.ones(len(pred), device=self.device))
# dirty hack to remove prints from pycocotools
with HiddenPrints():
coco_results = self.prepare_for_coco_detection(predictions,
query_loader.dataset)
res = self.evaluate_predictions_on_coco(
query_loader.dataset.coco, coco_results,
os.path.join(self.output_folder, 'res.json'),
'bbox',
classes=classes)
res_per_class = {}
for c in classes:
res_per_class[c] = self.evaluate_predictions_on_coco(query_loader.dataset.coco,
coco_results,
os.path.join(
self.output_folder,
'res.json'),
'bbox',
classes=[c])
if verbose:
self.eval_summary(res, res_per_class, all_classes=all_classes)
return res, res_per_class
else:
return {}, {}
def eval_all(self, n_episode=1, verbose=True, seed=None):
"""
        Similar to eval except that it loops over the multiple dataloaders returned
        by the DataHandler (the DataHandler must have eval_all=True).
Results are then accumulated and stored in a pandas dataframe.
"""
assert self.data_handler.eval_all == True, 'Use eval_all with eval_all=True in DataHandler'
accumulated_res_test = {}
accumulated_res_train = {}
all_res = {
            'train': accumulated_res_train,
            'test': accumulated_res_test
}
for eval_ep in range(n_episode):
if seed is not None:
self.data_handler.rng_handler.update_seeds(seed)
loaders = self.data_handler.get_dataloader(seed=seed)
for setup in ['train', 'test']:
res_all_cls = {}
for q_s_loaders in loaders[setup]:
_, res_cls = self.eval(verbose=False, loaders=q_s_loaders, seed=seed)
# this will overwrite some keys if the last batch is padded
# but only one eval is retained for each class
res_all_cls.update(res_cls)
for k, v in res_all_cls.items():
if not k in all_res[setup]:
all_res[setup][k] = []
all_res[setup][k].append(v.stats)
for setup in ['train', 'test']:
for k, v in all_res[setup].items():
all_res[setup][k] = np.vstack(all_res[setup][k]).mean(axis=0)
return self.prettify_results_fs(all_res, verbose=verbose)
def eval_no_fs(self, seed=None, verbose=False):
"""
Eval without fewshot.
"""
overall_res, res_per_class = self.eval(seed=seed, verbose=verbose)
for k in res_per_class:
res_per_class[k] = res_per_class[k].stats
return self.prettify_results(res_per_class, verbose=verbose)
def eval_summary(self, res, res_per_class, all_classes=True):
"""
Format results.
"""
classes = list(res_per_class.keys())
sep = '+{}+'.format('-'*77)
print('''\n{}\n\t\tAll classes {:<30}\n{}'''.format(
sep,
self.get_cat_names(classes),
sep))
res.summarize()
if all_classes:
for c, res in res_per_class.items():
print('''\n{}\n\t\tClass {}\n{}'''.format(
sep,
'{}: '.format(c) + self.categories[c-1],
sep))
res.summarize()
def get_cat_names(self, classes):
return ", ".join([
'\n\t\t{}: '.format(c) + self.categories[c-1] for c in classes
])
def compute_pred_fs(self, query_loader, support_loader=None, classes=None):
"""
Model inference on a query_loader with fewshot.
"""
predictions = []
with torch.no_grad():
# for iteration, (images, targ,
# img_id) in enumerate(tqdm(query_loader,
# desc='Computing predictions')):
for iteration, (images, targ,
img_id) in enumerate(query_loader):
support = self.model.compute_support_features(
support_loader, self.device)
images = images.to(self.device)
pred_batch = self.model(images, classes=classes, support=support)
for idx, pred in enumerate(pred_batch):
pred.add_field('image_id', torch.tensor(
img_id[idx])) # store img_id as tensor for convenience
predictions.append(pred.to('cpu'))
return predictions
def compute_pred(self, query_loader, support_loader=None, classes=None):
"""
Model inference on a query_loader without fewshot.
"""
predictions = []
with torch.no_grad():
for iteration, (images, targ, img_id) in enumerate(query_loader):
images = images.to(self.device)
pred_batch = self.model(images)
for idx, pred in enumerate(pred_batch):
pred.add_field('image_id', torch.tensor(
img_id[idx])) # store img_id as tensor for convenience
predictions.append(pred.to('cpu'))
return predictions
def evaluate_predictions_on_coco(self,
coco_gt, coco_results, json_result_file, iou_type="bbox",
classes=None):
"""
Run coco evaluation using pycocotools.
"""
coco_dt = coco_gt.loadRes(coco_results)
# self.ignore_dataset_annot_without_pred(coco_gt, coco_dt, classes)
coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
# coco_eval.params.catIds = [c - 1 for c in classes]
coco_eval.params.catIds = [self.contiguous_label_map[c] for c in classes]
coco_eval.params.imgIds = list(set([
det['image_id'] for det in list(coco_dt.anns.values())
]))
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
def ignore_dataset_annot_without_pred(self, coco_gt, coco_dt, classes):
img_with_predictions = set([det['image_id'] for det in list(coco_dt.anns.values())])
gt_anns = coco_gt.anns
classes_json = [self.contiguous_label_map[c] for c in classes]
rm_keys = []
for k, v in gt_anns.items():
if v['image_id'] not in img_with_predictions or \
(classes is not None and v['category_id'] not in classes_json):
# category id is not necesarily contiguous
gt_anns[k]['ignore'] = 1
rm_keys.append(k)
elif v['image_id'] not in img_with_predictions:
del coco_gt.imgs[v['image_id']]
for k in rm_keys:
del gt_anns[k]
def prepare_for_coco_detection(self, predictions, dataset):
"""
Convert predictions from model into coco format detections.
"""
# assert isinstance(dataset, COCODataset)
coco_results = []
for prediction in predictions:
image_id = prediction.get_field('image_id').item()
original_id = dataset.id_to_img_map[image_id]
if len(prediction) == 0:
continue
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
prediction = prediction.convert("xywh")
boxes = prediction.bbox.tolist()
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels]
coco_results.extend(
[
{
"image_id": original_id,
"category_id": mapped_labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def has_pred(self, predictions):
nb_pred = 0
for pred in predictions:
nb_pred += len(pred)
return nb_pred > 0
"""
    Prettify methods build pandas dataframes from the results of evaluation.
"""
def prettify_results_fs(self, results, verbose=True):
import pandas as pd
metrics = {}
metrics['Measure'] = ['AP'] * 6 + ['AR'] * 6
metrics['IoU'] = [
'0.50:0.95',
'0.50',
'0.75',
] + ['0.50:0.95'] * 9
metrics['Area'] = ['all', 'all', 'all', 'small', 'medium', 'large'] * 2
df_metrics = pd.DataFrame.from_dict(metrics)
df_train = pd.DataFrame.from_dict(results['train'])
df_train = df_train.reindex(sorted(df_train.columns), axis=1)
df_test = pd.DataFrame.from_dict(results['test'])
df_test = df_test.reindex(sorted(df_test.columns), axis=1)
train_cls = list(results['train'].keys())
df_all = pd.concat([df_metrics, df_train, df_test], axis=1)
df_all = df_all.set_index(['Measure', 'IoU', 'Area'])
columns = [('Train classes', c) if c in train_cls else
('Test classes', c) for c in df_all.columns]
df_all.columns = pd.MultiIndex.from_tuples(columns)
if verbose:
print(df_all)
return df_all
def prettify_results(self, results, verbose=True):
import pandas as pd
metrics = {}
metrics['Measure'] = ['AP'] * 6 + ['AR'] * 6
metrics['IoU'] = [
'0.50:0.95',
'0.50',
'0.75',
] + ['0.50:0.95'] * 9
metrics['Area'] = ['all', 'all', 'all', 'small', 'medium', 'large'] * 2
df_metrics = pd.DataFrame.from_dict(metrics)
df_train = pd.DataFrame.from_dict(results)
df_train = df_train.reindex(sorted(df_train.columns), axis=1)
train_cls = list(results.keys())
df_all = pd.concat([df_metrics, df_train], axis=1)
df_all = df_all.set_index(['Measure', 'IoU', 'Area'])
columns = [('Train classes', c) for c in df_all.columns]
df_all.columns = pd.MultiIndex.from_tuples(columns)
if verbose:
print(df_all)
return df_all
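# Illustrative usage sketch (added for clarity, not part of the original module);
# `my_model`, `my_cfg` and `my_data_handler` are hypothetical objects built
# elsewhere in the project:
#
#     evaluator = Evaluator(my_model, my_cfg, my_data_handler)
#     res, res_per_class = evaluator.eval(verbose=True)      # one query/support pair
#     df = evaluator.eval_all(n_episode=5, verbose=True)     # needs eval_all=True DataHandler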
|
477323
|
import numpy as np
import cv2
import string
import math
import os
import uuid
wd, _ = os.path.split(os.path.abspath(__file__))
class Captcha:
def __init__(self, width, high, ls=None, lc=4, fs=None,
folder=os.path.join(wd, 'samples'), debug=False):
"""
        :param ls: letter set; defaults to uppercase ASCII letters plus digits
        :param fs: font set (OpenCV font name strings)
:param lc: letter count in one pic
:param folder: the folder to save img
:param debug: debug mode
"""
if fs is None:
fs = ['FONT_HERSHEY_COMPLEX', 'FONT_HERSHEY_SIMPLEX',
'FONT_ITALIC']
self.fs = fs
if ls is None:
ls = string.ascii_uppercase + string.digits
if isinstance(ls, str):
self.letter = [i for i in ls]
elif isinstance(ls, list):
self.letter = ls
self.lc = lc
self.width, self.high = width, high
self.debug = debug
self.folder = folder
if not self.debug and folder:
if not os.path.exists(self.folder):
os.makedirs(self.folder)
def _tilt_img(self, img):
tmp_img = img.copy()
tmp_img.fill(255)
tile_angle = np.random.randint(
int(100*-math.pi/6), int(100*math.pi/6)
) / 100
high, width, _ = img.shape
for y in range(width):
for x in range(high):
new_y = int(y + (x-high/2)*math.tanh(tile_angle))
try:
tmp_img[x, new_y, :] = img[x, y, :]
except IndexError:
pass
img[:, :, :] = tmp_img[:, :, :]
def _shake_img(self, img, outer_top_left, outer_bottom_right,
inner_top_left, inner_bottom_right):
(x1, y1), (x2, y2) = outer_top_left, outer_bottom_right
(i1, j1), (i2, j2) = inner_top_left, inner_bottom_right
delta_x = np.random.randint(x1-i1, x2-i2)
delta_y = np.random.randint(y1-j1, y2-j2)
area = img[y1:y2, x1:x2, :]
area_high, area_width, _ = area.shape
tmp_area = area.copy()
tmp_area.fill(255)
for index_y in range(area_high):
for index_x in range(area_width):
new_x, new_y = index_x + delta_x, index_y + delta_y
if new_x < area_width and new_y < area_high:
tmp_area[new_y, new_x, :] = area[index_y, index_x, :]
area[:, :, :] = tmp_area[:, :, :]
def _distort_img(self, img):
high, width, _ = img.shape
tmp_img = img.copy()
tmp_img.fill(255)
coef_vertical = np.random.randint(1, 5)
coef_horizontal = np.random.choice([2, 3, 4]) * math.pi / width
scale_biase = np.random.randint(0, 360) * math.pi / 180
def new_coordinate(x, y):
return int(x+coef_vertical*math.sin(coef_horizontal*y+scale_biase))
for y in range(width):
for x in range(high):
new_x = new_coordinate(x, y)
try:
tmp_img[x, y, :] = img[new_x, y, :]
except IndexError:
pass
img[:, :, :] = tmp_img[:, :, :]
def _draw_basic(self, img, text):
font_face = getattr(cv2, np.random.choice(self.fs))
font_scale = 1
font_thickness = 2
max_width = max_high = 0
for i in text:
(width, high), _ = cv2.getTextSize(
i, font_face, font_scale, font_thickness)
max_width, max_high = max(max_width, width), max(max_high, high)
total_width = max_width * self.lc
width_delta = np.random.randint(0, self.width - total_width)
vertical_range = self.high - max_high
images = list()
for index, letter in enumerate(text):
tmp_img = img.copy()
delta_high = np.random.randint(
int(2*vertical_range/5), int(3*vertical_range/5)
)
bottom_left_coordinate = (
index*max_width + width_delta,
self.high - delta_high
)
font_color = tuple(int(np.random.choice(range(0, 156)))
for _ in range(3))
cv2.putText(tmp_img, letter, bottom_left_coordinate, font_face,
font_scale, font_color, font_thickness)
self._tilt_img(tmp_img)
images.append(tmp_img)
high, width, _ = img.shape
for y in range(width):
for x in range(high):
r, g, b = 0, 0, 0
for tmp_img in images:
r += tmp_img[x, y, 0]
g += tmp_img[x, y, 1]
b += tmp_img[x, y, 2]
r, g, b = r % 256, g % 256, b % 256
img[x, y, :] = (r, g, b)
def _draw_line(self, img):
left_x = np.random.randint(0, self.width//4)
left_y = np.random.randint(self.high)
right_x = np.random.randint(self.width*3//4, self.width)
right_y = np.random.randint(self.high)
start, end = (left_x, left_y), (right_x, right_y)
line_color = tuple(int(np.random.choice(range(0, 156)))
for _ in range(3))
line_thickness = np.random.randint(1, 3)
cv2.line(img, start, end, line_color, line_thickness)
def _put_noise(self, img):
for i in range(600):
x = np.random.randint(self.width)
y = np.random.randint(self.high)
dot_color = tuple(int(np.random.choice(range(0, 156)))
for _ in range(3))
img[y, x, :] = dot_color
def save_img(self, text):
img = np.zeros((self.high, self.width, 3), np.uint8)
img.fill(255)
self._draw_basic(img, text)
self._put_noise(img)
self._distort_img(img)
self._draw_line(img)
if self.debug:
cv2.imshow(text, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
fn = text + ('_'+str(uuid.uuid1())[4: 8])
cv2.imwrite('{}/{}.jpg'.format(self.folder, fn), img)
def batch_create_img(self, number=5):
exits = set()
        while len(exits) < number:
word = ''.join(np.random.choice(self.letter, self.lc))
if word not in exits:
exits.add(word)
self.save_img(word)
if not self.debug:
if len(exits) % 10 == 0:
print('{} generated.'.format(len(exits)))
if not self.debug:
print('{} captchas saved into {}.'.format(len(exits), self.folder))
if __name__ == '__main__':
letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'K', 'M',
'N', 'P', 'R', 'T', 'U', 'V', 'W', 'X', 'Y']
c = Captcha(120, 36, letters, fs=['FONT_ITALIC'], debug=True)
c.batch_create_img(3)
|
477349
|
class Solution:
def canConvert(self, s1: str, s2: str) -> bool:
if s1 == s2: return True
dp = {}
for i, j in zip(s1, s2):
if dp.setdefault(i, j) != j:
return False
return len(set(s2)) < 26
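# Notes added for clarity (not part of the original solution): the dp dict checks
# that every character of s1 maps to exactly one character of s2; a valid conversion
# order then exists as long as s2 leaves at least one of the 26 letters unused, which
# provides a temporary slot for breaking mapping cycles. Hypothetical check:
#     Solution().canConvert("aabcc", "ccdee")       -> True
#     Solution().canConvert("leetcode", "codeleet") -> False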
|
477355
|
import json
import seaborn as sns
import re
import sys
import os
import os.path
import pandas as pd
import tinydb
from matplotlib import pyplot as plt
import pprint
DEFAULT_STYLE = {
'secondary': 'style'
}
pp = pprint.PrettyPrinter()
class ExperimentPlotter(object):
def __init__(self, base_path, data, config):
self._data = data
self._config = config
self._base_path = base_path
def _build_query(self, qspec):
if isinstance(qspec, list):
# implicit and
dummy_spec = {'sub': qspec}
return self._build_and_query(dummy_spec)
kind = qspec.get('kind', 'simple')
if kind == 'and':
return self._build_and_query(qspec)
elif kind == 'or':
return self._build_or_query(qspec)
elif kind == 'set':
return self._build_set_query(qspec)
else:
assert kind == 'simple'
return self._build_simple_query(qspec)
def _build_set_query(self, qspec):
q_builder = tinydb.Query()
composed_q = None
field = qspec['field']
for item in qspec['set']:
print(f"Item: {item}")
q = getattr(q_builder, field).search(item)
if composed_q is None:
composed_q = q
else:
composed_q |= q
return composed_q
def _build_and_query(self, qspec):
composed_q = None
for sub_spec in qspec['sub']:
sub_q = self._build_query(sub_spec)
if composed_q is None:
composed_q = sub_q
else:
composed_q &= sub_q
return composed_q
def _build_or_query(self, qspec):
composed_q = None
for sub_spec in qspec['sub']:
sub_q = self._build_query(sub_spec)
if composed_q is None:
composed_q = sub_q
else:
composed_q |= sub_q
return composed_q
def _build_simple_query(self, qspec):
q_builder = tinydb.Query()
field = qspec['field']
if 'match' in qspec:
q = getattr(q_builder, field).search(qspec['match'])
elif 'min' in qspec:
q = getattr(q_builder, field).test(
lambda s: float(s) >= float(qspec['min']))
elif 'max' in qspec:
q = getattr(q_builder, field).test(
                lambda s: float(s) <= float(qspec['max']))
if qspec.get('negate', False):
q = ~q
return q
def _get_data(self):
q = self._build_query(self._config['filters'])
return list(self._data.search(q))
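    # Illustrative filter spec (added for clarity; the field names come from
    # DataReader, the values are hypothetical). A plot config entry might look like:
    #   {"filename": "sizes.png",
    #    "x_axis": "base_size", "y_axis": "cpu_time",
    #    "filters": [{"field": "experiment", "match": "insert"},
    #                {"kind": "set", "field": "algorithm", "set": ["treap", "ziptree"]}]}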
def plot(self):
plt.clf()
d = self._get_data()
df = pd.DataFrame(d)
x_axis = self._config.get('x_axis', 'base_size')
x_power = int(self._config.get('x_power', 6))
x_label = self._config.get('x_label', 'Size')
if x_power > 1:
df[x_axis] = df[x_axis].apply(lambda x: x / (10**x_power))
x_label += f" $(\\times 10^{{{x_power}}})$"
y_axis = self._config.get('y_axis', 'cpu_time')
y_power = int(self._config.get('y_power', 6))
y_label = self._config.get('y_label', 'Time (ns)')
if y_power > 1:
df[y_axis] = df[y_axis].apply(lambda y: y / (10**y_power))
y_label += f" $(\\times 10^{{{y_power}}})$"
plot = sns.lineplot(x=x_axis,
y=y_axis,
hue=self._config.get('hue', 'full_algo'),
data=df)
plot.set(xlabel=x_label,
ylabel=y_label)
output_file = os.path.join(self._base_path, self._config['filename'])
plot.get_figure().savefig(output_file)
class DataReader(object):
name_re = re.compile(
r'(?P<group>[^\s]+) :: (?P<experiment>[^\s]+) :: (?P<algorithm>[^[/]+)(\[(?P<algopts>[^\]]*)\])?/(?P<base_size>\d+)/(?P<experiment_size>\d+)/\d+/manual_time')
def __init__(self, json_data):
self._data = []
self._json_data = json_data
self._process()
def _process(self):
for entry in self._json_data['benchmarks']:
if entry['run_type'] == 'aggregate':
continue
name = entry['name']
m = DataReader.name_re.match(name)
if not m:
print("Could not parse experiment name:")
print(name)
exit(-1)
d = {
'cpu_time': float(entry['cpu_time']),
'real_time': float(entry['real_time']),
'iterations': float(entry['iterations']),
'time_unit': entry['time_unit'], # TODO convert between them!
'base_size': float(m.group('base_size')),
'experiment_size': float(m.group('experiment_size')),
'group': m.group('group'),
'experiment': m.group('experiment'),
'algorithm': m.group('algorithm'),
'algopts': m.group('algopts'),
'full_algo': "{} [{}]".format(m.group('algorithm'),
m.groupdict().get('algopts', 'foo'))
}
self._data.append(d)
def get(self):
db = tinydb.TinyDB(storage=tinydb.storages.MemoryStorage)
for entry in self._data:
db.insert(entry)
return db
if __name__ == '__main__':
data_filename = sys.argv[1]
cfg_filename = sys.argv[2]
output_dir = sys.argv[3]
with open(data_filename, 'r') as json_file:
json_data = json.load(json_file)
with open(cfg_filename, 'r') as cfg_file:
cfg = json.load(cfg_file)
reader = DataReader(json_data)
data = reader.get()
for plot_cfg in cfg:
p = ExperimentPlotter(output_dir, data, plot_cfg)
p.plot()
|
477423
|
import sys
import decouple
try:
from .base import * # noqa
except decouple.UndefinedValueError as exp:
    print('ERROR:', exp)
sys.exit(1)
|
477429
|
import torch
from torch import nn
import math
from typing import Optional
from torch import Tensor
eps=1e-5
################################################################
# DGL's implementation of FeedForwardNet (MLP) for SIGN
class FeedForwardNet(nn.Module):
def __init__(self, in_feats, hidden, out_feats, n_layers, dropout):
super(FeedForwardNet, self).__init__()
self.layers = nn.ModuleList()
self.n_layers = n_layers
if n_layers == 1:
self.layers.append(nn.Linear(in_feats, out_feats))
else:
self.layers.append(nn.Linear(in_feats, hidden))
for i in range(n_layers - 2):
self.layers.append(nn.Linear(hidden, hidden))
self.layers.append(nn.Linear(hidden, out_feats))
if self.n_layers > 1:
self.prelu = nn.PReLU()
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
for layer in self.layers:
nn.init.xavier_uniform_(layer.weight, gain=gain)
nn.init.zeros_(layer.bias)
def forward(self, x):
for layer_id, layer in enumerate(self.layers):
x = layer(x)
if layer_id < self.n_layers - 1:
x = self.dropout(self.prelu(x))
return x
################################################################
# More general MLP layer
class MLP(nn.Module):
def __init__(self, in_feats, hidden, out_feats, n_layers, dropout, input_drop=0., residual=False, normalization="batch"):
super(MLP, self).__init__()
self._residual = residual
self.layers = nn.ModuleList()
self.norms = nn.ModuleList()
self.n_layers = n_layers
self.input_drop = nn.Dropout(input_drop)
if n_layers == 1:
self.layers.append(nn.Linear(in_feats, out_feats))
else:
self.layers.append(nn.Linear(in_feats, hidden))
if normalization == "batch":
self.norms.append(nn.BatchNorm1d(hidden))
if normalization == "layer":
self.norms.append(nn.LayerNorm(hidden))
if normalization == "none":
self.norms.append(nn.Identity())
for i in range(n_layers - 2):
self.layers.append(nn.Linear(hidden, hidden))
if normalization == "batch":
self.norms.append(nn.BatchNorm1d(hidden))
if normalization == "layer":
self.norms.append(nn.LayerNorm(hidden))
if normalization == "none":
self.norms.append(nn.Identity())
self.layers.append(nn.Linear(hidden, out_feats))
if self.n_layers > 1:
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
for layer in self.layers:
nn.init.xavier_uniform_(layer.weight, gain=gain)
nn.init.zeros_(layer.bias)
for norm in self.norms:
norm.reset_parameters()
# print(self.layers[0].weight)
def forward(self, x):
x = self.input_drop(x)
if self._residual:
prev_x = x
for layer_id, layer in enumerate(self.layers):
x = layer(x)
if layer_id < self.n_layers - 1:
x = self.dropout(self.relu(self.norms[layer_id](x)))
if self._residual:
if x.shape[1] == prev_x.shape[1]:
x += prev_x
prev_x = x
return x
# Multi-head (ensemble) MLP, note that different heads are processed
# sequentially
class MultiHeadMLP(nn.Module):
def __init__(self, in_feats, hidden, out_feats, n_heads, n_layers, dropout, input_drop=0., concat=False, residual=False, normalization="batch"):
super().__init__()
self._concat = concat
self.mlp_list = nn.ModuleList([MLP(in_feats, hidden, out_feats, n_layers, dropout, input_drop=input_drop, residual=residual, normalization=normalization) for _ in range(n_heads)])
# self.reset_parameters()
def reset_parameters(self):
for mlp in self.mlp_list:
mlp.reset_parameters()
def forward(self, x):
# x size:
# [N, d_in] or [N, H, d_in]
if len(x.shape) == 3:
out = [mlp(x[:, i, :]) for i, mlp in enumerate(self.mlp_list)]
if len(x.shape) == 2:
out = [mlp(x) for mlp in self.mlp_list]
out = torch.stack(out, dim=1)
if self._concat:
out = out.flatten(1, -1)
return out
class ParallelMLP(nn.Module):
def __init__(self, in_feats, hidden, out_feats, n_heads, n_layers, dropout, input_drop=0., residual=False, normalization="batch"):
super(ParallelMLP, self).__init__()
self._residual = residual
self.layers = nn.ModuleList()
self.norms = nn.ModuleList()
self._n_heads = n_heads
self._n_layers = n_layers
self.input_drop = nn.Dropout(input_drop)
if self._n_layers == 1:
# self.layers.append(MultiHeadLinear(in_feats, out_feats, n_heads))
self.layers.append(nn.Conv1d(in_feats * n_heads, out_feats * n_heads, kernel_size=1, groups=n_heads))
else:
# self.layers.append(MultiHeadLinear(in_feats, hidden, n_heads))
self.layers.append(nn.Conv1d(in_feats * n_heads, hidden * n_heads, kernel_size=1, groups=n_heads))
if normalization == "batch":
# self.norms.append(MultiHeadBatchNorm(n_heads, hidden * n_heads))
self.norms.append(nn.BatchNorm1d(hidden * n_heads))
if normalization == "layer":
self.norms.append(nn.GroupNorm(n_heads, hidden * n_heads))
if normalization == "none":
self.norms.append(nn.Identity())
for i in range(self._n_layers - 2):
# self.layers.append(MultiHeadLinear(hidden, hidden, n_heads))
self.layers.append(nn.Conv1d(hidden * n_heads, hidden * n_heads, kernel_size=1, groups=n_heads))
if normalization == "batch":
# self.norms.append(MultiHeadBatchNorm(n_heads, hidden * n_heads))
self.norms.append(nn.BatchNorm1d(hidden * n_heads))
if normalization == "layer":
self.norms.append(nn.GroupNorm(n_heads, hidden * n_heads))
if normalization == "none":
self.norms.append(nn.Identity())
# self.layers.append(MultiHeadLinear(hidden, out_feats, n_heads))
self.layers.append(nn.Conv1d(hidden * n_heads, out_feats * n_heads, kernel_size=1, groups=n_heads))
if self._n_layers > 1:
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
# for head in range(self._n_heads):
# for layer in self.layers:
# nn.init.kaiming_uniform_(layer.weight[head], a=math.sqrt(5))
# if layer.bias is not None:
# fan_in, _ = nn.init._calculate_fan_in_and_fan_out(layer.weight[head])
# bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
# nn.init.uniform_(layer.bias[head], -bound, bound)
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
for head in range(self._n_heads):
for layer in self.layers:
nn.init.xavier_uniform_(layer.weight[head], gain=gain)
if layer.bias is not None:
nn.init.zeros_(layer.bias[head])
for norm in self.norms:
norm.reset_parameters()
# for norm in self.norms:
# norm.moving_mean[head].zero_()
# norm.moving_var[head].fill_(1)
# if norm._affine:
# nn.init.ones_(norm.scale[head])
# nn.init.zeros_(norm.offset[head])
# print(self.layers[0].weight[0])
def forward(self, x):
x = self.input_drop(x)
if len(x.shape) == 2:
x = x.view(-1, 1, x.shape[1])
x = x.repeat(1, self._n_heads, 1)
# x = x.repeat(1, self._n_heads).unsqueeze(-1)
if len(x.shape) == 3:
x = x.flatten(1, -1).unsqueeze(-1)
if self._residual:
prev_x = x
for layer_id, layer in enumerate(self.layers):
x = layer(x)
# x = x.flatten(1, -1)
if layer_id < self._n_layers - 1:
shape = x.shape
x = self.dropout(self.relu(self.norms[layer_id](x)))
# x = x.reshape(shape=shape)
if self._residual:
if x.shape[2] == prev_x.shape[2]:
x += prev_x
prev_x = x
x = x.view(-1, self._n_heads, x.shape[1] // self._n_heads)
return x
################################################################
# Modified multi-head Linear layer
class MultiHeadLinear(nn.Module):
def __init__(self, in_feats, out_feats, n_heads, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.FloatTensor(size=(n_heads, in_feats, out_feats)))
if bias:
self.bias = nn.Parameter(torch.FloatTensor(size=(n_heads, 1, out_feats)))
else:
self.bias = None
    def reset_parameters(self) -> None:
        for head in range(self.weight.shape[0]):
            nn.init.kaiming_uniform_(self.weight[head], a=math.sqrt(5))
            if self.bias is not None:
                fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[head])
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                nn.init.uniform_(self.bias[head], -bound, bound)
# def reset_parameters(self):
# gain = nn.init.calculate_gain("relu")
# for weight in self.weight:
# nn.init.xavier_uniform_(weight, gain=gain)
# if self.bias is not None:
# nn.init.zeros_(self.bias)
def forward(self, x):
        # input size: [N, d_in] or [N, H, d_in] (one slice per head)
        # output size: [N, H, d_out]
if len(x.shape) == 3:
x = x.transpose(0, 1)
x = torch.matmul(x, self.weight)
if self.bias is not None:
x += self.bias
return x.transpose(0, 1)
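# Illustrative sketch (added for clarity, not part of the original module): shape
# behaviour of MultiHeadLinear; the sizes below are arbitrary.
def _demo_multi_head_linear():
    layer = MultiHeadLinear(in_feats=16, out_feats=8, n_heads=4)
    layer.reset_parameters()
    x = torch.randn(32, 4, 16)   # [N, H, d_in], one slice per head
    out = layer(x)               # [N, H, d_out] == [32, 4, 8]
    return out.shape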
# Modified multi-head BatchNorm1d layer
class MultiHeadBatchNorm(nn.Module):
def __init__(self, n_heads, in_feats, momentum=0.1, affine=True, device=None,
dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
assert in_feats % n_heads == 0
self._in_feats = in_feats
self._n_heads = n_heads
self._momentum = momentum
self._affine = affine
if affine:
self.weight = nn.Parameter(torch.empty(size=(n_heads, in_feats // n_heads)))
self.bias = nn.Parameter(torch.empty(size=(n_heads, in_feats // n_heads)))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
self.register_buffer("running_mean", torch.zeros(size=(n_heads, in_feats // n_heads)))
self.register_buffer("running_var", torch.ones(size=(n_heads, in_feats // n_heads)))
self.running_mean: Optional[Tensor]
self.running_var: Optional[Tensor]
self.reset_parameters()
def reset_parameters(self):
self.running_mean.zero_() # type: ignore[union-attr]
self.running_var.fill_(1) # type: ignore[union-attr]
if self._affine:
nn.init.zeros_(self.bias)
for weight in self.weight:
nn.init.ones_(weight)
def forward(self, x):
assert x.shape[1] == self._in_feats
x = x.view(-1, self._n_heads, self._in_feats // self._n_heads)
self.running_mean = self.running_mean.to(x.device)
self.running_var = self.running_var.to(x.device)
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
if bn_training:
mean = x.mean(dim=0, keepdim=True)
var = x.var(dim=0, unbiased=False, keepdim=True)
out = (x-mean) * torch.rsqrt(var + eps)
self.running_mean = (1 - self._momentum) * self.running_mean + self._momentum * mean.detach()
self.running_var = (1 - self._momentum) * self.running_var + self._momentum * var.detach()
else:
out = (x - self.running_mean) * torch.rsqrt(self.running_var + eps)
if self._affine:
out = out * self.weight + self.bias
return out
# Another multi-head MLP defined from scratch
class GroupMLP(nn.Module):
def __init__(self, in_feats, hidden, out_feats, n_heads, n_layers, dropout, input_drop=0., residual=False, normalization="batch"):
super(GroupMLP, self).__init__()
self._residual = residual
self.layers = nn.ModuleList()
self.norms = nn.ModuleList()
self._n_heads = n_heads
self._n_layers = n_layers
self.input_drop = nn.Dropout(input_drop)
if self._n_layers == 1:
self.layers.append(MultiHeadLinear(in_feats, out_feats, n_heads))
else:
self.layers.append(MultiHeadLinear(in_feats, hidden, n_heads))
if normalization == "batch":
self.norms.append(MultiHeadBatchNorm(n_heads, hidden * n_heads))
# self.norms.append(nn.BatchNorm1d(hidden * n_heads))
if normalization == "layer":
self.norms.append(nn.GroupNorm(n_heads, hidden * n_heads))
if normalization == "none":
self.norms.append(nn.Identity())
for i in range(self._n_layers - 2):
self.layers.append(MultiHeadLinear(hidden, hidden, n_heads))
if normalization == "batch":
self.norms.append(MultiHeadBatchNorm(n_heads, hidden * n_heads))
# self.norms.append(nn.BatchNorm1d(hidden * n_heads))
if normalization == "layer":
self.norms.append(nn.GroupNorm(n_heads, hidden * n_heads))
if normalization == "none":
self.norms.append(nn.Identity())
self.layers.append(MultiHeadLinear(hidden, out_feats, n_heads))
if self._n_layers > 1:
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
for head in range(self._n_heads):
for layer in self.layers:
nn.init.kaiming_uniform_(layer.weight[head], a=math.sqrt(5))
if layer.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(layer.weight[head])
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(layer.bias[head], -bound, bound)
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
for head in range(self._n_heads):
for layer in self.layers:
nn.init.xavier_uniform_(layer.weight[head], gain=gain)
if layer.bias is not None:
nn.init.zeros_(layer.bias[head])
for norm in self.norms:
norm.reset_parameters()
# for norm in self.norms:
# norm.moving_mean[head].zero_()
# norm.moving_var[head].fill_(1)
# if norm._affine:
# nn.init.ones_(norm.scale[head])
# nn.init.zeros_(norm.offset[head])
# print(self.layers[0].weight[0])
def forward(self, x):
x = self.input_drop(x)
if len(x.shape) == 2:
x = x.view(-1, 1, x.shape[1])
if self._residual:
prev_x = x
for layer_id, layer in enumerate(self.layers):
x = layer(x)
if layer_id < self._n_layers - 1:
shape = x.shape
x = x.flatten(1, -1)
x = self.dropout(self.relu(self.norms[layer_id](x)))
x = x.reshape(shape=shape)
if self._residual:
if x.shape[2] == prev_x.shape[2]:
x += prev_x
prev_x = x
return x
|
477473
|
from pymclevel.infiniteworld import MCInfdevOldLevel
from pymclevel import mclevel
from timeit import timeit
import templevel
#import logging
#logging.basicConfig(level=logging.INFO)
def natural_relight():
world = mclevel.fromFile("testfiles/AnvilWorld")
t = timeit(lambda: world.generateLights(world.allChunks), number=1)
print "Relight natural terrain: %d chunks in %.02f seconds (%.02fms per chunk)" % (world.chunkCount, t, t / world.chunkCount * 1000)
def manmade_relight():
t = templevel.TempLevel("TimeRelight", createFunc=lambda f:MCInfdevOldLevel(f, create=True))
world = t.level
station = mclevel.fromFile("testfiles/station.schematic")
times = 2
for x in range(times):
for z in range(times):
world.copyBlocksFrom(station, station.bounds, (x * station.Width, 63, z * station.Length), create=True)
t = timeit(lambda: world.generateLights(world.allChunks), number=1)
print "Relight manmade building: %d chunks in %.02f seconds (%.02fms per chunk)" % (world.chunkCount, t, t / world.chunkCount * 1000)
if __name__ == '__main__':
natural_relight()
manmade_relight()
|
477491
|
import re
from core.msfActionModule import msfActionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class scan_msf_jboss_vulnscan(msfActionModule):
def __init__(self, config, display, lock):
super(scan_msf_jboss_vulnscan, self).__init__(config, display, lock)
self.triggers = ["newService_http", "newPort_tcp_80", "newPort_tcp_8080"]
self.requirements = ["msfconsole"]
self.types = ["http"]
self.title = "Attempt to determine if a jboss instance has default creds"
self.shortName = "MSFJbossVulnscan"
self.description = "execute [auxiliary/scanner/http/jboss_vulnscan] on each target"
self.safeLevel = 4
def getTargets(self):
self.targets = kb.get('port/tcp/443', 'port/tcp/8443', 'service/https', 'service/ssl')
def process(self):
# load any targets we are interested in
self.getTargets()
if len(self.targets) > 0:
# loop over each target
for t in self.targets:
ports = kb.get('service/http/' + t + '/tcp')
for p in ports:
# verify we have not tested this host before
if not self.seentarget(t+p):
# add the new IP to the already seen list
self.addseentarget(t+p)
cmd = {
'config':[
"use auxiliary/scanner/http/jboss_vulnscan",
"set RHOSTS %s" % t,
"set RPORT %s" % p
],
'payload':'none'}
                        result, outfile = self.msfExec(t, cmd)
for line in result.splitlines():
m = re.match(r'.*Authenticated using (.*):(.*)', line)
if (m):
self.display.error("Jboss on [" + t + ":" + p + "] has default creds of [" +
m.group(1).strip() +"]/[" + m.group(2).strip() + "]")
kb.add("creds/service/jboss/" + t + "/port/" + p + "/username/"
+ m.group(1).strip() + "/password/" + m.group(2).strip())
self.fire("newJbossPassword")
return
|
477500
|
import threading
from lemoncheesecake.helpers.threading import ThreadedFactory
def test_threaded_factory():
class TestFactory(ThreadedFactory):
def setup_object(self):
return object()
factory = TestFactory()
objects = []
class TestThread(threading.Thread):
def run(self):
objects.append(factory.get_object())
objects.append(factory.get_object())
thread_1 = TestThread()
thread_1.start()
thread_1.join()
thread_2 = TestThread()
thread_2.start()
thread_2.join()
assert len(objects) == 4
assert objects[0] is objects[1]
assert objects[2] is objects[3]
assert objects[1] is not objects[2]
def test_threaded_factory_teardown():
marker = []
class TestFactory(ThreadedFactory):
def setup_object(self):
return 42
def teardown_object(self, obj):
marker.append(obj)
factory = TestFactory()
factory.get_object()
factory.teardown_factory()
assert marker == [42]
|
477509
|
from collections import defaultdict
from data_util.global_var import *
class UnimorphSchema:
# schema in unimorph-schema.tsv automatically extracted from https://unimorph.github.io/doc/unimorph-schema.pdf
# using tabula https://github.com/tabulapdf/tabula
    def __init__(self, src=UNIMORPH_SCHEMA_SRC, ignore_feats=("Deixis",)):
code_ix = defaultdict()
feature_value = defaultdict(list)
for line in open(src):
feat, val, code = line.strip().split("\t")
if feat not in ignore_feats:
code = code.upper()
assert code not in code_ix, f"Duplicate code {code}" # PROX can be Case or Deixis
code_ix[code] = (feat, val)
feature_value[feat] += [(val, code)]
self.code_ix = code_ix
self.feature_value = feature_value
# interpret single code
def get_feature(self, code):
code = code.upper().replace("{", "").replace("}", "").replace("/", "+").split(
"+") # sometimes several values are given
out = []
for c in code:
assert c in self.code_ix, f"Unknown code {c}, {code}" # just to be sure
out += [self.code_ix[c]]
return out[0][0], "+".join(sorted([x[1] for x in out]))
# decode full Unimorph MSD
def decode_msd(self, msd):
msd = msd.split(";")
feature_values = []
        residue = []  # whatever we couldn't decipher
for code in msd:
try:
feature_values += [self.get_feature(code)]
except AssertionError:
residue += [code]
out = {}
for feature, value in feature_values:
# TODO: actually there are quite a few such errors
# assert feature not in out, f"Duplicate value for {feature}, {msd}"
out[feature] = value
return out, residue
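# Illustrative usage sketch (added for clarity, not part of the original module);
# the MSD string below is hypothetical and decoding depends on the codes defined
# in the schema TSV:
#
#     schema = UnimorphSchema()
#     feats, residue = schema.decode_msd("V;PST;3;SG")
#     # feats maps each recognised feature to its value; residue collects codes
#     # that the schema file does not define.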
|
477511
|
import deep_architect.search_logging as sl
import deep_architect.visualization as vi
import deep_architect.utils as ut
from deep_architect.searchers.random import RandomSearcher
import deep_architect.modules as mo
import deep_architect.contrib.misc.search_spaces.tensorflow.dnn as css_dnn
import search_space as ss
def main():
# Loading the config file.
cfg = ut.get_config()
# Creating the searcher.
searcher = RandomSearcher(ss.search_space_fn)
# Creating the search folder for logging information.
sl.create_search_folderpath(cfg['folderpath'],
cfg['search_name'],
abort_if_exists=cfg["abort_if_exists"],
delete_if_exists=cfg['delete_if_exists'],
create_parent_folders=True)
# Search loop.
for evaluation_id in range(cfg['num_samples']):
logger = sl.EvaluationLogger(cfg["folderpath"], cfg["search_name"],
evaluation_id)
if not logger.config_exists():
(inputs, outputs, hyperp_value_lst,
searcher_eval_token) = searcher.sample()
# Logging results (including graph).
logger.log_config(hyperp_value_lst, searcher_eval_token)
vi.draw_graph(
outputs,
draw_module_hyperparameter_info=False,
print_to_screen=False,
out_folderpath=logger.get_evaluation_data_folderpath())
if __name__ == '__main__':
main()
|
477512
|
import math
import bisect
import copy
import os.path as osp
import json
from functools import partial
import numpy as np
from PIL import Image
import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms.functional import normalize as tf_norm, to_tensor
from .. import cfg as global_cfg
from .augmentations import get_composed_augmentations
from .samplers.grouped_batch_sampler import GroupedBatchSampler
from .gt_producer import (
ignore_index, convert_pan_m_to_sem_m, GTGenerator
)
from panopticapi.utils import rgb2id
from fabric.utils.logging import setup_logging
logger = setup_logging(__file__)
'''
Expect a data root directory with the following layout (only coco sub-tree is
expanded for simplicity)
.
├── coco
│ ├── annotations
│ │ ├── train/
│ │ ├── train.json
│ │ ├── val/
│ │ └── val.json
│ └── images
│ ├── train/
│ └── val/
├── mapillary/
└── cityscapes/
'''
'''
A reminder of Panoptic meta json structure: (only the relevant fields are listed)
info/
licenses/
categories:
- id: 1
name: person
supercategory: person
isthing: 1
- id: 191
name: pavement-merged
supercategory: ground
isthing: 0
images:
- id: 397133
file_name: 000000397133.jpg
height: 427
width: 640
- id: 397134
file_name: 000000397134.jpg
height: 422
width: 650
annotations:
- image_id: 139
file_name: 000000000139.png
segments_info:
- id: 3226956
category_id: 1
iscrowd: 0,
bbox: [413, 158, 53, 138] in xywh form,
area: 2840
- repeat omitted
'''
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
def imagenet_normalize(im):
"""This only operates on single image"""
if im.shape[0] == 1:
return im # deal with these gray channel images in coco later. Hell
return tf_norm(im, imagenet_mean, imagenet_std)
def caffe_imagenet_normalize(im):
im = im * 255.0
im = tf_norm(im, (102.9801, 115.9465, 122.7717), (1.0, 1.0, 1.0))
return im
def check_and_tuplize_splits(splits):
if not isinstance(splits, (tuple, list)):
splits = (splits, )
for split in splits:
assert split in ('train', 'val', 'test')
return splits
def mapify_iterable(iter_of_dict, field_name):
"""Convert an iterable of dicts into a big dict indexed by chosen field
I can't think of a better name. 'Tis catchy.
"""
acc = dict()
for item in iter_of_dict:
acc[item[field_name]] = item
return acc
def ttic_find_data_root(dset_name):
'''Find the fastest data root on TTIC slurm cluster'''
default = osp.join('/share/data/vision-greg/panoptic', dset_name)
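    # NOTE (added for clarity): the early return below always selects the default
    # root, so the /vscratch fast-path check that follows is currently unreachable.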
return default
fast = osp.join('/vscratch/vision/panoptic', dset_name)
return fast if osp.isdir(fast) else default
def test_meta_conform(rmeta):
    '''the metadata for the test set does not conform to the panoptic format;
    test ann only has 'images' and 'categories'; let's fill in 'annotations'
'''
images = rmeta['images']
anns = []
for img in images:
_curr_ann = {
'image_id': img['id'],
'segments_info': []
# do not fill in file_name
}
anns.append(_curr_ann)
rmeta['annotations'] = anns
return rmeta
class PanopticBase(Dataset):
def __init__(self, name, split):
available_dsets = ('coco', 'cityscapes', 'mapillary')
assert name in available_dsets, '{} dset is not available'.format(name)
root = ttic_find_data_root(name)
logger.info('using data root {}'.format(root))
self.root = root
self.name = name
self.split = split
self.img_root = osp.join(root, 'images', split)
self.ann_root = osp.join(root, 'annotations', split)
meta_fname = osp.join(root, 'annotations', '{}.json'.format(split))
with open(meta_fname) as f:
rmeta = json.load(f) # rmeta stands for raw metadata
if self.split.startswith('test'):
rmeta = test_meta_conform(rmeta)
# store category metadata
self.meta = dict()
self.meta['cats'] = mapify_iterable(rmeta['categories'], 'id')
self.meta['cat_IdToName'] = dict()
self.meta['cat_NameToId'] = dict()
for cat in rmeta['categories']:
id, name = cat['id'], cat['name']
self.meta['cat_IdToName'][id] = name
self.meta['cat_NameToId'][name] = id
# store image and annotations metadata
self.imgs = mapify_iterable(rmeta['images'], 'id')
self.imgToAnns = mapify_iterable(rmeta['annotations'], 'image_id')
self.imgIds = list(sorted(self.imgs.keys()))
def confine_to_subset(self, imgIds):
'''confine data loading to a subset of images
This is used for figure making.
'''
# confirm that the supplied imgIds are all valid
for supplied_id in imgIds:
assert supplied_id in self.imgIds
self.imgIds = imgIds
def test_seek_imgs(self, i, total_splits):
assert isinstance(total_splits, int)
length = len(self.imgIds)
portion_size = int(math.ceil(length * 1.0 / total_splits))
start = i * portion_size
end = min(length, (i + 1) * portion_size)
self.imgIds = self.imgIds[start:end]
acc = { k: self.imgs[k] for k in self.imgIds }
self.imgs = acc
def __len__(self):
return len(self.imgIds)
def read_img(self, img_fname):
# there are some gray scale images in coco; convert to RGB
return Image.open(img_fname).convert('RGB')
def get_meta(self, index):
imgId = self.imgIds[index]
imgMeta = self.imgs[imgId]
anns = self.imgToAnns[imgId]
return imgMeta, anns
def __getitem__(self, index):
imgMeta, anns = self.get_meta(index)
img_fname = osp.join(self.img_root, imgMeta['file_name'])
img = self.read_img(img_fname)
if self.split.startswith('test'):
mask = Image.fromarray(
np.zeros(np.array(img).shape, dtype=np.uint8)
)
else:
mask = Image.open(osp.join(self.ann_root, anns['file_name']))
segments_info = mapify_iterable(anns['segments_info'], 'id')
return imgMeta, segments_info, img, mask
class SemanticSeg(PanopticBase):
def __init__(self, name, split, transforms):
super().__init__(name=name, split=split)
self.transforms = transforms
# produce train cat index id starting at 0
self.meta['catId_2_trainId'] = dict()
self.meta['trainId_2_catId'] = dict()
self.meta['trainId_2_catName'] = dict() # all things grouped into "things"
self.meta['trainId_2_catName'][ignore_index] = 'ignore'
self.prep_trainId()
self.meta['num_classes'] = len(self.meta['trainId_2_catName']) - 1
def prep_trainId(self):
curr_inx = 0
for catId, cat in self.meta['cats'].items():
self.meta['catId_2_trainId'][catId] = curr_inx
self.meta['trainId_2_catId'][curr_inx] = catId
self.meta['trainId_2_catName'][curr_inx] = cat['name']
curr_inx += 1
def __getitem__(self, index):
        raise ValueError('disabling data loading through this class for now')
_, segments_info, im, mask = super().__getitem__(index)
mask = np.array(mask)
mask = rgb2id(np.array(mask))
mask = convert_pan_m_to_sem_m(
mask, segments_info, self.meta['catId_2_trainId'])
mask = Image.fromarray(mask, mode='I')
if self.transforms is not None:
im, mask = self.transforms(im, mask)
im, mask = to_tensor(im), to_tensor(mask).squeeze(dim=0).long()
im = imagenet_normalize(im)
return im, mask
class PanopticSeg(SemanticSeg):
def __init__(
self, name, split, transforms, pcv, gt_producers,
caffe_mode=False, tensorize=True
):
super().__init__(name=name, split=split, transforms=transforms)
self.pcv = pcv
self.meta['stuff_pred_thresh'] = -1
self.gt_producer_cfgs = gt_producers
self.tensorize = tensorize
self.caffe_mode = caffe_mode
self.gt_prod_handle = partial(GTGenerator, producer_cfgs=gt_producers)
def read_img(self, img_fname):
if self.caffe_mode:
# cv2 reads imgs in BGR, which is what caffe trained models expect
img = cv2.imread(img_fname) # cv2 auto converts gray to BGR
img = Image.fromarray(img)
else:
img = Image.open(img_fname).convert('RGB')
return img
def pan_getitem(self, index, apply_trans=True):
# this is now exposed as public API
imgMeta, segments_info, img, mask = PanopticBase.__getitem__(self, index)
if apply_trans and self.transforms is not None:
img, mask = self.transforms(img, mask)
return imgMeta, segments_info, img, mask
def __getitem__(self, index):
_, segments_info, im, pan_mask = self.pan_getitem(index)
gts = []
if not self.split.startswith('test'):
lo_pan_mask = pan_mask.resize(
np.array(im.size, dtype=np.int) // 4, resample=Image.NEAREST
)
gts = self.gt_prod_handle(
self.meta, self.pcv, lo_pan_mask, segments_info
).generate_gt()
if self.split == 'train':
sem_gt = gts[0]
else:
hi_sem_gt = self.gt_prod_handle(
self.meta, self.pcv, pan_mask, segments_info,
).sem_gt
sem_gt = hi_sem_gt
gts[0] = sem_gt
# else for test/test-dev, do not produce ground truth at all
if self.tensorize:
im = to_tensor(im)
gts = [ torch.as_tensor(elem) for elem in gts ]
if self.caffe_mode:
im = caffe_imagenet_normalize(im)
else:
im = imagenet_normalize(im)
else:
im = np.array(im)
gts.insert(0, im)
return tuple(gts)
@classmethod
def make_loader(
cls, data_cfg, pcv_module, is_train, mp_distributed, world_size,
val_split='val'
):
if is_train:
split = 'train'
batch_size = data_cfg.train_batch_size
transforms_cfg = data_cfg.train_transforms
else:
split = val_split
batch_size = data_cfg.test_batch_size
transforms_cfg = data_cfg.test_transforms
num_workers = data_cfg.num_loading_threads
if mp_distributed:
num_workers = int((num_workers + world_size - 1) / world_size)
if is_train:
batch_size = int(batch_size / world_size)
# at test time a model does not need to reduce its batch size
# 1. dataset
trans = get_composed_augmentations(transforms_cfg)
instance = cls(
split=split, transforms=trans, pcv=pcv_module,
gt_producers=data_cfg.dataset.gt_producers,
**data_cfg.dataset.params,
)
# if split.startswith('test'):
# inx = global_cfg.testing.inx
# total_splits = global_cfg.testing.portions
# instance.test_seek_imgs(inx, total_splits)
# 2. sampler
sampler = cls.make_sampler(instance, is_train, mp_distributed)
# 3. batch sampler
batch_sampler = cls.make_batch_sampler(
instance, sampler, batch_size=batch_size,
aspect_grouping=cls.aspect_grouping,
)
del sampler
# 4. collator
if split.startswith('test'):
collator = None
else:
collator = BatchCollator(data_cfg.dataset.gt_producers)
# 5. loader
loader = DataLoader(
instance,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=collator,
# pin_memory=True maskrcnn-benchmark does not pin memory
)
return loader
@staticmethod
def make_sampler(dataset, is_train, distributed):
if is_train:
if distributed:
# as of pytorch 1.1.0 the distributed sampler always shuffles
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
@staticmethod
def make_batch_sampler(
dataset, sampler, batch_size, aspect_grouping,
):
if aspect_grouping:
aspect_ratios = _compute_aspect_ratios(dataset)
group_ids = _quantize(aspect_ratios, bins=[1, ])
batch_sampler = GroupedBatchSampler(
sampler, group_ids, batch_size, drop_uneven=False
)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False
)
'''
I've decided after much deliberation not to use iteration based training.
Our cluster has a 4-hour time limit before job interrupt.
Under this constraint, I have to resume from where the sampling stopped
at the exact epoch the interrupt occurs and this requires checkpointing
the sampler state.
However, under distributed settings, each process has to checkpoint a
different state since each sees only a portion of the data by virtue of the
distributed sampler. This is bug-prone and brittle.
'''
# if num_iters is not None:
# batch_sampler = samplers.IterationBasedBatchSampler(
# batch_sampler, num_iters, start_iter
# )
return batch_sampler
class BatchCollator(object):
def __init__(self, producer_cfg):
fills = [0, ignore_index, ] # always 0 for img, ignore for sem_gt
fills.extend(
[cfg['params']['fill'] for cfg in producer_cfg]
)
self.fills = fills
def __call__(self, batch):
transposed_batch = list(zip(*batch))
assert len(self.fills) == len(transposed_batch), 'must match in length'
acc = []
for tsr_list, fill in zip(transposed_batch, self.fills):
tsr = self.collate_tensor_list(tsr_list, fill=fill)
acc.append(tsr)
return tuple(acc)
@staticmethod
def collate_tensor_list(tensors, fill):
"""
Pad the Tensors with the specified constant
so that they have the same shape
"""
assert isinstance(tensors, (tuple, list))
# largest size along each dimension
max_size = tuple(max(s) for s in zip(*[tsr.shape for tsr in tensors]))
assert len(max_size) == 2 or len(max_size) == 3
batch_shape = (len(tensors),) + max_size
batched_tsr = tensors[0].new(*batch_shape).fill_(fill)
for tsr, pad_tsr in zip(tensors, batched_tsr):
# WARNING only pad the last 2, that is the spatial dimensions
pad_tsr[..., :tsr.shape[-2], :tsr.shape[-1]].copy_(tsr)
return batched_tsr
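# Illustrative sketch (added for clarity, not part of the original module): padding
# two unevenly sized masks with the ignore value so they stack into a single batch.
def _demo_collate_tensor_list():
    a = torch.zeros(3, 4, dtype=torch.long)
    b = torch.ones(2, 5, dtype=torch.long)
    batched = BatchCollator.collate_tensor_list([a, b], fill=ignore_index)
    return batched.shape   # torch.Size([2, 3, 5]); padded cells hold ignore_index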
def _quantize(x, bins):
bins = copy.copy(bins)
bins = sorted(bins)
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
imgMeta, _ = dataset.get_meta(i)
aspect_ratio = float(imgMeta["height"]) / float(imgMeta["width"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
|
477633
|
import os
matlab_src_dir = os.path.abspath('.')
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.matlab']
primary_domain = 'mat'
master_doc = 'contents'
# The suffix of source filenames.
source_suffix = '.rst'
nitpicky = True
|
477659
|
import json
import mongodb_consistent_backup
import sys
from datetime import datetime
from argparse import Action
from pkgutil import walk_packages
from yconf import BaseConfiguration
from yconf.util import NestedDict
def parse_config_bool(item):
try:
if isinstance(item, bool):
return item
elif isinstance(item, str):
            if item.rstrip().lower() == "true":
return True
return False
except Exception:
return False
class PrintVersions(Action):
def __init__(self, option_strings, dest, nargs=0, **kwargs):
super(PrintVersions, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
print("%s version: %s, git commit hash: %s" % (
mongodb_consistent_backup.prog_name,
mongodb_consistent_backup.__version__,
mongodb_consistent_backup.git_commit
))
import platform
print("Python version: %s" % platform.python_version())
print("Python modules:")
import fabric.version
print("\t%s: %s" % ('Fabric', fabric.version.get_version()))
modules = ['pymongo', 'multiprocessing', 'yaml', 'boto', 'filechunkio']
for module_name in modules:
module = __import__(module_name)
if hasattr(module, '__version__'):
print("\t%s: %s" % (module_name, module.__version__))
sys.exit(0)
class ConfigParser(BaseConfiguration):
def makeParserLoadSubmodules(self, parser):
for _, modname, ispkg in walk_packages(path=mongodb_consistent_backup.__path__, prefix=mongodb_consistent_backup.__name__ + '.'):
if not ispkg:
continue
try:
components = modname.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
parser = mod.config(parser)
except AttributeError:
continue
return parser
def makeParser(self):
parser = super(ConfigParser, self).makeParser()
parser.add_argument("-V", "--version", dest="version", help="Print mongodb_consistent_backup version info and exit", action=PrintVersions)
parser.add_argument("-v", "--verbose", dest="verbose", help="Verbose output", default=False, action="store_true")
parser.add_argument("-H", "--host", dest="host", default="localhost", type=str,
help="MongoDB Hostname, IP address or '<replset>/<host:port>,<host:port>,..' URI (default: localhost)")
parser.add_argument("-P", "--port", dest="port", help="MongoDB Port (default: 27017)", default=27017, type=int)
parser.add_argument("-u", "--user", "--username", dest="username", help="MongoDB Authentication Username (for optional auth)", type=str)
parser.add_argument("-p", "--password", dest="password", help="MongoDB Authentication Password (for optional auth)", type=str)
parser.add_argument("-a", "--authdb", dest="authdb", help="MongoDB Auth Database (for optional auth - default: admin)", default='admin', type=str)
parser.add_argument("--ssl.enabled", dest="ssl.enabled", default=False, action="store_true",
help="Use SSL secured database connections to MongoDB hosts (default: false)")
parser.add_argument("--ssl.insecure", dest="ssl.insecure", default=False, action="store_true",
help="Do not validate the SSL certificate and hostname of the server (default: false)")
parser.add_argument("--ssl.ca_file", dest="ssl.ca_file", default=None, type=str,
help="Path to SSL Certificate Authority file in PEM format (default: use OS default CA)")
parser.add_argument("--ssl.crl_file", dest="ssl.crl_file", default=None, type=str,
help="Path to SSL Certificate Revocation List file in PEM or DER format (for optional cert revocation)")
parser.add_argument("--ssl.client_cert_file", dest="ssl.client_cert_file", default=None, type=str,
help="Path to Client SSL Certificate file in PEM format (for optional client ssl auth)")
parser.add_argument("-L", "--log-dir", dest="log_dir", help="Path to write log files to (default: disabled)", default='', type=str)
parser.add_argument("-T", "--backup-time", dest="backup_time",
default=datetime.now().strftime("%Y%m%d_%H%M"), type=str,
help="Backup timestamp as yyyymmdd_HHMM. (default: current time)")
parser.add_argument("--lock-file", dest="lock_file", default='/tmp/mongodb-consistent-backup.lock', type=str,
help="Location of lock file (default: /tmp/mongodb-consistent-backup.lock)")
parser.add_argument("--rotate.max_backups", dest="rotate.max_backups", default=0, type=int,
help="Maximum number of backups to keep in backup directory (default: unlimited)")
parser.add_argument("--rotate.max_days", dest="rotate.max_days", default=0, type=float,
help="Maximum age in days for backups in backup directory (default: unlimited)")
parser.add_argument("--sharding.balancer.wait_secs", dest="sharding.balancer.wait_secs", default=300, type=int,
help="Maximum time to wait for balancer to stop, in seconds (default: 300)")
parser.add_argument("--sharding.balancer.ping_secs", dest="sharding.balancer.ping_secs", default=3, type=int,
help="Interval to check balancer state, in seconds (default: 3)")
return self.makeParserLoadSubmodules(parser)
class Config(object):
# noinspection PyUnusedLocal
def __init__(self):
self._config = ConfigParser()
self.parse()
self.version = mongodb_consistent_backup.__version__
self.git_commit = mongodb_consistent_backup.git_commit
def _get(self, keys, data=None):
if not data:
data = self._config
if "." in keys:
key, rest = keys.split(".", 1)
return self._get(rest, data[key])
else:
return data[keys]
def check_required(self):
required = [
'backup.name',
'backup.location'
]
for key in required:
try:
self._get(key)
except Exception:
raise mongodb_consistent_backup.Errors.OperationError(
'Field "%s" (config file field: "%s.%s") must be set via command-line or config file!' % (
key,
self._config.environment,
key
)
)
def parse(self):
self._config.parse(self.cmdline)
self.check_required()
def to_dict(self, data):
if isinstance(data, dict) or isinstance(data, NestedDict):
ret = {}
for key in data:
value = self.to_dict(data[key])
                if value and key != 'merge':
if key == "password" or key == "secret_key":
value = "******"
ret[key] = value
return ret
elif isinstance(data, (str, int, bool)):
return data
def dump(self):
return self.to_dict(self._config)
def to_json(self):
return json.dumps(self.dump(), sort_keys=True)
def __repr__(self):
return self.to_json()
def __getattr__(self, key):
try:
return self._config.get(key)
# TODO-timv What can we do to make this better?
except Exception:
return None
|
477662
|
from joblib import load as jl_load
from pathlib import Path
from os.path import dirname
def load_model(model_name: str):
pipeline = jl_load(Path(dirname(__file__)) / 'model' / f"{model_name}.joblib")
pipeline.steps[0][1].init()
return pipeline
|
477671
|
import dramatiq
from account.models import User
from submission.models import Submission
from judge.dispatcher import JudgeDispatcher
from judge.ide import IDEDispatcher
from utils.shortcuts import DRAMATIQ_WORKER_ARGS
@dramatiq.actor(**DRAMATIQ_WORKER_ARGS())
def judge_task(submission_id, problem_id):
uid = Submission.objects.get(id=submission_id).user_id
if User.objects.get(id=uid).is_disabled:
return
JudgeDispatcher(submission_id, problem_id).judge()
@dramatiq.actor(**DRAMATIQ_WORKER_ARGS())
def judge_IDE_task(src, language, test_case):
return IDEDispatcher(src, language, test_case).judge()
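# Hedged usage note (hypothetical arguments; assumes the project's dramatiq
# broker is already configured): actors are enqueued with .send(), e.g.
#
#   judge_task.send(submission_id, problem_id)
#   judge_IDE_task.send(src, language, test_case)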
|
477713
|
import random
import os
import shutil
from bench import resources
from audiomate.corpus import io
def run(corpus, base_path):
target_path = os.path.join(base_path, 'out')
shutil.rmtree(target_path, ignore_errors=True)
os.makedirs(target_path)
writer = io.KaldiWriter()
writer.save(corpus, target_path)
def test_kaldi_write(benchmark, tmp_path):
corpus = resources.generate_corpus(
200,
(5, 10),
(1, 5),
(0, 6),
(1, 20),
random.Random(x=234)
)
benchmark(run, corpus, str(tmp_path))
|
477717
|
from gi.repository import Gtk, GObject, Gio
from core.engine import GameEngine
from gui.controllers.main_window_controller import MainWindowController
from gui.controllers.theme_selection_controller import ThemeSelectionController
from os import path
from gui.controllers import gmenu_controller, about_controller
from gui.controllers.gmenu_controller import GmenuController
from gui.controllers.about_controller import AboutController
from properties import Directories
from model.configuration.user import UserConfig
from core.configuration.json.user import UserConfigManager
class Gnome2048Application(Gtk.Application):
def __init__(self):
Gtk.Application.__init__(self)
engine = GameEngine()
config_file = path.join(Directories.USER_CONFIG_DIR, "settings.json")
self.config_manager = UserConfigManager(config_file)
self.user_config = UserConfig()
self.css_provider = Gtk.CssProvider()
self.main_window_controller = MainWindowController(engine, self.css_provider)
self.theme_selection_controller = ThemeSelectionController()
self.theme_selection_controller.window.set_transient_for(self.main_window_controller.window)
self.theme_selection_controller.register_theme_changed(self)
self.gmenu_controller = GmenuController(self)
self.about_controller = AboutController()
about_win = self.about_controller.window
main_win = self.main_window_controller.window
about_win.set_transient_for(main_win)
self.load_config()
def load_config(self):
self.user_config = self.config_manager.load()
self.apply_theme(self.user_config.theme)
def save_config(self):
self.config_manager.save(self.user_config)
def do_activate(self):
self.set_app_menu(self.gmenu_controller.menu)
self.main_window_controller.window.set_application(self)
self.main_window_controller.show()
def do_startup(self):
Gtk.Application.do_startup(self)
def show_theme_selection(self):
try:
self.theme_selection_controller.show(self.user_config.theme)
except Exception as e:
print(e)
raise
def show_about(self):
self.about_controller.show()
def theme_changed(self, theme):
self.user_config.theme = theme
self.save_config()
self.apply_theme(theme)
def apply_theme(self, theme):
main_css = path.join(theme, "main.css")
self.css_provider.load_from_path(main_css)
|
477741
|
from django.contrib import admin
from reversion.admin import VersionAdmin
from .models import Municipality, Suministro, Tag
@admin.register(Tag)
class TagAdmin(VersionAdmin):
list_display = ["name", "slug"]
@admin.register(Municipality)
class MunicipalityAdmin(VersionAdmin):
list_display = ["get_name", "slug", "created_at", "modified_at"]
def get_name(self, obj):
return obj.get_name_display()
get_name.short_description = "name"
@admin.register(Suministro)
class SuministroAdmin(VersionAdmin):
list_display = [
"title",
"slug",
"municipality",
"created_at",
"modified_at",
]
|
477792
|
import collections
def jewels(J, S):
    c = collections.Counter(S)
    count = 0
    for js in J:
        count += c[js]
    return count
J = 'aA'
S = 'aAAd'
print(jewels(J, S))
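# Hedged alternative (same O(len(J) + len(S)) counting idea as above, shown
# only for comparison; not part of the original snippet):
def jewels_sum(J, S):
    c = collections.Counter(S)
    return sum(c[js] for js in J)
print(jewels_sum(J, S))  # 3, same result as jewels(J, S)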
|
477834
|
import os
from openl3.cli import run
import tempfile
import numpy as np
import shutil
import pytest
TEST_DIR = os.path.dirname(__file__)
TEST_AUDIO_DIR = os.path.join(TEST_DIR, 'data', 'audio')
TEST_IMAGE_DIR = os.path.join(TEST_DIR, 'data', 'image')
TEST_VIDEO_DIR = os.path.join(TEST_DIR, 'data', 'video')
# Test audio file paths
CHIRP_MONO_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_mono.wav')
CHIRP_STEREO_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_stereo.wav')
CHIRP_44K_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_44k.wav')
CHIRP_1S_PATH = os.path.join(TEST_AUDIO_DIR, 'chirp_1s.wav')
EMPTY_PATH = os.path.join(TEST_AUDIO_DIR, 'empty.wav')
SHORT_PATH = os.path.join(TEST_AUDIO_DIR, 'short.wav')
SILENCE_PATH = os.path.join(TEST_AUDIO_DIR, 'silence.wav')
# Test image file paths
DAISY_PATH = os.path.join(TEST_IMAGE_DIR, 'daisy.jpg')
BLANK_PATH = os.path.join(TEST_IMAGE_DIR, 'blank.png')
SMALL_PATH = os.path.join(TEST_IMAGE_DIR, 'smol.png')
# Test video file paths
BENTO_PATH = os.path.join(TEST_VIDEO_DIR, 'bento.mp4')
# Regression file paths
TEST_REG_DIR = os.path.join(TEST_DIR, 'data', 'regression')
REG_CHIRP_44K_PATH = os.path.join(TEST_REG_DIR, 'chirp_44k_{}.npz')
REG_CHIRP_44K_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'chirp_44k_{}_linear.npz')
REG_DAISY_PATH = os.path.join(TEST_REG_DIR, 'daisy.npz')
REG_DAISY_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'daisy_linear.npz')
REG_BENTO_AUDIO_PATH = os.path.join(TEST_REG_DIR, 'bento_audio_{}.npz')
REG_BENTO_AUDIO_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'bento_audio_{}_linear.npz')
REG_BENTO_IMAGE_PATH = os.path.join(TEST_REG_DIR, 'bento_image_{}.npz')
REG_BENTO_IMAGE_LINEAR_PATH = os.path.join(TEST_REG_DIR, 'bento_image_{}_linear.npz')
@pytest.mark.parametrize("frontend", ['kapre', 'librosa'])
def test_audio_regression(capsys, frontend):
# test correct execution on test audio file (regression)
tempdir = tempfile.mkdtemp()
run('audio', CHIRP_44K_PATH, output_dir=tempdir, audio_frontend=frontend, verbose=True)
# check output file created
audio_outfile = os.path.join(tempdir, 'chirp_44k.npz')
assert os.path.isfile(audio_outfile)
# regression test
audio_data_reg = np.load(REG_CHIRP_44K_PATH.format(frontend))
audio_data_out = np.load(audio_outfile)
assert sorted(audio_data_out.files) == sorted(audio_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(audio_data_out['timestamps'], audio_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(audio_data_out['embedding'], audio_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# SECOND regression test
run('audio', CHIRP_44K_PATH, output_dir=tempdir, suffix='linear', input_repr='linear',
content_type='env', audio_embedding_size=512, audio_center=False, audio_hop_size=0.5,
audio_frontend=frontend, verbose=False)
# check output file created
audio_outfile = os.path.join(tempdir, 'chirp_44k_linear.npz')
assert os.path.isfile(audio_outfile)
# regression test
audio_data_reg = np.load(REG_CHIRP_44K_LINEAR_PATH.format(frontend))
audio_data_out = np.load(audio_outfile)
assert sorted(audio_data_out.files) == sorted(audio_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(audio_data_out['timestamps'], audio_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(audio_data_out['embedding'], audio_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# delete output file and temp folder
shutil.rmtree(tempdir)
def test_image_regression(capsys):
# test correct execution on test image file (regression)
tempdir = tempfile.mkdtemp()
run('image', DAISY_PATH, output_dir=tempdir, verbose=True)
# check output file created
image_outfile = os.path.join(tempdir, 'daisy.npz')
assert os.path.isfile(image_outfile)
# regression test
image_data_reg = np.load(REG_DAISY_PATH)
image_data_out = np.load(image_outfile)
assert sorted(image_data_out.files) == sorted(image_data_reg.files) == ['embedding']
assert np.allclose(image_data_out['embedding'], image_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# SECOND regression test
run('image', DAISY_PATH, output_dir=tempdir, suffix='linear', input_repr='linear',
content_type='env', image_embedding_size=512, verbose=False)
# check output file created
image_outfile = os.path.join(tempdir, 'daisy_linear.npz')
assert os.path.isfile(image_outfile)
# regression test
image_data_reg = np.load(REG_DAISY_LINEAR_PATH)
image_data_out = np.load(image_outfile)
assert sorted(image_data_out.files) == sorted(image_data_reg.files) == ['embedding']
assert np.allclose(image_data_out['embedding'], image_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# delete output file and temp folder
shutil.rmtree(tempdir)
@pytest.mark.parametrize("frontend", ['kapre', 'librosa'])
def test_video_regression(capsys, frontend):
tempdir = tempfile.mkdtemp()
## Video processing regression tests
run('video', BENTO_PATH, output_dir=tempdir, audio_frontend=frontend, verbose=True)
# check output files created
audio_outfile = os.path.join(tempdir, 'bento_audio.npz')
assert os.path.isfile(audio_outfile)
image_outfile = os.path.join(tempdir, 'bento_image.npz')
assert os.path.isfile(image_outfile)
# regression test
audio_data_reg = np.load(REG_BENTO_AUDIO_PATH.format(frontend))
audio_data_out = np.load(audio_outfile)
image_data_reg = np.load(REG_BENTO_IMAGE_PATH.format(frontend))
image_data_out = np.load(image_outfile)
assert sorted(audio_data_out.files) == sorted(audio_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(audio_data_out['timestamps'], audio_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(audio_data_out['embedding'], audio_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert sorted(image_data_out.files) == sorted(image_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(image_data_out['timestamps'], image_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(image_data_out['embedding'], image_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
# SECOND regression test
run('video', BENTO_PATH, output_dir=tempdir, suffix='linear', input_repr='linear',
content_type='env', audio_embedding_size=512, image_embedding_size=512,
audio_center=False, audio_hop_size=0.5, audio_frontend=frontend, verbose=False)
# check output files created
audio_outfile = os.path.join(tempdir, 'bento_audio_linear.npz')
assert os.path.isfile(audio_outfile)
image_outfile = os.path.join(tempdir, 'bento_image_linear.npz')
assert os.path.isfile(image_outfile)
# regression test
audio_data_reg = np.load(REG_BENTO_AUDIO_LINEAR_PATH.format(frontend))
audio_data_out = np.load(audio_outfile)
image_data_reg = np.load(REG_BENTO_IMAGE_LINEAR_PATH.format(frontend))
image_data_out = np.load(image_outfile)
assert sorted(audio_data_out.files) == sorted(audio_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(audio_data_out['timestamps'], audio_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(audio_data_out['embedding'], audio_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert sorted(image_data_out.files) == sorted(image_data_reg.files) == sorted(
['embedding', 'timestamps'])
assert np.allclose(image_data_out['timestamps'], image_data_reg['timestamps'],
rtol=1e-05, atol=1e-05, equal_nan=False)
assert np.allclose(image_data_out['embedding'], image_data_reg['embedding'],
rtol=1e-05, atol=1e-05, equal_nan=False)
|
477852
|
import pytest
import leaguepedia_parser
@pytest.mark.parametrize("team_tuple", [("tsm", "TSM"), ("IG", "Invictus Gaming")])
def test_get_long_team_name(team_tuple):
assert (
leaguepedia_parser.get_long_team_name_from_trigram(team_tuple[0])
== team_tuple[1]
)
@pytest.mark.parametrize(
"team_tournament",
[("TSM", "LCS 2021 Summer"), ("TSM Academy", "NA Academy 2021 Summer")],
)
def test_get_long_team_name_in_tournament(team_tournament):
team_name, tournament = team_tournament
assert (
leaguepedia_parser.get_long_team_name_from_trigram("TSM", tournament)
== team_name
)
def test_get_wrong_team_name():
assert leaguepedia_parser.get_long_team_name_from_trigram("mister mv") is None
@pytest.mark.parametrize("team_name", ["T1", "G2 Esports"])
def test_get_team_logo(team_name):
assert leaguepedia_parser.get_team_logo(team_name)
@pytest.mark.parametrize("team_name", ["T1", "G2 Esports"])
def test_get_team_thumbnail(team_name):
thumbnail_url = leaguepedia_parser.get_team_thumbnail(team_name)
assert thumbnail_url
@pytest.mark.parametrize("team_name", ["T1", "G2 Esports"])
def test_get_all_team_assets(team_name):
assets = leaguepedia_parser.get_all_team_assets(team_name)
assert assets.thumbnail_url
assert assets.logo_url
assert assets.long_name
|
477874
|
from __future__ import unicode_literals
from django.db import migrations
from django.utils import translation
from django.utils.translation import gettext_lazy as _
def insert_modules(apps, schema):
from django.conf import settings
translation.activate(settings.LANGUAGE_CODE)
ModuleGroup = apps.get_model('modules', 'ModuleGroup')
Module = apps.get_model('modules', 'Module')
module = {
'sort': 2,
'is_enabled': True,
'name': _('CMS User groups'),
'slug': 'user-groups',
'description': '',
'group': ModuleGroup.objects.get(slug='admin')
}
Module.objects.create(**module)
translation.deactivate()
def delete_modules(apps, schema):
Module = apps.get_model('modules', 'Module')
Module.objects.get(slug='user-groups').delete()
class Migration(migrations.Migration):
dependencies = [
('modules', '0008_module_config'),
]
operations = [
migrations.RunPython(insert_modules, delete_modules)
]
|
477885
|
"""
A helper module for making rolls.
"""
import random
def d20_check_roll(difficulty_class, modifiers=0, advantage=None):
"""
:param difficulty_class: Target for Success
:param modifiers: Total amount of modifiers
    :param advantage: If applicable, True for advantage, False for disadvantage (None if neither).
    :return: Tuple of (success, critical, natural roll, total roll)
"""
natural_roll = random.randint(1, 20)
additional_roll = random.randint(1, 20)
final_roll = natural_roll
if advantage is not None:
if advantage:
final_roll = natural_roll if natural_roll > additional_roll else additional_roll
else:
final_roll = natural_roll if natural_roll < additional_roll else additional_roll
if natural_roll == 1:
return False, True, natural_roll, final_roll
if natural_roll == 20:
return True, True, natural_roll, final_roll
if final_roll + modifiers >= difficulty_class:
return True, False, natural_roll, final_roll + modifiers
return False, False, natural_roll, final_roll
def roll_damage(dice_stacks, modifiers, critical=False):
"""
:param dice_stacks: Stacks of Dice to apply
:param modifiers: Total of modifiers affecting the roll
    :param critical: Whether this is a critical damage roll
:return: Total damage to apply.
"""
if critical:
for dice_stack in dice_stacks:
dice_stack.amount *= 2
total_dice_result = 0
for dice_stack in dice_stacks:
for i in range(0, dice_stack.amount):
total_dice_result += random.randint(1, dice_stack.dice.sides)
return total_dice_result + modifiers
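# Hedged usage sketch (not part of the original module): Dice/DiceStack below
# are minimal hypothetical stand-ins matching the attributes roll_damage reads
# (stack.amount and stack.dice.sides).
if __name__ == "__main__":
    class Dice:
        def __init__(self, sides):
            self.sides = sides
    class DiceStack:
        def __init__(self, amount, dice):
            self.amount = amount
            self.dice = dice
    success, critical, natural, total = d20_check_roll(difficulty_class=15, modifiers=3, advantage=True)
    print(success, critical, natural, total)
    if success:
        # 2d6+3, with the dice doubled when the check was a critical
        print(roll_damage([DiceStack(2, Dice(6))], modifiers=3, critical=critical))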
|
477934
|
import altair as alt
def rapids_theme():
font = "Open Sans"
text_color = "#666666"
main_palette = ["#7400ff", "#36c9dd", "#d216d2", "#ffb500"]
secondary_palette = ["#bababc", "#666666", "#8824ff", "#9942ff", "#a785e7"]
return {
"config": {
"axis": {
"labelFontSize:": 20,
"labelColor": text_color,
"titleFontSize": 20,
"titleColor": text_color,
},
"axisY": {
"font": font,
"labelFontSize": 20,
"labelColor": text_color,
"titleFontSize": 20,
"titleColor": text_color,
},
"axisX": {
"font": font,
"labelFontSize": 20,
"labelColor": text_color,
"titleFontSize": 20,
"titleColor": text_color,
},
"header": {
"font": font,
"labelFontSize": 20,
"labelColor": text_color,
"titleFontSize": 20,
"titleColor": text_color,
},
"legend": {
"font": font,
"labelFontSize": 18,
"labelColor": text_color,
"titleFontSize": 18,
"titleColor": text_color,
"strokeColor": text_color,
"padding": 10,
},
"range": {
"category": main_palette,
"diverging": secondary_palette,
},
}
}
alt.themes.register("RAPIDS", rapids_theme)
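# Hedged usage note: registering alone does not apply the theme; a caller is
# expected to enable it, e.g. (illustrative, any data frame works):
#
#   alt.themes.enable("RAPIDS")
#   chart = alt.Chart(df).mark_line().encode(x="x", y="y")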
|
478013
|
from .import_ai import *
class TimedPickle:
def __init__(self, data, name, enabled=True):
self.data = data
self.name = name
self.enabled = enabled
def __getstate__(self):
return (time.time(), self.data, self.name, self.enabled)
def __setstate__(self, s):
tstart, self.data, self.name, self.enabled = s
if self.enabled:
print(f'pickle time for {self.name} = {time.time() - tstart} seconds')
@contextmanager
def use_seed(seed):
# Save all the states
python_state = random.getstate()
np_state = np.random.get_state()
# Seed all the rngs (note: adding different values to the seeds
# in case the same underlying RNG is used by all and in case
# that could be a problem. Probably not necessary)
random.seed(seed + 2)
np.random.seed(seed + 3)
# Yield control!
yield
# Reset the rng states
random.setstate(python_state)
np.random.set_state(np_state)
def get_code_hash():
cur_dir = os.path.dirname(os.path.realpath(__file__))
all_code = ''
for f in glob.glob(cur_dir + '/*.py'):
# We assume all whitespace is irrelevant, as well as comments
        with open(f) as source_file:
            for line in source_file:
line = line.partition('#')[0]
line = line.rstrip()
all_code += ''.join(line.split())
return hashlib.sha256(all_code.encode('utf8')).hexdigest()
|
478025
|
from __future__ import division
import math
import collections
import six
import tensorflow as tf
from neupy import init
from neupy.utils import as_tuple
from neupy.exceptions import LayerConnectionError
from neupy.core.properties import (
TypedListProperty, Property,
ParameterProperty,
)
from .base import BaseLayer
__all__ = ('Convolution', 'Deconvolution')
class Spatial2DProperty(TypedListProperty):
expected_type = (list, tuple, int)
def __init__(self, *args, **kwargs):
kwargs['element_type'] = int
super(Spatial2DProperty, self).__init__(*args, **kwargs)
def __set__(self, instance, value):
if isinstance(value, collections.Iterable) and len(value) == 1:
value = (value[0], 1)
if isinstance(value, int):
value = (value, value)
super(Spatial2DProperty, self).__set__(instance, value)
def validate(self, value):
super(Spatial2DProperty, self).validate(value)
if len(value) > 2:
raise ValueError(
"Stride can have only one or two elements "
"in the list. Got {}".format(len(value)))
if any(element <= 0 for element in value):
raise ValueError(
"Stride size should contain only values greater than zero")
def deconv_output_shape(dimension_size, filter_size, padding, stride,
dilation=1):
"""
Computes deconvolution's output shape for one spatial dimension.
Parameters
----------
dimension_size : int or None
        Size of the dimension. Typically it's the image's width or height.
        It might be equal to ``None`` when the input has a variable
        dimension.
filter_size : int
Size of the convolution filter.
padding : {``valid``, ``same``} or int
Type or size of the zero-padding.
stride : int
Stride size.
dilation : int
Dilation rate. Only ``dilation=1`` is supported for the
deconvolution.
Returns
-------
int
Dimension size after applying deconvolution
operation with specified configurations.
"""
if isinstance(dimension_size, tf.Dimension):
dimension_size = dimension_size.value
if dimension_size is None:
return None
if dilation != 1:
raise ValueError("Deconvolution layer doesn't support dilation")
if padding in ('VALID', 'valid'):
return dimension_size * stride + max(filter_size - stride, 0)
elif padding in ('SAME', 'same'):
return dimension_size * stride
elif isinstance(padding, int):
return dimension_size * stride - 2 * padding + filter_size - 1
raise ValueError(
"`{!r}` is unknown deconvolution's padding value".format(padding))
def conv_output_shape(dimension_size, filter_size, padding, stride,
dilation=1):
"""
Computes convolution's output shape for one spatial dimension.
Parameters
----------
dimension_size : int or None
        Size of the dimension. Typically it's the image's width or height.
        It might be equal to ``None`` when the input has a variable
        dimension.
filter_size : int
Size of the convolution filter.
padding : {``valid``, ``same``} or int
Type or size of the zero-padding.
stride : int
Stride size.
dilation : int
Dilation rate. Defaults to ``1``.
Returns
-------
int
Dimension size after applying convolution
operation with specified configurations.
"""
if isinstance(dimension_size, tf.Dimension):
dimension_size = dimension_size.value
if dimension_size is None:
return None
if not isinstance(stride, int):
raise ValueError(
"Stride needs to be an integer, got {} (value {!r})"
"".format(type(stride), stride))
if not isinstance(filter_size, int):
raise ValueError(
"Filter size needs to be an integer, got {} "
"(value {!r})".format(type(filter_size), filter_size))
# We can think of the dilation as very sparse convolutional filter
# filter=3 and dilation=2 the same as filter=5 and dilation=1
filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding in ('VALID', 'valid'):
return int(math.ceil((dimension_size - filter_size + 1) / stride))
elif padding in ('SAME', 'same'):
return int(math.ceil(dimension_size / stride))
elif isinstance(padding, int):
return int(math.ceil(
(dimension_size + 2 * padding - filter_size + 1) / stride))
raise ValueError(
"`{!r}` is unknown convolution's padding value".format(padding))
class PaddingProperty(Property):
expected_type = (six.string_types, int, tuple)
valid_string_choices = ('VALID', 'SAME', 'same', 'valid')
def __set__(self, instance, value):
if isinstance(value, int):
if value < 0:
raise ValueError(
"Integer border mode value needs to be "
"greater or equal to zero, got {}".format(value))
value = (value, value)
if isinstance(value, six.string_types):
value = value.upper()
super(PaddingProperty, self).__set__(instance, value)
def validate(self, value):
super(PaddingProperty, self).validate(value)
if isinstance(value, tuple) and len(value) != 2:
raise ValueError(
"Border mode property suppose to get a tuple that "
"contains two elements, got {} elements"
"".format(len(value)))
is_invalid_string = (
isinstance(value, six.string_types) and
value not in self.valid_string_choices
)
if is_invalid_string:
valid_choices = ', '.join(self.valid_string_choices)
raise ValueError(
"`{}` is invalid string value. Available choices: {}"
"".format(value, valid_choices))
if isinstance(value, tuple) and any(element < 0 for element in value):
raise ValueError(
"Tuple border mode value needs to contain only elements "
"that greater or equal to zero, got {}".format(value))
class Convolution(BaseLayer):
"""
Convolutional layer.
Parameters
----------
size : tuple of int
Filter shape. In should be defined as a tuple with three
integers ``(filter rows, filter columns, output channels)``.
padding : {{``same``, ``valid``}}, int, tuple
Zero padding for the input tensor.
- ``valid`` - Padding won't be added to the tensor. Result will be
the same as for ``padding=0``
- ``same`` - Padding will depend on the number of rows and columns
in the filter. This padding makes sure that image with the
``stride=1`` won't change its width and height. It's the same as
``padding=(filter rows // 2, filter columns // 2)``.
- Custom value for the padding can be specified as an integer, like
``padding=1`` or it can be specified as a tuple when different
dimensions have different padding values, for example
``padding=(2, 3)``.
Defaults to ``valid``.
stride : tuple with ints, int.
Stride size. Defaults to ``(1, 1)``
dilation : int, tuple
Rate for the filter upsampling. When ``dilation > 1`` layer will
become dilated convolution (or atrous convolution). Defaults to ``1``.
    weight : array-like, Tensorflow variable, scalar or Initializer
Defines layer's weights. Shape of the weight will be equal to
``(filter rows, filter columns, input channels, output channels)``.
Default initialization methods you can find
:ref:`here <init-methods>`. Defaults to
:class:`HeNormal(gain=2) <neupy.init.HeNormal>`.
    bias : 1D array-like, Tensorflow variable, scalar, Initializer or None
Defines layer's bias. Default initialization methods you can find
:ref:`here <init-methods>`. Defaults to
:class:`Constant(0) <neupy.init.Constant>`.
        The ``None`` value excludes bias from the calculations and
        does not add it to the parameters list.
{BaseLayer.name}
Examples
--------
2D Convolution
>>> from neupy import layers
>>>
>>> layers.join(
... layers.Input((28, 28, 3)),
... layers.Convolution((3, 3, 16)),
... )
1D Convolution
>>> from neupy.layers import *
>>> network = join(
... Input((30, 10)),
... Reshape((30, 1, 10)), # convert 3D to 4D
... Convolution((3, 1, 16)),
... Reshape((-1, 16)) # convert 4D back to 3D
... )
>>> network
(?, 30, 10) -> [... 4 layers ...] -> (?, 28, 16)
Methods
-------
{BaseLayer.Methods}
Attributes
----------
{BaseLayer.Attributes}
"""
size = TypedListProperty(element_type=int, n_elements=3)
weight = ParameterProperty()
bias = ParameterProperty(allow_none=True)
padding = PaddingProperty()
stride = Spatial2DProperty()
dilation = Spatial2DProperty()
# We use gain=2 because it's suitable choice for relu non-linearity
# and relu is the most common non-linearity used for CNN.
def __init__(self, size, padding='valid', stride=1, dilation=1,
weight=init.HeNormal(gain=2), bias=0, name=None):
super(Convolution, self).__init__(name=name)
self.size = size
self.padding = padding
self.stride = stride
self.dilation = dilation
self.weight = weight
self.bias = bias
def fail_if_shape_invalid(self, input_shape):
if input_shape and input_shape.ndims != 4:
raise LayerConnectionError(
"Convolutional layer expects an input with 4 "
"dimensions, got {} with shape {}"
"".format(len(input_shape), input_shape))
def output_shape_per_dim(self, *args, **kwargs):
return conv_output_shape(*args, **kwargs)
def expected_output_shape(self, input_shape):
n_samples = input_shape[0]
row_filter_size, col_filter_size, n_kernels = self.size
row_stride, col_stride = self.stride
row_dilation, col_dilation = self.dilation
if isinstance(self.padding, (list, tuple)):
row_padding, col_padding = self.padding
else:
row_padding, col_padding = self.padding, self.padding
return (
n_samples,
self.output_shape_per_dim(
input_shape[1], row_filter_size,
row_padding, row_stride, row_dilation
),
self.output_shape_per_dim(
input_shape[2], col_filter_size,
col_padding, col_stride, col_dilation
),
n_kernels,
)
def get_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
self.fail_if_shape_invalid(input_shape)
if input_shape.ndims is None:
n_samples = input_shape[0]
n_kernels = self.size[-1]
return tf.TensorShape((n_samples, None, None, n_kernels))
return tf.TensorShape(self.expected_output_shape(input_shape))
def create_variables(self, input_shape):
self.input_shape = input_shape
n_channels = input_shape[-1]
n_rows, n_cols, n_filters = self.size
        # Weight shape for a regular convolution:
        # (filter rows, filter columns, input channels, output channels).
self.weight = self.variable(
value=self.weight, name='weight',
shape=(n_rows, n_cols, n_channels, n_filters))
if self.bias is not None:
self.bias = self.variable(
value=self.bias, name='bias',
shape=as_tuple(n_filters))
def output(self, input, **kwargs):
input = tf.convert_to_tensor(input, tf.float32)
self.fail_if_shape_invalid(input.shape)
padding = self.padding
if not isinstance(padding, six.string_types):
height_pad, width_pad = padding
input = tf.pad(input, [
[0, 0],
[height_pad, height_pad],
[width_pad, width_pad],
[0, 0],
])
# VALID option will make sure that
# convolution won't use any padding.
padding = 'VALID'
output = tf.nn.convolution(
input,
self.weight,
padding=padding,
strides=self.stride,
dilation_rate=self.dilation,
data_format="NHWC",
)
if self.bias is not None:
bias = tf.reshape(self.bias, (1, 1, 1, -1))
output += bias
return output
def __repr__(self):
return self._repr_arguments(
self.size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
weight=self.weight,
bias=self.bias,
name=self.name,
)
class Deconvolution(Convolution):
"""
    Deconvolution layer (also known as transposed convolution).
Parameters
----------
{Convolution.size}
{Convolution.padding}
{Convolution.stride}
{Convolution.dilation}
    weight : array-like, Tensorflow variable, scalar or Initializer
Defines layer's weights. Shape of the weight will be equal to
``(filter rows, filter columns, output channels, input channels)``.
Default initialization methods you can find
:ref:`here <init-methods>`. Defaults to
:class:`HeNormal(gain=2) <neupy.init.HeNormal>`.
{Convolution.bias}
{Convolution.name}
Methods
-------
{Convolution.Methods}
Attributes
----------
{Convolution.Attributes}
Examples
--------
>>> from neupy.layers import *
>>> network = join(
... Input((28, 28, 3)),
... Convolution((3, 3, 16)),
... Deconvolution((3, 3, 1)),
... )
>>> network
(?, 28, 28, 3) -> [... 3 layers ...] -> (?, 28, 28, 1)
"""
def __init__(self, size, padding='valid', stride=1,
weight=init.HeNormal(gain=2), bias=0, name=None):
super(Deconvolution, self).__init__(
size=size, padding=padding, stride=stride,
dilation=1, weight=weight, bias=bias, name=name)
def output_shape_per_dim(self, *args, **kwargs):
return deconv_output_shape(*args, **kwargs)
def create_variables(self, input_shape):
self.input_shape = input_shape
n_channels = input_shape[-1]
n_rows, n_cols, n_filters = self.size
# Compare to the regular convolution weights,
# transposed one has switched input and output channels.
self.weight = self.variable(
value=self.weight, name='weight',
shape=(n_rows, n_cols, n_filters, n_channels))
if self.bias is not None:
self.bias = self.variable(
value=self.bias, name='bias',
shape=as_tuple(n_filters))
def output(self, input, **kwargs):
input = tf.convert_to_tensor(input, tf.float32)
# We need to get information about output shape from the input
# tensor's shape, because for some inputs we might have
# height and width specified as None and shape value won't be
# computed for these dimensions.
padding = self.padding
        # It's important that the expected output shape gets computed on the
        # Tensor (produced by tf.shape) rather than on a TensorShape object.
# Tensorflow cannot convert TensorShape object into Tensor and
# it will cause an exception in the conv2d_transpose layer.
output_shape = self.expected_output_shape(tf.shape(input))
if isinstance(self.padding, (list, tuple)):
height_pad, width_pad = self.padding
# VALID option will make sure that
# deconvolution won't use any padding.
padding = 'VALID'
# conv2d_transpose doesn't know about extra paddings that we added
# in the convolution. For this reason, we have to expand our
# expected output shape and later we will remove these paddings
# manually after transpose convolution.
output_shape = (
output_shape[0],
output_shape[1] + 2 * height_pad,
output_shape[2] + 2 * width_pad,
output_shape[3],
)
output = tf.nn.conv2d_transpose(
value=input,
filter=self.weight,
output_shape=list(output_shape),
strides=as_tuple(1, self.stride, 1),
padding=padding,
data_format="NHWC"
)
if isinstance(self.padding, (list, tuple)):
h_pad, w_pad = self.padding
if h_pad > 0:
output = output[:, h_pad:-h_pad, :, :]
if w_pad > 0:
output = output[:, :, w_pad:-w_pad, :]
if self.bias is not None:
bias = tf.reshape(self.bias, (1, 1, 1, -1))
output += bias
return output
def __repr__(self):
return self._repr_arguments(
self.size,
padding=self.padding,
stride=self.stride,
weight=self.weight,
bias=self.bias,
name=self.name,
)
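# Hedged worked example of the output-shape helpers above (values follow
# directly from conv_output_shape / deconv_output_shape; not part of the
# original neupy API surface).
if __name__ == "__main__":
    assert conv_output_shape(28, filter_size=3, padding='valid', stride=1) == 26
    assert conv_output_shape(28, filter_size=3, padding='same', stride=2) == 14
    assert deconv_output_shape(28, filter_size=3, padding='valid', stride=1) == 30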
|
478027
|
from .converters import (
BIGINTEGER, BINARY, BOOLEAN, BOOLEAN_ARRAY, BYTES, Binary, CHAR,
CHAR_ARRAY, DATE, DATETIME, DECIMAL, DECIMAL_ARRAY, Date, DateFromTicks,
FLOAT, FLOAT_ARRAY, INET, INT2VECTOR, INTEGER, INTEGER_ARRAY, INTERVAL,
JSON, JSONB, MACADDR, NAME, NAME_ARRAY, NULLTYPE, NUMBER, OID, PGEnum,
PGInterval, PGJson, PGJsonb, PGText, PGTsvector, PGVarchar, ROWID, STRING,
TEXT, TEXT_ARRAY, TIME, TIMEDELTA, TIMESTAMP, TIMESTAMPTZ, Time,
TimeFromTicks, Timestamp, TimestampFromTicks, UNKNOWN, UUID_TYPE, VARCHAR,
VARCHAR_ARRAY, XID)
from .core import Connection, Cursor
from .exceptions import (
DataError, DatabaseError, Error, IntegrityError, InterfaceError,
InternalError, NotSupportedError, OperationalError, ProgrammingError,
Warning)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Copyright (c) 2007-2009, <NAME>
# Copyright (c) The Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
def connect(
        user, host='localhost', database=None, port=5432, password=None,
source_address=None, unix_sock=None, ssl_context=None, timeout=None,
tcp_keepalive=True, application_name=None, replication=None):
return Connection(
user, host=host, database=database, port=port, password=password,
source_address=source_address, unix_sock=unix_sock,
ssl_context=ssl_context, timeout=timeout, tcp_keepalive=tcp_keepalive,
application_name=application_name, replication=replication)
apilevel = "2.0"
"""The DBAPI level supported, currently "2.0".
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
threadsafety = 1
"""Integer constant stating the level of thread safety the DBAPI interface
supports. This DBAPI module supports sharing of the module only. Connections
and cursors may not be shared between threads. This gives pg8000 a threadsafety
value of 1.
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
paramstyle = 'format'
__all__ = [
    'Warning', 'DataError', 'DatabaseError', 'connect', 'InterfaceError',
    'ProgrammingError', 'Error', 'OperationalError', 'IntegrityError',
    'InternalError', 'NotSupportedError', 'Connection', 'Cursor', 'Binary',
    'Date', 'DateFromTicks', 'Time', 'TimeFromTicks', 'Timestamp',
    'TimestampFromTicks', 'BINARY', 'PGInterval', 'PGEnum', 'PGJson',
    'PGJsonb', 'PGTsvector', 'PGText', 'PGVarchar', 'STRING', 'NUMBER',
    'DATETIME', 'TIME', 'BOOLEAN', 'INTEGER', 'BIGINTEGER', 'INTERVAL',
    'JSON', 'JSONB', 'UNKNOWN', 'NULLTYPE', 'ROWID', 'BOOLEAN_ARRAY', 'BYTES',
    'CHAR', 'CHAR_ARRAY', 'DATE', 'DECIMAL', 'DECIMAL_ARRAY', 'FLOAT',
    'FLOAT_ARRAY', 'INET', 'INT2VECTOR', 'INTEGER_ARRAY', 'MACADDR', 'NAME',
    'NAME_ARRAY', 'OID', 'TEXT', 'TEXT_ARRAY', 'TIMEDELTA', 'TIMESTAMP',
    'TIMESTAMPTZ', 'UUID_TYPE', 'VARCHAR', 'VARCHAR_ARRAY', 'XID'
]
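# Hedged usage sketch (hypothetical credentials; paramstyle is 'format', so
# query placeholders are written as %s):
#
#   conn = connect(user="postgres", password="secret", database="test")
#   cur = conn.cursor()
#   cur.execute("SELECT %s + %s", (1, 2))
#   print(cur.fetchone())
#   conn.close()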
|
478041
|
from base import pipeline, clean_db
import random
def test_null_groups(pipeline, clean_db):
"""
Verify that null group columns are considered equal
"""
pipeline.create_stream('s', x='int', y='int', z='int')
q = """
SELECT x::integer, y::integer, z::integer, COUNT(*) FROM s
GROUP BY x, y, z;
"""
desc = ('x', 'y', 'z')
pipeline.create_cv('test_null_groups', q)
pipeline.create_table('test_null_groups_t', x='integer', y='integer', z='integer')
rows = []
for n in range(10000):
vals = list(random.randint(0, 10) for n in range(3))
vals = map(lambda n: random.random() > 0.1 and n or None, vals)
rows.append(tuple(vals))
pipeline.insert('s', desc, rows)
pipeline.insert('test_null_groups_t', desc, rows)
table_q = """
SELECT x, y, z, COUNT(*) FROM test_null_groups_t
GROUP BY x, y, z ORDER BY x, y, z;
"""
expected = pipeline.execute(table_q)
result = pipeline.execute('SELECT x, y, z, count FROM test_null_groups ORDER BY x, y, z')
for r, e in zip(result, expected):
assert r == e
|
478073
|
import os
import numpy as np
import pandas as pd
import torch.utils.data as td
from csl_common.vis import vis
from csl_common.utils import geometry
from datasets import facedataset
class AFLW(facedataset.FaceDataset):
NUM_LANDMARKS = 19
ALL_LANDMARKS = list(range(NUM_LANDMARKS))
LANDMARKS_NO_OUTLINE = ALL_LANDMARKS # no outlines in AFLW
LANDMARKS_ONLY_OUTLINE = ALL_LANDMARKS # no outlines in AFLW
def __init__(self, root, cache_root=None, test_split='full', landmark_ids=range(19), **kwargs):
assert test_split in ['full', 'frontal']
fullsize_img_dir = os.path.join(root, 'data/flickr')
super().__init__(root=root,
cache_root=cache_root,
fullsize_img_dir=fullsize_img_dir,
test_split=test_split,
landmark_ids=landmark_ids,
**kwargs)
@property
def labels(self):
return self.annotations.ID.values
@property
def heights(self):
return self.annotations.face_h.values
@property
def widths(self):
return self.annotations.face_w.values
def _load_annotations(self, split):
annotation_filename = os.path.join(self.cache_root, 'alfw.pkl')
self.annotations_original = pd.read_pickle(annotation_filename)
print("Number of images: {}".format(len(self.annotations_original)))
self.frontal_only = split == 'frontal'
return self.make_split(self.train, self.frontal_only)
def make_split(self, train, only_frontal):
import scipy.io
# Additional annotations from http://mmlab.ie.cuhk.edu.hk/projects/compositional.html
annots = scipy.io.loadmat(os.path.join(self.cache_root, 'AFLWinfo_release.mat'))
train_ids, test_ids = annots['ra'][0][:20000] - 1, annots['ra'][0][20000:] - 1
ids = annots['ra'][0] - 1
# merge original and additional annotations
lms = annots['data'][ids]
lms = np.dstack((lms[:,:19], lms[:, 19:]))
lms_list = [l for l in lms]
mask_new = annots['mask_new'][ids]
bbox = annots['bbox'][ids]
x1, x2, y1, y2 = bbox[:,0], bbox[:,1], bbox[:, 2], bbox[:, 3]
fnames = [f[0][0] for f in annots['nameList'][ids]]
annotations_additional = pd.DataFrame({
'fname':fnames,
'ra': ids,
'landmarks_full':lms_list,
'masks': [m for m in mask_new],
'face_x': x1,
'face_y': y1,
'face_w': x2 - x1,
'face_h': y2 - y1
})
ad = annotations_additional
ao = self.annotations_original
# self.annotations_test = self.annotations_original[self.annotations.fname.isin(fnames)]
pd.set_option('display.expand_frame_repr', False)
merge_on=['fname', 'face_x', 'face_y', 'face_w', 'face_h']
annotations = pd.merge(ad, ao, on=merge_on)
annotations = annotations.sort_values('ra')
split_ids = train_ids if train else test_ids
annotations = annotations[annotations.ra.isin(split_ids)]
if not train and only_frontal:
mask_all_lms_visible = np.stack(annotations.masks.values).min(axis=1) == 1
annotations = annotations[mask_all_lms_visible]
print(len(annotations))
return annotations
def __len__(self):
return len(self.annotations)
def __getitem__(self, idx):
sample = self.annotations.iloc[idx]
face_id = sample.ra
bb = [sample.face_x, sample.face_y, sample.face_x+sample.face_w, sample.face_y+sample.face_h]
landmarks = sample.landmarks_full.astype(np.float32)
landmarks_for_crop = landmarks if self.crop_source == 'lm_ground_truth' else None
return self.get_sample(sample.fname, bb, landmarks_for_crop=landmarks_for_crop, id=face_id,
landmarks_to_return=landmarks)
import config
config.register_dataset(AFLW)
if __name__ == '__main__':
from csl_common.utils.nn import Batch, denormalize
import utils.common
utils.common.init_random()
ds = AFLW(train=True, deterministic=True, use_cache=True, image_size=256)
dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
for data in dl:
batch = Batch(data, gpu=False)
inputs = batch.images.clone()
denormalize(inputs)
imgs = vis.add_landmarks_to_images(inputs.numpy(), batch.landmarks.numpy(), radius=3, color=(0,255,0))
print(batch.fnames)
vis.vis_square(imgs, nCols=10, fx=1.0, fy=1.0, normalize=False)
|
478161
|
import unittest
from nlpmodels.utils.elt import dataset
class TestAbstractDataset(unittest.TestCase):
def test_cannot_instantiate_abstract_class(self):
with self.assertRaises(TypeError):
dataset.AbstractNLPDataset()
|
478205
|
import pygame, sys
import numpy as np
from random import randint
ON = 1
OFF = 0
class GameOfLife:
def __init__(self, width=100, height=100):
self.width = width
self.height = height
self.state_new = np.zeros((height, width))
self.state_old = np.zeros((height, width))
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_state_old(self, x, y):
return self.state_old[y, x]
def get_state_new(self, x, y):
return self.state_new[y, x]
def num_neighbors_with_state(self, x, y, required_state):
res = 0
for i in range(max(0, x-1), min(x+1, self.width-1)+1):
for j in range(max(0, y-1), min(y+1, self.height-1)+1):
if i == x and j == y:
continue
if self.get_state_old(i, j) == required_state:
res += 1
return res
def set_square_state(self, x, y, st):
self.state_new[y, x] = st
def iterate_state(self):
self.state_old = np.copy(self.state_new)
for j in range(self.height):
for i in range(self.width):
cur_state = self.get_state_old(i, j)
numberOfOnNeighbors = self.num_neighbors_with_state(i,j,ON)
if cur_state == OFF and numberOfOnNeighbors == 3:
self.set_square_state(i, j, ON)
elif cur_state == ON and (numberOfOnNeighbors < 2 or numberOfOnNeighbors > 3):
self.set_square_state(i, j, OFF)
def main():
#screen dimensions
size = width, height = 1000, 1000
black = (0, 0, 0)
white = (255, 255, 255)
screen = pygame.display.set_mode(size)
life = GameOfLife(int(width/10), int(height/10))
size_x = width / life.get_width()
size_y = height / life.get_height()
num_live_cells = randint(280, 380)
for n in range(num_live_cells):
i, j = randint(int(life.get_width()/5), int(3*life.get_width()/4)), randint(int(life.get_height()/5), int(3*life.get_height()/4))
life.set_square_state(i, j, ON)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.fill(black)
scaled_x, scaled_y = 0, 0
for j in range(life.get_height()):
for i in range(life.get_width()):
scaled_x = (i / life.get_width()) * width
scaled_y = (j / life.get_height()) * height
state = life.get_state_new(i, j)
color = black
if state == ON:
color = white
pygame.draw.rect(screen, color, pygame.Rect(scaled_x, scaled_y, size_x, size_y))
pygame.time.wait(50)
pygame.display.flip()
life.iterate_state()
if __name__ == "__main__":
main()
|