content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
import glob
import os
from sqlalchemy import create_engine, exists
from sqlalchemy.orm import sessionmaker
try:
import config
except ImportError:
    # No local config module available; fall back to empty credentials.
    # A namespace is used so the attribute access below still works.
    from types import SimpleNamespace
    config = SimpleNamespace(db_user=None, db_password=None)
from backend.database.objects import DBObjectBase, User, Replay, Model
# The original format() call had no placeholders; the 'localhost' host below is an assumption.
connection_string = 'postgresql://{}:{}@localhost/saltie'.format(config.db_user, config.db_password)
print(connection_string)
engine = create_engine(connection_string, echo=True)
DBObjectBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
for replay in glob.glob(os.path.join('replays', '*.gz')):
base = os.path.basename(replay)
uuid = base.split('_')[-1].split('.')[0]
ip = base.split('_')[0]
user = -1
print (uuid, ip, user)
if not session.query(exists().where(User.id == -1)).scalar():
u = User(id=-1, name='Undefined', password='')
session.add(u)
session.commit()
if not session.query(exists().where(Model.model_hash == '0')).scalar():
u = Model(model_hash='0')
session.add(u)
session.commit()
if not session.query(exists().where(Replay.uuid == uuid)).scalar():
r = Replay(uuid=uuid, ip=ip, user=user, model_hash='0', num_team0=1, num_players=1, is_eval=False)
session.add(r)
print('Added', uuid, ip, user)
session.commit()
| 32.560976 | 106 | 0.669663 | ["Apache-2.0"] | SaltieRL/Distributed-Replays | helpers/convert_existing_replays.py | 1,335 | Python |
import numpy as np
import os
from sklearn.neighbors import NearestNeighbors
from pydrake.multibody.rigid_body import RigidBody
from pydrake.all import (
AddFlatTerrainToWorld,
AddModelInstancesFromSdfString,
AddModelInstanceFromUrdfFile,
FindResourceOrThrow,
FloatingBaseType,
InputPort,
Isometry3,
OutputPort,
RgbdCamera,
RigidBodyPlant,
RigidBodyTree,
RigidBodyFrame,
RollPitchYaw,
RollPitchYawFloatingJoint,
RotationMatrix,
Value,
VisualElement,
)
import meshcat
import meshcat.transformations as tf
import meshcat.geometry as g
# From
# https://www.opengl.org/discussion_boards/showthread.php/197893-View-and-Perspective-matrices
def normalize(x):
return x / np.linalg.norm(x)
def save_pointcloud(pc, normals, path):
joined = np.hstack([pc.T, normals.T])
np.savetxt(path, joined)
def load_pointcloud(path):
joined = np.loadtxt(path)
return joined[:, 0:3].T, joined[:, 3:6].T
def translate(x):
T = np.eye(4)
T[0:3, 3] = x[:3]
return T
def get_pose_error(tf_1, tf_2):
rel_tf = transform_inverse(tf_1).dot(tf_2)
if np.allclose(np.diag(rel_tf[0:3, 0:3]), [1., 1., 1.]):
angle_dist = 0.
else:
# Angle from rotation matrix
angle_dist = np.arccos(
(np.sum(np.diag(rel_tf[0:3, 0:3])) - 1) / 2.)
euclid_dist = np.linalg.norm(rel_tf[0:3, 3])
return euclid_dist, angle_dist
# If misalignment_tol = None, returns the average
# distance between the model clouds when transformed
# by est_tf and gt_tf (using nearest-point lookups
# for each point in the gt-tf'd model cloud).
# If misalignment_tol is a number, it returns
# the percent of points that are misaligned by more
# than the misalignment error under the same distance
# metric.
def get_earth_movers_error(est_tf, gt_tf, model_cloud,
misalignment_tol=0.005):
# Transform the model cloud into both frames
est_model_cloud = transform_points(est_tf, model_cloud)
gt_model_cloud = transform_points(gt_tf, model_cloud)
# For every point in the model cloud, find the distance
# to the closest point in the estimated model cloud,
# as a way of finding the swept volume between the
# models in those poses.
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(gt_model_cloud.T)
dist, _ = neigh.kneighbors(
est_model_cloud[0:3, :].T, return_distance=True)
if misalignment_tol is None:
return np.mean(dist)
else:
return np.mean(dist > misalignment_tol)
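# Added illustration (not part of the original module): exercises both modes of
# get_earth_movers_error described in the comment above, on a synthetic cloud.
# The cloud size and the 1 cm shift are arbitrary example values.
def _demo_earth_movers_error():
    cloud = np.random.rand(3, 100)
    gt_tf = np.eye(4)
    est_tf = translate([0.01, 0., 0.])  # estimate off by 1 cm in x
    mean_dist = get_earth_movers_error(est_tf, gt_tf, cloud, misalignment_tol=None)
    frac_bad = get_earth_movers_error(est_tf, gt_tf, cloud, misalignment_tol=0.005)
    return mean_dist, frac_bad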
def draw_points(vis, vis_prefix, name, points,
normals=None, colors=None, size=0.001,
normals_length=0.01):
vis[vis_prefix][name].set_object(
g.PointCloud(position=points,
color=colors,
size=size))
n_pts = points.shape[1]
if normals is not None:
# Drawing normals for debug
lines = np.zeros([3, n_pts*2])
inds = np.array(range(0, n_pts*2, 2))
lines[:, inds] = points[0:3, :]
lines[:, inds+1] = points[0:3, :] + \
normals * normals_length
vis[vis_prefix]["%s_normals" % name].set_object(
meshcat.geometry.LineSegmentsGeometry(
lines, None))
def transform_points(tf, pts):
return ((tf[:3, :3].dot(pts).T) + tf[:3, 3]).T
def transform_inverse(tf):
new_tf = np.eye(4)
new_tf[:3, :3] = tf[:3, :3].T
new_tf[:3, 3] = -new_tf[:3, :3].dot(tf[:3, 3])
return new_tf
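# Note (added): for a rigid transform [R | t] the inverse is [R.T | -R.T.dot(t)],
# which is exactly what transform_inverse builds above.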
def lookat(eye, target, up):
# For a camera with +x right, +y down, and +z forward.
eye = np.array(eye)
target = np.array(target)
up = np.array(up)
F = target[:3] - eye[:3]
f = normalize(F)
U = normalize(up[:3])
s = np.cross(f, U) # right
u = np.cross(s, f) # up
M = np.eye(4)
M[:3, :3] = np.vstack([s, -u, f]).T
# OLD:
# flip z -> x
# -x -> y
# -y -> z
# CAMERA FORWARD is +x-axis
# CAMERA RIGHT is -y axis
# CAMERA UP is +z axis
# Why does the Drake documentation lie to me???
T = translate(eye)
return T.dot(M)
def add_single_instance_to_rbt(
rbt, config, instance_config, i,
floating_base_type=FloatingBaseType.kRollPitchYaw):
class_name = instance_config["class"]
if class_name not in config["objects"].keys():
raise ValueError("Class %s not in classes." % class_name)
if len(instance_config["pose"]) != 6:
raise ValueError("Class %s has pose size != 6. Use RPY plz" %
class_name)
frame = RigidBodyFrame(
"%s_%d" % (class_name, i), rbt.world(),
instance_config["pose"][0:3],
instance_config["pose"][3:6])
model_path = config["objects"][class_name]["model_path"]
_, extension = os.path.splitext(model_path)
if extension == ".urdf":
AddModelInstanceFromUrdfFile(
model_path, floating_base_type, frame, rbt)
elif extension == ".sdf":
AddModelInstancesFromSdfString(
open(model_path).read(), floating_base_type, frame, rbt)
else:
raise ValueError("Class %s has non-sdf and non-urdf model name." %
class_name)
def setup_scene(rbt, config):
if config["with_ground"] is True:
AddFlatTerrainToWorld(rbt)
for i, instance_config in enumerate(config["instances"]):
add_single_instance_to_rbt(rbt, config, instance_config, i,
floating_base_type=FloatingBaseType.kFixed)
# Add camera geometry!
camera_link = RigidBody()
camera_link.set_name("camera_link")
# necessary so this last link isn't pruned by the rbt.compile() call
camera_link.set_spatial_inertia(np.eye(6))
camera_link.add_joint(
rbt.world(),
RollPitchYawFloatingJoint(
"camera_floating_base",
np.eye(4)))
rbt.add_rigid_body(camera_link)
# - Add frame for camera fixture.
camera_frame = RigidBodyFrame(
name="rgbd_camera_frame", body=camera_link,
xyz=[0.0, 0., 0.], rpy=[0., 0., 0.])
rbt.addFrame(camera_frame)
    rbt.compile()
| 31.388889 | 94 | 0.626388 | ["BSD-3-Clause"] | gizatt/pose_estimation_segmentation_analysis | src/utils.py | 6,215 | Python |
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
| 35.918919 | 99 | 0.674191 | ["Apache-2.0"] | fcarsten/ai_playground | udacity_deep_learning/download_data.py | 2,658 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat.core.registry import registry
from habitat.core.simulator import Simulator
def _try_register_igibson_socialnav():
try:
import habitat_sim # noqa: F401
has_habitat_sim = True
except ImportError as e:
has_habitat_sim = False
habitat_sim_import_error = e
if has_habitat_sim:
from habitat.sims.igibson_challenge.social_nav import (
iGibsonSocialNav
) # noqa: F401
from habitat.sims.igibson_challenge.interactive_nav import (
iGibsonInteractiveNav
) # noqa: F401
else:
@registry.register_simulator(name="iGibsonSocialNav")
class iGibsonSocialNavImportError(Simulator):
def __init__(self, *args, **kwargs):
raise habitat_sim_import_error
@registry.register_simulator(name="iGibsonInteractiveNav")
        class iGibsonInteractiveNavImportError(Simulator):
def __init__(self, *args, **kwargs):
raise habitat_sim_import_error
| 34.742857 | 68 | 0.685855 | ["MIT"] | qianLyu/habitat-lab | habitat/sims/igibson_challenge/__init__.py | 1,216 | Python |
import unittest
from datetime import datetime
from unittest.mock import patch
from common.repository import Repository
from contract_api.config import NETWORKS, NETWORK_ID
from contract_api.consumers.service_event_consumer import ServiceCreatedEventConsumer
from contract_api.dao.service_repository import ServiceRepository
class TestOrganizationEventConsumer(unittest.TestCase):
def setUp(self):
pass
@patch('common.s3_util.S3Util.push_io_bytes_to_s3')
@patch('common.ipfs_util.IPFSUtil.read_file_from_ipfs')
@patch('common.ipfs_util.IPFSUtil.read_bytesio_from_ipfs')
@patch('contract_api.consumers.service_event_consumer.ServiceEventConsumer._fetch_tags')
def test_on_service_created_event(self, mock_fetch_tags, nock_read_bytesio_from_ipfs, mock_ipfs_read, mock_s3_push):
event = {"data": {'row_id': 202, 'block_no': 6325625, 'event': 'ServiceCreated',
'json_str': "{'orgId': b'snet\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'serviceId': b'gene-annotation-service\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'metadataURI': b'ipfs://QmdGjaVYPMSGpC1qT3LDALSNCCu7JPf7j51H1GQirvQJYf\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'}",
'processed': b'\x00',
'transactionHash': 'b"\\xa7P*\\xaf\\xfd\\xd5.E\\x8c\\x0bKAF\'\\x15\\x03\\xef\\xdaO\'\\x86/<\\xfb\\xc4\\xf0@\\xf0\\xc1P\\x8c\\xc7"',
'logIndex': '0', 'error_code': 1, 'error_msg': '',
'row_updated': datetime(2019, 10, 21, 9, 59, 37),
'row_created': datetime(2019, 10, 21, 9, 59, 37)}, "name": "ServiceCreated"}
connection = Repository(NETWORK_ID, NETWORKS=NETWORKS)
service_repository = ServiceRepository(connection)
service_repository.delete_service(org_id='snet', service_id='gene-annotation-service')
service_repository.delete_service_dependents(org_id='snet', service_id='gene-annotation-service')
nock_read_bytesio_from_ipfs.return_value = "some_value to_be_pushed_to_s3_whic_is_mocked"
mock_ipfs_read.return_value = {
"version": 1,
"display_name": "Annotation Service",
"encoding": "proto",
"service_type": "grpc",
"model_ipfs_hash": "QmXqonxB9EvNBe11J8oCYXMQAtPKAb2x8CyFLmQpkvVaLf",
"mpe_address": "0x8FB1dC8df86b388C7e00689d1eCb533A160B4D0C",
"groups": [
{
"group_name": "default_group",
"pricing": [
{
"price_model": "fixed_price",
"price_in_cogs": 1,
"default": True
}
],
"endpoints": [
"https://mozi.ai:8000"
],
"group_id": "m5FKWq4hW0foGW5qSbzGSjgZRuKs7A1ZwbIrJ9e96rc="
}
],
"assets": {
"hero_image": "QmVcE6fEDP764ibadXTjZHk251Lmt5xAxdc4P9mPA4kksk/hero_gene-annotation-2b.png"
},
"service_description": {
"url": "https://mozi-ai.github.io/annotation-service/",
"description": "Use this service to annotate a humane genome with uniform terms, Reactome pathway memberships, and BioGrid protein interactions.",
"short_description": "short description"
},
"contributors": [
{
"name": "dummy dummy",
"email_id": "[email protected]"
}
]
}
mock_fetch_tags.return_value = ["test", "", "", [b'\x61\x74\x6F\x6D\x65\x73\x65',
b'\x62\x69\x6F\x69\x6E\x66\x6F\x72\x6D\x61\x74\x69\x63\x73']]
mock_s3_push.return_value = "https://test-s3-push"
org_event_consumer = ServiceCreatedEventConsumer("wss://ropsten.infura.io/ws", "http://ipfs.singularitynet.io",
80)
org_event_consumer.on_event(event=event)
service = service_repository.get_service(org_id='snet', service_id='gene-annotation-service')
service_metadata = service_repository.get_service_metadata(org_id='snet', service_id='gene-annotation-service')
service_endpoints = service_repository.get_service_endpoints(org_id='snet',
service_id='gene-annotation-service')
service_tags = service_repository.get_service_tags(org_id='snet', service_id='gene-annotation-service')
assert service == {'org_id': 'snet', 'service_id': 'gene-annotation-service', 'service_path': None,
'ipfs_hash': 'QmdGjaVYPMSGpC1qT3LDALSNCCu7JPf7j51H1GQirvQJYf', 'is_curated': 0}
assert service_metadata == {'org_id': 'snet', 'service_id': 'gene-annotation-service',
'display_name': 'Annotation Service',
'description': 'Use this service to annotate a humane genome with uniform terms, Reactome pathway memberships, and BioGrid protein interactions.',
'short_description': 'short description',
'url': 'https://mozi-ai.github.io/annotation-service/', 'json': '',
'model_ipfs_hash': 'QmXqonxB9EvNBe11J8oCYXMQAtPKAb2x8CyFLmQpkvVaLf',
'encoding': 'proto', 'type': 'grpc',
'mpe_address': '0x8FB1dC8df86b388C7e00689d1eCb533A160B4D0C',
'assets_url': '{"hero_image": "https://test-s3-push"}',
'assets_hash': '{"hero_image": "QmVcE6fEDP764ibadXTjZHk251Lmt5xAxdc4P9mPA4kksk/hero_gene-annotation-2b.png"}',
'service_rating': '{"rating": 0.0, "total_users_rated": 0}', 'ranking': 1,
'contributors': '[{"name": "dummy dummy", "email_id": "[email protected]"}]'}
assert service_endpoints == [{'org_id': 'snet', 'service_id': 'gene-annotation-service',
'group_id': 'm5FKWq4hW0foGW5qSbzGSjgZRuKs7A1ZwbIrJ9e96rc=',
'endpoint': 'https://mozi.ai:8000'}]
assert service_tags == [{'org_id': 'snet', 'service_id': 'gene-annotation-service', 'tag_name': 'atomese'},
{'org_id': 'snet', 'service_id': 'gene-annotation-service',
'tag_name': 'bioinformatics'}]
| 65.047619 | 413 | 0.569985 | ["MIT"] | vinthedark/snet-marketplace-service | contract_api/testcases/unit_testcases/consumers/test_service_event_consumer.py | 6,830 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from .forms import PostForm, CommentForm
from .models import Post, Comment
def post_list(request):
queryset_list = Post.objects.all().order_by('-publish', 'id')
    paginator = Paginator(queryset_list, 25)  # Show 25 posts per page
page = request.GET.get('page')
try:
post_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
post_list = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
post_list = paginator.page(paginator.num_pages)
return render(request, "pages/home.html", {
'post_list': post_list,
})
def post_detail(request, slug):
post = get_object_or_404(Post, slug=slug)
if request.method == 'POST':
        if request.user.is_authenticated:
form = CommentForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.post = post
instance.save()
messages.add_message(request, messages.SUCCESS, 'Comment Added')
form = CommentForm()
return render(request, 'blog/post_detail.html', {
'post': post,
'form': form,
})
@login_required
def post_add(request):
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.add_message(request, messages.SUCCESS, 'Blog Post Added')
form = PostForm()
else:
form = PostForm()
return render(request, 'blog/post_form.html', {
'form': form,
})
| 30.514706 | 80 | 0.637108 | ["MIT"] | sachiv/django_blog | blog/blog/views.py | 2,075 | Python |
import numpy as np
import hypothesis
import strax.testutils
import straxen
def channel_split_naive(r, channel_ranges):
"""Slower but simpler implementation of straxen.split_channel_ranges"""
results = []
for left, right in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, right + 1))])
return results
@hypothesis.settings(deadline=None)
@hypothesis.given(strax.testutils.several_fake_records)
def test_channel_split(records):
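    # Each row of channel_range is an inclusive [left, right] channel interval,
    # matching the np.arange(left, right + 1) lookup in channel_split_naive above.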
channel_range = np.asarray([[0, 0], [1, 2], [3, 3], [4, 999]])
result = list(straxen.split_channel_ranges(records, channel_range))
result_2 = channel_split_naive(records, channel_range)
assert len(result) == len(result_2)
for i, _ in enumerate(result):
np.testing.assert_array_equal(
np.unique(result[i]['channel']),
np.unique(result_2[i]['channel']))
np.testing.assert_array_equal(result[i], result_2[i])
| 32.551724 | 76 | 0.700212 | ["BSD-3-Clause"] | AlexElykov/straxen | tests/test_channel_split.py | 944 | Python |
import pytest
from django.urls import reverse, resolve
pytestmark = pytest.mark.django_db
def test_index():
assert reverse("sample_search:sample_search") == "/sample_search/"
assert resolve("/sample_search/").view_name == "sample_search:sample_search"
| 26.3 | 80 | 0.764259 | ["MIT"] | BFSSI-Bioinformatics-Lab/miseq_portal | miseq_portal/sample_search/tests/test_urls.py | 263 | Python |
import sys
import resource
from recommender import recommender
reload(sys)
sys.setdefaultencoding("UTF8")
import os
import uuid
from flask import *
from flask_socketio import SocketIO, emit, join_room, leave_room
import psycopg2
import psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
def connect_to_db():
return psycopg2.connect('dbname=movie_recommendations user=movie_normal password=password host=localhost')
# return psycopg2.connect('dbname=movie_recommendations user=postgres password=Cmpgamer1 host=localhost')
@socketio.on('connect', namespace='/movie')
def makeConnection():
session['uuid'] = uuid.uuid1()
print ('Connected')
# In-memory registry of connected users, keyed by session uuid (was previously undefined).
users = {}
@socketio.on('identify', namespace='/movie')
def on_identify(user):
    print('Identify: ' + user)
    users[session['uuid']] = {'username': user}
movieSearchQuery = "SELECT movie_title FROM movie_titles WHERE movie_title LIKE %s"
newMovieSearch = "select mt.movie_title, my.year from movie_titles mt join movie_years my on mt.id = my.movie_id WHERE movie_title LIKE %s"
movieGenreSearch = "select mt.movie_title, mg.movie_genre from movie_titles mt join movie_genres mg on mt.id = mg.movie_id WHERE movie_title LIKE %s"
@socketio.on('search', namespace='/movie')
def search(searchItem):
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
searchQuery = ""
results = []
queryResults = []
searchTerm = '%{0}%'.format(searchItem)
try:
cur.execute(newMovieSearch, (searchTerm,))
results = cur.fetchall()
except Exception as e:
print("Error: Invalid SEARCH in 'movie_titles' table: %s" % e)
try:
cur.execute(movieGenreSearch, (searchTerm,))
genreResults = cur.fetchall()
except Exception as e:
print("Error: Invalid SEARCH in 'movie_titles' table: %s" % e)
movieGenres = {}
copyGenres = genreResults
parsedResults = []
movieList = {}
prevMovie = None
for movie in genreResults:
if prevMovie is not None and prevMovie[0] == movie[0]:
movieList[movie[0]].append(movie[1])
else:
movieList[movie[0]] = [movie[1]]
prevMovie = movie
for i in range(len(results)):
resultsDict = {'text' : results[i]['movie_title'], 'year' : results[i]['year']}
if results[i]['movie_title'] in movieList:
resultsDict['genres'] = movieList[results[i]['movie_title']]
queryResults.append(resultsDict)
print(queryResults)
cur.close()
db.close()
emit('searchResults', queryResults)
doesUserAlreadyExist = 'SELECT * FROM users WHERE username = %s LIMIT 1'
registerNewUser = "INSERT INTO users VALUES (default, %s, %s, %s, crypt(%s, gen_salt('md5')))"
@app.route('/register', methods=['GET', 'POST'])
def register():
redirectPage = 'landing.html'
error = ''
if request.method == 'POST':
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
firstName = request.form['firstName']
lastName = request.form['lastName']
username = request.form['registerUsername']
password = request.form['registerPassword']
password2 = request.form['registerConfirmPassword']
        if not username.strip():
            error += 'Username is required.\n'
        if not firstName.strip():
            error += 'First Name is required.\n'
        if not lastName.strip():
            error += 'Last Name is required.\n'
        if not password.strip():
            error += 'Password is required.\n'
        if not password2.strip():
            error += 'Password must be entered in twice.\n'
if password != password2:
error += 'Passwords do not match.\n'
if len(error) == 0:
try:
cur.execute(doesUserAlreadyExist, (username,)) # check whether user already exists
if cur.fetchone():
error += 'Username is already taken.\n'
else:
try:
cur.execute(registerNewUser, (firstName, lastName, username, password)) # add user to database
db.commit()
except Exception as e:
print("Error: Invalid INSERT in 'user' table: %s" % e)
except Exception as e:
print("Error: Invalid SEARCH in 'user' table: %s" % e)
cur.close()
db.close()
if len(error) != 0:
redirectPage = 'landing.html'
if len(error) != 0:
pass
# flash error message
return render_template(redirectPage, error=error)
loginQuery = 'SELECT * from users WHERE username = %s AND password = crypt(%s, password)'
@app.route('/login', methods=['GET', 'POST'])
def login():
redirectPage = 'landing.html'
error = ''
results = None
if request.method == 'POST':
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
username = request.form['username']
pw = request.form['password']
try:
cur.execute(loginQuery, (username, pw))
results = cur.fetchone()
except Exception as e:
print("Error: SEARCH in 'user' table: %s" % e)
cur.close()
db.close()
if not results: # user does not exist
error += 'Incorrect username or password.\n'
else:
print(results['username'])
session['username'] = results['username']
session['id'] = results['id']
results = []
return redirect(url_for('index'))
if len(error) != 0:
pass
# flash error
return render_template(redirectPage, error=error)
@app.route('/landing', methods=['GET', 'POST'])
def landing():
if 'username' in session:
print("index")
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
#get dynamic top 12
query = "SELECT movie_titles.movie_title, movie_ratings.rating FROM movie_titles INNER JOIN movie_ratings ON movie_titles.id=movie_ratings.movie_id ORDER BY movie_ratings.rating DESC LIMIT 12;"
#print("are we getting here?????????????")
try:
cur.execute(query)
results=cur.fetchall()
        except Exception as e:
raise e
return render_template('index.html', results=results)
else:
return render_template('landing.html')
@app.route('/', methods=['GET', 'POST'])
def index():
if 'username' in session:
print("index")
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
#get dynamic top 12
query = "SELECT movie_titles.movie_title, movie_ratings.rating FROM movie_titles INNER JOIN movie_ratings ON movie_titles.id=movie_ratings.movie_id ORDER BY movie_ratings.rating DESC LIMIT 12;"
#print("are we getting here?????????????")
try:
cur.execute(query)
results=cur.fetchall()
        except Exception as e:
raise e
return render_template('index.html', results=results)
else:
return render_template('landing.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
session.clear()
return redirect(url_for('index'))
movieRatingQuery = "SELECT mt.movie_title as movie_id, u.id, mr.rating FROM movie_ratings mr JOIN users u on u.id = mr.user_id JOIN movie_titles mt ON mt.id = mr.movie_id"
movieIDQuery = "SELECT * FROM movie_titles"
@socketio.on('recommend', namespace='/movie')
def recommend(test):
print("Do I get here?")
redirectPage = 'recommendations.html'
data = {}
productid2name = {}
userRatings= {}
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
try:
cur.execute(movieRatingQuery)
results = cur.fetchall()
except Exception as e:
print("Error: SEARCH in 'movie_ratings table: %s" % e)
for row in results:
user = row['id']
movie = row['movie_id']
rating = float(row['rating'])
if user in data:
currentRatings = data[user]
else:
currentRatings = {}
currentRatings[movie] = rating
data[user] = currentRatings
try:
cur.execute(movieIDQuery)
results = cur.fetchall()
except Exception as e:
print("Error: SEARCH in 'movie_titles' table: %s" % e)
cur.close()
db.close()
movieLens = recommender(5, 15) #Manhattan Distance 5 Nearest Neighbors
movieLens.data = data
results = movieLens.recommend(session['id'])
print(results)
queryResults = []
for i,movie in results:
queryResults.append({'text': movie[0]})
print(queryResults)
emit('recommendationResults', queryResults)
getMovieIDQuery= "SELECT movie_titles.id FROM movie_titles JOIN movie_years ON movie_titles.id = movie_years.movie_id WHERE movie_title = %s AND year = %s"
insertRateQuery= "INSERT INTO movie_ratings VALUES(default, %s, %s, %s)"
## default, movie_id, user_id, movie_review
insertReviewQuery="INSERT INTO movie_reviews VALUES(default, %s, %s, %s)"
@app.route('/rateMovie', methods=['GET', 'POST'])
def rateMovie():
redirectPage= "index.html"
if request.method == 'POST':
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
movie_title= request.form['moviename'] #both queries
rating = request.form['movierating'] #insertRateQuery
review = request.form['moviereview']
year = request.form['movieyear']
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print(rating)
# print(year)
# print(session['id'])
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
try:
cur.execute(getMovieIDQuery, (movie_title, year))
movieID = cur.fetchone()
except Exception as e:
print(e)
#
# Work out logic to prevent people from rating movies twice.
#
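        # One possible approach (sketch only, not wired in): check for an existing
        # rating before inserting. The user_id/movie_id column names follow
        # movieRatingQuery above.
        # cur.execute("SELECT 1 FROM movie_ratings WHERE user_id = %s AND movie_id = %s",
        #             (session['id'], movieID['id']))
        # already_rated = cur.fetchone() is not None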
if rating:
try:
cur.execute(insertRateQuery, (session['id'], movieID['id'], rating))
db.commit()
            except Exception as e:
                print(e)
else:
pass
if review:
try:
cur.execute(insertReviewQuery, (movieID['id'], session['id'], review))
except Exception as e:
print(e)
else:
pass
return redirect(url_for('index'))
# start the server
if __name__ == '__main__':
socketio.run(app, host=os.getenv('IP', '0.0.0.0'), port =int(os.getenv('PORT', 8080)), debug=True)
| 33.679641 | 201 | 0.59712 | ["MIT"] | cmpgamer/Sprint2 | .~c9_invoke_iUgkLr.py | 11,249 | Python |
import os
import sys
import tempfile
import pytest
import logging
from pathlib import Path
from dtaidistance import dtw, dtw_ndim, clustering, util_numpy
import dtaidistance.dtw_visualisation as dtwvis
from dtaidistance.exceptions import PyClusteringException
logger = logging.getLogger("be.kuleuven.dtai.distance")
directory = None
numpyonly = pytest.mark.skipif("util_numpy.test_without_numpy()")
scipyonly = pytest.mark.skipif("util_numpy.test_without_scipy()")
@numpyonly
def test_clustering():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (1, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, 2, merge_hook=test_hook,
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 3, 4}
assert cluster_idx[2] == {2, 5}
@numpyonly
def test_clustering_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@numpyonly
def test_clustering_tree_ndim():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[[0.,0.], [0,0], [1,0], [2,0], [1,0], [0,0], [1,0], [0,0], [0,0]],
[[0.,0.], [1,0], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [0,0]],
[[1.,0.], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [1,0], [1,0]]])
model = clustering.Hierarchical(dtw_ndim.distance_matrix_fast, {'ndim':2},
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 2}
@numpyonly
def test_clustering_tree_maxdist():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False, max_dist=0.1)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_linkage_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
cluster_idx = model.fit(s)
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
model.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(model.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_controlchart():
with util_numpy.test_uses_numpy() as np:
series = np.zeros((600, 60))
rsrc_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rsrc', 'synthetic_control.data')
with open(rsrc_fn, 'r') as ifile:
for idx, line in enumerate(ifile.readlines()):
series[idx, :] = line.split()
s = []
for idx in range(0, 600, 20):
s.append(series[idx, :])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {'parallel': True})
cluster_idx = model.fit(s)
if not dtwvis.test_without_visualization():
import matplotlib.pyplot as plt
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))
show_ts_label = lambda idx: "ts-" + str(idx)
# show_ts_label = list(range(len(s)))
def curcmap(idx):
if idx % 2 == 0:
return 'r'
return 'g'
model.plot(hierarchy_fn, axes=ax, show_ts_label=show_ts_label,
show_tr_label=True, ts_label_margin=-10,
ts_left_margin=10, ts_sample_length=1, ts_color=curcmap)
print("Figure saved to", hierarchy_fn)
@scipyonly
@numpyonly
def test_plotbug1():
with util_numpy.test_uses_numpy() as np:
s1 = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0, 2, 1, 0, 0])
s2 = np.array([0., 1, 2, 3, 1, 0, 0, 0, 2, 1, 0, 0])
series = s1, s2
m = clustering.LinkageTree(dtw.distance_matrix, {})
m.fit(series)
if not dtwvis.test_without_visualization():
if directory:
hierarchy_fn = os.path.join(directory, "clustering.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_clustering.png"
m.plot(hierarchy_fn)
print("Figure save to", hierarchy_fn)
@numpyonly
def test_clustering_centroid():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
# def test_hook(from_idx, to_idx, distance):
# assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.KMedoids(dtw.distance_matrix_fast, {}, k=3,
show_progress=False)
try:
cluster_idx = model.fit(s)
except PyClusteringException:
return
# assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if not dtwvis.test_without_visualization():
if directory:
png_fn = os.path.join(directory, "centroid.png")
else:
file = tempfile.NamedTemporaryFile()
png_fn = file.name + "_centroid.png"
model.plot(png_fn)
print("Figure saved to", png_fn)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
# test_clustering_tree()
test_clustering_tree_ndim()
# test_clustering_tree_maxdist()
# test_linkage_tree()
# test_controlchart()
# test_plotbug1()
# test_clustering_centroid()
| 37.053435 | 108 | 0.525546 | ["Apache-2.0"] | Baael/dtaidistance | tests/test_clustering.py | 9,708 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import logging
import numpy as np
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils.tracking_dict import UsageTrackingDict
tf = try_import_tf()
logger = logging.getLogger(__name__)
class DynamicTFPolicy(TFPolicy):
"""A TFPolicy that auto-defines placeholders dynamically at runtime.
Initialization of this class occurs in two phases.
* Phase 1: the model is created and model variables are initialized.
* Phase 2: a fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Initialization defines the static graph.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
model (TorchModel): TF model instance
dist_class (type): TF action distribution class
"""
def __init__(self,
obs_space,
action_space,
config,
loss_fn,
stats_fn=None,
grad_stats_fn=None,
before_loss_init=None,
make_model=None,
action_sampler_fn=None,
existing_inputs=None,
existing_model=None,
get_batch_divisibility_req=None,
obs_include_prev_action_reward=True):
"""Initialize a dynamic TF policy.
Arguments:
observation_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
config (dict): Policy-specific configuration data.
loss_fn (func): function that returns a loss tensor the policy
graph, and dict of experience tensor placeholders
stats_fn (func): optional function that returns a dict of
TF fetches given the policy and batch input tensors
grad_stats_fn (func): optional function that returns a dict of
TF fetches given the policy and loss gradient tensors
before_loss_init (func): optional function to run prior to loss
init that takes the same arguments as __init__
make_model (func): optional function that returns a ModelV2 object
given (policy, obs_space, action_space, config).
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (func): optional function that returns a
tuple of action and action logp tensors given
(policy, model, input_dict, obs_space, action_space, config).
If not specified, a default action distribution will be used.
existing_inputs (OrderedDict): when copying a policy, this
specifies an existing dict of placeholders to use instead of
defining new ones
existing_model (ModelV2): when copying a policy, this specifies
an existing model to clone and share weights with
get_batch_divisibility_req (func): optional function that returns
the divisibility requirement for sample batches
obs_include_prev_action_reward (bool): whether to include the
previous action and reward in the model input
"""
self.config = config
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
# Setup standard placeholders
prev_actions = None
prev_rewards = None
if existing_inputs is not None:
obs = existing_inputs[SampleBatch.CUR_OBS]
if self._obs_include_prev_action_reward:
prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]
prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]
else:
obs = tf.placeholder(
tf.float32,
shape=[None] + list(obs_space.shape),
name="observation")
if self._obs_include_prev_action_reward:
prev_actions = ModelCatalog.get_action_placeholder(
action_space)
prev_rewards = tf.placeholder(
tf.float32, [None], name="prev_reward")
self._input_dict = {
SampleBatch.CUR_OBS: obs,
SampleBatch.PREV_ACTIONS: prev_actions,
SampleBatch.PREV_REWARDS: prev_rewards,
"is_training": self._get_is_training_placeholder(),
}
self._seq_lens = tf.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens")
# Setup model
if action_sampler_fn:
if not make_model:
raise ValueError(
"make_model is required if action_sampler_fn is given")
self.dist_class = None
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space,
action_space,
logit_dim,
self.config["model"],
framework="tf")
if existing_inputs:
self._state_in = [
v for k, v in existing_inputs.items()
if k.startswith("state_in_")
]
if self._state_in:
self._seq_lens = existing_inputs["seq_lens"]
else:
self._state_in = [
tf.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
for s in self.model.get_initial_state()
]
model_out, self._state_out = self.model(self._input_dict,
self._state_in, self._seq_lens)
# Setup action sampler
if action_sampler_fn:
action_sampler, action_logp = action_sampler_fn(
self, self.model, self._input_dict, obs_space, action_space,
config)
else:
action_dist = self.dist_class(model_out, self.model)
action_sampler = action_dist.sample()
action_logp = action_dist.sampled_action_logp()
# Phase 1 init
sess = tf.get_default_session() or tf.Session()
if get_batch_divisibility_req:
batch_divisibility_req = get_batch_divisibility_req(self)
else:
batch_divisibility_req = 1
TFPolicy.__init__(
self,
obs_space,
action_space,
sess,
obs_input=obs,
action_sampler=action_sampler,
action_logp=action_logp,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_in,
state_outputs=self._state_out,
prev_action_input=prev_actions,
prev_reward_input=prev_rewards,
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req)
# Phase 2 init
before_loss_init(self, obs_space, action_space, config)
if not existing_inputs:
self._initialize_loss()
@override(TFPolicy)
def copy(self, existing_inputs):
"""Creates a copy of self using existing input placeholders."""
# Note that there might be RNN state inputs at the end of the list
if self._state_inputs:
num_state_inputs = len(self._state_inputs) + 1
else:
num_state_inputs = 0
if len(self._loss_inputs) + num_state_inputs != len(existing_inputs):
raise ValueError("Tensor list mismatch", self._loss_inputs,
self._state_inputs, existing_inputs)
for i, (k, v) in enumerate(self._loss_inputs):
if v.shape.as_list() != existing_inputs[i].shape.as_list():
raise ValueError("Tensor shape mismatch", i, k, v.shape,
existing_inputs[i].shape)
# By convention, the loss inputs are followed by state inputs and then
# the seq len tensor
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(("state_in_{}".format(i),
existing_inputs[len(self._loss_inputs) + i]))
if rnn_inputs:
rnn_inputs.append(("seq_lens", existing_inputs[-1]))
input_dict = OrderedDict(
[(k, existing_inputs[i])
for i, (k, _) in enumerate(self._loss_inputs)] + rnn_inputs)
instance = self.__class__(
self.observation_space,
self.action_space,
self.config,
existing_inputs=input_dict,
existing_model=self.model)
instance._loss_input_dict = input_dict
loss = instance._do_loss_init(input_dict)
loss_inputs = [(k, existing_inputs[i])
for i, (k, _) in enumerate(self._loss_inputs)]
TFPolicy._initialize_loss(instance, loss, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(
instance._grad_stats_fn(instance, input_dict, instance._grads))
return instance
@override(Policy)
def get_initial_state(self):
if self.model:
return self.model.get_initial_state()
else:
return []
def is_recurrent(self):
return len(self._state_in) > 0
def num_state_tensors(self):
return len(self._state_in)
def _initialize_loss(self):
def fake_array(tensor):
shape = tensor.shape.as_list()
shape = [s if s is not None else 1 for s in shape]
return np.zeros(shape, dtype=tensor.dtype.as_numpy_dtype)
dummy_batch = {
SampleBatch.CUR_OBS: fake_array(self._obs_input),
SampleBatch.NEXT_OBS: fake_array(self._obs_input),
SampleBatch.DONES: np.array([False], dtype=np.bool),
SampleBatch.ACTIONS: fake_array(
ModelCatalog.get_action_placeholder(self.action_space)),
SampleBatch.REWARDS: np.array([0], dtype=np.float32),
}
if self._obs_include_prev_action_reward:
dummy_batch.update({
SampleBatch.PREV_ACTIONS: fake_array(self._prev_action_input),
SampleBatch.PREV_REWARDS: fake_array(self._prev_reward_input),
})
state_init = self.get_initial_state()
state_batches = []
for i, h in enumerate(state_init):
dummy_batch["state_in_{}".format(i)] = np.expand_dims(h, 0)
dummy_batch["state_out_{}".format(i)] = np.expand_dims(h, 0)
state_batches.append(np.expand_dims(h, 0))
if state_init:
dummy_batch["seq_lens"] = np.array([1], dtype=np.int32)
for k, v in self.extra_compute_action_fetches().items():
dummy_batch[k] = fake_array(v)
# postprocessing might depend on variable init, so run it first here
self._sess.run(tf.global_variables_initializer())
postprocessed_batch = self.postprocess_trajectory(
SampleBatch(dummy_batch))
# model forward pass for the loss (needed after postprocess to
# overwrite any tensor state from that call)
self.model(self._input_dict, self._state_in, self._seq_lens)
if self._obs_include_prev_action_reward:
train_batch = UsageTrackingDict({
SampleBatch.PREV_ACTIONS: self._prev_action_input,
SampleBatch.PREV_REWARDS: self._prev_reward_input,
SampleBatch.CUR_OBS: self._obs_input,
})
loss_inputs = [
(SampleBatch.PREV_ACTIONS, self._prev_action_input),
(SampleBatch.PREV_REWARDS, self._prev_reward_input),
(SampleBatch.CUR_OBS, self._obs_input),
]
else:
train_batch = UsageTrackingDict({
SampleBatch.CUR_OBS: self._obs_input,
})
loss_inputs = [
(SampleBatch.CUR_OBS, self._obs_input),
]
for k, v in postprocessed_batch.items():
if k in train_batch:
continue
elif v.dtype == np.object:
continue # can't handle arbitrary objects in TF
elif k == "seq_lens" or k.startswith("state_in_"):
continue
shape = (None, ) + v.shape[1:]
dtype = np.float32 if v.dtype == np.float64 else v.dtype
placeholder = tf.placeholder(dtype, shape=shape, name=k)
train_batch[k] = placeholder
for i, si in enumerate(self._state_in):
train_batch["state_in_{}".format(i)] = si
train_batch["seq_lens"] = self._seq_lens
if log_once("loss_init"):
logger.debug(
"Initializing loss function with dummy input:\n\n{}\n".format(
summarize(train_batch)))
self._loss_input_dict = train_batch
loss = self._do_loss_init(train_batch)
for k in sorted(train_batch.accessed_keys):
if k != "seq_lens" and not k.startswith("state_in_"):
loss_inputs.append((k, train_batch[k]))
TFPolicy._initialize_loss(self, loss, loss_inputs)
if self._grad_stats_fn:
self._stats_fetches.update(
self._grad_stats_fn(self, train_batch, self._grads))
self._sess.run(tf.global_variables_initializer())
def _do_loss_init(self, train_batch):
loss = self._loss_fn(self, self.model, self.dist_class, train_batch)
if self._stats_fn:
self._stats_fetches.update(self._stats_fn(self, train_batch))
# override the update ops to be those of the model
self._update_ops = self.model.update_ops()
return loss
| 41.558659 | 79 | 0.611843 | ["Apache-2.0"] | lisadunlap/ray | rllib/policy/dynamic_tf_policy.py | 14,878 | Python |
from sklearn import linear_model # noqa
from sklearn.linear_model import LogisticRegressionCV # noqa
import logging
module_logger = logging.getLogger(__name__)
| 32.4 | 61 | 0.839506 | ["MIT"] | motleystate/moonstone | moonstone/analysis/regression.py | 162 | Python |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.colors import BoundaryNorm
import matplotlib.image as mpimg
Uinf=1
R=15
PI=np.pi
alpha = 1
w = alpha/R
gamma= -w * 2*PI* R*R
angle = np.linspace(0, 360, 360)
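# Surface pressure coefficient of a rotating cylinder in potential flow:
# cp = 1 - (2*sin(theta) + gamma/(2*pi*R*Uinf))**2, written out term by term below.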
cp = 1 - (4*(np.sin(angle*(PI/180) )**2) + (2*gamma*np.sin(angle *(PI/180)))/(PI*R*Uinf) + (gamma/(2*PI*R*Uinf))**2 )
fig, ax = plt.subplots()
ax.plot(angle, cp, '--k')
#ax.plot(angle, Z[edge_x,edge_y], 'ok', markersize=5)
#ax.set_ylim(limits[0], limits[1])
#Grid
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.grid(which='major', color='#CCCCCC', linestyle='-', alpha=1)
ax.grid(which='minor', color='#CCCCCC', linestyle='--', alpha=0.5)
fig.savefig(f'./cp_{alpha}.png')
plt.close()
| 23.073171 | 119 | 0.708245 | ["MIT"] | ajupatatero/neurasim | util/unit_test/potential_test/cp_potential.py | 946 | Python |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/jrdsite"
# docs_base_url = "https://[org_name].github.io/jrdsite"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "jrdsite"
| 26.083333 | 68 | 0.722045 | ["MIT"] | jrd2017/jrdsite | jrdsite/config/docs.py | 313 | Python |
# MIT License
#
# Copyright (c) 2017 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Deep Learning Course | Fall 2018
# Date Created: 2018-09-04
################################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch
class TextGenerationModel(nn.Module):
def __init__(self, batch_size, seq_length, vocabulary_size,
lstm_num_hidden=256, lstm_num_layers=2, device='cuda:0', input_size=1):
super(TextGenerationModel, self).__init__()
self.emb_size = 64
self.device = device
# self.emb = nn.Embedding(batch_size * seq_length, 64)
# self.lstm = nn.LSTM(64, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0)
self.lstm = nn.LSTM(input_size, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0)
self.linear = nn.Linear(lstm_num_hidden, vocabulary_size)
self.h = None
def forward(self, x):
# Reset hidden layer for Training
if self.training:
self.h = None
# x = self.emb(x.squeeze(-1).type(torch.LongTensor).to(self.device))
out, h = self.lstm(x.transpose(0, 1), self.h)
out = self.linear(out)
# Handle hidden layer for Inference
if not self.training:
self.h = h
return out
def reset_hidden(self):
self.h = None
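
# Minimal usage sketch (added for illustration; the vocabulary size, CPU device and
# one-hot input encoding are assumptions, not part of the original assignment code).
if __name__ == "__main__":
    vocab_size = 87
    model = TextGenerationModel(batch_size=4, seq_length=30, vocabulary_size=vocab_size,
                                lstm_num_hidden=256, lstm_num_layers=2,
                                device='cpu', input_size=vocab_size)
    model.eval()
    model.reset_hidden()
    # One step of greedy generation from a dummy one-hot input of shape (batch, 1, vocab).
    x = torch.zeros(4, 1, vocab_size)
    x[:, 0, 0] = 1.0
    logits = model(x)                   # shape: (1, 4, vocab_size)
    next_chars = logits.argmax(dim=-1)  # greedy pick per batch element
    print(next_chars.shape)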
| 32.745763 | 95 | 0.635611 | ["MIT"] | davide-belli/deep-learning-labs | assignment_2/part3/model.py | 1,932 | Python |
'''
xbmcswift2.cli.cli
------------------
The main entry point for the xbmcswift2 console script. CLI commands can be
registered in this module.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
import sys
from optparse import OptionParser
from xbmcswift2.cli.app import RunCommand
from xbmcswift2.cli.create import CreateCommand
# TODO: Make an ABC for Command
COMMANDS = {
RunCommand.command: RunCommand,
CreateCommand.command: CreateCommand,
}
# TODO: Make this usage dynamic based on COMMANDS dict
USAGE = '''%prog <command>
Commands:
create
Create a new plugin project.
run
Run an xbmcswift2 plugin from the command line.
Help:
To see options for a command, run `xbmcswift2 <command> -h`
'''
def main():
'''The entry point for the console script xbmcswift2.
The 'xbcmswift2' script is command bassed, so the second argument is always
the command to execute. Each command has its own parser options and usages.
If no command is provided or the -h flag is used without any other
commands, the general help message is shown.
'''
parser = OptionParser()
if len(sys.argv) == 1:
parser.set_usage(USAGE)
parser.error('At least one command is required.')
# spy sys.argv[1] in order to use correct opts/args
command = sys.argv[1]
if command == '-h':
parser.set_usage(USAGE)
opts, args = parser.parse_args()
if command not in COMMANDS.keys():
parser.error('Invalid command')
# We have a proper command, set the usage and options list according to the
# specific command
manager = COMMANDS[command]
if hasattr(manager, 'option_list'):
for args, kwargs in manager.option_list:
parser.add_option(*args, **kwargs)
if hasattr(manager, 'usage'):
parser.set_usage(manager.usage)
opts, args = parser.parse_args()
# Since we are calling a specific comamnd's manager, we no longer need the
# actual command in sys.argv so we slice from position 1
manager.run(opts, args[1:])
| 28.675325 | 80 | 0.652627 | ["Apache-2.0"] | liberty-developer/plugin.video.metalliq-forqed | resources/lib/xbmcswift2/cli/cli.py | 2,208 | Python |
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'logstash', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
def parse_pyproject_array(name):
import os
import re
from ast import literal_eval
pattern = r'^{} = (\[.*?\])$'.format(name)
with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as f:
# Windows \r\n prevents match
contents = '\n'.join(line.rstrip() for line in f.readlines())
array = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL).group(1)
return literal_eval(array)
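# Illustration (added): with a pyproject.toml line such as
#   deps = ["some-dependency==1.0.0"]
# parse_pyproject_array('deps') would return ['some-dependency==1.0.0'].
# The package name and version above are placeholders, not the real pinned deps.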
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
name='datadog-logstash',
version=ABOUT['__version__'],
description='The Logstash check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent logstash check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
# Author details
author='[email protected]',
author_email='[email protected]',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
],
# The package we're going to ship
packages=['datadog_checks.logstash'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': parse_pyproject_array('deps')},
# Extra files to ship with the wheel package
include_package_data=True,
)
| 30.194805 | 81 | 0.669247 | ["BSD-3-Clause"] | chrroberts-pure/integrations-extras | logstash/setup.py | 2,325 | Python |
import json
import os
import pathlib
from decouple import config
LIVE_DEMO_MODE = config('DEMO_MODE', cast=bool, default=False)
PORT = config('PORT', cast=int, default=5000)
APP_URL = 'https://bachelor-thesis.herokuapp.com/'
DEBUG_MODE = config('DEBUG', cast=bool, default=False)
NO_DELAYS = config('NO_DELAYS', cast=bool, default=False)
REDIS_URL = config('REDIS_URL')
DIALOGFLOW_ACCESS_TOKEN = config('DIALOGFLOW_ACCESS_TOKEN')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_ACCESS_TOKEN')
TELEGRAM_ACCESS_TOKEN = config('TELEGRAM_ACCESS_TOKEN')
TWILIO_ACCESS_TOKEN = config('TWILIO_ACCESS_TOKEN')
TWILIO_ACCOUNT_SID = config('TWILIO_ACCOUNT_SID')
DATABASE_URL = config('DATABASE_URL')
ENABLE_CONVERSATION_RECORDING = config('RECORD_CONVERSATIONS', cast=bool, default=True)
CONTEXT_LOOKUP_RECENCY = 15
SUPPORT_CHANNEL_ID = -1001265422831
GOOGLE_SERVICE_ACCOUNT_KEY = config('GOOGLE_SERVICE_ACCOUNT_KEY').replace("\\n", "\n")
# Insert google private key into a template of the json configuration and add it to environment vars
_root_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
if not os.path.exists('tmp'):
os.makedirs('tmp')
google_service_account_file = _root_dir / 'tmp' / 'service-account-file.json'
template = json.load(open(_root_dir / "google-service-template.json", 'r'))
template["private_key"] = GOOGLE_SERVICE_ACCOUNT_KEY
json.dump(template, open(google_service_account_file, 'w+'))
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(google_service_account_file)
# Whether to remove the ForceReply markup in Telegram for any non-keyboard message (useful for demo)
ALWAYS_REMOVE_MARKUP = LIVE_DEMO_MODE
| 45.416667 | 100 | 0.798165 | ["MIT"] | JosXa/bachelor-thesis-insurance | settings.py | 1,635 | Python |
# Generated by Django 2.1.4 on 2018-12-29 01:40
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='bankaccount',
old_name='customer_id',
new_name='customer',
),
migrations.AlterField(
model_name='bankaccount',
name='account_opened',
field=models.DateTimeField(default=datetime.datetime(2018, 12, 28, 19, 40, 54, 177327)),
),
]
| 24.16 | 100 | 0.594371 | [
"MIT"
] | blarmon/bank-account-microservice | account/migrations/0002_auto_20181228_1940.py | 604 | Python |
import logging
import os
import types
from datetime import datetime
import pandas as pd
from sdgym.data import load_dataset
from sdgym.evaluate import compute_scores
from sdgym.synthesizers import BaseSynthesizer
LOGGER = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(__file__)
LEADERBOARD_PATH = os.path.join(BASE_DIR, 'leaderboard.csv')
DEFAULT_DATASETS = [
"adult",
"alarm",
"asia",
"census",
"child",
"covtype",
"credit",
"grid",
"gridr",
"insurance",
"intrusion",
"mnist12",
"mnist28",
"news",
"ring"
]
def compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3):
"""Compute the scores of a synthesizer over a list of datasets.
The results are returned in a raw format as a ``pandas.DataFrame`` containing:
- One row for each dataset+scoring method (for example, a classifier)
- One column for each computed metric
- The columns:
- dataset
- distance
- name (of the scoring method)
- iteration
For example, evaluating a synthesizer on the ``adult`` and ``asia`` datasets with 2
iterations produces a table similar to this::
dataset name iter distance accuracy f1 syn_likelihood test_likelihood
adult DecisionTree... 0 0.0 0.79 0.65 NaN NaN
adult AdaBoost... 0 0.0 0.85 0.67 NaN NaN
adult Logistic... 0 0.0 0.79 0.66 NaN NaN
adult MLP... 0 0.0 0.84 0.67 NaN NaN
adult DecisionTree... 1 0.0 0.80 0.66 NaN NaN
adult AdaBoost... 1 0.0 0.86 0.68 NaN NaN
adult Logistic... 1 0.0 0.79 0.65 NaN NaN
adult MLP... 1 0.0 0.84 0.64 NaN NaN
asia Bayesian ... 0 0.0 NaN NaN -2.23 -2.24
asia Bayesian ... 1 0.0 NaN NaN -2.23 -2.24
"""
results = list()
for dataset_name in datasets:
LOGGER.info('Evaluating dataset %s', dataset_name)
train, test, meta, categoricals, ordinals = load_dataset(dataset_name, benchmark=True)
for iteration in range(iterations):
try:
synthesized = synthesizer(train, categoricals, ordinals)
scores = compute_scores(train, test, synthesized, meta)
scores['dataset'] = dataset_name
scores['iteration'] = iteration
results.append(scores)
except Exception:
LOGGER.exception('Error computing scores for %s on dataset %s - iteration %s',
_get_synthesizer_name(synthesizer), dataset_name, iteration)
return pd.concat(results, sort=False)
def _dataset_summary(grouped_df):
dataset = grouped_df.name
scores = grouped_df.mean().dropna()
scores.index = dataset + '/' + scores.index
return scores
def _summarize_scores(scores):
"""Computes a summary of the scores obtained by a synthesizer.
The raw scores returned by the ``compute_benchmark`` function are summarized
by grouping them by dataset and computing the average.
The results are then put in a ``pandas.Series`` object with one value per
dataset and metric.
As an example, the summary of a synthesizer that has been evaluated on the
``adult`` and the ``asia`` datasets produces the following output::
adult/accuracy 0.8765
adult/f1_micro 0.7654
adult/f1_macro 0.7654
asia/syn_likelihood -2.5364
asia/test_likelihood -2.4321
dtype: float64
Args:
scores (pandas.DataFrame):
Raw Scores dataframe as returned by the ``compute_benchmark`` function.
Returns:
pandas.Series:
Summarized scores series in the format described above.
"""
scores = scores.drop(['distance', 'iteration', 'name'], axis=1, errors='ignore')
grouped = scores.groupby('dataset').apply(_dataset_summary)
if isinstance(grouped, pd.Series):
# If more than one dataset, grouped result is a series
# with a multilevel index.
return grouped.droplevel(0)
# Otherwise, if there is only one dataset, it is DataFrame
return grouped.iloc[0]
def _get_synthesizer_name(synthesizer):
"""Get the name of the synthesizer function or class.
If the given synthesizer is a function, return its name.
If it is a method, return the name of the class to which
the method belongs.
Args:
synthesizer (function or method):
The synthesizer function or method.
Returns:
str:
Name of the function or the class to which the method belongs.
"""
if isinstance(synthesizer, types.MethodType):
synthesizer_name = synthesizer.__self__.__class__.__name__
else:
synthesizer_name = synthesizer.__name__
return synthesizer_name
def _get_synthesizers(synthesizers):
"""Get the dict of synthesizers from the input value.
If the input is a synthesizer or an iterable of synthesizers, get their names
and put them on a dict.
Args:
synthesizers (function, class, list, tuple or dict):
A synthesizer (function or method or class) or an iterable of synthesizers
or a dict containing synthesizer names as keys and synthesizers as values.
Returns:
dict[str, function]:
dict containing synthesizer names as keys and function as values.
Raises:
TypeError:
if neither a synthesizer or an iterable or a dict is passed.
"""
if callable(synthesizers):
synthesizers = {_get_synthesizer_name(synthesizers): synthesizers}
if isinstance(synthesizers, (list, tuple)):
synthesizers = {
_get_synthesizer_name(synthesizer): synthesizer
for synthesizer in synthesizers
}
elif not isinstance(synthesizers, dict):
raise TypeError('`synthesizers` can only be a function, a class, a list or a dict')
for name, synthesizer in synthesizers.items():
# If the synthesizer is one of the SDGym Synthesizer classes,
# create and instance and replace it with its fit_sample method.
if isinstance(synthesizer, type) and issubclass(synthesizer, BaseSynthesizer):
synthesizers[name] = synthesizer().fit_sample
return synthesizers
def benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True,
leaderboard_path=LEADERBOARD_PATH, replace_existing=True):
"""Compute the benchmark scores for the synthesizers and return a leaderboard.
The ``synthesizers`` object can either be a single synthesizer or, an iterable of
synthesizers or a dict containing synthesizer names as keys and synthesizers as values.
If ``add_leaderboard`` is ``True``, append the obtained scores to the leaderboard
stored in the ``lederboard_path``. By default, the leaderboard used is the one which
is included in the package, which contains the scores obtained by the SDGym Synthesizers.
If ``replace_existing`` is ``True`` and any of the given synthesizers already existed
in the leaderboard, the old rows are dropped.
Args:
synthesizers (function, class, list, tuple or dict):
The synthesizer or synthesizers to evaluate. It can be a single synthesizer
(function or method or class), or an iterable of synthesizers, or a dict
containing synthesizer names as keys and synthesizers as values. If the input
is not a dict, synthesizer names will be extracted from the given object.
datasets (list[str]):
Names of the datasets to use for the benchmark. Defaults to all the ones available.
iterations (int):
Number of iterations to perform over each dataset and synthesizer. Defaults to 3.
add_leaderboard (bool):
Whether to append the obtained scores to the previous leaderboard or not. Defaults
to ``True``.
leaderboard_path (str):
Path to where the leaderboard is stored. Defaults to the leaderboard included
with the package, which contains the scores obtained by the SDGym synthesizers.
replace_existing (bool):
Whether to replace old scores or keep them in the returned leaderboard. Defaults
to ``True``.
Returns:
pandas.DataFrame:
Table containing one row per synthesizer and one column for each dataset and metric.
"""
synthesizers = _get_synthesizers(synthesizers)
scores = list()
for synthesizer_name, synthesizer in synthesizers.items():
synthesizer_scores = compute_benchmark(synthesizer, datasets, iterations)
summary_row = _summarize_scores(synthesizer_scores)
summary_row.name = synthesizer_name
scores.append(summary_row)
leaderboard = pd.DataFrame(scores)
leaderboard['timestamp'] = datetime.utcnow()
if add_leaderboard:
old_leaderboard = pd.read_csv(
leaderboard_path,
index_col=0,
parse_dates=['timestamp']
)[leaderboard.columns]
if replace_existing:
            # Drop the rows of synthesizers that are being re-evaluated.
            old_leaderboard.drop(labels=leaderboard.index, errors='ignore', inplace=True)
leaderboard = old_leaderboard.append(leaderboard, sort=False)
return leaderboard
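# Minimal usage sketch (not part of the original module): the identity
# "synthesizer" below simply returns the training data unchanged and is an
# assumption made only for illustration; any dataset name from
# DEFAULT_DATASETS could be used instead of 'adult'.
if __name__ == '__main__':
    def identity_synthesizer(real_data, categorical_columns, ordinal_columns):
        # Return the training data itself as the "synthetic" sample.
        return real_data
    scores = benchmark(identity_synthesizer, datasets=['adult'], iterations=1,
                       add_leaderboard=False)
    print(scores)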
| 38.964286 | 97 | 0.633364 | [
"MIT"
] | csala/SDGym | sdgym/benchmark.py | 9,819 | Python |
from typing import Any, Dict
import pandas
import numpy as np
from sklearn import datasets
from opticverge.core.chromosome.abstract_chromosome import AbstractChromosome
from opticverge.core.enum.objective import Objective
from opticverge.core.log.logger import data_logger, DATA
from opticverge.core.solver.abstract_solver import AbstractSolver
from opticverge.external.scikit.enum.normaliser import Normaliser
from opticverge.external.scikit.enum.scoring_function import Scoring
from opticverge.external.scikit.problem.abstract_regression_problem import AbstractRegressionProblem
class RedWineQualityPredictionProblem(AbstractRegressionProblem):
def __init__(self, scoring_function: Scoring, normaliser: Normaliser = None, folds: int = 1):
df = pandas.read_csv("./winequality-red.csv", sep=";", usecols=[
"fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide",
"total sulfur dioxide", "density", "pH", "sulphates", "alcohol", "quality"
])
data = np.array(df[["fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide",
"total sulfur dioxide", "density", "pH", "sulphates", "alcohol"]])
target = np.array(df["quality"])
super(RedWineQualityPredictionProblem, self).__init__(
Objective.Minimisation,
"Red Wine Quality Prediction",
data_x=data,
target_x=target,
normaliser=normaliser,
folds=folds,
scoring_function=scoring_function
)
def log_chromosome(self, chromosome: AbstractChromosome, solver: AbstractSolver,
additional_data: Dict[str, Any] = None, separator="|"):
data_str = super(RedWineQualityPredictionProblem, self).log_chromosome(
chromosome,
solver,
None
)
data_logger.log(DATA, data_str)
def objective_function(self, chromosome: AbstractChromosome):
super(RedWineQualityPredictionProblem, self).objective_function(chromosome)
| 42.3 | 133 | 0.700236 | [
"MIT"
] | opticverge/evolutionary-machine-learning | opticverge/examples/machine_learning/regression/red_wine_quality/problem.py | 2,115 | Python |
"""
# Hello
Demonstrate:
* conversion of regular python script into _Jupyter notebook_
* support **Markdown**
* this is a list
"""
from __future__ import absolute_import, print_function, division
"""
## Hello
This is a *hello world* function.
"""
def hello():
"""
This is a docstring
"""
print("hello")
"""
## Another Cell 1
"""
def main():
hello()
"""
### Run this
"""
if __name__ == '__main__':
def what():
main()
print(what())
"""
## Another Cell 2
"""
| 10.367347 | 64 | 0.582677 | [
"BSD-3-Clause"
] | bwohlberg/py2jn | tests/example.py | 508 | Python |
"""Parser for envpy config parser"""
# Errors
class EnvpyError(Exception):
"""Base class for all envpy errors."""
class MissingConfigError(EnvpyError):
"""Raised when a config item is missing from the environment and has
no default.
"""
class ValueTypeError(EnvpyError):
"""Raised when a Schema is created with an invalid value type"""
class ParsingError(EnvpyError):
"""Raised when the value pulled from the environment cannot be parsed
as the given value type."""
# Parsers
def _parse_str(value):
return value
def _parse_int(value):
return int(value)
def _parse_float(value):
return float(value)
def _parse_bool(value):
is_true = (
value.upper() == "TRUE"
or value == "1"
)
is_false = (
value.upper() == "FALSE"
or value == "0"
)
if is_true:
return True
elif is_false:
return False
else:
raise ValueError()
PARSERS = {
str: _parse_str,
int: _parse_int,
float: _parse_float,
bool: _parse_bool,
}
# Parsing logic
SENTINAL = object()
class Schema: #pylint: disable=too-few-public-methods
"""Schema that describes a single environment config item
Args:
value_type (optional, default=str): The type that envpy should try to
parse the environment variable into.
default (optional): The value that should be used if the variable
cannot be found in the environment.
"""
def __init__(self, value_type=str, default=SENTINAL):
if value_type not in PARSERS:
raise ValueTypeError()
self._parser = PARSERS.get(value_type)
self._default = default
def parse(self, key, value):
"""Parse the environment value for a given key against the schema.
Args:
key: The name of the environment variable.
value: The value to be parsed.
"""
if value is not None:
try:
return self._parser(value)
except Exception:
raise ParsingError("Error parsing {}".format(key))
elif self._default is not SENTINAL:
return self._default
else:
raise KeyError(key)
def parse_env(config_schema, env):
"""Parse the values from a given environment against a given config schema
Args:
config_schema: A dict which maps the variable name to a Schema object
that describes the requested value.
env: A dict which represents the value of each variable in the
environment.
"""
try:
return {
key: item_schema.parse(key, env.get(key))
for key, item_schema in config_schema.items()
}
except KeyError as error:
raise MissingConfigError(
"Required config not set: {}".format(error.args[0])
)
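# Usage sketch (illustrative, not part of the original module); the variable
# names and defaults below are assumptions chosen only for demonstration.
if __name__ == '__main__':
    import os
    demo_schema = {
        'PORT': Schema(value_type=int, default=8080),
        'DEBUG': Schema(value_type=bool, default=False),
        'API_KEY': Schema(value_type=str, default='not-set'),
    }
    print(parse_env(demo_schema, os.environ))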
| 25.633929 | 78 | 0.619993 | [
"MIT"
] | jonathanlloyd/envpy | envpy/parser.py | 2,871 | Python |
from . import generator
from . import discriminator
| 17.333333 | 27 | 0.807692 | [
"MIT"
] | rexwangcc/gengine | GenerativeModels/BGAN/__init__.py | 52 | Python |
# -*- coding: utf-8 -*-
"""Application configuration.
Most configuration is set via environment variables.
For local development, use a .env file to set
environment variables.
"""
from environs import Env
env = Env()
env.read_env()
ENV = env.str("FLASK_ENV", default="production")
DEBUG = ENV == "development"
SQLALCHEMY_DATABASE_URI = env.str("DATABASE_URL")
SECRET_KEY = env.str("SECRET_KEY")
SEND_FILE_MAX_AGE_DEFAULT = env.int("SEND_FILE_MAX_AGE_DEFAULT")
BCRYPT_LOG_ROUNDS = env.int("BCRYPT_LOG_ROUNDS", default=13)
DEBUG_TB_ENABLED = DEBUG
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = "simple" # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
APPLICATION_ROOT = "/"
SCRIPT_NAME = "/"
AUTH_METHOD = env.str("AUTH_METHOD") # can be 'LDAP', 'OMERO'
if AUTH_METHOD == "LDAP":
LDAP_PORT = env.int("LDAP_PORT", 369)
LDAP_HOST = env.str("LDAP_HOST", "localhost")
LDAP_READONLY = env.bool("LDAP_READONLY", True)
LDAP_BASE_DN = env.str("LDAP_BASE_DN", "")
LDAP_BIND_USER_DN = env.str("LDAP_BIND_USER_DN")
LDAP_BIND_USER_PASSWORD = env.str("LDAP_BIND_USER_PASSWORD")
LDAP_BIND_DIRECT_CREDENTIALS = env.bool("LDAP_BIND_DIRECT_CREDENTIALS")
LDAP_ALWAYS_SEARCH_BIND = env.bool("LDAP_ALWAYS_SEARCH_BIND")
LDAP_USER_LOGIN_ATTR = env.str("LDAP_USER_LOGIN_ATTR", "uid")
LDAP_USER_RDN_ATTR = env.str("LDAP_USER_RDN_ATTR", "uid")
LDAP_USER_DN = env.str("LDAP_USER_DN")
LDAP_USER_SEARCH_SCOPE = env.str("LDAP_USER_SEARCH_SCOPE", "LEVEL")
LDAP_SEARCH_FOR_GROUPS = env.bool("LDAP_SEARCH_FOR_GROUPS", False)
elif AUTH_METHOD == "OMERO":
OMERO_HOST = env.str("OMERO_HOST", "localhost")
OMERO_PORT = env.int("OMERO_PORT", 4064)
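# Illustrative .env sketch; the values below are placeholders, not project
# defaults (the variable names are taken from the settings read above):
#
#     FLASK_ENV=development
#     DATABASE_URL=postgresql://localhost/cataloger
#     SECRET_KEY=change-me
#     SEND_FILE_MAX_AGE_DEFAULT=0
#     AUTH_METHOD=OMERO
#     OMERO_HOST=omero.example.org
#     OMERO_PORT=4064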
| 34.24 | 75 | 0.739486 | [
"MIT"
] | centuri-engineering/cataloger | cataloger/settings.py | 1,712 | Python |
# -*- coding: utf-8 -*-
"""Test human2bytes function."""
import pytest
from pcof import bytesconv
@pytest.mark.parametrize(
"size, unit, result",
[
(1, "KB", "1024.00"),
(1, "MB", "1048576.00"),
(1, "GB", "1073741824.00"),
(1, "TB", "1099511627776.00"),
(1, "PB", "1125899906842624.00"),
(1, "EB", "1152921504606846976.00"),
],
)
def test_human2bytes(size, unit, result):
assert bytesconv.human2bytes(size, unit) == result
@pytest.mark.parametrize(
"size, unit, precision, result",
[
(1, "KB", 0, "1024"),
(2, "GB", 0, "2147483648"),
(2, "GB", 1, "2147483648.0"),
(2, "GB", 3, "2147483648.000"),
],
)
def test_human2bytes_precision(size, unit, precision, result):
assert bytesconv.human2bytes(size, unit, precision=precision) == result
@pytest.mark.parametrize(
"size, unit, base, result",
[
(1, "KB", 1000, "1000.00"),
(1, "MB", 1000, "1000000.00"),
(1, "GB", 1000, "1000000000.00"),
(1, "TB", 1000, "1000000000000.00"),
(4, "TB", 1000, "4000000000000.00"),
(1, "PB", 1000, "1000000000000000.00"),
(1, "EB", 1000, "1000000000000000000.00"),
],
)
def test_human2bytes_base(size, unit, base, result):
assert bytesconv.human2bytes(size, unit, base=base) == result
def test_human2bytes_raise():
with pytest.raises(ValueError, match="value is not a number"):
bytesconv.human2bytes("notnumber", "KB")
with pytest.raises(
ValueError, match="invalid unit. It must be KB, MB, GB, TB, PB, EB, ZB"
):
bytesconv.human2bytes(1, "XX")
# vim: ts=4
| 26.854839 | 79 | 0.572973 | [
"MIT"
] | thobiast/pcof | tests/test_bytesconv_human2bytes.py | 1,665 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import multiprocessing
import sys
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .framework import Program, default_main_program, Variable
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
__all__ = ['Executor', 'global_scope', 'scope_guard']
g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
def global_scope():
"""
    Get the global/default scope instance. There are a lot of APIs that use
    :code:`global_scope` as their default value, e.g., :code:`Executor.run`
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(fluid.global_scope().find_var("data").get_tensor())
Returns:
Scope: The global/default scope instance.
"""
return g_scope
def _switch_scope(scope):
global g_scope
ex = g_scope
g_scope = scope
return ex
@signature_safe_contextmanager
def scope_guard(scope):
"""
    Change the global/default scope instance via a Python `with` statement. All
    variables created at runtime will be assigned to the new scope.
Args:
scope: The new global/default scope.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(new_scope.find_var("data").get_tensor())
"""
ex = _switch_scope(scope)
yield
_switch_scope(ex)
def as_numpy(tensor):
"""
    Convert a Tensor to a numpy.ndarray; it only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
tensor = new_scope.find_var("data").get_tensor()
fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())
Args:
        tensor(Variable): an instance of Tensor
Returns:
numpy.ndarray
"""
if isinstance(tensor, core.LoDTensorArray):
return [as_numpy(t) for t in tensor]
if isinstance(tensor, list):
return [as_numpy(t) for t in tensor]
assert isinstance(tensor, core.LoDTensor)
lod = tensor.lod()
if len(lod) > 0:
raise RuntimeError("Some of your fetched tensors hold LoD information. \
They can not be completely cast to Python ndarray. \
Please set the parameter 'return_numpy' as 'False' to \
return LoDTensor itself directly.")
if tensor._is_initialized():
return np.array(tensor)
else:
return None
def has_feed_operators(block, feed_targets, feed_holder_name):
""" Check whether the block already has feed operators.
Return false if the block does not have any feed operators.
If some feed operators have been prepended to the block, check that
the info contained in these feed operators matches the feed_targets
and feed_holder_name. Raise exception when any mismatch is found.
Return true when the block has feed operators with matching info.
Args:
block: a block instance (typically global block of a program)
feed_targets: a dictionary of {feed_target_name: feed_target_data}
feed_holder_name: the name of the variable that holds the data of
all feed targets. The type of this feed_holder variable is
FEED_MINIBATCH, which is essentially vector<LoDTensor>.
Returns:
A boolean value that indicates whether a block has feed operators
that match the info contained in feed_targets and feed_holder_name.
"""
feed_count = 0
for op in block.ops:
if op.desc.type() == 'feed':
feed_count += 1
assert op.desc.input('X')[0] == feed_holder_name
feed_target_name = op.desc.output('Out')[0]
if feed_target_name not in feed_targets:
raise Exception("'feed_targets' does not have {} variable".
format(feed_target_name))
else:
break
if feed_count > 0 and feed_count != len(feed_targets):
raise Exception(
"Feed operators in program desc do not match 'feed_targets'")
return feed_count > 0
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
""" Check whether the block already has fetch operators.
Return false if the block does not have any fetch operators.
If some fetch operators have been appended to the block, check that
the info contained in these fetch operators matches the fetch_targets
and fetch_holder_name. Raise exception when any mismatch is found.
Return true when the block has fetch operators with matching info.
Args:
block: a block instance (typically global block of a program)
fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
fetch_holder_name: the name of the variable that holds the data of
all fetch targets. The type of this fetch_holder variable is
FETCH_LIST, which is essentially vector<LoDTensor>.
Return:
A boolean value that indicates whether a block has fetch operators
that match the info contained in fetch_targets and fetch_holder_name.
"""
fetch_count = 0
for op in block.ops:
if op.desc.type() == 'fetch':
fetch_count += 1
assert op.desc.output('Out')[0] == fetch_holder_name
fetch_target_name = op.desc.input('X')[0]
if fetch_target_name not in [
var.desc.name() for var in fetch_targets
]:
raise Exception("'fetch_targets' does not have {} variable".
format(fetch_target_name))
idx = op.desc.attr('col')
assert fetch_target_name == fetch_targets[idx].desc.name()
if fetch_count > 0 and fetch_count != len(fetch_targets):
raise Exception(
"Fetch operators in program desc do not match 'fetch_targets'")
return fetch_count > 0
def _fetch_var(name, scope=None, return_numpy=True):
"""
Fetch the value of the variable with the given name from the
given scope.
Args:
name(str): name of the variable. Typically, only persistable variables
can be found in the scope used for running the program.
scope(core.Scope|None): scope object. It should be the scope where
you pass to Executor.run() when running your program.
If None, global_scope() will be used. Default None.
return_numpy(bool): whether convert the tensor to numpy.ndarray.
Default True.
Returns:
LodTensor|numpy.ndarray
"""
assert isinstance(name, str)
if scope is None:
scope = global_scope()
assert isinstance(scope, core._Scope)
var = scope.find_var(name)
assert var is not None, (
"Cannot find " + name + " in scope. Perhaps you need to make the"
" variable persistable by using var.persistable = True in your"
" program.")
tensor = var.get_tensor()
if return_numpy:
tensor = as_numpy(tensor)
return tensor
def _to_name_str(var):
if isinstance(var, Variable):
return var.desc.name()
elif isinstance(var, str):
return var
elif isinstance(var, six.string_types):
return str(var)
else:
raise TypeError(str(var) + " should be Variable or str")
def _get_strong_program_cache_key(program, feed, fetch_list):
return str(id(program)) + _get_program_cache_key(feed, fetch_list)
def _get_program_cache_key(feed, fetch_list):
feed_var_names = list(feed.keys())
fetch_var_names = list(map(_to_name_str, fetch_list))
return str(feed_var_names + fetch_var_names)
def _as_lodtensor(data, place):
"""
    Convert a numpy.ndarray to a Tensor; it only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
>>> import paddle.fluid as fluid
>>> place = fluid.CPUPlace()
        >>> exe = fluid.Executor(place)
        >>> data = np.random.random(size=(100, 200, 300))
>>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
>>> ...
Args:
        data(numpy.ndarray): an instance of ndarray
Returns:
LoDTensor
"""
if isinstance(data, list):
raise RuntimeError("Some of your feed data hold LoD information. \
They can not be completely cast from a list of Python \
ndarray to LoDTensor. Please convert data to LoDTensor \
directly before feeding the data.\
")
# single tensor case
tensor = core.LoDTensor()
tensor.set(data, place)
return tensor
class Executor(object):
"""
An Executor in Python, supports single/multiple-GPU running,
and single/multiple-CPU running. Python executor takes a program,
adds feed operators and fetch operators to this program according
to feed map and fetch_list. Feed map provides input data for the
    program. fetch_list provides the variables (or names) that the user wants
    to get after the program runs. Note: the executor will run all operators
    in the program, not only the operators that the fetch_list depends on.
It stores the global variables into the global scope, and creates a
local scope for the temporary variables. The contents in local scope
may be discarded after every minibatch forward/backward finished.
But the global scope variables will be persistent through different runs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
# Run the startup program once and only once.
# Not need to optimize/compile the startup program.
startup_program.random_seed=1
exe.run(startup_program)
# Run the main program directly without compile.
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(train_program,
feed={"X": x},
fetch_list=[loss.name])
# Or, compiled the program and run. See `CompiledProgram`
# for more detail.
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
loss_name=loss.name)
loss_data, = exe.run(compiled_prog,
feed={"X": x},
fetch_list=[loss.name])
Args:
place(fluid.CPUPlace|fluid.CUDAPlace(n)): indicate the executor run on which device.
"""
def __init__(self, place):
self.place = place
self.program_caches = dict()
self.ctx_caches = dict()
self.scope_caches = dict()
self.var_caches = dict()
p = core.Place()
p.set_place(self.place)
self._default_executor = core.Executor(p)
self._closed = False
def _get_var_cache(self, program_cache_key):
return self.var_caches.get(program_cache_key, None)
def _get_scope_cache(self, program_cache_key):
return self.scope_caches.get(program_cache_key, None)
def _get_ctx_cache(self, program_cache_key):
return self.ctx_caches.get(program_cache_key, None)
def _get_program_cache(self, program_cache_key):
return self.program_caches.get(program_cache_key, None)
def _add_program_cache(self, program_cache_key, program):
self.program_caches[program_cache_key] = program
def _add_ctx_cache(self, ctx_cache_key, ctx):
self.ctx_caches[ctx_cache_key] = ctx
def _add_scope_cache(self, scope_cache_key, scope):
self.scope_caches[scope_cache_key] = scope
def _add_var_cache(self, var_cache_key, var):
self.var_caches[var_cache_key] = var
def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
fetch_var_name):
tmp_program = program.clone()
global_block = tmp_program.global_block()
if feed_var_name in global_block.vars:
feed_var = global_block.var(feed_var_name)
else:
feed_var = global_block.create_var(
name=feed_var_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True)
if fetch_var_name in global_block.vars:
fetch_var = global_block.var(fetch_var_name)
else:
fetch_var = global_block.create_var(
name=fetch_var_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True)
# prepend feed operators
if not has_feed_operators(global_block, feed, feed_var_name):
for i, name in enumerate(feed):
out = global_block.var(name)
global_block._prepend_op(
type='feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i})
# append fetch_operators
if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
for i, var in enumerate(fetch_list):
assert isinstance(var, Variable) or isinstance(
var, six.string_types), (
"Wrong type for fetch_list[%s]: %s" % (i, type(var)))
global_block.append_op(
type='fetch',
inputs={'X': [var]},
outputs={'Out': [fetch_var]},
attrs={'col': i})
return tmp_program
def _feed_data(self, program, feed, feed_var_name, scope):
# feed var to framework
for op in program.global_block().ops:
if op.desc.type() == 'feed':
feed_target_name = op.desc.output('Out')[0]
cur_feed = feed[feed_target_name]
if not isinstance(cur_feed, core.LoDTensor):
cur_feed = _as_lodtensor(cur_feed, self.place)
idx = op.desc.attr('col')
core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
else:
break
def _fetch_data(self, fetch_list, fetch_var_name, scope):
outs = [
core.get_fetch_variable(scope, fetch_var_name, i)
for i in six.moves.range(len(fetch_list))
]
return outs
'''
TODO(typhoonzero): Define "no longer use" meaning? Can user create
a new Executor for the same program and run?
TODO(panyx0718): Why ParallelExecutor doesn't have close?
'''
def close(self):
"""
Close this executor.
You can no longer use this executor after calling this method.
For the distributed training, this method would free the resource
on PServers related to the current Trainer.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
# execute training or testing
exe.close()
"""
if not self._closed:
self._default_executor.close()
self._closed = True
def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
return_numpy):
exe = program._executor
if isinstance(feed, dict):
feed_tensor_dict = dict()
for feed_name in feed:
feed_tensor = feed[feed_name]
if not isinstance(feed_tensor, core.LoDTensor):
feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be
                    # split; splitting is fast on CPU
feed_tensor.set(feed[feed_name], core.CPUPlace())
feed_tensor_dict[feed_name] = feed_tensor
exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
elif isinstance(feed, list) or isinstance(feed, tuple):
if len(feed) != len(program._places):
raise ValueError(
"Feed a list of tensor, the list should be the same size as places"
)
res = list()
for i, each in enumerate(feed):
if not isinstance(each, dict):
raise TypeError(
"Each element of feed list should be a dict")
res_dict = dict()
for feed_name in each:
tensor = each[feed_name]
if not isinstance(tensor, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(tensor, program._places[i])
tensor = tmp
res_dict[feed_name] = tensor
res.append(res_dict)
exe.feed_tensors_into_local_scopes(res)
fetch_var_names = list(map(_to_name_str, fetch_list))
exe.run(fetch_var_names, fetch_var_name)
arr = scope.find_var(fetch_var_name).get_lod_tensor_array()
if return_numpy:
return as_numpy(arr)
return [arr[i] for i in range(len(arr))]
def _check_fetch_vars_persistable(self, program, fetch_list):
for var in fetch_list:
if isinstance(var, Variable):
persistable = var.persistable
else:
block_num = program.desc.num_blocks()
persistable = None
var_name = cpt.to_bytes(var)
for i in six.moves.range(block_num):
var_desc = program.desc.block(i).find_var(var_name)
if var_desc:
persistable = var_desc.persistable()
break
assert persistable is not None, "Variable {} is not found".format(
var)
if not persistable:
                logging.warning("""
     Detected that build_strategy.memory_optimize = True, but some variables in the fetch
     list are not persistable. You may get wrong fetched values, or an exception may be thrown
     because a variable in the fetch list cannot be found.
TO FIX this:
# Sample
conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
# if you need to fetch conv1, then:
conv1.persistable = True
""")
def run(self,
program=None,
feed=None,
fetch_list=None,
feed_var_name='feed',
fetch_var_name='fetch',
scope=None,
return_numpy=True,
use_program_cache=False):
"""
Run program by this Executor. Feed data by feed map, fetch result by
        fetch_list. The Python executor takes a program, adds feed operators and
        fetch operators to this program according to the feed map and fetch_list.
        The feed map provides input data for the program. fetch_list provides
        the variables (or names) that the user wants to get after the program runs.
        Note: the executor will run all operators in the program, not only the
        operators that the fetch_list depends on.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x},
fetch_list=[loss.name])
Args:
program(Program|CompiledProgram): the program that need to run,
if not provided, then default_main_program (not compiled) will be used.
feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
fetch_list(list): a list of variable or variable names that user
wants to get, this method will return them according to this list.
feed_var_name(str): the name for the input variable of
feed Operator.
fetch_var_name(str): the name for the output variable of
fetch Operator.
scope(Scope): the scope used to run this program, you can switch
it to different scope. default is global_scope
return_numpy(bool): if convert the fetched tensor to numpy
use_program_cache(bool): whether to use the cached program
                settings across batches. Setting it to True would be faster
only when (1) the program is not compiled with data parallel,
and (2) program, feed variable names and fetch_list variable
names do not changed compared to the last step.
Returns:
list(numpy.array): fetch result according to fetch_list.
"""
try:
return self._run_impl(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache)
except Exception as e:
if not isinstance(e, core.EOFException):
print("An exception was thrown!\n {}".format(str(e)))
raise e
def _run_impl(self, program, feed, fetch_list, feed_var_name,
fetch_var_name, scope, return_numpy, use_program_cache):
if self._closed:
raise RuntimeError("Attempted to use a closed Executor")
if scope is None:
scope = global_scope()
if fetch_list is None:
fetch_list = []
compiled = isinstance(program, compiler.CompiledProgram)
# For backward compatibility, run directly.
if not compiled:
return self._run_program(
program,
self._default_executor,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache)
else:
if fetch_list and program._is_data_parallel and program._program and \
program._build_strategy._use_legacy_memory_optimize_strategy:
self._check_fetch_vars_persistable(program._program, fetch_list)
program._compile(scope, self.place)
if program._is_data_parallel:
return self._run_parallel(
program,
scope=scope,
feed=feed,
fetch_list=fetch_list,
fetch_var_name=fetch_var_name,
return_numpy=return_numpy)
elif program._is_inference:
return self._run_inference(program._executor, feed)
else:
# TODO(panyx0718): Can compile program to optimize executor
# performance.
# TODO(panyx0718): executor should be able to run graph.
assert program._program, "CompiledProgram is compiled from graph, can only run with_data_parallel."
# use_program_cache is not valid with CompiledProgram
return self._run_program(
program._program,
self._default_executor,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=False)
def _run_program(self, program, exe, feed, fetch_list, feed_var_name,
fetch_var_name, scope, return_numpy, use_program_cache):
if feed is None:
feed = {}
elif isinstance(feed, (list, tuple)):
assert len(feed) == 1, "Not compiled with data parallel"
feed = feed[0]
if not isinstance(feed, dict):
raise TypeError(
"feed requires dict as its Parameter. But you passed in %s" %
(type(feed)))
if program is None:
program = default_main_program()
if not isinstance(program, Program):
raise TypeError(
"Executor requires Program as its Parameter. But you passed in %s"
% (type(program)))
if use_program_cache:
cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
cached_program = self._get_program_cache(cache_key)
cached_ctx = self._get_ctx_cache(cache_key)
cached_scope = self._get_scope_cache(cache_key)
cached_var = self._get_var_cache(cache_key)
if cached_program is None:
cached_program = self._add_feed_fetch_ops(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name)
self._add_program_cache(cache_key, cached_program)
fetch_list_str = list(map(_to_name_str, fetch_list))
cached_ctx = self._default_executor.prepare_ctx_cache(
cached_program.desc, 0, fetch_list_str, False)
cached_var = self._default_executor.create_variables(
cached_program.desc, scope, 0)
# currently, we cache program, vars, sub_scope here
# we suppose that in a life cycle of training, a user
# will not create many programs. So, here the basic
# rule of caching is to cache all unseen (program, var, scope)
                # when a user uses use_program_cache.
cached_scope = scope.new_scope()
self._add_ctx_cache(cache_key, cached_ctx)
self._add_var_cache(cache_key, cached_var)
self._add_scope_cache(cache_key, cached_scope)
program = cached_program
ctx = cached_ctx
scope = cached_scope
var = cached_var
else:
program = self._add_feed_fetch_ops(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name)
self._feed_data(program, feed, feed_var_name, scope)
if not use_program_cache:
exe.run(program.desc, scope, 0, True, True, fetch_var_name)
else:
exe.run_cached_prepared_ctx(ctx, scope, False, False, False)
outs = self._fetch_data(fetch_list, fetch_var_name, scope)
if return_numpy:
outs = as_numpy(outs)
return outs
def _run_inference(self, exe, feed):
return exe.run(feed)
def _dump_debug_info(self, program=None, trainer=None):
with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
fout.write(str(trainer))
if program._fleet_opt:
with open("fleet_desc.prototxt", "w") as fout:
fout.write(str(program._fleet_opt["fleet_desc"]))
def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
filelist_length = len(dataset.dataset.get_filelist())
if filelist_length < pipeline_num:
pipeline_num = filelist_length
print(
"Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
% (filelist_length, filelist_length))
if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
print(
"Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
% (filelist_length // pipeline_num, filelist_length))
pipeline_opt["concurrency_list"][
0] = filelist_length // pipeline_num
dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
return pipeline_num
def _prepare_trainer(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
if scope is None:
scope = global_scope()
if fetch_list is None:
fetch_list = []
if fetch_info is None:
fetch_info = []
assert len(fetch_list) == len(fetch_info)
compiled = isinstance(program, compiler.CompiledProgram)
if not compiled:
# TODO: Need a better way to distinguish and specify different execution mode
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program._pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(program._fleet_opt)
trainer._set_program(program)
else:
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program.program._pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(
program.program._fleet_opt)
trainer._set_program(program.program)
# The following thread_num-determined logic will be deprecated
if thread <= 0:
if dataset.thread_num <= 0:
raise RuntimeError(
"You should set thread num first, either in Dataset"
"or in Executor.train_from_dataset")
else:
trainer._set_thread(dataset.thread_num)
else:
trainer._set_thread(thread)
trainer._set_debug(debug)
trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
return scope, trainer
def infer_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
"""
The document of infer_from_dataset is almost the same as
train_from_dataset, except that in distributed training,
push gradients will be disabled in infer_from_dataset.
        infer_from_dataset() can easily be used for evaluation in a
        multi-threaded setting.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed. default is None
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0
debug(bool): whether a user wants to run infer_from_dataset, default is False
fetch_list(Variable List): fetch variable list, each variable
will be printed during training, default is None
fetch_info(String List): print information for each variable, default is None
print_period(int): the number of mini-batches for each print, default is 100
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.infer_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
"""
        if dataset is None:
raise RuntimeError("dataset is needed and should be initialized")
dataset._prepare_to_run()
scope, trainer = self._prepare_trainer(
program=program,
dataset=dataset,
scope=scope,
thread=thread,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
trainer._set_infer(True)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope,
dataset.dataset,
trainer._desc())
dataset._finish_to_run()
return None
def train_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
"""
Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
Given a program, either a program or compiled program, train_from_dataset will
consume all data samples in dataset. Input scope can be given by users. By default,
scope is global_scope(). The total number of thread run in training is `thread`.
Thread number used in training will be minimum value of threadnum in Dataset and
the value of thread in this interface. Debug can be set so that executor will display
Run-Time for all operators and the throughputs of current training task.
Note: train_from_dataset will destroy all resources created within executor for each run.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed.
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread)
debug(bool): whether a user wants to run train_from_dataset
fetch_list(Variable List): fetch variable list, each variable
will be printed during training
fetch_info(String List): print information for each variable
print_period(int): the number of mini-batches for each print
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
"""
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")
if program._pipeline_opt:
thread = self._adjust_pipeline_resource(program._pipeline_opt,
dataset, thread)
dataset._prepare_to_run()
scope, trainer = self._prepare_trainer(
program=program,
dataset=dataset,
scope=scope,
thread=thread,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope,
dataset.dataset,
trainer._desc())
dataset._finish_to_run()
return None
| 40.401388 | 128 | 0.598504 | [
"Apache-2.0"
] | AnKingOne/Paddle | python/paddle/fluid/executor.py | 40,765 | Python |
def area(l,c):
a = l*c
    return f'The area of a {l}x{c} plot is {a}m²'
print("Land Plot Control")
print()
largura = float(input("Width (m): "))
altura = float(input("Height (m): "))
print(area(largura,altura)) | 28 | 53 | 0.633929 | [
"MIT"
] | AbelRapha/Python-Exercicios-CeV | Mundo 3/ex096 Funcao que Calcula Area.py | 225 | Python |
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
assert isnull(grouped['B'].first()['foo'])
assert isnull(grouped['B'].last()['foo'])
assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
        # the grouped series keeps the name of the original column
        assert expected.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=['a', 'b', 'c'])
result = df.groupby('a').nth(10)
expected = DataFrame(index=Index([], name='a'), columns=['b', 'c'])
assert_frame_equal(result, expected)
result = df.groupby(['a', 'b']).nth(10)
expected = DataFrame(index=MultiIndex([[], []], [[], []],
names=['a', 'b']),
columns=['c'])
assert_frame_equal(result, expected)
| 40.225806 | 79 | 0.498697 | [
"MIT"
] | QiqeMtz/Ethereum_Forecast | lib/python3.6/site-packages/pandas/tests/groupby/test_nth.py | 9,976 | Python |
import tensorflow as tf
class Layers(object):
def __init__(self):
self.name_bank, self.params_trainable = [], []
self.num_params = 0
self.initializer_xavier = tf.initializers.glorot_normal()
def elu(self, inputs): return tf.nn.elu(inputs)
def relu(self, inputs): return tf.nn.relu(inputs)
def sigmoid(self, inputs): return tf.nn.sigmoid(inputs)
def softmax(self, inputs): return tf.nn.softmax(inputs)
def swish(self, inputs): return tf.nn.swish(inputs)
def relu6(self, inputs): return tf.nn.relu6(inputs)
def dropout(self, inputs, rate): return tf.nn.dropout(inputs, rate=rate)
def maxpool(self, inputs, pool_size, stride_size):
return tf.nn.max_pool2d(inputs, ksize=[1, pool_size, pool_size, 1], \
padding='VALID', strides=[1, stride_size, stride_size, 1])
def avgpool(self, inputs, pool_size, stride_size):
return tf.nn.avg_pool2d(inputs, ksize=[1, pool_size, pool_size, 1], \
padding='VALID', strides=[1, stride_size, stride_size, 1])
def get_weight(self, vshape, transpose=False, bias=True, name=""):
try:
idx_w = self.name_bank.index("%s_w" %(name))
if(bias): idx_b = self.name_bank.index("%s_b" %(name))
except:
w = tf.Variable(self.initializer_xavier(vshape), \
name="%s_w" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_w" %(name))
self.params_trainable.append(w)
tmpparams = 1
for d in vshape: tmpparams *= d
self.num_params += tmpparams
if(bias):
if(transpose): b = tf.Variable(self.initializer_xavier([vshape[-2]]), \
name="%s_b" %(name), trainable=True, dtype=tf.float32)
else: b = tf.Variable(self.initializer_xavier([vshape[-1]]), \
name="%s_b" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_b" %(name))
self.params_trainable.append(b)
                self.num_params += vshape[-2] if transpose else vshape[-1]  # one bias per output channel
else:
w = self.params_trainable[idx_w]
if(bias): b = self.params_trainable[idx_b]
if(bias): return w, b
else: return w
def fullcon(self, inputs, variables):
[weights, biasis] = variables
out = tf.matmul(inputs, weights) + biasis
return out
def conv2d(self, inputs, variables, stride_size, padding):
[weights, biasis] = variables
out = tf.nn.conv2d(inputs, weights, \
strides=[1, stride_size, stride_size, 1], padding=padding) + biasis
return out
def dwconv2d(self, inputs, variables, stride_size, padding):
[weights, biasis] = variables
out = tf.nn.depthwise_conv2d(inputs, weights, \
strides=[1, stride_size, stride_size, 1], padding=padding) + biasis
return out
def batch_norm(self, inputs, name=""):
# https://arxiv.org/pdf/1502.03167.pdf
mean = tf.reduce_mean(inputs)
std = tf.math.reduce_std(inputs)
var = std**2
try:
idx_offset = self.name_bank.index("%s_offset" %(name))
idx_scale = self.name_bank.index("%s_scale" %(name))
except:
offset = tf.Variable(0, \
name="%s_offset" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_offset" %(name))
self.params_trainable.append(offset)
self.num_params += 1
scale = tf.Variable(1, \
name="%s_scale" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_scale" %(name))
self.params_trainable.append(scale)
self.num_params += 1
else:
offset = self.params_trainable[idx_offset]
scale = self.params_trainable[idx_scale]
        # offset is initialised to zero and scale to one; both are trainable.
out = tf.nn.batch_normalization(
x = inputs,
mean=mean,
variance=var,
offset=offset,
scale=scale,
variance_epsilon=1e-12,
name=name
)
return out
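# Minimal usage sketch, assuming eager-mode TF2; the toy input shape, the
# [3, 3, 3, 16] kernel and the "demo_*" names are illustrative assumptions only
# and are not part of the ReXNet model definition itself.
if __name__ == "__main__":
    layers = Layers()
    dummy = tf.random.normal([1, 32, 32, 3])  # NHWC toy input
    w, b = layers.get_weight([3, 3, 3, 16], name="demo_conv")
    feat = layers.conv2d(dummy, [w, b], stride_size=1, padding="SAME")
    feat = layers.batch_norm(feat, name="demo_bn")
    feat = layers.relu(feat)
    print(feat.shape, "trainable parameters:", layers.num_params)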
| 33.943548 | 87 | 0.578997 | [
"MIT"
] | YeongHyeon/ReXNet-TF2 | source/layers.py | 4,209 | Python |
# python3 imports
from re import compile as compile_regex
from gettext import gettext as _
# project imports
from wintersdeep_postcode.postcode import Postcode
from wintersdeep_postcode.exceptions.validation_fault import ValidationFault
## A wrapper for validation of standard postcodes
# @remarks see \ref wintersdeep_postcode.postcode_types.standard_postcode
class StandardPostcodeValidator(object):
## Areas that only have single digit districts (ignoring sub-divisions)
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithOnlySingleDigitDistricts = []
## Checks if a postcode is in an area with only single digit districts and if
# so - that the district specified is only a single digit.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithOnlySingleDigitDistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_district >= 10:
single_digit_districts = cls.AreasWithOnlySingleDigitDistricts
impacted_by_rule = postcode.outward_area in single_digit_districts
return impacted_by_rule
## Areas that only have double digit districts (ignoring sub-divisions)
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithOnlyDoubleDigitDistricts = []
## Checks if a postcode is in an area with only double digit districts and
# if so - that the district specified has two digits as required.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithOnlyDoubleDigitDistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_district <= 9:
double_digit_districts = cls.AreasWithOnlyDoubleDigitDistricts
impacted_by_rule = postcode.outward_area in double_digit_districts
return impacted_by_rule
## Areas that have a district zero.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithDistrictZero = []
    ## Checks that, if a postcode specifies district zero, its area actually has a district zero.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithDistrictZero(cls, postcode):
impacted_by_rule = False
if postcode.outward_district == 0:
areas_with_district_zero = cls.AreasWithDistrictZero
impacted_by_rule = not postcode.outward_area in areas_with_district_zero
return impacted_by_rule
## Areas that do not have a district 10
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithoutDistrictTen = []
    ## Checks that, if a postcode specifies district ten, its area actually has a district ten.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithoutDistrictTen(cls, postcode):
impacted_by_rule = False
if postcode.outward_district == 10:
areas_without_district_ten = cls.AreasWithoutDistrictTen
impacted_by_rule = postcode.outward_area in areas_without_district_ten
return impacted_by_rule
## Only a few areas have subdivided districts
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithSubdistricts = {}
    ## If a postcode has a subdistrict, check it is supposed to.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
areas_with_subdistricts = cls.AreasWithSubdistricts
impacted_by_rule = not postcode.outward_area in areas_with_subdistricts
if not impacted_by_rule:
subdivided_districts_in_area = areas_with_subdistricts[postcode.outward_area]
if subdivided_districts_in_area:
impacted_by_rule = not postcode.outward_district in subdivided_districts_in_area
return impacted_by_rule
    ## If a district only allows specific subdistricts, make sure any subdistrict given is in scope.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithSpecificSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
areas_with_subdistricts = cls.AreasWithSubdistricts
subdivided_districts_in_area = areas_with_subdistricts.get(postcode.outward_area, {})
specific_subdistrict_codes = subdivided_districts_in_area.get(postcode.outward_district, None)
impacted_by_rule = specific_subdistrict_codes and \
not postcode.outward_subdistrict in specific_subdistrict_codes
return impacted_by_rule
    ## Characters that are not used in the first position.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
FirstPositionExcludes = []
    ## Checks that a postcode does not use an excluded character in the first position.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckFirstPositionExcludes(cls, postcode):
first_postion_char = postcode.outward_area[0]
impacted_by_rule = first_postion_char in cls.FirstPositionExcludes
return impacted_by_rule
    ## Characters that are not used in the second position.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
SecondPositionExcludes = []
    ## Checks that a postcode does not use an excluded character in the second position.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSecondPositionExcludes(cls, postcode):
impacted_by_rule = False
if len(postcode.outward_area) > 1:
second_postion_char = postcode.outward_area[1]
impacted_by_rule = second_postion_char in cls.SecondPositionExcludes
return impacted_by_rule
    ## Characters that are used in the third alpha position (for single digit areas).
# @remarks loaded from JSON file 'standard_postcode_validator.json'
SingleDigitAreaSubdistricts = []
    ## Checks that a postcode in a single digit area does not use an unsupported subdistrict character.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSingleDigitAreaSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
if len(postcode.outward_area) == 1:
allowed_subdistricts = cls.SingleDigitAreaSubdistricts
subdistrict = postcode.outward_subdistrict
impacted_by_rule = not subdistrict in allowed_subdistricts
return impacted_by_rule
    ## Characters that are used in the fourth alpha position (for double digit areas).
# @remarks loaded from JSON file 'standard_postcode_validator.json'
DoubleDigitAreaSubdistricts = []
    ## Checks that a postcode in a double digit area does not use an unsupported subdistrict character.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckDoubleDigitAreaSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
if len(postcode.outward_area) == 2:
allowed_subdistricts = cls.DoubleDigitAreaSubdistricts
subdistrict = postcode.outward_subdistrict
impacted_by_rule = not subdistrict in allowed_subdistricts
return impacted_by_rule
    ## Characters that are not used in the unit string.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
UnitExcludes = []
    ## Checks that the first character of a postcode's unit string is not an excluded character.
    # @remarks we check the first/second unit characters separately to provide more comprehensive errors.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckFirstUnitCharacterExcludes(cls, postcode):
character = postcode.inward_unit[0]
impacted_by_rule = character in cls.UnitExcludes
return impacted_by_rule
    ## Checks that the second character of a postcode's unit string is not an excluded character.
    # @remarks we check the first/second unit characters separately to provide more comprehensive errors.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSecondUnitCharacterExcludes(cls, postcode):
character = postcode.inward_unit[1]
impacted_by_rule = character in cls.UnitExcludes
return impacted_by_rule
## Loads various static members used for validation of standard postcodes from
# a JSON file - this is expected to be co-located with this class.
def load_validator_params_from_json():
from json import load
from os.path import dirname, join
json_configuration_file = join( dirname(__file__), "standard_postcode_validator.json" )
with open(json_configuration_file, 'r') as file_handle:
config_json = load(file_handle)
StandardPostcodeValidator.AreasWithDistrictZero = config_json['has-district-zero']
StandardPostcodeValidator.AreasWithoutDistrictTen = config_json['no-district-ten']
StandardPostcodeValidator.AreasWithOnlyDoubleDigitDistricts = config_json['double-digit-districts']
StandardPostcodeValidator.AreasWithOnlySingleDigitDistricts = config_json['single-digit-districts']
StandardPostcodeValidator.SingleDigitAreaSubdistricts = config_json['single-digit-area-subdistricts']
StandardPostcodeValidator.DoubleDigitAreaSubdistricts = config_json['double-digit-area-subdistricts']
StandardPostcodeValidator.SecondPositionExcludes = config_json['second-position-excludes']
StandardPostcodeValidator.FirstPositionExcludes = config_json['first-position-excludes']
StandardPostcodeValidator.UnitExcludes = config_json['unit-excludes']
subdivision_map = config_json["subdivided-districts"]
StandardPostcodeValidator.AreasWithSubdistricts = { k: {
int(k1): v1 for k1, v1 in v.items()
} for k, v in subdivision_map.items() }
load_validator_params_from_json()
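## A minimal illustrative sketch (not invoked anywhere): the Check* class
## methods above only read the outward/inward attributes of the postcode, so a
## simple stand-in object is enough to exercise them. SimpleNamespace and the
## example values used here are assumptions for demonstration purposes only.
def _example_validator_usage():
    from types import SimpleNamespace
    fake_postcode = SimpleNamespace(
        outward_area="BR", outward_district=12,
        outward_subdistrict=None, inward_unit="AB")
    # True means the postcode breaks the single-digit-district rule for its area.
    return StandardPostcodeValidator.CheckAreasWithOnlySingleDigitDistricts(
        fake_postcode)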
if __name__ == "__main__":
##
## If this is the main entry point - someone might be a little lost?
##
print(f"{__file__} ran, but doesn't do anything on its own.")
print(f"Check 'https://www.github.com/wintersdeep/wintersdeep_postcode' for usage.") | 47.148855 | 117 | 0.727273 | [
"MIT"
] | WintersDeep/wintersdeep_postcode | wintersdeep_postcode/postcode_types/standard_postcode/standard_postcode_validator.py | 12,360 | Python |
# Generated by Django 2.2.8 on 2019-12-20 17:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m2mbasic', '0002_auto_20191220_1716'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('similar_products', models.ManyToManyField(related_name='_product_similar_products_+', to='m2mbasic.Product')),
],
),
]
| 29.272727 | 128 | 0.607143 | [
"MIT"
] | djangojeng-e/TIL | django/models/m2mbasic/migrations/0003_product.py | 644 | Python |
"""
Tests for the :mod:`fiftyone.utils.cvat` module.
You must run these tests interactively as follows::
python tests/intensive/cvat_tests.py
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from bson import ObjectId
from collections import defaultdict
import numpy as np
import os
import unittest
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.utils.cvat as fouc
import fiftyone.zoo as foz
from fiftyone.core.expressions import ViewField as F
def _find_shape(anno_json, label_id):
shape = _parse_shapes(anno_json["shapes"], label_id)
if shape is not None:
return shape
for track in anno_json["tracks"]:
shape = _parse_shapes(track["shapes"], label_id)
if shape is not None:
return shape
def _parse_shapes(shapes, label_id):
for shape in shapes:
for attr in shape["attributes"]:
if attr["value"] == label_id:
return shape
def _get_shape(api, task_id, label_id):
anno_json = api.get(api.task_annotation_url(task_id)).json()
return _find_shape(anno_json, label_id)
def _delete_shape(api, task_id, label_id):
anno_json = api.get(api.task_annotation_url(task_id)).json()
shape = _find_shape(anno_json, label_id)
if shape is not None:
del_json = {"version": 1, "tags": [], "shapes": [shape], "tracks": []}
del_url = api.task_annotation_url(task_id) + "?action=delete"
api.patch(del_url, json=del_json)
def _get_label(api, task_id, label=None):
attr_id_map, class_id_map = api._get_attr_class_maps(task_id)
if isinstance(label, str):
label = class_id_map[label]
else:
label = list(class_id_map.values())[0]
return label
def _create_annotation(
api, task_id, shape=None, tag=None, track=None, points=None, _type=None
):
if points is None:
points = [10, 20, 30, 40]
if _type is None:
_type = "rectangle"
shapes = []
tags = []
tracks = []
if shape is not None:
if not isinstance(shape, dict):
label = _get_label(api, task_id, label=shape)
shape = {
"type": _type,
"frame": 0,
"label_id": label,
"group": 0,
"attributes": [],
"points": points,
"occluded": False,
}
shapes = [shape]
if tag is not None:
if not isinstance(tag, dict):
label = _get_label(api, task_id, label=tag)
tag = {
"frame": 0,
"label_id": label,
"group": 0,
"attributes": [],
}
tags = [tag]
if track is not None:
if not isinstance(track, dict):
label = _get_label(api, task_id, label=track)
if isinstance(track, tuple):
start, end = track
else:
start, end = 0, -1
track = {
"frame": start,
"label_id": label,
"group": 0,
"shapes": [
{
"type": _type,
"occluded": False,
"points": points,
"frame": start,
"outside": False,
"attributes": [],
"z_order": 0,
}
],
"attributes": [],
}
if end > start:
track["shapes"].append(
{
"type": _type,
"occluded": False,
"points": points,
"frame": end,
"outside": True,
"attributes": [],
"z_order": 0,
}
)
tracks.append(track)
create_json = {
"version": 1,
"tags": tags,
"shapes": shapes,
"tracks": tracks,
}
create_url = api.task_annotation_url(task_id) + "?action=create"
api.patch(create_url, json=create_json)
def _update_shape(
api,
task_id,
label_id,
label=None,
points=None,
attributes=None,
occluded=None,
):
anno_json = api.get(api.task_annotation_url(task_id)).json()
shape = _find_shape(anno_json, label_id)
if shape is not None:
if points is not None:
shape["points"] = points
if occluded is not None:
shape["occluded"] = occluded
if attributes is not None:
attr_id_map, class_id_map = api._get_attr_class_maps(task_id)
if label is None:
label_id = shape["label_id"]
attr_id_map = attr_id_map[label_id]
else:
label_id = class_id_map[label]
prev_attr_id_map = attr_id_map[shape["label_id"]]
prev_attr_id_map = {v: k for k, v in prev_attr_id_map.items()}
attr_id_map = attr_id_map[label_id]
shape["label_id"] = label_id
for attr in shape["attributes"]:
spec = prev_attr_id_map[attr["spec_id"]]
attr["spec_id"] = attr_id_map[spec]
for attr_name, attr_val in attributes:
if attr_name in attr_id_map:
shape["attributes"].append(
{"spec_id": attr_id_map[attr_name], "value": attr_val}
)
update_json = {
"version": 1,
"tags": [],
"shapes": [shape],
"tracks": [],
}
update_url = api.task_annotation_url(task_id) + "?action=update"
api.patch(update_url, json=update_json)
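# Note: the helpers above emulate a human annotator by calling the CVAT REST
# annotation endpoints directly (PATCH with ?action=create/update/delete and a
# plain GET for reads), so the tests below can check that load_annotations()
# picks up additions, edits and deletions without opening the CVAT UI.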
class CVATTests(unittest.TestCase):
def test_upload(self):
# Test images
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
self.assertIsNotNone(_get_shape(api, task_id, shape_id))
sample_id = list(list(results.frame_id_map.values())[0].values())[0][
"sample_id"
]
self.assertEqual(sample_id, dataset.first().id)
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
prev_ids,
dataset.values("ground_truth.detections.id", unwind=True),
)
# Test Videos
dataset = foz.load_zoo_dataset(
"quickstart-video", max_samples=1
).clone()
prev_ids = dataset.values(
"frames.detections.detections.id", unwind=True
)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="frames.detections",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().frames[1].detections.detections[0].id
self.assertIsNotNone(_get_shape(api, task_id, shape_id))
sample_id = list(list(results.frame_id_map.values())[0].values())[0][
"sample_id"
]
self.assertEqual(sample_id, dataset.first().id)
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
prev_ids,
dataset.values("frames.detections.detections.id", unwind=True),
)
def test_detection_labelling(self):
dataset = (
foz.load_zoo_dataset("quickstart")
.select_fields("ground_truth")
.clone()
)
# Get a subset that contains at least 2 objects
dataset = dataset.match(F("ground_truth.detections").length() > 1)[
:2
].clone()
previous_dataset = dataset.clone()
previous_label_ids = dataset.values(
"ground_truth.detections.id", unwind=True
)
anno_key = "anno_key"
attributes = {"test": {"type": "text"}}
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
attributes=attributes,
)
api = results.connect_to_api()
task_id = results.task_ids[0]
deleted_label_id = previous_label_ids[0]
updated_label_id = previous_label_ids[1]
_delete_shape(api, task_id, deleted_label_id)
_create_annotation(api, task_id, shape=True)
_update_shape(
api, task_id, updated_label_id, attributes=[("test", "1")]
)
dataset.load_annotations(anno_key, cleanup=True)
label_ids = dataset.values("ground_truth.detections.id", unwind=True)
self.assertEqual(len(label_ids), len(previous_label_ids))
added_label_ids = list(set(label_ids) - set(previous_label_ids))
self.assertEqual(len(added_label_ids), 1)
deleted_label_ids = list(set(previous_label_ids) - set(label_ids))
self.assertEqual(len(deleted_label_ids), 1)
updated_sample = dataset.filter_labels(
"ground_truth", F("_id") == ObjectId(updated_label_id)
).first()
prev_updated_sample = previous_dataset.filter_labels(
"ground_truth", F("_id") == ObjectId(updated_label_id)
).first()
self.assertEqual(len(updated_sample.ground_truth.detections), 1)
self.assertEqual(len(prev_updated_sample.ground_truth.detections), 1)
self.assertEqual(
updated_sample.ground_truth.detections[0].id,
prev_updated_sample.ground_truth.detections[0].id,
)
self.assertEqual(updated_sample.ground_truth.detections[0].test, 1)
api.close()
def test_multiple_fields(self):
dataset = foz.load_zoo_dataset(
"open-images-v6",
split="validation",
label_types=["detections", "segmentations", "classifications"],
classes=["Person"],
max_samples=10,
).clone()
prev_dataset = dataset.clone()
anno_key = "anno_key"
label_schema = {
"detections": {},
"segmentations": {"type": "instances"},
"positive_labels": {},
"negative_labels": {},
}
results = dataset.annotate(
anno_key,
backend="cvat",
label_schema=label_schema,
classes=["Person"],
)
api = results.connect_to_api()
task_id = results.task_ids[0]
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def _remove_bbox(dataset, label_field):
view = dataset.set_field(
"%s.detections" % label_field,
F("detections").map(
F().set_field("bounding_box", []).set_field("mask", None)
),
)
return view
# Ensure ids and attrs are equal
view = _remove_bbox(dataset, "detections")
prev_view = _remove_bbox(prev_dataset, "detections")
self.assertListEqual(
view.values("detections", unwind=True),
prev_view.values("detections", unwind=True),
)
view = _remove_bbox(dataset, "segmentations")
prev_view = _remove_bbox(prev_dataset, "segmentations")
self.assertListEqual(
view.values("segmentations", unwind=True),
prev_view.values("segmentations", unwind=True),
)
self.assertListEqual(
dataset.values("positive_labels", unwind=True),
prev_dataset.values("positive_labels", unwind=True),
)
self.assertListEqual(
dataset.values("negative_labels", unwind=True),
prev_dataset.values("negative_labels", unwind=True),
)
def test_task_creation_arguments(self):
dataset = (
foz.load_zoo_dataset("quickstart", max_samples=4)
.select_fields("ground_truth")
.clone()
)
user = fo.annotation_config.backends.get("cvat", {})
user = user.get("username", None)
users = [user] if user is not None else None
anno_key = "anno_key"
bug_tracker = "test_tracker"
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
task_size=2,
segment_size=1,
task_assignee=users,
job_assignees=users,
job_reviewers=users,
issue_tracker=bug_tracker,
)
task_ids = results.task_ids
api = results.connect_to_api()
self.assertEqual(len(task_ids), 2)
for task_id in task_ids:
task_json = api.get(api.task_url(task_id)).json()
self.assertEqual(task_json["bug_tracker"], bug_tracker)
self.assertEqual(task_json["segment_size"], 1)
if user is not None:
self.assertEqual(task_json["assignee"]["username"], user)
for job in api.get(api.jobs_url(task_id)).json():
job_json = api.get(job["url"]).json()
if user is not None:
self.assertEqual(job_json["assignee"]["username"], user)
if api.server_version == 1:
self.assertEqual(
job_json["reviewer"]["username"], user
)
results.print_status()
status = results.get_status()
self.assertEqual(
status["ground_truth"][task_ids[0]]["assignee"]["username"], user,
)
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def test_project(self):
dataset = (
foz.load_zoo_dataset("quickstart", max_samples=2)
.select_fields("ground_truth")
.clone()
)
anno_key = "anno_key"
project_name = "cvat_unittest_project"
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
project_name=project_name,
)
api = results.connect_to_api()
project_id = api.get_project_id(project_name)
self.assertIsNotNone(project_id)
self.assertIn(project_id, results.project_ids)
anno_key2 = "anno_key2"
results2 = dataset.annotate(
anno_key2,
backend="cvat",
label_field="ground_truth",
project_name=project_name,
)
self.assertNotIn(project_id, results2.project_ids)
self.assertIsNotNone(api.get_project_id(project_name))
dataset.load_annotations(anno_key, cleanup=True)
self.assertIsNotNone(api.get_project_id(project_name))
dataset.load_annotations(anno_key2, cleanup=True)
self.assertIsNotNone(api.get_project_id(project_name))
api.delete_project(project_id)
api.close()
api = results.connect_to_api()
self.assertIsNone(api.get_project_id(project_name))
api.close()
def test_example_add_new_label_fields(self):
# Test label field arguments
dataset = foz.load_zoo_dataset("quickstart", max_samples=10).clone()
view = dataset.take(1)
anno_key = "cvat_new_field"
results = view.annotate(
anno_key,
label_field="new_classifications",
label_type="classifications",
classes=["dog", "cat", "person"],
)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(api, task_id, tag="dog")
dataset.load_annotations(anno_key, cleanup=True)
tags = view.first().new_classifications.classifications
num_tags = len(tags)
self.assertEqual(num_tags, 1)
self.assertEqual(tags[0].label, "dog")
# Test label schema
anno_key = "cvat_new_field_schema"
label_schema = {
"new_classifications_2": {
"type": "classifications",
"classes": ["dog", "cat", "person"],
}
}
results = view.annotate(anno_key, label_schema=label_schema)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
api.close()
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(api, task_id, tag="person")
dataset.load_annotations(anno_key, cleanup=True)
tags = view.first().new_classifications_2.classifications
num_tags = len(tags)
self.assertEqual(num_tags, 1)
self.assertEqual(tags[0].label, "person")
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def test_example_restricting_label_edits(self):
dataset = foz.load_zoo_dataset("quickstart").clone()
# Grab a sample that contains at least 2 people
view = dataset.match(
F("ground_truth.detections")
.filter(F("label") == "person")
.length()
> 1
).limit(1)
previous_labels = view.values("ground_truth.detections", unwind=True)
previous_person_labels = view.filter_labels(
"ground_truth", F("label") == "person"
).values("ground_truth.detections", unwind=True)
anno_key = "cvat_edit_restrictions"
# The new attributes that we want to populate
attributes = {
"sex": {"type": "select", "values": ["male", "female"],},
"age": {"type": "text",},
}
results = view.annotate(
anno_key,
label_field="ground_truth",
classes=["person", "test"],
attributes=attributes,
allow_additions=False,
allow_deletions=False,
allow_label_edits=False,
allow_spatial_edits=False,
)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
task_id = results.task_ids[0]
api = results.connect_to_api()
# Delete label
deleted_id = previous_person_labels[0].id
_delete_shape(api, task_id, deleted_id)
# Add label
_create_annotation(api, task_id, shape="person")
# Edit label and bounding box
edited_id = previous_person_labels[1].id
_update_shape(
api,
task_id,
edited_id,
label="test",
points=[10, 20, 30, 40],
attributes=[("sex", "male")],
)
dataset.load_annotations(anno_key, cleanup=True)
api.close()
labels = view.values("ground_truth.detections", unwind=True)
person_labels = view.filter_labels(
"ground_truth", F("label") == "person"
).values("ground_truth.detections", unwind=True)
self.assertListEqual(
[d.label for d in labels], [d.label for d in previous_labels],
)
self.assertListEqual(
[d.bounding_box for d in labels],
[d.bounding_box for d in previous_labels],
)
self.assertListEqual(
[d.id for d in labels], [d.id for d in previous_labels],
)
self.assertEqual(
len(dataset.filter_labels("ground_truth", F("sex") == "male")), 1,
)
def test_issue_1634(self):
# tests: https://github.com/voxel51/fiftyone/issues/1634
dataset = (
foz.load_zoo_dataset("quickstart-video", max_samples=1)
.select_fields("frames.detections")
.clone()
)
anno_key = "issue_1634_test"
results = dataset.annotate(
anno_key,
label_field="frames.ground_truth",
label_type="detections",
classes=["test"],
)
task_id = results.task_ids[0]
api = results.connect_to_api()
# Create overlapping tracks of different type
_create_annotation(
api,
task_id,
track=(0, 30),
_type="polygon",
points=[10, 20, 40, 30, 50, 60],
)
_create_annotation(
api, task_id, track=(20, 40),
)
api.close()
imported_dataset = fo.Dataset()
with etau.TempDir() as tmp:
fouc.import_annotations(
imported_dataset,
task_ids=[task_id],
data_path=tmp,
download_media=True,
)
imported_dataset.compute_metadata()
self.assertEqual(
imported_dataset.first().metadata.total_frame_count,
dataset.first().metadata.total_frame_count,
)
imported_dataset.export(
export_dir=tmp, dataset_type=fo.types.CVATVideoDataset
)
filename = os.path.splitext(
os.path.basename(imported_dataset.first().filepath)
)[0]
labels_filepath = os.path.join(tmp, "labels", "%s.xml" % filename)
with open(labels_filepath, "r") as f:
label_file_info = f.read()
track_1 = '<track id="1" label="test">'
track_2 = '<track id="2" label="test">'
polygon_frame_0 = '<polygon frame="0"'
polygon_frame_30 = '<polygon frame="30"'
box_frame_20 = '<box frame="20"'
box_frame_40 = '<box frame="40"'
self.assertTrue(track_1 in label_file_info)
self.assertTrue(track_2 in label_file_info)
self.assertTrue(polygon_frame_0 in label_file_info)
self.assertTrue(polygon_frame_30 in label_file_info)
self.assertTrue(box_frame_20 in label_file_info)
self.assertTrue(box_frame_40 in label_file_info)
cvat_video_dataset = fo.Dataset.from_dir(
dataset_dir=tmp, dataset_type=fo.types.CVATVideoDataset,
)
detections = cvat_video_dataset.values(
"frames.detections", unwind=True
)
detections = [i for i in detections if i is not None]
self.assertEqual(len(detections), 20)
polylines = cvat_video_dataset.values(
"frames.polylines", unwind=True
)
polylines = [i for i in polylines if i is not None]
self.assertEqual(len(polylines), 30)
dataset.load_annotations(anno_key, cleanup=True)
def test_deleted_tasks(self):
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
api.delete_task(task_id)
status = results.get_status()
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
dataset.values("ground_truth.detections.id", unwind=True),
prev_ids,
)
def test_occluded_attr(self):
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
anno_key = "cvat_occluded_widget"
# Populate a new `occluded` attribute on the existing `ground_truth` labels
# using CVAT's occluded widget
label_schema = {
"ground_truth": {"attributes": {"occluded": {"type": "occluded",}}}
}
results = dataset.annotate(
anno_key, label_schema=label_schema, backend="cvat"
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
_update_shape(api, task_id, shape_id, occluded=True)
dataset.load_annotations(anno_key, cleanup=True)
id_occ_map = dict(
zip(
*dataset.values(
[
"ground_truth.detections.id",
"ground_truth.detections.occluded",
],
unwind=True,
)
)
)
self.assertTrue(id_occ_map.pop(shape_id))
self.assertFalse(any(id_occ_map.values()))
def test_map_view_stage(self):
dataset = (
foz.load_zoo_dataset("quickstart")
.select_fields("ground_truth")
.clone()
)
# Get a subset that contains at least 2 objects
dataset = dataset.match(F("ground_truth.detections").length() > 1)[
:1
].clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
# Set one of the detections to upper case
sample = dataset.first()
label = sample.ground_truth.detections[0].label
sample.ground_truth.detections[0].label = label.upper()
sample.save()
prev_unchanged_label = dataset.select_labels(ids=prev_ids[1]).values(
"ground_truth.detections.label", unwind=True
)[0]
labels = dataset.distinct("ground_truth.detections.label")
label_map = {l: l.upper() for l in labels}
view = dataset.map_labels("ground_truth", label_map)
anno_key = "anno_key"
results = view.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
deleted_id = prev_ids[0]
self.assertIsNotNone(_get_shape(api, task_id, deleted_id))
_create_annotation(api, task_id, shape=labels[0].upper())
_delete_shape(api, task_id, deleted_id)
dataset.load_annotations(anno_key, cleanup=True)
loaded_ids = dataset.values("ground_truth.detections.id", unwind=True)
self.assertEqual(len(loaded_ids), len(prev_ids))
# We expect existing labels to have been updated according to the
# mapping
unchanged_label = dataset.select_labels(ids=prev_ids[1]).values(
"ground_truth.detections.label", unwind=True
)[0]
self.assertNotEqual(unchanged_label, prev_unchanged_label)
# Expect newly created labels to retain whatever class they were
# annotated as
new_id = list(set(loaded_ids) - set(prev_ids))[0]
new_label = dataset.select_labels(ids=new_id).values(
"ground_truth.detections.label", unwind=True
)[0]
self.assertEqual(labels[0].upper(), new_label)
def test_dest_field(self):
# Test images
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_labels = dataset.values("ground_truth", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="ground_truth")
dataset.load_annotations(
anno_key, cleanup=True, dest_field="test_field",
)
self.assertListEqual(
prev_labels, dataset.values("ground_truth", unwind=True),
)
self.assertListEqual(
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
sorted(dataset.values("test_field.detections.id", unwind=True)),
)
# Test dict
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_labels = dataset.values("ground_truth", unwind=True)
anno_key = "test_dest_field"
label_schema = {
"ground_truth": {},
"new_points": {"type": "keypoints", "classes": ["test"],},
"new_polygon": {"type": "polygons", "classes": ["test2"],},
}
results = dataset.annotate(anno_key, label_schema=label_schema)
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(
api,
task_id,
shape="test",
_type="points",
points=[10, 20, 40, 30, 50, 60],
)
_create_annotation(
api,
task_id,
shape="test2",
_type="polygon",
points=[10, 20, 40, 30, 50, 60],
)
dest_field = {
"ground_truth": "test_field_1",
"new_points": "test_field_2",
}
dataset.load_annotations(
anno_key, cleanup=True, dest_field=dest_field,
)
self.assertFalse(dataset.has_sample_field("new_points"))
self.assertTrue(dataset.has_sample_field("new_polygon"))
self.assertTrue(dataset.has_sample_field("test_field_1"))
self.assertTrue(dataset.has_sample_field("test_field_2"))
self.assertListEqual(
prev_labels, dataset.values("ground_truth", unwind=True),
)
self.assertListEqual(
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
sorted(dataset.values("test_field_1.detections.id", unwind=True)),
)
self.assertEqual(
len(dataset.values("test_field_2.keypoints.id", unwind=True)), 1,
)
self.assertEqual(
len(dataset.values("new_polygon.polylines.id", unwind=True)), 1,
)
# Test modification
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="ground_truth")
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
_delete_shape(api, task_id, shape_id)
_create_annotation(api, task_id, shape=True)
_create_annotation(
api,
task_id,
shape=True,
_type="points",
points=[10, 20, 40, 30, 50, 60],
)
dataset.load_annotations(
anno_key, cleanup=True, dest_field="test_field", unexpected="keep",
)
self.assertListEqual(
sorted(prev_ids),
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
)
test_ids = dataset.values("test_field.detections.id", unwind=True)
self.assertEqual(len(set(test_ids) - set(prev_ids)), 1)
self.assertEqual(len(set(prev_ids) - set(test_ids)), 1)
# Test videos
dataset = foz.load_zoo_dataset(
"quickstart-video", max_samples=1
).clone()
prev_labels = dataset.values("frames.detections", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="frames.detections")
dataset.load_annotations(
anno_key, cleanup=True, dest_field="frames.test_field",
)
self.assertListEqual(
prev_labels, dataset.values("frames.detections", unwind=True),
)
self.assertListEqual(
sorted(
dataset.values("frames.detections.detections.id", unwind=True)
),
sorted(
dataset.values("frames.test_field.detections.id", unwind=True)
),
)
if __name__ == "__main__":
fo.config.show_progress_bars = False
unittest.main(verbosity=2)
| 33.348739 | 83 | 0.572886 | [
"Apache-2.0"
] | bisraelsen/fiftyone | tests/intensive/cvat_tests.py | 31,748 | Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
class StringProto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.value_))
return n + 1
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
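    # The magic numbers in OutputUnchecked/TryMerge are protobuf wire-format
    # tags: tag = (field_number << 3) | wire_type, so 10 is field 1 encoded as a
    # length-delimited string, and 8 (used by the numeric messages below) is
    # field 1 encoded as a varint.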
out.putVarInt32(10)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.STRING,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class Integer32Proto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.value_)
return n + 1
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatInt32(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class Integer64Proto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.value_)
return n + 1
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatInt64(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class BoolProto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
return n + 2
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putBoolean(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatBool(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class DoubleProto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0.0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
return n + 9
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(9)
out.putDouble(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 9:
self.set_value(d.getDouble())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormat(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.DOUBLE,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class VoidProto(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n + 0
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
_TEXT = (
"ErrorCode",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
__all__ = ['StringProto','Integer32Proto','Integer64Proto','BoolProto','DoubleProto','VoidProto']
| 22.085417 | 97 | 0.651637 | [
"Apache-2.0"
] | Arachnid/google_appengine | google/appengine/api/api_base_pb.py | 10,601 | Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_data
import c3d_model
import numpy as np
# Basic model parameters as external flags.
flags = tf.app.flags
gpu_num = 1
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,c3d_model.NUM_FRAMES_PER_CLIP,c3d_model.CROP_SIZE,
c3d_model.CROP_SIZE,c3d_model.CHANNELS))
labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
return images_placeholder, labels_placeholder
def _variable_on_cpu(name, shape, initializer):
#with tf.device('/cpu:%d' % cpu_id):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.nn.l2_loss(var) * wd
tf.add_to_collection('losses', weight_decay)
return var
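# Note: the weight-decay terms accumulated in the 'losses' collection above only
# take effect if the training graph folds them into its objective, e.g.
# total_loss = cross_entropy + tf.add_n(tf.get_collection('losses')); this
# inference-only script never reads that collection, so wd is effectively unused here.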
def run_test(ds_dir, mean_file, model_name, test_list_file, batch_size):
tf.reset_default_graph()
try:
FLAGS = flags.FLAGS
FLAGS.batch_size = batch_size
except:
flags.DEFINE_integer('batch_size', batch_size, 'Batch size.')
FLAGS = flags.FLAGS
#model_name = "./models-5sec/c3d_ucf_model-4999"
#model_name = "./models.5sec/c3d_ucf_model-75450"
#model_name = "./models-1sec/c3d_ucf_model-4999"
#model_name = "./models.5sec.summarized.1sec/c3d_ucf_model-4999"
#model_name = "./models-multi-5sec-5sec_sum_1/c3d_ucf_model-4999"
#model_name = "./models-multi-5-5sum1/c3d_ucf_model-9999"
num_test_videos = len(list(open(test_list_file,'r')))
print("Number of test videos={}".format(num_test_videos))
  # max_bt_sz = -1
#
# for factor in range(1, 31):
# if num_test_videos%factor==0:
# max_bt_sz=factor
# if max_bt_sz == 1:
# print("no good batchsize available, setting to 25")
# max_bt_sz = 20
# FLAGS.batch_size = max_bt_sz
# print("batch size:", FLAGS.batch_size)
# Get the sets of images and labels for testing
images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size * gpu_num)
with tf.variable_scope('var_name') as var_scope:
weights = {
'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04, 0.00),
'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04, 0.00),
'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04, 0.00),
'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04, 0.00),
'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04, 0.00),
'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04, 0.00),
'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.04, 0.005)
}
biases = {
'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
'bd1': _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
'bd2': _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04, 0.0),
}
logits = []
for gpu_index in range(0, gpu_num):
with tf.device('/gpu:%d' % gpu_index):
logit = c3d_model.inference_c3d(images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1)
* FLAGS.batch_size,:,:,:,:],
0,
FLAGS.batch_size,
weights,
biases)
logits.append(logit)
logits = tf.concat(logits,0)
norm_score = tf.nn.softmax(logits)
saver = tf.train.Saver()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# Restoring a saved model.
  if ".meta" not in model_name:
saver = tf.train.import_meta_graph(model_name+'.meta')
else:
# saver = tf.train.import_meta_graph(model_name)
var_list = [v for v in tf.trainable_variables()]
    saver = tf.train.Saver(list(weights.values()) + list(biases.values()))
saver.restore(sess, model_name)
# And then after everything is built, start the testing loop.
bufsize = 0
write_file = open("predict_ret.txt", "w+", bufsize)
next_start_pos = 0
all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)
print ("num_test_videos, batch_size, gpu_num,all steps", num_test_videos, FLAGS.batch_size, gpu_num, all_steps)
total_testing_duration = 0
for step in range(all_steps):
# Fill a feed dictionary with the actual set of images and labels
# for this particular testing step.
start_time = time.time()
# try:
test_images, test_labels, next_start_pos, _, valid_len = \
input_data.read_clip_and_label(
ds_dir,
mean_file,
test_list_file,
FLAGS.batch_size * gpu_num,
start_pos=next_start_pos,
num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP
)
# except:
# print("exception occured loading at step:", step)
# try:
predict_score = norm_score.eval(
session=sess,
feed_dict={images_placeholder: test_images}
)
# except:
# print("exception occured prediction at step:", step)
duration = time.time() - start_time
print('Step %d: %.3f sec' % (step, duration), 'next start index:', next_start_pos)
total_testing_duration += duration
# try:
for i in range(0, valid_len):
      true_label = test_labels[i]
      top1_predicted_label = np.argmax(predict_score[i])
      # Write results: true label, class prob for true label, predicted label, class prob for predicted label
      write_file.write('{}, {}, {}, {}\n'.format(
        true_label,
        predict_score[i][true_label],
        top1_predicted_label,
        predict_score[i][top1_predicted_label]))
# except:
# print ("exception occured saving predictions at step:", step)
# break # test only 1 batch
print('Prediction time taken =', total_testing_duration)
import datetime
now = datetime.datetime.now()
with open('stats.txt', 'a') as f:
f.write(now.strftime("%Y-%m-%d %H:%M\n"))
f.write(" testing time:"+ str(total_testing_duration) + "\n")
write_file.close()
print("done")
import sys
def main(_):
# run_test(sys.argv[1])
ds_dir = "/home/bassel/data/office-actions/office_actions_19/short_clips/resized_frms"
mean_file = "../c3d_data_preprocessing/oa_kinetics_calculated_mean.npy"
model_name = "c3d_ucf_model-14698"
testing_file = ""
TESTING_BATCH_SIZE = 16
run_test(ds_dir, mean_file, "model/" + model_name, testing_file, TESTING_BATCH_SIZE)
if __name__ == '__main__':
tf.app.run()
| 44.359833 | 188 | 0.581305 | [
"Apache-2.0"
] | b-safwat/multi_action_recognition | c3d_model/predict_c3d_ucf101.py | 10,602 | Python |
#!/usr/bin/env python
"""GRR HTTP server implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import hashlib
import hmac
import logging
import os
import string
from cryptography.hazmat.primitives import constant_time
from future.builtins import int
from future.builtins import str
import jinja2
import psutil
from typing import Text
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing as werkzeug_routing
from werkzeug import wrappers as werkzeug_wrappers
from werkzeug import wsgi as werkzeug_wsgi
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.util import precondition
from grr_response_server import access_control
from grr_response_server import server_logging
from grr_response_server.gui import http_api
from grr_response_server.gui import webauth
CSRF_DELIMITER = b":"
CSRF_TOKEN_DURATION = rdfvalue.Duration("10h")
def GenerateCSRFToken(user_id, time):
"""Generates a CSRF token based on a secret key, id and time."""
precondition.AssertType(user_id, Text)
precondition.AssertOptionalType(time, int)
time = time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
secret = config.CONFIG.Get("AdminUI.csrf_secret_key", None)
if secret is None:
raise ValueError("CSRF secret not available.")
digester = hmac.new(secret.encode("ascii"), digestmod=hashlib.sha256)
digester.update(user_id.encode("ascii"))
digester.update(CSRF_DELIMITER)
digester.update(str(time).encode("ascii"))
digest = digester.digest()
token = base64.urlsafe_b64encode(b"%s%s%d" % (digest, CSRF_DELIMITER, time))
return token.rstrip(b"=")
def StoreCSRFCookie(user, response):
"""Decorator for WSGI handler that inserts CSRF cookie into response."""
csrf_token = GenerateCSRFToken(user, None)
response.set_cookie(
"csrftoken", csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
def ValidateCSRFTokenOrRaise(request):
"""Decorator for WSGI handler that checks CSRF cookie against the request."""
# CSRF check doesn't make sense for GET/HEAD methods, because they can
# (and are) used when downloading files through <a href> links - and
# there's no way to set X-CSRFToken header in this case.
if request.method in ("GET", "HEAD"):
return
# In the ideal world only JavaScript can be used to add a custom header, and
# only within its origin. By default, browsers don't allow JavaScript to
# make cross origin requests.
#
# Unfortunately, in the real world due to bugs in browsers plugins, it can't
# be guaranteed that a page won't set an HTTP request with a custom header
# set. That's why we also check the contents of a header via an HMAC check
# with a server-stored secret.
#
# See for more details:
# https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet
# (Protecting REST Services: Use of Custom Request Headers).
csrf_token = request.headers.get("X-CSRFToken", "").encode("ascii")
if not csrf_token:
logging.info("Did not find headers CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("CSRF token is missing")
try:
decoded = base64.urlsafe_b64decode(csrf_token + b"==")
digest, token_time = decoded.rsplit(CSRF_DELIMITER, 1)
token_time = int(token_time)
except (TypeError, ValueError):
logging.info("Malformed CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Malformed CSRF token")
if len(digest) != hashlib.sha256().digest_size:
logging.info("Invalid digest size for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Malformed CSRF token digest")
expected = GenerateCSRFToken(request.user, token_time)
if not constant_time.bytes_eq(csrf_token, expected):
logging.info("Non-matching CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Non-matching CSRF token")
current_time = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
if current_time - token_time > CSRF_TOKEN_DURATION.microseconds:
logging.info("Expired CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Expired CSRF token")
class RequestHasNoUser(AttributeError):
"""Error raised when accessing a user of an unautenticated request."""
class HttpRequest(werkzeug_wrappers.Request):
"""HTTP request object to be used in GRR."""
def __init__(self, *args, **kwargs):
super(HttpRequest, self).__init__(*args, **kwargs)
self._user = None
self.token = None
self.timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
self.method_metadata = None
self.parsed_args = None
@property
def user(self):
if self._user is None:
raise RequestHasNoUser(
"Trying to access Request.user while user is unset.")
if not self._user:
raise RequestHasNoUser(
"Trying to access Request.user while user is empty.")
return self._user
@user.setter
def user(self, value):
if not isinstance(value, Text):
message = "Expected instance of '%s' but got value '%s' of type '%s'"
message %= (Text, value, type(value))
raise TypeError(message)
self._user = value
def LogAccessWrapper(func):
"""Decorator that ensures that HTTP access is logged."""
def Wrapper(request, *args, **kwargs):
"""Wrapping function."""
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception: # pylint: disable=g-broad-except
# This should never happen: wrapped function is supposed to handle
# all possible exceptions and generate a proper Response object.
# Still, handling exceptions here to guarantee that the access is logged
# no matter what.
response = werkzeug_wrappers.Response("", status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
return Wrapper
def EndpointWrapper(func):
return webauth.SecurityCheck(LogAccessWrapper(func))
class AdminUIApp(object):
"""Base class for WSGI GRR app."""
def __init__(self):
self.routing_map = werkzeug_routing.Map()
self.routing_map.add(
werkzeug_routing.Rule(
"/",
methods=["HEAD", "GET"],
endpoint=EndpointWrapper(self._HandleHomepage)))
self.routing_map.add(
werkzeug_routing.Rule(
"/api/<path:path>",
methods=["HEAD", "GET", "POST", "PUT", "PATCH", "DELETE"],
endpoint=EndpointWrapper(self._HandleApi)))
self.routing_map.add(
werkzeug_routing.Rule(
"/help/<path:path>",
methods=["HEAD", "GET"],
endpoint=EndpointWrapper(self._HandleHelp)))
def _BuildRequest(self, environ):
return HttpRequest(environ)
def _BuildToken(self, request, execution_time):
"""Build an ACLToken from the request."""
token = access_control.ACLToken(
username=request.user,
reason=request.args.get("reason", ""),
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime.Now() + execution_time)
for field in ["Remote_Addr", "X-Forwarded-For"]:
remote_addr = request.headers.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token
def _HandleHomepage(self, request):
"""Renders GRR home page by rendering base.html Jinja template."""
_ = request
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.CONFIG["AdminUI.template_root"]),
autoescape=True)
create_time = psutil.Process(os.getpid()).create_time()
context = {
"heading":
config.CONFIG["AdminUI.heading"],
"report_url":
config.CONFIG["AdminUI.report_url"],
"help_url":
config.CONFIG["AdminUI.help_url"],
"timestamp":
utils.SmartStr(create_time),
"use_precompiled_js":
config.CONFIG["AdminUI.use_precompiled_js"],
# Used in conjunction with FirebaseWebAuthManager.
"firebase_api_key":
config.CONFIG["AdminUI.firebase_api_key"],
"firebase_auth_domain":
config.CONFIG["AdminUI.firebase_auth_domain"],
"firebase_auth_provider":
config.CONFIG["AdminUI.firebase_auth_provider"],
"grr_version":
config.CONFIG["Source.version_string"]
}
template = env.get_template("base.html")
response = werkzeug_wrappers.Response(
template.render(context), mimetype="text/html")
# For a redirect-based Firebase authentication scheme we won't have any
# user information at this point - therefore checking if the user is
# present.
try:
StoreCSRFCookie(request.user, response)
except RequestHasNoUser:
pass
return response
def _HandleApi(self, request):
"""Handles API requests."""
# Checks CSRF token. CSRF token cookie is updated when homepage is visited
# or via GetPendingUserNotificationsCount API call.
ValidateCSRFTokenOrRaise(request)
response = http_api.RenderHttpResponse(request)
# GetPendingUserNotificationsCount is an API method that is meant
# to be invoked very often (every 10 seconds). So it's ideal
# for updating the CSRF token.
# We should also store the CSRF token if it wasn't yet stored at all.
if (("csrftoken" not in request.cookies) or response.headers.get(
"X-API-Method", "") == "GetPendingUserNotificationsCount"):
StoreCSRFCookie(request.user, response)
return response
def _RedirectToRemoteHelp(self, path):
"""Redirect to GitHub-hosted documentation."""
allowed_chars = set(string.ascii_letters + string.digits + "._-/")
if not set(path) <= allowed_chars:
raise RuntimeError("Unusual chars in path %r - "
"possible exploit attempt." % path)
target_path = os.path.join(config.CONFIG["AdminUI.docs_location"], path)
# We have to redirect via JavaScript to have access to and to preserve the
# URL hash. We don't know the hash part of the url on the server.
return werkzeug_wrappers.Response(
"""
<script>
var friendly_hash = window.location.hash;
window.location = '%s' + friendly_hash;
</script>
""" % target_path,
mimetype="text/html")
def _HandleHelp(self, request):
"""Handles help requests."""
help_path = request.path.split("/", 2)[-1]
if not help_path:
raise werkzeug_exceptions.Forbidden("Error: Invalid help path.")
# Proxy remote documentation.
return self._RedirectToRemoteHelp(help_path)
@werkzeug_wsgi.responder
def __call__(self, environ, start_response):
"""Dispatches a request."""
request = self._BuildRequest(environ)
matcher = self.routing_map.bind_to_environ(environ)
try:
endpoint, _ = matcher.match(request.path, request.method)
return endpoint(request)
except werkzeug_exceptions.NotFound as e:
logging.info("Request for non existent url: %s [%s]", request.path,
request.method)
return e
except werkzeug_exceptions.HTTPException as e:
logging.exception("http exception: %s [%s]", request.path, request.method)
return e
def WSGIHandler(self):
"""Returns GRR's WSGI handler."""
sdm = werkzeug_wsgi.SharedDataMiddleware(self, {
"/": config.CONFIG["AdminUI.document_root"],
})
# Use DispatcherMiddleware to make sure that SharedDataMiddleware is not
# used at all if the URL path doesn't start with "/static". This is a
# workaround for cases when unicode URLs are used on systems with
# non-unicode filesystems (as detected by Werkzeug). In this case
# SharedDataMiddleware may fail early while trying to convert the
# URL into the file path and not dispatch the call further to our own
# WSGI handler.
return werkzeug_wsgi.DispatcherMiddleware(self, {
"/static": sdm,
})
| 34.764368 | 92 | 0.704827 | [
"Apache-2.0"
] | Codehardt/grr | grr/server/grr_response_server/gui/wsgiapp.py | 12,098 | Python |
# coding: utf8
# try something like
def index():
rows = db((db.activity.type=='stand')&(db.activity.status=='accepted')).select()
if rows:
return dict(projects=rows)
else:
return plugin_flatpage()
| 23.818182 | 84 | 0.641221 | [
"BSD-3-Clause"
] | bkahlerventer/web2conf | controllers/stands.py | 262 | Python |
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
    update epsilon to a new value, if negative no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to chose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon to a new value, if negative no update happens
(default: no update)
reset_ph: bool
reset the perturbed policy by sampling a new perturbation
update_param_noise_threshold_ph: float
the desired threshold for the difference between non-perturbed and perturbed policy
update_param_noise_scale_ph: bool
whether or not to update the scale of the noise for the next time it is re-perturbed
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
    importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example, for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""Returns the name of current scope as a string, e.g. deepq/q_func"""
return tf.compat.v1.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
if var not in tf.compat.v1.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name="update_eps")
eps = tf.compat.v1.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None,
param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
        a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.compat.v1.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.compat.v1.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.compat.v1.placeholder(tf.bool, (), name="reset")
eps = tf.compat.v1.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.compat.v1.get_variable("param_noise_scale", (),
initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.compat.v1.get_variable("param_noise_threshold", (),
initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.compat.v1.assign(perturbed_var, var + tf.compat.v1.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
# Do not perturb, just assign.
                    op = tf.compat.v1.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(
tf.nn.softmax(q_values) * (tf.compat.v1.log(tf.nn.softmax(q_values)) - tf.compat.v1.log(tf.nn.softmax(q_values_adaptive))),
axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(
inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph,
update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False,
update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True,
update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from optimized Q function to the target Q function.
        See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.compat.v1.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.compat.v1.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.compat.v1.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.compat.v1.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.compat.v1.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=tf.compat.v1.get_variable_scope().name + "/q_func")
        # target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=tf.compat.v1.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values}
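# Illustrative sketch, not part of this module: typical wiring of the functions
# returned by build_train, following the module docstring. `my_obs_input` and
# `my_q_network` are caller-supplied placeholders (e.g. built from
# baselines.deepq models/utils); replay buffer, environment stepping and the
# surrounding TF session (e.g. U.make_session) are assumed to exist already.
def _example_training_setup(my_obs_input, my_q_network, num_actions):
    act, train, update_target, debug = build_train(
        make_obs_ph=my_obs_input,
        q_func=my_q_network,
        num_actions=num_actions,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=5e-4),
        gamma=0.99,
    )
    U.initialize()
    update_target()  # start with target network == online network
    # Inside the training loop one would roughly do:
    #   action = act(obs[None], update_eps=exploration_eps)[0]
    #   ... step the environment, store (obs, action, rew, new_obs, done) ...
    #   td_errors = train(obs_t, actions, rewards, obs_tp1, dones, weights)
    #   update_target()  # every N steps
    return act, train, update_target, debug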
| 46.17234 | 135 | 0.65232 | [
"MIT"
] | rwill128/baselines | baselines/deepq/build_graph.py | 21,701 | Python |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
from auto_gen import DBVistrail as _DBVistrail
from auto_gen import DBAdd, DBChange, DBDelete, DBAbstraction, DBGroup, \
DBModule
from id_scope import IdScope
class DBVistrail(_DBVistrail):
def __init__(self, *args, **kwargs):
_DBVistrail.__init__(self, *args, **kwargs)
self.idScope = IdScope(remap={DBAdd.vtType: 'operation',
DBChange.vtType: 'operation',
DBDelete.vtType: 'operation',
DBAbstraction.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
self.idScope.setBeginId('action', 1)
self.db_objects = {}
# keep a reference to the current logging information here
self.log_filename = None
self.log = None
def __copy__(self):
return DBVistrail.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBVistrail.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBVistrail
cp.idScope = copy.copy(self.idScope)
cp.db_objects = copy.copy(self.db_objects)
cp.log_filename = self.log_filename
if self.log is not None:
cp.log = copy.copy(self.log)
else:
cp.log = None
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBVistrail()
new_obj = _DBVistrail.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
if hasattr(old_obj, 'log_filename'):
new_obj.log_filename = old_obj.log_filename
if hasattr(old_obj, 'log'):
new_obj.log = old_obj.log
return new_obj
def update_id_scope(self):
def getOldObjId(operation):
if operation.vtType == 'change':
return operation.db_oldObjId
return operation.db_objectId
def getNewObjId(operation):
if operation.vtType == 'change':
return operation.db_newObjId
return operation.db_objectId
for action in self.db_actions:
self.idScope.updateBeginId('action', action.db_id+1)
if action.db_session is not None:
self.idScope.updateBeginId('session', action.db_session + 1)
for operation in action.db_operations:
self.idScope.updateBeginId('operation', operation.db_id+1)
if operation.vtType == 'add' or operation.vtType == 'change':
# update ids of data
self.idScope.updateBeginId(operation.db_what,
getNewObjId(operation)+1)
if operation.db_data is None:
if operation.vtType == 'change':
operation.db_objectId = operation.db_oldObjId
self.db_add_object(operation.db_data)
for annotation in action.db_annotations:
self.idScope.updateBeginId('annotation', annotation.db_id+1)
def db_add_object(self, obj):
self.db_objects[(obj.vtType, obj.db_id)] = obj
def db_get_object(self, type, id):
return self.db_objects.get((type, id), None)
def db_update_object(self, obj, **kwargs):
# want to swap out old object with a new version
# need this for updating aliases...
# hack it using setattr...
real_obj = self.db_objects[(obj.vtType, obj.db_id)]
for (k, v) in kwargs.iteritems():
if hasattr(real_obj, k):
setattr(real_obj, k, v)
| 43.244275 | 79 | 0.625243 | [
"BSD-3-Clause"
] | MaritimeResearchInstituteNetherlands/VisTrails | vistrails/db/versions/v0_9_4/domain/vistrail.py | 5,665 | Python |
import os
import torch
import tkdet.utils.comm as comm
from tkdet.checkpoint import DetectionCheckpointer
from tkdet.config import get_cfg
from tkdet.data import MetadataCatalog
from tkdet.engine import DefaultTrainer
from tkdet.engine import default_argument_parser
from tkdet.engine import default_setup
from tkdet.engine import launch
from tkdet.evaluation import CityscapesInstanceEvaluator
from tkdet.evaluation import COCOEvaluator
from tkdet.evaluation import DatasetEvaluators
from tkdet.evaluation import LVISEvaluator
from tkdet.evaluation import verify_results
from point_rend import add_pointrend_config
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if evaluator_type == "coco":
return COCOEvaluator(dataset_name, cfg, True, output_folder)
if evaluator_type == "cityscapes":
assert torch.cuda.device_count() >= comm.get_rank(), \
"CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if len(evaluator_list) == 0:
raise NotImplementedError(
f"no Evaluator for the dataset {dataset_name} with the type {evaluator_type}"
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
def setup(args):
cfg = get_cfg()
add_pointrend_config(cfg)
cfg.merge_from_file(args.config)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 31.870588 | 93 | 0.695829 | [
"MIT"
] | tkhe/tkdetection | projects/PointRend/train_net.py | 2,709 | Python |
# Copyright (c) 2013, TeamPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from six.moves import range
from six import string_types
import frappe
import json
from frappe.utils import (getdate, cint, add_months, date_diff, add_days,
nowdate, get_datetime_str, cstr, get_datetime, now_datetime, format_datetime)
from datetime import datetime
from calendar import monthrange
from frappe import _, msgprint
from frappe.utils import flt
from frappe.utils import cstr, cint, getdate
def execute(filters=None):
if not filters:
filters = {}
columns = get_columns()
data = []
row = []
conditions, filters = get_conditions(filters)
attendance = get_attendance(conditions,filters)
for att in attendance:
data.append(att)
return columns, data
def get_columns():
columns = [
_("ID") + ":Data:200",
_("Attendance Date") + ":Data:200",
_("Employee") + ":Data:120",
_("Employee Name") + ":Data:120",
_("Department") + ":Data:120",
_("Status") + ":Data:120",
# _("Present Shift") + ":Data:120"
]
return columns
def get_attendance(conditions,filters):
    attendance = frappe.db.sql("""Select name,employee, employee_name, department,attendance_date, shift,status
    From `tabAttendance` Where status = "Absent" and docstatus = 1 and %s group by employee,attendance_date"""% conditions,filters, as_dict=1)
    employee = frappe.db.get_all("Employee",{"status":"Active"},["name"])
    row = []
    import pandas as pd
    mydates = pd.date_range(filters.from_date, filters.to_date).tolist()
    for emp in employee:
        # Collect this employee's absences that fall inside the selected date range.
        absent_records = []
        for date in mydates:
            for att in attendance:
                if emp.name == att.employee and att.attendance_date == date.date():
                    absent_records.append((att, date.date()))
                    break
        # Only report employees who were absent on three or more days in the range.
        if len(absent_records) >= 3:
            for att, ab_date in absent_records:
                row.append((att.name, ab_date, att.employee, att.employee_name, att.department, att.status))
    frappe.errprint(row)
    return row
def get_conditions(filters):
conditions = ""
if filters.get("from_date"): conditions += " attendance_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and attendance_date <= %(to_date)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
if filters.get("department"): conditions += " and department = %(department)s"
return conditions, filters | 35.763158 | 142 | 0.64496 | [
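# Illustrative sketch (not part of the original report): the WHERE fragment that
# get_conditions builds for a typical filter set; the filter values are made up.
def _example_conditions():
    filters = frappe._dict({
        "from_date": "2023-01-01",
        "to_date": "2023-01-31",
        "company": "TeamPRO",
    })
    conditions, _ = get_conditions(filters)
    # conditions is now:
    #   " attendance_date >= %(from_date)s and attendance_date <= %(to_date)s and company = %(company)s"
    return conditions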
"MIT"
] | thispl/tpl-hrpro | hrpro/hrpro/report/continuous_absent_report/continuous_absent_report.py | 2,718 | Python |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
from useradmin.models import HuePermission
try:
perm = HuePermission.objects.get(app='metastore', action='read_only_access')
perm.delete()
except HuePermission.DoesNotExist:
pass
def backwards(self, orm):
perm, created = HuePermission.objects.get_or_create(app='metastore', action='read_only_access')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'useradmin.grouppermission': {
'Meta': {'object_name': 'GroupPermission'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'hue_permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['useradmin.HuePermission']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'useradmin.huepermission': {
'Meta': {'object_name': 'HuePermission'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'app': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'through': "orm['useradmin.GroupPermission']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'useradmin.ldapgroup': {
'Meta': {'object_name': 'LdapGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group'", 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'useradmin.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'HUE'", 'max_length': '64'}),
'home_directory': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['useradmin']
symmetrical = True
| 64.597701 | 182 | 0.568149 | [
"Apache-2.0"
] | 10088/hue | apps/useradmin/src/useradmin/old_migrations/0003_remove_metastore_readonly_huepermission.py | 5,620 | Python |
"""
.. module: lemur.users.models
:platform: unix
:synopsis: This module contains all of the models need to create a user within
lemur
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
from sqlalchemy.orm import relationship
from sqlalchemy import Integer, String, Column, Boolean
from sqlalchemy.event import listen
from sqlalchemy_utils.types.arrow import ArrowType
from lemur.database import db
from lemur.models import roles_users
from lemur.extensions import bcrypt
def hash_password(mapper, connect, target):
"""
Helper function that is a listener and hashes passwords before
insertion into the database.
:param mapper:
:param connect:
:param target:
"""
target.hash_password()
class User(db.Model):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
password = Column(String(128))
active = Column(Boolean())
confirmed_at = Column(ArrowType())
username = Column(String(255), nullable=False, unique=True)
email = Column(String(128), unique=True)
profile_picture = Column(String(255))
roles = relationship('Role', secondary=roles_users, passive_deletes=True, backref=db.backref('user'), lazy='dynamic')
certificates = relationship('Certificate', backref=db.backref('user'), lazy='dynamic')
pending_certificates = relationship('PendingCertificate', backref=db.backref('user'), lazy='dynamic')
authorities = relationship('Authority', backref=db.backref('user'), lazy='dynamic')
keys = relationship('ApiKey', backref=db.backref('user'), lazy='dynamic')
logs = relationship('Log', backref=db.backref('user'), lazy='dynamic')
sensitive_fields = ('password',)
def check_password(self, password):
"""
        Hash a given password and check it against the stored value
        to determine its validity.
:param password:
:return:
"""
if self.password:
return bcrypt.check_password_hash(self.password, password)
def hash_password(self):
"""
Generate the secure hash for the password.
:return:
"""
if self.password:
self.password = bcrypt.generate_password_hash(self.password).decode('utf-8')
@property
def is_admin(self):
"""
Determine if the current user has the 'admin' role associated
with it.
:return:
"""
for role in self.roles:
if role.name == 'admin':
return True
def __repr__(self):
return "User(username={username})".format(username=self.username)
listen(User, 'before_insert', hash_password)
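# Illustrative sketch, not part of Lemur: the before_insert listener above means
# a plaintext password set on a new User is hashed when the row is inserted, and
# check_password verifies candidates against the stored hash. The values below
# are made up, and this assumes an application context with the database set up.
def _example_password_round_trip():
    user = User(username="example-user", email="[email protected]",
                password="correct horse battery staple", active=True)
    db.session.add(user)
    db.session.flush()  # fires the before_insert listener, hashing the password
    return user.check_password("correct horse battery staple")  # True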
| 30.733333 | 121 | 0.66992 | [
"Apache-2.0"
] | Brett-Wood/lemur | lemur/users/models.py | 2,766 | Python |
# SPDX-License-Identifier: MIT
# (c) 2019 The TJHSST Director 4.0 Development Team & Contributors
import asyncio
import json
from typing import Any, Dict
import websockets
from docker.models.services import Service
from ..docker.services import get_director_service_name, get_service_by_name
from ..docker.utils import create_client
from ..logs import DirectorSiteLogFollower
from .utils import mainloop_auto_cancel, wait_for_event
async def logs_handler(
websock: websockets.client.WebSocketClientProtocol,
params: Dict[str, Any],
stop_event: asyncio.Event,
) -> None:
client = create_client()
site_id = int(params["site_id"])
service: Service = get_service_by_name(client, get_director_service_name(site_id))
if service is None:
await websock.close()
return
async def echo_loop() -> None:
while True:
try:
msg = json.loads(await websock.recv())
except (websockets.exceptions.ConnectionClosed, asyncio.CancelledError):
break
if isinstance(msg, dict) and "heartbeat" in msg:
try:
await websock.send(json.dumps(msg))
except (websockets.exceptions.ConnectionClosed, asyncio.CancelledError):
break
async def log_loop(log_follower: DirectorSiteLogFollower) -> None:
try:
async for line in log_follower.iter_lines():
if not line:
break
await websock.send(json.dumps({"line": line}))
except (websockets.exceptions.ConnectionClosed, asyncio.CancelledError):
pass
async with DirectorSiteLogFollower(client, site_id) as log_follower:
await log_follower.start(last_n=10)
await mainloop_auto_cancel(
[echo_loop(), log_loop(log_follower), wait_for_event(stop_event)]
)
await websock.close()
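# Illustrative sketch, not part of the orchestrator: a minimal client for the
# protocol implemented above -- the server pushes {"line": ...} frames and echoes
# back any {"heartbeat": ...} frame it receives. The URL is a made-up placeholder;
# the real endpoint and authentication are deployment-specific.
async def _example_log_client(url: str = "ws://localhost:5010/ws/sites/1/logs/") -> None:
    async with websockets.connect(url) as websock:
        await websock.send(json.dumps({"heartbeat": 1}))
        while True:
            msg = json.loads(await websock.recv())
            if "line" in msg:
                print(msg["line"], end="")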
| 30.634921 | 88 | 0.661658 | [
"MIT"
] | Rushilwiz/director4 | orchestrator/orchestrator/consumers/logs.py | 1,930 | Python |
from reqlist import *
import random
from catalog.models import Course
def ceiling_thresh(progress, maximum):
"""Creates a progress object
Ensures that 0 < progress < maximum"""
effective_progress = max(0, progress)
if maximum > 0:
return Progress(min(effective_progress, maximum), maximum)
else:
return Progress(effective_progress, maximum)
def total_units(courses):
"""Finds the total units in a list of Course objects"""
total = 0
for course in courses:
total += course.total_units
return total
def sum_progresses(progresses, criterion_type, maxFunc):
"""Adds together a list of Progress objects by combining them one by one
criterion_type: either subjects or units
maxFunc: describes how to combine the maximums of the Progress objects"""
if criterion_type == CRITERION_SUBJECTS:
mapfunc = lambda p: p.subject_fulfillment
elif criterion_type == CRITERION_UNITS:
mapfunc = lambda p: p.unit_fulfillment
sum_progress = reduce(lambda p1, p2: p1.combine(p2, maxFunc), map(mapfunc, progresses))
return sum_progress
def force_unfill_progresses(satisfied_by_category, current_distinct_threshold, current_threshold):
"""Adjusts the fulfillment and progress of RequirementsProgress object with both distinct thresholds and thresholds
These requirements follow the form "X subjects/units from at least N categories"
satisfied_by_category: list of lists of Courses for each category
current_distinct_threshold: threshold object for distinct threshold
current_threshold: threshold object for regular threshold"""
subject_cutoff = current_threshold.cutoff_for_criterion(CRITERION_SUBJECTS)
unit_cutoff = current_threshold.cutoff_for_criterion(CRITERION_UNITS)
#list of subjects by category sorted by units
max_unit_subjects = map(lambda sat_cat: sorted(sat_cat, key = lambda s: s.total_units), satisfied_by_category)
#split subjects into two sections: fixed and free
#fixed subjects: must have one subject from each category
#free subjects: remaining subjects to fill requirement can come from any category
#choose maximum-unit courses to fulfill requirement with least amount of courses possible
fixed_subject_progress = 0
fixed_subject_max = current_distinct_threshold.get_actual_cutoff()
fixed_unit_progress = 0
fixed_unit_max = 0
#fill fixed subjects with maximum-unit course in each category
for category_subjects in max_unit_subjects:
if len(category_subjects) > 0:
subject_to_count = category_subjects.pop()
fixed_subject_progress += 1
fixed_unit_progress += subject_to_count.total_units
fixed_unit_max += subject_to_count.total_units
else:
fixed_unit_max += DEFAULT_UNIT_COUNT
#remaining subjects/units to fill
remaining_subject_progress = subject_cutoff - fixed_subject_max
remaining_unit_progress = unit_cutoff - fixed_unit_max
#choose free courses from all remaining courses
free_courses = sorted([course for category in max_unit_subjects for course in category], key = lambda s: s.total_units, reverse = True)
free_subject_max = subject_cutoff - fixed_subject_max
free_unit_max = unit_cutoff - fixed_unit_max
free_subject_progress = min(len(free_courses), free_subject_max)
free_unit_progress = min(total_units(free_courses), free_unit_max)
#add fixed and free courses to get total progress
subject_progress = Progress(fixed_subject_progress + free_subject_progress, subject_cutoff)
unit_progress = Progress(fixed_unit_progress + free_unit_progress, unit_cutoff)
return (subject_progress, unit_progress)
class JSONProgressConstants:
"""Each of these keys will be filled in a RequirementsStatement JSON
representation decorated by a RequirementsProgress object."""
is_fulfilled = "fulfilled"
progress = "progress"
progress_max = "max"
percent_fulfilled = "percent_fulfilled"
satisfied_courses = "sat_courses"
# Progress assertions
is_bypassed = "is_bypassed"
assertion = "assertion"
class Progress(object):
"""An object describing simple progress towards a requirement
Different from RequirementsProgress object as it only includes progress information,
not nested RequirementsProgress objects, fulfillment status, title, and other information
progress: number of units/subjects completed
max: number of units/subjects needed to fulfill requirement"""
def __init__(self, progress, max):
self.progress = progress
self.max = max
def get_percent(self):
if self.max > 0:
return min(100, int(round((self.progress / float(self.max)) * 100)))
else:
return "N/A"
def get_fraction(self):
if self.max > 0:
return self.progress / float(self.max)
else:
return "N/A"
def get_raw_fraction(self, unit):
denom = max(self.max, DEFAULT_UNIT_COUNT if unit == CRITERION_UNITS else 1)
return self.progress/denom
def combine(self, p2, maxFunc):
if maxFunc is not None:
return Progress(self.progress + p2.progress, self.max + maxFunc(p2.max))
return Progress(self.progress + p2.progress, self.max + p2.max)
def __repr__(self):
return str(self.progress) + " / " + str(self.max)
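# Illustrative sketch, not part of the original module: a small worked example of
# the Progress arithmetic used below. Combining 2-of-3 subjects with 1-of-2
# subjects yields 3 of 5 subjects, i.e. 60 percent fulfilled.
def _example_progress_combination():
    p1 = Progress(2, 3)
    p2 = Progress(1, 2)
    combined = p1.combine(p2, None)  # maxFunc=None simply adds the maxima
    return combined.progress, combined.max, combined.get_percent()  # (3, 5, 60)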
class RequirementsProgress(object):
"""
Stores a user's progress towards a given requirements statement. This object
wraps a requirements statement and has a to_json_object() method which
returns the statement's own JSON dictionary representation with progress
information added.
Note: This class is maintained separately from the Django model so that
persistent information can be stored in a database-friendly format, while
information specific to a user's request is transient.
"""
def __init__(self, statement, list_path):
"""Initializes a progress object with the given requirements statement."""
self.statement = statement
self.threshold = self.statement.get_threshold()
self.distinct_threshold = self.statement.get_distinct_threshold()
self.list_path = list_path
self.children = []
if self.statement.requirement is None:
for index, child in enumerate(self.statement.requirements.iterator()):
self.children.append(RequirementsProgress(child, list_path + "." + str(index)))
def courses_satisfying_req(self, courses):
"""
Returns the whole courses and the half courses satisfying this requirement
separately.
"""
if self.statement.requirement is not None:
req = self.statement.requirement
if "GIR:" in req or "HASS" in req or "CI-" in req:
# Separate whole and half courses
whole_courses = []
half_courses = []
for c in courses:
if not c.satisfies(req, courses):
continue
if c.is_half_class:
half_courses.append(c)
else:
whole_courses.append(c)
return whole_courses, half_courses
else:
return [c for c in courses if c.satisfies(req, courses)], []
return [], []
def override_requirement(self, manual_progress):
"""
Sets the progress fulfillment variables based on a manual progress value, which is
expressed in either units or subjects depending on the requirement's threshold.
"""
self.is_fulfilled = manual_progress >= self.threshold.get_actual_cutoff()
subjects = 0
units = 0
satisfied_courses = set()
if self.threshold.criterion == CRITERION_UNITS:
units = manual_progress
subjects = manual_progress / DEFAULT_UNIT_COUNT
else:
units = manual_progress * DEFAULT_UNIT_COUNT
subjects = manual_progress
subject_progress = ceiling_thresh(subjects, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
unit_progress = ceiling_thresh(units, self.threshold.cutoff_for_criterion(CRITERION_UNITS))
#fill with dummy courses
random_ids = random.sample(range(1000, max(10000, subject_progress.progress + 1000)), subject_progress.progress)
for rand_id in random_ids:
dummy_course = Course(id = self.list_path + "_" + str(rand_id), subject_id = "gen_course_" + self.list_path + "_" + str(rand_id), title = "Generated Course " + self.list_path + " " + str(rand_id))
satisfied_courses.add(dummy_course)
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
progress = unit_progress if self.threshold is not None and self.threshold.criterion == CRITERION_UNITS else subject_progress
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.satisfied_courses = list(satisfied_courses)
def compute_assertions(self, courses, progress_assertions):
"""
Computes the fulfillment of this requirement based on progress assertions, and returns
True if the requirement has an assertion available or False otherwise.
Assertions are in the format of a dictionary keyed by requirements list paths, where the
values are dictionaries containing three possible keys: "substitutions", which should be a
list of course IDs that combine to substitute for the requirement, "ignore", which
indicates that the requirement is not to be used when satisfying later requirements, and
"override", which is equivalent to the old manual progress value and indicates a progress
toward the requirement in the unit specified by the requirement's threshold type (only
used if the requirement is a plain string requirement and has a threshold). The order of
precedence is override, ignore, substitutions.
"""
self.assertion = progress_assertions.get(self.list_path, None)
self.is_bypassed = False
if self.assertion is not None:
substitutions = self.assertion.get("substitutions", None) #List of substitutions
ignore = self.assertion.get("ignore", False) #Boolean
override = self.assertion.get("override", 0)
else:
substitutions = None
ignore = False
override = 0
if self.statement.is_plain_string and self.threshold is not None and override:
self.override_requirement(override)
return True
if ignore:
self.is_fulfilled = False
subject_progress = Progress(0, 0)
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
unit_progress = Progress(0, 0)
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
progress = Progress(0, 0)
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.satisfied_courses = []
return True
if substitutions is not None:
satisfied_courses = set()
subs_satisfied = 0
units_satisfied = 0
for sub in substitutions:
for course in courses:
if course.satisfies(sub, courses):
subs_satisfied += 1
units_satisfied += course.total_units
satisfied_courses.add(course)
break
if self.statement.is_plain_string and self.threshold is not None:
subject_progress = Progress(subs_satisfied,
self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
unit_progress = Progress(units_satisfied,
self.threshold.cutoff_for_criterion(CRITERION_UNITS))
progress = subject_progress if self.threshold.criterion == CRITERION_SUBJECTS else unit_progress
self.is_fulfilled = progress.progress == progress.max
else:
subject_progress = Progress(subs_satisfied, len(substitutions))
self.is_fulfilled = subs_satisfied == len(substitutions)
unit_progress = Progress(subs_satisfied * DEFAULT_UNIT_COUNT, len(substitutions) * DEFAULT_UNIT_COUNT)
progress = subject_progress
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.satisfied_courses = list(satisfied_courses)
return True
return False
def bypass_children(self):
"""Sets the is_bypassed flag of the recursive children of this progress object to True."""
for child in self.children:
child.is_bypassed = True
child.is_fulfilled = False
child.subject_fulfillment = Progress(0, 0)
child.subject_progress = 0
child.subject_max = 0
child.unit_fulfillment = Progress(0, 0)
child.unit_progress = 0
child.unit_max = 0
child.progress = 0
child.progress_max = 0
child.percent_fulfilled = 0
child.fraction_fulfilled = 0
child.satisfied_courses = []
child.assertion = None
child.bypass_children()
def compute(self, courses, progress_overrides, progress_assertions):
"""Computes and stores the status of the requirements statement using the
given list of Course objects."""
# Compute status of children and then self, adapted from mobile apps' computeRequirementsStatus method
satisfied_courses = set()
if self.compute_assertions(courses, progress_assertions):
self.bypass_children()
return
if self.list_path in progress_overrides:
manual_progress = progress_overrides[self.list_path]
else:
manual_progress = 0
self.is_bypassed = False
self.assertion = None
if self.statement.requirement is not None:
#it is a basic requirement
if self.statement.is_plain_string and manual_progress != 0 and self.threshold is not None:
#use manual progress
self.override_requirement(manual_progress)
return
else:
#Example: for a requirement like CI-H, we want to show how many qualifying courses have been taken
whole_courses, half_courses = self.courses_satisfying_req(courses)
satisfied_courses = whole_courses + half_courses
if not self.threshold is None:
#A specific number of courses is required
subject_progress = ceiling_thresh(len(whole_courses) + len(half_courses) // 2, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
unit_progress = ceiling_thresh(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
else:
#Only one is needed
progress_subjects = min(len(satisfied_courses), 1)
is_fulfilled = len(satisfied_courses) > 0
subject_progress = ceiling_thresh(progress_subjects, 1)
if len(satisfied_courses) > 0:
unit_progress = ceiling_thresh(list(satisfied_courses)[0].total_units, DEFAULT_UNIT_COUNT)
else:
unit_progress = ceiling_thresh(0, DEFAULT_UNIT_COUNT)
progress = unit_progress if self.threshold is not None and self.threshold.criterion == CRITERION_UNITS else subject_progress
if len(self.children) > 0:
#It's a compound requirement
num_reqs_satisfied = 0
satisfied_by_category = []
satisfied_courses = set()
num_courses_satisfied = 0
open_children = []
for req_progress in self.children:
req_progress.compute(courses, progress_overrides, progress_assertions)
req_satisfied_courses = req_progress.satisfied_courses
# Don't count anything from a requirement that is ignored
if req_progress.assertion and req_progress.assertion.get("ignore", False):
continue
open_children.append(req_progress)
if req_progress.is_fulfilled and len(req_progress.satisfied_courses) > 0:
num_reqs_satisfied += 1
satisfied_courses.update(req_satisfied_courses)
satisfied_by_category.append(list(req_satisfied_courses))
# For thresholded ANY statements, children that are ALL statements
# count as a single satisfied course. ANY children count for
# all of their satisfied courses.
if req_progress.statement.connection_type == CONNECTION_TYPE_ALL and req_progress.children:
num_courses_satisfied += req_progress.is_fulfilled and len(req_progress.satisfied_courses) > 0
else:
num_courses_satisfied += len(req_satisfied_courses)
satisfied_by_category = [sat for prog, sat in sorted(zip(open_children, satisfied_by_category), key = lambda z: z[0].raw_fraction_fulfilled, reverse = True)]
sorted_progresses = sorted(open_children, key = lambda req: req.raw_fraction_fulfilled, reverse = True)
if self.threshold is None and self.distinct_threshold is None:
is_fulfilled = (num_reqs_satisfied > 0)
if self.statement.connection_type == CONNECTION_TYPE_ANY:
#Simple "any" statement
if len(sorted_progresses) > 0:
subject_progress = sorted_progresses[0].subject_fulfillment
unit_progress = sorted_progresses[0].unit_fulfillment
else:
subject_progress = Progress(0, 0)
unit_progress = Progress(0, 0)
else:
#"All" statement, will be finalized later
subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, None)
unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, None)
else:
if self.distinct_threshold is not None:
#Clip the progresses to the ones which the user is closest to completing
num_progresses_to_count = min(self.distinct_threshold.get_actual_cutoff(), len(sorted_progresses))
sorted_progresses = sorted_progresses[:num_progresses_to_count]
satisfied_by_category = satisfied_by_category[:num_progresses_to_count]
satisfied_courses = set()
num_courses_satisfied = 0
for i, child in zip(range(num_progresses_to_count), open_children):
satisfied_courses.update(satisfied_by_category[i])
if child.statement.connection_type == CONNECTION_TYPE_ALL:
num_courses_satisfied += (child.is_fulfilled and len(child.satisfied_courses) > 0)
else:
num_courses_satisfied += len(satisfied_by_category[i])
if self.threshold is None and self.distinct_threshold is not None:
#Required number of statements
if self.distinct_threshold.type == THRESHOLD_TYPE_GTE or self.distinct_threshold.type == THRESHOLD_TYPE_GT:
is_fulfilled = num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff()
else:
is_fulfilled = True
subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, lambda x: max(x, 1))
unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, lambda x: (x, DEFAULT_UNIT_COUNT)[x == 0])
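# Note: "(x, DEFAULT_UNIT_COUNT)[x == 0]" just above is an old-style conditional
# expression, equivalent to "DEFAULT_UNIT_COUNT if x == 0 else x".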
elif self.threshold is not None:
#Required number of subjects or units
subject_progress = Progress(num_courses_satisfied, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
unit_progress = Progress(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
if self.distinct_threshold is not None and (self.distinct_threshold.type == THRESHOLD_TYPE_GT or self.distinct_threshold.type == THRESHOLD_TYPE_GTE):
is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress) and num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff()
if num_reqs_satisfied < self.distinct_threshold.get_actual_cutoff():
(subject_progress, unit_progress) = force_unfill_progresses(satisfied_by_category, self.distinct_threshold, self.threshold)
else:
is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
if self.statement.connection_type == CONNECTION_TYPE_ALL:
#"All" statement - make above progresses more stringent
is_fulfilled = is_fulfilled and (num_reqs_satisfied == len(open_children))
if subject_progress.progress == subject_progress.max and len(open_children) > num_reqs_satisfied:
subject_progress.max += len(open_children) - num_reqs_satisfied
unit_progress.max += (len(open_children) - num_reqs_satisfied) * DEFAULT_UNIT_COUNT
#Polish up values
subject_progress = ceiling_thresh(subject_progress.progress, subject_progress.max)
unit_progress = ceiling_thresh(unit_progress.progress, unit_progress.max)
progress = unit_progress if self.threshold is not None and self.threshold.criterion == CRITERION_UNITS else subject_progress
progress_units = CRITERION_SUBJECTS if self.threshold is None else self.threshold.criterion
self.is_fulfilled = is_fulfilled
self.subject_fulfillment = subject_progress
self.subject_progress = subject_progress.progress
self.subject_max = subject_progress.max
self.unit_fulfillment = unit_progress
self.unit_progress = unit_progress.progress
self.unit_max = unit_progress.max
self.progress = progress.progress
self.progress_max = progress.max
self.percent_fulfilled = progress.get_percent()
self.fraction_fulfilled = progress.get_fraction()
self.raw_fraction_fulfilled = progress.get_raw_fraction(progress_units)
self.satisfied_courses = list(satisfied_courses)
def to_json_object(self, full = True, child_fn = None):
"""Returns a JSON dictionary containing the dictionary representation of
the enclosed requirements statement, as well as progress information."""
# Recursively decorate the JSON output of the children
# Add custom keys indicating progress for this statement
stmt_json = self.statement.to_json_object(full=False)
stmt_json[JSONProgressConstants.is_fulfilled] = self.is_fulfilled
stmt_json[JSONProgressConstants.progress] = self.progress
stmt_json[JSONProgressConstants.progress_max] = self.progress_max
stmt_json[JSONProgressConstants.percent_fulfilled] = self.percent_fulfilled
stmt_json[JSONProgressConstants.satisfied_courses] = map(lambda c: c.subject_id, self.satisfied_courses)
if self.is_bypassed:
stmt_json[JSONProgressConstants.is_bypassed] = self.is_bypassed
if self.assertion:
stmt_json[JSONProgressConstants.assertion] = self.assertion
if full:
if self.children:
if child_fn is None:
child_fn = lambda c: c.to_json_object(full=full)
stmt_json[JSONConstants.requirements] = [child_fn(child) for child in self.children]
return stmt_json
| 49.898438 | 208 | 0.663144 | [
"MIT"
] | georgiashay/fireroad-server2 | requirements/progress.py | 25,548 | Python |
"""Constants for the AVM FRITZ!SmartHome integration."""
from __future__ import annotations
import logging
from typing import Final
from homeassistant.components.binary_sensor import DEVICE_CLASS_WINDOW
from homeassistant.components.fritzbox.model import (
FritzBinarySensorEntityDescription,
FritzSensorEntityDescription,
)
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
)
ATTR_STATE_BATTERY_LOW: Final = "battery_low"
ATTR_STATE_DEVICE_LOCKED: Final = "device_locked"
ATTR_STATE_HOLIDAY_MODE: Final = "holiday_mode"
ATTR_STATE_LOCKED: Final = "locked"
ATTR_STATE_SUMMER_MODE: Final = "summer_mode"
ATTR_STATE_WINDOW_OPEN: Final = "window_open"
ATTR_TEMPERATURE_UNIT: Final = "temperature_unit"
CONF_CONNECTIONS: Final = "connections"
CONF_COORDINATOR: Final = "coordinator"
DEFAULT_HOST: Final = "fritz.box"
DEFAULT_USERNAME: Final = "admin"
DOMAIN: Final = "fritzbox"
LOGGER: Final[logging.Logger] = logging.getLogger(__package__)
PLATFORMS: Final[list[str]] = ["binary_sensor", "climate", "switch", "sensor"]
BINARY_SENSOR_TYPES: Final[tuple[FritzBinarySensorEntityDescription, ...]] = (
FritzBinarySensorEntityDescription(
key="alarm",
name="Alarm",
device_class=DEVICE_CLASS_WINDOW,
suitable=lambda device: device.has_alarm, # type: ignore[no-any-return]
is_on=lambda device: device.alert_state, # type: ignore[no-any-return]
),
)
SENSOR_TYPES: Final[tuple[FritzSensorEntityDescription, ...]] = (
FritzSensorEntityDescription(
key="temperature",
name="Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
suitable=lambda device: (
device.has_temperature_sensor and not device.has_thermostat
),
native_value=lambda device: device.temperature, # type: ignore[no-any-return]
),
FritzSensorEntityDescription(
key="battery",
name="Battery",
native_unit_of_measurement=PERCENTAGE,
device_class=DEVICE_CLASS_BATTERY,
suitable=lambda device: device.battery_level is not None,
native_value=lambda device: device.battery_level, # type: ignore[no-any-return]
),
FritzSensorEntityDescription(
key="power_consumption",
name="Power Consumption",
native_unit_of_measurement=POWER_WATT,
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
suitable=lambda device: device.has_powermeter, # type: ignore[no-any-return]
native_value=lambda device: device.power / 1000 if device.power else 0.0,
),
FritzSensorEntityDescription(
key="total_energy",
name="Total Energy",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
suitable=lambda device: device.has_powermeter, # type: ignore[no-any-return]
native_value=lambda device: device.energy / 1000 if device.energy else 0.0,
),
)
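# A rough sketch of how these entity descriptions are typically consumed by a sensor
# platform: filter each description by its `suitable` predicate for a given device,
# then read values via `native_value`. The function and coordinator attribute names
# below are assumptions for illustration, not the actual fritzbox sensor platform code.
#
# def build_sensor_candidates(coordinator):
#     return [
#         (ain, description)
#         for ain, device in coordinator.data.items()
#         for description in SENSOR_TYPES
#         if description.suitable(device)
#     ]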
| 34.587629 | 88 | 0.732042 | [
"Apache-2.0"
] | WireFuCo/core | homeassistant/components/fritzbox/const.py | 3,355 | Python |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import SagbitTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(SagbitTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-addressindex"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-addressindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining blocks..."
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that balances are correct
balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(balance0["balance"], 0)
# Check p2pkh and p2sh address indexes
print "Testing p2pkh and p2sh address index..."
txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
self.nodes[0].generate(1)
txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
self.nodes[0].generate(1)
txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
self.nodes[0].generate(1)
txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
self.nodes[0].generate(1)
txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
self.nodes[0].generate(1)
txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
self.nodes[0].generate(1)
self.sync_all()
txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
assert_equal(len(txids), 3)
assert_equal(txids[0], txid0)
assert_equal(txids[1], txid1)
assert_equal(txids[2], txid2)
txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(len(txidsb), 3)
assert_equal(txidsb[0], txidb0)
assert_equal(txidsb[1], txidb1)
assert_equal(txidsb[2], txidb2)
# Check that limiting by height works
print "Testing querying txids by range of block heights.."
height_txids = self.nodes[1].getaddresstxids({
"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
"start": 105,
"end": 110
})
assert_equal(len(height_txids), 2)
assert_equal(height_txids[0], txidb0)
assert_equal(height_txids[1], txidb1)
# Check that multiple addresses works
multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
assert_equal(len(multitxids), 6)
assert_equal(multitxids[0], txid0)
assert_equal(multitxids[1], txidb0)
assert_equal(multitxids[2], txid1)
assert_equal(multitxids[3], txidb1)
assert_equal(multitxids[4], txid2)
assert_equal(multitxids[5], txidb2)
# Check that balances are correct
balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(balance0["balance"], 45 * 100000000)
# Check that outputs with the same address will only return one txid
print "Testing for txid uniqueness..."
addressHash = "6349a418fc4578d10a372b54b45c280cc8c4382f".decode("hex")
scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
unspent = self.nodes[0].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(len(txidsmany), 4)
assert_equal(txidsmany[3], sent_txid)
# Check that balances are correct
print "Testing balances..."
balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
assert_equal(balance0["balance"], 45 * 100000000 + 21)
# Check that balances are correct after spending
print "Testing balances after spending..."
privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash2 = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc".decode("hex")
scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
self.nodes[0].importprivkey(privkey2)
unspent = self.nodes[0].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
amount = unspent[0]["amount"] * 100000000
tx.vout = [CTxOut(amount, scriptPubKey2)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
balance1 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance1["balance"], amount)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
send_amount = 1 * 100000000 + 12840
change_amount = amount - send_amount - 10000
tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
balance2 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance2["balance"], change_amount)
# Check that deltas are returned correctly
deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
balance3 = 0
for delta in deltas:
balance3 += delta["satoshis"]
assert_equal(balance3, change_amount)
assert_equal(deltas[0]["address"], address2)
assert_equal(deltas[0]["blockindex"], 1)
# Check that entire range will be queried
deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
assert_equal(len(deltasAll), len(deltas))
# Check that deltas can be returned from range of block heights
deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
assert_equal(len(deltas), 1)
# Check that unspent outputs can be queried
print "Testing utxos..."
utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos), 1)
assert_equal(utxos[0]["satoshis"], change_amount)
# Check that indexes will be updated with a reorg
print "Testing reorg..."
best_hash = self.nodes[0].getbestblockhash()
self.nodes[0].invalidateblock(best_hash)
self.nodes[1].invalidateblock(best_hash)
self.nodes[2].invalidateblock(best_hash)
self.nodes[3].invalidateblock(best_hash)
self.sync_all()
balance4 = self.nodes[1].getaddressbalance(address2)
assert_equal(balance4, balance1)
utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos2), 1)
assert_equal(utxos2[0]["satoshis"], amount)
# Check sorting of utxos
self.nodes[2].generate(150)
txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
self.nodes[2].generate(1)
txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
self.nodes[2].generate(1)
self.sync_all()
utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
assert_equal(len(utxos3), 3)
assert_equal(utxos3[0]["height"], 114)
assert_equal(utxos3[1]["height"], 264)
assert_equal(utxos3[2]["height"], 265)
# Check mempool indexing
print "Testing mempool indexing..."
privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
addressHash3 = "aa9872b5bbcdb511d89e0e11aa27da73fd2c3f50".decode("hex")
scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
unspent = self.nodes[2].listunspent()
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
amount = unspent[0]["amount"] * 100000000
tx.vout = [CTxOut(amount, scriptPubKey3)]
tx.rehash()
signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
time.sleep(2)
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
amount = unspent[1]["amount"] * 100000000
tx2.vout = [
CTxOut(amount / 4, scriptPubKey3),
CTxOut(amount / 4, scriptPubKey3),
CTxOut(amount / 4, scriptPubKey4),
CTxOut(amount / 4, scriptPubKey4)
]
tx2.rehash()
signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
time.sleep(2)
mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool), 3)
assert_equal(mempool[0]["txid"], memtxid1)
assert_equal(mempool[0]["address"], address3)
assert_equal(mempool[0]["index"], 0)
assert_equal(mempool[1]["txid"], memtxid2)
assert_equal(mempool[1]["index"], 0)
assert_equal(mempool[2]["txid"], memtxid2)
assert_equal(mempool[2]["index"], 1)
self.nodes[2].generate(1)
self.sync_all()
mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool2), 0)
tx = CTransaction()
tx.vin = [
CTxIn(COutPoint(int(memtxid2, 16), 0)),
CTxIn(COutPoint(int(memtxid2, 16), 1))
]
tx.vout = [CTxOut(amount / 2 - 10000, scriptPubKey2)]
tx.rehash()
self.nodes[2].importprivkey(privKey3)
signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
time.sleep(2)
mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
assert_equal(len(mempool3), 2)
assert_equal(mempool3[0]["prevtxid"], memtxid2)
assert_equal(mempool3[0]["prevout"], 0)
assert_equal(mempool3[1]["prevtxid"], memtxid2)
assert_equal(mempool3[1]["prevout"], 1)
# sending and receiving to the same address
privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
address1hash = "c192bff751af8efec15135d42bfeedf91a6f3e34".decode("hex")
address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
self.nodes[0].sendtoaddress(address1, 10)
self.nodes[0].generate(1)
self.sync_all()
utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
assert_equal(len(utxos), 1)
tx = CTransaction()
tx.vin = [
CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
]
amount = utxos[0]["satoshis"] - 1000
tx.vout = [CTxOut(amount, address1script)]
tx.rehash()
self.nodes[0].importprivkey(privkey1)
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.sync_all()
mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
assert_equal(len(mempool_deltas), 2)
# Include chaininfo in results
print "Testing results with chain info..."
deltas_with_info = self.nodes[1].getaddressdeltas({
"addresses": [address2],
"start": 1,
"end": 200,
"chainInfo": True
})
start_block_hash = self.nodes[1].getblockhash(1)
end_block_hash = self.nodes[1].getblockhash(200)
assert_equal(deltas_with_info["start"]["height"], 1)
assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
assert_equal(deltas_with_info["end"]["height"], 200)
assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
expected_tip_block_hash = self.nodes[1].getblockhash(267)
assert_equal(utxos_with_info["height"], 267)
assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
print "Passed\n"
if __name__ == '__main__':
AddressIndexTest().main()
| 42.145714 | 144 | 0.650736 | [
"MIT"
] | mirzaei-ce/linux-sagbit | qa/rpc-tests/addressindex.py | 14,751 | Python |
# Copyright (c) 2008-2013 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from test_base import *
from mockito import mock, when, verify, VerificationError, verifyNoMoreInteractions
from mockito.verification import never
class VerificationErrorsTest(TestBase):
def testPrintsNicely(self):
theMock = mock()
try:
verify(theMock).foo()
except VerificationError, e:
self.assertEquals('\nWanted but not invoked: foo()\nInstead got: []', str(e))
def testPrintsNicelyOneArgument(self):
theMock = mock()
try:
verify(theMock).foo("bar")
except VerificationError, e:
self.assertEquals("\nWanted but not invoked: foo('bar')\nInstead got: []", str(e))
def testPrintsNicelyArguments(self):
theMock = mock()
try:
verify(theMock).foo(1, 2)
except VerificationError, e:
self.assertEquals('\nWanted but not invoked: foo(1, 2)\nInstead got: []', str(e))
def testPrintsNicelyStringArguments(self):
theMock = mock()
try:
verify(theMock).foo(1, 'foo')
except VerificationError, e:
self.assertEquals("\nWanted but not invoked: foo(1, 'foo')\nInstead got: []", str(e))
def testPrintsOutThatTheActualAndExpectedInvocationCountDiffers(self):
theMock = mock()
when(theMock).foo().thenReturn(0)
theMock.foo()
theMock.foo()
try:
verify(theMock).foo()
except VerificationError, e:
self.assertEquals("\nWanted times: 1, actual times: 2", str(e))
# TODO: implement
def disabled_PrintsNicelyWhenArgumentsDifferent(self):
theMock = mock()
theMock.foo('foo', 1)
try:
verify(theMock).foo(1, 'foo')
except VerificationError, e:
self.assertEquals(
"""Arguments are different.
Wanted: foo(1, 'foo')
Actual: foo('foo', 1)""", str(e))
def testPrintsUnwantedInteraction(self):
theMock = mock()
theMock.foo(1, 'foo')
try:
verifyNoMoreInteractions(theMock)
except VerificationError, e:
self.assertEquals("\nUnwanted interaction: foo(1, 'foo')", str(e))
def testPrintsNeverWantedInteractionsNicely(self):
theMock = mock()
theMock.foo()
self.assertRaisesMessage("\nUnwanted invocation of foo(), times: 1", verify(theMock, never).foo)
if __name__ == '__main__':
unittest.main()
| 36.852632 | 104 | 0.676378 | [
"MIT"
] | mriehl/mockito-without-hardcoded-distribute-version | mockito-0.5.2/mockito_test/verification_errors_test.py | 3,501 | Python |
# version 0.1
# by DrLecter
import sys
from com.l2jfrozen import Config
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "420_LittleWings"
# variables section
REQUIRED_EGGS = 20
#Drop rates in %
BACK_DROP = 30
EGG_DROP = 50
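# How these percentages are applied in onKill below (worked example; the quest drop
# rate multipliers are illustrative): with a 2x rate, BACK_DROP*rate = 60 and
# divmod(60, 100) = (0, 60), i.e. no guaranteed skin and roughly a 60% chance of one;
# with a 5x rate, 150 gives divmod(150, 100) = (1, 50), i.e. one guaranteed skin plus
# roughly a 50% chance of a second. EGG_DROP is handled the same way.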
#Quest items
FRY_STN,FRY_STN_DLX,FSN_LIST,FSN_LIST_DLX,TD_BCK_SKN,JUICE,SCALE_1,EX_EGG,\
SCALE_2,ZW_EGG,SCALE_3,KA_EGG,SCALE_4,SU_EGG,SCALE_5,SH_EGG,FRY_DUST = range(3816,3832)+[3499]
#NPCs
PM_COOPER,SG_CRONOS,GD_BYRON,MC_MARIA,FR_MYMYU = 30829,30610,30711,30608,30747
DK_EXARION,DK_ZWOV,DK_KALIBRAN,WM_SUZET,WM_SHAMHAI = range(30748,30753)
#mobs
TD_LORD = 20231 #toad lord
LO_LZRD_W = 20580 #exarion's
MS_SPIDER = 20233 #zwov's
RD_SCVNGR = 20551 #kalibran's
BO_OVERLD = 20270 #suzet's
DD_SEEKER = 20202 #shamhai's
#Rewards
FOOD = 4038
ARMOR = 3912
# helper functions section
def check_level(st) :
if st.getPlayer().getLevel() < 35 :
st.exitQuest(True)
return "420_low_level.htm"
return "Start.htm"
def check_stone(st,progress) :
if st.getQuestItemsCount(FRY_STN) == 1 :
st.set("cond","3")
if progress == 1 :
st.set("progress","3")
return "420_cronos_8.htm"
elif progress == 8 :
st.set("progress","10")
return "420_cronos_14.htm"
elif st.getQuestItemsCount(FRY_STN_DLX) == 1 :
if progress == 2 :
st.set("progress","4")
return "420_cronos_8.htm"
elif progress == 9 :
st.set("progress","11")
return "420_cronos_14.htm"
else :
return "420_cronos_7.htm"
def check_elements(st,progress) :
coal = st.getQuestItemsCount(1870)
char = st.getQuestItemsCount(1871)
gemd = st.getQuestItemsCount(2130)
gemc = st.getQuestItemsCount(2131)
snug = st.getQuestItemsCount(1873)
sofp = st.getQuestItemsCount(1875)
tdbk = st.getQuestItemsCount(TD_BCK_SKN)
if progress in [1,8] :
if coal >= 10 and char >= 10 and gemd >= 1 and snug >= 3 and tdbk >= 10 :
return "420_maria_2.htm"
else :
return "420_maria_1.htm"
elif progress in [2,9] :
if coal >= 10 and char >= 10 and gemc >= 1 and snug >= 5 and sofp >= 1 and tdbk >= 20 :
return "420_maria_4.htm"
else :
return "420_maria_1.htm"
def craft_stone(st,progress) :
if progress in [1,8]:
st.takeItems(1870,10)
st.takeItems(1871,10)
st.takeItems(2130,1)
st.takeItems(1873,3)
st.takeItems(TD_BCK_SKN,10)
st.takeItems(FSN_LIST,1)
st.giveItems(FRY_STN,1)
st.playSound("ItemSound.quest_itemget")
return "420_maria_3.htm"
elif progress in [2,9]:
st.takeItems(1870,10)
st.takeItems(1871,10)
st.takeItems(2131,1)
st.takeItems(1873,5)
st.takeItems(1875,1)
st.takeItems(TD_BCK_SKN,20)
st.takeItems(FSN_LIST_DLX,1)
st.giveItems(FRY_STN_DLX,1)
st.playSound("ItemSound.quest_itemget")
return "420_maria_5.htm"
def check_eggs(st, npc, progress) :
whom = int(st.get("dragon"))
if whom == 1 : eggs = EX_EGG
elif whom == 2 : eggs = ZW_EGG
elif whom == 3 : eggs = KA_EGG
elif whom == 4 : eggs = SU_EGG
elif whom == 5 : eggs = SH_EGG
if npc == "mymyu" :
if progress in [19,20] and st.getQuestItemsCount(eggs) == 1 :
return "420_"+npc+"_10.htm"
else :
if st.getQuestItemsCount(eggs) >= 20 :
return "420_"+npc+"_9.htm"
else :
return "420_"+npc+"_8.htm"
elif npc == "exarion" and whom == 1 :
if st.getQuestItemsCount(eggs) < 20 :
return "420_"+npc+"_3.htm"
else :
st.takeItems(eggs,20)
st.takeItems(SCALE_1,1)
if progress in [14,21] :
st.set("progress","19")
elif progress in [15,22] :
st.set("progress","20")
st.giveItems(eggs,1)
st.playSound("ItemSound.quest_itemget")
st.set("cond","7")
return "420_"+npc+"_4.htm"
elif npc == "zwov" and whom == 2 :
if st.getQuestItemsCount(eggs) < 20 :
return "420_"+npc+"_3.htm"
else :
st.takeItems(eggs,20)
st.takeItems(SCALE_2,1)
if progress in [14,21] :
st.set("progress","19")
elif progress in [15,22] :
st.set("progress","20")
st.giveItems(eggs,1)
st.set("cond","7")
st.playSound("ItemSound.quest_itemget")
return "420_"+npc+"_4.htm"
elif npc == "kalibran" and whom == 3 :
if st.getQuestItemsCount(eggs) < 20 :
return "420_"+npc+"_3.htm"
else :
st.takeItems(eggs,20)
# st.takeItems(SCALE_3,1)
return "420_"+npc+"_4.htm"
elif npc == "suzet" and whom == 4 :
if st.getQuestItemsCount(eggs) < 20 :
return "420_"+npc+"_4.htm"
else :
st.takeItems(eggs,20)
st.takeItems(SCALE_4,1)
if progress in [14,21] :
st.set("progress","19")
elif progress in [15,22] :
st.set("progress","20")
st.giveItems(eggs,1)
st.set("cond","7")
st.playSound("ItemSound.quest_itemget")
return "420_"+npc+"_5.htm"
elif npc == "shamhai" and whom == 5 :
if st.getQuestItemsCount(eggs) < 20 :
return "420_"+npc+"_3.htm"
else :
st.takeItems(eggs,20)
st.takeItems(SCALE_5,1)
if progress in [14,21] :
st.set("progress","19")
elif progress in [15,22] :
st.set("progress","20")
st.giveItems(eggs,1)
st.set("cond","7")
st.playSound("ItemSound.quest_itemget")
return "420_"+npc+"_4.htm"
return "check_eggs sux"
# Main Quest Code
class Quest (JQuest):
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st):
id = st.getState()
progress = st.getInt("progress")
if id == CREATED :
st.set("cond","0")
if event == "ido" :
st.setState(STARTING)
st.set("progress","0")
st.set("cond","1")
st.set("dragon","0")
st.playSound("ItemSound.quest_accept")
return "Starting.htm"
elif id == STARTING :
if event == "wait" :
return craft_stone(st,progress)
elif event == "cronos_2" :
return "420_cronos_2.htm"
elif event == "cronos_3" :
return "420_cronos_3.htm"
elif event == "cronos_4" :
return "420_cronos_4.htm"
elif event == "fsn" :
st.set("cond","2")
if progress == 0:
st.set("progress","1")
st.giveItems(FSN_LIST,1)
st.playSound("ItemSound.quest_itemget")
return "420_cronos_5.htm"
elif progress == 7:
st.set("progress","8")
st.giveItems(FSN_LIST,1)
st.playSound("ItemSound.quest_itemget")
return "420_cronos_12.htm"
elif event == "fsn_dlx" :
st.set("cond","2")
if progress == 0:
st.set("progress","2")
st.giveItems(FSN_LIST_DLX,1)
st.playSound("ItemSound.quest_itemget")
return "420_cronos_6.htm"
if progress == 7:
st.set("progress","9")
st.giveItems(FSN_LIST_DLX,1)
st.playSound("ItemSound.quest_itemget")
return "420_cronos_13.htm"
elif event == "showfsn" :
return "420_byron_2.htm"
elif event == "askmore" :
st.set("cond","4")
if progress == 3 :
st.set("progress","5")
return "420_byron_3.htm"
elif progress == 4 :
st.set("progress","6")
return "420_byron_4.htm"
elif event == "give_fsn" :
st.takeItems(FRY_STN,1)
return "420_mymyu_2.htm"
elif event == "give_fsn_dlx" :
st.takeItems(FRY_STN_DLX,1)
st.giveItems(FRY_DUST,1)
st.playSound("ItemSound.quest_itemget")
return "420_mymyu_4.htm"
elif event == "fry_ask" :
return "420_mymyu_5.htm"
elif event == "ask_abt" :
st.setState(STARTED)
st.set("cond","5")
st.giveItems(JUICE,1)
st.playSound("ItemSound.quest_itemget")
return "420_mymyu_6.htm"
elif id == STARTED :
if event == "exarion_1" :
st.giveItems(SCALE_1,1)
st.playSound("ItemSound.quest_itemget")
st.set("dragon","1")
st.set("cond","6")
st.set("progress",str(progress+9))
return "420_exarion_2.htm"
elif event == "kalibran_1" :
st.set("dragon","3")
st.set("cond","6")
st.giveItems(SCALE_3,1)
st.playSound("ItemSound.quest_itemget")
st.set("progress",str(progress+9))
return "420_kalibran_2.htm"
elif event == "kalibran_2" :
if st.getQuestItemsCount(SCALE_3) :
if progress in [14,21] :
st.set("progress","19")
elif progress in [15,22] :
st.set("progress","20")
st.takeItems(SCALE_3,1)
st.giveItems(KA_EGG,1)
st.set("cond","7")
st.playSound("ItemSound.quest_itemget")
return "420_kalibran_5.htm"
elif event == "zwov_1" :
st.set("dragon","2")
st.set("cond","6")
st.giveItems(SCALE_2,1)
st.playSound("ItemSound.quest_itemget")
st.set("progress",str(progress+9))
return "420_zwov_2.htm"
elif event == "shamhai_1" :
st.set("dragon","5")
st.set("cond","6")
st.giveItems(SCALE_5,1)
st.playSound("ItemSound.quest_itemget")
st.set("progress",str(progress+9))
return "420_shamhai_2.htm"
elif event == "suzet_1" :
return "420_suzet_2.htm"
elif event == "suzet_2" :
st.set("dragon","4")
st.set("cond","6")
st.giveItems(SCALE_4,1)
st.playSound("ItemSound.quest_itemget")
st.set("progress",str(progress+9))
return "420_suzet_3.htm"
elif event == "hatch" :
whom = int(st.get("dragon"))
if whom == 1 : eggs = EX_EGG
elif whom == 2 : eggs = ZW_EGG
elif whom == 3 : eggs = KA_EGG
elif whom == 4 : eggs = SU_EGG
elif whom == 5 : eggs = SH_EGG
if st.getQuestItemsCount(eggs) and progress in [19,20] :
st.takeItems(eggs,1)
st.set("cond","8")
if progress == 19 :
st.giveItems(3500+st.getRandom(3),1)
st.exitQuest(True)
st.playSound("ItemSound.quest_finish")
return "420_mymyu_15.htm"
elif progress == 20 :
return "420_mymyu_11.htm"
elif event == "give_dust" :
if st.getQuestItemsCount(FRY_DUST):
st.takeItems(FRY_DUST,1)
luck = st.getRandom(2)
if luck == 0 :
extra = ARMOR
qty = 1
htmltext = "420_mymyu_13.htm"
else :
extra = FOOD
qty = 100
htmltext = "420_mymyu_14.htm"
st.giveItems(3500+st.getRandom(3),1)
st.giveItems(extra,qty)
st.exitQuest(True)
st.playSound("ItemSound.quest_finish")
return htmltext
elif event == "no_dust" :
st.giveItems(3500+st.getRandom(3),1)
st.exitQuest(True)
st.playSound("ItemSound.quest_finish")
return "420_mymyu_12.htm"
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if id == COMPLETED:
st.setState(CREATED)
id = CREATED
progress = st.getInt("progress")
if npcId == PM_COOPER :
if id == CREATED :
return check_level(st)
elif id == STARTING and progress == 0 :
return "Starting.htm"
else :
return "Started.htm"
elif npcId == SG_CRONOS :
if id == STARTING :
if progress == 0 :
return "420_cronos_1.htm"
elif progress in [ 1,2,8,9 ] :
return check_stone(st,progress)
elif progress in [ 3,4,10,11 ] :
return "420_cronos_9.htm"
elif progress in [5,6,12,13 ]:
return "420_cronos_11.htm"
elif progress == 7 :
return "420_cronos_10.htm"
elif npcId == MC_MARIA :
if id == STARTING :
if ((progress in [ 1,8 ] ) and st.getQuestItemsCount(FSN_LIST)==1) or ((progress in [ 2,9 ] ) and st.getQuestItemsCount(FSN_LIST_DLX)==1):
return check_elements(st,progress)
elif progress in [ 3,4,5,6,7,10,11 ] :
return "420_maria_6.htm"
elif npcId == GD_BYRON :
if id == STARTING :
if ((progress in [ 1,8 ] ) and st.getQuestItemsCount(FSN_LIST)==1) or ((progress in [ 2,9 ] ) and st.getQuestItemsCount(FSN_LIST_DLX)==1):
return "420_byron_10.htm"
elif progress == 7 :
return "420_byron_9.htm"
elif (progress == 3 and st.getQuestItemsCount(FRY_STN)==1) or (progress == 4 and st.getQuestItemsCount(FRY_STN_DLX)==1):
return "420_byron_1.htm"
elif progress == 10 and st.getQuestItemsCount(FRY_STN)==1 :
st.set("progress","12")
return "420_byron_5.htm"
elif progress == 11 and st.getQuestItemsCount(FRY_STN_DLX)==1 :
st.set("progress","13")
return "420_byron_6.htm"
elif progress in [5,12] :
return "420_byron_7.htm"
elif progress in [6,13] :
return "420_byron_8.htm"
elif npcId == FR_MYMYU :
if id == STARTING :
if ( progress in [5,12] ) and st.getQuestItemsCount(FRY_STN) == 1 :
return "420_mymyu_1.htm"
elif ( progress in [6,13] ) and st.getQuestItemsCount(FRY_STN_DLX) == 1 :
return "420_mymyu_3.htm"
elif id == STARTED :
if progress < 14 and st.getQuestItemsCount(JUICE) == 1 :
return "420_mymyu_7.htm"
elif progress > 13 :
return check_eggs(st,"mymyu",progress)
elif npcId == DK_EXARION :
if id == STARTED :
if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
st.takeItems(JUICE,1)
return "420_exarion_1.htm"
elif progress > 13 and st.getQuestItemsCount(SCALE_1) == 1:
return check_eggs(st,"exarion",progress)
elif progress in [ 19,20 ] and st.getQuestItemsCount(EX_EGG) == 1 :
return "420_exarion_5.htm"
elif npcId == DK_ZWOV :
if id == STARTED :
if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
st.takeItems(JUICE,1)
return "420_zwov_1.htm"
elif progress > 13 and st.getQuestItemsCount(SCALE_2) == 1:
return check_eggs(st,"zwov",progress)
elif progress in [ 19,20 ] and st.getQuestItemsCount(ZW_EGG) == 1 :
return "420_zwov_5.htm"
elif npcId == DK_KALIBRAN :
if id == STARTED :
if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
st.takeItems(JUICE,1)
return "420_kalibran_1.htm"
elif progress > 13 and st.getQuestItemsCount(SCALE_3) == 1:
return check_eggs(st,"kalibran",progress)
elif progress in [ 19,20 ] and st.getQuestItemsCount(KA_EGG) == 1 :
return "420_kalibran_6.htm"
elif npcId == WM_SUZET :
if id == STARTED :
if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
st.takeItems(JUICE,1)
return "420_suzet_1.htm"
elif progress > 13 and st.getQuestItemsCount(SCALE_4) == 1:
return check_eggs(st,"suzet",progress)
elif progress in [ 19,20 ] and st.getQuestItemsCount(SU_EGG) == 1 :
return "420_suzet_6.htm"
elif npcId == WM_SHAMHAI :
if id == STARTED :
if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
st.takeItems(JUICE,1)
return "420_shamhai_1.htm"
elif progress > 13 and st.getQuestItemsCount(SCALE_5) == 1:
return check_eggs(st,"shamhai",progress)
elif progress in [ 19,20 ] and st.getQuestItemsCount(SH_EGG) == 1 :
return "420_shamhai_5.htm"
return "<html><body>I have nothing to say to you</body></html>"
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
id = st.getState()
npcId = npc.getNpcId()
# initial drop: toad lord back skins for the fairy stone lists
skins = st.getQuestItemsCount(TD_BCK_SKN)
if id == STARTING and (st.getQuestItemsCount(FSN_LIST) == 1 and skins < 10) or (st.getQuestItemsCount(FSN_LIST_DLX) == 1 and skins < 20) :
if npcId == TD_LORD :
count = 0
if st.getQuestItemsCount(FSN_LIST) == 1 :
count = 10
else :
count = 20
numItems, chance = divmod(BACK_DROP*Config.RATE_DROP_QUEST,100)
if st.getRandom(100) <= chance :
numItems += 1
numItems = int(numItems)
if numItems != 0 :
if count <= (skins + numItems) :
numItems = count - skins
st.playSound("ItemSound.quest_middle")
else :
st.playSound("ItemSound.quest_itemget")
st.giveItems(TD_BCK_SKN,numItems)
#dragon detection
elif id == STARTED and (st.get("progress") in [ "14","15","21","22" ]) :
whom = int(st.get("dragon"))
if whom == 1 :
eggs = EX_EGG
scale = SCALE_1
eggdropper = LO_LZRD_W
elif whom == 2 :
eggs = ZW_EGG
scale = SCALE_2
eggdropper = MS_SPIDER
elif whom == 3 :
eggs = KA_EGG
scale = SCALE_3
eggdropper = RD_SCVNGR
elif whom == 4 :
eggs = SU_EGG
scale = SCALE_4
eggdropper = BO_OVERLD
elif whom == 5 :
eggs = SH_EGG
scale = SCALE_5
eggdropper = DD_SEEKER
prevItems = st.getQuestItemsCount(eggs)
if st.getQuestItemsCount(scale) == 1 and prevItems < REQUIRED_EGGS :
if npcId == eggdropper :
chance = EGG_DROP*Config.RATE_DROP_QUEST
numItems, chance = divmod(chance,100)
if st.getRandom(100) <= chance :
numItems += 1
numItems = int(numItems)
if numItems != 0 :
if REQUIRED_EGGS <= (prevItems + numItems) :
numItems = REQUIRED_EGGS - prevItems
st.playSound("ItemSound.quest_middle")
else:
st.playSound("ItemSound.quest_itemget")
st.giveItems(eggs,numItems)
#fairy stone destruction
elif id == STARTING and st.getQuestItemsCount(FRY_STN_DLX) == 1 :
if npcId in range(20589,20600)+[20719]:
st.takeItems(FRY_STN_DLX,1)
st.set("progress","7")
return "you lost fairy stone deluxe!"
# Quest class and state definition
QUEST = Quest(420, qn, "Little Wings")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
# Quest initialization
QUEST.setInitialState(CREATED)
# Quest NPC starter initialization
QUEST.addStartNpc(PM_COOPER)
# Quest Item Drop initialization
for i in [3499]+range(3816,3832):
STARTING.addQuestDrop(PM_COOPER,i,1)
# Quest mob initialization
#back skins
QUEST.addKillId(TD_LORD)
#fairy stone dlx destroyers
for i in range(20589,20600)+[21797]:
QUEST.addKillId(i)
#eggs
QUEST.addKillId(LO_LZRD_W)
QUEST.addKillId(RD_SCVNGR)
QUEST.addKillId(MS_SPIDER)
QUEST.addKillId(DD_SEEKER)
QUEST.addKillId(BO_OVERLD)
# Quest NPC initialization
QUEST.addTalkId(PM_COOPER)
QUEST.addTalkId(SG_CRONOS)
QUEST.addTalkId(GD_BYRON)
QUEST.addTalkId(MC_MARIA)
QUEST.addTalkId(FR_MYMYU)
for i in range(30748,30753):
QUEST.addTalkId(i) | 36.319372 | 149 | 0.557878 | [
"Unlicense"
] | DigitalCoin1/L2SPERO | datapack/data/scripts/quests/420_LittleWings/__init__.py | 20,811 | Python |
space1 = 'X'
space2 = 'X'
space3 = 'X'
space4 = 'X'
space5 = 'X'
space6 = ' '
space7 = 'O'
space8 = ' '
space9 = ' '
print(' | | ')
print(' {} | {} | {} '.format(space1,space2,space3))
print(' | | ')
print('-----------')
print(' | | ')
print(' {} | {} | {} '.format(space4,space5,space6))
print(' | | ')
print('-----------')
print(' | | ')
print(' {} | {} | {} '.format(space7,space8,space9))
print(' | | ')
# top line winning check
if (space1 == space2) and (space1 == space3):
print('WIN')
#do the other winning options | 20.821429 | 53 | 0.4494 | [
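# A minimal sketch of one way to cover the remaining winning options, using a
# hypothetical list of the eight index triples (rows, columns, diagonals). It
# gathers the nine spaces into a list first; the names here are illustrative.
board = [space1, space2, space3,
         space4, space5, space6,
         space7, space8, space9]
winning_lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
                 (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
                 (0, 4, 8), (2, 4, 6)]              # diagonals
for a, b, c in winning_lines:
    # a line wins only if all three cells match and are not blank
    if board[a] != ' ' and board[a] == board[b] == board[c]:
        print('{} WINS'.format(board[a]))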
"MIT"
] | chriswright61/python_shorts | noughts_crosses.py | 583 | Python |
from machine import Pin
import utime
led = Pin(28, Pin.OUT)
onboard_led = Pin(25, Pin.OUT)
led.low()
onboard_led.high()
while True:
led.toggle()
onboard_led.toggle()
print("Toggle")
utime.sleep(0.5)
| 18.25 | 30 | 0.666667 | [
"MIT"
] | luisC62/RPi_Pico_Examples | blink_001.py | 219 | Python |
"""Package exports."""
from .wrapper import Simulation
from .handler import SimulationHandler
from .parser import Parser
from .strategy import Strategy, Sequence, Legacy, Matrix, Sobol
from . import modules
from ._version import __version__, __author__
| 28.222222 | 63 | 0.80315 | [
"MIT"
] | ischoegl/ctwrap | ctwrap/__init__.py | 254 | Python |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
""" Test creating a new user with an email is successful """
def test_create_user_with_email_successful(self):
payload = {'email': '[email protected]', 'password': '1111qqqq='}
user = get_user_model().objects.create_user(
email=payload['email'],
password=payload['password']
)
self.assertEqual(user.email, payload['email'])
self.assertTrue(user.check_password(payload['password']))
def test_create_user_with_lowercase_email(self):
""" Test creating a new user with an lowercase email words """
payload = {'email': '[email protected]', 'password': '1111qqqq='}
user = get_user_model().objects.create_user(
email=payload['email'],
password=payload['password']
)
self.assertEqual(user.email, payload['email'].lower())
def test_create_user_with_invalid_email(self):
""" Test creating a new user with an invalid email address """
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, "1234325")
def test_create_superuser_is_successful(self):
""" Test that create a new superuser """
user = get_user_model().objects.create_superuser("[email protected]", '1234')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 36.975 | 81 | 0.663962 | [
"MIT"
] | pudka/recipe-app-api | app/core/tests/test_models.py | 1,479 | Python |
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import operator
class table__c_m_a_p(DefaultTable.DefaultTable):
def getcmap(self, platformID, platEncID):
for subtable in self.tables:
if (subtable.platformID == platformID and
subtable.platEncID == platEncID):
return subtable
return None # not found
def decompile(self, data, ttFont):
tableVersion, numSubTables = struct.unpack(">HH", data[:4])
self.tableVersion = int(tableVersion)
self.tables = tables = []
seenOffsets = {}
for i in range(numSubTables):
platformID, platEncID, offset = struct.unpack(
">HHl", data[4+i*8:4+(i+1)*8])
platformID, platEncID = int(platformID), int(platEncID)
format, length = struct.unpack(">HH", data[offset:offset+4])
if format in [8,10,12,13]:
format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
elif format in [14]:
format, length = struct.unpack(">HL", data[offset:offset+6])
if not length:
print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset))
continue
table = CmapSubtable.newSubtable(format)
table.platformID = platformID
table.platEncID = platEncID
# Note that by default we decompile only the subtable header info;
# any other data gets decompiled only when an attribute of the
# subtable is referenced.
table.decompileHeader(data[offset:offset+int(length)], ttFont)
if offset in seenOffsets:
table.cmap = tables[seenOffsets[offset]].cmap
else:
seenOffsets[offset] = i
tables.append(table)
def compile(self, ttFont):
self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
numSubTables = len(self.tables)
totalOffset = 4 + 8 * numSubTables
data = struct.pack(">HH", self.tableVersion, numSubTables)
tableData = b""
seen = {} # Some tables are the same object reference. Don't compile them twice.
done = {} # Some tables are different objects, but compile to the same data chunk
for table in self.tables:
try:
offset = seen[id(table.cmap)]
except KeyError:
chunk = table.compile(ttFont)
if chunk in done:
offset = done[chunk]
else:
offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
tableData = tableData + chunk
data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
return data + tableData
def toXML(self, writer, ttFont):
writer.simpletag("tableVersion", version=self.tableVersion)
writer.newline()
for table in self.tables:
table.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == "tableVersion":
self.tableVersion = safeEval(attrs["version"])
return
if name[:12] != "cmap_format_":
return
if not hasattr(self, "tables"):
self.tables = []
format = safeEval(name[12:])
table = CmapSubtable.newSubtable(format)
table.platformID = safeEval(attrs["platformID"])
table.platEncID = safeEval(attrs["platEncID"])
table.fromXML(name, attrs, content, ttFont)
self.tables.append(table)
class CmapSubtable(object):
@staticmethod
def getSubtableClass(format):
"""Return the subtable class for a format."""
return cmap_classes.get(format, cmap_format_unknown)
@staticmethod
def newSubtable(format):
"""Return a new instance of a subtable for format."""
subtableClass = CmapSubtable.getSubtableClass(format)
return subtableClass(format)
def __init__(self, format):
self.format = format
self.data = None
self.ttFont = None
def __getattr__(self, attr):
# allow lazy decompilation of subtables.
if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
raise AttributeError(attr)
if self.data is None:
raise AttributeError(attr)
self.decompile(None, None) # use saved data.
self.data = None # Once this table has been decompiled, make sure we don't
# just return the original data. Also avoids recursion when
# called with an attribute that the cmap subtable doesn't have.
return getattr(self, attr)
def decompileHeader(self, data, ttFont):
format, length, language = struct.unpack(">HHH", data[:6])
assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
self.format = int(format)
self.length = int(length)
self.language = int(language)
self.data = data[6:]
self.ttFont = ttFont
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("language", self.language),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def getEncoding(self, default=None):
"""Returns the Python encoding name for this cmap subtable based on its platformID,
platEncID, and language. If encoding for these values is not known, by default
None is returned. That can be overriden by passing a value to the default
argument.
Note that if you want to choose a "preferred" cmap subtable, most of the time
self.isUnicode() is what you want as that one only returns true for the modern,
commonly used, Unicode-compatible triplets, not the legacy ones.
"""
return getEncoding(self.platformID, self.platEncID, self.language, default)
def isUnicode(self):
return (self.platformID == 0 or
(self.platformID == 3 and self.platEncID in [0, 1, 10]))
def isSymbol(self):
return self.platformID == 3 and self.platEncID == 0
def _writeCodes(self, codes, writer):
isUnicode = self.isUnicode()
for code, name in codes:
writer.simpletag("map", code=hex(code), name=name)
if isUnicode:
writer.comment(Unicode[code])
writer.newline()
def __lt__(self, other):
if not isinstance(other, CmapSubtable):
return NotImplemented
# implemented so that list.sort() sorts according to the spec.
selfTuple = (
getattr(self, "platformID", None),
getattr(self, "platEncID", None),
getattr(self, "language", None),
self.__dict__)
otherTuple = (
getattr(other, "platformID", None),
getattr(other, "platEncID", None),
getattr(other, "language", None),
other.__dict__)
return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
glyphIdArray = array.array("B")
glyphIdArray.fromstring(self.data)
self.cmap = cmap = {}
lenArray = len(glyphIdArray)
charCodes = list(range(lenArray))
names = map(self.ttFont.getGlyphName, glyphIdArray)
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", 0, 262, self.language) + self.data
charCodeList = sorted(self.cmap.items())
charCodes = [entry[0] for entry in charCodeList]
valueList = [entry[1] for entry in charCodeList]
assert charCodes == list(range(256))
valueList = map(ttFont.getGlyphID, valueList)
glyphIdArray = array.array("B", valueList)
data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring()
assert len(data) == 262
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH"
class SubHeader(object):
def __init__(self):
self.firstCode = None
self.entryCount = None
self.idDelta = None
self.idRangeOffset = None
self.glyphIndexArray = []
class cmap_format_2(CmapSubtable):
def setIDDelta(self, subHeader):
subHeader.idDelta = 0
# find the minGI which is not zero.
minGI = subHeader.glyphIndexArray[0]
for gid in subHeader.glyphIndexArray:
if (gid != 0) and (gid < minGI):
minGI = gid
# The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
# idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
# We would like to pick an idDelta such that the first glyphArray GID is 1,
# so that we are more likely to be able to combine glypharray GID subranges.
# This means that we have a problem when minGI is > 32K
# Since the final gi is reconstructed from the glyphArray GID by:
# (short)finalGID = (gid + idDelta) % 0x10000),
# we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
# negative number to an unsigned short.
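# Worked example (added for clarity): if minGI is 5, idDelta becomes 4 and every
# stored glyphIndexArray value is gid - 4, so the smallest stored value is 1; the
# decoder then recovers the real GID as (stored + idDelta) % 0x10000 = (1 + 4) = 5.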
if (minGI > 1):
if minGI > 0x7FFF:
subHeader.idDelta = -(0x10000 - minGI) -1
else:
subHeader.idDelta = minGI -1
idDelta = subHeader.idDelta
for i in range(subHeader.entryCount):
gid = subHeader.glyphIndexArray[i]
if gid > 0:
subHeader.glyphIndexArray[i] = gid - idDelta
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
subHeaderKeys = []
maxSubHeaderindex = 0
# get the key array, and determine the number of subHeaders.
allKeys = array.array("H")
allKeys.fromstring(data[:512])
data = data[512:]
if sys.byteorder != "big":
allKeys.byteswap()
subHeaderKeys = [ key//8 for key in allKeys]
maxSubHeaderindex = max(subHeaderKeys)
#Load subHeaders
subHeaderList = []
pos = 0
for i in range(maxSubHeaderindex + 1):
subHeader = SubHeader()
(subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
pos += 8
giDataPos = pos + subHeader.idRangeOffset-2
giList = array.array("H")
giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2])
if sys.byteorder != "big":
giList.byteswap()
subHeader.glyphIndexArray = giList
subHeaderList.append(subHeader)
# How this gets processed.
# Charcodes may be one or two bytes.
# The first byte of a charcode is mapped through the subHeaderKeys, to select
# a subHeader. For any subheader but 0, the next byte is then mapped through the
# selected subheader. If subheader Index 0 is selected, then the byte itself is
# mapped through the subheader, and there is no second byte.
# Then assume that the subsequent byte is the first byte of the next charcode, and repeat.
#
# Each subheader references a range in the glyphIndexArray whose length is entryCount.
# The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray
# referenced by another subheader.
# The only subheader that will be referenced by more than one first-byte value is the subheader
# that maps the entire range of glyphID values to glyphIndex 0, e.g. notdef:
# {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
# A byte being mapped through a subheader is treated as an index into a mapping of array index to font glyphIndex.
# A subheader specifies a subrange within (0...256) by the
# firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
# (e.g. glyph not in font).
# If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
# The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
# counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
# glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
# Example for Logocut-Medium
# first byte of charcode = 129; selects subheader 1.
# subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
# second byte of charCode = 66
# the index offset = 66-64 = 2.
# The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
# [glyphIndexArray index], [subrange array index] = glyphIndex
# [256], [0]=1 from charcode [129, 64]
# [257], [1]=2 from charcode [129, 65]
# [258], [2]=3 from charcode [129, 66]
# [259], [3]=4 from charcode [129, 67]
# So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
# add it to the glyphID to get the final glyphIndex
# value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
self.data = b""
self.cmap = cmap = {}
notdefGI = 0
for firstByte in range(256):
subHeadindex = subHeaderKeys[firstByte]
subHeader = subHeaderList[subHeadindex]
if subHeadindex == 0:
if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
continue # gi is notdef.
else:
charCode = firstByte
offsetIndex = firstByte - subHeader.firstCode
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue # gi is notdef.
cmap[charCode] = gi
else:
if subHeader.entryCount:
charCodeOffset = firstByte * 256 + subHeader.firstCode
for offsetIndex in range(subHeader.entryCount):
charCode = charCodeOffset + offsetIndex
gi = subHeader.glyphIndexArray[offsetIndex]
if gi != 0:
gi = (gi + subHeader.idDelta) % 0x10000
else:
continue
cmap[charCode] = gi
# If not subHeader.entryCount, then all char codes with this first byte are
# mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
# same as mapping it to .notdef.
# cmap values are GID's.
glyphOrder = self.ttFont.getGlyphOrder()
gids = list(cmap.values())
charCodes = list(cmap.keys())
lenCmap = len(gids)
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
kEmptyTwoCharCodeRange = -1
notdefGI = 0
items = sorted(self.cmap.items())
charCodes = [item[0] for item in items]
names = [item[1] for item in items]
nameMap = ttFont.getReverseGlyphMap()
lenCharCodes = len(charCodes)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 2 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
# Process the (char code to gid) item list in char code order.
# By definition, all one byte char codes map to subheader 0.
# For all the two byte char codes, we assume that the first byte maps to the empty subhead (with an entry count of 0,
# which defines all char codes in its range to map to notdef) unless proven otherwise.
# Note that since the char code items are processed in char code order, all the char codes with the
# same first byte are in sequential order.
subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
subHeaderList = []
# We force this subheader entry 0 to exist in the subHeaderList in the case where someone comes up
# with a cmap where all the one byte char codes map to notdef,
# with the result that the subhead 0 would not get created just by processing the item list.
charCode = charCodes[0]
if charCode > 255:
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 0
subHeaderList.append(subHeader)
lastFirstByte = -1
items = zip(charCodes, gids)
for charCode, gid in items:
if gid == 0:
continue
firstbyte = charCode >> 8
secondByte = charCode & 0x00FF
if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
if lastFirstByte > -1:
# fix GI's and iDelta of current subheader.
self.setIDDelta(subHeader)
# If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
# for the indices matching the char codes.
if lastFirstByte == 0:
for index in range(subHeader.entryCount):
charCode = subHeader.firstCode + index
subHeaderKeys[charCode] = 0
assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
# init new subheader
subHeader = SubHeader()
subHeader.firstCode = secondByte
subHeader.entryCount = 1
subHeader.glyphIndexArray.append(gid)
subHeaderList.append(subHeader)
subHeaderKeys[firstbyte] = len(subHeaderList) -1
lastFirstByte = firstbyte
else:
# need to fill in with notdefs all the code points between the last charCode and the current charCode.
codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
for i in range(codeDiff):
subHeader.glyphIndexArray.append(notdefGI)
subHeader.glyphIndexArray.append(gid)
subHeader.entryCount = subHeader.entryCount + codeDiff + 1
# fix GI's and idDelta of the last subheader that we added to the subheader array.
self.setIDDelta(subHeader)
# Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
subHeader = SubHeader()
subHeader.firstCode = 0
subHeader.entryCount = 0
subHeader.idDelta = 0
subHeader.idRangeOffset = 2
subHeaderList.append(subHeader)
emptySubheadIndex = len(subHeaderList) - 1
for index in range(256):
if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
subHeaderKeys[index] = emptySubheadIndex
# Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
# idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
# since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
# charcode 0 and GID 0.
idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
for index in range(subheadRangeLen):
subHeader = subHeaderList[index]
subHeader.idRangeOffset = 0
for j in range(index):
prevSubhead = subHeaderList[j]
if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
subHeader.glyphIndexArray = []
break
if subHeader.idRangeOffset == 0: # didn't find one.
subHeader.idRangeOffset = idRangeOffset
idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
else:
idRangeOffset = idRangeOffset - 8 # one less subheader
# Now we can write out the data!
length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
for subhead in subHeaderList[:-1]:
length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays.
dataList = [struct.pack(">HHH", 2, length, self.language)]
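# Note (added): the subHeaderKeys are written as byte offsets into the subheader
# array - each subheader record is 8 bytes - hence the index*8 below; decompile()
# reverses this with key//8 when it reads the 512-byte key block.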
for index in subHeaderKeys:
dataList.append(struct.pack(">H", index*8))
for subhead in subHeaderList:
dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
for subhead in subHeaderList[:-1]:
for gi in subhead.glyphIndexArray:
dataList.append(struct.pack(">H", gi))
data = bytesjoin(dataList)
assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
return data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H"
#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
#uint16 reservedPad # This value should be zero
#uint16 startCode[segCount] # Starting character code for each segment
#uint16 idDelta[segCount] # Delta for all character codes in segment
#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
#uint16 glyphIndexArray[variable] # Glyph index array
def splitRange(startCode, endCode, cmap):
# Try to split a range of character codes into subranges with consecutive
# glyph IDs in such a way that the cmap4 subtable can be stored "most"
# efficiently. I can't prove I've got the optimal solution, but it seems
# to do well with the fonts I tested: none became bigger, many became smaller.
if startCode == endCode:
return [], [endCode]
lastID = cmap[startCode]
lastCode = startCode
inOrder = None
orderedBegin = None
subRanges = []
# Gather subranges in which the glyph IDs are consecutive.
for code in range(startCode + 1, endCode + 1):
glyphID = cmap[code]
if glyphID - 1 == lastID:
if inOrder is None or not inOrder:
inOrder = 1
orderedBegin = lastCode
else:
if inOrder:
inOrder = 0
subRanges.append((orderedBegin, lastCode))
orderedBegin = None
lastID = glyphID
lastCode = code
if inOrder:
subRanges.append((orderedBegin, lastCode))
assert lastCode == endCode
# Now filter out those new subranges that would only make the data bigger.
# A new segment costs 8 bytes, not using a new segment costs 2 bytes per
# character.
newRanges = []
for b, e in subRanges:
if b == startCode and e == endCode:
break # the whole range, we're fine
if b == startCode or e == endCode:
threshold = 4 # split costs one more segment
else:
threshold = 8 # split costs two more segments
if (e - b + 1) > threshold:
newRanges.append((b, e))
subRanges = newRanges
if not subRanges:
return [], [endCode]
if subRanges[0][0] != startCode:
subRanges.insert(0, (startCode, subRanges[0][0] - 1))
if subRanges[-1][1] != endCode:
subRanges.append((subRanges[-1][1] + 1, endCode))
# Fill the "holes" in the segments list -- those are the segments in which
# the glyph IDs are _not_ consecutive.
i = 1
while i < len(subRanges):
if subRanges[i-1][1] + 1 != subRanges[i][0]:
subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1))
i = i + 1
i = i + 1
# Transform the ranges into startCode/endCode lists.
start = []
end = []
for b, e in subRanges:
start.append(b)
end.append(e)
start.pop(0)
assert len(start) + 1 == len(end)
return start, end
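# Illustrative sketch (not in the original module) of how splitRange() is meant to
# be used: given one run of char codes and the code->GID mapping, it returns the
# extra segment start codes (the caller already knows the first one) and the
# matching end codes, which compile() below appends to its segment lists. The
# sample mapping is made up purely for illustration.
def _exampleSplitRange():
	cmap = {0x20: 1, 0x21: 2, 0x22: 3, 0x23: 40, 0x24: 7, 0x25: 8}
	start, end = splitRange(0x20, 0x25, cmap)
	# splitRange() guarantees len(start) + 1 == len(end).
	return start, end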
class cmap_format_4(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
(segCountX2, searchRange, entrySelector, rangeShift) = \
struct.unpack(">4H", data[:8])
data = data[8:]
segCount = segCountX2 // 2
allCodes = array.array("H")
allCodes.fromstring(data)
self.data = data = None
if sys.byteorder != "big":
allCodes.byteswap()
# divide the data
endCode = allCodes[:segCount]
allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
startCode = allCodes[:segCount]
allCodes = allCodes[segCount:]
idDelta = allCodes[:segCount]
allCodes = allCodes[segCount:]
idRangeOffset = allCodes[:segCount]
glyphIndexArray = allCodes[segCount:]
lenGIArray = len(glyphIndexArray)
# build 2-byte character mapping
charCodes = []
gids = []
for i in range(len(startCode) - 1): # don't do 0xffff!
start = startCode[i]
delta = idDelta[i]
rangeOffset = idRangeOffset[i]
# *someone* needs to get killed.
partial = rangeOffset // 2 - start + i - len(idRangeOffset)
rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
charCodes.extend(rangeCharCodes)
if rangeOffset == 0:
gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
else:
for charCode in rangeCharCodes:
index = charCode + partial
assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
if glyphIndexArray[index] != 0: # if not missing glyph
glyphID = glyphIndexArray[index] + delta
else:
glyphID = 0 # missing glyph
gids.append(glyphID & 0xFFFF)
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
if lenCharCodes == 0:
startCode = [0xffff]
endCode = [0xffff]
else:
charCodes.sort()
names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 4 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
# Build startCode and endCode lists.
# Split the char codes in ranges of consecutive char codes, then split
# each range in more ranges of consecutive/not consecutive glyph IDs.
# See splitRange().
lastCode = charCodes[0]
endCode = []
startCode = [lastCode]
for charCode in charCodes[1:]: # skip the first code, it's the first start code
if charCode == lastCode + 1:
lastCode = charCode
continue
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(charCode)
lastCode = charCode
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(0xffff)
endCode.append(0xffff)
# build up rest of cruft
idDelta = []
idRangeOffset = []
glyphIndexArray = []
for i in range(len(endCode)-1): # skip the closing codes (0xffff)
indices = []
for charCode in range(startCode[i], endCode[i] + 1):
indices.append(cmap[charCode])
if (indices == list(range(indices[0], indices[0] + len(indices)))):
idDelta.append((indices[0] - startCode[i]) % 0x10000)
idRangeOffset.append(0)
else:
# someone *definitely* needs to get killed.
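# Explanation (added): idRangeOffset is a byte offset measured from this segment's
# own idRangeOffset word. From segment i there are (len(endCode) - i) remaining
# idRangeOffset words (2 bytes each, this one included), followed by the glyph
# indices already emitted, which is where this segment's slice will start - hence
# 2 * (len(endCode) + len(glyphIndexArray) - i).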
idDelta.append(0)
idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
glyphIndexArray.extend(indices)
idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
idRangeOffset.append(0)
# Insane.
segCount = len(endCode)
segCountX2 = segCount * 2
searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
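# Note (added): per the OpenType spec these are purely derived header fields:
# searchRange = 2 * 2**floor(log2(segCount)), entrySelector = floor(log2(segCount)),
# rangeShift = segCountX2 - searchRange.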
charCodeArray = array.array("H", endCode + [0] + startCode)
idDeltaArray = array.array("H", idDelta)
restArray = array.array("H", idRangeOffset + glyphIndexArray)
if sys.byteorder != "big":
charCodeArray.byteswap()
idDeltaArray.byteswap()
restArray.byteswap()
data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring()
length = struct.calcsize(cmap_format_4_format) + len(data)
header = struct.pack(cmap_format_4_format, self.format, length, self.language,
segCountX2, searchRange, entrySelector, rangeShift)
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
nameMap, attrsMap, dummyContent = element
if nameMap != "map":
assert 0, "Unrecognized keyword in cmap subtable"
cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
firstCode, entryCount = struct.unpack(">HH", data[:4])
firstCode = int(firstCode)
data = data[4:]
#assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
glyphIndexArray = array.array("H")
glyphIndexArray.fromstring(data[:2 * int(entryCount)])
if sys.byteorder != "big":
glyphIndexArray.byteswap()
self.data = data = None
self.cmap = cmap = {}
lenArray = len(glyphIndexArray)
charCodes = list(range(firstCode, firstCode + lenArray))
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, glyphIndexArray ))
list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
cmap = self.cmap
codes = list(cmap.keys())
if codes: # yes, there are empty cmap tables.
codes = list(range(codes[0], codes[-1] + 1))
firstCode = codes[0]
valueList = [cmap.get(code, ".notdef") for code in codes]
valueList = map(ttFont.getGlyphID, valueList)
glyphIndexArray = array.array("H", valueList)
if sys.byteorder != "big":
glyphIndexArray.byteswap()
data = glyphIndexArray.tostring()
else:
data = b""
firstCode = 0
header = struct.pack(">HHHHH",
6, len(data) + 10, self.language, firstCode, len(codes))
return header + data
def fromXML(self, name, attrs, content, ttFont):
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
def __init__(self, format):
self.format = format
self.reserved = 0
self.data = None
self.ttFont = None
def decompileHeader(self, data, ttFont):
format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
self.format = format
self.reserved = reserved
self.length = length
self.language = language
self.nGroups = nGroups
self.data = data[16:]
self.ttFont = ttFont
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
charCodes = []
gids = []
pos = 0
for i in range(self.nGroups):
startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
pos += 12
lenGroup = 1 + endCharCode - startCharCode
charCodes.extend(list(range(startCharCode, endCharCode +1)))
gids.extend(self._computeGIDs(glyphID, lenGroup))
self.data = data = None
self.cmap = cmap = {}
lenCmap = len(gids)
glyphOrder = self.ttFont.getGlyphOrder()
try:
names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
except IndexError:
getGlyphName = self.ttFont.getGlyphName
names = list(map(getGlyphName, gids ))
list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
def compile(self, ttFont):
if self.data:
return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
charCodes = list(self.cmap.keys())
lenCharCodes = len(charCodes)
names = list(self.cmap.values())
nameMap = ttFont.getReverseGlyphMap()
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
except KeyError:
# allow virtual GIDs in format 12 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = eval(name[3:])
else:
gid = ttFont.getGlyphID(name)
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
charCodes.sort()
index = 0
startCharCode = charCodes[0]
startGlyphID = cmap[startCharCode]
lastGlyphID = startGlyphID - self._format_step
lastCharCode = startCharCode - 1
nGroups = 0
dataList = []
maxIndex = len(charCodes)
for index in range(maxIndex):
charCode = charCodes[index]
glyphID = cmap[charCode]
if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
startCharCode = charCode
startGlyphID = glyphID
nGroups = nGroups + 1
lastGlyphID = glyphID
lastCharCode = charCode
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
nGroups = nGroups + 1
data = bytesjoin(dataList)
lengthSubtable = len(data) +16
assert len(data) == (nGroups*12) == (lengthSubtable-16)
return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("reserved", self.reserved),
("length", self.length),
("language", self.language),
("nGroups", self.nGroups),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.reserved = safeEval(attrs["reserved"])
self.length = safeEval(attrs["length"])
self.language = safeEval(attrs["language"])
self.nGroups = safeEval(attrs["nGroups"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
_format_step = 1
def __init__(self, format=12):
cmap_format_12_or_13.__init__(self, format)
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return list(range(startingGlyph, startingGlyph + numberOfGlyphs))
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode)
class cmap_format_13(cmap_format_12_or_13):
_format_step = 0
def __init__(self, format=13):
cmap_format_12_or_13.__init__(self, format)
def _computeGIDs(self, startingGlyph, numberOfGlyphs):
return [startingGlyph] * numberOfGlyphs
def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode)
def cvtToUVS(threeByteString):
data = b"\0" + threeByteString
val, = struct.unpack(">L", data)
return val
def cvtFromUVS(val):
assert 0 <= val < 0x1000000
fourByteString = struct.pack(">L", val)
return fourByteString[1:]
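# Added note: these two helpers pack/unpack the 24-bit (uint24) values used by the
# format 14 subtable, e.g. cvtToUVS(b"\x01\x02\x03") == 0x010203 and
# cvtFromUVS(0x010203) == b"\x01\x02\x03".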
class cmap_format_14(CmapSubtable):
def decompileHeader(self, data, ttFont):
format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
self.data = data[10:]
self.length = length
self.numVarSelectorRecords = numVarSelectorRecords
self.ttFont = ttFont
self.language = 0xFF # has no language.
def decompile(self, data, ttFont):
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
uvsDict = {}
recOffset = 0
for n in range(self.numVarSelectorRecords):
uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
recOffset += 11
varUVS = cvtToUVS(uvs)
if defOVSOffset:
startOffset = defOVSOffset - 10
numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
for r in range(numValues):
uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
startOffset += 4
firstBaseUV = cvtToUVS(uv)
cnt = addtlCnt+1
baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
glyphList = [None]*cnt
localUVList = zip(baseUVList, glyphList)
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = list(localUVList)
if nonDefUVSOffset:
startOffset = nonDefUVSOffset - 10
numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
localUVList = []
for r in range(numRecs):
uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
startOffset += 5
uv = cvtToUVS(uv)
glyphName = self.ttFont.getGlyphName(gid)
localUVList.append( [uv, glyphName] )
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = localUVList
self.uvsDict = uvsDict
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("length", self.length),
("numVarSelectorRecords", self.numVarSelectorRecords),
])
writer.newline()
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
for uvs in uvsList:
uvList = uvsDict[uvs]
uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
for uv, gname in uvList:
if gname is None:
gname = "None"
# I use the arg rather than the keyword syntax in order to preserve the attribute order.
writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] )
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.format = safeEval(attrs["format"])
self.length = safeEval(attrs["length"])
self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"])
self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
if not hasattr(self, "cmap"):
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
if not hasattr(self, "uvsDict"):
self.uvsDict = {}
uvsDict = self.uvsDict
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
uvs = safeEval(attrs["uvs"])
uv = safeEval(attrs["uv"])
gname = attrs["name"]
if gname == "None":
gname = None
try:
uvsDict[uvs].append( [uv, gname])
except KeyError:
uvsDict[uvs] = [ [uv, gname] ]
def compile(self, ttFont):
if self.data:
return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
self.numVarSelectorRecords = len(uvsList)
offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
data = []
varSelectorRecords =[]
for uvs in uvsList:
entryList = uvsDict[uvs]
defList = [entry for entry in entryList if entry[1] is None]
if defList:
defList = [entry[0] for entry in defList]
defOVSOffset = offset
defList.sort()
lastUV = defList[0]
cnt = -1
defRecs = []
for defEntry in defList:
cnt +=1
if (lastUV+cnt) != defEntry:
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
lastUV = defEntry
defRecs.append(rec)
cnt = 0
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
defRecs.append(rec)
numDefRecs = len(defRecs)
data.append(struct.pack(">L", numDefRecs))
data.extend(defRecs)
offset += 4 + numDefRecs*4
else:
defOVSOffset = 0
ndefList = [entry for entry in entryList if entry[1] is not None]
if ndefList:
nonDefUVSOffset = offset
ndefList.sort()
numNonDefRecs = len(ndefList)
data.append(struct.pack(">L", numNonDefRecs))
offset += 4 + numNonDefRecs*5
for uv, gname in ndefList:
gid = ttFont.getGlyphID(gname)
ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
data.append(ndrec)
else:
nonDefUVSOffset = 0
vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
varSelectorRecords.append(vrec)
data = bytesjoin(varSelectorRecords) + bytesjoin(data)
self.length = 10 + len(data)
headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
self.data = headerdata + data
return self.data
class cmap_format_unknown(CmapSubtable):
def toXML(self, writer, ttFont):
cmapName = self.__class__.__name__[:12] + str(self.format)
writer.begintag(cmapName, [
("platformID", self.platformID),
("platEncID", self.platEncID),
])
writer.newline()
writer.dumphex(self.data)
writer.endtag(cmapName)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.data = readHex(content)
self.cmap = {}
def decompileHeader(self, data, ttFont):
self.language = 0 # dummy value
self.data = data
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
def compile(self, ttFont):
if self.data:
return self.data
else:
return None
cmap_classes = {
0: cmap_format_0,
2: cmap_format_2,
4: cmap_format_4,
6: cmap_format_6,
12: cmap_format_12,
13: cmap_format_13,
14: cmap_format_14,
}
| 35.289026 | 192 | 0.698888 | [
"MIT"
] | johanoren/IncrementalNumbers | FontTools/fontTools/ttLib/tables/_c_m_a_p.py | 45,664 | Python |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Exponential distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Exponential",
"ExponentialWithSoftplusRate",
]
@tf_export("distributions.Exponential")
class Exponential(gamma.Gamma):
"""Exponential distribution.
The Exponential distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; lambda, x > 0) = exp(-lambda x) / Z
Z = 1 / lambda
```
where `rate = lambda` and `Z` is the normalizing constant.
The Exponential distribution is a special case of the Gamma distribution,
i.e.,
```python
Exponential(rate) = Gamma(concentration=1., rate)
```
The Exponential distribution uses a `rate` parameter, or "inverse scale",
which can be intuited as,
```none
X ~ Exponential(rate=1)
Y = X / rate
```
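#### Examples
A minimal usage sketch (added for illustration, not from the original docstring;
it assumes the graph-mode TF 1.x API used by this module):
```python
dist = Exponential(rate=[1.0, 2.0])
dist.mean()        # ==> [1.0, 0.5], i.e. 1 / rate
dist.sample(5)     # ==> Tensor with shape [5, 2]
dist.log_prob(1.)  # ==> elementwise log-density evaluated at x = 1
```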
"""
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="Exponential"):
"""Construct Exponential distribution with parameter `rate`.
Args:
rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
# Even though all statistics are defined for valid inputs, this is not
# true in the parent class "Gamma." Therefore, passing
# allow_nan_stats=True
# through to the parent class results in unnecessary asserts.
with ops.name_scope(name, values=[rate]):
self._rate = ops.convert_to_tensor(rate, name="rate")
super(Exponential, self).__init__(
concentration=array_ops.ones([], dtype=self._rate.dtype),
rate=self._rate,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
# While the Gamma distribution is not reparameterizable, the exponential
# distribution is.
self._reparameterization_type = True
self._parameters = parameters
self._graph_parents += [self._rate]
@staticmethod
def _param_shapes(sample_shape):
return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def rate(self):
return self._rate
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
sampled = random_ops.random_uniform(
shape,
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
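# Inverse-CDF (inverse transform) sampling: if U ~ Uniform(0, 1], then
# -log(U) / rate is distributed as Exponential(rate).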
return -math_ops.log(sampled) / self._rate
class ExponentialWithSoftplusRate(Exponential):
"""Exponential with softplus transform on `rate`."""
def __init__(self,
rate,
validate_args=False,
allow_nan_stats=True,
name="ExponentialWithSoftplusRate"):
parameters = locals()
with ops.name_scope(name, values=[rate]):
super(ExponentialWithSoftplusRate, self).__init__(
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
| 35.88961 | 81 | 0.667451 | [
"Apache-2.0",
"MIT"
] | Soum-Soum/Tensorflow_Face_Finder | venv1/Lib/site-packages/tensorflow/python/ops/distributions/exponential.py | 5,527 | Python |
#from numba import jit
import numpy as np
#from joblib import Parallel, delayed, parallel_backend
#from joblib import load, dump
#import tempfile
#import shutil
#import os
#
#import sys
#sys.path.append('pyunicorn_timeseries')
#from pyunicorn_timeseries.surrogates import Surrogates
def set_model_constants(xx=50.E3,nx=100,va=10.,tmax=60*360*24*3600.,avep=24*3600.,dt=3600.,period=3600*24*360*1,B=2.,T0=273.15+6,dT=2.,Cs=1.E-3,Cp=1030.,ra=1.5,ro=1030.,ri=900.,Cpo=4.E3,Cpi=2.9E3,H=200.,vo=0.2,Hb=1.E3,Li=3.3E6,Tf=273.15-1.8,SW0=50.,SW_anom=100.,emissivity=0.99,Da=1.E6,Do=5.E2,tau_entrainment=30*24*3600.,**args):
'''Set up the model constants. All constants have default values, but any of them can be overridden, and arbitrary extra entries added, via **args.'''
#
C={}
C['xx'] = xx #grid size in [m]
C['nx'] = nx #number of grid cell - the total width of the domain is xx*nx long
C['va'] = va #wind in m/s
#
C['tmax'] = tmax #tmax seconds
C['dt'] = dt #timestep
#
C['avep'] = avep #averaging period in seconds
#
C['period'] = period #period of boundary restoring
C['Cs'] = Cs #exchange coefficient for bulk formula
C['Cp'] = Cp #air heat capacity
C['ra'] = ra #density of air [kg/m3]
C['ro'] = ro #density of sea water [kg/m3]
C['ri'] = ri #density of sea ice [kg/m3]
C['Cpo'] = Cpo #sea water heat capacity
C['T0'] = T0 #initial temp in degC
C['dT'] = dT #initial temp perturbation
C['H'] = H #mixed layer depth in ocean [m]
C['vo'] = vo #ocean current speed [m/s]
C['Hb'] = Hb #boundary layer height in the atmosphere [m]
C['Cpi'] = Cpi #sea ice heat capacity [J/ Kg K]
C['Li'] = Li #Latent heat of fusion of sea water [J / kg K]
C['Tf'] = Tf #Freezing point of sea water [C]
C['B'] = B # long-wave radiation constant [W/m2]
C['emissivity'] = emissivity #surface emissivity
C['SW0'] = SW0 # background net downwelling SW radiation
C['SW_anom']= SW_anom # amplitude of annual cycle in SW radiation
C['Da'] = Da # atmospheric diffusion [m2/s]
C['Do'] = Do # ocean diffusion [m2/s]
C['tau_entrainment'] = tau_entrainment # ocean entrainment/damping timescale
for var in args.keys():
C[var]=args[var]
#
return C
def CoupledChannel(C,forcing, T_boundary=None, dt_f=30*24*3600, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):
'''
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
C: dict of model constants from set_model_constants(); it carries e.g. tmax (running time in seconds), avep (averaging period for the output) and T0 (initial temperature)
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
T_boundary: boundary restoring temperature time series; defaults to a constant C['T0']
dt_f: timestep of the forcing
ice_model: boolean, switch the diagnostic sea ice model on/off
atm_adv: boolean, advective atmosphere (ocean advection is controlled by C['vo']; set vo=0 for a slab ocean)
A commented usage sketch follows this function.
'''
#
# number of simulation timesteps and output timesteps
nt = int(C['tmax']/C['dt']) #simulation
nt1 = int(C['tmax']/C['avep']) #output
# rtas = np.random.rand(C['nx'])
# initialize the model variables, first dimension is due to 2 timesteps deep scheme
sst = C['T0']*np.ones((2,C['nx']))
tas = C['T0']*np.ones((2,C['nx'])) #+rtas
hice = np.zeros((2,C['nx']))
# INCOMING SHORTWAVE RADIATION
SW0 = np.tile(C['SW0'][:,np.newaxis],(1,nt))
naxis = np.tile(np.arange(nt)[np.newaxis,],(C['nx'],1))
SW_warming = np.max(np.concatenate([(SW0-C['SW_anom']*np.cos(2*np.pi*(naxis*C['dt'])/(360*24*3600)))[np.newaxis,],np.zeros((C['nx'],nt))[np.newaxis,]],axis=0),0)
# If boundary conditions are not defined, then set initially to T0
if np.all(T_boundary==None):
T_boundary=C['T0']*np.ones(nt)
#
sst_boundary=T_boundary[0]*np.ones((2)) #nt+1
# evolve_boundary=True
#else:
# sst_boundary=np.concatenate((sst_boundary[np.newaxis,],sst_boundary[np.newaxis,]),axis=0)
# evolve_boundary=False
#
# interpolate forcing to the new timescale
if np.all(forcing!=None):
forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)
else:
forcing = np.zeros(nt+1)
#
# initialize outputs
sst_out = np.zeros((nt1,C['nx']))
tas_out = np.zeros((nt1,C['nx']))
hice_out = np.zeros((nt1,C['nx']))
sflx_f_out = np.zeros((nt1,C['nx'])) #forcing
sflx_out = np.zeros((nt1,C['nx']))
# spatial pattern of the forcing - defaults to uniform (ones) if not specified
if np.all(spatial_pattern==None):
spatial_pattern=np.ones(C['nx'])
#
if np.all(atm_DA_tendencies!=None):
use_atm_tendencies=True
else:
use_atm_tendencies=False
if np.all(ocn_DA_tendencies!=None):
use_ocn_tendencies=True
else:
use_ocn_tendencies=False
#
if return_coupled_fluxes:
atm_DA_tendencies = np.zeros((nt,C['nx']))
ocn_DA_tendencies = np.zeros((nt,C['nx']))
# initialize counters
c=0; c2=0; c3=0; n=1
#####################
# --- TIME LOOP ---
#####################
for nn in range(nt):
#
# FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified
sflx=forcing[nn]*spatial_pattern #+ forcing[nn]*random_amp*np.random.rand(C['nx'])
#
# save the forcing component
#
sflx_f_out[c,:]=sflx_f_out[c,:]+sflx
#
# SURFACE HEAT FLUXES
# Add sensible heat flux to the total surface flux in W/m**-2
sflx=sflx+C['ra']*C['Cp']*C['va']*C['Cs']*(sst[n-1,:]-tas[n-1,:])
# RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean
LW_cooling = C['emissivity']*5.67E-8*(tas[n-1,:]**4)
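# Added note: the sensible heat flux above is the standard bulk aerodynamic formula
# Q = rho_air * Cp * |v| * Cs * (SST - Tair), and 5.67E-8 W m-2 K-4 is the
# Stefan-Boltzmann constant used for the long-wave terms.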
#
# OCEAN BOUNDARY CONDITION
#if evolve_boundary:
sst_boundary_tendency=SW_warming[0,nn]*C['dt']/(C['H']*C['Cpo']*C['ro'])-C['emissivity']*5.67E-8*(sst_boundary[n-1]**4)*C['dt']/(C['H']*C['Cpo']*C['ro'])+(T_boundary[nn]-sst_boundary[n-1])*C['dt']/C['period']
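# The three terms above: net SW warming, Stefan-Boltzmann LW cooling, and relaxation
# of the boundary SST towards T_boundary on the timescale C['period'].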
############################################
#
# ATMOSPHERE
#
############################################
#
# ADVECTION
#
# set atm_adv=False is no atmospheric advection - note that we still need to know the wind speed to resolve heat fluxes
if atm_adv:
a_adv = np.concatenate([sst_boundary[n-1]-tas[n-1,:1],tas[n-1,:-1]-tas[n-1,1:]],axis=0)*(C['va']*C['dt']/C['xx'])
else:
a_adv = 0
#
# DIFFUSION
#
a_diff = (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2))
a_diff0 = (tas[n-1,1]+sst_boundary[n-1]-2*tas[n-1,0])*(C['Da']*C['dt']/(C['xx']**2))
a_diff = np.concatenate([np.array([a_diff0]),a_diff,a_diff[-1:]],axis=0)
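# Added note: this is an explicit (FTCS) Laplacian; for numerical stability the
# diffusive number Da*dt/xx**2 should stay below roughly 0.5 (and likewise
# Do*dt/xx**2 for the ocean diffusion further down).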
#
# SURFACE FLUXES
#
a_netsflx = (sflx*C['dt'])/(C['Hb']*C['Cp']*C['ra']) - LW_cooling*C['dt']/(C['Hb']*C['Cp']*C['ra'])
#
#
if return_coupled_fluxes:
atm_DA_tendencies[nn,:] = a_adv + a_diff
#
# ATM UPDATE
#
if use_atm_tendencies:
tas[n,:] = tas[n-1,:] + a_netsflx + atm_DA_tendencies[c3,:]
else:
tas[n,:] = tas[n-1,:] + a_netsflx + a_adv + a_diff
#
################################################
#
# OCEAN
#
################################################
# AND DIFFUSION + ENTRAINMENT
# ocean advection
#
# ADVECTION set vo=0 for stagnant ocean (slab)
#
o_adv = np.concatenate([sst_boundary[n-1]-sst[n-1,:1],sst[n-1,:-1]-sst[n-1,1:]],axis=0)*(C['vo']*C['dt']/C['xx'])
#
# DIFFUSION
#
o_diff = (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2))
o_diff0 = (sst[n-1,1]+sst_boundary[n-1]-2*sst[n-1,0])*(C['Do']*C['dt']/(C['xx']**2))
o_diff = np.concatenate([np.array([o_diff0]),o_diff,o_diff[-1:]],axis=0)
#
# ENTRAINMENT - RESTORING TO AN AMBIENT WATER MASS (CAN BE SEEN AS LATERAL OR VERTICAL MIXING)
# set tau_entrainment=0 for no entrainment
if C['tau_entrainment']>0:
o_entrain = (C['T0']-sst[n-1,:])*C['dt']/C['tau_entrainment']
else:
o_entrain = 0
#
# SURFACE FLUXES
#
o_netsflx = -sflx*C['dt']/(C['H']*C['Cpo']*C['ro'])+SW_warming[:,nn]*C['dt']/(C['H']*C['Cpo']*C['ro'])
#
if return_coupled_fluxes:
ocn_DA_tendencies[nn,:] = o_adv + o_diff + o_entrain
#
# OCN update
if use_ocn_tendencies:
sst[n,:] = sst[n-1,:] + o_netsflx + ocn_DA_tendencies[c3,:]
else:
sst[n,:] = sst[n-1,:] + o_netsflx + o_adv + o_diff + o_entrain
#
if ice_model:
# THIS IS A DIAGNOSTIC SEA ICE MODEL
#
# SST is first allowed to cool below freezing and then we form sea ice from the excess_freeze
# i.e. the amount of heat that would be used to cool SST below freezing is converted to ice instead.
# Similarly, SST is allowed to warm above Tf even if there is ice, and then excess_melt,
# i.e. the amount of heat that is used to warm the water is first used to melt ice,
# and then the rest can warm the water.
#
# This scheme conserves energy - it simply switches it between ocean and ice storages
#
# advection
#hice[n-1,1:]=hice[n-1,1:]-(hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])
#dhice = (hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])
#hice[n-1,:-1] = hice[n-1,:-1] -dhice
#hice[n-1,-1] = hice[n-1,-1] + dhice[-1]
#
ice_mask = (hice[n-1,:]>0).astype(float) #cells where there is ice to melt
freezing_mask = (sst[n,:]<C['Tf']).astype(float) #cells where freezing will happen
# change in energy
dEdt = C['H']*C['ro']*C['Cpo']*(sst[n,:]-sst[n-1,:])/C['dt']
# negative change in energy will produce ice whenever the water would otherwise cool below freezing
excess_freeze = freezing_mask*np.max([-dEdt,np.zeros(C['nx'])],axis=0)
# positive change will melt ice where there is ice
excess_melt = ice_mask*np.max([dEdt,np.zeros(C['nx'])],axis=0)
# note that freezing and melting will never happen at the same time in the same cell
# freezing
dhice_freeze = C['dt']*excess_freeze/(C['Li']*C['ri'])
# melting
dhice_melt= C['dt']*excess_melt/(C['Li']*C['ri'])
# update
hice[n,:] = hice[n-1,:] + dhice_freeze - dhice_melt
# check how much energy was used for melting sea ice - remove this energy from ocean
hice_melt = (dhice_melt>0).astype(float)*np.min([dhice_melt,hice[n-1,:]],axis=0)
# Do not allow ice to be negative - that energy is kept in the ocean all the time.
# The line above ensures that not more energy than is needed to melt the whole ice cover
# is removed from the ocean at any given time
hice[n,:] = np.max([hice[n,:],np.zeros(C['nx'])],axis=0)
#
# Update SST
# Give back the energy that was used for freezing (will keep the water temperature above freezing)
sst[n,:] = sst[n,:] + C['dt']*excess_freeze/(C['H']*C['Cpo']*C['ro'])
# take out the heat that was used to melt ice
# (need to cap to hice, the extra heat is never used and will stay in the ocean)
sst[n,:] = sst[n,:] - hice_melt*(C['Li']*C['ri'])/(C['ro']*C['Cpo']*C['H'])
#
#############################
# --- PREPARE OUTPUT ----
#############################
# accumulate output
tas_out[c,:] = tas_out[c,:]+tas[n,:]
sst_out[c,:] = sst_out[c,:]+sst[n,:]
hice_out[c,:] = hice_out[c,:]+hice[n,:]
sflx_out[c,:] = sflx_out[c,:]+sflx
# accumulate averaging counter
c2=c2+1
c3=c3+1
if ((nn+1)*C['dt'])%(360*24*3600)==0:
#print(nn)
c3=0
#calculate the average for the output
if (((nn+1)*C['dt'])%C['avep']==0 and nn>0):
tas_out[c,:] = tas_out[c,:]/c2
sst_out[c,:] = sst_out[c,:]/c2
sflx_out[c,:] = sflx_out[c,:]/c2
sflx_f_out[c,:] = sflx_f_out[c,:]/c2
hice_out[c,:] = hice_out[c,:]/c2
# update counters
c = c+1
c2 = 0
if ((nn+1)*C['dt'])%(360*24*3600)==0:
print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])
#update the variables
tas[0,:] = tas[1,:].copy()
sst[0,:] = sst[1,:].copy()
hice[0,:] = hice[1,:].copy()
# SST at the boundary
sst_boundary[n-1]=sst_boundary[n-1]+sst_boundary_tendency
#
#
# if there is no ice, set to nan
hice_out[np.where(hice_out==0)]=np.nan
#
if return_coupled_fluxes:
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies
else:
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt
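# A minimal usage sketch (added; hypothetical values, not from any original run).
# Note that CoupledChannel() indexes C['SW0'] as an array, so SW0 should be passed
# with one value per grid cell:
# C = set_model_constants(nx=50, tmax=10*360*24*3600, SW0=50.*np.ones(50))
# forcing = 5.*np.random.randn(121) # monthly surface heat-flux anomalies [W/m2]
# tas, sst, hice, sflx, sflx_f, nt1, nt = CoupledChannel(C, forcing, dt_f=30*24*3600)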
#@jit(nopython=True)
def CoupledChannel_time(nt,nx,xx,dt,avep,sst,tas,hice,sst_boundary,sst_out,tas_out,hice_out,sflx_f_out,sflx_out,forcing,spatial_pattern,ra,Cp,va,vo,Da,Do,Cs,T0,Tf,emissivity,SW0,SW_anom,H,Hb,Cpo,ro,tau_entrainment,Li,ri,use_ocn_tendencies,use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies,ice_model,atm_adv,return_coupled_fluxes):
'''
Separate time loop to enable numba
'''
#initialize counters
c=0; c2=0; c3=0; n=1
#####################
# --- TIME LOOP ---
#####################
for nn in range(nt):
#
# FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified
sflx=forcing[nn]*spatial_pattern #+ forcing[nn]*random_amp*np.random.rand(C['nx'])
#
# save the forcing component
#
sflx_f_out[c,:]=sflx_f_out[c,:]+sflx
#
# SURFACE HEAT FLUXES
# Add sensible heat flux to the total surface flux in W/m**-2
sflx=sflx+ra*Cp*va*Cs*(sst[n-1,:]-tas[n-1,:])
# RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean
LW_cooling = emissivity*5.67E-8*(tas[n-1,:]**4)
SW_warming = SW0+max(SW_anom*np.sin(2*float(nn)*dt*np.pi/(360*24*3600)),0.0)
#net_radiation = SW_warming-LW_cooling
net_radiation = -LW_cooling
#
# OCEAN BOUNDARY CONDITION - SET dT to zero to suppress the sin
sst_boundary[n]=sst_boundary[n-1]+SW_warming[0]*dt/(H*Cpo*ro)-emissivity*5.67E-8*(sst_boundary[n-1]**4)*dt/(H*Cpo*ro)+(T0-sst_boundary[n-1])*dt/(360*24*3600) #C['T0']+C['dT']*np.sin(nn*C['dt']*np.pi/C['period']) +
#
# ATMOSPHERE - ADVECTION AND DIFFUSION
# set atm_adv=False is no atmospheric advection - note that we need to know the wind speed to resolve heat fluxes
if atm_adv:
a_adv = np.concatenate((sst_boundary[n-1]-tas[n-1,:1],tas[n-1,:-1]-tas[n-1,1:]),axis=0)*(va*dt/xx)
#tas[n,0]=tas[n-1,0]+(C['T0']-tas[n-1,0])*(C['va']*C['dt']/C['xx']) #always constant temperature blowing over the ocean from land
#tas[n,0]=tas[n-1,0]+(sst[n,0]-tas[n-1,0])*(C['va']*C['dt']/C['xx']) #atmospheric temperature at the boundary is in equilibrium with the ocean
#tas[n,1:]=tas[n-1,1:]+(tas[n-1,:-1]-tas[n-1,1:])*(C['va']*C['dt']/C['xx'])
else:
#tas[n,:] = tas[n-1,0]
a_adv = np.zeros(nx)
#
# DIFFUSION
#
#tas[n,1:-1] = tas[n,1:-1] + (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2))
a_diff = (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(Da*dt/(xx**2))
a_diff0 = (tas[n-1,1]+sst_boundary[n-1]-2*tas[n-1,0])*(Da*dt/(xx**2))
a_diff = np.concatenate((np.array([a_diff0]),a_diff,a_diff[-1:]),axis=0)
#
# ATMOSPHERE - SURFACE FLUXES
#
a_netsflx = (sflx*dt)/(Hb*Cp*ra) + net_radiation*dt/(Hb*Cp*ra)
#
# full update
#
#
if return_coupled_fluxes:
atm_DA_tendencies[nn,:]=np.sum((a_adv,a_diff),axis=0)
#
if use_atm_tendencies:
tas[n,:] = tas[n-1,:] + a_netsflx + atm_DA_tendencies[c3,:]
else:
tas[n,:] = tas[n-1,:] + a_netsflx + a_adv + a_diff
#
# OCEAN - ADVECTION AND DIFFUSION + ENTRAINMENT
# ocean advection
# set vo=0 for stagnant ocean (slab)
#
#sst[n,1:] = sst[n-1,1:]+(sst[n-1,:-1]-sst[n-1,1:])*(1-ocn_mixing_ratio)*(C['vo']*C['dt']/C['xx'])+(C['T0']-sst[n-1,1:])*ocn_mixing_ratio*(C['vo']*C['dt']/C['xx'])
o_adv = np.concatenate((sst_boundary[n-1]-sst[n-1,:1],sst[n-1,:-1]-sst[n-1,1:]),axis=0)*(vo*dt/xx)
# DIFFUSION
#sst[n,1:-1] = sst[n,1:-1] + (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2))
o_diff = (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(Do*dt/(xx**2))
o_diff0 = (sst[n-1,1]+sst_boundary[n-1]-2*sst[n-1,0])*(Do*dt/(xx**2))
o_diff = np.concatenate((np.array([o_diff0]),o_diff,o_diff[-1:]),axis=0)
# ENTRAINMENT (damping by a lower layer)
o_entrain = (T0-sst[n-1,:])*dt/tau_entrainment
#sst[n,1:]=sst[n,1:]+(C['T0']-sst[n-1,1:])*C['dt']/C['tau_entrainment']
#
# OCEAN - SURFACE FLUXES
#
o_netsflx = -sflx*dt/(H*Cpo*ro)+SW_warming*dt/(H*Cpo*ro)
#sst[n,:]=sst[n,:]-(sflx*C['dt'])/(C['H']*C['Cpo']*C['ro'])
if return_coupled_fluxes:
ocn_DA_tendencies[nn,:] = o_adv + o_diff + o_entrain
# OCN update
if use_ocn_tendencies:
sst[n,:] = sst[n-1,:] + o_netsflx + ocn_DA_tendencies[c3,:]
else:
sst[n,:] = sst[n-1,:] + o_netsflx + o_adv + o_diff + o_entrain
#
if ice_model:
# THIS IS A DIAGNOSTIC SEA ICE MODEL
#
# sst is first allowed to cool below freezing and then we form sea ice from the excess_freeze
# i.e. the amount of heat that would be used to cool sst below freezing is converted to ice instead
# similarly sst is allowed to warm above Tf even if there is ice, and then excess_melt,
# i.e. the amount of heat that is used to warm the water is first used to melt ice,
# and then the rest can warm water. This scheme conserves energy - it simply switches it between ocean and ice
#
ice_mask = (hice[n-1,:]>0).astype(float) #cells where there is ice to melt
freezing_mask = (sst[n,:]<Tf).astype(float) #cells where freezing will happen
# change in energy
dEdt = H*ro*Cpo*(sst[n,:]-sst[n-1,:])/dt
# negative change in energy will produce ice whenever the water would otherwise cool below freezing
excess_freeze = freezing_mask*np.max([-dEdt,np.zeros(nx)],axis=0)
# positive change will melt ice where there is ice
excess_melt = ice_mask*np.max([dEdt,np.zeros(nx)],axis=0)
# note that freezing and melting will never happen at the same time in the same cell
# freezing
dhice_freeze = dt*excess_freeze/(Li*ri)
# melting
dhice_melt= dt*excess_melt/(Li*ri)
# update
hice[n,:] = hice[n-1,:] + dhice_freeze - dhice_melt
# check how much energy was used for melting sea ice - remove this energy from ocean
hice_melt = (dhice_melt>0).astype(float)*np.min([dhice_melt,hice[n-1,:]],axis=0)
# Do not allow ice to be negative - that energy is kept in the ocean all the time.
# The line above ensures that not more energy than is needed to melt the whole ice cover
# is removed from the ocean at any given time
hice[n,:] = np.max([hice[n,:],np.zeros(nx)],axis=0)
#
# Update SST
# Give back the energy that was used for freezing (will keep the water temperature above freezing)
sst[n,:] = sst[n,:] + dt*excess_freeze/(H*Cpo*ro)
# take out the heat that was used to melt ice
# (need to cap to hice, the extra heat is never used and will stay in the ocean)
sst[n,:] = sst[n,:] - hice_melt*(Li*ri)/(ro*Cpo*H)
#
#############################
# --- PREPARE OUTPUT ----
#############################
#accumulate
tas_out[c,:] = tas_out[c,:]+tas[n,:]
sst_out[c,:] = sst_out[c,:]+sst[n,:]
hice_out[c,:] = hice_out[c,:]+hice[n,:]
sflx_out[c,:] = sflx_out[c,:]+sflx
# accumulate averaging counter
c2=c2+1
c3=c3+1
if ((nn+1)*dt)%(360*24*3600)==0:
#print(nn)
c3=0
#calculate the average for the output
if (((nn+1)*dt)%avep==0 and nn>0):
tas_out[c,:] = tas_out[c,:]/c2
sst_out[c,:] = sst_out[c,:]/c2
sflx_out[c,:] = sflx_out[c,:]/c2
sflx_f_out[c,:] = sflx_f_out[c,:]/c2
hice_out[c,:] = hice_out[c,:]/c2
# update counters
c = c+1
c2 = 0
#if ((nn+1)*C['dt'])%(360*24*3600)==0:
# print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])
#update the variables
tas[0,:] = tas[1,:].copy()
sst[0,:] = sst[1,:].copy()
hice[0,:] = hice[1,:].copy()
sst_boundary[0]=sst_boundary[1].copy()
#
hice_out[np.where(hice_out==0)]=np.nan
#
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies
def CoupledChannel2(C,forcing, dt_f=30*24*3600, ocn_mixing_ratio=0, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):
'''
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
    avep: averaging period for the output
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
ocn_mixing: add non-local mixing to ocean
ocn_mixing_ratio: 0-1 ratio between advection and mixing (0 only advection; 1 only mixing)
'''
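    # Minimal usage sketch (illustrative only; C is the constants dictionary whose keys
    # are accessed below, e.g. C['tmax'], C['dt'], C['avep'], C['nx'], ...):
    #   tas, sst, hice, sflx, sflx_f, nt1, nt = CoupledChannel2(C, forcing=None)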
#
#print(C)
#print(C['T0'],C['SW0'],C['Da'],C['xx'])
#
nt=int(C['tmax']/C['dt']) #steps
nt1=int(C['tmax']/C['avep'])
tau=float(C['period'])/float(C['dt']) #this is period/dt, previously nt/8
rtas=np.random.rand(C['nx'])
#print(rtas.max())
    # initialize the model variables, only 2 timesteps deep scheme
sst=C['T0']*np.ones((2,C['nx']))
tas=C['T0']*np.ones((2,C['nx']))+rtas
hice=np.zeros((2,C['nx']))
sst_boundary=C['T0']*np.ones((2))
#
#print(sst.max(),tas.max())
#interpolate forcing to the new timescale
if np.all(forcing!=None):
forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)
else:
forcing = np.zeros(nt+1)
#
#initialize outputs
sst_out = np.zeros((nt1,C['nx']))
tas_out = np.zeros((nt1,C['nx']))
hice_out = np.zeros((nt1,C['nx']))
sflx_f_out = np.zeros((nt1,C['nx'])) #forcing
sflx_out = np.zeros((nt1,C['nx']))
#spatial pattern of the forcing - assume a sine wave
if np.all(spatial_pattern==None):
spatial_pattern=np.ones(C['nx'])
#
if np.all(atm_DA_tendencies!=None):
use_atm_tendencies=True
else:
use_atm_tendencies=False
if np.all(ocn_DA_tendencies!=None):
use_ocn_tendencies=True
else:
use_ocn_tendencies=False
#
atm_DA_tendencies = np.zeros((nt,C['nx']))
ocn_DA_tendencies = np.zeros((nt,C['nx']))
#
tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies=CoupledChannel_time(nt,C['nx'],C['xx'],C['dt'],C['avep'],sst,tas,hice,sst_boundary,sst_out,tas_out,hice_out,sflx_f_out,sflx_out,forcing,spatial_pattern,C['ra'],C['Cp'],C['va'],C['vo'],C['Da'],C['Do'],C['Cs'],C['T0'],C['Tf'],C['emissivity'],C['SW0'],C['SW_anom'],C['H'],C['Hb'],C['Cpo'],C['ro'],C['tau_entrainment'],C['Li'],C['ri'],use_ocn_tendencies,use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies,ice_model,atm_adv,return_coupled_fluxes)
#
if return_coupled_fluxes:
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies
else:
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt
| 46.994434 | 538 | 0.557521 | [
"MIT"
] | AleksiNummelin/coupled_channel | coupled_channel/cutils.py | 25,330 | Python |
import cv2
import os
import numpy as np
import faceReacognition as fr
test_img = cv2.imread('b.jpg')
faces_detected,gray_img = fr.faceDetection(test_img)
print("faces_detected ",faces_detected)
for (x,y,w,h) in faces_detected:
cv2.rectangle(test_img,(x,y),(x+w, y+h),(0,0,255),thickness=1)
resized_img = cv2.resize(test_img,(1000,700))
cv2.imshow('faces',resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 22.944444 | 66 | 0.755448 | [
"MIT"
] | BathiyaSeneviratne/OpenFace | tester.py | 413 | Python |
"""
Subdivide Cells
~~~~~~~~~~~~~~~
Increase the number of triangles in a single, connected triangular mesh.
The :func:`pyvista.PolyDataFilters.subdivide` filter utilizes three different
subdivision algorithms to subdivide a mesh's cells: `butterfly`, `loop`,
or `linear`.
"""
from pyvista import examples
import pyvista as pv
###############################################################################
# First, let's load a **triangulated** mesh to subdivide. We can use the
# :func:`pyvista.DataSetFilters.triangulate` filter to ensure the mesh we are
# using is purely triangles.
mesh = examples.download_bunny_coarse().triangulate()
cpos = [(-0.02788175062966399, 0.19293295656233056, 0.4334449972621349),
(-0.053260899930287015, 0.08881197167521734, -9.016948161029588e-05),
(-0.10170607813337212, 0.9686438023715356, -0.22668272496584665)]
###############################################################################
# Now, let's do a few subdivisions with the mesh and compare the results.
# Below is a helper function to make a comparison plot of the different
# subdivisions.
def plot_subdivisions(mesh, a, b):
display_args = dict(show_edges=True, color=True)
p = pv.Plotter(shape=(3,3))
for i in range(3):
p.subplot(i,0)
p.add_mesh(mesh, **display_args)
p.add_text("Original Mesh")
def row_plot(row, subfilter):
subs = [a, b]
for i in range(2):
p.subplot(row, i+1)
p.add_mesh(mesh.subdivide(subs[i], subfilter=subfilter), **display_args)
p.add_text(f"{subfilter} subdivision of {subs[i]}")
row_plot(0, "linear")
row_plot(1, "butterfly")
row_plot(2, "loop")
p.link_views()
p.view_isometric()
return p
###############################################################################
# Run the subdivisions for 1 and 3 levels.
plotter = plot_subdivisions(mesh, 1, 3)
plotter.camera_position = cpos
plotter.show()
| 33.741379 | 84 | 0.611139 | [
"MIT"
] | Boorhin/pyvista | examples/01-filter/subdivide.py | 1,957 | Python |
import argparse
import boto3
import json
from uuid import uuid4
import os
S3_BUCKET = os.environ["S3_BUCKET"]
S3_BUCKET_KEY_ID = os.environ["S3_BUCKET_KEY_ID"]
S3_BUCKET_KEY = os.environ["S3_BUCKET_KEY"]
AZ_PROCESSED_FILE = "/mnt/aws-things-azure-processed.json"
if __name__ == '__main__':
client = boto3.client(
'iot',
region_name=os.environ["AWS_REGION"],
aws_access_key_id=os.environ["AWS_KEY_ID"],
aws_secret_access_key=os.environ["AWS_KEY"]
)
with open (AZ_PROCESSED_FILE) as file:
jsonJobDoc = json.load(file)
for thing in jsonJobDoc['things']:
print (thing['thingName'])
print (thing['thingArn'])
print (thing['azure']['iotconnstr'])
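        # The job document passed below expands to JSON of the form (values are placeholders):
        #   {"operation": "upgradetoAzure", "fileBucket": "<S3 bucket>",
        #    "ACCESS_KEY": "<key id>", "SECRET_KEY": "<secret>",
        #    "AZURE_CONNECTION_STRING": "<device connection string>"}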
response = client.create_job(
jobId='upgrade-'+thing['thingName'] + "-" + str(uuid4()),
targets=[
thing['thingArn'],
],
document="{ \"operation\": \"upgradetoAzure\", \"fileBucket\": \""+S3_BUCKET+"\", \"ACCESS_KEY\": \""+S3_BUCKET_KEY_ID+ "\",\"SECRET_KEY\": \""+S3_BUCKET_KEY+ "\", \"AZURE_CONNECTION_STRING\": \""+thing['azure']['iotconnstr'] + "\" }",
jobExecutionsRolloutConfig={
'maximumPerMinute': 5,
'exponentialRate': {
'baseRatePerMinute': 5,
'incrementFactor': 1.1,
'rateIncreaseCriteria': {
'numberOfNotifiedThings': 1
}
}
},
abortConfig={
'criteriaList': [
{
'failureType': 'FAILED',
'action': 'CANCEL',
'thresholdPercentage': 100,
'minNumberOfExecutedThings': 1
},
]
}
) | 33.945455 | 247 | 0.508838 | [
"MIT"
] | drcrook1/AWS_IOT_MIGRATION_TOOL | STREAM_2/createawsupgradejob.py | 1,867 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@package EFIT2D_Classes
Support Library: efit2d-pyopencl
Manuscript Title: Optimized OpenCL implementation of the Elastodynamic Finite Integration Technique for viscoelastic media
Authors: M Molero, U Iturraran-Viveros, S Aparicio, M.G. Hernández
Program title: EFIT2D-PyOpenCL
Journal reference: Comput. Phys. Commun.
Programming language: Python.
External routines: numpy, scipy, matplotlib, glumpy, pyopencl
Computer: computers having GPU or Multicore CPU with OpenCL drivers.
All classes here defined are used to define:
- The scenario,
- Material objects,
- Input sources,
- Inspection setup,
- Simulation parameters
"""
import numpy as np
from math import sin, cos, sqrt, pi, exp
import random
import time
from scipy import signal
from scipy.fftpack import fftshift
from skimage.transform import rotate
try:
from Image import Image
except:
from PIL import Image
from matplotlib import cm
import matplotlib.pyplot as plt
def imresize(arr, size, **kwargs):
    from PIL import Image
    # PIL expects the target size as (width, height), i.e. (columns, rows);
    # nearest-neighbour resampling keeps the integer material labels intact
    size_list = (int(arr.shape[1] * size), int(arr.shape[0] * size))
    return np.array(Image.fromarray(arr).resize(size_list, resample=Image.NEAREST))
def imrotate(arr, angle, **kwargs):
    # preserve the original value range and use nearest-neighbour interpolation so
    # integer material labels are not rescaled to [0, 1] by skimage
    return rotate(arr, angle=angle, order=0, preserve_range=True).astype(arr.dtype)
def RaisedCosinePulse(t, Freq, Amplitude):
"""
Raised-Cosine Pulse
@param t time vector
@param Freq Frequency in Hz
@param Amplitude Real Value of Amplitude
@return Output signal vector
@retval P vector of length equals to the time vector t
"""
N = np.size(t,0)
P = np.zeros((N,),dtype=np.float32)
for m in range(0,N):
if t[m] <= 2.0/Freq:
P[m] = Amplitude *(1-cos(pi*Freq*t[m]))*cos(2*pi*Freq*t[m])
return P
def ricker(t,ts,fsavg):
"""
Ricker Pulse
@param t time vector
@param ts temporal delay
@param fsavg pulse width parameter
@return Output signal vector
"""
a = fsavg*pi*(t-ts)
a2 = a*a
return ((1.0-2.0*a2)*np.exp(-a2))
##
class NewImage:
"""
Class NewImage: Definition of the Main Geometric Scenario.
"""
def __init__(self, Width=40, Height=40,Pixel_mm=10,label=0,SPML=False):
"""
Constructor of the Class NewImage
@param Width Width of the Scenario
@param Height Height of the Scenario
@param Pixel_mm Ratio Pixel per mm
@param label Label
@param SPML Flag used to indicate the boundary conditions
"""
## Width of the Scenario
self.Width = Width
## Height of the Scenario
self.Height = Height
## Ratio Pixel per mm
self.Pixel_mm = Pixel_mm
## Label
self.Label = label
## Flag used to indicate the boundary conditions
self.SPML = SPML
## Dimension 1 of the Scenario Matrix
self.M = int(self.Height * self.Pixel_mm)
        ## Dimension 2 of the Scenario Matrix
        self.N = int(self.Width  * self.Pixel_mm)
        ## Scenario Matrix (MxN)
self.I = np.ones((self.M,self.N),dtype=np.uint8)*label
self.Itemp = 0
## Size of the Boundary Layer
self.Tap = 0
## Configure if boundary layers will be treated as absorbing layers or air layers.
#
# False: Absorbing layers
#
# True : Air boundaries
self.AirBoundary = False
def createLayer(self, centerW, centerH, Width, Height, label, Theta=0):
"""
Create a Layer
@param centerW center in width-axis of the Layer
@param centerH center in height-axis of the Layer
@param Width Width of the Layer
@param Height Height of the Layer
@param label Label of the layer
@param Theta Rotation Angle
"""
a = int(Height*self.Pixel_mm/2.0)
b = int(Width*self.Pixel_mm/2.0)
for x in range(-a,a):
for y in range(-b,b):
tempX = round (x + centerH*self.Pixel_mm)
tempY = round (y + centerW*self.Pixel_mm)
self.I[tempX,tempY] = label
if Theta != 0:
self.I = imrotate(self.I,Theta,interp='nearest')
def createABS(self,Tap):
"""
Create the boundary layers depending on the boundary conditions required
@param Tap Layer Size
"""
self.Tap = Tap
self.SPML = True
self.AirBoundary = False
self.M, self.N = np.shape(self.I)
TP = round(Tap* self.Pixel_mm )
M_pml = int( self.M + 2*TP )
N_pml = int( self.N + 2*TP )
self.Itemp = 255.0*np.ones((M_pml,N_pml),dtype=np.uint8)
self.Itemp[TP : M_pml-TP, TP : N_pml-TP] = np.copy(self.I)
class Material:
"""
Class Material: Definition of a material
@param name Material Name
@param rho Density (kg/m3)
@param c11 C11 (Pa)
@param c12 C12 (Pa)
@param c22 C22 (Pa)
@param c44 C44 (Pa)
@param eta_v Bulk Viscosity Constant (Pa s)
@param eta_s Shear Viscosity Constant (Pa s)
@param label Material Label
"""
def __init__(self, name="Water",rho=1000,c11=2.19e9,c12=0.0,c22=0.0,c44=0.0,eta_v=0, eta_s=0,label=0):
"""
Constructor of the Material object
"""
## Material Name
self.name = name
##Density (kg/m3)
self.rho = rho
## C11 (Pa)
self.c11 = c11
## C12 (Pa)
self.c12 = c12
## C22 (Pa)
self.c22 = c22
## C44 (Pa)
self.c44 = c44
## Longitudinal Velocity (m/s)
self.VL = sqrt( c11/rho )
## Shear Velocity (m/s)
self.VT = sqrt( c44/rho )
## Bulk Viscosity Constant (Pa s)
self.eta_v = eta_v
## Shear Viscosity Constant (Pa s)
self.eta_s = eta_s
## Material Label
self.Label = label
def __str__(self):
return "Material:"
def __repr__(self):
return "Material:"
class Source:
"""
Class Source: Define the Inspection Type
@param TypeLaunch Type of Inspection: Transmission or PulseEcho
"""
def __init__(self,TypeLaunch = 'Transmission'):
## Type of Inspection: Transmission or PulseEcho
self.TypeLaunch = TypeLaunch
## Define the location of the transducers in function of the type of the Inspection
self.Theta = 0
if self.TypeLaunch == 'PulseEcho':
self.pulseEcho()
elif self.TypeLaunch == 'Transmission':
self.transmission()
def __str__(self):
return "Source: "
def __repr__(self):
return "Source: "
def pulseEcho(self):
"""
Define Theta for PulseEcho Inspection. PulseEcho Inspection uses the same transducer acting as emitter and as receiver
"""
self.Theta = [270*pi/180, 270*pi/180]
def transmission(self):
"""
        Define Theta for Transmission Inspection. Transmission uses two transducers, one used as emitter and another as receiver
"""
self.Theta = [270*pi/180, 90*pi/180]
class Transducer:
"""
Class Transducer: Definition of the Transducer Object
@param Size Transducer Size
@param Offset Offset position of the Transducer. By default is set to zero
@param BorderOffset Border offset position of the Transducer. By default is set to zero
@param Location Location is set to zero that indicates Up location
@param name Transducer Name
"""
def __init__(self, Size = 10, Offset=0, BorderOffset=0, Location=0, name = 'emisor'):
"""
Constructor of the Class Transducer
"""
# Location = 0 => Top
## Transducer Size
self.Size = Size
## Offset position of the Transducer. By default is set to zero
#
# This offset is measured taking into account the center of the Scenario in the width-axis
#
# Positive Values indicate offsets toward the right
#
# Negative values indicate offsets toward the left
self.Offset = Offset
## Border offset position of the Transducer. By default is set to zero
#
        # This border offset takes into account the center of the Scenario in the width-axis,
        # but this offset is measured in the direction of the height-axis
#
# Only Positive values must be defined.
self.BorderOffset = BorderOffset
        ## Size of the transducer in pixels
self.SizePixel = 0
## Location-> 0: Top. This version only works when the location=0
self.Location = Location
## Name of the transducer
self.name = name
def __str__(self):
return "Transducer: "
def __repr__(self):
return "Transducer: "
####################################################################################
class Signal:
"""
Class Signal: Signal Definition (Source Input for the Simulation)
@param Amplitude Signal Amplitude
@param Frequency Frequency Amplitude
@param Name Name of the Signal: RaisedCosinePulse or RickerPulse
@param ts Time Delay: used only for RickerPulse
"""
def __init__(self, Amplitude=1, Frequency=1e6, name ="RaisedCosinePulse", ts=1):
## Signal Amplitude
self.Amplitude = Amplitude
        ## Signal Frequency (Hz)
self.Frequency = Frequency
## Name of the Signal: RaisedCosinePulse or RickerPulse
self.name = name
## Time Delay: used only for RickerPulse
        if ts == 1:
            self.ts = 3.0/Frequency
        else:
            self.ts = ts
def __str__(self):
return "Signal: "
def __repr__(self):
return "Signal: "
def generate(self,t):
"""
Generate the signal waveform
@param t vector time
@return signal vector with the same length as the vector time
"""
if self.name == "RaisedCosinePulse":
return RaisedCosinePulse(t, self.Frequency, self.Amplitude)
elif self.name == "RickerPulse":
return ricker(t, self.ts, self.Frequency)
def saveSignal(self,t):
"""
Save the signal waveform into the object
@param t vector time
"""
self.time_signal = self.generate(t)
######################################
class Inspection:
"""
Class Inspection: used for the configuration of the inspections to be emulated
"""
def __init__(self):
"""
Constructor of the Class Inspection
"""
## Position of the Transducer (Angle)
self.Theta = 0
## Vector x-axis Position of the Transducer
self.XL = 0
## Vector y-axis Position of the Transducer
self.YL = 0
##
self.IR = 0
def __str__(self):
return "Inspection: "
def __repr__(self):
return "Inspection: "
def setTransmisor(self, source, transducer, x2, y2, X0, Y0):
self.Theta = source.Theta
Ntheta = np.size(self.Theta,0)
NXL = int(2*transducer.SizePixel)
xL = np.zeros((NXL,),dtype=np.float32)
yL = np.zeros((NXL,),dtype=np.float32)
for m in range(0,Ntheta):
if np.abs(np.cos(self.Theta[m])) < 1e-5:
yL = np.linspace(y2[m]-transducer.SizePixel,y2[m]+transducer.SizePixel,num=NXL, endpoint=True)
xL[:] = x2[m]*np.ones((NXL,),dtype=np.float32)
elif np.abs(np.cos(self.Theta[m])) == 1:
xL[:] = np.linspace(x2[m]-transducer.SizePixel, x2[m]+transducer.SizePixel,num=NXL, endpoint=True)
yL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )
else:
xL[:] = np.linspace(x2[m]-(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))),x2[m]+(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))), num=NXL, endpoint=True )
yL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )
if m==0:
self.XL = np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)
self.YL = np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)
self.XL[:,m] = (np.around(xL[:]))
self.YL[:,m] = (np.around(yL[:]))
def addOffset(self, image, transducer, NRI):
"""
Handle Offset
"""
NXL = np.size(self.XL,0)
Ntheta = np.size(self.Theta,0)
M_pml, N_pml = np.shape(image.Itemp)
self.YL += (np.around(transducer.Offset * image.Pixel_mm * NRI / float(N_pml)))
self.IR = np.zeros((Ntheta,Ntheta),dtype=np.float32)
B = list(range(0,Ntheta))
self.IR[:,0] = np.int32(B[:])
for i in range(1,Ntheta):
B = np.roll(B,-1)
self.IR[:,i] = np.int32(B)
def addBorderOffset(self, image, transducer, MRI):
"""
Handle Border Offset
"""
M_pml, N_pml = np.shape(image.Itemp)
ratio = float(MRI) / float(M_pml)
self.XL[:,0] += (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )
self.XL[:,1] -= (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )
def flip(self):
self.XL = np.fliplr(self.XL)
def SetReception(self,T):
ReceptorX = (self.XL)
ReceptorY = (self.YL)
M,N = np.shape(ReceptorX)
temp = np.zeros((M,N-1),dtype=np.float32)
for mm in range(0,M):
for ir in range(0,N-1):
temp[mm,ir] = T[ int(ReceptorX[ mm,int(self.IR[0,ir+1]) ] ) , int(ReceptorY[ mm,int(self.IR[0,ir+1]) ]) ]
if self.Field:
return temp.transpose()
else:
return np.mean(temp,0)
def SetReceptionVector(self, T, x, y):
M = np.size(x)
temp = np.zeros((M,),dtype=np.float32)
for mm in range(0,M):
temp[mm] = T[(int(x[mm])),(int(y[mm]))]
return temp
class SimulationModel:
"""
Class Simulation: setup the parameters for the numerical simulation
    Usage (see the commented sketch below):
    - First define an instance of the SimulationModel object
    - Execute the class method jobParameters using as input the materials list
    - Execute the class method createNumericalModel using as input the scenario
    - Execute the class method initReceivers to initialize the receivers
    - Execute the method saveSignal of the Signal object, using as input the attribute simModel.t
    - Save the Device into the simModel.Device attribute
@param TimeScale Scale Time Factor
@param MaxFreq Maximum Frequency
@param PointCycle Points per Cycle
    @param SimTime Simulation Time
@param SpatialScale Spatial Scale: 1 -> meters, 1e-3 -> millimeters
"""
def __init__(self,TimeScale=1, MaxFreq=2e6, PointCycle=10, SimTime=50e6, SpatialScale=1e-3):
## Scale Time Factor
self.TimeScale = TimeScale
## Maximum Frequency
self.MaxFreq = MaxFreq # MHz
## Points per Cycle
self.PointCycle = PointCycle
        ## Simulation Time
self.SimTime = SimTime # microseconds
## Spatial Scale: 1 -> meters, 1e-3 -> millimeters
self.SpatialScale = SpatialScale
## Spatial Discretization
self.dx = 0
## Temporal Discretization
self.dt = 0
self.Rgrid = 0
self.TapG = 0
self.t = 0
self.Ntiempo = 0
self.MRI,self.NRI = (0,0)
self.receiver_signals = 0
self.Device = 'CPU'
self.XL = 0
self.YL = 0
def __str__(self):
return "Simulation Model: "
def __repr__(self):
return "Simulation Model: "
def jobParameters(self,materiales):
"""
Define Main Simulation Parameters
        @param materiales Materials List
"""
indVL = [mat.VL for mat in materiales if mat.VL > 400]
indVT = [mat.VT for mat in materiales if mat.VT > 400]
VL = np.array(indVL)
VT = np.array(indVT)
V = np.hstack( (VL, VT) )
self.dx = np.float32( np.min([V]) / (self.PointCycle*self.MaxFreq) )
self.dt = self.TimeScale * np.float32( 0.7071 * self.dx / ( np.max([V]) ) )
self.Ntiempo = int(round(self.SimTime/self.dt))
self.t = self.dt*np.arange(0,self.Ntiempo)
def createNumericalModel(self, image):
"""
Create the Numerical Model
@param image The Scenario Object
"""
#Spatial Scale
Mp = np.shape(image.Itemp)[0]*self.SpatialScale/image.Pixel_mm/self.dx
self.Rgrid = Mp/np.shape(image.Itemp)[0]
self.TapG = np.around(image.Tap * self.Rgrid * image.Pixel_mm)
self.Im = imresize(image.Itemp, self.Rgrid, interp='nearest')
self.MRI,self.NRI = np.shape(self.Im)
print("dt: " + str(self.dt) + " dx: " + str(self.dx) + " Grid: " + str(self.MRI) + " x " + str(self.NRI))
def initReceivers(self):
"""
Initialize the receivers
"""
self.receiver_signals = 0
def setDevice(self,Device):
"""
Set the Computation Device
@param Device Device to be used
Define the device used to compute the simulations:
- "CPU" : uses the global memory in th CPU
- "GPU_Global" : uses the global memory in the GPU
- "GPU_Local" : uses the local memory in the GPU
"""
if Device == 0:
self.Device = 'CPU'
elif Device ==1:
self.Device = 'GPU_Global'
elif Device ==2:
self.Device = 'GPU_Local'
| 22.671388 | 167 | 0.642197 | [
"MIT"
] | guillaumedavidphd/efit2d-pyopencl | EFIT2D_Classes.py | 16,007 | Python |
import csv
import enum
class Usability(enum.Enum):
UNKNOWN = 0
USER = 1
BOT = 2
BOTH = 4
class MethodInfo:
def __init__(self, name, usability, errors):
self.name = name
self.errors = errors
try:
self.usability = {
'unknown': Usability.UNKNOWN,
'user': Usability.USER,
'bot': Usability.BOT,
'both': Usability.BOTH,
}[usability.lower()]
except KeyError:
raise ValueError('Usability must be either user, bot, both or '
'unknown, not {}'.format(usability)) from None
def parse_methods(csv_file, errors_dict):
"""
Parses the input CSV file with columns (method, usability, errors)
and yields `MethodInfo` instances as a result.
"""
with csv_file.open(newline='') as f:
f = csv.reader(f)
next(f, None) # header
for line, (method, usability, errors) in enumerate(f, start=2):
try:
errors = [errors_dict[x] for x in errors.split()]
except KeyError:
raise ValueError('Method {} references unknown errors {}'
.format(method, errors)) from None
yield MethodInfo(method, usability, errors)
| 29.818182 | 75 | 0.548018 | [
"MIT"
] | Thorbijoern/Telethon | telethon_generator/parsers/methods.py | 1,312 | Python |
from math import pi
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.stocks import MSFT
df = pd.DataFrame(MSFT)[:50]
df["date"] = pd.to_datetime(df["date"])
inc = df.close > df.open
dec = df.open > df.close
w = 12*60*60*1000 # half day in ms
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1000, title = "MSFT Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
p.segment(df.date, df.high, df.date, df.low, color="black")
p.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color="#D5E1DD", line_color="black")
p.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color="#F2583E", line_color="black")
output_file("candlestick.html", title="candlestick.py example")
show(p) # open a browser
| 30.035714 | 94 | 0.72176 | [
"BSD-3-Clause"
] | AdityaJ7/bokeh | examples/plotting/file/candlestick.py | 841 | Python |
from collections import namedtuple
import json
import os
import unittest
import context
import ansi
import comment
class TestComment(unittest.TestCase):
def setUp(self):
self.maxDiff = None
comments_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'comments.json'
)
)
with open(comments_path) as f:
self.comments_data = json.load(f)
file_contents_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'file_contents.txt'
)
)
with open(file_contents_path) as f:
text = f.read()
self.file_contents = text.split('\n')
def test_create_comment_no_context(self):
filename = 'file2'
data = self.comments_data[filename][0]
c = comment.Comment(data, filename)
self.assertEqual('4', c.id)
self.assertEqual('3', c.patch_set)
self.assertEqual(None, c.parent)
self.assertEqual('Name1', c.author)
self.assertEqual('A file comment', c.message)
self.assertEqual('2021-04-24', c.date)
self.assertEqual(filename, c.file)
self.assertEqual('', c.context[0])
self.assertEqual('', c.context[1])
self.assertEqual('', c.context[2])
def test_create_comment_line(self):
filename = 'file1'
data = self.comments_data[filename][2]
c = comment.Comment(data, filename, self.file_contents)
self.assertEqual('', c.context[0])
self.assertEqual('Some more content.', c.context[1])
self.assertEqual('', c.context[2])
def test_create_comment_range_one_line(self):
filename = 'file2'
data = self.comments_data[filename][1]
c = comment.Comment(data, filename, self.file_contents)
self.assertEqual('File ', c.context[0])
self.assertEqual('starts', c.context[1])
self.assertEqual(' here.', c.context[2])
def test_create_comment_range_four_lines(self):
filename = 'file1'
data = self.comments_data[filename][0]
c = comment.Comment(data, filename, self.file_contents)
self.assertEqual('File ', c.context[0])
self.assertEqual('starts here.\nSome content.\nSome more content.\nThis', c.context[1])
self.assertEqual(' is the end.', c.context[2])
def test_str(self):
filename = 'file1'
data = self.comments_data[filename][0]
c = comment.Comment(data, filename, self.file_contents)
actual = str(c)
expected = ' '.join([
'Name1',
ansi.format('Can you update this, Name2?', [ansi.GREEN, ansi.ITALIC])
])
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 33.732558 | 95 | 0.590141 | [
"MIT"
] | slarwise/gercli | tests/test_comment.py | 2,901 | Python |
"""Classes for more complex applications that have tabbed or paged navigation."""
from collections import OrderedDict
from copy import deepcopy
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from implements import implements
from .utils_app import AppBase, AppInterface
TODO_CLIENT_CALLBACK = '''
TODO: Create clientside callbacks dynamically to update the title on navigation
See: http://dash.plotly.com/external-resources
```py
app.clientside_callback(
"""
function(tab_value) {
if (tab_value === 'tab-1') {
document.title = 'Tab 1'
} else if (tab_value === 'tab-2') {
document.title = 'Tab 2'
}
}
""",
Output('blank-output', 'children'),
[Input('tabs-example', 'value')]
)
```
'''
# TODO: Try to see if I can resolve the interface differences or if I need to make a subclass interface
# @implements(AppInterface) # noqa: H601
class AppWithNavigation(AppBase):
"""Base class for building Dash Application with tabs or URL routing."""
app = None
"""Main Dash application to pass to all child tabs."""
nav_lookup = None
"""OrderedDict based on the list of tuples from `self.define_nav_elements()`."""
nav_layouts = None
"""Dictionary with nav_names as keys and corresponding layout as value."""
def define_nav_elements(self):
"""Return list of initialized pages or tabs accordingly.
Should return, list: each item is an initialized app (ex `[AppBase(self.app)]` in the order each tab is rendered
Raises:
NotImplementedError: Child class must implement this method
"""
raise NotImplementedError('define_nav_elements must be implemented by child class') # pragma: no cover
def create(self, **kwargs):
"""Create each navigation componet, storing the layout. Then parent class to create application.
Args:
kwargs: keyword arguments passed to `self.create`
"""
# Initialize the lookup for each tab then configure each tab
self.nav_lookup = OrderedDict([(tab.name, tab) for tab in self.define_nav_elements()])
self.nav_layouts = {}
for nav_name, nav in self.nav_lookup.items():
nav.create(assign_layout=False)
self.nav_layouts[nav_name] = nav.return_layout()
# Store validation_layout that is later used for callback verification in base class
self.validation_layout = [*map(deepcopy, self.nav_layouts.values())]
# Initialize parent application that handles navigation
super().create(**kwargs)
def initialization(self) -> None:
"""Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
super().initialization()
self.register_uniq_ids(self.app_ids)
def create_elements(self) -> None:
"""Override method as not needed at navigation-level."""
... # pragma: no cover
def create_callbacks(self) -> None:
"""Override method as not needed at navigation-level."""
... # pragma: no cover
@implements(AppInterface) # noqa: H601
class StaticTab(AppBase):
"""Simple App without charts or callbacks."""
basic_style = {
'marginLeft': 'auto',
'marginRight': 'auto',
'maxWidth': '1000px',
'paddingTop': '10px',
}
def initialization(self) -> None:
"""Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
super().initialization()
self.register_uniq_ids(['N/A'])
def create_elements(self) -> None:
"""Initialize the charts, tables, and other Dash elements.."""
...
def create_callbacks(self) -> None:
"""Register callbacks necessary for this tab."""
...
class AppWithTabs(AppWithNavigation):
"""Base class for building Dash Application with tabs."""
# App ids
id_tabs_content = 'tabs-wrapper'
id_tabs_select = 'tabs-content'
app_ids = [id_tabs_content, id_tabs_select]
"""List of all ids for the top-level tab view. Will be mapped to `self._il` for globally unique ids."""
def return_layout(self) -> dict:
"""Return Dash application layout.
Returns:
dict: Dash HTML object
"""
tabs = [dcc.Tab(label=name, value=name) for name, tab in self.nav_lookup.items()]
return html.Div(
children=[
dcc.Tabs(
id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
children=tabs,
),
html.Div(id=self._il[self.id_tabs_content]),
],
)
def create_callbacks(self) -> None:
"""Register the navigation callback."""
outputs = [(self.id_tabs_content, 'children')]
inputs = [(self.id_tabs_select, 'value')]
@self.callback(outputs, inputs, [])
def render_tab(tab_name):
return [self.nav_layouts[tab_name]]
# > PLANNED: Make the tabs and chart compact as well when the compact argument is set to True
class FullScreenAppWithTabs(AppWithTabs): # noqa: H601
"""Base class for building Dash Application with tabs that uses the full window."""
tabs_location = 'left'
"""Tab orientation setting. One of `(left, top, bottom, right)`."""
tabs_margin = '10%'
"""Adjust this setting based on the width or height of the tabs to prevent the content from overlapping the tabs."""
tabs_compact = False
"""Boolean setting to toggle between a padded tab layout if False and a minimal compact version if True."""
def verify_app_initialization(self):
"""Check that the app was properly initialized.
Raises:
RuntimeError: if child class has not called `self.register_uniq_ids`
"""
super().verify_app_initialization()
allowed_locations = ('left', 'top', 'bottom', 'right')
if self.tabs_location not in allowed_locations: # pragma: no cover
raise RuntimeError(f'`self.tabs_location = {self.tabs_location}` is not in {allowed_locations}')
def return_layout(self) -> dict:
"""Return Dash application layout.
Returns:
dict: Dash HTML object
"""
return html.Div(
children=[
self.tab_menu(),
html.Div(
style={f'margin-{self.tabs_location}': self.tabs_margin},
children=[html.Div(id=self._il[self.id_tabs_content])],
),
],
)
def generate_tab_kwargs(self):
"""Create the tab keyword arguments. Intended to be modified through inheritance.
Returns:
tuple: keyword arguments and styling for the dcc.Tab elements
- tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab
- tabs_kwargs: to be passed to dcc.Tabs
- tabs_style: style for the dcc.Tabs HTML element
"""
# Unselected tab style
if self.tabs_compact:
tab_style = {'padding': '2px 4px 2px 4px'}
tabs_padding = '6px 0 0 2px'
else:
tab_style = {'padding': '10px 20px 10px 20px'}
tabs_padding = '15px 0 0 5px'
# Extend tab style for selected case
selected_style = deepcopy(tab_style)
opposite_lookup = {'top': 'bottom', 'bottom': 'top', 'left': 'right', 'right': 'left'}
tabs_style = { # noqa: ECE001
'backgroundColor': '#F9F9F9',
'padding': tabs_padding,
'position': 'fixed',
'zIndex': '999',
f'border{opposite_lookup[self.tabs_location].title()}': '1px solid #d6d6d6',
self.tabs_location: '0',
}
if self.tabs_location in ['left', 'right']:
# Configure for vertical case
selected_style['border-left'] = '3px solid #119DFF'
tabs_kwargs = {
'vertical': True,
'style': {'width': '100%'},
'parent_style': {'width': '100%'},
}
tabs_style['top'] = '0'
tabs_style['bottom'] = '0'
tabs_style['width'] = 'auto'
else:
# Configure for horizontal case
selected_style['border-top'] = '3px solid #119DFF'
tabs_kwargs = {}
tabs_style['height'] = 'auto'
tabs_style['right'] = '0'
tabs_style['left'] = '0'
tab_kwargs = {'style': tab_style, 'selected_style': selected_style}
return (tab_kwargs, tabs_kwargs, tabs_style)
def tab_menu(self):
"""Return the HTML elements for the tab menu.
Returns:
dict: Dash HTML object
"""
tab_kwargs, tabs_kwargs, tabs_style = self.generate_tab_kwargs()
tabs = [dcc.Tab(label=name, value=name, **tab_kwargs) for name, tab in self.nav_lookup.items()]
return html.Div(
children=[
dcc.Tabs(
id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
children=tabs, **tabs_kwargs,
),
], style=tabs_style,
)
class AppMultiPage(AppWithNavigation): # noqa: H601
"""Base class for building Dash Application with multiple pages."""
navbar_links = None
"""Base class must create list of tuples `[('Link Name', '/link'), ]` to use default `self.nav_bar()`."""
dropdown_links = None
"""Base class must create list of tuples `[('Link Name', '/link'), ]` to use default `self.nav_bar()`."""
logo = None
"""Optional path to logo. If None, no logo will be shown in navbar."""
# App ids
id_url = 'pages-url'
id_pages_content = 'pages-wrapper'
id_toggler = 'nav-toggle'
id_collapse = 'nav-collapse'
app_ids = [id_url, id_pages_content, id_toggler, id_collapse]
"""List of all ids for the top-level pages view. Will be mapped to `self._il` for globally unique ids."""
def return_layout(self) -> dict:
"""Return Dash application layout.
Returns:
dict: Dash HTML object
"""
return html.Div(
children=[
dcc.Location(id=self._il[self.id_url], refresh=False),
self.nav_bar(),
html.Div(id=self._il[self.id_pages_content]),
],
)
def nav_bar(self):
"""Return the HTML elements for the navigation menu.
Returns:
dict: Dash HTML object
"""
        # Create brand icon and name, where the icon is optional
brand = []
if self.logo:
brand.append(dbc.Col(html.Img(src=self.logo, height='25px')))
brand.append(dbc.Col(dbc.NavbarBrand(self.name, className='ml-2')))
# Create links in navbar and dropdown. Both are optional
links = []
if self.navbar_links:
links.append(
dbc.Nav(
children=[dbc.NavItem(dbc.NavLink(name, href=link)) for name, link in self.navbar_links],
fill=True,
navbar=True,
),
)
if self.dropdown_links:
links.append(
dbc.Nav(
dbc.DropdownMenu(
children=[dbc.DropdownMenuItem(name, href=link) for name, link in self.dropdown_links],
in_navbar=True,
label='Links',
nav=True,
),
navbar=True,
),
)
# Layout default navbar
return dbc.Navbar(
children=[
dbc.NavLink(
[
dbc.Row(
children=brand,
align='center',
no_gutters=True,
),
], href='/',
),
dbc.NavbarToggler(id=self._il[self.id_toggler]),
dbc.Collapse(
dbc.Row(
children=links,
no_gutters=True,
className='flex-nowrap mt-3 mt-md-0',
align='center',
),
id=self._il[self.id_collapse],
navbar=True,
),
],
sticky='top',
color='dark',
dark=True,
)
def create_callbacks(self) -> None:
"""Register the navigation callback."""
outputs = [(self.id_pages_content, 'children')]
inputs = [(self.id_url, 'pathname')]
@self.callback(outputs, inputs, [])
def render_page(pathname):
try:
# TODO: Demo how pages could use parameters from pathname
return [self.nav_layouts[self.select_page_name(pathname)]]
except Exception as err:
return [html.Div(children=[f'Error rendering "{pathname}":\n{err}'])]
@self.callback(
[(self.id_collapse, 'is_open')],
[(self.id_toggler, 'n_clicks')],
[(self.id_collapse, 'is_open')],
)
def toggle_navbar_collapse(n_clicks, is_open):
return [not is_open if n_clicks else is_open]
def select_page_name(self, pathname):
"""Return the page name determined based on the pathname.
Should return str: page name
Args:
pathname: relative pathname from URL
Raises:
NotImplementedError: Child class must implement this method
"""
        raise NotImplementedError('select_page_name must be implemented by child class')  # pragma: no cover
| 34.571072 | 120 | 0.572098 | [
"Unlicense"
] | KyleKing/dash_charts | dash_charts/utils_app_with_navigation.py | 13,863 | Python |
from test.test_json import PyTest, CTest
# 2007-10-05
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted"}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'[\\naked]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://json.org/JSON_checker/test/fail25.json
'["\ttab\tcharacter\tin\tstring\t"]',
# http://json.org/JSON_checker/test/fail26.json
'["tab\\ character\\ in\\ string\\ "]',
# http://json.org/JSON_checker/test/fail27.json
'["line\nbreak"]',
# http://json.org/JSON_checker/test/fail28.json
'["line\\\nbreak"]',
# http://json.org/JSON_checker/test/fail29.json
'[0e]',
# http://json.org/JSON_checker/test/fail30.json
'[0e+]',
# http://json.org/JSON_checker/test/fail31.json
'[0e+-1]',
# http://json.org/JSON_checker/test/fail32.json
'{"Comma instead if closing brace": true,',
# http://json.org/JSON_checker/test/fail33.json
'["mismatch"}',
# http://code.google.com/p/simplejson/issues/detail?id=3
'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail:
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
self.loads(doc)
continue
try:
self.loads(doc)
except self.JSONDecodeError:
pass
else:
self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))
def test_non_string_keys_dict(self):
data = {'a' : 1, (1, 2) : 2}
#This is for c encoder
self.assertRaises(TypeError, self.dumps, data)
#This is for python encoder
self.assertRaises(TypeError, self.dumps, data, indent=True)
def test_truncated_input(self):
test_cases = [
('', 'Expecting value', 0),
('[', 'Expecting value', 1),
('[42', "Expecting ',' delimiter", 3),
('[42,', 'Expecting value', 4),
('["', 'Unterminated string starting at', 1),
('["spam', 'Unterminated string starting at', 1),
('["spam"', "Expecting ',' delimiter", 7),
('["spam",', 'Expecting value', 8),
('{', 'Expecting property name enclosed in double quotes', 1),
('{"', 'Unterminated string starting at', 1),
('{"spam', 'Unterminated string starting at', 1),
('{"spam"', "Expecting ':' delimiter", 7),
('{"spam":', 'Expecting value', 8),
('{"spam":42', "Expecting ',' delimiter", 10),
('{"spam":42,', 'Expecting property name enclosed in double quotes', 11),
]
test_cases += [
('"', 'Unterminated string starting at', 0),
('"spam', 'Unterminated string starting at', 0),
]
for data, msg, idx in test_cases:
with self.assertRaises(self.JSONDecodeError) as cm:
self.loads(data)
err = cm.exception
self.assertEqual(err.msg, msg)
self.assertEqual(err.pos, idx)
self.assertEqual(err.lineno, 1)
self.assertEqual(err.colno, idx + 1)
self.assertEqual(str(err),
'%s: line 1 column %d (char %d)' %
(msg, idx + 1, idx))
def test_unexpected_data(self):
test_cases = [
('[,', 'Expecting value', 1),
('{"spam":[}', 'Expecting value', 9),
('[42:', "Expecting ',' delimiter", 3),
('[42 "spam"', "Expecting ',' delimiter", 4),
('[42,]', 'Expecting value', 4),
('{"spam":[42}', "Expecting ',' delimiter", 11),
('["]', 'Unterminated string starting at', 1),
('["spam":', "Expecting ',' delimiter", 7),
('["spam",]', 'Expecting value', 8),
('{:', 'Expecting property name enclosed in double quotes', 1),
('{,', 'Expecting property name enclosed in double quotes', 1),
('{42', 'Expecting property name enclosed in double quotes', 1),
('[{]', 'Expecting property name enclosed in double quotes', 2),
('{"spam",', "Expecting ':' delimiter", 7),
('{"spam"}', "Expecting ':' delimiter", 7),
('[{"spam"]', "Expecting ':' delimiter", 8),
('{"spam":}', 'Expecting value', 8),
('[{"spam":]', 'Expecting value', 9),
('{"spam":42 "ham"', "Expecting ',' delimiter", 11),
('[{"spam":42]', "Expecting ',' delimiter", 11),
('{"spam":42,}', 'Expecting property name enclosed in double quotes', 11),
]
for data, msg, idx in test_cases:
with self.assertRaises(self.JSONDecodeError) as cm:
self.loads(data)
err = cm.exception
self.assertEqual(err.msg, msg)
self.assertEqual(err.pos, idx)
self.assertEqual(err.lineno, 1)
self.assertEqual(err.colno, idx + 1)
self.assertEqual(str(err),
'%s: line 1 column %d (char %d)' %
(msg, idx + 1, idx))
def test_extra_data(self):
test_cases = [
('[]]', 'Extra data', 2),
('{}}', 'Extra data', 2),
('[],[]', 'Extra data', 2),
('{},{}', 'Extra data', 2),
]
test_cases += [
('42,"spam"', 'Extra data', 2),
('"spam",42', 'Extra data', 6),
]
for data, msg, idx in test_cases:
with self.assertRaises(self.JSONDecodeError) as cm:
self.loads(data)
err = cm.exception
self.assertEqual(err.msg, msg)
self.assertEqual(err.pos, idx)
self.assertEqual(err.lineno, 1)
self.assertEqual(err.colno, idx + 1)
self.assertEqual(str(err),
'%s: line 1 column %d (char %d)' %
(msg, idx + 1, idx))
def test_linecol(self):
test_cases = [
('!', 1, 1, 0),
(' !', 1, 2, 1),
('\n!', 2, 1, 1),
('\n \n\n !', 4, 6, 10),
]
for data, line, col, idx in test_cases:
with self.assertRaises(self.JSONDecodeError) as cm:
self.loads(data)
err = cm.exception
self.assertEqual(err.msg, 'Expecting value')
self.assertEqual(err.pos, idx)
self.assertEqual(err.lineno, line)
self.assertEqual(err.colno, col)
self.assertEqual(str(err),
'Expecting value: line %s column %d (char %d)' %
(line, col, idx))
class TestPyFail(TestFail, PyTest): pass
class TestCFail(TestFail, CTest): pass
| 40.940092 | 86 | 0.529941 | [
"Apache-2.0"
] | 4nkitd/pyAutomation | Mark_attandance_py_selenium/py/App/Python/Lib/test/test_json/test_fail.py | 8,884 | Python |
import pytest
from sanic import Sanic
from sanic.response import json
from sanic_jwt import Authentication, exceptions, Initialize
class WrongAuthentication(Authentication):
async def build_payload(self, user, *args, **kwargs):
return {"not_user_id": 1}
class AnotherWrongAuthentication(Authentication):
async def build_payload(self, user, *args, **kwargs):
return list(range(5))
class AuthenticationWithNoMethod(Authentication):
authenticate = "foobar"
class AuthenticationInClassBody(Authentication):
async def authenticate(self, request, *args, **kwargs):
return {"user_id": 1}
async def authenticate(request, *args, **kwargs):
return {"user_id": 1}
def test_authentication_subclass_without_authenticate_parameter():
app = Sanic()
with pytest.raises(exceptions.AuthenticateNotImplemented):
Initialize(app, authentication_class=WrongAuthentication)
def test_authentication_subclass_with_authenticate_not_as_method():
app = Sanic()
with pytest.raises(exceptions.AuthenticateNotImplemented):
Initialize(app, authentication_class=AuthenticationWithNoMethod)
def test_authentication_subclass_with_method_in_class():
app = Sanic()
sanicjwt = Initialize(app, authentication_class=AuthenticationInClassBody)
_, response = app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
assert response.status == 200
assert sanicjwt.config.access_token_name() in response.json
def test_payload_without_correct_key():
app = Sanic()
Initialize(app, authenticate=authenticate, authentication_class=WrongAuthentication)
_, response = app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
assert response.status == 500
assert response.json.get("exception") == "InvalidPayload"
def test_payload_not_a_dict():
app = Sanic()
Initialize(
app, authenticate=authenticate, authentication_class=AnotherWrongAuthentication
)
_, response = app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
assert response.status == 500
assert response.json.get("exception") == "InvalidPayload"
def test_wrong_header(app):
sanic_app, sanic_jwt = app
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(sanic_jwt.config.access_token_name(), None)
assert response.status == 200
assert access_token is not None
_, response = sanic_app.test_client.get(
"/protected", headers={"Authorization": "Foobar {}".format(access_token)}
)
assert response.status == 401
assert response.json.get("exception") == "Unauthorized"
assert "Authorization header is invalid." in response.json.get("reasons")
# assert "Auth required." in response.json.get('reasons')
def test_tricky_debug_option_true(app):
sanic_app, sanic_jwt = app
@sanic_app.route("/another_protected")
@sanic_jwt.protected(debug=lambda: True)
def another_protected(request):
return json({"protected": True, "is_debug": request.app.auth.config.debug()})
# @sanic_app.exception(Exception)
# def in_case_of_exception(request, exception):
# exc_name = exception.args[0].__class__.__name__
# status_code = exception.args[0].status_code
# return json({"exception": exc_name}, status=status_code)
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(sanic_jwt.config.access_token_name(), None)
assert response.status == 200
assert access_token is not None
_, response = sanic_app.test_client.get(
"/protected", headers={"Authorization": "Bearer {}".format(access_token)}
)
assert response.status == 200
_, response = sanic_app.test_client.get("/another_protected")
assert response.json.get("exception") == "Unauthorized"
assert response.status == 400
assert "Authorization header not present." in response.json.get("reasons")
_, response = sanic_app.test_client.get(
"/another_protected",
headers={"Authorization": "Foobar {}".format(access_token)},
)
assert response.json.get("exception") == "Unauthorized"
assert response.status == 400
assert "Authorization header is invalid." in response.json.get("reasons")
def test_tricky_debug_option_false(app):
sanic_app, sanic_jwt = app
@sanic_app.route("/another_protected")
@sanic_jwt.protected(debug=lambda: False)
def another_protected(request):
return json({"protected": True, "is_debug": request.app.auth.config.debug()})
# @sanic_app.exception(Exception)
# def in_case_of_exception(request, exception):
# exc_name = exception.args[0].__class__.__name__
# status_code = exception.args[0].status_code
# return json({"exception": exc_name}, status=status_code)
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(sanic_jwt.config.access_token_name(), None)
assert response.status == 200
assert access_token is not None
_, response = sanic_app.test_client.get(
"/protected", headers={"Authorization": "Bearer {}".format(access_token)}
)
assert response.status == 200
_, response = sanic_app.test_client.get("/another_protected")
assert response.json.get("exception") == "Unauthorized"
assert response.status == 401
assert "Authorization header not present." in response.json.get("reasons")
_, response = sanic_app.test_client.get(
"/another_protected",
headers={"Authorization": "Foobar {}".format(access_token)},
)
assert response.json.get("exception") == "Unauthorized"
assert response.status == 401
assert "Authorization header is invalid." in response.json.get("reasons")
| 29.658537 | 88 | 0.697533 | [
"MIT"
] | amor71/sanic-jwt | tests/test_authentication.py | 6,080 | Python |
import os
import copy
import pytest
import time
import shutil
import tempfile
import logging
from _pytest.logging import caplog as _caplog
from contextlib import suppress
from panoptes.utils.logging import logger
from panoptes.utils.database import PanDB
from panoptes.utils.config.client import get_config
from panoptes.utils.config.client import set_config
from panoptes.utils.config.server import config_server
# Doctest modules
import numpy as np
from matplotlib import pyplot as plt
_all_databases = ['file', 'memory']
logger.enable('panoptes')
logger.level("testing", no=15, icon="🤖", color="<YELLOW><black>")
log_file_path = os.path.join(
os.getenv('PANLOG', '/var/panoptes/logs'),
'panoptes-testing.log'
)
log_fmt = "<lvl>{level:.1s}</lvl> " \
"<light-blue>{time:MM-DD HH:mm:ss.ss!UTC}</>" \
"<blue>({time:HH:mm:ss.ss})</> " \
"| <c>{name} {function}:{line}</c> | " \
"<lvl>{message}</lvl>\n"
startup_message = ' STARTING NEW PYTEST RUN '
logger.add(log_file_path,
enqueue=True, # multiprocessing
format=log_fmt,
colorize=True,
backtrace=True,
diagnose=True,
catch=True,
# Start new log file for each testing run.
rotation=lambda msg, _: startup_message in msg,
level='TRACE')
logger.log('testing', '*' * 25 + startup_message + '*' * 25)
def pytest_addoption(parser):
db_names = ",".join(_all_databases) + ' (or all for all databases)'
group = parser.getgroup("PANOPTES pytest options")
group.addoption(
"--astrometry",
action="store_true",
default=False,
help="If tests that require solving should be run")
group.addoption(
"--theskyx",
action="store_true",
default=False,
help="If running tests alongside a running TheSkyX program.")
group.addoption(
"--test-databases",
nargs="+",
default=['file'],
help=("Test databases in the list. List items can include: " + db_names +
". Note that travis-ci will test all of them by default."))
@pytest.fixture(scope='session')
def db_name():
return 'panoptes_testing'
@pytest.fixture(scope='session')
def images_dir(tmpdir_factory):
directory = tmpdir_factory.mktemp('images')
return str(directory)
@pytest.fixture(scope='session')
def config_path():
return os.path.expandvars('${PANDIR}/panoptes-utils/tests/panoptes_utils_testing.yaml')
@pytest.fixture(scope='session', autouse=True)
def static_config_server(config_path, images_dir, db_name):
logger.log('testing', f'Starting static_config_server for testing session')
proc = config_server(
config_file=config_path,
ignore_local=True,
auto_save=False
)
logger.log('testing', f'static_config_server started with {proc.pid=}')
# Give server time to start
while get_config('name') is None: # pragma: no cover
logger.log('testing', f'Waiting for static_config_server {proc.pid=}, sleeping 1 second.')
time.sleep(1)
logger.log('testing', f'Startup config_server name=[{get_config("name")}]')
# Adjust various config items for testing
unit_id = 'PAN000'
logger.log('testing', f'Setting testing name and unit_id to {unit_id}')
set_config('pan_id', unit_id)
logger.log('testing', f'Setting testing database to {db_name}')
set_config('db.name', db_name)
fields_file = 'simulator.yaml'
logger.log('testing', f'Setting testing scheduler fields_file to {fields_file}')
set_config('scheduler.fields_file', fields_file)
logger.log('testing', f'Setting temporary image directory for testing')
set_config('directories.images', images_dir)
yield
logger.log('testing', f'Killing static_config_server started with PID={proc.pid}')
proc.terminate()
@pytest.fixture(scope='function', params=_all_databases)
def db_type(request):
db_list = request.config.option.test_databases
if request.param not in db_list and 'all' not in db_list: # pragma: no cover
pytest.skip(f"Skipping {request.param} DB, set --test-all-databases=True")
PanDB.permanently_erase_database(
request.param, 'panoptes_testing', really='Yes', dangerous='Totally')
return request.param
@pytest.fixture(scope='function')
def db(db_type):
return PanDB(db_type=db_type, db_name='panoptes_testing', connect=True)
@pytest.fixture(scope='function')
def save_environ():
old_env = copy.deepcopy(os.environ)
yield
os.environ = old_env
@pytest.fixture(scope='session')
def data_dir():
return os.path.expandvars('/var/panoptes/panoptes-utils/tests/data')
@pytest.fixture(scope='function')
def unsolved_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'unsolved.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def solved_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'solved.fits.fz')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def tiny_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'tiny.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def noheader_fits_file(data_dir):
orig_file = os.path.join(data_dir, 'noheader.fits')
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(orig_file, tmpdirname)
yield copy_file
@pytest.fixture(scope='function')
def cr2_file(data_dir):
cr2_path = os.path.join(data_dir, 'canon.cr2')
if not os.path.exists(cr2_path):
pytest.skip("No CR2 file found, skipping test.")
with tempfile.TemporaryDirectory() as tmpdirname:
copy_file = shutil.copy2(cr2_path, tmpdirname)
yield copy_file
@pytest.fixture(autouse=True)
def add_doctest_dependencies(doctest_namespace):
doctest_namespace['np'] = np
doctest_namespace['plt'] = plt
@pytest.fixture
def caplog(_caplog):
class PropogateHandler(logging.Handler):
def emit(self, record):
logging.getLogger(record.name).handle(record)
logger.enable('panoptes')
handler_id = logger.add(PropogateHandler(), format="{message}")
yield _caplog
with suppress(ValueError):
logger.remove(handler_id)
| 30.041096 | 98 | 0.689314 | [
"MIT"
] | sarumanplaysguitar/panoptes-utils | conftest.py | 6,582 | Python |
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['Patient Discharge System v2.0.py']
DATA_FILES = ['model.docx', 'logo.gif']
OPTIONS = {}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| 16.65 | 48 | 0.66967 | [
"MIT"
] | dr-nandanpatel/patientdischargesystem-App-MacOS | setup.py | 333 | Python |
import os
from subprocess import call
if os.path.isdir("bin/test"):
call(["fusermount", "-u", "bin/test"])
os.rmdir("bin/test")
os.mkdir("bin/test")
call(["bin/simple", "-f", "bin/test"])
| 20.2 | 42 | 0.608911 | [
"MIT"
] | gareth-ferneyhough/SierraFS | examples/postbuild.py | 202 | Python |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from test import LisaTest
TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
TESTS_CONF = os.path.join(TESTS_DIRECTORY, "rfc.config")
class RFC(LisaTest):
"""Tests for the Energy-Aware Scheduler"""
test_conf = TESTS_CONF
experiments_conf = TESTS_CONF
@classmethod
def setUpClass(cls, *args, **kwargs):
super(RFC, cls).runExperiments(args, kwargs)
def test_run(self):
"""A dummy test just to run configured workloads"""
pass
# vim :set tabstop=4 shiftwidth=4 expandtab
| 29.170732 | 75 | 0.732441 | [
"Apache-2.0"
] | ADVAN-ELAA-8QM-PRC1/platform-external-lisa | tests/eas/rfc.py | 1,196 | Python |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
IdList
)
import numpy as np
class MergeIdLists(ModelLayer):
"""Merge multiple ID_LISTs into a single ID_LIST
Arguments:
model: A layer model instance
input_record: Tuple (Struct) of ID_LIST features to be
merged
Returns:
the merged ID_LIST feature
"""
def __init__(self, model, input_record, name='merged'):
super(MergeIdLists, self).__init__(model, name, input_record)
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
"Features without metadata are not supported"
merge_dim = max(get_categorical_limit(record)
for record in self.input_record)
assert merge_dim is not None, "Unbounded features are not supported"
self.output_schema = schema.NewRecord(
model.net, schema.List(
schema.Scalar(
np.int64,
blob=model.net.NextBlob(name),
metadata=schema.Metadata(categorical_limit=merge_dim)
)))
def add_ops(self, net):
return net.MergeIdLists(self.input_record.field_blobs(),
self.output_schema.field_blobs())
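# Hedged usage sketch (illustrative only; not part of the layer above). It
# assumes `model` is a layer_model_helper.LayerModelHelper instance and that
# the layer registry exposes MergeIdLists as a model attribute, which is how
# layers in this package are typically instantiated.
def _example_merge_id_lists_usage(model):
    # Two bounded ID_LIST features; the categorical_limit metadata is
    # required by the assertions in MergeIdLists.__init__.
    input_record = schema.NewRecord(model.net, schema.Struct(
        ('feature_a', schema.List(schema.Scalar(
            np.int64, metadata=schema.Metadata(categorical_limit=100)))),
        ('feature_b', schema.List(schema.Scalar(
            np.int64, metadata=schema.Metadata(categorical_limit=50)))),
    ))
    # The merged output is a single ID_LIST whose categorical limit is the
    # max of the input limits.
    return model.MergeIdLists(input_record)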
| 35.166667 | 78 | 0.652736 | [
"Apache-2.0"
] | AIHGF/caffe2 | caffe2/python/layers/merge_id_lists.py | 2,321 | Python |
DEPTH_LIMIT = 2
FOLLOW_DOMAINS = {'livejournal.com'}
RSS_TEMPLATES = {
r'.*\.livejournal\.com/?.*': {
'http://%(netloc)s/data/rss': 100,
'https://%(netloc)s/data/rss': 200,
'http://%(netloc)s/data/atom': 300,
'https://%(netloc)s/data/atom': 400,
},
}
| 22.538462 | 44 | 0.532423 | [
"MIT"
] | monotony113/feedly-link-aggregator | presets/livejournal.py | 293 | Python |
"""Test project for line_round_dot_setting.
Command examples:
$ python test_projects/line_round_dot_setting/main.py
"""
import sys
sys.path.append('./')
import os
from types import ModuleType
import apysc as ap
from apysc._file import file_util
this_module: ModuleType = sys.modules[__name__]
_DEST_DIR_PATH: str = os.path.join(
file_util.get_abs_module_dir_path(module=this_module),
'test_output/'
)
def main() -> None:
"""
Entry point of this test project.
"""
ap.Stage(
background_color='#333',
stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(
color='#0af',
round_dot_setting=ap.LineRoundDotSetting(
round_size=10, space_size=10))
sprite.graphics.move_to(x=50, y=30)
sprite.graphics.line_to(x=450, y=30)
sprite.graphics.line_style(
color='#0af',
round_dot_setting=ap.LineRoundDotSetting(
round_size=10, space_size=20))
sprite.graphics.move_to(x=50, y=60)
sprite.graphics.line_to(x=450, y=60)
sprite.graphics.line_style(
color='#0af',
round_dot_setting=ap.LineRoundDotSetting(
round_size=20, space_size=0))
sprite.graphics.move_to(x=50, y=90)
sprite.graphics.line_to(x=450, y=90)
sprite.graphics.line_style(
color='#0af', thickness=3)
sprite.graphics.move_to(x=40, y=120)
sprite.graphics.line_to(x=460, y=120)
sprite.graphics.line_style(
color='#0af',
round_dot_setting=ap.LineRoundDotSetting(
round_size=10, space_size=10))
polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
sprite.graphics.line_to(x=450, y=150)
sprite.graphics.line_to(x=700, y=250)
sprite.graphics.line_to(x=700, y=150)
polyline.click(on_polyline_click)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
def on_polyline_click(
e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
"""
    Handler that is called when the polyline is clicked.
Parameters
----------
e : MouseEvent
Created MouseEvent instance.
options : dict
Optional parameters.
"""
polyline: ap.Polyline = e.this
polyline.line_round_dot_setting = None
if __name__ == '__main__':
main()
| 26.21978 | 65 | 0.644174 | [
"MIT"
] | simon-ritchie/action-py-script | test_projects/line_round_dot_setting/main.py | 2,386 | Python |
"""
This module describes how to split a dataset into two parts A and B: A is for
tuning the algorithm parameters, and B is for obtaining an unbiased estimate of
its performance. The tuning is done by Grid Search.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import random
from surprise import SVD
from surprise import Dataset
from surprise import accuracy
from surprise import GridSearch
# Load the full dataset.
data = Dataset.load_builtin('ml-100k')
raw_ratings = data.raw_ratings
# shuffle ratings if you want
random.shuffle(raw_ratings)
# A = 90% of the data, B = 10% of the data
threshold = int(.9 * len(raw_ratings))
A_raw_ratings = raw_ratings[:threshold]
B_raw_ratings = raw_ratings[threshold:]
data.raw_ratings = A_raw_ratings # data is now the set A
data.split(n_folds=3)
# Select your best algo with grid search.
print('Grid Search...')
param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005]}
grid_search = GridSearch(SVD, param_grid, measures=['RMSE'], verbose=0)
grid_search.evaluate(data)
algo = grid_search.best_estimator['RMSE']
# retrain on the whole set A
trainset = data.build_full_trainset()
algo.train(trainset)
# Compute biased accuracy on A
predictions = algo.test(trainset.build_testset())
print('Biased accuracy on A,', end=' ')
accuracy.rmse(predictions)
# Compute unbiased accuracy on B
testset = data.construct_testset(B_raw_ratings) # testset is now the set B
predictions = algo.test(testset)
print('Unbiased accuracy on B,', end=' ')
accuracy.rmse(predictions)
| 28.745455 | 78 | 0.748893 | [
"BSD-3-Clause"
] | wyjiang0930/dissertation_reference_3 | examples/split_data_for_unbiased_estimation.py | 1,581 | Python |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# sla.slaprofile application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.sla.models.slaprofile import SLAProfile
from noc.core.translation import ugettext as _
class SLAProfileApplication(ExtDocApplication):
"""
SLAProfile application
"""
title = "SLA Profile"
menu = [_("Setup"), _("SLA Profiles")]
model = SLAProfile
def field_row_class(self, o):
return o.style.css_class_name if o.style else ""
| 30.230769 | 71 | 0.516539 | [
"BSD-3-Clause"
] | ewwwcha/noc | services/web/apps/sla/slaprofile/views.py | 786 | Python |
from flask import Blueprint, request, jsonify
import subprocess
import json
import yamale
import yaml
import app_conf
import logging.handlers
import mydb
imageinfo = Blueprint('imageinfo', __name__)
# set logger
logger = logging.getLogger(__name__)
path = f'./logs/{__name__}.log'
fileHandler = logging.handlers.RotatingFileHandler(path,
maxBytes=app_conf.Log.log_max_size,
backupCount=app_conf.Log.log_backup_count)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)s %(message)s'))
logger.addHandler(fileHandler)
logger.setLevel(app_conf.Log.log_level)
# temp
logger.addHandler(logging.StreamHandler())
db_path = "data/imageinfo.db"
mydb.init(db_path)
schema_create = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/create', methods=['post'])
def create():
msg = {
'err': None,
'res': None
}
try:
# schema validation
# yamale.validate(schema_create, yamale.make_data(content=request.data.decode('utf-8')))
# name
body = yaml.load(request.data, Loader=yaml.Loader)
k = f"{body['namespace']}/{body['name']}"
v = json.dumps(body).encode()
mydb.upsert(db_path, k, v)
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
schema_delete = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/delete', methods=['delete'])
def delete():
msg = {
'err': None,
'res': None
}
try:
# schema validation
yamale.validate(schema_delete, yamale.make_data(content=request.data.decode('utf-8')))
body = yaml.load(request.data, Loader=yaml.Loader)
k = f"{body['namespace']}/{body['name']}"
mydb.delete(db_path, k)
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
@imageinfo.route('/list', methods=['get'])
def list_():
msg = {
'err': None,
'res': []
}
try:
namespace = request.args.get('namespace')
temp = mydb.keys(db_path)
for x in temp:
term = x.split('/')
if term[0] == namespace:
msg['res'].append(term[1])
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
@imageinfo.route('/get', methods=['get'])
def get():
msg = {
'err': None,
'res': None
}
try:
name = request.args.get('name')
namespace = request.args.get('namespace')
k = f"{namespace}/{name}"
v = mydb.get(db_path, k)
if v is not None:
msg['res'] = json.loads(v.decode())
except Exception as e:
logger.error(str(e))
msg['err'] = str(e)
return jsonify(msg)
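# ---------------------------------------------------------------------------
# Hedged client sketch (illustrative only). The host, port, and the
# '/imageinfo' URL prefix are assumptions that depend on how this blueprint
# is registered on the Flask app; the create/delete endpoints accept a YAML
# body with 'name' and 'namespace' fields.
#
#     import requests, yaml
#     base = 'http://localhost:5000/imageinfo'
#     body = yaml.dump({'name': 'sample-image', 'namespace': 'default'})
#     requests.post(base + '/create', data=body)
#     requests.get(base + '/get',
#                  params={'name': 'sample-image', 'namespace': 'default'})
#     requests.delete(base + '/delete', data=body)
# ---------------------------------------------------------------------------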
| 23.2 | 108 | 0.585212 | [
"Apache-2.0"
] | cynpna/gedge-platform | gs-engine/gse_api_server/imageinfo.py | 3,016 | Python |
from flask_cors import CORS
from flask import Flask, render_template, Response, request, jsonify
import pandas as pd
import os
import json
import shutil
import datetime
import base64
import binascii
import requests as r
def is_sha1(maybe_sha):
    """Return True if the string looks like a 40-character hex SHA-1 digest."""
    if len(maybe_sha) != 40:
        return False
    try:
        int(maybe_sha, 16)
    except ValueError:
        return False
    return True
def validate(date_text):
    """Return True if date_text matches the DD-MM-YYYY:SS-MM-HH timestamp format."""
    try:
        datetime.datetime.strptime(date_text, '%d-%m-%Y:%S-%M-%H')
        return True
    except ValueError:
        return False
LOGIN_FILE_NAME = "login.csv"
DB = "templates/images"
GLOBAL_LIST = "acts.csv"
IP = "3.208.6.174:80"
INSTANCE_IP = "34.226.230.93"
count_requests = 0
#IP = "127.0.0.1:5000"
app = Flask(__name__)
CORS(app)
@app.errorhandler(405)
def method_not_allowed(e):
global count_requests
count_requests += 1
return jsonify({'error': 405}), 405
@app.route("/")
def index():
return render_template('index.html')
@app.route("/api/v1/categories", methods = ["GET", "POST"])
def list_categories():
global count_requests
count_requests += 1
if not os.path.exists(DB):
os.makedirs(DB, exist_ok = True)
if request.method == 'GET':
categories = os.listdir(DB)
if not categories:
return Response('{}', status=204, mimetype='application/json')
response_data = {}
for category in categories:
response_data[category] = len(os.listdir(DB + "/" + category))
return jsonify(response_data)
elif request.method == "POST":
category = json.loads(request.data)[0]
if category in os.listdir(DB):
return Response('{}', status=400, mimetype='application/json')
os.makedirs(DB + "/" + category, exist_ok = True)
return Response('{}', status=201, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>", methods = ["DELETE"])
def delete_category(category = None):
global count_requests
count_requests += 1
if request.method == 'DELETE':
categories = os.listdir(DB)
if category in categories:
if GLOBAL_LIST in os.listdir():
data = pd.read_csv(GLOBAL_LIST)
data = data[data.category != category]
data.to_csv(GLOBAL_LIST, index = False)
shutil.rmtree(DB + "/" + category)
return Response('{}', status=200, mimetype='application/json')
else:
return Response('{}', status=400, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>/acts", methods = ["GET"])
def list_acts(category = None):
global count_requests
count_requests += 1
if request.method == 'GET':
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
if category not in os.listdir(DB):
return Response('[]', status=400, mimetype='application/json')
start = request.args.get('start')
end = request.args.get("end")
        if start is None and end is None:
#print("This part")
if os.path.exists(temp_path):
data = pd.read_csv(temp_path)
rows = data.shape[0]
if rows == 0:
return Response('[]', status=204, mimetype='application/json')
elif rows >= 100:
return Response('[]', status=413, mimetype='application/json')
else:
response_data = data.to_json(orient = "records")
return Response(response_data, status=200, mimetype='application/json')
else:
return Response('[]', status=204, mimetype='application/json')
else:
start = int(start)
end = int(end)
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
if category not in os.listdir(DB):
return Response('[]', status=400, mimetype='application/json')
if os.path.exists(temp_path):
data = pd.read_csv(temp_path)
data["timestamp"] = pd.to_datetime(data["timestamp"], format = '%d-%m-%Y:%S-%M-%H')
data["actId"] = data["actId"].astype(int)
sorted_data = data.sort_values(["timestamp", "actId"], ascending = [False, False], axis = 0)
#print(data)
#print(sorted_data)
rows = data.shape[0]
if start < 1 or end > rows:
return Response('[]', status=400, mimetype='application/json')
if rows == 0:
return Response('[]', status=204, mimetype='application/json')
else:
required_data = pd.DataFrame(sorted_data.iloc[start-1: end, :])
#print(required_data)
if required_data.shape[0] > 100:
return Response("[]", status=413, mimetype='application/json')
required_data["timestamp"] = pd.to_datetime(required_data["timestamp"], format = '%d-%m-%Y:%S-%M-%H')
required_data["timestamp"] = required_data["timestamp"].astype(str)
response_data = required_data.to_json(orient = "records")
return Response(response_data, status=200, mimetype='application/json')
else:
return Response('[]', status=204, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>/acts/size", methods = ["GET"])
def count_acts(category = None):
global count_requests
count_requests += 1
if request.method == 'GET':
temp_path = DB + "/" + category
if category not in os.listdir(DB):
return Response('[]', status=400, mimetype='application/json')
if os.path.exists(temp_path):
data = pd.read_csv(GLOBAL_LIST)
count = data[data.category == category].shape[0]
return Response('[{0}]'.format(str(count)), status=200, mimetype='application/json')
else:
return Response('[]', status=204, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts/upvote", methods = ["POST"])
def upvote():
global count_requests
count_requests += 1
if request.method == 'POST':
act_id = int(json.loads(request.data)[0])
data_id = pd.read_csv(GLOBAL_LIST)
if act_id not in data_id["act_id"].tolist():
return Response('[]', status=400, mimetype='application/json')
category = data_id[data_id["act_id"] == act_id]["category"].iloc[0]
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
data = pd.read_csv(temp_path)
data.set_index("actId", inplace = True)
data.at[act_id, "upvotes"] += 1
data.reset_index(inplace = True)
data.to_csv(temp_path,index = False)
return Response("{}", status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts/<actId>", methods = ["DELETE"])
def delete_act(actId = None):
global count_requests
count_requests += 1
if request.method == 'DELETE':
act_id = int(actId)
data_id = pd.read_csv(GLOBAL_LIST)
if act_id not in data_id["act_id"].tolist():
return Response('[]', status=400, mimetype='application/json')
category = data_id[data_id["act_id"] == act_id]["category"].iloc[0]
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
data_id = data_id[data_id["act_id"] != act_id]
data_id.to_csv(GLOBAL_LIST, index = False)
data = pd.read_csv(temp_path)
data = data[data["actId"] != act_id]
data.to_csv(temp_path, index = False)
os.remove(DB + "/" + category + "/" + str(act_id) + ".png")
return Response("{}", status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
# @app.route("/api/v1/categories/<category>/acts?start=<startrange>&end=<endrange>", methods = ["GET"])
# def range_acts(category = None, startrange = 0, endrange = 0):
# if request.method == 'GET':
# temp_path = DB + "/" + category + "/" + GLOBAL_LIST
# if category not in os.listdir(DB):
# return Response('[]', status=400, mimetype='application/json')
# if os.path.exists(temp_path):
# data = pd.read_csv(temp_path)
# sorted_data = data.sort(columns = ["timestamp"], ascending = False)
# rows = data.shape[0]
# if startrange < 1 or endrange > rows:
# return Response('[]', status=400, mimetype='application/json')
# if rows == 0:
# return Response('[]', status=204, mimetype='application/json')
# else:
# required_data = sorted_data.ix[startrange-1: endrange-1, :]
# print(required_data)
# if required_data.shape[0] > 100:
# return Response("[]", status=413, mimetype='application/json')
# response_data = required_data.to_json(orient = "records")
# return Response(response_data, status=200, mimetype='application/json')
# else:
# return Response('[]', status=204, mimetype='application/json')
# else:
# return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts", methods = ["POST"])
def upload_act():
global count_requests
count_requests += 1
if request.method == 'POST':
if not os.path.exists(DB):
os.makedirs(DB, exist_ok = True)
request_data = json.loads(request.data.decode('utf-8'))
if not GLOBAL_LIST in os.listdir():
data = pd.DataFrame(columns = ['act_id', "category"])
data.to_csv(GLOBAL_LIST, index = False)
if not LOGIN_FILE_NAME in os.listdir():
data = pd.DataFrame(columns = ['username', 'password'])
data.to_csv(LOGIN_FILE_NAME, index = False)
data_acts = pd.read_csv(GLOBAL_LIST)
#data_users = pd.read_csv(LOGIN_FILE_NAME)
# Username and actId
header = {"origin": INSTANCE_IP}
resp = r.get( "http://"+ IP + "/api/v1/users", "{}", headers = header)
print("=============")
print(resp.text)
print("=============")
data_users = eval(resp.text)
if request_data['username'] not in data_users or request_data["actId"] in data_acts["act_id"].tolist():
return Response('{}', status=400, mimetype='application/json')
# Upvotes field
if "upvotes" in request_data.keys():
return Response('{}', status=400, mimetype='application/json')
request_data['upvotes'] = 0
# category name
if request_data["categoryName"] not in os.listdir(DB):
return Response('{}', status=400, mimetype='application/json')
# Date Validity
if not validate(request_data["timestamp"]):
return Response('{}', status=400, mimetype='application/json')
# Base64 validity
try:
base64.b64decode(request_data["imgB64"])
except binascii.Error:
return Response('{}', status=400, mimetype='application/json')
data_acts = data_acts.append({"act_id": int(request_data["actId"]), "category": request_data["categoryName"] }, ignore_index = True)
data_acts.to_csv(GLOBAL_LIST, index = False)
with open(DB + "/" + request_data["categoryName"] + "/" +str(request_data["actId"]) + ".png", "wb") as fp:
fp.write(base64.decodebytes(request_data["imgB64"].encode()))
temp_path = DB + "/" + request_data["categoryName"] + "/" + GLOBAL_LIST
if not GLOBAL_LIST in os.listdir(DB + "/" + request_data["categoryName"]):
data = pd.DataFrame(columns = list(request_data.keys()))
data.to_csv(temp_path, index = False)
data = pd.read_csv(temp_path)
data = data.append(request_data, ignore_index = True)
data.to_csv(temp_path, index = False)
return Response('{}', status=201, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts/count", methods = ["GET"])
def count_act():
global count_requests
count_requests += 1
if request.method == 'GET':
if not GLOBAL_LIST in os.listdir():
return Response('[0]', status=200, mimetype='application/json')
else:
data_acts = pd.read_csv(GLOBAL_LIST)
count_acts = data_acts.shape[0]
return Response('['+ str(count_acts) +']', status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/_count", methods = ["GET", "DELETE"])
def count_request():
global count_requests
if request.method == 'GET':
return Response('['+ str(count_requests) +']', status=200, mimetype='application/json')
elif request.method == 'DELETE':
count_requests = 0
return Response('{}', status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
if __name__ == '__main__':
app.run(host = '0.0.0.0', port = 80, threaded=True)
#app.run(threaded = True, debug = True, port = 2000)
| 39.447293 | 140 | 0.584068 | [
"MIT"
] | craterkamath/microservice | load_balancer/docker_acts/app.py | 13,846 | Python |
"""Generated message classes for cloudasset version v1p2beta1.
The cloud asset API manages the history and inventory of cloud resources.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudasset'
class Asset(_messages.Message):
r"""Cloud asset. This includes all Google Cloud Platform resources, Cloud
IAM policies, and other non-GCP assets.
Fields:
ancestors: Asset's ancestry path in Cloud Resource Manager (CRM)
hierarchy, represented as a list of relative resource names. Ancestry
path starts with the closest CRM ancestor and ending at a visible root.
If the asset is a CRM project/ folder/organization, this starts from the
asset itself. Example: ["projects/123456789", "folders/5432",
"organizations/1234"]
assetType: Type of the asset. Example: "compute.googleapis.com/Disk".
iamPolicy: Representation of the actual Cloud IAM policy set on a cloud
resource. For each resource, there must be at most one Cloud IAM policy
set on it.
name: The full name of the asset. For example: `//compute.googleapis.com/p
rojects/my_project_123/zones/zone1/instances/instance1`. See [Resource N
ames](https://cloud.google.com/apis/design/resource_names#full_resource_
name) for more information.
resource: Representation of the resource.
"""
ancestors = _messages.StringField(1, repeated=True)
assetType = _messages.StringField(2)
iamPolicy = _messages.MessageField('Policy', 3)
name = _messages.StringField(4)
resource = _messages.MessageField('Resource', 5)
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ {
"service": "allServices" "audit_log_configs": [ {
"log_type": "DATA_READ", "exempted_members": [
"user:[email protected]" ] }, {
"log_type": "DATA_WRITE", }, {
"log_type": "ADMIN_READ", } ] }, {
"service": "sampleservice.googleapis.com" "audit_log_configs": [
{ "log_type": "DATA_READ", }, {
"log_type": "DATA_WRITE", "exempted_members": [
"user:[email protected]" ] } ] }
] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging,
and [email protected] from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ",
"exempted_members": [ "user:[email protected]" ]
}, { "log_type": "DATA_WRITE", } ] }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
[email protected] from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
ignoreChildExemptions: Specifies whether principals can be exempted for
the same LogType in lower-level resource policies. If true, any lower-
level exemptions will be ignored.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
ignoreChildExemptions = _messages.BooleanField(2)
logType = _messages.EnumField('LogTypeValueValuesEnum', 3)
class BatchGetAssetsHistoryResponse(_messages.Message):
r"""Batch get assets history response.
Fields:
assets: A list of assets with valid time windows.
"""
assets = _messages.MessageField('TemporalAsset', 1, repeated=True)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: An
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example,
`[email protected]` . * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
[email protected]`. * `group:{emailid}`: An email address
that represents a Google group. For example, `[email protected]`.
* `domain:{domain}`: The G Suite domain (primary) that represents all
the users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CloudassetBatchGetAssetsHistoryRequest(_messages.Message):
r"""A CloudassetBatchGetAssetsHistoryRequest object.
Enums:
ContentTypeValueValuesEnum: Required. The content type.
Fields:
assetNames: A list of the full names of the assets. For example: `//comput
e.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1
`. See [Resource Names](https://cloud.google.com/apis/design/resource_na
mes#full_resource_name) and [Resource Name
Format](https://cloud.google.com/resource-manager/docs/cloud-asset-
inventory/resource-name-format) for more info. The request becomes a
no-op if the asset name list is empty, and the max size of the asset
name list is 100 in one request.
contentType: Required. The content type.
parent: Required. The relative name of the root asset. It can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id")", or a project number (such as
"projects/12345").
readTimeWindow_endTime: End time of the time window (inclusive). Current
timestamp if not specified.
readTimeWindow_startTime: Start time of the time window (exclusive).
"""
class ContentTypeValueValuesEnum(_messages.Enum):
r"""Required. The content type.
Values:
CONTENT_TYPE_UNSPECIFIED: <no description>
RESOURCE: <no description>
IAM_POLICY: <no description>
"""
CONTENT_TYPE_UNSPECIFIED = 0
RESOURCE = 1
IAM_POLICY = 2
assetNames = _messages.StringField(1, repeated=True)
contentType = _messages.EnumField('ContentTypeValueValuesEnum', 2)
parent = _messages.StringField(3, required=True)
readTimeWindow_endTime = _messages.StringField(4)
readTimeWindow_startTime = _messages.StringField(5)
class CloudassetExportAssetsRequest(_messages.Message):
r"""A CloudassetExportAssetsRequest object.
Fields:
exportAssetsRequest: A ExportAssetsRequest resource to be passed as the
request body.
parent: Required. The relative name of the root asset. This can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id"), or a project number (such as
"projects/12345").
"""
exportAssetsRequest = _messages.MessageField('ExportAssetsRequest', 1)
parent = _messages.StringField(2, required=True)
class CloudassetFeedsCreateRequest(_messages.Message):
r"""A CloudassetFeedsCreateRequest object.
Fields:
createFeedRequest: A CreateFeedRequest resource to be passed as the
request body.
parent: Required. The name of the project/folder/organization where this
feed should be created in. It can only be an organization number (such
as "organizations/123"), a folder number (such as "folders/123"), a
project ID (such as "projects/my-project-id")", or a project number
(such as "projects/12345").
"""
createFeedRequest = _messages.MessageField('CreateFeedRequest', 1)
parent = _messages.StringField(2, required=True)
class CloudassetFeedsDeleteRequest(_messages.Message):
r"""A CloudassetFeedsDeleteRequest object.
Fields:
name: The name of the feed and it must be in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
name = _messages.StringField(1, required=True)
class CloudassetFeedsGetRequest(_messages.Message):
r"""A CloudassetFeedsGetRequest object.
Fields:
name: The name of the Feed and it must be in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
name = _messages.StringField(1, required=True)
class CloudassetFeedsListRequest(_messages.Message):
r"""A CloudassetFeedsListRequest object.
Fields:
parent: Required. The parent project/folder/organization whose feeds are
to be listed. It can only be using project/folder/organization number
(such as "folders/12345")", or a project ID (such as "projects/my-
project-id").
"""
parent = _messages.StringField(1, required=True)
class CloudassetFeedsPatchRequest(_messages.Message):
r"""A CloudassetFeedsPatchRequest object.
Fields:
name: Required. The format will be
projects/{project_number}/feeds/{client-assigned_feed_identifier} or
folders/{folder_number}/feeds/{client-assigned_feed_identifier} or
organizations/{organization_number}/feeds/{client-
assigned_feed_identifier} The client-assigned feed identifier must be
unique within the parent project/folder/organization.
updateFeedRequest: A UpdateFeedRequest resource to be passed as the
request body.
"""
name = _messages.StringField(1, required=True)
updateFeedRequest = _messages.MessageField('UpdateFeedRequest', 2)
class CreateFeedRequest(_messages.Message):
r"""Create asset feed request.
Fields:
feed: The feed details. The field `name` must be empty and it will be
generated in the format of: projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
feedId: Required. This is the client-assigned asset feed identifier and it
needs to be unique under a specific parent project/folder/organization.
"""
feed = _messages.MessageField('Feed', 1)
feedId = _messages.StringField(2)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class ExportAssetsRequest(_messages.Message):
r"""Export asset request.
Enums:
ContentTypeValueValuesEnum: Asset content type. If not specified, no
content but the asset name will be returned.
Fields:
assetTypes: A list of asset types of which to take a snapshot for. For
example: "compute.googleapis.com/Disk". If specified, only matching
assets will be returned. See [Introduction to Cloud Asset
Inventory](https://cloud.google.com/resource-manager/docs/cloud-asset-
inventory/overview) for all supported asset types.
contentType: Asset content type. If not specified, no content but the
asset name will be returned.
outputConfig: Required. Output configuration indicating where the results
will be output to. All results will be in newline delimited JSON format.
readTime: Timestamp to take an asset snapshot. This can only be set to a
timestamp between 2018-10-02 UTC (inclusive) and the current time. If
not specified, the current time will be used. Due to delays in resource
data collection and indexing, there is a volatile window during which
running the same query may get different results.
"""
class ContentTypeValueValuesEnum(_messages.Enum):
r"""Asset content type. If not specified, no content but the asset name
will be returned.
Values:
CONTENT_TYPE_UNSPECIFIED: Unspecified content type.
RESOURCE: Resource metadata.
IAM_POLICY: The actual IAM policy set on a resource.
"""
CONTENT_TYPE_UNSPECIFIED = 0
RESOURCE = 1
IAM_POLICY = 2
assetTypes = _messages.StringField(1, repeated=True)
contentType = _messages.EnumField('ContentTypeValueValuesEnum', 2)
outputConfig = _messages.MessageField('OutputConfig', 3)
readTime = _messages.StringField(4)
class Expr(_messages.Message):
r"""Represents an expression text. Example: title: "User account
presence" description: "Determines whether the request has a user
account" expression: "size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax. The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used e.g. in UIs which allow to
enter the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class Feed(_messages.Message):
r"""An asset feed used to export asset updates to a destinations. An asset
feed filter controls what updates are exported. The asset feed must be
created within a project, organization, or folder. Supported destinations
are: Cloud Pub/Sub topics.
Enums:
ContentTypeValueValuesEnum: Asset content type. If not specified, no
content but the asset name and type will be returned.
Fields:
assetNames: A list of the full names of the assets to receive updates. You
must specify either or both of asset_names and asset_types. Only asset
updates matching specified asset_names and asset_types are exported to
the feed. For example: `//compute.googleapis.com/projects/my_project_123
/zones/zone1/instances/instance1`. See [Resource Names](https://cloud.go
ogle.com/apis/design/resource_names#full_resource_name) for more info.
assetTypes: A list of types of the assets to receive updates. You must
specify either or both of asset_names and asset_types. Only asset
updates matching specified asset_names and asset_types are exported to
the feed. For example: "compute.googleapis.com/Disk" See [Introduction
to Cloud Asset Inventory](https://cloud.google.com/resource-
manager/docs/cloud-asset-inventory/overview) for all supported asset
types.
contentType: Asset content type. If not specified, no content but the
asset name and type will be returned.
feedOutputConfig: Required. Feed output configuration defining where the
asset updates are published to.
name: Required. The format will be
projects/{project_number}/feeds/{client-assigned_feed_identifier} or
folders/{folder_number}/feeds/{client-assigned_feed_identifier} or
organizations/{organization_number}/feeds/{client-
assigned_feed_identifier} The client-assigned feed identifier must be
unique within the parent project/folder/organization.
"""
class ContentTypeValueValuesEnum(_messages.Enum):
r"""Asset content type. If not specified, no content but the asset name
and type will be returned.
Values:
CONTENT_TYPE_UNSPECIFIED: Unspecified content type.
RESOURCE: Resource metadata.
IAM_POLICY: The actual IAM policy set on a resource.
"""
CONTENT_TYPE_UNSPECIFIED = 0
RESOURCE = 1
IAM_POLICY = 2
assetNames = _messages.StringField(1, repeated=True)
assetTypes = _messages.StringField(2, repeated=True)
contentType = _messages.EnumField('ContentTypeValueValuesEnum', 3)
feedOutputConfig = _messages.MessageField('FeedOutputConfig', 4)
name = _messages.StringField(5)
class FeedOutputConfig(_messages.Message):
r"""Output configuration for asset feed destination.
Fields:
pubsubDestination: Destination on Cloud Pubsub.
"""
pubsubDestination = _messages.MessageField('PubsubDestination', 1)
class GcsDestination(_messages.Message):
r"""A Cloud Storage location.
Fields:
uri: The uri of the Cloud Storage object. It's the same uri that is used
by gsutil. For example: "gs://bucket_name/object_name". See [Viewing and
Editing Object Metadata](https://cloud.google.com/storage/docs/viewing-
editing-metadata) for more information.
"""
uri = _messages.StringField(1)
class ListFeedsResponse(_messages.Message):
r"""A ListFeedsResponse object.
Fields:
feeds: A list of feeds.
"""
feeds = _messages.MessageField('Feed', 1, repeated=True)
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class OutputConfig(_messages.Message):
r"""Output configuration for export assets destination.
Fields:
gcsDestination: Destination on Cloud Storage.
"""
gcsDestination = _messages.MessageField('GcsDestination', 1)
class Policy(_messages.Message):
r"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **JSON Example** { "bindings": [ {
"role": "roles/owner", "members": [
"user:[email protected]", "group:[email protected]",
"domain:google.com", "serviceAccount:my-other-
[email protected]" ] }, {
"role": "roles/viewer", "members": ["user:[email protected]"]
} ] } **YAML Example** bindings: - members: -
user:[email protected] - group:[email protected] -
domain:google.com - serviceAccount:my-other-
[email protected] role: roles/owner - members:
- user:[email protected] role: roles/viewer For a description of IAM
and its features, see the [IAM developer's
guide](https://cloud.google.com/iam/docs).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. `bindings` with no
members will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten.
version: Deprecated.
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class PubsubDestination(_messages.Message):
r"""A Cloud Pubsub destination.
Fields:
topic: The name of the Cloud Pub/Sub topic to publish to. For example:
`projects/PROJECT_ID/topics/TOPIC_ID`.
"""
topic = _messages.StringField(1)
class Resource(_messages.Message):
r"""Representation of a cloud resource.
Messages:
DataValue: The content of the resource, in which some sensitive fields are
scrubbed away and may not be present.
Fields:
data: The content of the resource, in which some sensitive fields are
scrubbed away and may not be present.
discoveryDocumentUri: The URL of the discovery document containing the
resource's JSON schema. For example:
`"https://www.googleapis.com/discovery/v1/apis/compute/v1/rest"`. It
will be left unspecified for resources without a discovery-based API,
such as Cloud Bigtable.
discoveryName: The JSON schema name listed in the discovery document.
Example: "Project". It will be left unspecified for resources (such as
Cloud Bigtable) without a discovery-based API.
parent: The full name of the immediate parent of this resource. See
[Resource Names](https://cloud.google.com/apis/design/resource_names#ful
l_resource_name) for more information. For GCP assets, it is the parent
resource defined in the [Cloud IAM policy
hierarchy](https://cloud.google.com/iam/docs/overview#policy_hierarchy).
For example:
`"//cloudresourcemanager.googleapis.com/projects/my_project_123"`. For
third-party assets, it is up to the users to define.
resourceUrl: The REST URL for accessing the resource. An HTTP GET
operation using this URL returns the resource itself. Example:
`https://cloudresourcemanager.googleapis.com/v1/projects/my-
project-123`. It will be left unspecified for resources without a REST
API.
version: The API version. Example: "v1".
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DataValue(_messages.Message):
r"""The content of the resource, in which some sensitive fields are
scrubbed away and may not be present.
Messages:
AdditionalProperty: An additional property for a DataValue object.
Fields:
additionalProperties: Properties of the object.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
data = _messages.MessageField('DataValue', 1)
discoveryDocumentUri = _messages.StringField(2)
discoveryName = _messages.StringField(3)
parent = _messages.StringField(4)
resourceUrl = _messages.StringField(5)
version = _messages.StringField(6)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class TemporalAsset(_messages.Message):
r"""Temporal asset. In addition to the asset, the temporal asset includes
the status of the asset and valid from and to time of it.
Fields:
asset: Asset.
deleted: If the asset is deleted or not.
window: The time window when the asset data and state was observed.
"""
asset = _messages.MessageField('Asset', 1)
deleted = _messages.BooleanField(2)
window = _messages.MessageField('TimeWindow', 3)
class TimeWindow(_messages.Message):
r"""A time window of (start_time, end_time].
Fields:
endTime: End time of the time window (inclusive). Current timestamp if not
specified.
startTime: Start time of the time window (exclusive).
"""
endTime = _messages.StringField(1)
startTime = _messages.StringField(2)
class UpdateFeedRequest(_messages.Message):
r"""Update asset feed request.
Fields:
feed: The new values of feed details. It must match an existing feed and
the field `name` must be in the format of:
projects/project_number/feeds/feed_id or
folders/folder_number/feeds/feed_id or
organizations/organization_number/feeds/feed_id.
updateMask: Only updates the `feed` fields indicated by this mask. The
field mask must not be empty, and it must not contain fields that are
immutable or only set by the server.
"""
feed = _messages.MessageField('Feed', 1)
updateMask = _messages.StringField(2)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| 40.752847 | 89 | 0.721556 | [
"Apache-2.0"
] | google-cloud-sdk-unofficial/google-cloud-sdk | lib/googlecloudsdk/third_party/apis/cloudasset/v1p2beta1/cloudasset_v1p2beta1_messages.py | 35,781 | Python |
# -*- coding: utf-8 -*-
"""Django page CMS test suite module for page links"""
from pages.tests.testcase import TestCase
from pages.models import Content
class LinkTestCase(TestCase):
"""Django page CMS link test suite class"""
def test_01_set_body_pagelink(self):
"""Test the get_body_pagelink_ids and set_body_pagelink functions."""
self.set_setting("PAGE_LINK_FILTER", True)
page1 = self.create_new_page()
page2 = self.create_new_page()
# page2 has a link on page1
content_string = 'test <a href="%s" class="page_%d">hello</a>'
content = Content(
page=page2,
language='en-us',
type='body',
body=content_string % ('#', page1.id)
)
content.save()
self.assertEqual(
Content.objects.get_content(page2, 'en-us', 'body'),
content_string % (page1.get_url_path(), page1.id)
)
self.assertFalse(page2.has_broken_link())
page1.delete()
self.assertEqual(
Content.objects.get_content(page2, 'en-us', 'body'),
'test <a href="#" class="pagelink_broken">hello</a>'
)
self.assertTrue(page2.has_broken_link()) | 36.147059 | 77 | 0.602929 | [
"BSD-3-Clause"
] | redsolution/django-page-cms | pages/tests/test_pages_link.py | 1,229 | Python |
import nextcord
from nextcord.ext import commands, menus
bot = commands.Bot(command_prefix="$")
class ButtonConfirm(menus.ButtonMenu):
def __init__(self, text):
super().__init__(timeout=15.0, delete_message_after=True)
self.text = text
self.result = None
async def send_initial_message(self, ctx, channel):
return await channel.send(self.text, view=self)
@nextcord.ui.button(emoji='\N{WHITE HEAVY CHECK MARK}')
async def do_confirm(self, button, interaction):
self.result = True
self.stop()
@nextcord.ui.button(emoji='\N{CROSS MARK}')
async def do_deny(self, button, interaction):
self.result = False
self.stop()
async def prompt(self, ctx):
await menus.Menu.start(self, ctx, wait=True)
return self.result
@bot.command()
async def confirm(ctx):
answer = await ButtonConfirm('Confirm?').prompt(ctx)
await ctx.send(f'You said: {answer}')
bot.run('token')
| 25.153846 | 65 | 0.66157 | ["MIT"] | Brettanda/nextcord-ext-menus | examples/confirm.py | 981 | Python
from ParseHandler import ParseHandler
from PathHandler import PathHandler
import paths
parser = ParseHandler()
pather = PathHandler()
# match subdirectories in both folders
pather.build_matching_subdir(paths.TOY_RAW, paths.TOY_CLEAN)
# get paths to folders in raw/ directory
dir_names = pather.get_dir_names(paths.TOY_RAW)
raw_dir_paths = pather.get_dir_paths(paths.TOY_RAW)
clean_dir_paths = pather.get_dir_paths(paths.TOY_CLEAN)
# iterate through the contents of each folder
for raw_dir_path, clean_dir_path in zip(raw_dir_paths, clean_dir_paths):
# get raw file paths from each subdir
file_names = pather.get_file_names(raw_dir_path)
raw_file_paths = pather.get_file_paths(raw_dir_path)
clean_file_paths = [clean_dir_path + file_name for file_name in file_names]
# parse each raw_file into the clean_file
for raw_file_path, clean_file_path in zip(raw_file_paths, clean_file_paths):
parser.parse(raw_file_path, clean_file_path)
| 33.862069 | 80 | 0.792261 | ["MIT"] | richard-duong/GuessTheClass | old/src/integrationClean.py | 982 | Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditLoanapplyInsturlQueryResponse(AlipayResponse):
def __init__(self):
super(MybankCreditLoanapplyInsturlQueryResponse, self).__init__()
self._target_url = None
@property
def target_url(self):
return self._target_url
@target_url.setter
def target_url(self, value):
self._target_url = value
def parse_response_content(self, response_content):
response = super(MybankCreditLoanapplyInsturlQueryResponse, self).parse_response_content(response_content)
if 'target_url' in response:
self.target_url = response['target_url']
| 28.769231 | 114 | 0.727273 | ["Apache-2.0"] | Anning01/alipay-sdk-python-all | alipay/aop/api/response/MybankCreditLoanapplyInsturlQueryResponse.py | 748 | Python
"""
This file defines the database models
"""
from .common import db, Field, auth
from py4web import URL
from pydal.validators import IS_NOT_EMPTY, IS_FILE, IS_EMPTY_OR
import datetime
from . import settings
def get_time():
return datetime.datetime.utcnow()
def get_download_url(picture):
return f"images/{picture}"
def get_user():
return auth.current_user.get("id") if auth.current_user else None
db.define_table(
"post",
Field("title", "string", requires=IS_NOT_EMPTY()),
Field("content", "text", requires=IS_NOT_EMPTY()),
Field("date_posted", "datetime", default=get_time, readable=False, writable=False),
Field(
"author",
"reference auth_user",
default=get_user,
readable=False,
writable=False,
),
)
db.define_table(
"profile",
Field("user", "reference auth_user", readable=False, writable=False),
Field(
"image",
"upload",
requires = IS_EMPTY_OR(IS_FILE()),
default="",
uploadfolder=settings.UPLOAD_PATH,
download_url=get_download_url, label="Profile Picture",
),
)
# We do not want these fields to appear in forms by default.
db.post.id.readable = False
db.post.id.writable = False
db.profile.id.readable = False
db.profile.id.writable = False
db.commit()
| 22.655172 | 87 | 0.672755 | ["MIT"] | Kkeller83/py4web_spa_blog | models.py | 1,314 | Python
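# Hedged sketch of using the tables defined above from a py4web action or shell;
# the title and content values are made up for illustration.
post_id = db.post.insert(title="First post", content="Hello from py4web")
db.commit()
recent_posts = db(db.post.author == get_user()).select(orderby=~db.post.date_posted)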
import imageio
import tensorflow as tf
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import tf_py_environment
def load_policy(path):
return tf.compat.v2.saved_model.load(path)
def visualize_policy(environment, policy, output_filename, num_episodes=1, fps=5):
rendering_environment = environment
if isinstance(environment, tf_py_environment.TFPyEnvironment):
# The inner env should be used for rendering
rendering_environment = environment.pyenv.envs[0]
with imageio.get_writer(output_filename, fps=fps) as video:
font = ImageFont.load_default()
total_reward = None
def _add_environment_frame():
rendered_env = rendering_environment.render()
image = Image.fromarray(rendered_env.astype(np.uint8), mode='RGB')
draw = ImageDraw.Draw(image)
draw.text((5, 5), 'TR: %.1f' % total_reward, font=font)
image_as_numpy = np.array(image.getdata()).reshape(rendered_env.shape).astype(np.uint8)
video.append_data(image_as_numpy)
for _ in range(num_episodes):
total_reward = 0.0
time_step = environment.reset()
_add_environment_frame()
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = environment.step(action_step.action)
total_reward += time_step.reward.numpy()[0]
_add_environment_frame()
def evaluate_policy(env, policy, num_episodes):
total_return = 0.0
total_num_steps = 0.0
for _ in range(num_episodes):
time_step = env.reset()
episode_return = 0.0
episode_num_steps = 0.0
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = env.step(action_step.action)
episode_return += time_step.reward
episode_num_steps += 1
total_return += episode_return
total_num_steps += episode_num_steps
return (total_return / num_episodes).numpy()[0], total_num_steps / num_episodes
def as_tf_env(env):
return tf_py_environment.TFPyEnvironment(env)
def create_replay_buffer(agent, train_env, replay_buffer_size):
return tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_size,
)
def create_collect_driver(train_env, agent, replay_buffer, collect_steps):
return dynamic_step_driver.DynamicStepDriver(
train_env, agent.collect_policy,
observers=[replay_buffer.add_batch],
num_steps=collect_steps,
)
def cudnn_workaround():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
| 33.488889 | 99 | 0.69144 | ["MIT"] | hr0nix/trackdays | trackdays/training/utils.py | 3,014 | Python
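# Hedged sketch wiring the helpers above together; the gym environment name and the
# saved-policy path are assumptions for illustration only.
from tf_agents.environments import suite_gym

eval_env = as_tf_env(suite_gym.load('CartPole-v0'))   # hypothetical evaluation environment
policy = load_policy('/tmp/saved_policy')             # hypothetical SavedModel directory
avg_return, avg_steps = evaluate_policy(eval_env, policy, num_episodes=5)
visualize_policy(eval_env, policy, 'rollout.mp4', num_episodes=1)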
#!/usr/bin/env python
import argparse
import open3d as o3d
import numpy as np
import os
import time
from os.path import join, dirname, basename, splitext, exists, isdir, isfile
from os import listdir
from numpy import linalg as LA
import math
import cv2
from pathlib import Path
def pcd_to_bin(pcd_path, outdir=None):
pcd = o3d.io.read_point_cloud(pcd_path, format="pcd")
pcd_arr = np.asarray(pcd.points)
if len(pcd_arr) == 0:
return None
outpath = join(Path(pcd_path).parent if outdir is None else outdir, splitext(basename(pcd_path))[0] + ".bin")
# binarize array and save to the same file path with .bin extension
pcd_arr.tofile(outpath)
return outpath
def pcd_to_sphproj(pcd_path, nr_scans, width, outdir=None):
pcd = o3d.io.read_point_cloud(pcd_path, format="pcd")
pcd_arr = np.asarray(pcd.points)
if len(pcd_arr) == 0:
return None
# https://towardsdatascience.com/spherical-projection-for-point-clouds-56a2fc258e6c
# print(pcd_arr.shape)
# print(pcd_arr[:, :3].shape)
R = LA.norm(pcd_arr[:, :3], axis=1)
print("R {} | {} -- {}".format(R.shape, np.amin(R), np.amax(R)))
yaw = np.arctan2(pcd_arr[:, 1], pcd_arr[:, 0])
# print("yaw {} | {} -- {}".format(yaw.shape, np.amin(yaw), np.amax(yaw)))
# print("y {} | {} -- {}".format(pcd_arr[:, 1].shape, np.amin(pcd_arr[:, 1]), np.amax(pcd_arr[:, 1])))
pitch = np.arcsin(np.divide(pcd_arr[:, 2], R))
# print("pitch {} | {} -- {}".format(pitch.shape, np.amin(pitch), np.amax(pitch)))
# import matplotlib.pyplot as plt
# plt.plot(yaw, pitch, 'b.')
# plt.xlabel('yaw [rad]')
# plt.ylabel('pitch [rad]')
# plt.axis('equal')
# plt.show()
FOV_Down = np.amin(pitch)
FOV_Up = np.amax(pitch)
FOV = FOV_Up + abs(FOV_Down)
u = np.around((nr_scans-1) * (1.0-(pitch-FOV_Down)/FOV)).astype(np.int16)
# print("u {} | {} -- {} | {}".format(u.shape, np.amin(u), np.amax(u), u.dtype))
v = np.around((width-1) * (0.5 * ((yaw/math.pi) + 1))).astype(np.int16)
# print("v {} | {} -- {} | {}".format(v.shape, np.amin(v), np.amax(v), v.dtype))
sph_proj = np.zeros((nr_scans, width))
R[R > 100.0] = 100.0 # cut off all values above 100m
R = np.round((R / 100.0) * 255.0) # convert 0.0-100.0m into 0.0-255.0 for saving as byte8 image
sph_proj[u, v] = R
# print("sph_proj {} | {} -- {} | {}".format(sph_proj.shape, np.amin(sph_proj), np.amax(sph_proj), sph_proj.dtype))
outpath = join(Path(pcd_path).parent if outdir is None else outdir, splitext(basename(pcd_path))[0] + ".jpg")
cv2.imwrite(outpath, sph_proj)
print(outpath)
return np.amin(R), np.amax(R)
def bin_to_pcd(bin_path, outdir=None):
print(bin_path)
pcd_arr = np.fromfile(bin_path, dtype=np.float32)
pcd_arr = pcd_arr.reshape((-1, 4)) # kitti has 4 values per point
# print(type(pcd_arr), pcd_arr.shape, len(pcd_arr))
# print(pcd_arr[:, :3].shape)
if len(pcd_arr) == 0:
return None
outpath = join(Path(bin_path).parent if outdir is None else outdir, splitext(basename(bin_path))[0] + ".pcd")
print(outpath)
# save array as .pcd
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pcd_arr[:, :3]) # 3 dimensions
o3d.io.write_point_cloud(outpath, pcd)
return outpath
def bin_to_sphproj(bin_path, outdir=None):
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert between .pcd and .bin point cloud formats')
parser.add_argument("-t", type=str, required=True,
help="Conversion to run (pcd2bin, pcd2sphproj, bin2pcd, bin2sphproj)")
parser.add_argument("-p", type=str, required=True, help="Path to directory or file with point cloud")
parser.add_argument("-nr_scans", type=int, help="Number of lidar scans (default 16)", default=16)
parser.add_argument("-width", type=int, help="Spherical projection width (default 1024)", default=1024)
args = parser.parse_args()
if not exists(args.p):
exit("{} does not exist".format(args.p))
if isfile(args.p):
# check extension
ext = splitext(args.p)[-1].lower()
if args.t == "pcd2bin" and ext == ".pcd":
pcd_to_bin(args.p)
elif args.t == "bin2pcd" and ext == ".bin":
bin_to_pcd(args.p)
elif args.t == "pcd2sphproj" and ext == ".pcd":
pcd_to_sphproj(args.p, args.nr_scans, args.width)
elif args.t == "bin2sphproj" and ext == ".bin":
bin_to_sphproj(args.p)
else:
print("Wrong conversion or extension incompatible with conversion")
elif isdir(args.p):
# go through all files and convert .pcd or .bin files encountered within the directory
timestamp = time.strftime("%Y%m%d-%H%M%S")
outdir = join(Path(args.p).parent, str(args.t) + "_" + timestamp)
if not os.path.exists(outdir):
os.makedirs(outdir)
range_min = float('inf')
range_max = float('-inf')
for f in listdir(args.p):
# check extension
ext = splitext(f)[-1].lower()
if args.t == "pcd2bin" and ext == ".pcd":
pcd_to_bin(join(args.p, f), outdir)
elif args.t == "bin2pcd" and ext == ".bin":
bin_to_pcd(join(args.p, f), outdir)
elif args.t == "pcd2sphproj" and ext == ".pcd":
                ranges = pcd_to_sphproj(join(args.p, f), args.nr_scans, args.width, outdir)
                if ranges is not None:  # pcd_to_sphproj returns None for an empty cloud
                    range_min1, range_max1 = ranges
                    if range_min1 < range_min:
                        range_min = range_min1
                    if range_max1 > range_max:
                        range_max = range_max1
elif args.t == "bin2sphproj" and ext == ".bin":
bin_to_sphproj(join(args.p, f), outdir)
else:
print("Wrong conversion or extension incompatible with conversion")
print("range: {} - {}".format(range_min, range_max)) | 36.723926 | 119 | 0.607751 | [
"MIT"
] | miroslavradojevic/python-snippets | pointcloud/pcl_conv.py | 5,986 | Python |
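# Hedged usage sketch for the conversion helpers above; the input file name and the
# output directory are assumptions.
bin_path = pcd_to_bin('scan_0001.pcd', outdir='out')
ranges = pcd_to_sphproj('scan_0001.pcd', nr_scans=16, width=1024, outdir='out')  # None if the cloud is empty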
import os
import traceback
import datetime
class DuplicatePlotException(Exception): pass
class ModelLogUtils():
'''
Collection of utility methods for logging and plotting of messages & metrics during training.
'''
def __init__(self):
# Add logging to stdout for local debugging
self._logger = ModelLogUtilsLogger()
def set_logger(self, logger):
if not isinstance(logger, ModelLogUtilsLogger):
raise Exception('`logger` should subclass `ModelLogUtilsLogger`')
self._logger = logger
def log(self, message):
'''
Logs a message for analysis of model training.
'''
self._logger.log(message)
def define_loss_plot(self):
'''
Convenience method of defining a plot of ``loss`` against ``epoch``.
To be used with ``log_loss_metric()``.
'''
self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')
def log_loss_metric(self, loss, epoch):
'''
Convenience method for logging `loss` against `epoch`.
To be used with ``define_loss_plot()``.
'''
self.log_metrics(loss=loss, epoch=epoch)
def define_plot(self, title, metrics, x_axis=None):
'''
Defines a plot for a set of metrics for analysis of model training.
By default, metrics will be plotted against time.
'''
self._logger.define_plot(title, metrics, x_axis)
def log_metrics(self, **kwargs):
'''
Logs metrics for a single point in time { <metric>: <value> }.
<value> should be a number.
'''
self._logger.log_metrics(**kwargs)
class ModelLogUtilsLogger():
def __init__(self):
self._plots = set()
def log(self, message):
self._print(message)
def define_plot(self, title, metrics, x_axis):
if title in self._plots:
raise DuplicatePlotException('Plot {} already defined'.format(title))
self._plots.add(title)
self._print('Plot with title `{}` of {} against {} will be registered when this model is being trained on Rafiki' \
.format(title, ', '.join(metrics), x_axis or 'time'))
def log_metrics(self, **kwargs):
self._print(', '.join(['{}={}'.format(metric, value) for (metric, value) in kwargs.items()]))
def _print(self, message):
        print(message)
| 32.808219 | 123 | 0.616284 | ["Apache-2.0"] | Yirui-Wang/rafiki | rafiki/model/log.py | 2,395 | Python
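# Hedged usage sketch for the logging utilities above; the loss values are made up.
log_utils = ModelLogUtils()
log_utils.define_loss_plot()
for epoch, loss in enumerate([0.9, 0.5, 0.3], start=1):
    log_utils.log_loss_metric(loss, epoch)
    log_utils.log('Finished epoch {}'.format(epoch))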
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template for WA-COP + CAD Cloud Integration
"""
T = current.T
# =========================================================================
# System Settings
#
settings.base.system_name = T("Sahana: Washington Common Operating Picture (WA-COP)")
settings.base.system_name_short = T("Sahana")
# Prepop options
settings.base.prepopulate_options = {"mandatory": "CAD",
"default": ("default/users",
"CAD/Demo",
),
}
# Prepop default
settings.base.prepopulate = "template:default"
# Theme (folder to use for views/layout.html)
#settings.base.theme = "default"
# -------------------------------------------------------------------------
# Self-Registration and User Profile
#
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users need to be approved
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -------------------------------------------------------------------------
# Security Policy
#
settings.security.policy = 7 # Apply Controller, Function and Table ACLs
settings.security.map = True
# -------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
("en", "English"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "-0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%b %d %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
settings.msg.require_international_phone_numbers = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# -------------------------------------------------------------------------
# GIS settings
#
# Restrict the Location Selector to just certain countries
settings.gis.countries = ("US",)
# Levels for the LocationSelector
levels = ("L1", "L2", "L3")
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Until we add support to S3LocationSelector to set dropdowns from LatLons
settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "mcop"
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to prevent showing LatLon in Location Represents
settings.gis.location_represent_address_only = "icon"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# -------------------------------------------------------------------------
# Event Management Settings
#
settings.event.incident_teams_tab = "Units"
# -------------------------------------------------------------------------
# Modules
#
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
# ("errors", Storage(
# name_nice = "Ticket Viewer",
# #description = "Needed for Breadcrumbs",
# restricted = False,
# module_type = None # No Menu
# )),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = 10,
)),
("event", Storage(
name_nice = "Event Management",
restricted = True,
module_type = 2,
)),
("project", Storage(
name_nice = "Project Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
])
# END =========================================================================
| 38.672566 | 141 | 0.559725 | ["MIT"] | anurag-ks/eden | modules/templates/CAD/config.py | 8,740 | Python
from urllib.parse import quote
import re
def parse_equation(match):
# Converts a latex expression into something the tex API can understand
eq = match.group(0)
# Curly brackets need to be escaped
eq = eq.replace('{', '\{')
eq = eq.replace('}', '\}')
# Create the url using the quote method which converts special characters
url = 'https://tex.s2cms.ru/svg/%s' % quote(eq)
    # Return a markdown image tag pointing at the rendered SVG
    return '![](%s)' % url
def parse_markdown(md):
# Define a pattern for catching latex equations delimited by dollar signs
eq_pattern = r'(\$.+?\$)'
# Substitute any latex equations found
return re.sub(eq_pattern, parse_equation, md)
def markdown_texify(file_in, file_out):
# Read input file
markdown = open(file_in).read()
# Parse markdown, take care of equations
latex = parse_markdown(markdown)
# Write to out-file
result = open(file_out, 'w').write(latex)
print('Finished, %i characters written to %s' % (result, file_out))
| 26.179487 | 77 | 0.666014 | ["Apache-2.0"] | gigumbrajaguru/SlackTats | venv/lib/python3.7/site-packages/github_markdown.py | 1,021 | Python
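# Hedged usage sketch for the helpers above; the file names are assumptions.
print(parse_markdown('Euler: $e^{i\\pi} + 1 = 0$'))   # the inline equation becomes an image link
markdown_texify('README.md', 'README_tex.md')         # hypothetical input/output paths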
import pandas as pd
from oss_hugo.API_Hugo_OSS import API_Hugo_OSS
class OSS_Schedule:
def __init__(self):
self.hugo = API_Hugo_OSS()
def sessions_mapped_by_size(self):
mapping = []
for path, session in self.hugo.sessions().items():
content = session.get('content')
metadata = session.get('metadata')
page_type = metadata.get('type')
title = metadata.get('title')
track = metadata.get('track')
organizers = metadata.get('organizers')
participants = metadata.get('participants')
if not organizers: organizers = []
if not participants: participants = []
if type(organizers) is str: organizers = organizers.split(',')
if type(participants) is str: participants = participants.split(',')
if 'TBD' in organizers: organizers.remove('TBD')
if 'Pending' in organizers: organizers.remove('Pending')
if 'you ?' in participants: participants.remove('you ?')
if title and page_type:
item = {
'title': title,
'track': track,
'page_type': page_type,
'organizers': organizers,
'participants': participants,
'content': len(content),
'path': path
}
mapping.append(item)
df_mappings = pd.DataFrame(mapping)
df_mappings = df_mappings[['title', 'track', 'page_type', 'content', 'organizers', 'participants']]
df_sessions = df_mappings[df_mappings['page_type'] != 'track']
df_sessions = df_sessions.sort_values(['content'], ascending=False).reset_index(drop=True)
return df_sessions
#todo get the result below using pandas
def df_sessions_registered_participants(self):
results = {}
for key, value in self.hugo.df_participants().to_dict(orient='index').items():
title = value.get('title')
sessions = value.get('sessions')
for session in sessions:
if results.get(session) is None: results[session] = []
results[session].append(title)
mappings = []
for key, value in results.items():
mappings.append({'title': key, 'participants': value, 'participants_count': len(value)})
df_mappings = pd.DataFrame(mappings)
df_mappings = df_mappings[['title', 'participants_count', 'participants']].sort_values(['participants_count'], ascending=False)
        return df_mappings
| 44.745763 | 136 | 0.580303 | ["CC0-1.0"] | Alone2671/oss2020 | notebooks/api/oss_hugo/OSS_Schedule.py | 2,640 | Python
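# Hedged usage sketch for the schedule helper above; it assumes the Hugo site content
# expected by API_Hugo_OSS is available locally.
schedule = OSS_Schedule()
print(schedule.sessions_mapped_by_size().head())
print(schedule.df_sessions_registered_participants().head())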
# --------------
##File path for the file
file_path
def read_file(path):
file = open(file_path , 'r')
sentence = file.readline()
file.close()
return sentence
sample_message = read_file(file_path)
print(sample_message)
#Code starts here
# --------------
#Code starts here
file_path_1
file_path_2
def read_file(path):
file = open(file_path_1 , 'r')
sentence = file.readline()
file.close()
return str(sentence)
message_1 = read_file(file_path_1)
print(message_1)
def read_file(path):
file = open(file_path_2 , 'r')
sentence = file.readline()
file.close()
return str(sentence)
message_2 = read_file(file_path_2)
print(message_2)
def fuse_msg(message_a , message_b):
quotient = int(message_b)//int(message_a)
return str(quotient)
secret_msg_1 = fuse_msg(message_1 , message_2)
print(secret_msg_1)
# --------------
#Code starts here
file_path_3
def read_file(path):
file = open(file_path_3 , 'r')
sentence = file.readline()
file.close()
return str(sentence)
message_3 = read_file(file_path_3)
print(message_3)
def substitute_msg(message_c):
if message_c == 'Red':
sub = 'Army General'
if message_c == 'Green':
sub = 'Data Scientist'
if message_c == 'Blue' :
sub = 'Marine Biologist'
return sub
secret_msg_2 = substitute_msg(message_3)
print(secret_msg_2)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
def read_file(path):
file = open(file_path_4 , 'r')
sentence = file.readline()
file.close()
return sentence
message_4 = read_file(file_path_4)
print(message_4)
def read_file(path):
file = open(file_path_5 , 'r')
sentence = file.readline()
file.close()
return sentence
message_5 = read_file(file_path_5)
print(message_5)
def compare_msg(message_d , message_e):
a_list = message_d.split()
b_list = message_e.split()
c_list = [x for x in a_list if x not in b_list]
final_msg = " ".join(c_list)
return final_msg
secret_msg_3 = compare_msg(message_4 , message_5)
print(secret_msg_3)
# --------------
#Code starts here
file_path_6
def read_file(path):
file = open(file_path_6 , 'r')
sentence = file.readline()
file.close()
return sentence
message_6 = read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
a_list = message_f.split()
even_word = lambda x : (len(x) % 2 == 0)
b_list = filter(even_word , a_list)
final_msg = " ".join(b_list)
return final_msg
secret_msg_4 = extract_msg(message_6)
print(secret_msg_4)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = secret_msg_3 + ' '+ secret_msg_1 + ' ' + secret_msg_4 + ' '+ secret_msg_2
def write_file(secret_msg , path):
file = open(final_path , 'a+')
sentence = file.write(secret_msg)
file.close()
return sentence
sample_message = write_file(secret_msg , final_path)
print(sample_message)
| 19.226744 | 87 | 0.638645 | ["MIT"] | umeshpal93/ga-learner-dsb-repo | Spy-Game/code.py | 3,307 | Python
# signals are for when a user modifies something in the db, example, creates a post
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
# Creates a profile each time a new user is created
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    instance.profile.save()
| 30.944444 | 83 | 0.771993 | ["MIT"] | afern247/BookStore-Web | bookStore/users/signals.py | 557 | Python
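# Hedged sketch of the behaviour wired up above: creating a user also creates a
# Profile through the post_save receiver. The username and password are made up.
from django.contrib.auth.models import User

user = User.objects.create_user('alice', password='not-a-real-password')
print(user.profile)  # created automatically by create_profile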
import os
import numpy as np
import torch
import time
import sys
from collections import OrderedDict
from torch.autograd import Variable
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
mainpath = os.getcwd()
pix2pixhd_dir = Path(mainpath+'/src/pix2pixHD/')
sys.path.append(str(pix2pixhd_dir))
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import src.config.train_opt as opt
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
torch.multiprocessing.set_sharing_strategy('file_system')
torch.backends.cudnn.benchmark = True
def main():
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
start_epoch, epoch_iter = 1, 0
total_steps = (start_epoch - 1) * dataset_size + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
model = create_model(opt)
model = model.cuda()
visualizer = Visualizer(opt)
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == display_delta
############## Forward Pass ######################
losses, generated = model(Variable(data['label']), Variable(data['inst']),
Variable(data['image']), Variable(data['feat']), infer=save_fake)
# sum per device losses
losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
loss_dict = dict(zip(model.loss_names, losses))
# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict.get('G_VGG', 0)
############### Backward Pass ####################
# update generator weights
model.optimizer_G.zero_grad()
loss_G.backward()
model.optimizer_G.step()
# update discriminator weights
model.optimizer_D.zero_grad()
loss_D.backward()
model.optimizer_D.step()
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()} # CHANGE: removed [0] after v.data
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
### display output images
if save_fake:
visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
('synthesized_image', util.tensor2im(generated.data[0])),
('real_image', util.tensor2im(data['image'][0]))])
visualizer.display_current_results(visuals, epoch, total_steps)
### save latest model
if total_steps % opt.save_latest_freq == save_delta:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
if epoch_iter >= dataset_size:
break
# end of epoch
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.save('latest')
model.save(epoch)
np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.update_learning_rate()
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| 39.070313 | 135 | 0.607678 | ["MIT"] | michellefli/EverybodyDanceNow_reproduce_pytorch | train_pose2vid.py | 5,001 | Python
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements classes to evaluate the performance of poison detection methods.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
from typing import Tuple, Union, List
import numpy as np
logger = logging.getLogger(__name__)
class GroundTruthEvaluator:
"""
Class to evaluate the performance of the poison detection method.
"""
def __init__(self):
"""
Evaluates ground truth constructor
"""
def analyze_correctness(
self, assigned_clean_by_class: Union[np.ndarray, List[np.ndarray]], is_clean_by_class: list
) -> Tuple[np.ndarray, str]:
"""
For each training sample, determine whether the activation clustering method was correct.
:param assigned_clean_by_class: Result of clustering.
:param is_clean_by_class: is clean separated by class.
:return: Two variables are returned:
1) all_errors_by_class[i]: an array indicating the correctness of each assignment
in the ith class. Such that:
all_errors_by_class[i] = 0 if marked poison, is poison
all_errors_by_class[i] = 1 if marked clean, is clean
all_errors_by_class[i] = 2 if marked poison, is clean
all_errors_by_class[i] = 3 marked clean, is poison
2) Json object with confusion matrix per-class.
"""
all_errors_by_class = []
poison = 0
clean = 1
dic_json = {}
logger.debug("Error rates per class:")
for class_i, (assigned_clean, is_clean) in enumerate(zip(assigned_clean_by_class, is_clean_by_class)):
errors = []
for assignment, bl_var in zip(assigned_clean, is_clean):
bl_var = int(bl_var)
# marked poison, is poison = 0
# true positive
if assignment == poison and bl_var == poison:
errors.append(0)
# marked clean, is clean = 1
# true negative
elif assignment == clean and bl_var == clean:
errors.append(1)
# marked poison, is clean = 2
# false positive
elif assignment == poison and bl_var == clean:
errors.append(2)
# marked clean, is poison = 3
# false negative
elif assignment == clean and bl_var == poison:
errors.append(3)
else:
raise Exception("Analyze_correctness entered wrong class")
errors = np.asarray(errors)
logger.debug("-------------------%d---------------", class_i)
key_i = "class_" + str(class_i)
matrix_i = self.get_confusion_matrix(errors)
dic_json.update({key_i: matrix_i})
all_errors_by_class.append(errors)
all_errors_by_class = np.asarray(all_errors_by_class)
conf_matrix_json = json.dumps(dic_json)
return all_errors_by_class, conf_matrix_json
def get_confusion_matrix(self, values: np.ndarray) -> dict:
"""
Computes and returns a json object that contains the confusion matrix for each class.
:param values: Array indicating the correctness of each assignment in the ith class.
:return: Json object with confusion matrix per-class.
"""
dic_class = {}
true_positive = np.where(values == 0)[0].shape[0]
true_negative = np.where(values == 1)[0].shape[0]
false_positive = np.where(values == 2)[0].shape[0]
false_negative = np.where(values == 3)[0].shape[0]
tp_rate = self.calculate_and_print(true_positive, true_positive + false_negative, "true-positive rate")
tn_rate = self.calculate_and_print(true_negative, false_positive + true_negative, "true-negative rate")
fp_rate = self.calculate_and_print(false_positive, false_positive + true_negative, "false-positive rate")
fn_rate = self.calculate_and_print(false_negative, true_positive + false_negative, "false-negative rate")
dic_tp = dict(
rate=round(tp_rate, 2),
numerator=true_positive,
denominator=(true_positive + false_negative),
)
if (true_positive + false_negative) == 0:
dic_tp = dict(
rate="N/A",
numerator=true_positive,
denominator=(true_positive + false_negative),
)
dic_tn = dict(
rate=round(tn_rate, 2),
numerator=true_negative,
denominator=(false_positive + true_negative),
)
if (false_positive + true_negative) == 0:
dic_tn = dict(
rate="N/A",
numerator=true_negative,
denominator=(false_positive + true_negative),
)
dic_fp = dict(
rate=round(fp_rate, 2),
numerator=false_positive,
denominator=(false_positive + true_negative),
)
if (false_positive + true_negative) == 0:
dic_fp = dict(
rate="N/A",
numerator=false_positive,
denominator=(false_positive + true_negative),
)
dic_fn = dict(
rate=round(fn_rate, 2),
numerator=false_negative,
denominator=(true_positive + false_negative),
)
if (true_positive + false_negative) == 0:
dic_fn = dict(
rate="N/A",
numerator=false_negative,
denominator=(true_positive + false_negative),
)
dic_class.update(dict(TruePositive=dic_tp))
dic_class.update(dict(TrueNegative=dic_tn))
dic_class.update(dict(FalsePositive=dic_fp))
dic_class.update(dict(FalseNegative=dic_fn))
return dic_class
@staticmethod
def calculate_and_print(numerator: int, denominator: int, name: str) -> float:
"""
Computes and prints the rates based on the denominator provided.
:param numerator: number used to compute the rate.
:param denominator: number used to compute the rate.
:param name: Rate name being computed e.g., false-positive rate.
:return: Computed rate
"""
try:
res = 100 * (numerator / float(denominator))
logger.debug("%s: %d/%d=%.3g", name, numerator, denominator, res)
return res
except ZeroDivisionError:
logger.debug("%s: couldn't calculate %d/%d", name, numerator, denominator)
return 0.0
| 40.632124 | 120 | 0.618082 | ["MIT"] | SecantZhang/adversarial-robustness-toolbox | art/defences/detector/poison/ground_truth_evaluator.py | 7,842 | Python
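# Hedged worked example for the evaluator above (0 = poison, 1 = clean); the single
# two-sample class is made up to show the error codes and the JSON report.
import numpy as np

evaluator = GroundTruthEvaluator()
errors, report = evaluator.analyze_correctness(
    assigned_clean_by_class=[np.array([0, 1])],
    is_clean_by_class=[np.array([0, 1])],
)
# errors[0] == [0, 1]: a true positive followed by a true negative.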
import logging; log = logging.getLogger(__name__)
from .Menu import Menu
class HitboxMenu(Menu):
"""A menu for examining a hitbox."""
def __init__(self, parent):
super().__init__(parent)
self.title = "Hitboxes"
self.refresh()
def refresh(self):
if self.parent.model is None:
self.items = ["No Model"]
self.cursorPos = 0
return
self.items = ["Box Bone 02 14 1617 Radius X Y Z"]
for i, box in enumerate(self.parent.model.hitboxes):
self.items.append("%3d %04X %04X %04X %02X%02X %+7.2f %+7.2f %+7.2f %+7.2f" % (
i, box.bone,
box.unk02, box.unk14, box.unk16, box.unk17, box.radius,
box.pos[0], box.pos[1], box.pos[2],
))
self.cursorPos = 0
#def activate(self):
# selPoly = self.cursorPos - 1
# if selPoly >= 0:
# poly = self.dlist.polys[selPoly]
# menu = PolyMenu(self.parent, poly,
# "Display List %d Poly %d: %s" % (poly['list'], selPoly,
# self.drawModes[poly['mode']],
# ))
# self.parent.enterMenu(menu)
def render(self):
super().render()
#selPoly = self.cursorPos - 1
#if selPoly >= 0:
# poly = self.dlist.polys[selPoly]
# log.dprint("\x1B[16,400HPoly %d: %s, %d vtxs", selPoly,
# self.drawModes[poly['mode']],
# len(poly['vtxs']))
def _onChange(self):
sel = self.cursorPos - 1
self.parent.highlightedHitbox = sel
| 29.888889 | 91 | 0.513631 | [
"MIT"
] | RenaKunisaki/StarFoxAdventures | modelviewer/programs/SfaModel/Menu/HitboxMenu.py | 1,614 | Python |
import pandas as pd
import pprint
all_client_diagnoses = pd.read_csv('2021_encounters_with_diagnoses.csv')
print(all_client_diagnoses.columns)
nora_clients = all_client_diagnoses.drop_duplicates('Pid').drop(columns=['Date Of Service', 'Encounter', 'Age', 'Service Code'])
nora_gender = nora_clients[nora_clients.Facility == 'Northern Ohio Recovery Association'].groupby('Gender').count()
lorain_gender = nora_clients[nora_clients.Facility == 'Lorain'].groupby('Gender').count()
print('------------------------------------')
print('NORA All Client Gender Breakdown')
print('-------------------------------------')
pprint.pprint(nora_gender)
print('------------------------------------')
print('Lorain All Client Gender Breakdown')
print('-------------------------------------')
pprint.pprint(lorain_gender)
print('------------------------------------')
| 42.75 | 128 | 0.610526 | ["Apache-2.0"] | thomps9012/noraML | demographics.py | 855 | Python
from __future__ import unicode_literals
from .responses import OrganizationsResponse
url_bases = [
"https?://organizations.(.+).amazonaws.com",
]
url_paths = {
'{0}/$': OrganizationsResponse.dispatch,
}
| 19.363636 | 48 | 0.723005 | ["Apache-2.0"] | 7minus2/moto | moto/organizations/urls.py | 213 | Python
# -*- coding: utf-8 -*-
# Copyright 2017 Openstack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
PATH_OPTS = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../')),
               help='Directory where the shadowfiend '
                    'python module is installed.'),
    cfg.StrOpt('bindir',
               default='$pybasedir/bin',
               help='Directory where shadowfiend '
                    'binaries are installed.'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining "
                    "shadowfiend's state."),
]
CONF = cfg.CONF
CONF.register_opts(PATH_OPTS)
def basedir_def(*args):
"""Return an uninterpolated path relative to $pybasedir."""
return os.path.join('$pybasedir', *args)
def bindir_def(*args):
"""Return an uninterpolated path relative to $bindir."""
return os.path.join('$bindir', *args)
def state_path_def(*args):
"""Return an uninterpolated path relative to $state_path."""
return os.path.join('$state_path', *args)
def basedir_rel(*args):
"""Return a path relative to $pybasedir."""
return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
"""Return a path relative to $bindir."""
return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
"""Return a path relative to $state_path."""
return os.path.join(CONF.state_path, *args)
| 31.073529 | 78 | 0.637009 | ["Apache-2.0"] | RogerYuQian/shadowfiend | shadowfiend/common/paths.py | 2,113 | Python
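# Hedged example of the path helpers above; 'logs' is an arbitrary subdirectory name.
uninterpolated = state_path_def('logs')   # '$state_path/logs', resolved later by oslo.config
resolved = state_path_rel('logs')         # joined against CONF.state_path immediately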
import csv
def save2csv(dst_fh, row):
"""
Appends a list with data to a dst_fh csv
args:
dst_fh: str, output file
row: list, list of values to write in a row
"""
with open(dst_fh, "a", encoding="utf-8") as csvfile:
out = csv.writer(
csvfile,
delimiter=",",
lineterminator="\n",
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
)
try:
out.writerow(row)
except UnicodeEncodeError:
pass
| 22.166667 | 56 | 0.511278 | ["MIT"] | BookOps-CAT/CBH-migration | src/utils.py | 532 | Python
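# Hedged usage sketch for save2csv above; the output path and the row values are assumptions.
save2csv('out/records.csv', ['b12345678', 'Brooklyn Daily Eagle', '1901'])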
import re
from chatterbot.conversation import Statement
from chatbot.const import const
from chatbot.vocabulary import Word
class Detector(object):
def __init__(self):
self.vocabulary = Word()
self.enum_type_key_word = ('类型', '等级', '方式', '分类', '模式', 'type', 'class', '系列')
self.brand_type_key_word = ('品牌', '产品')
self.text_type_key_word = ('简介', '描述', '简称', '备注', '说明',)
self.date_type_key_word = ('日期', '时间', '日', '年', '月',)
self.person_type_key_word = ('创办人', '负责人', '经理', '经手人', '经办人')
self.org_type_key_word = ('托管方', '保管方',)
self.price_type_key_word = ('价格', '金额', '价', '额度', '利润', '收益', '成本', '支出')
self.mass_type_key_word = ('重量', '毛重', '净重', '毛重量', '净重',)
self.volume_type_key_word = ('体积', '容量', '大小')
self.length_type_key_word = ('长度', '宽度', '高度', '长', '宽', '高')
self.operation_pattern = const.COMPARISON_PATTERN
def detect_type_column(self, col_name) -> str:
seg_words = self.vocabulary.get_seg_words(col_name)
last_word = str(seg_words[-1]).lower()
if last_word in self.enum_type_key_word:
return const.ENUM
if last_word in self.brand_type_key_word:
return const.BRAND
if last_word in self.date_type_key_word:
return const.DATE
if last_word in self.person_type_key_word:
return const.PERSON
if last_word in self.org_type_key_word:
return const.ORG
if last_word in self.price_type_key_word:
return const.PRICE
return const.TEXT
def detect_operation(self, statement: Statement):
query_text = statement.text
seg_word = statement.search_text.split(const.SEG_SEPARATOR)
operation = {}
phrase = []
for op in self.operation_pattern.keys():
for pattern, slot_type, word, unit in self.operation_pattern[op]:
match = re.search(pattern, query_text)
if match:
operation['op'] = op
operation['slot_type'] = slot_type
words = match.groups()[0]
for w in seg_word:
if w in words:
phrase.append(w)
operation['phrase'] = phrase
operation['word'] = word
operation['unit'] = unit
return operation
return operation
| 33.093333 | 87 | 0.560435 | [
"Apache-2.0"
] | zgj0607/ChatBot | chatbot/logic/table/detect_column_type.py | 2,670 | Python |
import unittest
from conans.client.conf import get_default_settings_yml
from conans.client.generators.b2 import B2Generator
from conans.model.build_info import CppInfo
from conans.model.conan_file import ConanFile
from conans.model.env_info import EnvValues
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.test.utils.tools import TestBufferConanOutput
class B2GeneratorTest(unittest.TestCase):
def b2_test(self):
settings = Settings.loads(get_default_settings_yml())
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
settings.cppstd = "gnu17"
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
conanfile.settings = settings
ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
cpp_info = CppInfo("dummy_root_folder1")
cpp_info.defines = ["MYDEFINE1"]
cpp_info.cflags.append("-Flag1=23")
cpp_info.version = "1.3"
cpp_info.description = "My cool description"
cpp_info.libs = ["MyLib1"]
conanfile.deps_cpp_info.update(cpp_info, ref.name)
ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
cpp_info = CppInfo("dummy_root_folder2")
cpp_info.libs = ["MyLib2"]
cpp_info.defines = ["MYDEFINE2"]
cpp_info.version = "2.3"
cpp_info.exelinkflags = ["-exelinkflag"]
cpp_info.sharedlinkflags = ["-sharedlinkflag"]
cpp_info.cxxflags = ["-cxxflag"]
cpp_info.public_deps = ["MyPkg"]
cpp_info.lib_paths.extend(["Path\\with\\slashes", "regular/path/to/dir"])
cpp_info.include_paths.extend(["other\\Path\\with\\slashes", "other/regular/path/to/dir"])
conanfile.deps_cpp_info.update(cpp_info, ref.name)
generator = B2Generator(conanfile)
content = {
'conanbuildinfo.jam': _main_buildinfo_full,
'conanbuildinfo-316f2f0b155dc874a672d40d98d93f95.jam':
_variation_full,
}
for ck, cv in generator.content.items():
self.assertEqual(cv, content[ck])
def b2_empty_settings_test(self):
conanfile = ConanFile(TestBufferConanOutput(), None)
conanfile.initialize(Settings({}), EnvValues())
generator = B2Generator(conanfile)
content = {
'conanbuildinfo.jam': _main_buildinfo_empty,
'conanbuildinfo-d41d8cd98f00b204e9800998ecf8427e.jam':
_variation_empty,
}
for ck, cv in generator.content.items():
self.assertEqual(cv, content[ck])
_main_buildinfo_full = '''\
#|
B2 definitions for Conan packages. This is a generated file.
Edit the corresponding conanfile.txt instead.
|#
import path ;
import project ;
import modules ;
import feature ;
local base-project = [ project.current ] ;
local base-project-mod = [ $(base-project).project-module ] ;
local base-project-location = [ project.attribute $(base-project-mod) location ] ;
rule project-define ( id )
{
id = $(id:L) ;
local saved-project = [ modules.peek project : .base-project ] ;
local id-location = [ path.join $(base-project-location) $(id) ] ;
local id-mod = [ project.load $(id-location) : synthesize ] ;
project.initialize $(id-mod) : $(id-location) ;
project.inherit-attributes $(id-mod) : $(base-project-mod) ;
local attributes = [ project.attributes $(id-mod) ] ;
$(attributes).set parent-module : $(base-project-mod) : exact ;
modules.poke $(base-project-mod) : $(id)-mod : $(id-mod) ;
modules.poke [ CALLER_MODULE ] : $(id)-mod : $(id-mod) ;
modules.poke project : .base-project : $(saved-project) ;
IMPORT $(__name__)
: constant-if call-in-project
: $(id-mod)
: constant-if call-in-project ;
if [ project.is-jamroot-module $(base-project-mod) ]
{
use-project /$(id) : $(id) ;
}
return $(id-mod) ;
}
rule constant-if ( name : value * )
{
if $(__define_constants__) && $(value)
{
call-in-project : constant $(name) : $(value) ;
modules.poke $(__name__) : $(name) : [ modules.peek $(base-project-mod) : $(name) ] ;
}
}
rule call-in-project ( project-mod ? : rule-name args * : * )
{
project-mod ?= $(base-project-mod) ;
project.push-current [ project.target $(project-mod) ] ;
local result = [ modules.call-in $(project-mod) :
$(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) : $(10) :
$(11) : $(12) : $(13) : $(14) : $(15) : $(16) : $(17) : $(18) :
$(19) ] ;
project.pop-current ;
return $(result) ;
}
rule include-conanbuildinfo ( cbi )
{
include $(cbi) ;
}
IMPORT $(__name__)
: project-define constant-if call-in-project include-conanbuildinfo
: $(base-project-mod)
: project-define constant-if call-in-project include-conanbuildinfo ;
if ! ( relwithdebinfo in [ feature.values variant ] )
{
variant relwithdebinfo : : <optimization>speed <debug-symbols>on <inlining>full <runtime-debugging>off ;
}
if ! ( minsizerel in [ feature.values variant ] )
{
variant minsizerel : : <optimization>space <debug-symbols>off <inlining>full <runtime-debugging>off ;
}
local __conanbuildinfo__ = [ GLOB $(__file__:D) : conanbuildinfo-*.jam : downcase ] ;
{
local __define_constants__ = yes ;
for local __cbi__ in $(__conanbuildinfo__)
{
call-in-project : include-conanbuildinfo $(__cbi__) ;
}
}
# mypkg
project-define mypkg ;
# mypkg2
project-define mypkg2 ;
{
local __define_targets__ = yes ;
for local __cbi__ in $(__conanbuildinfo__)
{
call-in-project : include-conanbuildinfo $(__cbi__) ;
}
}
'''
_variation_full = '''\
#|
B2 definitions for Conan packages. This is a generated file.
Edit the corresponding conanfile.txt instead.
|#
# global
constant-if rootpath(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
""
;
constant-if includedirs(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"other/Path/with/slashes"
"other/regular/path/to/dir"
;
constant-if libdirs(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"Path/with/slashes"
"regular/path/to/dir"
;
constant-if defines(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"MYDEFINE2"
"MYDEFINE1"
;
constant-if cppflags(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"-cxxflag"
;
constant-if cflags(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"-Flag1=23"
;
constant-if sharedlinkflags(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"-sharedlinkflag"
;
constant-if exelinkflags(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
"-exelinkflag"
;
constant-if requirements(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
<address-model>32
<architecture>x86
<cxxstd>17
<cxxstd:dialect>gnu
<target-os>linux
<toolset>gcc-6.3
<variant>release
;
constant-if usage-requirements(conan,32,x86,17,gnu,linux,gcc-6.3,release) :
<include>$(includedirs(conan,32,x86,17,gnu,linux,gcc-6.3,release))
<define>$(defines(conan,32,x86,17,gnu,linux,gcc-6.3,release))
<cflags>$(cflags(conan,32,x86,17,gnu,linux,gcc-6.3,release))
<cxxflags>$(cppflags(conan,32,x86,17,gnu,linux,gcc-6.3,release))
<link>shared:<linkflags>$(sharedlinkflags(conan,32,x86,17,gnu,linux,gcc-6.3,release))
;
# mypkg
constant-if rootpath(mypkg,32,x86,17,gnu,linux,gcc-6.3,release) :
"dummy_root_folder1"
;
constant-if defines(mypkg,32,x86,17,gnu,linux,gcc-6.3,release) :
"MYDEFINE1"
;
constant-if cflags(mypkg,32,x86,17,gnu,linux,gcc-6.3,release) :
"-Flag1=23"
;
constant-if requirements(mypkg,32,x86,17,gnu,linux,gcc-6.3,release) :
<address-model>32
<architecture>x86
<cxxstd>17
<cxxstd:dialect>gnu
<target-os>linux
<toolset>gcc-6.3
<variant>release
;
constant-if usage-requirements(mypkg,32,x86,17,gnu,linux,gcc-6.3,release) :
<include>$(includedirs(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
<define>$(defines(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
<cflags>$(cflags(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
<cxxflags>$(cppflags(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
<link>shared:<linkflags>$(sharedlinkflags(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
;
# mypkg2
constant-if rootpath(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"dummy_root_folder2"
;
constant-if includedirs(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"other/Path/with/slashes"
"other/regular/path/to/dir"
;
constant-if libdirs(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"Path/with/slashes"
"regular/path/to/dir"
;
constant-if defines(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"MYDEFINE2"
;
constant-if cppflags(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"-cxxflag"
;
constant-if sharedlinkflags(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"-sharedlinkflag"
;
constant-if exelinkflags(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
"-exelinkflag"
;
constant-if requirements(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
<address-model>32
<architecture>x86
<cxxstd>17
<cxxstd:dialect>gnu
<target-os>linux
<toolset>gcc-6.3
<variant>release
;
constant-if usage-requirements(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release) :
<include>$(includedirs(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
<define>$(defines(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
<cflags>$(cflags(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
<cxxflags>$(cppflags(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
<link>shared:<linkflags>$(sharedlinkflags(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
;
# mypkg
if $(__define_targets__) {
call-in-project $(mypkg-mod) : lib MyLib1
: ''' + '''
: <name>MyLib1 <search>$(libdirs(mypkg,32,x86,17,gnu,linux,gcc-6.3,release)) $(requirements(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
:
: $(usage-requirements(mypkg,32,x86,17,gnu,linux,gcc-6.3,release)) ;
call-in-project $(mypkg-mod) : explicit MyLib1 ; }
if $(__define_targets__) {
call-in-project $(mypkg-mod) : alias libs
: MyLib1
: $(requirements(mypkg,32,x86,17,gnu,linux,gcc-6.3,release))
:
: $(usage-requirements(mypkg,32,x86,17,gnu,linux,gcc-6.3,release)) ;
call-in-project $(mypkg-mod) : explicit libs ; }
# mypkg2
if $(__define_targets__) {
call-in-project $(mypkg2-mod) : lib MyLib2
: /MyPkg//libs
: <name>MyLib2 <search>$(libdirs(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release)) $(requirements(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
:
: $(usage-requirements(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release)) ;
call-in-project $(mypkg2-mod) : explicit MyLib2 ; }
if $(__define_targets__) {
call-in-project $(mypkg2-mod) : alias libs
: /MyPkg//libs MyLib2
: $(requirements(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release))
:
: $(usage-requirements(mypkg2,32,x86,17,gnu,linux,gcc-6.3,release)) ;
call-in-project $(mypkg2-mod) : explicit libs ; }
'''
_main_buildinfo_empty = '''\
#|
B2 definitions for Conan packages. This is a generated file.
Edit the corresponding conanfile.txt instead.
|#
import path ;
import project ;
import modules ;
import feature ;
local base-project = [ project.current ] ;
local base-project-mod = [ $(base-project).project-module ] ;
local base-project-location = [ project.attribute $(base-project-mod) location ] ;
rule project-define ( id )
{
id = $(id:L) ;
local saved-project = [ modules.peek project : .base-project ] ;
local id-location = [ path.join $(base-project-location) $(id) ] ;
local id-mod = [ project.load $(id-location) : synthesize ] ;
project.initialize $(id-mod) : $(id-location) ;
project.inherit-attributes $(id-mod) : $(base-project-mod) ;
local attributes = [ project.attributes $(id-mod) ] ;
$(attributes).set parent-module : $(base-project-mod) : exact ;
modules.poke $(base-project-mod) : $(id)-mod : $(id-mod) ;
modules.poke [ CALLER_MODULE ] : $(id)-mod : $(id-mod) ;
modules.poke project : .base-project : $(saved-project) ;
IMPORT $(__name__)
: constant-if call-in-project
: $(id-mod)
: constant-if call-in-project ;
if [ project.is-jamroot-module $(base-project-mod) ]
{
use-project /$(id) : $(id) ;
}
return $(id-mod) ;
}
rule constant-if ( name : value * )
{
if $(__define_constants__) && $(value)
{
call-in-project : constant $(name) : $(value) ;
modules.poke $(__name__) : $(name) : [ modules.peek $(base-project-mod) : $(name) ] ;
}
}
rule call-in-project ( project-mod ? : rule-name args * : * )
{
project-mod ?= $(base-project-mod) ;
project.push-current [ project.target $(project-mod) ] ;
local result = [ modules.call-in $(project-mod) :
$(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) : $(10) :
$(11) : $(12) : $(13) : $(14) : $(15) : $(16) : $(17) : $(18) :
$(19) ] ;
project.pop-current ;
return $(result) ;
}
rule include-conanbuildinfo ( cbi )
{
include $(cbi) ;
}
IMPORT $(__name__)
: project-define constant-if call-in-project include-conanbuildinfo
: $(base-project-mod)
: project-define constant-if call-in-project include-conanbuildinfo ;
if ! ( relwithdebinfo in [ feature.values variant ] )
{
variant relwithdebinfo : : <optimization>speed <debug-symbols>on <inlining>full <runtime-debugging>off ;
}
if ! ( minsizerel in [ feature.values variant ] )
{
variant minsizerel : : <optimization>space <debug-symbols>off <inlining>full <runtime-debugging>off ;
}
local __conanbuildinfo__ = [ GLOB $(__file__:D) : conanbuildinfo-*.jam : downcase ] ;
{
local __define_constants__ = yes ;
for local __cbi__ in $(__conanbuildinfo__)
{
call-in-project : include-conanbuildinfo $(__cbi__) ;
}
}
{
local __define_targets__ = yes ;
for local __cbi__ in $(__conanbuildinfo__)
{
call-in-project : include-conanbuildinfo $(__cbi__) ;
}
}
'''
_variation_empty = '''\
#|
B2 definitions for Conan packages. This is a generated file.
Edit the corresponding conanfile.txt instead.
|#
# global
constant-if rootpath(conan,) :
""
;
constant-if usage-requirements(conan,) :
<include>$(includedirs(conan,))
<define>$(defines(conan,))
<cflags>$(cflags(conan,))
<cxxflags>$(cppflags(conan,))
<link>shared:<linkflags>$(sharedlinkflags(conan,))
;
'''
| 31.237792 | 145 | 0.642425 | ["MIT"] | FalkorX/conan | conans/test/unittests/client/generators/b2_test.py | 14,713 | Python