ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 – 1.04M chars)
---|---|---|
py | 1a3261e2d179416150cf486cadf2ee9aee942d5d | #========================================================#
# #
# __init__.py - does nothing in particular for now #
# #
# (C) 2019-2019 VIPPOOL Inc. #
# #
#========================================================#
|
py | 1a3262a17519d97205b5660b5493b49e9c60ef07 | import codecs
import time
from datetime import datetime
from script.util.Logger import Logger, pprint_logger
from script.util.misc_util import log_error_trace
def file_lines_job(func, in_file='input.txt', out_file='output.txt', encoding='UTF8'):
def wrapper():
read_time = time.time()
with codecs.open(in_file, 'r', encoding=encoding) as f:
lines = [line for line in f.readlines()]
new_lines = []
for line in lines:
line = line.replace('\r', '')
line = line.replace('\n', '')
new_lines += [line]
lines = new_lines
read_time = time.time() - read_time
old_len = len(lines)
print("read '{}', {} lines, {:.3f}'s elapsed".format(in_file, old_len, read_time))
func_time = time.time()
lines = func(lines)
func_time = time.time() - func_time
print("in func {:.3f}'s elapsed".format(func_time))
write_time = time.time()
if lines is not None:
new_lines = []
for line in lines:
line = str(line)
if line[-1] != '\n':
new_lines += [line + '\n']
else:
new_lines += [line]
lines = new_lines
with codecs.open(out_file, 'w', encoding=encoding) as f:
f.writelines(lines)
write_time = time.time() - write_time
new_len = len(lines)
if old_len - new_len == 0:
print('same len')
elif old_len - new_len > 0:
print("del {} lines".format(old_len - new_len))
else:
print("add {} lines".format(-(old_len - new_len)))
print("write '{}', {} lines, {:.3f}'s elapsed".format(out_file, new_len, write_time))
else:
write_time = 0
print("total {:.4f}'s elapsed".format(read_time + func_time + write_time))
wrapper.__name__ = func.__name__
return wrapper
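# Minimal usage sketch (hypothetical file names and callback, not part of the original module):
#
#   def dedupe(lines):
#       return sorted(set(lines))
#
#   job = file_lines_job(dedupe, in_file='names.txt', out_file='unique.txt')
#   job()  # reads names.txt, passes its lines to dedupe(), writes the result to unique.txt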
def file_str_job(func, in_file='input.txt', out_file='output.txt', encoding='UTF8'):
def wrapper():
with codecs.open(in_file, 'r', encoding=encoding) as f:
line = "".join([line for line in f.readlines()])
print("read '{}', {} length".format(in_file, len(line)))
line = func(line)
if line is not None:
with codecs.open(out_file, 'w', encoding=encoding) as f:
f.writelines(str(line))
print("write '{}', {} length".format(out_file, len(line)))
wrapper.__name__ = func.__name__
return wrapper
def deco_exception_handle(func):
"""decorator for catch exception and log"""
def wrapper(*args, **kwargs):
self = args[0]
log_func = self.log
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
log_func("KeyboardInterrupt detected abort process")
except Exception as e:
log_error_trace(log_func, e)
wrapper.__name__ = func.__name__
return wrapper
def deco_log_func_name(func):
def wrapper(*args, **kwargs):
self = args[0]
log_func = self.log
return log_func(func.__name__, *args, **kwargs)
wrapper.__name__ = func.__name__
return wrapper
def deco_timeit(func):
def wrapper(*args, **kwargs):
date = datetime.now()
start = time.time()
try:
ret = func(*args, **kwargs)
except BaseException as e:
log_error_trace(print, e)
ret = None
finally:
elapse_time = time.time() - start
msg = f"in {func.__name__}(), time {time.time() - start:.4f}'s elapsed"
if elapse_time > 60:
now = datetime.now() - date
msg += f", {now}"
print(msg)
return ret
wrapper.__name__ = func.__name__
return wrapper
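# Minimal usage sketch (hypothetical function, not part of the original module):
#
#   @deco_timeit
#   def slow_sum(n):
#       return sum(range(n))
#
#   slow_sum(10 ** 6)  # prints something like "in slow_sum(), time 0.0312's elapsed"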
def deco_save_log(func):
def wrapper(*args, **kwargs):
logger = Logger(func.__name__, level='INFO')
print = logger.info
pprint = pprint_logger(print)
func_name = func.__name__
print('#' * 80)
print(f'begin {func_name}')
ret = func(print, pprint, *args, **kwargs)
print(f'end {func_name}')
print('#' * 80)
return ret
return wrapper
|
py | 1a3264518bd587d2f8b889705d6fb26c987968c5 | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import torch
from ..base import BaseLearner
class BaseSkillDiscoveryLearner(BaseLearner):
def __init__(self, env_reward=False, hidden_size=128, num_layers=4, normalize_inputs=False, **kwargs):
self.env_reward = bool(env_reward)
self.hidden_size = int(hidden_size)
self.num_layers = int(num_layers)
self.normalize_inputs = bool(normalize_inputs)
super().__init__(**kwargs)
self.ep_summary_keys = ["cumulative_rew", "cumulative_im_rew", "cumulative_density_rew"]
def fill_summary(self, *values):
self._ep_summary = [float(sum([e['reward'] for e in self.agent.episode])),
float(sum([e.get('im_reward', 0.) for e in self.agent.episode])),
float(sum([e.get('density_model_reward', 0.) for e in self.agent.episode]))]
self._ep_summary += [v.item() for v in values]
def relabel_episode(self):
self._compress_me = []
for e in self.agent.episode:
# Optionally take into account extrinsic reward
r = e['env_reward'] * float(self.env_reward)
e['reward'] = r
self._compress_me.append(self.agent.episode)
# Add discriminator reward
self._add_im_reward()
def relabel_batch(self, batch):
# Compute intrinsic rewards
with torch.no_grad():
new_im_rew = self.im.surprisal(batch)
if self.density is not None:
new_density_rew = self.density.novelty(batch)
else:
new_density_rew = torch.zeros_like(new_im_rew)
# Make sure that weights for intrinsic rewards are not None
im_nu = self.im_nu if self.im_nu is not None else 0.
density_nu = self.density_nu if self.density_nu is not None else 0.
# Detach intrinsic rewards from computation graph
new_im_rew = new_im_rew.detach()
new_density_rew = new_density_rew.detach()
# Optionally take into account extrinsic reward
r = batch['env_reward'] * float(self.env_reward)
# Add intrinsic rewards
r += im_nu * new_im_rew + density_nu * new_density_rew
batch['reward'] = r
batch['im_reward'] = new_im_rew
batch['density_model_reward'] = new_density_rew
return batch
def _compute_surprisal(self, batched_episode):
return self.im.surprisal(batched_episode)
def _add_im_reward(self):
if self.im is not None:
for ep in self._compress_me:
batched_episode = {key: torch.stack([e[key] for e in ep]) for key in ep[0].keys()}
surprisals = self._compute_surprisal(batched_episode)
if self.im_scale:
self.train()
_ = self._im_bn(surprisals.view(-1, 1))
self.eval()
surprisals = surprisals / torch.sqrt(self._im_bn.running_var[0])
for e, s in zip(ep, surprisals):
e['reward'] += (self.im_nu * s.detach())
e['im_reward'] = s.detach()
def preprocess_skill(self, z, **kwargs):
return self.agent.preprocess_skill(z, **kwargs)
def get_values(self, batch):
return self.v_module(
batch['state'],
self.preprocess_skill(batch['skill']),
)
def get_terminal_values(self, batch):
return self.v_module(
batch['next_state'][-1:],
self.preprocess_skill(batch['skill'][-1:])
)
def get_policy_lprobs_and_nents(self, batch):
log_prob, n_ent, _ = self.policy(
batch['state'],
self.preprocess_skill(batch['skill']),
action_logit=batch['action_logit']
)
return log_prob.sum(dim=1), n_ent
def get_im_loss(self, batch):
return self.im(batch)
def soft_update(self):
module_pairs = [
dict(source=self.v_module, target=self.v_target),
]
for pair in module_pairs:
for p, p_targ in zip(pair['source'].parameters(), pair['target'].parameters()):
p_targ.data *= self.polyak
p_targ.data += (1 - self.polyak) * p.data
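# Note on the loop above: each target parameter follows the exponential moving
# average p_targ <- polyak * p_targ + (1 - polyak) * p, so the target value
# network trails the online one (a standard Polyak/soft target update).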
def _get_q_module(self, q_i):
q_i = q_i if q_i is not None else 1
assert q_i in [1, 2]
return [self.q1, self.q2][q_i - 1]
def get_action_qs(self, batch, q_i=None):
return self.get_curr_qs(batch, new_actions=None, q_i=q_i)
def get_policy_loss_and_actions(self, batch):
policy_actions, logprobs = self.sample_policy_actions_and_lprobs(batch)
p_obj = self.q1.q_no_grad(batch['state'], policy_actions, self.preprocess_skill(batch['skill']))
if hasattr(self, 'alpha'): # for SAC
p_obj -= self.alpha * logprobs
p_losses = -p_obj # flip sign to turn the maximization objective into a loss function to minimize
p_loss = p_losses.mean()
return p_loss, policy_actions
def get_curr_qs(self, batch, new_actions=None, q_i=None):
"""
Compute Q_i(s,a). Use new_actions to override the actions in the batch (e.g. for SAC).
q_i selects the index of the Q-function.
"""
action = new_actions if new_actions is not None else batch['action']
return self._get_q_module(q_i)(
batch['state'],
action,
self.preprocess_skill(batch['skill'])
)
def get_next_vs(self, batch):
return self.v_target(
batch['next_state'],
self.preprocess_skill(batch['skill']),
)
def sample_policy_actions_and_lprobs(self, batch): # For SAC; we need to sample new actions when updating V
""" Sample new actions. Returns (actions, logprobs) tuple. """
action, action_logit, lprobs, n_ent = self.policy(
batch['state'],
self.preprocess_skill(batch['skill'])
)
return action, lprobs.sum(dim=1)
|
py | 1a3264a6479e12957baa9b4cb55c2d6192453cd4 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adyen', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='amount',
field=models.DecimalField(null=True, max_digits=20, decimal_places=2),
),
migrations.AlterField(
model_name='refund',
name='refund_amount',
field=models.DecimalField(max_digits=20, decimal_places=2),
),
migrations.AlterField(
model_name='transactionstatus',
name='amount',
field=models.DecimalField(max_digits=20, decimal_places=2),
),
]
|
py | 1a3265335223952795cddb033a7a3b7f7c1e7429 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default TEST_CONFIG_OVERRIDE for python repos.
# You can copy this file into your directory, then it will be imported from
# the noxfile.py.
# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
# > ℹ️ We're opting out of all Python versions except 3.8.
# > The Python version used is defined by the Dockerfile, so it's redundant
# > to run multiple tests since they would all be running the same Dockerfile.
"ignored_versions": ["2.7", "3.6", "3.7", "3.9", "3.10"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": True,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
|
py | 1a326628ba48cd41ad0a753af7e23f8a14763cd1 | def add(a, b):
"""
A function that adds two integers
:param a: (int) The first integer
:param b: (int) The second integer
:return: (int) The sum of a and b
:raises: AttributeError, if a and b are not integers
"""
if not isinstance(a, int) or not isinstance(b, int):
raise AttributeError("This function only accepts integer arguments")
return a + b
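# Minimal usage sketch (assumed example values, not part of the original module):
if __name__ == '__main__':
    print(add(2, 3))   # -> 5
    try:
        add(2, '3')    # a non-integer argument
    except AttributeError as exc:
        print(exc)     # -> This function only accepts integer arguments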
|
py | 1a326736d56945bd00e2e591cde3d69b722d5b42 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the publisher using Gazebo with ROS control and Pybullet
Before running this file, read the following instructions!
First, clone the following repository in your catkin workspace (catkin_ws/src/):
$ git clone https://github.com/ros-simulation/gazebo_ros_demos
Then, in your workspace compile it using `catkin_make` and source it:
$ catkin_make
$ source devel/setup.bash
Launch the roslaunch gazebo file and control file:
$ roslaunch rrbot_gazebo rrbot_world.launch # this will launch Gazebo and start the controller manager
$ roslaunch rrbot_control rrbot_control.launch # this will load the controllers
Now, you should have your Gazebo running with the `rrbot` inside. Then, just run this file:
$ python bullet_ros_control_gazebo.py
And move the `rrbot` robot using your mouse by left-clicking on a part of the robot and moving it.
Here is a video of what it should give: https://www.youtube.com/watch?v=OPh-NCfKKK8
If you want to use 'kuka_iiwa' and 'franka', you will have to follow the same steps as above but this time by cloning:
- https://github.com/IFL-CAMP/iiwa_stack
- https://github.com/erdalpekel/franka_ros and https://github.com/erdalpekel/panda_simulation
Then run the corresponding roslaunch files that are provided in these repositories (`simulation.launch` for the panda
robot, and `iiwa_gazebo.launch` for the kuka robot).
"""
import pyrobolearn as prl
ros = prl.middlewares.ROS(publish=True, teleoperate=True)
sim = prl.simulators.Bullet(middleware=ros)
# load world
world = prl.worlds.BasicWorld(sim)
# load robot
robot = world.load_robot('rrbot') # 'kuka_iiwa', 'franka', 'rrbot'
# run simulation
for t in prl.count():
# get the joint positions from the Bullet simulator (because :attr:`teleoperate` has been set to True,
# it will publish these read positions on the corresponding topic)
q = robot.get_joint_positions()
# perform a step in the simulator (and sleep for `sim.dt`)
world.step(sim.dt)
|
py | 1a3267c932e70bca9f0a128e5eec7def6566db2b | from flask_login import current_user, login_required
from app.libs.error_code import Forbidden, Success
from app.libs.red_print import RedPrint
from app.validators.user import ModifyPasswordForm
api = RedPrint('user')
@api.route('/modify_password', methods=['POST'])
@login_required
def modify_password_api():
form = ModifyPasswordForm().validate_for_api().data_
if not current_user.check_password(form['old_password']):
return Forbidden('old password isn\'t correct')
current_user.modify(password=form['new_password'])
return Success('password has been changed')
|
py | 1a3267c9b086253af67c6243413bcdc188ac96b9 | #script to detect where there are gamakas in an audio segment
def validInputs(freq,mags):
#lists must be the same size
if len(freq) != len(mags):
print("Lists are not the same size!!")
assert(False)
elif len(freq) == 0:
print("Empty Lists!")
assert(False)
#easy scoring mechanism
def score1(f,m):
threshold = max(m)/5
if 1.0*sum(m)/len(m) > threshold:
return False
else: return True
#makes sure gradient of slope above threshold is positive before max, negative after max
def score2(f,m): pass
#input: frequency list (freq) and corresponding magnitude list (mags)
def gamakaDetect(freq,mags,getScore):
# contract check
validInputs(freq,mags)
if getScore(freq,mags): return True
else: return False
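# Minimal usage sketch with made-up spectrum values (not part of the original script):
if __name__ == '__main__':
    freqs = [100.0, 110.0, 120.0, 130.0, 140.0, 150.0]
    mags = [0.0, 1.0, 0.0, 0.0, 0.0, 0.0]  # energy concentrated in a single peak
    # score1 only accepts a segment when the mean magnitude stays below max(mags)/5.
    print(gamakaDetect(freqs, mags, score1))  # -> True for this peaky spectrum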
|
py | 1a326854a806d7ccfd7259586abb4f5ffda49904 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "herb.users"
verbose_name = _("Users")
def ready(self):
try:
import herb.users.signals # noqa F401
except ImportError:
pass
|
py | 1a3269f7258d5f86c2dceb301ed9003698c96e3f | """Django_104 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from opening import views
# In the `urls.py` that sits alongside `settings.py`, we still need to wire `STATIC_URL` into Django's URL routing
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name="Index"),
]
urlpatterns += staticfiles_urlpatterns() |
py | 1a326c1acea466fae285350bd1c0896cc0a17831 | import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, Index, Series
import pandas._testing as tm
class TestMap:
@pytest.mark.parametrize(
"data, categories",
[
(list("abcbca"), list("cab")),
(pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
],
ids=["string", "interval"],
)
def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
def test_map(self):
ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(
list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(
list("ababc"), categories=list("bac"), ordered=False, name="XXX"
)
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(
ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
)
# change categories dtype
ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
def f(x):
return {"A": 10, "B": 20, "C": 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex(
[10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
)
tm.assert_index_equal(result, exp)
result = ci.map(Series([10, 20, 30], index=["A", "B", "C"]))
tm.assert_index_equal(result, exp)
result = ci.map({"A": 10, "B": 20, "C": 30})
tm.assert_index_equal(result, exp)
def test_map_with_categorical_series(self):
# GH 12756
a = Index([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = CategoricalIndex(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(b), exp)
exp = Index(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(c), exp)
@pytest.mark.parametrize(
("data", "f"),
(
([1, 1, np.nan], pd.isna),
([1, 2, np.nan], pd.isna),
([1, 1, np.nan], {1: False}),
([1, 2, np.nan], {1: False, 2: False}),
([1, 1, np.nan], Series([False, False])),
([1, 2, np.nan], Series([False, False, False])),
),
)
def test_map_with_nan(self, data, f): # GH 24241
values = pd.Categorical(data)
result = values.map(f)
if data[1] == 1:
expected = pd.Categorical([False, False, np.nan])
tm.assert_categorical_equal(result, expected)
else:
expected = Index([False, False, np.nan])
tm.assert_index_equal(result, expected)
def test_map_with_dict_or_series(self):
orig_values = ["a", "B", 1, "a"]
new_values = ["one", 2, 3.0, "one"]
cur_index = CategoricalIndex(orig_values, name="XXX")
expected = CategoricalIndex(new_values, name="XXX", categories=[3.0, 2, "one"])
mapper = Series(new_values[:-1], index=orig_values[:-1])
result = cur_index.map(mapper)
# Order of categories in result can be different
tm.assert_index_equal(result, expected)
mapper = {o: n for o, n in zip(orig_values[:-1], new_values[:-1])}
result = cur_index.map(mapper)
# Order of categories in result can be different
tm.assert_index_equal(result, expected)
|
py | 1a326c775a590ba8104006a84edc5d392de2461c | #!/usr/bin/env python
from __future__ import print_function
import cv2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import time
import argparse
from opencvutils import Camera
import socket as Socket
from opencvutils import __version__ as VERSION
# import errno
# threaded version
# http://stackoverflow.com/questions/12650238/processing-simultaneous-asynchronous-requests-with-python-basehttpserver
def compress(orig, comp):
return float(orig) / float(comp)
class mjpgServer(BaseHTTPRequestHandler):
"""
A simple mjpeg server that either publishes images directly from a camera
or republishes images from another pygecko process.
"""
cam = None
ip = None
hostname = None
# def setUpCamera(self, cv=None, pi=None, win=(320, 240)):
# """
# cv - camera number, usually 0
# pi - set to True
# """
# if pi:
# self.cam = Camera('pi')
# self.cam.init(win=win)
# elif cv:
# self.cam = Camera('cv')
# self.cam.init(cameraNumber=cv, win=win)
#
# else:
# raise Exception('Error, you must specify "cv" or "pi" for camera type')
def getImage(self):
if self.cam:
print('cam')
return self.cam.read()
else:
# if not self.cam:
# raise Exception('Error, you must setup camera first')
# print('You should call setUpCamera() first ... let us try now and assume "cv=0"')
# self.setUpCamera(cv=0)
self.cam = Camera('cv')
self.cam.init(cameraNumber=0, win=(640, 480))
return False, None
# def do_HEAD(s):
# print 'do_HEAD'
# s.send_response(200)
# s.send_header("Content-type", "text/html")
# s.end_headers()
def do_GET(self):
print('connection from:', self.address_string())
# if self.ip is None or self.hostname is None:
# self.hostname = Socket.gethostname()
# self.ip = Socket.gethostbyname(Socket.gethostname())
if self.path == '/mjpg':
self.send_response(200)
self.send_header(
'Content-type',
'multipart/x-mixed-replace; boundary=--jpgboundary'
)
self.end_headers()
while True:
# ret, img = capture.read()
ret, img = self.getImage()
if not ret:
# print 'crap'
time.sleep(1)
continue
ret, jpg = cv2.imencode('.jpg', img)
# print 'Compression ratio: %d4.0:1'%(compress(img.size,jpg.size))
self.wfile.write("--jpgboundary")
self.send_header('Content-type', 'image/jpeg')
# self.send_header('Content-length',str(tmpFile.len))
self.send_header('Content-length', str(jpg.size))
self.end_headers()
self.wfile.write(jpg.tostring())
time.sleep(0.05)
# elif self.path == '/':
# # hn = self.server.server_address[0]
# port = self.server.server_address[1]
# ip = self.ip
# hostname = self.ip
#
# self.send_response(200)
# self.send_header('Content-type', 'text/html')
# self.end_headers()
# self.wfile.write('<html><head></head><body>')
# self.wfile.write('<h1>{0!s}[{1!s}]:{2!s}</h1>'.format(hostname, ip, port))
# self.wfile.write('<img src="http://{}:{}/mjpg"/>'.format(ip, port))
# self.wfile.write('<p>{0!s}</p>'.format((self.version_string())))
# self.wfile.write('<p>The mjpg stream can be accessed directly at:<ul>')
# self.wfile.write('<li><a href="http://{0!s}:{1!s}/mjpg"/>http://{0!s}:{1!s}/mjpg</a></li>'.format(ip, port))
# self.wfile.write('<li><a href="http://{0!s}:{1!s}/mjpg"/>http://{0!s}:{1!s}/mjpg</a></li>'.format(hostname, port))
# self.wfile.write('</p></ul>')
# self.wfile.write('<p>This only handles one connection at a time</p>')
# self.wfile.write('</body></html>')
else:
print('error', self.path)
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s} not found</h1>'.format(self.path))
self.wfile.write('</body></html>')
def handleArgs():
parser = argparse.ArgumentParser(version=VERSION, description='A simple mjpeg server Example: mjpeg-server -p 8080 --camera 4')
parser.add_argument('-p', '--port', help='mjpeg publisher port, default is 9000', type=int, default=9000)
parser.add_argument('-c', '--camera', help='set opencv camera number, ex. -c 1', type=int, default=0)
parser.add_argument('-t', '--type', help='set camera type, either pi or cv, ex. -t pi', default='cv')
parser.add_argument('-s', '--size', help='set size', nargs=2, type=int, default=(320, 240))
args = vars(parser.parse_args())
args['size'] = (args['size'][0], args['size'][1])
return args
def main():
args = handleArgs()
try:
# win = args['size']
# if args['type'] is 'cv':
# cv = args['camera']
# mjpgServer.setUpCamera(cv=cv, win=win)
# else:
# mjpgServer.setUpCamera(pi=True, win=win)
server = HTTPServer(('0.0.0.0', args['port']), mjpgServer)
print("server started on {}:{}".format(Socket.gethostname(), args['port']))
server.serve_forever()
except KeyboardInterrupt:
print('KeyboardInterrupt')
server.socket.close()
if __name__ == '__main__':
main()
|
py | 1a326d4f3afd25e80757a252fc104fcfe3d84a5e | import math
def test_triangle_number_factors():
assert get_triangle_number_with_factors_greater_than(5) == 28
assert get_triangle_number_with_factors_greater_than(500) == 76576500
# o(1), theta(1), omega(1)
def get_triangle_number(n):
return (n * (n + 1)) / 2
# o(log(n)), theta(log(n)), omega(1)
def num_factors_over_limit(num, limit):
end = int((math.sqrt(num)) + 1)
num_factors = 2
for i in xrange(2, end):
if num % i == 0:
num_factors += 2
if num_factors > limit:
return True
return False
# o(1), theta(1), omega(1)
def infinite_triangle_number_seq(start):
num = start
while True:
yield get_triangle_number(num)
num += 1
# o(n), theta(n), omega(n)
def get_triangle_number_with_factors_greater_than(n):
for i in infinite_triangle_number_seq(n):
if num_factors_over_limit(i, n):
return i
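# Worked example: starting from T(5) = 15, the triangle numbers are 15, 21, 28, ...
# and 28 is the first of them with more than 5 divisors (1, 2, 4, 7, 14, 28),
# which is what the test above expects from get_triangle_number_with_factors_greater_than(5).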
|
py | 1a326d5e2696bbbe5f54780afccdd72b2aa212ba | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool descendants/ancestors information update.
Test mempool update of transaction descendants/ancestors information (count, size)
when transactions have been re-added from a disconnected block to the mempool.
"""
import time
from decimal import Decimal
from test_framework.test_framework import MERICATestFramework
from test_framework.util import assert_equal
class MempoolUpdateFromBlockTest(MERICATestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def transaction_graph_test(self, size, n_tx_to_mine=None, start_input_txid='', end_address='', fee=Decimal(0.00100000)):
"""Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.
Keyword arguments:
size -- the order N of the tournament which is equal to the number of the created transactions
n_tx_to_mine -- the number of transaction that should be mined into a block
If all of the N created transactions tx[0]..tx[N-1] reside in the mempool,
the following holds:
the tx[K] transaction:
- has N-K descendants (including this one), and
- has K+1 ancestors (including this one)
More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
"""
if not start_input_txid:
start_input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0]
if not end_address:
end_address = self.nodes[0].getnewaddress()
first_block_hash = ''
tx_id = []
tx_size = []
self.log.info('Creating {} transactions...'.format(size))
for i in range(0, size):
self.log.debug('Preparing transaction #{}...'.format(i))
# Prepare inputs.
if i == 0:
inputs = [{'txid': start_input_txid, 'vout': 0}]
inputs_value = self.nodes[0].gettxout(start_input_txid, 0)['value']
else:
inputs = []
inputs_value = 0
for j, tx in enumerate(tx_id[0:i]):
# Transaction tx[K] is a child of each of previous transactions tx[0]..tx[K-1] at their output K-1.
vout = i - j - 1
inputs.append({'txid': tx_id[j], 'vout': vout})
inputs_value += self.nodes[0].gettxout(tx, vout)['value']
self.log.debug('inputs={}'.format(inputs))
self.log.debug('inputs_value={}'.format(inputs_value))
# Prepare outputs.
tx_count = i + 1
if tx_count < size:
# Transaction tx[K] is an ancestor of each of subsequent transactions tx[K+1]..tx[N-1].
n_outputs = size - tx_count
output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
outputs = {}
for _ in range(n_outputs):
outputs[self.nodes[0].getnewaddress()] = output_value
else:
output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
outputs = {end_address: output_value}
self.log.debug('output_value={}'.format(output_value))
self.log.debug('outputs={}'.format(outputs))
# Create a new transaction.
unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed_raw_tx = self.nodes[0].signrawtransactionwithwallet(unsigned_raw_tx)
tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx['hex']))
tx_size.append(self.nodes[0].getrawmempool(True)[tx_id[-1]]['vsize'])
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
self.log.info('The batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
block_hash = self.nodes[0].generate(1)[0]
if not first_block_hash:
first_block_hash = block_hash
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.info('All of the transactions from the current batch have been mined into a block.')
elif tx_count == size:
# At the end all of the mined blocks are invalidated, and all of the created
# transactions should be re-added from disconnected blocks to the mempool.
self.log.info('The last batch of {} transactions has been accepted into the mempool.'.format(len(self.nodes[0].getrawmempool())))
start = time.time()
self.nodes[0].invalidateblock(first_block_hash)
end = time.time()
assert_equal(len(self.nodes[0].getrawmempool()), size)
self.log.info('All of the recently mined transactions have been re-added into the mempool in {} seconds.'.format(end - start))
self.log.info('Checking descendants/ancestors properties of all of the in-mempool transactions...')
for k, tx in enumerate(tx_id):
self.log.debug('Check transaction #{}.'.format(k))
assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantcount'], size - k)
assert_equal(self.nodes[0].getrawmempool(True)[tx]['descendantsize'], sum(tx_size[k:size]))
assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorcount'], k + 1)
assert_equal(self.nodes[0].getrawmempool(True)[tx]['ancestorsize'], sum(tx_size[0:(k + 1)]))
def run_test(self):
# Use batch size limited by DEFAULT_ANCESTOR_LIMIT = 25 to not fire "too many unconfirmed parents" error.
self.transaction_graph_test(size=100, n_tx_to_mine=[25, 50, 75])
if __name__ == '__main__':
MempoolUpdateFromBlockTest().main()
|
py | 1a326d77aaeebb240683a47eb182102593eb58e2 | import re
from itertools import groupby
from operator import attrgetter
import itertools
import urllib
import json
from datetime import date, timedelta, datetime, MINYEAR
from dateutil.relativedelta import relativedelta
from dateutil import parser
import requests
import sqlalchemy as sa
from collections import namedtuple
import os
from haystack.backends.solr_backend import SolrSearchQuery
from haystack.query import SearchQuerySet
import pytz
from django.db import transaction, connection, connections
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.shortcuts import render
from django.db.models.functions import Lower, Now, Cast
from django.db.models import (Max,
Min,
Prefetch,
Case,
When,
Value,
IntegerField,
Q)
from django.utils import timezone
from django.utils.text import slugify
from django.views.generic import TemplateView
from django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseNotFound
from django.shortcuts import render_to_response, redirect
from django.core import management
from django.core.serializers import serialize
from django.views.generic import View
from councilmatic_core.views import IndexView, BillDetailView, \
CouncilMembersView, AboutView, CommitteeDetailView, CommitteesView, \
PersonDetailView, EventDetailView, EventsView, CouncilmaticFacetedSearchView
from councilmatic_core.models import *
from opencivicdata.core.models import PersonLink
from lametro.models import LAMetroBill, LAMetroPost, LAMetroPerson, \
LAMetroEvent, LAMetroOrganization, LAMetroSubject
from lametro.forms import AgendaUrlForm, AgendaPdfForm, LAMetroCouncilmaticSearchForm
from councilmatic.settings_jurisdiction import MEMBER_BIOS
from councilmatic.settings import MERGER_BASE_URL, PIC_BASE_URL
from opencivicdata.legislative.models import EventDocument
app_timezone = pytz.timezone(settings.TIME_ZONE)
class LAMetroIndexView(IndexView):
template_name = 'lametro/index.html'
event_model = LAMetroEvent
@property
def extra_context(self):
extra = {}
extra['upcoming_board_meetings'] = self.event_model.upcoming_board_meetings()[:2]
extra['current_meeting'] = self.event_model.current_meeting()
extra['bilingual'] = bool([e for e in extra['current_meeting'] if e.bilingual])
extra['USING_ECOMMENT'] = settings.USING_ECOMMENT
extra['todays_meetings'] = self.event_model.todays_meetings().order_by('start_date')
extra['form'] = LAMetroCouncilmaticSearchForm()
return extra
class LABillDetail(BillDetailView):
model = LAMetroBill
template_name = 'lametro/legislation.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
bill = self.get_object()
context['attachments'] = bill.attachments.all().order_by(Lower('note'))
actions = bill.actions.all()
organization_lst = [action.organization for action in actions]
context['sponsorships'] = set(organization_lst)
related_bills = context['legislation']\
.related_bills\
.exclude(related_bill__isnull=True)\
.annotate(latest_date=Max('related_bill__actions__date'))\
.order_by('-latest_date')
context['related_bills'] = related_bills
context['actions'] = bill.actions_and_agendas
return context
class LAMetroEventDetail(EventDetailView):
model = LAMetroEvent
template_name = 'lametro/event.html'
def post(self, request, *args, **kwargs):
self.object = self.get_object() # Assign object to detail view, so that get_context_data can find this variable: https://stackoverflow.com/questions/34460708/checkoutview-object-has-no-attribute-object
event = self.get_object()
event_slug = event.slug
# Look for the button name and assign form values.
if 'url_form' in request.POST:
url_form = AgendaUrlForm(request.POST)
pdf_form = AgendaPdfForm()
elif 'pdf_form' in request.POST:
pdf_form = AgendaPdfForm(request.POST, request.FILES)
url_form = AgendaUrlForm()
# Validate forms and redirect.
if url_form.is_valid():
agenda_url = url_form['agenda'].value()
document_obj, created = EventDocument.objects.get_or_create(
event=event,
note='Event Document - Manual upload URL')
document_obj.date=timezone.now().date()
document_obj.save()
document_obj.links.create(url=agenda_url)
return HttpResponseRedirect('/event/%s' % event_slug)
elif pdf_form.is_valid() and 'pdf_form' in request.POST:
agenda_pdf = request.FILES['agenda']
handle_uploaded_agenda(agenda=agenda_pdf, event=event)
return HttpResponseRedirect('/event/%s' % event_slug)
else:
return self.render_to_response(self.get_context_data(url_form=url_form, pdf_form=pdf_form))
def get_object(self):
# Get the event with prefetched media_urls in proper order.
event = LAMetroEvent.objects.with_media().get(slug=self.kwargs['slug'])
return event
def get_context_data(self, **kwargs):
context = super(EventDetailView, self).get_context_data(**kwargs)
event = context['event']
# Metro admins should see a status report if Legistar is down.
# GET the calendar page, which contains relevant URL for agendas.
if self.request.user.is_authenticated:
r = requests.get('https://metro.legistar.com/calendar.aspx')
context['legistar_ok'] = r.ok
# GET the event URL; allow admin to delete event if 404
response = requests.get(event.api_source.url)
context['event_ok'] = response.ok
try:
context['minutes'] = event.documents.get(note__icontains='minutes')
except EventDocument.DoesNotExist:
pass
agenda_with_board_reports = event.agenda\
.filter(related_entities__bill__versions__isnull=False)\
.annotate(int_order=Cast('order', IntegerField()))\
.order_by('int_order')
# Find agenda link.
if event.documents.all():
for document in event.documents.all():
if "Agenda" in document.note:
context['agenda_url'] = document.links.first().url
context['document_timestamp'] = document.date
elif "Manual upload URL" in document.note:
context['uploaded_agenda_url'] = document.links.first().url
context['document_timestamp'] = document.date
elif "Manual upload PDF" in document.note:
context['uploaded_agenda_pdf'] = document.links.first().url
context['document_timestamp'] = document.date
'''
LA Metro Councilmatic uses the adv_cache library
to partially cache templates: in the event view, we cache
the entire template, except the iframe. (N.B. With
this library, the views do not cached, unless
explicitly wrapped in a django cache decorator.
Nonetheless, several popular browsers (e.g.,
Chrome and Firefox) retrieve cached iframe images,
regardless of the site's caching specifications.
We use the agenda's "date" timestamp to bust
the iframe cache: we save it inside context and
then assign it as the "name" of the iframe,
preventing the browser from retrieving a cached
iframe, when the timestamp changes.
'''
context['related_board_reports'] = agenda_with_board_reports
context['base_url'] = PIC_BASE_URL # Give JS access to this variable
context['has_agenda'] = (context.get('agenda_url') or
context.get('uploaded_agenda_url') or
context.get('uploaded_agenda_pdf'))
# Render forms if not a POST request
if 'url_form' not in context:
context['url_form'] = AgendaUrlForm()
if 'pdf_form' not in context:
context['pdf_form'] = AgendaPdfForm()
context['USING_ECOMMENT'] = settings.USING_ECOMMENT
return context
def handle_uploaded_agenda(agenda, event):
with open('lametro/static/pdf/agenda-%s.pdf' % event.slug, 'wb+') as destination:
for chunk in agenda.chunks():
destination.write(chunk)
# Create the document in database
document_obj, created = EventDocument.objects.get_or_create(
event=event,
note='Event Document - Manual upload PDF')
document_obj.date = timezone.now().date()
document_obj.links.create(url='pdf/agenda-%s.pdf' % event.slug)
document_obj.save()
# Collect static to render PDF on server
management.call_command('collectstatic', '--noinput')
@login_required
def delete_submission(request, event_slug):
event = LAMetroEvent.objects.get(slug=event_slug)
event_doc = EventDocument.objects.filter(event_id=event.id, note__icontains='Manual upload')
for e in event_doc:
# Remove stored PDF from Metro app.
if 'Manual upload PDF' in e.note:
try:
os.remove('lametro/static/%s' % e.links.get().url )
except OSError:
pass
e.delete()
return HttpResponseRedirect('/event/%s' % event_slug)
@login_required
def delete_event(request, event_slug):
event = LAMetroEvent.objects.get(slug=event_slug)
event.delete()
return HttpResponseRedirect('/events/')
class LAMetroEventsView(EventsView):
template_name = 'lametro/events.html'
def get_context_data(self, **kwargs):
context = {}
# Did the user set date boundaries?
start_date_str = self.request.GET.get('from')
end_date_str = self.request.GET.get('to')
day_grouper = lambda x: (x.local_start_time.year, x.local_start_time.month, x.local_start_time.day)
minutes_queryset = EventDocument.objects.filter(note__icontains='minutes')
# If yes...
if start_date_str and end_date_str:
context['start_date'] = start_date_str
context['end_date'] = end_date_str
start_date_time = parser.parse(start_date_str)
end_date_time = parser.parse(end_date_str)
select_events = LAMetroEvent.objects\
.with_media()\
.filter(start_time__gt=start_date_time)\
.filter(start_time__lt=end_date_time)\
.order_by('start_time')
select_events = select_events.prefetch_related(Prefetch('documents',
minutes_queryset,
to_attr='minutes'))\
.prefetch_related('minutes__links')
org_select_events = []
for event_date, events in itertools.groupby(select_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_select_events.append([date(*event_date), events])
context['select_events'] = org_select_events
# If all meetings
elif self.request.GET.get('show'):
all_events = LAMetroEvent.objects\
.with_media()\
.order_by('-start_time')
org_all_events = []
for event_date, events in itertools.groupby(all_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_all_events.append([date(*event_date), events])
context['all_events'] = org_all_events
# If no...
else:
# Upcoming events
future_events = LAMetroEvent.objects\
.with_media()\
.filter(start_time__gt=timezone.now())\
.order_by('start_time')
org_future_events = []
for event_date, events in itertools.groupby(future_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_future_events.append([date(*event_date), events])
context['future_events'] = org_future_events
# Past events
past_events = LAMetroEvent.objects\
.with_media()\
.filter(start_time__lt=timezone.now())\
.order_by('-start_time')
past_events = past_events.prefetch_related(Prefetch('documents',
minutes_queryset,
to_attr='minutes'))\
.prefetch_related('minutes__links')
org_past_events = []
for event_date, events in itertools.groupby(past_events, key=day_grouper):
events = sorted(events, key=attrgetter('start_time'))
org_past_events.append([date(*event_date), events])
context['past_events'] = org_past_events
context['user_subscribed'] = False
if self.request.user.is_authenticated:
user = self.request.user
context['user'] = user
if settings.USING_NOTIFICATIONS:
if (len(user.eventssubscriptions.all()) > 0):
context['user_subscribed'] = True
return context
class LABoardMembersView(CouncilMembersView):
template_name = 'lametro/board_members.html'
def map(self):
maps = {'map_geojson_districts': {'type': 'FeatureCollection',
'features': []},
'map_geojson_sectors': {'type': 'FeatureCollection',
'features': []},
'map_geojson_city': {'type': 'FeatureCollection',
'features': []},
}
posts = LAMetroPost.objects\
.filter(shape__isnull=False)\
.exclude(label='Appointee of Mayor of the City of Los Angeles')
for post in posts:
district = post.label
try:
current_membership = post.memberships.get(end_date_dt__gt=Now())
except ObjectDoesNotExist:
council_member = 'Vacant'
detail_link = ''
else:
council_member = current_membership.person.name
detail_link = current_membership.person.slug
feature = {
'type': 'Feature',
'geometry': json.loads(post.shape.json),
'properties': {
'district': district,
'council_member': council_member,
'detail_link': '/person/' + detail_link,
'select_id': 'polygon-{}'.format(slugify(district)),
},
}
if 'council_district' in post.division_id:
maps['map_geojson_districts']['features'].append(feature)
if 'la_metro_sector' in post.division_id:
maps['map_geojson_sectors']['features'].append(feature)
if post.division_id == 'ocd-division/country:us/state:ca/place:los_angeles':
maps['map_geojson_city']['features'].append(feature)
return maps
def get_queryset(self):
board = Organization.objects.get(name=settings.OCD_CITY_COUNCIL_NAME)
memberships = board.memberships.filter(Q(role='Board Member') |
Q(role='Nonvoting Board Member'))\
.filter(end_date_dt__gte=Now())
display_order = {
'Chair': 0,
'Vice Chair': 1,
'1st Chair': 1,
'1st Vice Chair': 1,
'2nd Chair': 2,
'2nd Vice Chair': 2,
'Board Member': 3,
'Nonvoting Board Member': 4,
}
sortable_memberships = []
# Display board leadership first. Person.board_office is null for
# members without leadership roles, so fall back to using their
# board membership role to decide display order.
for m in memberships:
primary_post = m.person.board_office or m
m.index = display_order[primary_post.role]
sortable_memberships.append(m)
return sorted(sortable_memberships, key=lambda x: (
x.index,
x.person.family_name
))
def get_context_data(self, *args, **kwargs):
context = super(CouncilMembersView, self).get_context_data(**kwargs)
context['seo'] = self.get_seo_blob()
board = LAMetroOrganization.objects.get(name=settings.OCD_CITY_COUNCIL_NAME)
context['recent_activity'] = board.actions.order_by('-date', '-bill__identifier', '-order')
context['recent_events'] = board.recent_events
if settings.MAP_CONFIG:
context.update(self.map())
return context
class LAMetroAboutView(AboutView):
template_name = 'lametro/about.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['timestamp'] = datetime.now(app_timezone).strftime('%m%d%Y%s')
return context
class LACommitteesView(CommitteesView):
template_name = 'lametro/committees.html'
def get_queryset(self):
'''
We only want committees that have at least one member who is not
the CEO. We also want to not count the CEO in the committee
size
'''
ceo = LAMetroPerson.ceo()
memberships = Membership.objects\
.exclude(person=ceo)\
.filter(end_date_dt__gt=Now(),
organization__classification='committee')
qs = LAMetroOrganization.objects\
.filter(classification='committee')\
.filter(memberships__in=memberships)\
.distinct()
qs = qs.prefetch_related(Prefetch('memberships',
memberships,
to_attr='current_members'))
return qs
class LACommitteeDetailView(CommitteeDetailView):
model = LAMetroOrganization
template_name = 'lametro/committee.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
committee = context['committee']
if getattr(settings, 'COMMITTEE_DESCRIPTIONS', None):
description = settings.COMMITTEE_DESCRIPTIONS.get(committee.slug)
context['committee_description'] = description
ceo = LAMetroPerson.ceo()
non_ceos = committee.all_members\
.annotate(index=Case(
When(role='Chair', then=Value(0)),
When(role='Vice Chair', then=Value(1)),
When(role='1st Vice Chair', then=Value(1)),
When(role='2nd Vice Chair', then=Value(2)),
When(role='Member', then=Value(3)),
default=Value(999),
output_field=IntegerField()))\
.exclude(person=ceo)\
.order_by('index', 'person__family_name', 'person__given_name')
context['non_ceos'] = non_ceos
context['ceo'] = ceo
return context
class LAPersonDetailView(PersonDetailView):
template_name = 'lametro/person.html'
model = LAMetroPerson
def dispatch(self, request, *args, **kwargs):
slug = self.kwargs['slug']
try:
person = self.model.objects.get(slug=slug)
except LAMetroPerson.DoesNotExist:
person = None
else:
response = super().dispatch(request, *args, **kwargs)
if not person:
# Grab the first and last name from slug like "john-smith-af5a8ab39aad"
short_slug = '-'.join(slug.split('-')[:-1])
try:
person = self.model.objects.get(slug__startswith=short_slug)
except (LAMetroPerson.DoesNotExist, LAMetroPerson.MultipleObjectsReturned):
# Return a 404 if more than one matching slug, or if there are no matching slugs
response = HttpResponseNotFound()
else:
response = HttpResponsePermanentRedirect(reverse('person', args=[person.slug]))
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
person = context['person']
council_post = person.latest_council_membership.post
try:
context['qualifying_post'] = council_post.acting_label
except AttributeError:
context['qualifying_post'] = None
try:
if council_post.shape:
context['map_geojson'] = serialize('geojson',
[council_post],
geometry_field='shape',
fields=())
else:
context['map_geojson'] = None
except AttributeError:
context['map_geojson'] = None
if person.committee_sponsorships:
context['sponsored_legislation'] = person.committee_sponsorships
else:
context['sponsored_legislation'] = []
context['memberships_list'] = person.current_memberships\
.exclude(organization__name='Board of Directors')\
.annotate(index=Case(
When(role='Chair', then=Value(0)),
When(role='Vice Chair', then=Value(1)),
When(role='1st Vice Chair', then=Value(1)),
When(role='2nd Vice Chair', then=Value(2)),
When(role='Member', then=Value(3)),
default=Value(999),
output_field=IntegerField()))\
.order_by('index')
if person.slug in MEMBER_BIOS:
context['member_bio'] = MEMBER_BIOS[person.slug]
try:
context['website_url'] = person.links.get(note='web_site').url
except PersonLink.DoesNotExist:
pass
return context
class IdentifierBoostSearchQuery(SolrSearchQuery):
def run(self, spelling_query=None, **kwargs):
'''
If the search contains identifiers, boost results with matching
identifiers.
Reference:
https://medium.com/@pablocastelnovo/if-they-match-i-want-them-to-be-always-first-boosting-documents-in-apache-solr-with-the-boost-362abd36476c
'''
identifiers = set(re.findall(r'\d{4}-\d{4}', self.build_query()))
if identifiers:
kwargs.update({
'defType': 'edismax',
'bq': '+'.join('identifier:"{}"^2.0'.format(i) for i in identifiers),
})
return super().run(spelling_query, **kwargs)
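# Illustration (hypothetical query, not from the original code): a search for
# "budget 2018-0123" makes the regex above capture "2018-0123", so the extra
# params become defType='edismax' and bq='identifier:"2018-0123"^2.0', boosting
# results whose identifier matches while leaving other matches in the result set.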
class LAMetroCouncilmaticFacetedSearchView(CouncilmaticFacetedSearchView):
def __init__(self, *args, **kwargs):
kwargs['form_class'] = LAMetroCouncilmaticSearchForm
super(LAMetroCouncilmaticFacetedSearchView, self).__init__(*args, **kwargs)
def extra_context(self):
extra_context = super().extra_context()
extra_context['topic_facets'] = [facet for facet, _ in LAMetroSubject.CLASSIFICATION_CHOICES]
return extra_context
def build_form(self, form_kwargs=None):
if not form_kwargs:
form_kwargs = {}
form_kwargs['selected_facets'] = self.request.GET.getlist("selected_facets")
form_kwargs['search_corpus'] = 'bills' if self.request.GET.get('search-reports') else 'all'
form_kwargs['result_type'] = self.request.GET.get('result_type', 'all')
sqs = SearchQuerySet(
query=IdentifierBoostSearchQuery('default')
).facet('bill_type', sort='index')\
.facet('sponsorships', sort='index')\
.facet('legislative_session', sort='index')\
.facet('inferred_status')\
.facet('topics')\
.facet('lines_and_ways')\
.facet('phase')\
.facet('project')\
.facet('metro_location')\
.facet('geo_admin_location')\
.facet('motion_by')\
.facet('significant_date')\
.facet('plan_program_policy')\
.highlight(**{'hl.fl': 'text,attachment_text'})
data = None
kwargs = {
'load_all': self.load_all,
}
if form_kwargs:
kwargs.update(form_kwargs)
dataDict = {}
if len(self.request.GET):
data = self.request.GET
dataDict = dict(data)
if self.searchqueryset is not None:
kwargs['searchqueryset'] = sqs
if dataDict.get('sort_by'):
for el in dataDict['sort_by']:
if el == 'date':
if dataDict.get('order_by') == ['asc']:
kwargs['searchqueryset'] = sqs.order_by('last_action_date')
else:
kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
if el == 'title':
if dataDict.get('order_by') == ['desc']:
kwargs['searchqueryset'] = sqs.order_by('-sort_name')
else:
kwargs['searchqueryset'] = sqs.order_by('sort_name')
if el == 'relevance':
kwargs['searchqueryset'] = sqs
elif dataDict.get('q'):
kwargs['searchqueryset'] = sqs
else:
kwargs['searchqueryset'] = sqs.order_by('-last_action_date')
return self.form_class(data, **kwargs)
class GoogleView(IndexView):
template_name = 'lametro/google66b34bb6957ad66c.html'
class LAMetroArchiveSearch(TemplateView):
template_name = 'lametro/archive_search.html'
def metro_login(request):
logout(request)
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
if user is not None:
login(request, user)
return HttpResponseRedirect('/events/')
else:
form = AuthenticationForm()
return render(request, 'lametro/metro_login.html', {'form': form})
def metro_logout(request):
logout(request)
return HttpResponseRedirect('/')
|
py | 1a326e4172332e058ea0c0c637018cf145f5b0f2 | # encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflowvisu
import math
import mnistdata
print("Tensorflow version " + tf.__version__)
tf.set_random_seed(0)
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (sigmoid+BN) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (sigmoid+BN) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax+BN) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = mnistdata.read_data_sets("data", one_hot=True, reshape=False)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# train/test selector for batch normalisation
tst = tf.placeholder(tf.bool)
# training iteration
iter = tf.placeholder(tf.int32)
# five layers and their number of neurons (tha last layer has 10 softmax neurons)
L = 200
M = 100
N = 60
P = 30
Q = 10
# Weights initialised with small random values between -0.2 and +0.2
# When using RELUs, make sure biases are initialised with small *positive* values for example 0.1 = tf.ones([K])/10
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1)) # 784 = 28 * 28
S1 = tf.Variable(tf.ones([L]))
O1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
S2 = tf.Variable(tf.ones([M]))
O2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
S3 = tf.Variable(tf.ones([N]))
O3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, P], stddev=0.1))
S4 = tf.Variable(tf.ones([P]))
O4 = tf.Variable(tf.zeros([P]))
W5 = tf.Variable(tf.truncated_normal([P, Q], stddev=0.1))
B5 = tf.Variable(tf.zeros([Q]))
## Batch normalisation conclusions with sigmoid activation function:
# BN is applied between logits and the activation function
# On Sigmoids it is very clear that without BN, the sigmoids saturate, with BN, they output
# a clean gaussian distribution of values, especially with high initial learning rates.
# sigmoid, no batch-norm, lr(0.003, 0.0001, 2000) => 97.5%
# sigmoid, batch-norm lr(0.03, 0.0001, 1000) => 98%
# sigmoid, batch-norm, no offsets => 97.3%
# sigmoid, batch-norm, no scales => 98.1% but cannot hold fast learning rate at start
# sigmoid, batch-norm, no scales, no offsets => 96%
# Both scales and offsets are useful with sigmoids.
# With RELUs, the scale variables can be omitted.
# Biases are not useful with batch norm, offsets are to be used instead
# Steady 98.5% accuracy using these parameters:
# moving average decay: 0.998 (equivalent to averaging over two epochs)
# learning rate decay from 0.03 to 0.0001 speed 1000 => max 98.59 at 6500 iterations, 98.54 at 10K it, 98% at 1300it, 98.5% at 3200it
def batchnorm(Ylogits, Offset, Scale, is_test, iteration):
exp_moving_avg = tf.train.ExponentialMovingAverage(0.998, iteration) # adding the iteration prevents from averaging across non-existing iterations
bnepsilon = 1e-5
mean, variance = tf.nn.moments(Ylogits, [0])
update_moving_averages = exp_moving_avg.apply([mean, variance])
m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)
v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)
Ybn = tf.nn.batch_normalization(Ylogits, m, v, Offset, Scale, bnepsilon)
return Ybn, update_moving_averages
def no_batchnorm(Ylogits, Offset, Scale, is_test, iteration):
return Ylogits, tf.no_op()
# The model
XX = tf.reshape(X, [-1, 784])
Y1l = tf.matmul(XX, W1)
Y1bn, update_ema1 = batchnorm(Y1l, O1, S1, tst, iter)
Y1 = tf.nn.sigmoid(Y1bn)
Y2l = tf.matmul(Y1, W2)
Y2bn, update_ema2 = batchnorm(Y2l, O2, S2, tst, iter)
Y2 = tf.nn.sigmoid(Y2bn)
Y3l = tf.matmul(Y2, W3)
Y3bn, update_ema3 = batchnorm(Y3l, O3, S3, tst, iter)
Y3 = tf.nn.sigmoid(Y3bn)
Y4l = tf.matmul(Y3, W4)
Y4bn, update_ema4 = batchnorm(Y4l, O4, S4, tst, iter)
Y4 = tf.nn.sigmoid(Y4bn)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)
update_ema = tf.group(update_ema1, update_ema2, update_ema3, update_ema4)
# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 100 images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases = tf.concat([tf.reshape(O1, [-1]), tf.reshape(O2, [-1]), tf.reshape(O3, [-1]), tf.reshape(O4, [-1]), tf.reshape(B5, [-1])], 0)
# to use for sigmoid
allactivations = tf.concat([tf.reshape(Y1, [-1]), tf.reshape(Y2, [-1]), tf.reshape(Y3, [-1]), tf.reshape(Y4, [-1])], 0)
# to use for RELU
#allactivations = tf.concat([tf.reduce_max(Y1, [0]), tf.reduce_max(Y2, [0]), tf.reduce_max(Y3, [0]), tf.reduce_max(Y4, [0])], 0)
alllogits = tf.concat([tf.reshape(Y1l, [-1]), tf.reshape(Y2l, [-1]), tf.reshape(Y3l, [-1]), tf.reshape(Y4l, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis(title4="Logits", title5="activations", histogram4colornum=2, histogram5colornum=2)
# training step
# the learning rate is: 0.0001 + 0.03 * (1/e)^(step/1000), i.e. exponential decay from 0.03->0.0001
lr = 0.0001 + tf.train.exponential_decay(0.03, iter, 1000, 1/math.e)
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
# training on batches of 100 images with 100 labels
batch_X, batch_Y = mnist.train.next_batch(100)
# compute training values for visualisation
if update_train_data:
a, c, im, al, ac, l = sess.run([accuracy, cross_entropy, I, alllogits, allactivations, lr],
feed_dict={X: batch_X, Y_: batch_Y, iter: i, tst: False})
print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(l) + ")")
datavis.append_training_curves_data(i, a, c)
datavis.update_image1(im)
datavis.append_data_histograms(i, al, ac)
# compute test values for visualisation
if update_test_data:
a, c, im = sess.run([accuracy, cross_entropy, It], feed_dict={X: mnist.test.images, Y_: mnist.test.labels, tst: True})
print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
datavis.append_test_curves_data(i, a, c)
datavis.update_image2(im)
# the backpropagation training step, also updates exponential moving averages for batch norm
sess.run([train_step, update_ema], feed_dict={X: batch_X, Y_: batch_Y, iter: i, tst: False})
datavis.animate(training_step, iterations=10000+1, train_data_update_freq=20, test_data_update_freq=100, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(10000+1): training_step(i, i % 100 == 0, i % 20 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# Some results to expect:
# (In all runs, if sigmoids are used, all biases are initialised at 0; if RELUs are used,
# all biases are initialised at 0.1, apart from the last one which is initialised at 0.)
## decaying learning rate from 0.003 to 0.0001 decay_speed 2000, 10K iterations
# final test accuracy = 0.9813 (sigmoid - training cross-entropy not stabilised)
# final test accuracy = 0.9842 (relu - training set fully learned, test accuracy stable)
|
py | 1a326f66aa6943f55240dac8017cf6b8b0f3781b | # Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python execution for checking whether the tests output is flaky."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import os
from core import python_utils
import requests
FLAKE_CHECK_AND_REPORT_URL = (
'https://oppia-e2e-test-results-logger.herokuapp.com'
'/check-flake-and-report')
PASS_REPORT_URL = (
'https://oppia-e2e-test-results-logger.herokuapp.com'
'/report-pass')
REPORT_API_KEY = '7Ccp062JVjv9LUYwnLMqcm5Eu5gYqqhpl3zQmcO3cDQ'
CI_INFO = {
'githubActions': {
'env': {
'identifier': 'GITHUB_ACTIONS',
'user_info': 'GITHUB_ACTOR',
'branch': 'GITHUB_REF',
'build_url_template_vars': ['GITHUB_REPOSITORY', 'GITHUB_RUN_ID'],
},
'build_url_template': 'https://github.com/%s/actions/runs/%s',
},
'circleCI': {
'env': {
'identifier': 'CIRCLECI',
'user_info': 'CIRCLE_USERNAME',
'branch': 'CIRCLE_BRANCH',
'build_url_template_vars': ['CIRCLE_BUILD_URL']
},
'build_url_template': '%s',
}
}
REQUEST_EXCEPTIONS = (
requests.RequestException, requests.ConnectionError,
requests.HTTPError, requests.TooManyRedirects, requests.Timeout)
def _print_color_message(message):
"""Prints the given message in red color.
Args:
message: str. The success message to print.
"""
# \033[91m is the ANSI escape sequences for green color.
python_utils.PRINT('\033[92m' + message + '\033[0m\n')
def check_if_on_ci():
"""Check if the script is running on a CI server.
Returns: bool. Whether we are running on a CI server.
"""
for info in CI_INFO.values():
ci_identifier = info['env']['identifier']
if os.getenv(ci_identifier):
return True
return False
def _get_build_info():
"""Returns the info related to the build container."""
build_info = {}
for info in CI_INFO.values():
ci_env = info['env']
if not os.getenv(ci_env['identifier']):
continue
template_values = []
for template_var in ci_env['build_url_template_vars']:
value = os.getenv(template_var)
if value is None:
raise RuntimeError(
'Expected environment variable %s missing' %
template_var)
template_values.append(value)
build_url = info['build_url_template'] % tuple(template_values)
timestamp = datetime.datetime.utcnow().isoformat() + '+00:00'
build_info['username'] = os.getenv(ci_env['user_info'])
build_info['build_url'] = build_url
build_info['timestamp'] = timestamp
build_info['branch'] = os.getenv(ci_env['branch'])
return build_info
raise Exception('Unknown build environment.')
def report_pass(suite_name):
"""Report a passing test to the logging server."""
metadata = _get_build_info()
payload = {
'suite': suite_name,
'metadata': metadata,
}
try:
requests.post(
PASS_REPORT_URL, json=payload,
allow_redirects=False,
headers={'report_key': REPORT_API_KEY})
except REQUEST_EXCEPTIONS as e:
_print_color_message((
            'Failed to contact E2E test logging server at %s. '
            'Please report to E2E team in case server is down. '
            'Exception: %s') % (PASS_REPORT_URL, e))
_print_color_message(
'Reported pass to E2E logging server at {}.'.format(
PASS_REPORT_URL))
def is_test_output_flaky(output_lines, suite_name):
"""Returns whether the test output matches any flaky test log."""
build_info = _get_build_info()
payload = {
'suite': suite_name,
'output_lines': output_lines,
'metadata': build_info,
}
response = None
try:
response = requests.post(
FLAKE_CHECK_AND_REPORT_URL, json=payload,
allow_redirects=False,
headers={'report_key': REPORT_API_KEY})
except REQUEST_EXCEPTIONS as e:
_print_color_message((
            'Failed to contact E2E test logging server at %s. '
            'Please report to E2E team in case server is down. '
            'Exception: %s') % (FLAKE_CHECK_AND_REPORT_URL, e))
return False
if not response.ok:
_print_color_message('Failed request with response code: %s (%s)' % (
response.status_code, response.reason))
return False
report = {}
try:
report = response.json()
except ValueError as e:
_print_color_message('Unable to convert json response: %s' % e)
if 'log' in report:
log_str = '\n'.join(report['log'])
_print_color_message(
'Logs from test result logging server:\n %s' % log_str)
flaky = report['result'] if 'result' in report else False
_print_color_message(
'E2E logging server says test flaky: {}.'.format(flaky))
if flaky:
flake = report['flake']
_print_color_message('Flake Detected:')
_print_color_message(' Suite: %s' % flake['suite'])
_print_color_message(' Test: %s' % flake['test'])
_print_color_message(
' Error Message: %s' % flake['flake_id'])
return flaky
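# A minimal usage sketch (not part of the Oppia runner itself; the suite name and
# output line below are hypothetical, and the CI environment variables listed in
# CI_INFO are assumed to be set):
#
#     if check_if_on_ci():
#         # After a passing suite run:
#         report_pass('navigation')
#         # After a failing run, ask the logging server whether it matches a known flake:
#         should_rerun = is_test_output_flaky(
#             ['WebDriverError: timeout waiting for element'], 'navigation')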
|
py | 1a326f97fb4215badb0550459b077fe4d2d89cd8 | # 2.1 - 4, 5 or 6
from qat.lang.AQASM import Program
from qat.lang.AQASM import H, X, CNOT, CCNOT
epr_prog = Program()
qbits = epr_prog.qalloc(3)
epr_prog.apply(X,qbits[0])
epr_prog.apply(H,qbits[1])
epr_prog.apply(H,qbits[2])
epr_prog.apply(H.ctrl(1),qbits[1],qbits[2])
#epr_prog.apply(H,qbits[1])
#epr_prog.apply(CNOT,qbits[1],qbits[2])
#epr_prog.apply(X,qbits[2])
circuit = epr_prog.to_circ()
%qatdisplay --svg circuit
from qat.qpus import PyLinalg
qpu = PyLinalg()
job = circuit.to_job()
result = qpu.submit(job)
for sample in result:
print("State", sample.state, "with probability :", sample.probability) |
py | 1a326fbfd1df346fceedd99185e1e987782ff437 | ############################################################################
# Copyright 2018 Anthony Ma & Stanford University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
##############################################################################
# Imports
##############################################################################
from vmd import *
from .stratify_hbonds import *
from .stratify_ligand_hbonds import *
__all__ = ['compute_hydrogen_bonds']
##############################################################################
# Globals
##############################################################################
WATER_TO_PROTEIN_DIST = 5
WATER_TO_LIGAND_DIST = 12
##############################################################################
# Functions
##############################################################################
def filter_duplicates(donors, acceptors):
"""
Filter out duplicate donor acceptor atom pairs
"""
pairs = sorted(list(set([(d, acceptors[idx]) for idx, d in enumerate(donors)])))
new_donors, new_acceptors = [], []
for d, a in pairs:
new_donors.append(d)
new_acceptors.append(a)
return new_donors, new_acceptors
def calc_ligand_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id, ligands,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE):
"""
Compute donor and acceptor atom pairs for hydrogen bonds in terms of numeric VMD indices
"""
donors, acceptors = [], []
for ligand in ligands:
if sele_id is None:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of resname %s) or " \
"(not carbon and not sulfur and protein within %s of resname %s) or " \
"(not carbon and not sulfur and resname %s) and (not lipid)\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_LIGAND_DIST, ligand, WATER_TO_LIGAND_DIST, ligand, ligand, frame_idx)
else:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of resname %s) or " \
"((not carbon and not sulfur and protein and (%s)) and within %s of resname %s) or " \
"(not carbon and not sulfur and resname %s) and (not lipid)\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_LIGAND_DIST, ligand, sele_id, WATER_TO_LIGAND_DIST, ligand, ligand,
frame_idx)
# donor_acceptor_indices should be of format "{106 91 85 99 120 130} {91 55 55 69 105 69} {107 92 86 100 121 131}"
donor_acceptor_indices = evaltcl(measure_hbonds_command)
# Parse atom indices
donor_acceptor_lists = donor_acceptor_indices.split("}")
# Filter out improperly parsed coordinates
if len(donor_acceptor_lists) != 4:
continue
donor_list = donor_acceptor_lists[0].split("{")[1].split(" ")
acceptor_list = donor_acceptor_lists[1].split("{")[1].split(" ")
for idx, d in enumerate(donor_list):
a = acceptor_list[idx]
if d == "" or a == "":
continue
donors.append(int(d))
acceptors.append(int(a))
return donors, acceptors
def calc_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE):
"""
Compute donor and acceptor atom pairs for hydrogen bonds in terms of numeric VMD indices
"""
# Measure Hbonds command
if sele_id is None:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of protein) or " \
"protein and not lipid and not carbon and not sulfur\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_PROTEIN_DIST, frame_idx)
else:
measure_hbonds_command = "measure hbonds %s %s [atomselect %s \"" \
"(resname %s and within %s of (protein and (%s))) or " \
"protein and (%s) and not lipid and not carbon and not sulfur\" frame %s]" % \
(HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE, traj_frag_molid, solvent_resn,
WATER_TO_PROTEIN_DIST, sele_id, sele_id, frame_idx)
# donor_acceptor_indices should be of format "{106 91 85 99 120 130} {91 55 55 69 105 69} {107 92 86 100 121 131}"
donor_acceptor_indices = evaltcl(measure_hbonds_command)
# Parse atom indices
donor_acceptor_lists = donor_acceptor_indices.split("}")
# Filter out improperly parsed coordinates
if len(donor_acceptor_lists) != 4:
return [], []
donor_list = donor_acceptor_lists[0].split("{")[1].split(" ")
acceptor_list = donor_acceptor_lists[1].split("{")[1].split(" ")
donors, acceptors = [], []
for idx, d in enumerate(donor_list):
a = acceptor_list[idx]
if d == "" or a == "":
continue
donors.append(int(d))
acceptors.append(int(a))
return donors, acceptors
def compute_hydrogen_bonds(traj_frag_molid, frame_idx, index_to_label, solvent_resn, sele_id, ligand=None,
HBOND_CUTOFF_DISTANCE=3.5, HBOND_CUTOFF_ANGLE=70):
"""
Compute hydrogen bonds involving protein for a single frame of simulation
Parameters
----------
traj_frag_molid: int
Specifies which trajectory fragment in VMD to perform computations upon
frame_idx: int
Specify frame index with respect to the smaller trajectory fragment
solvent_resn: string, default = TIP3
Denotes the resname of solvent in simulation
sele_id: string, default = None
Compute contacts on subset of atom selection based on VMD query
    ligand: list of string
        Residue names of the ligand(s) to compute hydrogen bonds with
HBOND_CUTOFF_DISTANCE: float, default = 3.5 Angstroms
HBOND_CUTOFF_ANGLE: float, default = 70 degrees
Return
------
hbonds: list of tuples, [(frame_idx, atom1_label, atom2_label, itype), ...]
"""
itype = "hb"
if ligand:
itype = "lhb"
donors, acceptors = calc_ligand_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id, ligand,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE)
else:
donors, acceptors = calc_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id,
HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE)
donors, acceptors = filter_duplicates(donors, acceptors)
hbonds = []
for idx, donor in enumerate(donors):
acceptor = acceptors[idx]
donor_label, acceptor_label = index_to_label[donor], index_to_label[acceptor]
hbonds.append([frame_idx, donor_label, acceptor_label, itype])
# Perform post processing on hbonds list to stratify into different subtypes
if itype == "hb":
hbond_subtypes = stratify_hbond_subtypes(hbonds, solvent_resn)
elif itype == "lhb":
hbond_subtypes = stratify_ligand_hbond_subtypes(hbonds, solvent_resn, ligand)
return hbond_subtypes
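# A minimal usage sketch (assumes a trajectory fragment is already loaded into VMD;
# the molid, frame index and index_to_label map below are hypothetical):
#
#     hbonds = compute_hydrogen_bonds(
#         traj_frag_molid=0, frame_idx=5, index_to_label=index_to_label,
#         solvent_resn="TIP3", sele_id=None)
#     # hbonds holds the stratified hydrogen-bond records for this frame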
|
py | 1a326ff4375136c99a2cc2172fd03ab598d4ead7 | import gnupg
import requests
import tempfile
import logging
logger = logging.getLogger(__name__)
def encrypt(data, keybase_ids):
with tempfile.TemporaryDirectory() as tmpdirname:
gpg = gnupg.GPG(gnupghome=tmpdirname)
fingerprints = []
for k in keybase_ids:
logger.info("reading from keybase public key {}".format(k))
result = requests.get("https://keybase.io/{}/key.asc".format(k))
result.raise_for_status()
import_result = gpg.import_keys(result.text)
if not import_result.fingerprints:
raise Exception("Error importing {} from keybase: {}".format(
k, import_result.results))
fingerprints += import_result.fingerprints
logger.info("{} keys processed".format(len(fingerprints)))
encrypted_ascii_data = gpg.encrypt(
data, fingerprints, always_trust=True)
if encrypted_ascii_data.ok:
return encrypted_ascii_data.data
else:
raise Exception("gpg error: {}".format(data.status))
|
py | 1a327006ac26e0c0e8203d6497d4adc221355ccb | import random
import numpy as np
import cv2
import lmdb
import torch
import torch.utils.data as data
import data.util as util
class LQGTDataset(data.Dataset):
"""
Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, etc) and GT image pairs.
If only GT images are provided, generate LQ images on-the-fly.
"""
def __init__(self, opt):
super(LQGTDataset, self).__init__()
self.opt = opt
self.data_type = self.opt['data_type']
self.paths_LQ, self.paths_GT = None, None
self.sizes_LQ, self.sizes_GT = None, None
self.LQ_env, self.GT_env = None, None # environments for lmdb
self.paths_GT, self.sizes_GT = util.get_image_paths(self.data_type, opt['dataroot_GT'])
self.paths_LQ, self.sizes_LQ = util.get_image_paths(self.data_type, opt['dataroot_LQ'])
assert self.paths_GT, 'Error: GT path is empty.'
if self.paths_LQ and self.paths_GT:
assert len(self.paths_LQ) == len(
self.paths_GT
), 'GT and LQ datasets have different number of images - {}, {}.'.format(
len(self.paths_LQ), len(self.paths_GT))
self.random_scale_list = [1]
def _init_lmdb(self):
# https://github.com/chainer/chainermn/issues/129
self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False,
meminit=False)
if self.paths_LQ is not None:
self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False,
meminit=False)
def __getitem__(self, index):
if self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_env is None):
self._init_lmdb()
GT_path, LQ_path = None, None
scale = self.opt['scale']
GT_size = self.opt['GT_size']
# get GT image
GT_path = self.paths_GT[index]
resolution = [int(s) for s in self.sizes_GT[index].split('_')
] if self.data_type == 'lmdb' else None
img_GT = util.read_img(self.GT_env, GT_path, resolution)
if self.opt['phase'] != 'train': # modcrop in the validation / test phase
img_GT = util.modcrop(img_GT, scale)
if self.opt['color']: # change color space if necessary
img_GT = util.channel_convert(img_GT.shape[2], self.opt['color'], [img_GT])[0]
# get LQ image
if self.paths_LQ:
LQ_path = self.paths_LQ[index]
resolution = [int(s) for s in self.sizes_LQ[index].split('_')
] if self.data_type == 'lmdb' else None
img_LQ = util.read_img(self.LQ_env, LQ_path, resolution)
else: # down-sampling on-the-fly
# randomly scale during training
if self.opt['phase'] == 'train':
random_scale = random.choice(self.random_scale_list)
H_s, W_s, _ = img_GT.shape
def _mod(n, random_scale, scale, thres):
rlt = int(n * random_scale)
rlt = (rlt // scale) * scale
return thres if rlt < thres else rlt
H_s = _mod(H_s, random_scale, scale, GT_size)
W_s = _mod(W_s, random_scale, scale, GT_size)
img_GT = cv2.resize(img_GT, (W_s, H_s), interpolation=cv2.INTER_LINEAR)
if img_GT.ndim == 2:
img_GT = cv2.cvtColor(img_GT, cv2.COLOR_GRAY2BGR)
H, W, _ = img_GT.shape
# using matlab imresize
img_LQ = util.imresize_np(img_GT, 1 / scale, True)
if img_LQ.ndim == 2:
img_LQ = np.expand_dims(img_LQ, axis=2)
if self.opt['phase'] == 'train':
# if the image size is too small
H, W, _ = img_GT.shape
if H < GT_size or W < GT_size:
img_GT = cv2.resize(img_GT, (GT_size, GT_size), interpolation=cv2.INTER_LINEAR)
# using matlab imresize
img_LQ = util.imresize_np(img_GT, 1 / scale, True)
if img_LQ.ndim == 2:
img_LQ = np.expand_dims(img_LQ, axis=2)
H, W, C = img_GT.shape
LQ_size = GT_size // scale
# randomly crop
rnd_h = random.randint(0, max(0, H//scale - LQ_size))
rnd_w = random.randint(0, max(0, W//scale - LQ_size))
img_LQ = img_LQ[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :]
rnd_h_GT, rnd_w_GT = int(rnd_h * scale), int(rnd_w * scale)
img_GT = img_GT[rnd_h_GT:rnd_h_GT + GT_size, rnd_w_GT:rnd_w_GT + GT_size, :]
# augmentation - flip, rotate
img_LQ, img_GT = util.augment([img_LQ, img_GT], self.opt['use_flip'],
self.opt['use_rot'])
if self.opt['color']: # change color space if necessary
img_LQ = util.channel_convert(img_LQ.shape[2], self.opt['color'], [img_LQ])[0]
# BGR to RGB, HWC to CHW, numpy to tensor
if img_GT.shape[2] == 3:
img_GT = img_GT[:, :, [2, 1, 0]]
img_LQ = img_LQ[:, :, [2, 1, 0]]
img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
img_LQ = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ, (2, 0, 1)))).float()
if LQ_path is None:
LQ_path = GT_path
return {'LQ': img_LQ, 'GT': img_GT, 'LQ_path': LQ_path, 'GT_path': GT_path}
def __len__(self):
return len(self.paths_GT)
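# A minimal usage sketch (hypothetical paths and option values; the keys mirror the
# ones this dataset reads from `opt` above):
#
#     opt = {'data_type': 'img', 'dataroot_GT': '/data/DIV2K/HR', 'dataroot_LQ': None,
#            'scale': 4, 'GT_size': 128, 'phase': 'train', 'color': None,
#            'use_flip': True, 'use_rot': True}
#     dataset = LQGTDataset(opt)
#     sample = dataset[0]  # dict with 'LQ', 'GT', 'LQ_path' and 'GT_path'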
|
py | 1a327398b55dba35ba3bc5880e0858dc25c1eb5e | from __future__ import absolute_import, print_function, division
import numpy as np
import scipy
import theano
from theano import gof, scalar, tensor
from theano.compat import izip
from theano.tensor import blas
from theano.tensor.opt import register_specialize, register_canonicalize
from theano.sparse import (CSC, CSR, csm_properties,
csm_grad, usmm, csm_indices, csm_indptr,
csm_data)
from theano.sparse import basic as sparse
_is_sparse_variable = sparse._is_sparse_variable
_is_dense = sparse._is_dense
# This is tested in tests/test_opt.py:test_local_csm_properties_csm
@gof.local_optimizer([csm_properties])
def local_csm_properties_csm(node):
"""
If we find csm_properties(CSM(*args)), then we can replace that with the
*args directly.
"""
if node.op == csm_properties:
csm, = node.inputs
if csm.owner and (csm.owner.op == CSC or csm.owner.op == CSR):
# csm.owner.inputs could be broadcastable. In that case, we have
# to adjust the broadcasting flag here.
ret_var = [theano.tensor.patternbroadcast(i, o.broadcastable)
for i, o in izip(csm.owner.inputs, node.outputs)]
return ret_var
return False
register_specialize(local_csm_properties_csm)
# This is tested in tests/test_basic.py:test_remove0
@gof.local_optimizer([sparse.Remove0])
def local_inplace_remove0(node):
"""
Optimization to insert inplace versions of Remove0.
"""
# If inplace is not enabled, enable it and replace that op with a
# new op which has inplace enabled
if isinstance(node.op, sparse.Remove0) and not node.op.inplace:
new_op = node.op.__class__(inplace=True)
new_node = new_op(*node.inputs)
return [new_node]
return False
theano.compile.optdb.register(
'local_inplace_remove0',
gof.TopoOptimizer(local_inplace_remove0,
failure_callback=gof.TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace')
class AddSD_ccode(gof.op.Op):
"""
Add a sparse and a dense matrix.
Parameters
----------
x
A sparse matrix.
y
A dense matrix
Returns
-------
matrix
`x`+`y`
Notes
-----
The grad implemented is structured on `x`.
"""
__props__ = ("format", "inplace")
def __init__(self, format, inplace=False, *args, **kwargs):
gof.Op.__init__(self, *args, **kwargs)
# Should we do inplace addition or not ?
self.inplace = inplace
self.format = format
if self.inplace:
self.destroy_map = {0: [3]}
def __str__(self):
inp = ''
if self.inplace:
inp = ',inplace'
return "%s{%s%s}" % (self.__class__.__name__,
self.format, inp)
def make_node(self, x, y):
x, y = sparse.as_sparse_variable(x), tensor.as_tensor_variable(y)
out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if self.inplace:
assert out_dtype == y.dtype
indices, indptr, data = csm_indices(x), csm_indptr(x), csm_data(x)
# We either use CSC or CSR depending on the format of input
assert self.format == x.type.format
# The magic number two here arises because L{scipy.sparse}
# objects must be matrices (have dimension 2)
assert y.type.ndim == 2
out = tensor.TensorType(dtype=out_dtype,
broadcastable=y.type.broadcastable)()
return gof.Apply(self,
[data, indices, indptr, y],
[out])
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, y) = inputs
(z,) = outputs
inplace = int(self.inplace)
format = {'csc': 0, 'csr': 1}[self.format]
out_typenum = node.outputs[0].type.dtype_specs()[2]
code = """
Py_XDECREF(%(z)s);
if (!%(inplace)s){
if(PyArray_TYPE(%(y)s) != %(out_typenum)s){
%(z)s = (PyArrayObject *) PyArray_FromArray(%(y)s, PyArray_DescrFromType(%(out_typenum)s), 0);
}else{
%(z)s = (PyArrayObject *) PyArray_NewCopy(%(y)s, NPY_CORDER);
}
}else{
%(z)s = %(y)s;
Py_XINCREF(%(z)s);
}
npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;
const npy_int32 * __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * __restrict__ indices = (npy_int32*)PyArray_DATA(%(_indices)s);
const dtype_%(_data)s* __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);
dtype_%(y)s* ydata = (dtype_%(y)s*)PyArray_DATA(%(y)s);
dtype_%(z)s* zdata = (dtype_%(z)s*)PyArray_DATA(%(z)s);
int Yi = PyArray_STRIDES(%(y)s)[0]/PyArray_DESCR(%(y)s)->elsize;
int Yj = PyArray_STRIDES(%(y)s)[1]/PyArray_DESCR(%(y)s)->elsize;
npy_int32 pos;
if (%(format)s == 0){
for (npy_int32 col = 0; col < N; ++col){
for (npy_int32 ind = indptr[col]; ind < indptr[col+1]; ++ind){
npy_int32 row = indices[ind];
pos = row * Yi + col * Yj;
zdata[pos] = ydata[pos] + data[ind];
}
}
}else{
for (npy_int32 row = 0; row < N; ++row){
for (npy_int32 ind = indptr[row]; ind < indptr[row+1]; ++ind){
npy_int32 col = indices[ind];
pos = row * Yi + col * Yj;
zdata[pos] = ydata[pos] + data[ind];
}
}
}
""" % dict(locals(), **sub)
return code
def infer_shape(self, node, shapes):
return [shapes[3]]
def c_code_cache_version(self):
return (1,)
@gof.local_optimizer([sparse.AddSD])
def local_inplace_addsd_ccode(node):
"""
Optimization to insert inplace versions of AddSD.
"""
if isinstance(node.op, sparse.AddSD) and theano.config.cxx:
out_dtype = scalar.upcast(*node.inputs)
if out_dtype != node.inputs[1].dtype:
return
new_node = AddSD_ccode(format=node.inputs[0].type.format,
inplace=True)(*node.inputs)
return [new_node]
return False
theano.compile.optdb.register(
'local_inplace_addsd_ccode',
gof.TopoOptimizer(local_inplace_addsd_ccode,
failure_callback=gof.TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace')
@register_canonicalize("fast_compile")
@register_specialize
@gof.local_optimizer([sparse.DenseFromSparse])
def local_dense_from_sparse_sparse_from_dense(node):
if isinstance(node.op, sparse.DenseFromSparse):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, sparse.SparseFromDense):
return inp.owner.inputs
@gof.local_optimizer([sparse.AddSD])
def local_addsd_ccode(node):
"""
Convert AddSD to faster AddSD_ccode.
"""
if isinstance(node.op, sparse.AddSD) and theano.config.cxx:
new_node = AddSD_ccode(format=node.inputs[0].type.format)(*node.inputs)
return [new_node]
return False
theano.compile.optdb.register('local_addsd_ccode',
gof.TopoOptimizer(local_addsd_ccode),
# Must be after local_inplace_addsd_ccode at 60
61, 'fast_run')
class StructuredDotCSC(gof.Op):
"""
Structured Dot CSC is like dot, except that only the gradient wrt non-zero
elements of the sparse matrix `a` are calculated and propagated.
The output is presumed to be a dense matrix, and is represented by a
TensorType instance.
Parameters
----------
a
A sparse matrix in csc format.
b
A sparse or dense matrix.
Returns
-------
The dot product of `a` and `b`.
Notes
-----
The grad implemented is structured.
This op is used as an optimization for StructuredDot.
"""
__props__ = ()
def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):
dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)
r = gof.Apply(self, [a_val, a_ind, a_ptr, a_nrows, b],
[tensor.tensor(dtype_out,
(False, b.type.broadcastable[1]))])
return r
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, a_nrows, b) = inputs
(out,) = outputs
a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
(a_nrows, b.shape[0]),
copy=False)
# out[0] = a.dot(b)
out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype)
assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense
def c_code(self, node, name, inputs, outputs, sub):
# C-implementation of the dot product of the sparse matrix A and matrix
# B.
# @param a_val: non-zero values of the sparse matrix
# @param a_ind: column indices of the non-null values (.indices of a
# scipy.csc_matrix)
# @param a_ptr: a_ptr indicates col indices for col. i are in the range
# a_ptr[i]:a_ptr[i+1]
# @param n_rows: number of rows of sparse matrix
# @param b: dense matrix to perform dot product with, as in dot(a, b)
# @param z: return value
# @param sub: TODO, not too sure, something to do with weave probably
(a_val, a_ind, a_ptr, a_nrows, b) = inputs
(z,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[4].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
typenum_z = node.outputs[0].type.dtype_specs()[2] # retrieve dtype number
typenum_a_val = node.inputs[0].type.dtype_specs()[2] # retrieve dtype number
typenum_b = node.inputs[4].type.dtype_specs()[2] # retrieve dtype number
rval = """
if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_val) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_ind) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_ptr) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, "rank(nrows) != 0"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2"); %(fail)s;}
if (PyArray_TYPE(%(a_val)s) != %(typenum_a_val)s) {
PyErr_SetString(PyExc_NotImplementedError, "Invalid type for a_val"); %(fail)s;}
if (PyArray_TYPE(%(b)s) != %(typenum_b)s) {
PyErr_SetString(PyExc_NotImplementedError, "Invalid type for b"); %(fail)s;}
if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "a_ind dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "a_ptr dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(a_nrows)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "a_nrows dtype not INT32"); %(fail)s;}
if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a_val and a_ind have different lengths"); %(fail)s;}
if (PyArray_DIMS(%(a_ptr)s)[0] != PyArray_DIMS(%(b)s)[0]+1)
{PyErr_SetString(PyExc_NotImplementedError, "a's number of columns doesn't match b's rows"); %(fail)s;}
if ((!%(z)s)
|| (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(a_nrows)s))[0])
|| (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[] = {0, 0};
dims[0] = ((npy_int32 *)PyArray_DATA(%(a_nrows)s))[0];
dims[1] = PyArray_DIMS(%(b)s)[1];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);
}
{
// sparse array has size MxK, dense KxN, output MxN
npy_intp M = PyArray_DIMS(%(z)s)[0];
npy_intp N = PyArray_DIMS(%(z)s)[1];
npy_intp K = PyArray_DIMS(%(b)s)[0];
// strides tell you how many bytes to skip to go to next column/row entry
npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;
npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;
//npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;
npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;
npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;
npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;
npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;
// pointers to access actual data in the arrays passed as params.
dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);
const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);
const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);
const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);
//npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];
//clear the output array
memset(Dz, 0, M*N*sizeof(dtype_%(z)s));
//iterate over the sparse array, making the most of an entry wherever we find it.
//
// Normal matrix matrix multiply: A MxK, B KxN => Z = AB
// for m
// for n
// for k
// z[m, n] += a[m, k] * b[k, n]
// Here instead: Z =
// for k
// for m (sparse)
// for n
// z[m, n] += a[m, k] * b[k, n]
// loop over inner dimension
for (npy_int32 k = 0; k < K; ++k)
{
// get pointer to k-th row of dense matrix
const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);
// loop over sparse column indices through index pointer array
// (amounts to looping over rows M of sparse matrix)
for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1) * Sptr]; ++m_idx)
{
npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K
const dtype_%(a_val)s Amk = Dval[m_idx * Sval]; // actual value at that location
// pointer to m-th row of the output matrix Z
dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);
//RESOLVE: a.shape[0] equals z.shape[0], why is this not an equality constraint?
if (m >= PyArray_DIMS(%(z)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "illegal row index in a"); %(fail)s;}
// loop over final dimension (cols of dense matrix) and perform dot product
if ((Szn == 1) && (Sbn == 1)) {
for(npy_int32 n = 0; n < N; ++n)
{
zm[n] += Amk * bk[n];
}
}
else
{
for(npy_int32 n = 0; n < N; ++n)
{
zm[n*Szn] += Amk * bk[n*Sbn];
}
}
}
}
}
""" % dict(locals(), **sub)
return rval
def c_code_cache_version(self):
return (2,)
sd_csc = StructuredDotCSC()
class StructuredDotCSR(gof.Op):
"""
Structured Dot CSR is like dot, except that only the
gradient wrt non-zero elements of the sparse matrix
`a` are calculated and propagated.
The output is presumed to be a dense matrix, and is represented by a
TensorType instance.
Parameters
----------
a
A sparse matrix in csr format.
b
A sparse or dense matrix.
Returns
-------
matrix
The dot product of `a` and `b`.
Notes
-----
The grad implemented is structured.
This op is used as an optimization for StructuredDot.
"""
__props__ = ()
def make_node(self, a_val, a_ind, a_ptr, b):
self.dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)
r = gof.Apply(self, [a_val, a_ind, a_ptr, b],
[tensor.tensor(self.dtype_out,
(False, b.type.broadcastable[1]))])
return r
def perform(self, node, inputs, outputs):
(a_val, a_ind, a_ptr, b) = inputs
(out,) = outputs
a = scipy.sparse.csr_matrix(
(a_val, a_ind, a_ptr),
(len(a_ptr) - 1, b.shape[0]),
copy=True) # use view_map before setting this to False
# out[0] = a.dot(b)
out[0] = a * b
        # scipy 0.7 automatically converts to dense, but scipy 0.6 sometimes does not
assert _is_dense(out[0])
def c_code(self, node, name, inputs, outputs, sub):
"""
C-implementation of the dot product of the sparse matrix A and matrix B.
Parameters
----------
a_val
Non-zero values of the sparse matrix.
a_ind
Column indices of the non-null values (.indices of a
scipy.csc_matrix).
a_ptr
Indicates col indices for col. i are in the range
a_ptr[i]:a_ptr[i+1].
n_cols
Number of columns of sparse matrix.
b
Dense matrix to perform dot product with, as in dot(a, b).
z
Return value.
sub
TODO, not too sure, something to do with weave probably.
"""
(a_val, a_ind, a_ptr, b) = inputs
(z,) = outputs
typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
return """
if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_val) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_ind) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_ptr) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2"); %(fail)s;}
if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "a_ind dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "a_ptr dtype not INT32"); %(fail)s;}
if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a_val and a_ind have different lengths"); %(fail)s;}
if ((!%(z)s)
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_ptr)s)[0]-1) //a's rows
|| (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1]) //b's columns
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[] = {0, 0};
dims[0] = PyArray_DIMS(%(a_ptr)s)[0]-1;
dims[1] = PyArray_DIMS(%(b)s)[1];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);
}
{
// sparse array has size MxK, dense KxN, output MxN
npy_intp M = PyArray_DIMS(%(z)s)[0];
npy_intp N = PyArray_DIMS(%(z)s)[1];
npy_intp K = PyArray_DIMS(%(b)s)[0];
// strides tell you how many bytes to skip to go to next column/row entry
npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;
npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;
npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;
npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;
npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;
npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;
npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;
// pointers to access actual data in the arrays passed as params.
dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);
const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);
const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);
const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);
//npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];
//clear the output array
memset(Dz, 0, M*N*sizeof(dtype_%(z)s));
//iterate over the sparse array, making the most of an entry wherever we find it.
// Normal matrix matrix multiply:
// for m
// for n
// for k
// z[m, n] += a[m, k] * b[k, n]
// Here instead:
// for m
// for k (sparse)
// for n
// z[m, n] += a[m, k] * b[k, n]
// loop over inner dimension
for (npy_int64 m = 0; m < M; ++m)
{
// pointer to m-th row of the output matrix Z
dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);
// loop over sparse rows indices through index pointer array
// (amounts to looping over cols k of sparse matrix)
for (npy_int32 k_idx = Dptr[m * Sptr]; k_idx < Dptr[(m+1) * Sptr]; ++k_idx)
{
npy_int32 k = Dind[k_idx * Sind]; // col index of non-null value for row m
const dtype_%(a_val)s Amk = Dval[k_idx * Sval]; // actual value at that location
// get pointer to k-th row of dense matrix
const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);
// loop over final dimension (cols of dense matrix) and perform dot product
for(npy_int32 n = 0; n < N; ++n)
{
zm[n*Szn] += Amk * bk[n*Sbn];
}
}
}
}
""" % dict(locals(), **sub)
def c_code_cache_version(self):
return (1,)
sd_csr = StructuredDotCSR()
# register a specialization to replace StructuredDot -> StructuredDotCSx
# This is tested in tests/test_basic.py:792
@gof.local_optimizer([sparse._structured_dot])
def local_structured_dot(node):
if node.op == sparse._structured_dot:
a, b = node.inputs
if a.type.format == 'csc':
a_val, a_ind, a_ptr, a_shape = csm_properties(a)
a_nsparse = a_shape[0]
return [sd_csc(a_val, a_ind, a_ptr, a_nsparse, b)]
if a.type.format == 'csr':
a_val, a_ind, a_ptr, a_shape = csm_properties(a)
return [sd_csr(a_val, a_ind, a_ptr, b)]
return False
# Commented out because
# a) it is only slightly faster than scipy these days, and sometimes a little
# slower, and
# b) the resulting graphs make it very difficult for an op to do size checking
# on the matrices involved. dimension mismatches are hard to detect sensibly.
# register_specialize(local_structured_dot)
class UsmmCscDense(gof.Op):
"""
    Performs the expression `alpha` * `x` `y` + `z`.
Parameters
----------
x
Matrix variable.
y
Matrix variable.
z
Dense matrix.
alpha
A tensor scalar.
Returns
-------
The dense matrix resulting from `alpha` * `x` `y` + `z`.
Notes
-----
The grad is not implemented for this op.
    Optimized version of Usmm when `x` is in csc format and `y` is dense.
"""
__props__ = ("inplace",)
def __init__(self, inplace):
self.inplace = inplace
if inplace:
self.destroy_map = {0: [6]}
def __str__(self):
if self.inplace:
return 'UsmmCscDense{inplace}'
else:
return 'UsmmCscDense{no_inplace}'
def make_node(self, alpha, x_val, x_ind, x_ptr, x_nrows, y, z):
alpha = tensor.as_tensor_variable(alpha)
x_val = tensor.as_tensor_variable(x_val)
x_ind = tensor.as_tensor_variable(x_ind)
x_ptr = tensor.as_tensor_variable(x_ptr)
x_nrows = tensor.as_tensor_variable(x_nrows)
y = tensor.as_tensor_variable(y)
z = tensor.as_tensor_variable(z)
assert x_ind.dtype == 'int32'
assert x_ptr.dtype == 'int32'
assert x_nrows.dtype == 'int32'
assert alpha.ndim == 2 and alpha.type.broadcastable == (True, True)
assert x_val.ndim == 1
assert y.ndim == 2
assert z.ndim == 2
dtype_out = scalar.upcast(alpha.type.dtype, x_val.type.dtype,
y.type.dtype, z.type.dtype)
if dtype_out not in ('float32', 'float64'):
raise NotImplementedError('only float types are supported in '
'operands')
if self.inplace:
assert z.type.dtype == dtype_out
# axpy work only with the same dtype, so we should upcast the input
if dtype_out != alpha.type.dtype:
alpha = tensor.cast(alpha, dtype_out)
if dtype_out != x_val.type.dtype:
x_val = tensor.cast(x_val, dtype_out)
if dtype_out != y.type.dtype:
y = tensor.cast(y, dtype_out)
if dtype_out != z.type.dtype:
z = tensor.cast(z, dtype_out)
r = gof.Apply(
self, [alpha, x_val, x_ind, x_ptr, x_nrows, y, z],
[tensor.tensor(dtype_out, (False, y.type.broadcastable[1]))])
return r
def c_support_code(self):
return blas.blas_header_text()
def c_libraries(self):
return blas.ldflags()
def c_compile_args(self):
return blas.ldflags(libs=False, flags=True)
def c_lib_dirs(self):
return blas.ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return blas.ldflags(libs=False, include_dir=True)
def c_code(self, node, name, inputs, outputs, sub):
alpha, x_val, x_ind, x_ptr, x_nrows, y, z = inputs
zn = outputs[0]
if node.inputs[1].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for '
'x_val')
if node.inputs[5].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for y')
if node.inputs[6].type.dtype != node.outputs[0].type.dtype:
raise NotImplementedError('z and output must have same type')
if node.inputs[1].type.dtype == "float32":
conv_type = "float"
axpy = "saxpy_"
else:
conv_type = "double"
axpy = "daxpy_"
# retrieve dtype numbers
typenum_alpha = node.inputs[0].type.dtype_specs()[2]
typenum_x_val = node.inputs[1].type.dtype_specs()[2]
typenum_y = node.inputs[5].type.dtype_specs()[2]
typenum_z = node.inputs[6].type.dtype_specs()[2]
typenum_zn = node.outputs[0].type.dtype_specs()[2]
inplace = int(self.inplace)
rval = """
if (PyArray_NDIM(%(x_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(x_val) != 1"); %(fail)s;}
if (PyArray_NDIM(%(x_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(x_ind) != 1"); %(fail)s;}
if (PyArray_NDIM(%(x_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(x_ptr) != 1"); %(fail)s;}
if (PyArray_NDIM(%(x_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, "rank(nrows) != 0"); %(fail)s;}
if (PyArray_NDIM(%(y)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(y) != 2"); %(fail)s;}
if (PyArray_TYPE(%(x_val)s) != %(typenum_x_val)s) {
PyErr_SetString(PyExc_NotImplementedError, "Invalid type for x_val"); %(fail)s;}
if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {
PyErr_SetString(PyExc_NotImplementedError, "Invalid type for y"); %(fail)s;}
if (PyArray_TYPE(%(z)s) != %(typenum_z)s) {
PyErr_SetString(PyExc_NotImplementedError, "Invalid type for z"); %(fail)s;}
if (PyArray_TYPE(%(alpha)s) != %(typenum_alpha)s) {
PyErr_SetString(PyExc_NotImplementedError, "Invalid type for alpha"); %(fail)s;}
if (PyArray_TYPE(%(x_ind)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "x_ind dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(x_ptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "x_ptr dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(x_nrows)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "x_nrows dtype not INT32"); %(fail)s;}
if (PyArray_DIMS(%(x_val)s)[0] != PyArray_DIMS(%(x_ind)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "x_val and x_ind have different lengths"); %(fail)s;}
if (PyArray_DIMS(%(x_ptr)s)[0] != PyArray_DIMS(%(y)s)[0]+1)
{PyErr_SetString(PyExc_NotImplementedError, "x's number of columns doesn't match y's rows"); %(fail)s;}
if (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0] || PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(y)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "The dimension of the allocated output doesn't match the correct output size."); %(fail)s;}
if (PyArray_SIZE(%(alpha)s) != 1)
{PyErr_SetString(PyExc_NotImplementedError, "The number of element in alpha must be 1"); %(fail)s;}
if (PyArray_NDIM(%(alpha)s) != 2)
{PyErr_SetString(PyExc_NotImplementedError, "The number dimension of alpha must be 2"); %(fail)s;}
if (PyArray_NDIM(%(x_val)s) != 1)
{PyErr_SetString(PyExc_NotImplementedError, "The number dimension of x_val must be 1"); %(fail)s;}
if (PyArray_NDIM(%(y)s) != 2)
{PyErr_SetString(PyExc_NotImplementedError, "The number dimension of y must be 2"); %(fail)s;}
if (PyArray_NDIM(%(z)s) != 2)
{PyErr_SetString(PyExc_NotImplementedError, "The number dimension of z must be 2"); %(fail)s;}
if (%(inplace)s)
{
if (%(typenum_zn)s != %(typenum_z)s) {
PyErr_SetString(PyExc_NotImplementedError, "When inplace the output dtype must be the same as the input"); %(fail)s;}
Py_XDECREF(%(zn)s);
%(zn)s = %(z)s;
Py_INCREF(%(zn)s);
}
else if (!%(zn)s
|| (PyArray_DIMS(%(zn)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0])
|| (PyArray_DIMS(%(zn)s)[1] != PyArray_DIMS(%(y)s)[1])
)
{
{Py_XDECREF(%(zn)s);}
npy_intp dims[] = {0, 0};
dims[0] = ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0];
dims[1] = PyArray_DIMS(%(y)s)[1];
%(zn)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_zn)s);
}
{
// sparse array has size MxK, dense KxN, output MxN
npy_intp M = PyArray_DIMS(%(zn)s)[0];
npy_intp N = PyArray_DIMS(%(zn)s)[1];
npy_intp K = PyArray_DIMS(%(y)s)[0];
// pointers to access actual data in the arrays passed as params.
const dtype_%(x_val)s* __restrict__ Dval = (dtype_%(x_val)s*)PyArray_DATA(%(x_val)s);
const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(x_ind)s);
const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(x_ptr)s);
const dtype_%(alpha)s alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
npy_intp Sz = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;
npy_intp Szn = PyArray_STRIDES(%(zn)s)[1] / PyArray_DESCR(%(zn)s)->elsize;
npy_intp Sval = PyArray_STRIDES(%(x_val)s)[0] / PyArray_DESCR(%(x_val)s)->elsize;
npy_intp Sind = PyArray_STRIDES(%(x_ind)s)[0] / PyArray_DESCR(%(x_ind)s)->elsize;
npy_intp Sptr = PyArray_STRIDES(%(x_ptr)s)[0] / PyArray_DESCR(%(x_ptr)s)->elsize;
npy_intp Sy = PyArray_STRIDES(%(y)s)[1] / PyArray_DESCR(%(y)s)->elsize;
if (!(%(inplace)s))
{
if (PyArray_CopyInto(%(zn)s, %(z)s))
{
Py_XDECREF(%(zn)s);
%(fail)s;
}
}
for (npy_int32 k = 0; k < K; ++k)
{
for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1)*Sptr]; ++m_idx)
{
const npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K
const dtype_%(x_val)s Amk = alpha * Dval[m_idx * Sval]; // actual value at that location
dtype_%(y)s* y_row = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * k);
// axpy expects pointer to the beginning of memory arrays,
// so when the stride is negative, we need to get the
// last element
if (Sy < 0)
y_row += (K - 1) * Sy;
dtype_%(zn)s* z_row = (dtype_%(zn)s*)(PyArray_BYTES(%(zn)s) + PyArray_STRIDES(%(zn)s)[0] * m);
if (Szn < 0)
z_row += (N - 1) * Szn;
%(axpy)s((int*)&N, (%(conv_type)s*)&Amk, (%(conv_type)s*)y_row, (int*)&Sy, (%(conv_type)s*)z_row, (int*)&Szn);
}
}
}
""" % dict(locals(), **sub)
return rval
def c_code_cache_version(self):
return (1, blas.blas_header_version())
usmm_csc_dense = UsmmCscDense(inplace=False)
usmm_csc_dense_inplace = UsmmCscDense(inplace=True)
# This is tested in tests/test_basic.py:UsmmTests
local_usmm = gof.opt.PatternSub(
(theano.tensor.sub, 'z',
(theano.tensor.mul,
{'pattern': 'alpha',
'constraint': lambda expr: (np.all(expr.type.broadcastable) and
theano.config.blas.ldflags)},
(sparse._dot, 'x', 'y'))),
(usmm, (theano.tensor.neg, 'alpha'), 'x', 'y', 'z'))
register_specialize(local_usmm, name="local_usmm")
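# A minimal sketch of the expression shape local_usmm matches (assumes the standard
# theano.sparse / theano.tensor API; the variable names are hypothetical and BLAS
# must be configured, i.e. theano.config.blas.ldflags non-empty):
#
#     x = sparse.csc_matrix('x')                    # sparse matrix variable
#     y = tensor.matrix('y')
#     z = tensor.matrix('z')
#     alpha = tensor.constant(np.asarray([[0.5]]))  # broadcastable (True, True)
#     out = z - alpha * sparse.dot(x, y)            # rewritten to usmm(-alpha, x, y, z)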
# register a specialization to replace usmm_csc_dense -> usmm_csc_dense_inplace
# This is tested in tests/test_basic.py:UsmmTests
@gof.local_optimizer([usmm_csc_dense])
def local_usmm_csc_dense_inplace(node):
if node.op == usmm_csc_dense:
return [usmm_csc_dense_inplace(*node.inputs)]
register_specialize(local_usmm_csc_dense_inplace, 'cxx_only', 'inplace')
# This is tested in tests/test_basic.py:UsmmTests
@gof.local_optimizer([usmm])
def local_usmm_csx(node):
"""
usmm -> usmm_csc_dense
"""
if node.op == usmm:
alpha, x, y, z = node.inputs
x_is_sparse_variable = _is_sparse_variable(x)
y_is_sparse_variable = _is_sparse_variable(y)
if x_is_sparse_variable and not y_is_sparse_variable:
if x.type.format == 'csc':
x_val, x_ind, x_ptr, x_shape = csm_properties(x)
x_nsparse = x_shape[0]
dtype_out = scalar.upcast(alpha.type.dtype, x.type.dtype,
y.type.dtype, z.type.dtype)
if dtype_out not in ('float32', 'float64'):
return False
# Sparse cast is not implemented.
if y.type.dtype != dtype_out:
return False
return [usmm_csc_dense(alpha, x_val, x_ind, x_ptr,
x_nsparse, y, z)]
return False
register_specialize(local_usmm_csx, 'cxx_only')
class CSMGradC(gof.Op):
__props__ = ()
def make_node(self, a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim):
return gof.Apply(self, [a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim], [b_val.type()])
def c_code(self, node, name, inputs, outputs, sub):
# retrieve dtype number
(a_val, a_ind, a_ptr, a_dim,
b_val, b_ind, b_ptr, b_dim) = inputs
(z,) = outputs
typenum_z = node.outputs[0].type.dtype_specs()[2]
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a_val')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b_val')
return """
if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_val) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_ind) != 1"); %(fail)s;}
if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a_ptr) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b_val) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b_ind) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b_ptr) != 1"); %(fail)s;}
if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "a_ind dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "a_ptr dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(b_ind)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "b_ind dtype not INT32"); %(fail)s;}
if (PyArray_TYPE(%(b_ptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "b_ptr dtype not INT32"); %(fail)s;}
if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a_val and a_ind have different lengths"); %(fail)s;}
if (PyArray_DIMS(%(b_val)s)[0] != PyArray_DIMS(%(b_ind)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "b_val and b_ind have different lengths"); %(fail)s;}
if (PyArray_DIMS(%(a_ptr)s)[0] != PyArray_DIMS(%(b_ptr)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a_ptr and b_ptr have different lengths"); %(fail)s;}
if ((!%(z)s) || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_val)s)[0]))
{
{Py_XDECREF(%(z)s);}
npy_intp dims[] = {0};
dims[0] = PyArray_DIMS(%(a_val)s)[0];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, %(typenum_z)s);
}
{
// sparse array has size MxK, dense KxN, output MxN
npy_intp M = PyArray_DIMS(%(a_ptr)s)[0] - 1;
npy_intp a_dim_0 = ((npy_int32 *)PyArray_DATA(%(a_dim)s))[0];
npy_intp a_dim_1 = ((npy_int32 *)PyArray_DATA(%(a_dim)s))[1];
npy_intp sp_dim = (M == a_dim_0)?a_dim_1:a_dim_0;
// strides tell you how many bytes to skip to go to next column/row entry
npy_intp Sz = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;
npy_intp Sa_val = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;
npy_intp Sa_ind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;
npy_intp Sa_ptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;
npy_intp Sb_val = PyArray_STRIDES(%(b_val)s)[0] / PyArray_DESCR(%(b_val)s)->elsize;
npy_intp Sb_ind = PyArray_STRIDES(%(b_ind)s)[0] / PyArray_DESCR(%(b_ind)s)->elsize;
npy_intp Sb_ptr = PyArray_STRIDES(%(b_ptr)s)[0] / PyArray_DESCR(%(b_ptr)s)->elsize;
// pointers to access actual data in the arrays passed as params.
dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);
const dtype_%(a_val)s* __restrict__ Da_val = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);
const npy_int32 * __restrict__ Da_ind = (npy_int32*)PyArray_DATA(%(a_ind)s);
const npy_int32 * __restrict__ Da_ptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);
const dtype_%(b_val)s* __restrict__ Db_val = (dtype_%(b_val)s*)PyArray_DATA(%(b_val)s);
const npy_int32 * __restrict__ Db_ind = (npy_int32*)PyArray_DATA(%(b_ind)s);
const npy_int32 * __restrict__ Db_ptr = (npy_int32*)PyArray_DATA(%(b_ptr)s);
npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];
dtype_%(b_val)s b_row[sp_dim];
//clear the output array
for (npy_int64 i = 0; i < nnz; ++i)
{
Dz[i*Sz] = 0;
}
memset(b_row, 0, sp_dim*sizeof(dtype_%(b_val)s));
// loop over inner dimension
for (npy_int64 m = 0; m < M; ++m)
{
for (npy_int32 j_ptr = Db_ptr[m * Sb_ptr];
j_ptr < Db_ptr[(m + 1) * Sb_ptr]; j_ptr++) {
b_row[Db_ind[j_ptr * Sb_ind]] += Db_val[j_ptr*Sb_val];
}
for (npy_int32 j_ptr = Da_ptr[m * Sa_ptr];
j_ptr < Da_ptr[(m + 1) * Sa_ptr]; j_ptr++) {
Dz[j_ptr*Sz] = b_row[Da_ind[j_ptr * Sa_ind]];
}
for (npy_int32 j_ptr = Db_ptr[m * Sb_ptr];
j_ptr < Db_ptr[(m + 1) * Sb_ptr]; j_ptr++) {
b_row[Db_ind[j_ptr * Sb_ind]] = 0;
}
}
}
""" % dict(locals(), **sub)
def c_code_cache_version(self):
return (3,)
csm_grad_c = CSMGradC()
# register a specialization to replace csm_grad -> csm_grad_c
# This is tested in tests/test_opt.py:test_local_csm_grad_c
@gof.local_optimizer([csm_grad(None)])
def local_csm_grad_c(node):
"""
csm_grad(None) -> csm_grad_c
"""
if node.op == csm_grad(None):
return [csm_grad_c(*node.inputs)]
return False
# DISABLED AS IT IS BROKEN FOR UNSORTED INDICES!
# register_specialize(local_csm_grad_c, 'cxx_only')
class MulSDCSC(gof.Op):
"""
Multiplication of sparse matrix by a broadcasted dense vector
element wise.
Parameters
----------
a_data
Sparse matrix data.
a_indices
Sparse matrix indices.
a_indptr
Sparse matrix indptr.
b
Tensor type matrix.
Returns
-------
The multiplication of the two matrices element-wise.
Notes
-----
`a_data`, `a_indices` and `a_indptr` must be the properties of a sparse
matrix in csc format.
The dtype of `a_data`, i.e. the dtype of the sparse matrix, cannot be a
complex type.
This op is used as an optimization of mul_s_d.
"""
__props__ = ()
def make_node(self, a_data, a_indices, a_indptr, b):
assert b.type.ndim == 2
return gof.Apply(self, [a_data, a_indices, a_indptr, b],
[tensor.tensor(b.dtype, (False,))])
def c_code_cache_version(self):
return (2,)
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplementedError()
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, _b,) = inputs
(_zout,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
return """
if (PyArray_NDIM(%(_b)s) != 2) {
PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2");
%(fail)s;}
if (PyArray_NDIM(%(_data)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(data) != 1");
%(fail)s;}
if (PyArray_NDIM(%(_indices)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1");
%(fail)s;}
if (PyArray_NDIM(%(_indptr)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1");
%(fail)s;}
if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if (!%(_zout)s ||
(PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]) ||
!(PyArray_ISCONTIGUOUS(%(_zout)s)))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,
PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));
if (!%(_zout)s)
{
PyErr_SetString(PyExc_MemoryError,
"Could not allocate output memory.");
%(fail)s;
}
}
{ //makes it compile even though labels jump over variable definitions.
const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
//TODO: error checking with this
const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;
const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);
const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);
dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);
const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0];
// loop over columns
for (npy_int32 j = 0; j < N; ++j)
{
// for each non-null value in the sparse column
for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)
{
// extract row index of non-null value
npy_int32 i = indices[i_idx];
// extract i-th row of dense matrix
const dtype_%(_b)s* __restrict__ b_row = (dtype_%(_b)s*)(PyArray_BYTES(%(_b)s) + Sb * i);
                    // write the product to the sparse output
zout[i_idx] = data[i_idx] * b_row[j];
}
}
}
""" % dict(locals(), **sub)
def __str__(self):
return self.__class__.__name__
mul_s_d_csc = MulSDCSC()
class MulSDCSR(gof.Op):
"""
    Element-wise multiplication of a sparse matrix by a dense matrix
    (for example, a broadcasted dense vector).
Parameters
----------
a_data
Sparse matrix data.
a_indices
Sparse matrix indices.
a_indptr
Sparse matrix indptr.
b
Tensor type matrix.
Returns
-------
    The element-wise multiplication of the two matrices.
Notes
-----
`a_data`, `a_indices` and `a_indptr` must be the properties
of a sparse matrix in csr format.
The dtype of `a_data`, i.e. the dtype of the sparse matrix,
cannot be a complex type.
This op is used as an optimization of mul_s_d.
"""
__props__ = ()
def make_node(self, a_data, a_indices, a_indptr, b):
assert b.type.ndim == 2
return gof.Apply(self, [a_data, a_indices, a_indptr, b],
[tensor.tensor(b.dtype, (False,))])
def c_code_cache_version(self):
return (2,)
# def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):
# return NotImplemented()
def c_code(self, node, name, inputs, outputs, sub):
(_data, _indices, _indptr, _b,) = inputs
(_zout,) = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
return """
if (PyArray_NDIM(%(_b)s) != 2) {
PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2");
%(fail)s;}
if (PyArray_NDIM(%(_data)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(data) != 1");
%(fail)s;}
if (PyArray_NDIM(%(_indices)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1");
%(fail)s;}
if (PyArray_NDIM(%(_indptr)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1");
%(fail)s;}
if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if (!%(_zout)s ||
(PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]) ||
!(PyArray_ISCONTIGUOUS(%(_zout)s)))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,
PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));
if (!%(_zout)s)
{
PyErr_SetString(PyExc_MemoryError,
"Could not allocate output memory.");
%(fail)s;
}
}
{ //makes it compile even though labels jump over variable definitions.
const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
//TODO: error checking with this
const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;
const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);
const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);
dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);
const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0];
            // loop over rows (CSR format)
for (npy_int32 j = 0; j < N; ++j)
{
                // extract the j-th row of the dense matrix
const dtype_%(_b)s* __restrict__ b_row = (dtype_%(_b)s*)(PyArray_BYTES(%(_b)s) + Sb * j);
                // for each non-null value in the sparse row
for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)
{
                    // extract the column index of the non-null value
npy_int32 i = indices[i_idx];
                    // write the product to the sparse output
zout[i_idx] = data[i_idx] * b_row[i];
}
}
}
""" % dict(locals(), **sub)
def __str__(self):
return self.__class__.__name__
mul_s_d_csr = MulSDCSR()
# register a specialization to replace MulSD -> MulSDCSX
@gof.local_optimizer([sparse.mul_s_d])
def local_mul_s_d(node):
if node.op == sparse.mul_s_d:
x, y = node.inputs
x_is_sparse_variable = _is_sparse_variable(x)
if x_is_sparse_variable:
svar = x
dvar = y
else:
svar = y
dvar = x
if dvar.type.ndim != 2:
return False
if svar.type.format == 'csc':
CSx = sparse.CSC
mul_s_d_csx = mul_s_d_csc
elif svar.type.format == 'csr':
CSx = sparse.CSR
mul_s_d_csx = mul_s_d_csr
else:
            raise NotImplementedError()
if x.dtype != y.dtype:
# mul_s_d_csx don't support that case
return
c_data = mul_s_d_csx(sparse.csm_data(svar),
sparse.csm_indices(svar),
sparse.csm_indptr(svar), dvar)
return [CSx(c_data,
sparse.csm_indices(svar),
sparse.csm_indptr(svar),
sparse.csm_shape(svar))]
return False
register_specialize(local_mul_s_d, 'cxx_only')
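# Note added for clarity (not part of the original file): with this
# specialization registered, a graph node mul_s_d(sp, dense) whose sparse
# operand is in CSC format is rewritten during the 'specialize' phase into
#   CSC(mul_s_d_csc(csm_data(sp), csm_indices(sp), csm_indptr(sp), dense),
#       csm_indices(sp), csm_indptr(sp), csm_shape(sp))
# so only the stored non-zeros are multiplied against the matching dense
# entries, exactly as built by local_mul_s_d above (CSR is analogous).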
class MulSVCSR(gof.Op):
"""
    Element-wise multiplication of a sparse matrix by a broadcasted
    dense vector.
Parameters
----------
a_data
Sparse matrix data.
a_indices
Sparse matrix indices.
a_indptr
Sparse matrix indptr.
b
Tensor type matrix.
Returns
-------
    The element-wise multiplication of the sparse matrix by the vector.
Notes
-----
`a_data`, `a_indices` and `a_indptr` must be the properties
of a sparse matrix in csr format.
The dtype of `a_data`, i.e. the dtype of the sparse matrix,
cannot be a complex type.
This op is used as an optimization of MulSV.
"""
__props__ = ()
def make_node(self, a_data, a_indices, a_indptr, b):
assert b.type.ndim == 1
return gof.Apply(self, [a_data, a_indices, a_indptr, b],
[tensor.tensor(b.dtype, (False,))])
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inputs, outputs, sub):
_data, _indices, _indptr, _b, = inputs
_zout, = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
return """
if (PyArray_NDIM(%(_b)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 1");
%(fail)s;
}
if (PyArray_NDIM(%(_data)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(data) != 1");
%(fail)s;
}
if (PyArray_NDIM(%(_indices)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1");
%(fail)s;
}
if (PyArray_NDIM(%(_indptr)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1");
%(fail)s;
}
if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if (!%(_zout)s
|| PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]
|| !PyArray_ISCONTIGUOUS(%(_zout)s))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,
PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));
}
{ //makes it compile even though labels jump over variable definitions.
const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
//TODO: error checking with this
const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;
const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);
const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);
const dtype_%(_b)s* __restrict__ Db = (dtype_%(_b)s*)PyArray_DATA(%(_b)s);
dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);
const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0] / PyArray_DESCR(%(_b)s)->elsize;
// loop over rows
for (npy_int32 j = 0; j < N; ++j)
{
                // for each non-null value in the sparse row
for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)
{
                    // extract the column index of the non-null value
npy_int32 i = indices[i_idx];
zout[i_idx] = data[i_idx] * Db[i * Sb];
}
}
}
""" % dict(locals(), **sub)
def __str__(self):
return self.__class__.__name__
mul_s_v_csr = MulSVCSR()
# register a specialization to replace MulSV -> MulSVCSR
@gof.local_optimizer([sparse.mul_s_v])
def local_mul_s_v(node):
if node.op == sparse.mul_s_v:
x, y = node.inputs
x_is_sparse_variable = _is_sparse_variable(x)
if x_is_sparse_variable:
svar = x
dvar = y
else:
svar = y
dvar = x
if dvar.type.ndim != 1:
return False
elif svar.type.format == 'csr':
CSx = sparse.CSR
mul_s_v_csx = mul_s_v_csr
else:
return False
s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)
c_data = mul_s_v_csx(s_val, s_ind, s_ptr, dvar)
return [CSx(c_data, s_ind, s_ptr, s_shape)]
return False
register_specialize(local_mul_s_v, 'cxx_only')
class StructuredAddSVCSR(gof.Op):
"""
Structured addition of a sparse matrix and a dense vector.
    The elements of the vector are only added to the corresponding
non-zero elements. Therefore, this operation outputs another sparse
matrix.
Parameters
----------
a_data
Sparse matrix data.
a_indices
Sparse matrix indices.
a_indptr
Sparse matrix indptr.
b
Tensor type vector.
Returns
-------
A sparse matrix containing the addition of the vector to the data of the
sparse matrix.
Notes
-----
The a_* are the properties of a sparse matrix in csr format.
This op is used as an optimization for StructuredAddSV.
"""
__props__ = ()
def make_node(self, a_data, a_indices, a_indptr, b):
b = tensor.as_tensor_variable(b)
a_data = tensor.as_tensor_variable(a_data)
a_indices = tensor.as_tensor_variable(a_indices)
a_indptr = tensor.as_tensor_variable(a_indptr)
assert a_data.type.ndim == 1
assert a_indices.type.ndim == 1
assert a_indptr.type.ndim == 1
assert b.type.ndim == 1
return gof.Apply(self, [a_data, a_indices, a_indptr, b],
[tensor.tensor(b.dtype, (False,))])
def c_code_cache_version(self):
return (2,)
def c_code(self, node, name, inputs, outputs, sub):
_data, _indices, _indptr, _b, = inputs
_zout, = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for a')
if node.inputs[3].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for b')
return """
if (PyArray_NDIM(%(_b)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 1");
%(fail)s;
}
if (PyArray_NDIM(%(_data)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(data) != 1");
%(fail)s;
}
if (PyArray_NDIM(%(_indices)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indices) != 1");
%(fail)s;
}
if (PyArray_NDIM(%(_indptr)s) != 1) {
PyErr_SetString(PyExc_NotImplementedError, "rank(indptr) != 1");
%(fail)s;
}
if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {
PyErr_SetString(PyExc_NotImplementedError, "C"); %(fail)s;}
if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)
{PyErr_SetString(PyExc_NotImplementedError, "D"); %(fail)s;}
if (!%(_zout)s
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0])
|| !(PyArray_ISCONTIGUOUS(%(_zout)s)))
{
Py_XDECREF(%(_zout)s);
%(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,
PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));
if (!%(_zout)s)
{
PyErr_SetString(PyExc_MemoryError,
"Could not allocate output memory.");
%(fail)s;
}
}
{ //makes it compile even though labels jump over variable definitions.
const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];
//TODO: error checking with this
const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;
const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);
const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);
const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);
const dtype_%(_b)s* __restrict__ Db = (dtype_%(_b)s*)PyArray_DATA(%(_b)s);
dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);
const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0] / PyArray_DESCR(%(_b)s)->elsize;
            // loop over rows (CSR format)
for (npy_int32 j = 0; j < N; ++j)
{
                // for each non-null value in the sparse row
for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)
{
                    // extract the column index of the non-null value
npy_int32 i = indices[i_idx];
                    // write the sum to the sparse output
zout[i_idx] = data[i_idx] + Db[i * Sb];
}
}
}
""" % dict(locals(), **sub)
def __str__(self):
return self.__class__.__name__
structured_add_s_v_csr = StructuredAddSVCSR()
# register a specialization to replace
# structured_add_s_v -> structured_add_s_v_csr
@gof.local_optimizer([sparse.structured_add_s_v])
def local_structured_add_s_v(node):
if node.op == sparse.structured_add_s_v:
x, y = node.inputs
x_is_sparse_variable = _is_sparse_variable(x)
# y_is_sparse_variable = _is_sparse_variable(y)
if x_is_sparse_variable:
svar = x
dvar = y
else:
svar = y
dvar = x
if dvar.type.ndim != 1:
return False
elif svar.type.format == 'csr':
CSx = sparse.CSR
structured_add_s_v_csx = structured_add_s_v_csr
else:
return False
s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)
c_data = structured_add_s_v_csx(s_val, s_ind, s_ptr, dvar)
return [CSx(c_data, s_ind, s_ptr, s_shape)]
return False
register_specialize(local_structured_add_s_v, 'cxx_only')
class SamplingDotCSR(gof.Op):
"""
Operand optimized for calculating the dot product dot(`x`, `y`.T) = `z`
when you only want to calculate a subset of `z`.
It is equivalent to `p` o (`x` . `y`.T) where o is the element-wise
    product, `x` and `y` are the operands of the dot product, and `p` is a matrix
that contains 1 when the corresponding element of `z` should be
calculated and 0 when it shouldn't. Note that SamplingDot has a different
interface than `dot` because SamplingDot requires `x` to be a `m`x`k`
matrix while `y` is a `n`x`k` matrix instead of the usual `k`x`n` matrix.
Parameters
----------
x
Tensor matrix.
y
Tensor matrix.
p_data
Sparse matrix data.
p_ind
Sparse matrix indices.
p_ptr
        Sparse matrix indptr.
p_ncols
Sparse matrix number of columns.
Returns
-------
A dense matrix containing the dot product of `x` by `y`.T only
where `p` is 1.
Notes
-----
    It will work if the pattern is not a binary value, but if the
    pattern does not have a high sparsity proportion it will be slower
    than a more optimized dot followed by a normal elemwise
    multiplication.
    If the inputs have mixed dtypes, we insert cast elemwise ops in
    the graph so that the BLAS functions can be called, as they do not
    allow mixed dtypes.
This op is used as an optimization for SamplingDot.
"""
__props__ = ()
def make_node(self, x, y, p_data, p_ind, p_ptr, p_ncols):
x = tensor.as_tensor_variable(x)
y = tensor.as_tensor_variable(y)
p_data = tensor.as_tensor_variable(p_data)
p_ind = tensor.as_tensor_variable(p_ind)
p_ptr = tensor.as_tensor_variable(p_ptr)
p_ncols = tensor.as_tensor_variable(p_ncols)
assert p_ncols.dtype == 'int32'
dtype_out = scalar.upcast(x.type.dtype, y.type.dtype,
p_data.type.dtype)
dot_out = scalar.upcast(x.type.dtype, y.type.dtype)
# We call blas ?dot function that take only param of the same type
x = tensor.cast(x, dot_out)
y = tensor.cast(y, dot_out)
return gof.Apply(self, [x, y, p_data, p_ind, p_ptr, p_ncols], [
tensor.tensor(dtype=dtype_out, broadcastable=(False,)),
tensor.tensor(dtype=p_ind.type.dtype, broadcastable=(False,)),
tensor.tensor(dtype=p_ptr.type.dtype, broadcastable=(False,))
])
def c_code_cache_version(self):
return (2, blas.blas_header_version())
def c_support_code(self):
return blas.blas_header_text()
def c_libraries(self):
return blas.ldflags()
def c_compile_args(self):
return blas.ldflags(libs=False, flags=True)
def c_lib_dirs(self):
return blas.ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return blas.ldflags(libs=False, include_dir=True)
def c_code(self, node, name, inputs, outputs, sub):
x, y, p_data, p_ind, p_ptr, p_ncols = inputs
z_data, z_ind, z_ptr = outputs
if node.inputs[0].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for x')
if node.inputs[1].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError('Complex types are not supported for y')
if node.inputs[2].type.dtype in ('complex64', 'complex128'):
raise NotImplementedError(
'Complex types are not supported for pattern')
dot_out = scalar.upcast(node.inputs[0].type.dtype,
node.inputs[1].type.dtype)
if dot_out == "float32":
conv_type = "float"
cdot = "sdot_"
else:
conv_type = "double"
cdot = "ddot_"
# retrieve dtype number
typenum_x = node.inputs[0].type.dtype_specs()[2]
typenum_y = node.inputs[1].type.dtype_specs()[2]
typenum_p = node.inputs[2].type.dtype_specs()[2]
typenum_zd = tensor.TensorType(node.outputs[0].dtype,
[]).dtype_specs()[2]
typenum_zi = tensor.TensorType(node.outputs[1].dtype,
[]).dtype_specs()[2]
typenum_zp = tensor.TensorType(node.outputs[2].dtype,
[]).dtype_specs()[2]
rval = """
if (PyArray_NDIM(%(x)s) != 2) {
PyErr_SetString(PyExc_NotImplementedError, "rank(x) != 2"); %(fail)s;}
if (PyArray_NDIM(%(y)s) != 2) {
PyErr_SetString(PyExc_NotImplementedError, "rank(y) != 2"); %(fail)s;}
if (PyArray_TYPE(%(x)s) != %(typenum_x)s) {
PyErr_SetString(PyExc_NotImplementedError,
"Invalid type for x");
%(fail)s;}
if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {
PyErr_SetString(PyExc_NotImplementedError,
"Invalid type for y");
%(fail)s;}
if (PyArray_TYPE(%(p_data)s) != %(typenum_p)s) {
PyErr_SetString(PyExc_NotImplementedError,
"Invalid type for pattern");
%(fail)s;}
if (PyArray_DIMS(%(x)s)[1] != PyArray_DIMS(%(y)s)[1]) {
PyErr_SetString(PyExc_NotImplementedError,
"x's number of columns doesn't match y's rows! Note: sampling_dot is different from dot because y is assumed to be transposed.");
%(fail)s;}
if (PyArray_DIMS(%(y)s)[0] != ((npy_int32 *)PyArray_DATA(%(p_ncols)s))[0] ||
PyArray_DIMS(%(x)s)[0] != (PyArray_DIMS(%(p_ptr)s)[0] - 1))
{PyErr_SetString(PyExc_NotImplementedError,
"The dimension of the pattern and the output must match"); %(fail)s;}
// Allocate output
if (!%(z_data)s
|| (PyArray_DIMS(%(z_data)s)[0] != PyArray_DIMS(%(p_data)s)[0])
|| (PyArray_TYPE(%(z_data)s) != %(typenum_zd)s)
|| !(PyArray_ISCONTIGUOUS(%(z_data)s)))
{
{Py_XDECREF(%(z_data)s);}
npy_intp dims[] = {0};
dims[0] = PyArray_DIMS(%(p_data)s)[0];
%(z_data)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,
%(typenum_zd)s);
}
if (!%(z_ind)s
|| (PyArray_DIMS(%(z_ind)s)[0] != PyArray_DIMS(%(p_ind)s)[0])
|| (PyArray_TYPE(%(z_ind)s) != %(typenum_zi)s)
|| !(PyArray_ISCONTIGUOUS(%(z_ind)s)))
{
{Py_XDECREF(%(z_ind)s);}
npy_intp dims[] = {0};
dims[0] = PyArray_DIMS(%(p_ind)s)[0];
%(z_ind)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,
%(typenum_zi)s);
}
if (!%(z_ptr)s
|| (PyArray_DIMS(%(z_ptr)s)[0] != PyArray_DIMS(%(p_ptr)s)[0])
|| (PyArray_TYPE(%(z_ptr)s) != %(typenum_zp)s)
|| !(PyArray_ISCONTIGUOUS(%(z_ptr)s)))
{
{Py_XDECREF(%(z_ptr)s);}
npy_intp dims[] = {0};
dims[0] = PyArray_DIMS(%(p_ptr)s)[0];
%(z_ptr)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,
%(typenum_zp)s);
}
{
// Product of MxK and NxK, output MxN
npy_intp M = PyArray_DIMS(%(x)s)[0];
npy_intp N = PyArray_DIMS(%(y)s)[0];
npy_intp K = PyArray_DIMS(%(y)s)[1];
// pointers to access actual data in the arrays passed as params.
const dtype_%(x)s* __restrict__ Dx = (dtype_%(x)s*)PyArray_DATA(%(x)s);
const dtype_%(y)s* __restrict__ Dy = (dtype_%(y)s*)PyArray_DATA(%(y)s);
const dtype_%(p_data)s* __restrict__ Dpd = (dtype_%(p_data)s*)PyArray_DATA(%(p_data)s);
const dtype_%(p_ind)s* __restrict__ Dpi = (dtype_%(p_ind)s*)PyArray_DATA(%(p_ind)s);
const dtype_%(p_ptr)s* __restrict__ Dpp = (dtype_%(p_ptr)s*)PyArray_DATA(%(p_ptr)s);
dtype_%(z_data)s* __restrict__ Dzd = (dtype_%(z_data)s*)PyArray_DATA(%(z_data)s);
dtype_%(z_ind)s* __restrict__ Dzi = (dtype_%(z_ind)s*)PyArray_DATA(%(z_ind)s);
dtype_%(z_ptr)s* __restrict__ Dzp = (dtype_%(z_ptr)s*)PyArray_DATA(%(z_ptr)s);
const npy_intp Sdx = PyArray_STRIDES(%(x)s)[1]/PyArray_DESCR(%(x)s)->elsize;
const npy_intp Sdy = PyArray_STRIDES(%(y)s)[1]/PyArray_DESCR(%(y)s)->elsize;
const npy_intp Sdpd = PyArray_STRIDES(%(p_data)s)[0] / PyArray_DESCR(%(p_data)s)->elsize;
const npy_intp Sdpi = PyArray_STRIDES(%(p_ind)s)[0] / PyArray_DESCR(%(p_ind)s)->elsize;
const npy_intp Sdpp = PyArray_STRIDES(%(p_ptr)s)[0] / PyArray_DESCR(%(p_ptr)s)->elsize;
const npy_intp Sdzd = PyArray_STRIDES(%(z_data)s)[0] / PyArray_DESCR(%(z_data)s)->elsize;
const npy_intp Sdzi = PyArray_STRIDES(%(z_ind)s)[0] / PyArray_DESCR(%(z_ind)s)->elsize;
const npy_intp Sdzp = PyArray_STRIDES(%(z_ptr)s)[0] / PyArray_DESCR(%(z_ptr)s)->elsize;
memcpy(Dzi, Dpi, PyArray_DIMS(%(p_ind)s)[0]*sizeof(dtype_%(p_ind)s));
memcpy(Dzp, Dpp, PyArray_DIMS(%(p_ptr)s)[0]*sizeof(dtype_%(p_ptr)s));
for (npy_int32 m = 0; m < M; ++m) {
for (npy_int32 n_idx = Dpp[m * Sdpp]; n_idx < Dpp[(m+1)*Sdpp]; ++n_idx) {
const npy_int32 n = Dpi[n_idx * Sdpi]; // row index of non-null value for column K
const dtype_%(x)s* x_row = (dtype_%(x)s*)(PyArray_BYTES(%(x)s) + PyArray_STRIDES(%(x)s)[0] * m);
const dtype_%(y)s* y_col = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * n);
Dzd[n_idx * Sdzd] = Dpd[n_idx * Sdpd] * %(cdot)s((int*)&K, (const %(conv_type)s*)x_row, (int*)&Sdx, (const %(conv_type)s*)y_col, (int*)&Sdy);
}
}
}
""" % dict(locals(), **sub)
return rval
sampling_dot_csr = SamplingDotCSR()
# register a specialization to replace SamplingDot -> SamplingDotCsr
@gof.local_optimizer([sparse.sampling_dot])
def local_sampling_dot_csr(node):
if not theano.config.blas.ldflags:
# The C implementation of SamplingDotCsr relies on BLAS routines
return
if node.op == sparse.sampling_dot:
x, y, p = node.inputs
if p.type.format == 'csr':
p_data, p_ind, p_ptr, p_shape = sparse.csm_properties(p)
z_data, z_ind, z_ptr = sampling_dot_csr(x, y, p_data,
p_ind, p_ptr, p_shape[1])
return [sparse.CSR(z_data, z_ind, z_ptr, p_shape)]
return False
register_specialize(local_sampling_dot_csr,
'cxx_only',
name='local_sampling_dot_csr')
|
py | 1a3275e81f1392c34377ba285d3738a80fdb06c3 | from depth2mesh.metaavatar import (
config, training, models
)
__all__ = [
config, training, models
]
|
py | 1a3275ecad033337d167943767081fa03758076f | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2client.exceptions.base import StackStormCLIBaseException
class OperationFailureException(StackStormCLIBaseException):
pass
|
py | 1a327604a97f481b8e6afc5f1941a61064593c0c | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
"""Function to add two numbers together in a linked list notation
Args:
l1 (ListNode): Integer number 1 represented as a linked list
l2 (ListNode): Integer number 2 represented as a linked list
Returns:
ListNode: the resulting sum returned as a linked list
"""
head = ListNode(0)
cur = head
carry = 0
while l1 or l2 or carry:
            # Grab each digit, treating an exhausted list (None) as 0
val1 = l1.val if l1 else 0
val2 = l2.val if l2 else 0
# Find the value of the two nodes, and determine if there's a carry for next value
sum_value = val1 + val2 + carry
carry = sum_value // 10
sum_value = sum_value % 10
# Create node and append to the list
node = ListNode(sum_value)
            # Move to the next node in each list
l1 = l1.next if l1 else 0
l2 = l2.next if l2 else 0
cur.next = node
cur = node
return head.next
|
py | 1a327616be20db304ef1e6d9ee9c7a81983c269e | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70918
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
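# Note added for clarity: hash256 is Bitcoin's double-SHA256. Human-readable
# block/tx hashes are the byte-reversed hex of this digest, which is why the
# calc_sha256 methods below encode hash256(...)[::-1].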
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
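# Illustrative round trips for the CompactSize encoding above (added, not in
# the original file):
#   ser_compact_size(100)    == b'\x64'                  # 1 byte, value < 253
#   ser_compact_size(1000)   == b'\xfd\xe8\x03'          # 0xfd + uint16 LE
#   ser_compact_size(100000) == b'\xfe\xa0\x86\x01\x00'  # 0xfe + uint32 LE
# and deser_compact_size(BytesIO(ser_compact_size(n))) == n for each of them.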
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
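# Added note: ser_uint256 writes the integer as 32 little-endian bytes, so
# deser_uint256(BytesIO(ser_uint256(n))) == n for any 0 <= n < 2**256.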
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
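# Illustrative example (added): for the well-known compact target 0x1d00ffff,
# nbytes is 0x1d and the mantissa is 0x00ffff, so
# uint256_from_compact(0x1d00ffff) == 0xffff << (8 * (0x1d - 3)).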
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "MSG_ERROR",
1: "MSG_TX",
2: "MSG_BLOCK",
3: "MSG_FILTERED_BLOCK",
4: "MSG_TXLOCK_REQUEST",
5: "MSG_TXLOCK_VOTE",
6: "MSG_SPORK",
7: "MSG_MASTERNODE_WINNER",
8: "MSG_MASTERNODE_SCANNING_ERROR",
9: "MSG_BUDGET_VOTE",
10: "MSG_BUDGET_PROPOSAL",
11: "MSG_BUDGET_FINALIZED",
12: "MSG_BUDGET_FINALIZED_VOTE",
13: "MSG_MASTERNODE_QUORUM",
14: "MSG_MASTERNODE_QUORUM",
15: "MSG_MASTERNODE_ANNOUNCE",
16: "MSG_MASTERNODE_PING",
17: "MSG_DSTX",
18: "MSG_PUBCOINS",
19: "MSG_GENWIT",
20: "MSG_ACC_VALUE"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def serialize_uniqueness(self):
r = b""
r += struct.pack("<I", self.n)
r += ser_uint256(self.hash)
return r
def deserialize_uniqueness(self, f):
self.n = struct.unpack("<I", f.read(4))[0]
self.hash = deser_uint256(f)
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
NullOutPoint = COutPoint(0, 0xffffffff)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
def is_zerocoinspend(self):
return bytes_to_hex_str(self.scriptSig)[:2] == "c2"
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def is_coinbase(self):
return (
len(self.vin) == 1 and
self.vin[0].prevout == NullOutPoint and
(not self.vin[0].is_zerocoinspend())
)
def is_coinstake(self):
return (
len(self.vin) == 1 and
len(self.vout) >= 2 and
self.vout[0] == CTxOut()
)
def from_hex(self, hexstring):
f = BytesIO(hex_str_to_bytes(hexstring))
self.deserialize(f)
def spends(self, outpoint):
return len([x for x in self.vin if
x.prevout.hash == outpoint.hash and x.prevout.n == outpoint.n]) > 0
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
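# Illustrative sketch (added; not part of the original framework): build a
# minimal one-input / one-output transaction with the classes above and
# compute its txid. The outpoint hash and the OP_TRUE script are made up.
def _example_build_tx():
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0xdeadbeef, 0), b"", 0xffffffff))
    tx.vout.append(CTxOut(1 * COIN, b"\x51"))  # 0x51 == OP_TRUE placeholder
    tx.rehash()  # fills tx.sha256 and tx.hash from the serialized bytes
    return tx.hash  # hex txid (byte-reversed double-SHA256)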
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# oasis
def solve_stake(self, stakeInputs, prevModifier):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for uniqueness in stakeInputs:
nvalue, _, prevTime = stakeInputs[uniqueness]
target = int(target0 * nvalue / 100) % 2**256
data = b""
# always modifier V2 (256 bits) on regtest
data += ser_uint256(prevModifier)
data += struct.pack("<I", prevTime)
# prevout is CStake uniqueness
data += uniqueness
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = uniqueness
loop = False
break
if loop:
self.nTime += 1
return True
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
self.sig_key = None # not serialized / used only to re_sign
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
self.sig_key = None
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
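    # Added note: when the number of hashes at a level is odd, i2 clamps to
    # the last index, so the final hash is paired with itself; e.g. for
    # [h0, h1, h2] the next level is [hash256(h0 + h1), hash256(h2 + h2)].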
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
self.sig_key = key
self.low_s = low_s
def re_sign_block(self):
if self.sig_key == None:
raise Exception("Unable to re-sign block. Key Not present, use 'sign_block' first.")
return self.sign_block(self.sig_key, self.low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
py | 1a327792f1393e2d40262df96a107b0522a48ae2 | import requests
from bs4 import BeautifulSoup
import time
import twilio
from twilio.rest import Client
import twilioauth as twauth
import tkinter as tk
window = tk.Tk()
class GUI:
def RunGUI(self):
l1 = tk.Label(window, text="Enter website name here ")
l2 = tk.Label(window, text="Enter checkrate here (in seconds) ")
l3 = tk.Label(window, text="Enter SID here ")
l4 = tk.Label(window, text="Enter Twilio Authorization here ")
l5 = tk.Label(window, text="Enter Twilio phone number here ")
l6 = tk.Label(window, text="Enter user phone number here ")
        self.entWebsite = tk.Entry(window)  # tk.Entry has no "text" option; the default value is inserted below
self.entCheckRate = tk.Entry(window)
self.entSID = tk.Entry(window)
self.entAuth = tk.Entry(window)
self.entTwilPhone = tk.Entry(window)
self.entUserPhone = tk.Entry(window)
self.entWebsite.insert(0, "https://www.imdb.com/name/nm3485845/")
self.entCheckRate.insert(0, "86400")
self.entSID.insert(0, twauth.twiliodict["sid"])
self.entAuth.insert(0, twauth.twiliodict["auth"])
self.entTwilPhone.insert(0, twauth.twiliodict["twilphone"])
self.entUserPhone.insert(0, twauth.twiliodict["userphone"])
button = tk.Button(window, text="Finish", width=25, command=self.end)
l1.grid(row=0, column=0, pady=2)
l2.grid(row=1, column=0, pady=2)
l3.grid(row=2, column=0, pady=2)
l4.grid(row=3, column=0, pady=2)
l5.grid(row=4, column=0, pady=2)
l6.grid(row=5, column=0, pady=2)
self.entWebsite.grid(row=0, column=2, pady=2)
self.entCheckRate.grid(row=1, column=2, pady=2)
self.entSID.grid(row=2, column=2, pady=2)
self.entAuth.grid(row=3, column=2, pady=2)
self.entTwilPhone.grid(row=4, column=2, pady=2)
self.entUserPhone.grid(row=5, column=2, pady=2)
button.grid(row=6, column=1)
window.mainloop()
def end(self):
# stationdictionary.update({elevation : name})
        # strip any thousands separators the user may have typed, e.g. "86,400"
        tempCheckRate = self.entCheckRate.get().replace(",", "")
global userDictionary
userDictionary = {
"websiteName": self.entWebsite.get(),
"checkRate": tempCheckRate,
"SID": self.entSID.get(),
"twilioAuth": self.entAuth.get(),
"twilioPhone": self.entTwilPhone.get(),
"phoneNumber": self.entUserPhone.get(),
}
print(userDictionary)
# big credit to Andrew for like.. figuring it out, thanks Java
window.destroy()
numberOfCredits = ""
def magic_happens():
    # numberOfCredits is module-level state updated below; without this declaration
    # the assignment inside the function would raise UnboundLocalError
    global numberOfCredits
    #url = "https://www.imdb.com/name/nm3485845/?ref_=fn_al_nm_1" #website
url = userDictionary["websiteName"]
response = requests.get(url) #get html from site
soup = BeautifulSoup(response.content,"html.parser") #add html to BeautifulSoup
div = soup.find(id = "filmo-head-actor" ) #search soup for tag h1\
unwanted = div.find('span') #remove stuff
unwanted.extract()
unwanted = div.find('span')
unwanted.extract()
unwanted = div.find('a')
unwanted.extract()
    credit_text = div.text  # renamed so the built-in str is not shadowed
    newStr = credit_text.replace("(", " ")
    newestStr = newStr.replace(")", " ")
if numberOfCredits == "":
numberOfCredits = newestStr
elif numberOfCredits != newestStr:
output("Adam Driver now has" + newestStr)
numberOfCredits = newestStr
print(newestStr) #print text
def output(change_message):
twilio_sid = twauth.twiliodict["sid"]
twilio_auth = twauth.twiliodict["auth"]
twilio_phone = twauth.twiliodict["twilphone"]
user_phone = twauth.twiliodict["userphone"]
# print(twilio_sid, twilio_auth, twilio_phone, user_phone, message)
client = Client(twilio_sid, twilio_auth)
message = client.messages.create(
body=change_message, from_=twilio_phone, to=user_phone
)
print(message.sid)
if __name__ == "__main__":
firstGUI = GUI()
firstGUI.RunGUI()
    while True:
magic_happens()
time.sleep(int(userDictionary["checkRate"])) |
py | 1a3277f33a6d0a0b0d387c4d76ef06ee027b7dd8 | #!/usr/bin/env python
# coding: utf-8
"""
pokepy.api
User interaction with this package is done through this file.
"""
import functools
import os
import sys
import types
from collections import namedtuple
import appdirs # dependency of FileCache
from beckett.clients import BaseClient
from beckett.constants import DEFAULT_VALID_STATUS_CODES
from .fcache.cache import FileCache
from . import resources_v2 as rv2
from . import __version__
class V2Client(BaseClient):
"""Pokéapi client"""
class Meta(BaseClient.Meta):
name = 'pokepy-v2-client-' + __version__
base_url = 'https://pokeapi.co/api/v2'
resources = (
rv2.BerryResource,
rv2.BerryFirmnessResource,
rv2.BerryFlavorResource,
rv2.ContestTypeResource,
rv2.ContestEffectResource,
rv2.SuperContestEffectResource,
rv2.EncounterMethodResource,
rv2.EncounterConditionResource,
rv2.EncounterConditionValueResource,
rv2.EvolutionChainResource,
rv2.EvolutionTriggerResource,
rv2.GenerationResource,
rv2.PokedexResource,
rv2.VersionResource,
rv2.VersionGroupResource,
rv2.ItemResource,
rv2.ItemAttributeResource,
rv2.ItemCategoryResource,
rv2.ItemFlingEffectResource,
rv2.ItemPocketResource,
rv2.MachineResource,
rv2.MoveResource,
rv2.MoveAilmentResource,
rv2.MoveBattleStyleResource,
rv2.MoveCategoryResource,
rv2.MoveDamageClassResource,
rv2.MoveLearnMethodResource,
rv2.MoveTargetResource,
rv2.LocationResource,
rv2.LocationAreaResource,
rv2.PalParkAreaResource,
rv2.RegionResource,
rv2.AbilityResource,
rv2.CharacteristicResource,
rv2.EggGroupResource,
rv2.GenderResource,
rv2.GrowthRateResource,
rv2.NatureResource,
rv2.PokeathlonStatResource,
rv2.PokemonResource,
rv2.PokemonColorResource,
rv2.PokemonFormResource,
rv2.PokemonHabitatResource,
rv2.PokemonShapeResource,
rv2.PokemonSpeciesResource,
rv2.StatResource,
rv2.TypeResource,
rv2.LanguageResource
)
def __init__(self, cache=None, cache_location=None, *args, **kwargs):
"""
Parameters
----------
cache: str
cache can be 'in_memory' or 'in_disk',
for memory-based or disk-based cache, respectively.
Optional.
cache_location: str
cache directory, for disk-based cache.
Optional.
"""
if cache is None: # empty wrapping function
def no_cache(func):
@functools.wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
cache_function = no_cache
else:
if cache in ['in_memory', 'in_disk']:
cache_function = self._caching(cache.split('in_')[1], cache_location)
self.cache_type = cache
def cache_info_total(self):
return self._cache_info_(self._cache_hits_global,
self._cache_misses_global,
self._cache_len_global)
def cache_clear_total(self):
for get_method_name in self._all_get_methods_names:
getattr(self, get_method_name).cache_clear()
def cache_location_absolute(self):
return self._cache_location_global
# global cache related methods
self.cache_info = types.MethodType(cache_info_total, self)
self.cache_clear = types.MethodType(cache_clear_total, self)
self.cache_location = types.MethodType(cache_location_absolute, self)
self._cache_hits_global = 0
self._cache_misses_global = 0
self._cache_len_global = 0
self._cache_location_global = ''
self._cache_info_ = namedtuple('CacheInfo', ['hits', 'misses', 'size'])
else: # wrong cache parameter
raise ValueError('Accepted values for cache are "in_memory" or "in_disk"')
self._cache = cache_function
self._all_get_methods_names = []
super(V2Client, self).__init__(*args, **kwargs)
def _assign_method(self, resource_class, method_type):
"""
Exactly the same code as the original except:
- uid is now first parameter (after self). Therefore, no need to explicitly call 'uid='
- Ignored the other http methods besides GET (as they are not needed for the pokeapi.co API)
- Added cache wrapping function
- Added a way to list all get methods
- Added a filter for single element lists (extract element into a standalone object)
"""
method_name = resource_class.get_method_name(
resource_class, method_type)
valid_status_codes = getattr(
resource_class.Meta,
'valid_status_codes',
DEFAULT_VALID_STATUS_CODES
)
def extract_single_element_list(func):
@functools.wraps(func)
def inner(*args, **kwargs):
final = func(*args, **kwargs)
if len(final) == 1:
final = final[0]
return final
return inner
# uid is now the first argument (after self)
@self._cache
@extract_single_element_list
def get(self, uid=None, method_type=method_type,
method_name=method_name,
valid_status_codes=valid_status_codes,
resource=resource_class, data=None, **kwargs):
uid = uid.lower() if isinstance(uid, str) else uid
return self.call_api(
method_type, method_name,
valid_status_codes, resource,
data, uid=uid, **kwargs)
# only GET method is used
setattr(
self, method_name,
types.MethodType(get, self)
)
# for easier listing of get methods
self._all_get_methods_names.append(method_name)
def _caching(self, disk_or_memory, cache_directory=None):
"""
Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected.
"""
if disk_or_memory not in ('disk', 'memory'):
raise ValueError('Accepted values are "disk" or "memory"')
# Because of how BaseClient get methods are generated, they don't get a proper __name__.
# As such, it is hard to generate a specific cache directory name for each get method.
# Therefore, I decided to just generate a number for each folder, starting at zero.
# The same get methods get the same number every time because their order doesn't change.
# Also, variable is incremented inside a list because nonlocals are only python 3.0 and up.
get_methods_id = [0]
def memoize(func):
_global_cache_dir = ''
if disk_or_memory == 'disk':
if cache_directory:
# Python 2 and 3.4 workaround
if (sys.version_info[0] == 2 and not
isinstance(cache_directory, (str, unicode))) or (
sys.version_info[0:2] == (3, 4) and not
isinstance(cache_directory, str)):
raise TypeError('expected str, not %s' % cache_directory.__class__.__name__)
_global_cache_dir = os.path.join(cache_directory, 'pokepy_cache')
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
else:
_global_cache_dir = appdirs.user_cache_dir('pokepy_cache', False,
opinion=False)
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
cache = FileCache('pokepy', flag='cs', app_cache_dir=cache_dir)
get_methods_id[0] += 1
else: # 'memory'
cache = {}
_global_cache_dir = 'ram'
# global cache directory
# should only be set when setting the first get method
if not self._cache_location_global:
self._cache_location_global = _global_cache_dir
hits = [0]
misses = [0]
def cache_info():
return self._cache_info_(hits[0], misses[0], len(cache))
def cache_clear():
# global cache info
self._cache_hits_global -= hits[0]
self._cache_misses_global -= misses[0]
self._cache_len_global -= len(cache)
# local cache info
hits[0] = 0
misses[0] = 0
cache.clear() # for disk-based cache, files are deleted but not the directories
if disk_or_memory == 'disk':
cache.create() # recreate cache file handles
def cache_location():
return 'ram' if disk_or_memory == 'memory' else cache.cache_dir
@functools.wraps(func)
def memoizer(*args, **kwargs):
# arguments to the get methods can be a value or uid=value
key = str(args[1]) if len(args) > 1 else str(kwargs.get("uid"))
if key not in cache:
# local and global cache info
misses[0] += 1
self._cache_misses_global += 1
cache[key] = func(*args, **kwargs)
self._cache_len_global += 1
else:
self._cache_hits_global += 1 # global cache info
hits[0] += 1 # local cache info
return cache[key]
memoizer.cache_info = cache_info
memoizer.cache_clear = cache_clear
memoizer.cache_location = cache_location
return memoizer
return memoize
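# --- Minimal usage sketch (not part of the original module) ---
# Illustrates the cache behaviour documented in _caching() above, using the
# get_pokemon() call mentioned in that docstring. Assumes network access to
# pokeapi.co and that the package is importable (e.g. run as `python -m pokepy.api`).
if __name__ == '__main__':
    client = V2Client(cache='in_memory')
    client.get_pokemon(1)           # first call: cache miss, fetched from the API
    client.get_pokemon(uid=1)       # second call: served from the in-memory cache
    print(client.cache_info())      # e.g. CacheInfo(hits=1, misses=1, size=1)
    print(client.cache_location())  # 'ram' for memory-based cache
    client.cache_clear()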
|
py | 1a3277fd197789aae9ece15f900ca4026de96269 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 AT&T
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
'''Voting App Example for Music.'''
# Standard library imports
import time
# Related third party imports
# Local application/library specific imports
from music import Music
def current_time_millis():
'''Current time in milliseconds.'''
return int(round(time.time()*1000))
def main():
'''Sample usage of Music.'''
kwargs = {'host': 'localhost'}
music = Music(**kwargs)
print "Music version %s" % music.version()
# Randomize the name so that we don't step on each other.
keyspace = 'NewVotingApp' + str(current_time_millis()/100)
music.create_keyspace(keyspace)
print "Created keyspace: %s" % keyspace
# Create the table
kwargs = {
'keyspace': keyspace,
'table': 'votecount',
'schema': {
'name': 'text',
'count': 'varint',
'PRIMARY KEY': '(name)'
}
}
music.create_table(**kwargs)
# Candidate data
data = {
'Trump': 5,
'Bush': 7,
'Jeb': 8,
'Clinton': 2,
'Bharath': 0
}
# Create an entry in the voting table for each candidate
# and with a vote count of 0.
kwargs = {'keyspace': keyspace, 'table': 'votecount'}
for name in data.iterkeys():
kwargs['values'] = {'name': name, 'count': 0}
music.create_row(**kwargs)
# Update each candidate's count atomically.
kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
for name, count in data.iteritems():
kwargs['pk_value'] = name
kwargs['values'] = {'count': count}
music.update_row_atomically(**kwargs)
# Read all rows
kwargs = {'keyspace': keyspace, 'table': 'votecount'}
print music.read_all_rows(**kwargs)
# Delete Clinton, read Trump
kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
kwargs['pk_value'] = 'Clinton'
music.delete_row_eventually(**kwargs)
kwargs['pk_value'] = 'Trump'
print music.read_row(**kwargs)
# Read all rows again
kwargs = {'keyspace': keyspace, 'table': 'votecount'}
print music.read_all_rows(**kwargs)
# Cleanup.
music.drop_keyspace(keyspace)
music.delete_all_locks()
if __name__ == "__main__":
main()
|
py | 1a327843777455d97e4ee4320c8e3127e00c9412 | #!/usr/bin/env python2.7
import sys
for line in open(sys.argv[2]):
if not line.startswith(">"): print line.strip()
else:
if int(line.split("_")[3])<int(sys.argv[1]): break
else: print line.strip()
|
py | 1a327888d647c2b8978c60fe42872755a2c60743 | from dataset import *
from visualize import *
from forward import *
# utils
from libs.utils import _init_fn
from libs.load_model import *
def prepare_dataloader(cfg, dict_DB):
# train dataloader
if cfg.run_mode == 'train':
dataset = Train_Dataset_SLNet(cfg)
trainloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=True,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['trainloader'] = trainloader
val_dataset = Train_Dataset_SLNet(cfg, mode='val')
valloader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=cfg.batch_size['img'],
shuffle=True,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['valloader'] = valloader
# test dataloader
if cfg.dataset == 'SEL':
dataset = SEL_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_test_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
else:
dataset = SEL_Hard_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['testloader'] = testloader
return dict_DB
def prepare_model(cfg, dict_DB):
if 'test' in cfg.run_mode:
dict_DB = load_SLNet_for_test(cfg, dict_DB)
if 'train' in cfg.run_mode:
dict_DB = load_SLNet_for_train(cfg, dict_DB)
dict_DB['forward_model'] = Forward_Model(cfg=cfg)
return dict_DB
def prepare_visualization(cfg, dict_DB):
dict_DB['visualize'] = Visualize_plt(cfg=cfg)
return dict_DB
def prepare_training(cfg, dict_DB):
logfile = cfg.output_dir + 'train/log/logfile.txt'
mkdir(path=cfg.output_dir + 'train/log/')
if cfg.run_mode == 'train' and cfg.resume == True:
rmfile(path=logfile)
val_result = {'AUC_A': 0, 'AUC_P': 0,
'AUC_R_upper_P_0.80': 0,
'AUC_R_upper_P_0.82': 0}
dict_DB['val_result'] = val_result
dict_DB['epoch'] = 0
dict_DB['logfile'] = logfile
return dict_DB
|
py | 1a3278c3bb00771452c941b839b830547632af7f | from toga.interface import WebView as WebViewInterface
from .base import WidgetMixin
from .. import impl
class WebView(WebViewInterface, WidgetMixin):
def __init__(self, id=None, url=None, on_key_down=None, style=None):
super().__init__(id=id, style=style, url=url, on_key_down=on_key_down)
self._create()
def create(self):
self._impl = impl.WebView(
id=self.id,
url=self._config['url'],
on_key_down=self.handler(self._config['on_key_down'], 'on_key_down') if self._config['on_key_down'] else None,
style=self.style,
)
def set_on_key_down(self, handler):
pass
def set_on_webview_load(self, handler):
pass
def set_url(self, value):
self._impl.url = value
# def _set_window(self, window):
# super()._set_window(window)
# if self.on_press:
# self.window.callbacks[(self.id, 'on_press')] = self.on_press
def get_dom(self):
self.interface.factory.not_implemented('WebView.get_dom()')
def set_user_agent(self, value):
self.interface.factory.not_implemented('WebView.set_user_agent()')
def set_placeholder(self, value):
self.interface.factory.not_implemented('WebView.set_placeholder()')
def set_readonly(self, value):
self.interface.factory.not_implemented('WebView.set_readonly()')
def get_value(self):
return self._impl.value
def set_value(self, value):
self.interface.factory.not_implemented('WebView.set_value()')
def set_content(self, root_url, content):
self.interface.factory.not_implemented('WebView.set_content()')
async def evaluate_javascript(self, javascript):
self.interface.factory.not_implemented('WebView.evaluate_javascript()')
def invoke_javascript(self, javascript):
self.interface.factory.not_implemented('WebView.invoke_javascript()')
|
py | 1a327901240157c50a5c8a170c5c4f42c364df8f | import redis
# redis Cache class to get and set HashMap values to redis so that user creds in key.json can be stored and accessed
class setCache:
value=""
key=""
r=redis.Redis()
def __init__(self):
self.r = redis.Redis(host='localhost',port='6379') # redis server to be hosted in local - can also be a remote redis server with auth
def setValue(self,username,key):
self.r.hmset(username,key)
def getValue(self,username,key):
return(self.r.hget(username,key))
def __del__(self): # flush all will clear the redis cache of all credentials
self.r.flushall()
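# --- Minimal usage sketch (not part of the original module) ---
# Shows how the class above might stash per-user credential fields from key.json
# in a Redis hash. Assumes a local Redis server on port 6379; the user name and
# field names are placeholders.
if __name__ == "__main__":
    cache = setCache()
    cache.setValue("alice", {"client_id": "abc123", "client_secret": "s3cret"})
    print(cache.getValue("alice", "client_id"))  # b'abc123' (redis-py returns bytes)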
|
py | 1a3279df8aeab4ec725f789b93b3720ed371ee72 | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from mailchimp_marketing.api.activity_feed_api import ActivityFeedApi
from mailchimp_marketing.api.authorized_apps_api import AuthorizedAppsApi
from mailchimp_marketing.api.automations_api import AutomationsApi
from mailchimp_marketing.api.batch_webhooks_api import BatchWebhooksApi
from mailchimp_marketing.api.batches_api import BatchesApi
from mailchimp_marketing.api.campaign_folders_api import CampaignFoldersApi
from mailchimp_marketing.api.campaigns_api import CampaignsApi
from mailchimp_marketing.api.connected_sites_api import ConnectedSitesApi
from mailchimp_marketing.api.conversations_api import ConversationsApi
from mailchimp_marketing.api.customer_journeys_api import CustomerJourneysApi
from mailchimp_marketing.api.ecommerce_api import EcommerceApi
from mailchimp_marketing.api.facebook_ads_api import FacebookAdsApi
from mailchimp_marketing.api.file_manager_api import FileManagerApi
from mailchimp_marketing.api.landing_pages_api import LandingPagesApi
from mailchimp_marketing.api.lists_api import ListsApi
from mailchimp_marketing.api.ping_api import PingApi
from mailchimp_marketing.api.reporting_api import ReportingApi
from mailchimp_marketing.api.reports_api import ReportsApi
from mailchimp_marketing.api.root_api import RootApi
from mailchimp_marketing.api.search_campaigns_api import SearchCampaignsApi
from mailchimp_marketing.api.search_members_api import SearchMembersApi
from mailchimp_marketing.api.template_folders_api import TemplateFoldersApi
from mailchimp_marketing.api.templates_api import TemplatesApi
from mailchimp_marketing.api.verified_domains_api import VerifiedDomainsApi
|
py | 1a327ad3557a547da5041e1d6c1a50e2cff84e0f | # Practical Machine learning
# Bayesian learning - Naive Bayes example
# Chapter 9
from datatypes import Dataset
from classifier import naive_bayes, svm, naive_bayes_custom, knn
from feature_selection import univariate_feature_selection, lda, pca
from sklearn.cross_validation import train_test_split
from numpy import mean, var, sum, diag, shape
def load_spam_ds():
"""
Loads the data from file and build the dataset in scikit format.
() -> Dataset
"""
data = []
target = []
i = 0
with open("data/spambase.data", "r") as f:
for line in f:
# Removes \r\n from line
line = line.replace("\n","").replace("\r","")
items = line.split(",")
features = [float(item) for item in items[:-1]]
spam_class = int(items[-1])
data.append(features)
target.append(spam_class)
return Dataset(data, target)
def split_train_test(ds):
"""
Given the dataset, split in two datasets:
One is the Training set. Other is the Test set.
The proportion is 80% to 20% Respectively
Dataset -> Dataset, Dataset
"""
samples_train, samples_test, classes_train, classes_test = train_test_split(ds.data, ds.target, test_size=0.2)
training_set = Dataset(samples_train, classes_train)
test_set = Dataset(samples_test, classes_test)
return training_set, test_set
def run(n=0, dimension_reduction=univariate_feature_selection, learning=naive_bayes_custom):
"""
Starts the classification Pipeline
"""
ds = load_spam_ds()
if n > 0 and n < len(ds.data):
ds = dimension_reduction(ds, n)
evaluate(ds, learning)
def evaluate(ds, classifier_class, iterations=10):
'''
Train a given classifier n times
and prints his confusion matrix and the accuracy of the classifier
with a margin of error (by Chebychev Inequation)
'''
results = []
for i in range(iterations):
training_set, test_set = split_train_test(ds)
classifier = classifier_class(training_set)
cm = 1.0 * classifier.classify(test_set) / len(test_set.data)
results += [cm]
cm_mean = mean(results, axis=0)
cm_variance = var(results, axis=0)
print ("Accuracy of", sum(diag(cm_mean))*100, "% (+-", iterations * sum(diag(cm_variance)), ") with", (1 - 1.0/(iterations*iterations)), "of certain." )
print ("\nConfusion Matrix:\n",cm_mean,"\n")
if __name__ == "__main__":
algo=[naive_bayes_custom, naive_bayes, knn, svm]
feature=[univariate_feature_selection, pca, lda]
num=[1,10,0]
for n in num:
for f in feature:
if (n==0):
print("\nUsing all features")
else:
print("\nUsing",n,"feature(s) (", f.__name__, ")" )
print("=======================================================\n")
for a in algo:
print("* Learning Algorithm:", a.__name__)
run(n, f, a)
if (n==0):
break
|
py | 1a327af1d83f544669a4f210cd9c30495e4be5d1 | import unittest
import numpy as np
from codeplag.algorithms.featurebased import (
op_shift_metric, counter_metric,
get_children_indexes, struct_compare,
find_max_index, matrix_value,
add_not_counted
)
class TestFeaturebased(unittest.TestCase):
def test_counter_metric_normal(self):
example1 = {'a': 2, 'b': 1, 'c': 5, 'd': 7}
example2 = {'a': 10, 'c': 8, 'e': 2, 'f': 12}
example3 = {'USub': 3, 'Mor': 3, 'Der': 5}
example4 = {'USub': 5, 'Mor': 5, 'Ker': 5}
res1 = counter_metric(example1, example2)
res2 = counter_metric(example3, example4)
res3 = counter_metric({}, example4)
res4 = counter_metric({}, {})
self.assertEqual(res1, 0.175)
self.assertEqual(res2, 0.3)
self.assertEqual(res3, 0.0)
self.assertEqual(res4, 1.0)
'''
Numba forbid bad arguments
def test_counter_metric_bad_args(self):
res1 = counter_metric("", [])
res2 = counter_metric([], [])
'''
# self.assertEqual(TypeError, res1)
# self.assertEqual(TypeError, res2)
def test_op_shift_metric_normal(self):
empty_list = []
example1 = ['+', '-', '=']
example2 = ['+', '+=', '/', '%']
example3 = ['+', '-=', '/', '%']
example4 = ['-', '+', '%', '*', '+=']
example5 = ['%', '*', '+=']
res3 = op_shift_metric(empty_list, empty_list)
res4 = op_shift_metric(example1, empty_list)
res5 = op_shift_metric(empty_list, example1)
res6 = op_shift_metric(example2, example3)
res7 = op_shift_metric(example4, example5)
self.assertEqual(res3, (0, 1.0))
self.assertEqual(res4, (0, 0.0))
self.assertEqual(res5, (0, 0.0))
self.assertEqual(res6[0], 0)
self.assertAlmostEqual(res6[1], 0.6, 2)
self.assertEqual(res7[0], 2)
self.assertAlmostEqual(res7[1], 0.6, 2)
def test_get_children_indexes_normal(self):
example1 = [(1, 2), (2, 3), (3, 5), (2, 4), (2, 5), (1, 6)]
example2 = [(3, 4), (3, 2), (4, 5), (3, 1), (4, 8), (3, 8)]
example3 = [(2, 1), (3, 4), (3, 10), (4, 1), (2, 5), (2, 9)]
ind1, c_ch1 = get_children_indexes(example1, len(example1))
ind2, c_ch2 = get_children_indexes(example2, len(example2))
ind3, c_ch3 = get_children_indexes(example3, len(example3))
self.assertEqual(c_ch1, 2)
self.assertEqual(ind1[0], 0)
self.assertEqual(ind1[1], 5)
self.assertEqual(c_ch2, 4)
self.assertEqual(ind2[0], 0)
self.assertEqual(ind2[1], 1)
self.assertEqual(ind2[2], 3)
self.assertEqual(ind2[3], 5)
self.assertEqual(c_ch3, 3)
self.assertEqual(ind3[0], 0)
self.assertEqual(ind3[1], 4)
self.assertEqual(ind3[2], 5)
def test_find_max_index(self):
arr1 = np.array([[[1, 2], [2, 3]],
[[3, 4], [5, 10]]])
res1 = find_max_index(arr1)
arr2 = np.array([[[8, 2], [100, 15]],
[[3, 14], [1, 13]]])
res2 = find_max_index(arr2)
res3 = find_max_index(np.array([[]]))
self.assertEqual(res1[0], 1)
self.assertEqual(res1[1], 0)
self.assertEqual(res2[0], 0)
self.assertEqual(res2[1], 1)
self.assertEqual(res3[0], 0)
self.assertEqual(res3[1], 0)
def test_matrix_value(self):
arr1 = np.array([[[1, 2], [2, 3]],
[[3, 4], [5, 10]]])
metric1, indexes1 = matrix_value(arr1)
arr2 = np.array([[[8, 2], [100, 15]],
[[3, 14], [1, 13]]])
metric2, indexes2 = matrix_value(arr2)
metric3, indexes3 = matrix_value(np.array([[]]))
self.assertEqual(metric1[0], 6)
self.assertEqual(metric1[1], 8)
self.assertEqual(indexes1[0][0], 1)
self.assertEqual(indexes1[0][1], 0)
self.assertEqual(indexes1[1][0], 0)
self.assertEqual(indexes1[1][1], 1)
self.assertEqual(metric2[0], 104)
self.assertEqual(metric2[1], 30)
self.assertEqual(indexes2[0][0], 0)
self.assertEqual(indexes2[0][1], 1)
self.assertEqual(indexes2[1][0], 1)
self.assertEqual(indexes2[1][1], 0)
self.assertEqual(metric3[0], 1)
self.assertEqual(metric3[1], 1)
self.assertEqual(indexes3, [])
def test_add_not_counted(self):
structure = ((1, 2), (2, 1), (1, 3), (2, 4),
(3, 5), (1, 4), (2, 2), (2, 5))
res1 = add_not_counted(structure, 3, [0, 2, 5, len(structure)],
[[0, 0], [1, 1]], axis=0)
self.assertEqual(res1, 3)
    # It would be nice to rewrite this for the general case rather than specifically for codeplag
def test_struct_compare_normal(self):
structure1 = [(1, 0), (2, 1), (3, 2),
(3, 2), (2, 3), (3, 4),
(4, 5), (3, 6), (3, 4),
(4, 7), (2, 8)]
structure2 = [(1, 0), (2, 1), (2, 2),
(3, 3), (4, 4), (5, 5),
(4, 1), (4, 1), (4, 1),
(1, 6), (2, 7), (3, 8),
(3, 8), (3, 8), (2, 9)]
count_ch1 = (get_children_indexes(structure1, len(structure1)))[1]
count_ch2 = (get_children_indexes(structure2, len(structure2)))[1]
compliance_matrix = np.zeros((count_ch1, count_ch2, 2),
dtype=np.int64)
res = struct_compare(structure1, structure2,
compliance_matrix)
self.assertEqual(res, [6, 22])
self.assertEqual(list(compliance_matrix[0][0]), [5, 15])
self.assertEqual(list(compliance_matrix[0][1]), [5, 12])
structure1 = [(1, 0), (2, 1), (3, 2),
(2, 3), (3, 4), (4, 5),
(3, 6), (4, 7), (2, 3),
(3, 4), (4, 5), (3, 6),
(4, 7), (2, 3), (3, 4)]
structure2 = [(1, 0), (2, 1), (3, 2), (2, 3),
(3, 4), (4, 5), (3, 6), (4, 7),
(5, 4), (6, 8), (5, 8), (4, 9),
(2, 3), (3, 4), (4, 5), (3, 6),
(4, 4), (5, 8), (4, 10), (5, 4)]
count_ch1 = (get_children_indexes(structure1, len(structure1)))[1]
count_ch2 = (get_children_indexes(structure2, len(structure2)))[1]
compliance_matrix = np.zeros((count_ch1, count_ch2, 2),
dtype=np.int64)
res = struct_compare(structure1, structure2,
compliance_matrix)
self.assertEqual(res, [14, 23])
self.assertEqual(compliance_matrix[0][0][0], 13)
self.assertEqual(compliance_matrix[0][0][1], 22)
def test_struct_compare_file_empty(self):
structure1 = [(1, 2)]
structure1.clear()
structure2 = [(1, 0), (2, 1), (2, 2), (3, 3),
(4, 4), (5, 5), (4, 1), (4, 1),
(4, 1), (1, 6), (2, 7), (3, 8),
(3, 8), (3, 8), (2, 9), (3, 4),
(4, 10), (3, 11), (3, 4), (4, 5),
(2, 2), (3, 3), (4, 4), (5, 5), (4, 12),
(5, 4), (6, 5), (5, 13), (5, 4), (6, 5),
(2, 14), (3, 4), (4, 5)]
res = struct_compare(structure1, structure2)
self.assertEqual(res, [1, 34])
structure3 = [(1, 0), (2, 1), (3, 2), (3, 2),
(2, 3), (3, 4), (4, 5), (3, 6),
(3, 4), (4, 7), (2, 8), (3, 9),
(4, 4), (5, 7), (4, 4), (5, 7),
(2, 10), (3, 4), (4, 7), (1, 11),
(2, 12), (2, 8), (3, 9), (4, 4),
(5, 7), (4, 12), (4, 12)]
res = struct_compare(structure1, structure3)
self.assertEqual(res, [1, 28])
# Numba forbid bad arguments
# def test_struct_compare_bad_args(self):
# tree, tree2 = self.init('empty.py', 'test2.py')
# res1 = struct_compare("", "")
# res2 = struct_compare(tree, tree2, "")
# self.assertEqual(TypeError, res1)
# self.assertEqual(TypeError, res2)
# Numba forbid bad arguments
# def test_op_shift_metric_bad_args(self):
# res1 = op_shift_metric([], 34)
# res2 = op_shift_metric(56, [])
# self.assertEqual(TypeError, res1)
# self.assertEqual(TypeError, res2)
|
py | 1a327af8b28bd5bcbaacf7c9fc33cedff8abf86e | #!/usr/bin/env python
import sys
import os
import rospy
import rospkg
from threading import Thread
from python_qt_binding import loadUi
from python_qt_binding import QtGui
from python_qt_binding.QtGui import QWidget
from trainergui import Ui_MainWindow
from inmoov_msgs.msg import MotorStatus
from inmoov_msgs.msg import MotorCommand
from inmoov_msgs.srv import MotorParameter
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
# https://github.com/ColinDuquesnoy/QDarkStyleSheet
import qdarkstyle
# https://nikolak.com/pyqt-qt-designer-getting-started/
class ExampleApp(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self):
# Explaining super is out of the scope of this article
# So please google it if you're not familar with it
# Simple reason why we use it here is that it allows us to
# access variables, methods etc in the design.py file
super(self.__class__, self).__init__()
self.setupUi(self) # This is defined in design.py file automatically
# It sets up layout and widgets that are defined
self.parameterTopic = ["servobus/torso/motorparameter","servobus/leftarm/motorparameter","servobus/rightarm/motorparameter"]
self.motorcommand = MotorCommand()
self.jointcommand = JointState()
self.jointNames = []
for servo in range (0, 11):
self.jointNames.append( rospy.get_param('servobus/torso/servomap/'+str(servo)+'/name'))
for servo in range (0, 11):
self.jointNames.append( rospy.get_param('servobus/leftarm/servomap/'+str(servo)+'/name'))
for servo in range (0, 11):
self.jointNames.append( rospy.get_param('servobus/rightarm/servomap/'+str(servo)+'/name'))
#print(self.jointNames)
#'right_pinky','right_ring','right_middle','right_index','right_thumb',
#'right_hand','right_bicep','right_bicep_rotate','right_shoulder_side','right_shoulder_up','','',
#'eye_leftright','eyes_updown','jaw','head_leftright','head_updown','head_tilt','waist_lean','waist_rotate','','','','',
#'left_pinky','left_ring','left_middle','left_index','left_thumb',
#'left_hand','left_bicep','left_bicep_rotate','left_shoulder_side','left_shoulder_up','','',
self.setupDropDowns()
self.cmbBus.currentIndexChanged.connect(self.busChanged)
self.cmbServo.currentIndexChanged.connect(self.servoChanged)
self.txtGoal.editingFinished.connect(self.setGoal)
self.txtMinPulse.editingFinished.connect(self.setMinPulse)
self.txtMaxPulse.editingFinished.connect(self.setMaxPulse)
self.txtMinGoal.editingFinished.connect(self.setMinGoal)
self.txtMaxGoal.editingFinished.connect(self.setMaxGoal)
self.txtMinSensor.editingFinished.connect(self.setMinSensor)
self.txtMaxSensor.editingFinished.connect(self.setMaxSensor)
self.chkEnabled.stateChanged.connect(self.setEnabled)
self.chkCalibrated.stateChanged.connect(self.setCalibrated)
self.sliderGoal.valueChanged.connect(self.sliderChanged)
rospy.init_node('trainer', anonymous=True)
self.commandPublisher = []
self.commandPublisher.append(rospy.Publisher("servobus/torso/motorcommand", MotorCommand, queue_size=10))
self.commandPublisher.append(rospy.Publisher("servobus/leftarm/motorcommand", MotorCommand, queue_size=10))
self.commandPublisher.append(rospy.Publisher("servobus/rightarm/motorcommand", MotorCommand, queue_size=10))
self.statusSubscriber = []
self.statusSubscriber.append(rospy.Subscriber("servobus/torso/motorstatus", MotorStatus, self.callback0))
self.statusSubscriber.append(rospy.Subscriber("servobus/leftarm/motorstatus", MotorStatus, self.callback1))
self.statusSubscriber.append(rospy.Subscriber("servobus/rightarm/motorstatus", MotorStatus, self.callback2))
self.jointPublisher = rospy.Publisher("joint_command", JointState, queue_size=10)
self.bus = 0
self.servo = 0
self.busChanged()
self.servoChanged()
def busChanged(self):
# unregister topics and reregister to the new ones
self.bus = self.cmbBus.currentIndex()
self.cmbServo.clear()
for s in range(0, 11):
self.cmbServo.addItem(self.jointNames[(self.bus * 11) + s])
#self.commandPublisher.unregister()
#self.commandPublisher = rospy.Publisher(self.commandTopic[bus], MotorCommand, queue_size=10)
#self.statusSubscriber.unregister()
#self.statusSubscriber = rospy.Subscriber(self.statusTopic[self.bus], MotorStatus, self.callback)
self.servoChanged()
def servoChanged(self):
if self.cmbServo.count() > 0:
self.servo = self.cmbServo.currentIndex()
self.getMinPulse()
self.getMaxPulse()
self.getMinGoal()
self.getMaxGoal()
self.getGoal()
self.getMinSensor()
self.getMaxSensor()
self.getEnabled()
self.getCalibrated()
def callback0(self, data):
if data.id == self.servo and self.bus == 0:
#print data.posraw
#self.chkEnabled.setChecked(bool(data.enabled))
self.txtPosition.setText(str(data.position))
self.txtSpeed.setText(str(data.presentspeed))
self.txtSensorRaw.setText(str(data.posraw))
self.chkMoving.setChecked(bool(data.moving))
self.chkPower.setChecked(bool(data.power))
#self.txtGoal.setText(str(data.goal))
def callback1(self, data):
if data.id == self.servo and self.bus == 1:
#print data.posraw
#self.chkEnabled.setChecked(bool(data.enabled))
self.txtPosition.setText(str(data.position))
self.txtSpeed.setText(str(data.presentspeed))
self.txtSensorRaw.setText(str(data.posraw))
self.chkMoving.setChecked(bool(data.moving))
self.chkPower.setChecked(bool(data.power))
#self.txtGoal.setText(str(data.goal))
def callback2(self, data):
if data.id == self.servo and self.bus == 2:
#print data.posraw
#self.chkEnabled.setChecked(bool(data.enabled))
self.txtPosition.setText(str(data.position))
self.txtSpeed.setText(str(data.presentspeed))
self.txtSensorRaw.setText(str(data.posraw))
self.chkMoving.setChecked(bool(data.moving))
self.chkPower.setChecked(bool(data.power))
#self.txtGoal.setText(str(data.goal))
def degreestoradians(self, d):
return d*(3.1415926/180.0)
def setupDropDowns(self):
self.cmbBus.addItem(rospy.get_param('/servobus/torso/name'))
self.cmbBus.addItem(rospy.get_param('/servobus/leftarm/name'))
self.cmbBus.addItem(rospy.get_param('/servobus/rightarm/name'))
for servo in range (0, 11):
print('/servobus/torso/servomap/' + str(servo) + '/name')
self.cmbServo.addItem(rospy.get_param('/servobus/torso/servomap/' + str(servo) + '/name'))
#self.cmbServo.addItem('Servo 00')
#self.cmbServo.addItem('Servo 01')
#self.cmbServo.addItem('Servo 02')
#self.cmbServo.addItem('Servo 03')
#self.cmbServo.addItem('Servo 04')
#self.cmbServo.addItem('Servo 05')
#self.cmbServo.addItem('Servo 06')
#self.cmbServo.addItem('Servo 07')
#self.cmbServo.addItem('Servo 08')
#self.cmbServo.addItem('Servo 09')
#self.cmbServo.addItem('Servo 10')
#self.cmbServo.addItem('Servo 11')
self.cmbSmoothing.addItem('0 - Instant')
self.cmbSmoothing.addItem('1 - Max Speed')
self.cmbSmoothing.addItem('2 - Linear Ramp')
self.cmbSmoothing.addItem('3 - COS Ramp')
self.cmbSmoothing.addItem('4 - COS^2 Ramp')
def sliderChanged(self, i):
self.txtGoal.setText(str(i/1000.0))
self.setGoal()
def setGoal(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x1E
self.motorcommand.value = float(self.txtGoal.text())
#print(self.motorcommand.value)
self.commandPublisher[self.bus].publish(self.motorcommand)
self.jointcommand.header = Header()
self.jointcommand.header.stamp = rospy.Time.now()
        # jointNames is built with 11 entries per bus in __init__, so index with * 11
        self.jointcommand.name = [self.jointNames[((self.bus * 11) + self.servo)]]
self.jointcommand.position = [self.degreestoradians(float(self.txtGoal.text()))]
self.jointcommand.velocity = []
self.jointcommand.effort = []
self.jointPublisher.publish(self.jointcommand)
def getGoal(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
value = motorparameter(self.cmbServo.currentIndex(), 0x1E).data
self.txtGoal.setText(str(value))
self.sliderGoal.setValue(int(value * 1000.0))
def setMinPulse(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x14
self.motorcommand.value = float(self.txtMinPulse.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMinPulse(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMinPulse.setText(str(motorparameter(self.cmbServo.currentIndex(), 0x14).data))
def setMaxPulse(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x16
self.motorcommand.value = float(self.txtMaxPulse.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMaxPulse(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMaxPulse.setText(str(motorparameter(self.cmbServo.currentIndex(), 0x16).data))
def setMinGoal(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x06
self.motorcommand.value = float(self.txtMinGoal.text())
self.sliderGoal.setMinimum(int(self.motorcommand.value * 1000.0))
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMinGoal(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
value = motorparameter(self.cmbServo.currentIndex(), 0x06).data
self.txtMinGoal.setText(str(value))
self.sliderGoal.setMinimum(int(value * 1000.0))
def setMaxGoal(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x08
self.motorcommand.value = float(self.txtMaxGoal.text())
self.sliderGoal.setMaximum(int(self.motorcommand.value * 1000.0))
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMaxGoal(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
value = motorparameter(self.cmbServo.currentIndex(), 0x08).data
self.txtMaxGoal.setText(str(value))
self.sliderGoal.setMaximum(int(value * 1000.0))
def setMinSensor(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0xA2
self.motorcommand.value = float(self.txtMinSensor.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMinSensor(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMinSensor.setText(str(motorparameter(self.cmbServo.currentIndex(), 0xA2).data))
def setMaxSensor(self):
#print(str(value))
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0xA4
self.motorcommand.value = float(self.txtMaxSensor.text())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getMaxSensor(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.txtMaxSensor.setText(str(motorparameter(self.cmbServo.currentIndex(), 0xA4).data))
def setEnabled(self):
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0x18
self.motorcommand.value = float(self.chkEnabled.isChecked())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getEnabled(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.chkEnabled.setChecked(bool(motorparameter(self.cmbServo.currentIndex(), 0x18).data))
def setCalibrated(self):
self.motorcommand.id = self.cmbServo.currentIndex()
self.motorcommand.parameter = 0xA0
self.motorcommand.value = float(self.chkCalibrated.isChecked())
self.commandPublisher[self.bus].publish(self.motorcommand)
def getCalibrated(self):
bus = self.cmbBus.currentIndex()
motorparameter = rospy.ServiceProxy(self.parameterTopic[bus], MotorParameter)
self.chkCalibrated.setChecked(bool(motorparameter(self.cmbServo.currentIndex(), 0xA0).data))
def main():
app = QtGui.QApplication(sys.argv) # A new instance of QApplication
app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
form = ExampleApp() # We set the form to be our ExampleApp (design)
form.show() # Show the form
app.exec_() # and execute the app
if __name__ == '__main__': # if we're running file directly and not importing it
main()
|
py | 1a327bf18c3c3f26d3b3099e3687c3464c4d9d6c | from flask_restful import reqparse
getUserParser = reqparse.RequestParser()
getUserParser.add_argument('id', type=int, help='please enter id')
getUserParser.add_argument('username', type=str, help='please enter username')
getUserParser.add_argument('type', type=str, help='please enter type')
getUserParser.add_argument('token', type=str, location='headers')
getUserParser.add_argument('page', type=int, help='please enter page')
getUserParser.add_argument('limit', type=int, help='please enter limit')
deleteUserParser = reqparse.RequestParser()
deleteUserParser.add_argument('id', type=int, help='please enter id', required=True)
deleteUserParser.add_argument('token', type=str, location='headers')
putUserParser = reqparse.RequestParser()
putUserParserRules = [{
"name":"username",
"type":str,
"help":"please enter username",
"required":True,
},{
"name":"password",
"type":str,
"help":"please enter password",
"required":True,
},{
"name":"role",
"type":str,
"help":"please enter role",
"required":True,
},{
"name":"name",
"type":str,
"help":"please enter name",
"required":True,
}]
for i in putUserParserRules:
putUserParser.add_argument(i['name'], type=i['type'], help=i['help'], required=i['required'])
# putUserParser.add_argument('username', type=str, help='please enter username', required=True)
# putUserParser.add_argument('password', type=str, help='please enter password', required=True)
# putUserParser.add_argument('role', type=str, help='please enter role', required=True)
# putUserParser.add_argument('name', type=str, help='please enter name', required=True)
postUserParser = reqparse.RequestParser()
postUserParser.add_argument('id', type=str, help='please enter id', required=True)
postUserParser.add_argument('username', type=str, help='please enter username', required=True)
postUserParser.add_argument('role', type=str, help='please enter role', required=True)
postUserParser.add_argument('name', type=str, help='please enter name', required=True)
postUserParser.add_argument('password', type=str, help='please enter password', required=True)
postUserParser.add_argument('token', type=str, location='headers')
postLoginParser = reqparse.RequestParser()
postLoginParser.add_argument('username', type=str, help='please enter username', required=True)
postLoginParser.add_argument('password', type=str, help='please enter password', required=True)
getLoginParser = reqparse.RequestParser()
getLoginParser.add_argument('token', type=str, location='headers')
getLoginOutParser = reqparse.RequestParser()
getLoginOutParser.add_argument('token', type=str, location='headers')
# args = parser.parse_args() |
py | 1a327e082d8c4088571d9ff3b5db39e01d6338b7 | #!/usr/bin/python
"""
Http Extension
Verify status_codes/headers and make HEAD,POST,GET,PUT,DELETE requests
"""
import requests
from requests.exceptions import ConnectionError
from Util import Util
# If ssl-verify is false disable insecure request warnings
if Util.getConfig("ssl-verify") != True:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
"""
Verify if status is 200/201 (OK) with GET request
"""
def is_ok(url,auth=None):
result = False
try:
response = get(url,auth=auth)
result = response.status_code == 200 or response.status_code == 201
except ConnectionError:pass
return result
"""
Verify if status is 200/201 (OK) with HEAD request
"""
def is_ok_head(url,auth=None):
result = False
try:
response = head(url,auth=auth)
result = response.status_code == 200 or response.status_code == 201
except ConnectionError:pass
return result
"""
Verify if status is 404 (Not Found) with GET request
"""
def is_not_found(url,auth=None):
result = False
try:
response = get(url,auth=auth)
result = response.status_code == 404
except ConnectionError:
result = True
return result
"""
Verify if status is 404 (Not Found) with HEAD request
"""
def is_not_found_head(url,auth=None):
result = False
try:
response = head(url,auth=auth)
result = response.status_code == 404
except ConnectionError:
result = True
return result
"""
Verify if a header is present with GET request
"""
def has_header(url,header,auth=None):
return header in headers(url,auth)
"""
Get headers with GET request
"""
def headers(url,auth = None):
response = get(url,auth=auth)
return response.headers
"""
Verify if a header is present with HEAD request
"""
def has_header_head(url,header,auth=None):
return header in headers_head(url,auth)
"""
Get headers with HEAD request
"""
def headers_head(url,auth = None):
response = head(url,auth)
return response.headers
"""
Get request
"""
def get(url,auth = None):
verify = Util.getConfig("ssl-verify") == True
return requests.get(url, verify=verify,auth=auth,timeout=Util.getConfig("timeout"))
"""
Get request
"""
def post(url,data = {},auth = None):
verify = Util.getConfig("ssl-verify") == True
return requests.post(url,verify=verify,data=data,auth=auth,timeout=Util.getConfig("timeout"))
"""
Get request
"""
def delete(url,data = {},auth = None):
verify = Util.getConfig("ssl-verify") == True
return requests.delete(url, verify=verify,data=data,auth=auth,timeout=Util.getConfig("timeout"))
"""
Get request
"""
def put(url,data = {},auth = None):
verify = Util.getConfig("ssl-verify") == True
return requests.put(url, verify=verify,data=data,auth=auth,timeout=Util.getConfig("timeout"))
"""
Head request
"""
def head(url,auth = None):
verify = Util.getConfig("ssl-verify") == True
return requests.head(url, verify=verify,auth=auth,timeout=Util.getConfig("timeout"))
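# --- Minimal usage sketch (not part of the original module) ---
# Exercises the verification helpers defined above. The URL and header name are
# placeholders, and behaviour depends on the ssl-verify / timeout values that
# Util.getConfig() returns in your setup.
if __name__ == "__main__":
    url = "https://example.com"
    print(is_ok(url))                       # True if GET returns 200/201
    print(is_ok_head(url))                  # same check using a HEAD request
    print(has_header(url, "Content-Type"))  # header presence via GET
    print(is_not_found(url + "/missing"))   # True on 404 or connection error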
|
py | 1a327ff436647cc903dd5a7a3acf85c63430563a | import numpy as np
import cv2
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#blur the frame to get rid of noise. the kernel should be ODD
#frame = cv2.GaussianBlur(frame,(7,7),0)
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#apply the background subtraction
fgmask = fgbg.apply(frame)
kernel = np.ones((3,3),np.uint8)
kernel_lg = np.ones((7,7),np.uint8)
#erosion followed by dilation is called an opening
    #fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
#erode the mask to get rid of noise
fgmask = cv2.erode(fgmask,kernel,iterations = 1)
#dialate it back to regain some lost area
fgmask = cv2.dilate(fgmask,kernel_lg,iterations = 1)
# Display the resulting frame
cv2.imshow('frame',gray)
cv2.imshow('vanish',fgmask)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() |
py | 1a3280932f5bcd7b5f1530ac2fac425cb830d6e5 | import re
import os
from subprocess import call
from clint.textui import colored, puts, puts_err, indent, progress
def command_out(message):
"""
Used to print shell commands to stderr
"""
puts_err(colored.green(message))
def message(message, n_indent = 4, color = "blue"):
with indent(n_indent):
if color == "blue":
puts_err(colored.blue('\n' + message + '\n'))
elif color == "red":
puts_err(colored.red('\n' + message + '\n'))
def boolify(s):
if s == 'True':
return True
if s == 'False':
return False
raise ValueError("huh?")
def autoconvert(s):
for fn in (boolify, int, float):
try:
return fn(s)
except ValueError:
pass
return s
def parse_region(region):
return re.split("[:-]+", region)
# Stack Overflow: 377017
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def check_program_exists(program):
if which(program) is None:
exit(puts_err(colored.red("\n\t" + program + " not installed or on PATH.\n")))
def run_command(comm, shell = True):
"""
Runs a shell command
"""
sh_out = ' '.join(comm) if type(comm) == list else comm
command_out(sh_out)
out = call(comm, shell = shell)
if out != 0:
raise Exception(f"Error [{out}] running {sh_out}")
return out
# Levenshtein edit distnace
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
def lev(s1, s2):
if len(s1) < len(s2):
return lev(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = list(range(len(s2) + 1))
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
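# --- Minimal usage sketch (not part of the original module) ---
# Quick check of a few helpers above; "bwa" is just an arbitrary program name
# and the region string is illustrative.
if __name__ == "__main__":
    print(lev("kitten", "sitting"))      # 3, the classic Levenshtein example
    print(parse_region("chr1:100-200"))  # ['chr1', '100', '200']
    print(which("bwa"))                  # absolute path to the executable, or None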
|
py | 1a3282fdf7e55026a44cfae367b73fc1e162f1dd | # Generated by Django 2.0.5 on 2018-06-06 01:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breeds', '0022_auto_20180604_2355'),
]
operations = [
migrations.AddField(
model_name='breedimage',
name='featured',
field=models.NullBooleanField(),
),
]
|
py | 1a328403164cbec24d61150d8c5f67817eaa216b | """ Utility functions for photoscript """
import fnmatch
import os
import re
import subprocess
def ditto(src, dest, norsrc=False):
""" Copies a file or directory tree from src path to dest path
src: source path as string
dest: destination path as string
norsrc: (bool) if True, uses --norsrc flag with ditto so it will not copy
resource fork or extended attributes. May be useful on volumes that
don't work with extended attributes (likely only certain SMB mounts)
default is False
Uses ditto to perform copy; will silently overwrite dest if it exists
Raises exception if copy fails or either path is None """
if src is None or dest is None:
raise ValueError("src and dest must not be None", src, dest)
if norsrc:
command = ["/usr/bin/ditto", "--norsrc", src, dest]
else:
command = ["/usr/bin/ditto", src, dest]
# if error on copy, subprocess will raise CalledProcessError
result = subprocess.run(command, check=True, stderr=subprocess.PIPE)
return result.returncode
def findfiles(pattern, path_):
"""Returns list of filenames from path_ matched by pattern
shell pattern. Matching is case-insensitive.
If 'path_' is invalid/doesn't exist, returns []."""
if not os.path.isdir(path_):
return []
# See: https://gist.github.com/techtonik/5694830
rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
return [name for name in os.listdir(path_) if rule.match(name)]
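# --- Minimal usage sketch (not part of the original module) ---
# Exercises the two helpers above with placeholder paths.
if __name__ == "__main__":
    # findfiles() returns [] for a non-existent directory, so this is safe to run
    print(findfiles("*.jpg", "/tmp"))
    # ditto() copies a file or tree and silently overwrites dest; it shells out to
    # /usr/bin/ditto, so it is macOS-only. Uncomment with real paths to try it:
    # ditto("/path/to/src.jpg", "/path/to/dest_copy.jpg")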
|
py | 1a32842b027f5de1b1d017e9f40cf0a0bd1d792f | # Copyright (c) 2018-2020, NVIDIA CORPORATION.
import pickle
import warnings
from numbers import Number
from types import SimpleNamespace
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda, njit
import cudf
from cudf import _lib as libcudf
from cudf._lib.column import Column
from cudf._lib.null_mask import (
MaskState,
bitmask_allocation_size_bytes,
create_null_mask,
)
from cudf._lib.scalar import as_device_scalar
from cudf._lib.stream_compaction import distinct_count as cpp_distinct_count
from cudf._lib.transform import bools_to_mask
from cudf.core.abc import Serializable
from cudf.core.buffer import Buffer
from cudf.core.dtypes import CategoricalDtype
from cudf.utils import ioutils, utils
from cudf.utils.dtypes import (
NUMERIC_TYPES,
check_cast_unsupported_dtype,
cudf_dtypes_to_pandas_dtypes,
get_time_unit,
is_categorical_dtype,
is_list_dtype,
is_numerical_dtype,
is_scalar,
is_string_dtype,
is_struct_dtype,
min_signed_type,
min_unsigned_type,
np_to_pa_dtype,
)
from cudf.utils.utils import mask_dtype
class ColumnBase(Column, Serializable):
def __init__(
self,
data,
size,
dtype,
mask=None,
offset=0,
null_count=None,
children=(),
):
"""
Parameters
----------
data : Buffer
dtype
The type associated with the data Buffer
mask : Buffer, optional
children : tuple, optional
"""
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
children=children,
)
def as_frame(self):
"""
Converts a Column to Frame
"""
return cudf.core.frame.Frame({None: self.copy(deep=False)})
@property
def data_array_view(self):
"""
View the data as a device array object
"""
if self.dtype == "object":
raise ValueError("Cannot get an array view of a StringColumn")
if is_categorical_dtype(self.dtype):
return self.codes.data_array_view
else:
dtype = self.dtype
result = cuda.as_cuda_array(self.data)
# Workaround until `.view(...)` can change itemsize
# xref: https://github.com/numba/numba/issues/4829
result = cuda.devicearray.DeviceNDArray(
shape=(result.nbytes // dtype.itemsize,),
strides=(dtype.itemsize,),
dtype=dtype,
gpu_data=result.gpu_data,
)
return result
@property
def mask_array_view(self):
"""
View the mask as a device array
"""
result = cuda.as_cuda_array(self.mask)
dtype = mask_dtype
# Workaround until `.view(...)` can change itemsize
# xref: https://github.com/numba/numba/issues/4829
result = cuda.devicearray.DeviceNDArray(
shape=(result.nbytes // dtype.itemsize,),
strides=(dtype.itemsize,),
dtype=dtype,
gpu_data=result.gpu_data,
)
return result
def __len__(self):
return self.size
def to_pandas(self, index=None, nullable=False, **kwargs):
if nullable and self.dtype in cudf_dtypes_to_pandas_dtypes:
pandas_nullable_dtype = cudf_dtypes_to_pandas_dtypes[self.dtype]
arrow_array = self.to_arrow()
pandas_array = pandas_nullable_dtype.__from_arrow__(arrow_array)
pd_series = pd.Series(pandas_array, copy=False)
elif str(self.dtype) in NUMERIC_TYPES and self.null_count == 0:
pd_series = pd.Series(cupy.asnumpy(self.values), copy=False)
else:
pd_series = self.to_arrow().to_pandas(**kwargs)
if index is not None:
pd_series.index = index
return pd_series
def __iter__(self):
cudf.utils.utils.raise_iteration_error(obj=self)
@property
def values_host(self):
"""
Return a numpy representation of the Column.
"""
return self.data_array_view.copy_to_host()
@property
def values(self):
"""
Return a CuPy representation of the Column.
"""
if len(self) == 0:
return cupy.asarray([], dtype=self.dtype)
if self.has_nulls:
raise ValueError("Column must have no nulls.")
return cupy.asarray(self.data_array_view)
def clip(self, lo, hi):
if is_categorical_dtype(self):
input_col = self.astype(self.categories.dtype)
return libcudf.replace.clip(input_col, lo, hi).astype(self.dtype)
else:
return libcudf.replace.clip(self, lo, hi)
def equals(self, other, check_dtypes=False):
if self is other:
return True
if other is None or len(self) != len(other):
return False
if check_dtypes:
if self.dtype != other.dtype:
return False
return (self == other).min()
def all(self):
return bool(libcudf.reduce.reduce("all", self, dtype=np.bool_))
def any(self):
return bool(libcudf.reduce.reduce("any", self, dtype=np.bool_))
def __sizeof__(self):
n = self.data.size
if self.nullable:
n += bitmask_allocation_size_bytes(self.size)
return n
@classmethod
def _concat(cls, objs, dtype=None):
if len(objs) == 0:
dtype = pd.api.types.pandas_dtype(dtype)
if is_categorical_dtype(dtype):
dtype = CategoricalDtype()
return column_empty(0, dtype=dtype, masked=True)
# If all columns are `NumericalColumn` with different dtypes,
# we cast them to a common dtype.
# Notice, we can always cast pure null columns
not_null_cols = list(filter(lambda o: o.valid_count > 0, objs))
if len(not_null_cols) > 0 and (
len(
[
o
for o in not_null_cols
if not is_numerical_dtype(o.dtype)
or np.issubdtype(o.dtype, np.datetime64)
]
)
== 0
):
col_dtypes = [o.dtype for o in not_null_cols]
# Use NumPy to find a common dtype
common_dtype = np.find_common_type(col_dtypes, [])
# Cast all columns to the common dtype
for i in range(len(objs)):
objs[i] = objs[i].astype(common_dtype)
# Find the first non-null column:
head = objs[0]
for i, obj in enumerate(objs):
if obj.valid_count > 0:
head = obj
break
for i, obj in enumerate(objs):
# Check that all columns are the same type:
if not pd.api.types.is_dtype_equal(obj.dtype, head.dtype):
# if all null, cast to appropriate dtype
if obj.valid_count == 0:
objs[i] = column_empty_like(
head, dtype=head.dtype, masked=True, newsize=len(obj)
)
else:
raise ValueError("All columns must be the same type")
cats = None
is_categorical = all(is_categorical_dtype(o.dtype) for o in objs)
# Combine CategoricalColumn categories
if is_categorical:
# Combine and de-dupe the categories
cats = (
cudf.concat([o.cat().categories for o in objs])
.to_series()
.drop_duplicates(ignore_index=True)
._column
)
objs = [
o.cat()._set_categories(
o.cat().categories, cats, is_unique=True
)
for o in objs
]
# Map `objs` into a list of the codes until we port Categorical to
# use the libcudf++ Category data type.
objs = [o.cat().codes._column for o in objs]
head = head.cat().codes._column
newsize = sum(map(len, objs))
if newsize > libcudf.MAX_COLUMN_SIZE:
raise MemoryError(
f"Result of concat cannot have "
f"size > {libcudf.MAX_COLUMN_SIZE_STR}"
)
# Filter out inputs that have 0 length
objs = [o for o in objs if len(o) > 0]
# Perform the actual concatenation
if newsize > 0:
col = libcudf.concat.concat_columns(objs)
else:
col = column_empty(0, head.dtype, masked=True)
if is_categorical:
col = build_categorical_column(
categories=cats,
codes=as_column(col.base_data, dtype=col.dtype),
mask=col.base_mask,
size=col.size,
offset=col.offset,
)
return col
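    # Illustrative sketch of `_concat` (not part of the original source; assumes
    # cudf is installed and a CUDA device is available):
    #
    # >>> a = as_column([1, 2])
    # >>> b = as_column([3.0])
    # >>> ColumnBase._concat([a, b]).dtype   # common dtype found via NumPy
    # dtype('float64')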
def dropna(self):
dropped_col = self.as_frame().dropna()._as_column()
return dropped_col
def to_arrow(self):
"""Convert to PyArrow Array
Examples
--------
>>> import cudf
>>> col = cudf.core.column.as_column([1, 2, 3, 4])
>>> col.to_arrow()
<pyarrow.lib.Int64Array object at 0x7f886547f830>
[
1,
2,
3,
4
]
"""
if isinstance(self, cudf.core.column.CategoricalColumn):
# arrow doesn't support unsigned codes
signed_type = (
min_signed_type(self.codes.max())
if self.codes.size > 0
else np.int8
)
codes = self.codes.astype(signed_type)
categories = self.categories
out_indices = codes.to_arrow()
out_dictionary = categories.to_arrow()
return pa.DictionaryArray.from_arrays(
out_indices, out_dictionary, ordered=self.ordered,
)
if isinstance(self, cudf.core.column.StringColumn) and (
self.null_count == len(self)
):
return pa.NullArray.from_buffers(
pa.null(), len(self), [pa.py_buffer((b""))]
)
return libcudf.interop.to_arrow(
libcudf.table.Table(
cudf.core.column_accessor.ColumnAccessor({"None": self})
),
[["None"]],
keep_index=False,
)["None"].chunk(0)
@classmethod
def from_arrow(cls, array):
"""
Convert PyArrow Array/ChunkedArray to column
Parameters
----------
array : PyArrow Array/ChunkedArray
Returns
-------
column
Examples
--------
>>> import pyarrow as pa
>>> import cudf
>>> cudf.core.column.ColumnBase.from_arrow(pa.array([1, 2, 3, 4]))
<cudf.core.column.numerical.NumericalColumn object at 0x7f8865497ef0>
"""
if not isinstance(array, (pa.Array, pa.ChunkedArray)):
raise TypeError("array should be PyArrow array or chunked array")
data = pa.table([array], [None])
if isinstance(array.type, pa.DictionaryType):
indices_table = pa.table(
{
"None": pa.chunked_array(
[chunk.indices for chunk in data["None"].chunks],
type=array.type.index_type,
)
}
)
dictionaries_table = pa.table(
{
"None": pa.chunked_array(
[chunk.dictionary for chunk in data["None"].chunks],
type=array.type.value_type,
)
}
)
codes = libcudf.interop.from_arrow(
indices_table, indices_table.column_names
)._data["None"]
categories = libcudf.interop.from_arrow(
dictionaries_table, dictionaries_table.column_names
)._data["None"]
return build_categorical_column(
categories=categories,
codes=codes,
mask=codes.base_mask,
size=codes.size,
ordered=array.type.ordered,
)
elif isinstance(array.type, pa.StructType):
return cudf.core.column.StructColumn.from_arrow(array)
return libcudf.interop.from_arrow(data, data.column_names)._data[
"None"
]
def _get_mask_as_column(self):
return libcudf.transform.mask_to_bools(
self.base_mask, self.offset, self.offset + len(self)
)
def _memory_usage(self, **kwargs):
return self.__sizeof__()
def to_gpu_array(self, fillna=None):
"""Get a dense numba device array for the data.
Parameters
----------
fillna : scalar, 'pandas', or None
See *fillna* in ``.to_array``.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
if fillna:
return self.fillna(self.default_na_value()).data_array_view
else:
return self.dropna().data_array_view
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Parameters
----------
fillna : scalar, 'pandas', or None
Defaults to None, which will skip null values.
If it equals "pandas", null values are filled with NaNs.
Non integral dtype is promoted to np.float64.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
return self.to_gpu_array(fillna=fillna).copy_to_host()
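    # Illustrative behaviour of `to_array` (not part of the original source;
    # assumes cudf is installed and a CUDA device is available):
    #
    # >>> col = as_column([1, None, 3])
    # >>> col.to_array()                     # fillna=None -> nulls are skipped
    # array([1, 3])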
def _fill(self, fill_value, begin=0, end=-1, inplace=False):
if end <= begin or begin >= self.size:
return self if inplace else self.copy()
if is_categorical_dtype(self.dtype):
return self._fill_categorical(fill_value, begin, end, inplace)
fill_scalar = as_device_scalar(fill_value, self.dtype)
if not inplace:
return libcudf.filling.fill(self, begin, end, fill_scalar)
if is_string_dtype(self.dtype):
return self._mimic_inplace(
libcudf.filling.fill(self, begin, end, fill_scalar),
inplace=True,
)
if fill_value is None and not self.nullable:
mask = create_null_mask(self.size, state=MaskState.ALL_VALID)
self.set_base_mask(mask)
libcudf.filling.fill_in_place(self, begin, end, fill_scalar)
return self
def _fill_categorical(self, fill_value, begin, end, inplace):
fill_code = self._encode(fill_value)
fill_scalar = as_device_scalar(fill_code, self.codes.dtype)
result = self if inplace else self.copy()
libcudf.filling.fill_in_place(result.codes, begin, end, fill_scalar)
return result
def shift(self, offset, fill_value):
return libcudf.copying.shift(self, offset, fill_value)
@property
def valid_count(self):
"""Number of non-null values"""
return len(self) - self.null_count
@property
def nullmask(self):
"""The gpu buffer for the null-mask
"""
if self.nullable:
return self.mask_array_view
else:
raise ValueError("Column has no null mask")
def copy(self, deep=True):
"""Columns are immutable, so a deep copy produces a copy of the
underlying data and mask and a shallow copy creates a new column and
copies the references of the data and mask.
"""
if deep:
return libcudf.copying.copy_column(self)
else:
return build_column(
self.base_data,
self.dtype,
mask=self.base_mask,
size=self.size,
offset=self.offset,
children=self.base_children,
)
def view(self, dtype):
"""
        View the data underlying a column as a different dtype.
        The column's total number of bytes must divide evenly by the
        itemsize of the desired data type. Columns with nulls may only
        be viewed as dtypes whose itemsize equals the source dtype's.
Parameters
----------
dtype : NumPy dtype, string
The dtype to view the data as
"""
dtype = np.dtype(dtype)
if dtype.kind in ("o", "u", "s"):
raise TypeError(
"Bytes viewed as str without metadata is ambiguous"
)
if self.dtype.itemsize == dtype.itemsize:
return build_column(
self.base_data,
dtype=dtype,
mask=self.base_mask,
size=self.size,
offset=self.offset,
)
else:
if self.null_count > 0:
raise ValueError(
"Can not produce a view of a column with nulls"
)
if (self.size * self.dtype.itemsize) % dtype.itemsize:
raise ValueError(
f"Can not divide {self.size * self.dtype.itemsize}"
+ f" total bytes into {dtype} with size {dtype.itemsize}"
)
new_buf_ptr = (
self.base_data.ptr + self.offset * self.dtype.itemsize
)
new_buf_size = self.size * self.dtype.itemsize
view_buf = Buffer(
data=new_buf_ptr,
size=new_buf_size,
owner=self.base_data._owner,
)
return build_column(view_buf, dtype=dtype)
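    # Illustrative sketch of `view` (not part of the original source; assumes a
    # little-endian machine with cudf installed and a CUDA device available):
    #
    # >>> col = as_column(np.array([1, 2], dtype="int64"))
    # >>> col.view("int32").to_array()       # same bytes, reinterpreted
    # array([1, 0, 2, 0], dtype=int32)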
def element_indexing(self, index):
"""Default implementation for indexing to an element
Raises
------
``IndexError`` if out-of-bound
"""
index = np.int32(index)
if index < 0:
index = len(self) + index
if index > len(self) - 1 or index < 0:
raise IndexError("single positional indexer is out-of-bounds")
return libcudf.copying.get_element(self, index).value
def __getitem__(self, arg):
if isinstance(arg, Number):
arg = int(arg)
return self.element_indexing(arg)
elif isinstance(arg, slice):
if is_categorical_dtype(self):
codes = self.codes[arg]
return build_categorical_column(
categories=self.categories,
codes=as_column(codes.base_data, dtype=codes.dtype),
mask=codes.base_mask,
ordered=self.ordered,
size=codes.size,
offset=codes.offset,
)
start, stop, stride = arg.indices(len(self))
if start < 0:
start = start + len(self)
if stop < 0:
stop = stop + len(self)
if start >= stop:
return column_empty(0, self.dtype, masked=True)
# compute mask slice
if stride == 1 or stride is None:
return libcudf.copying.column_slice(self, [start, stop])[0]
else:
# Need to create a gather map for given slice with stride
gather_map = arange(
start=start,
stop=stop,
step=stride,
dtype=np.dtype(np.int32),
)
return self.take(gather_map)
else:
arg = as_column(arg)
if len(arg) == 0:
arg = as_column([], dtype="int32")
if pd.api.types.is_integer_dtype(arg.dtype):
return self.take(arg)
if pd.api.types.is_bool_dtype(arg.dtype):
return self.apply_boolean_mask(arg)
raise NotImplementedError(type(arg))
def __setitem__(self, key, value):
"""
Set the value of self[key] to value.
If value and self are of different types,
value is coerced to self.dtype
"""
if isinstance(key, slice):
key_start, key_stop, key_stride = key.indices(len(self))
if key_start < 0:
key_start = key_start + len(self)
if key_stop < 0:
key_stop = key_stop + len(self)
if key_start >= key_stop:
return self.copy()
if (key_stride is None or key_stride == 1) and is_scalar(value):
return self._fill(value, key_start, key_stop, inplace=True)
if key_stride != 1 or key_stride is not None or is_scalar(value):
key = arange(
start=key_start,
stop=key_stop,
step=key_stride,
dtype=np.dtype(np.int32),
)
nelem = len(key)
else:
nelem = abs(key_stop - key_start)
else:
key = as_column(key)
if pd.api.types.is_bool_dtype(key.dtype):
if not len(key) == len(self):
raise ValueError(
"Boolean mask must be of same length as column"
)
key = arange(len(self))[key]
if hasattr(value, "__len__") and len(value) == len(self):
value = as_column(value)[key]
nelem = len(key)
if is_scalar(value):
if is_categorical_dtype(self.dtype):
value = self._encode(value)
else:
value = self.dtype.type(value) if value is not None else value
else:
if len(value) != nelem:
msg = (
f"Size mismatch: cannot set value "
f"of size {len(value)} to indexing result of size "
f"{nelem}"
)
raise ValueError(msg)
value = as_column(value).astype(self.dtype)
if is_categorical_dtype(value.dtype):
value = value.cat().set_categories(self.categories)
assert self.dtype == value.dtype
if (
isinstance(key, slice)
and (key_stride == 1 or key_stride is None)
and not is_scalar(value)
):
out = libcudf.copying.copy_range(
value, self, 0, nelem, key_start, key_stop, False
)
if is_categorical_dtype(value.dtype):
out = build_categorical_column(
categories=value.categories,
codes=as_column(out.base_data, dtype=out.dtype),
mask=out.base_mask,
size=out.size,
offset=out.offset,
ordered=value.ordered,
)
else:
try:
if is_scalar(value):
input = self
if is_categorical_dtype(self.dtype):
input = self.codes
out = input.as_frame()._scatter(key, [value])._as_column()
if is_categorical_dtype(self.dtype):
out = build_categorical_column(
categories=self.categories,
codes=as_column(out.base_data, dtype=out.dtype),
mask=out.base_mask,
size=out.size,
offset=out.offset,
ordered=self.ordered,
)
else:
if not isinstance(value, Column):
value = as_column(value)
out = (
self.as_frame()
._scatter(key, value.as_frame())
._as_column()
)
except RuntimeError as e:
if "out of bounds" in str(e):
raise IndexError(
f"index out of bounds for column of size {len(self)}"
) from e
raise
self._mimic_inplace(out, inplace=True)
def fillna(self, value=None, method=None, dtype=None):
"""Fill null values with ``value``.
Returns a copy with null filled.
"""
return libcudf.replace.replace_nulls(
input_col=self, replacement=value, method=method, dtype=dtype
)
def isnull(self):
"""Identify missing values in a Column.
"""
result = libcudf.unary.is_null(self)
if self.dtype.kind == "f":
            # Need to consider `np.nan` values in case
            # of a float column
result = result | libcudf.unary.is_nan(self)
return result
def isna(self):
"""Identify missing values in a Column. Alias for isnull.
"""
return self.isnull()
def notnull(self):
"""Identify non-missing values in a Column.
"""
result = libcudf.unary.is_valid(self)
if self.dtype.kind == "f":
            # Need to consider `np.nan` values in case
            # of a float column
result = result & libcudf.unary.is_non_nan(self)
return result
def notna(self):
"""Identify non-missing values in a Column. Alias for notnull.
"""
return self.notnull()
def find_first_value(self, value):
"""
Returns offset of first value that matches
"""
        # FIXME: Inefficient; may need a libcudf api
index = cudf.core.index.RangeIndex(0, stop=len(self))
indices = index.take(self == value)
if not len(indices):
raise ValueError("value not found")
return indices[0]
def find_last_value(self, value):
"""
Returns offset of last value that matches
"""
        # FIXME: Inefficient; may need a libcudf api
index = cudf.core.index.RangeIndex(0, stop=len(self))
indices = index.take(self == value)
if not len(indices):
raise ValueError("value not found")
return indices[-1]
def append(self, other):
return ColumnBase._concat([self, as_column(other)])
def quantile(self, q, interpolation, exact):
raise TypeError(f"cannot perform quantile with type {self.dtype}")
def median(self, skipna=None):
raise TypeError(f"cannot perform median with type {self.dtype}")
def take(self, indices, keep_index=True):
"""Return Column by taking values from the corresponding *indices*.
"""
# Handle zero size
if indices.size == 0:
return column_empty_like(self, newsize=0)
try:
return (
self.as_frame()
._gather(indices, keep_index=keep_index)
._as_column()
)
except RuntimeError as e:
if "out of bounds" in str(e):
raise IndexError(
f"index out of bounds for column of size {len(self)}"
) from e
raise
def isin(self, values):
"""Check whether values are contained in the Column.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result: Column
Column of booleans indicating if each element is in values.
Raises
-------
TypeError
If values is a string
"""
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
lhs = self
rhs = None
try:
# We need to convert values to same type as self,
# hence passing dtype=self.dtype
rhs = as_column(values, dtype=self.dtype)
# Short-circuit if rhs is all null.
if lhs.null_count == 0 and (rhs.null_count == len(rhs)):
return full(len(self), False, dtype="bool")
except ValueError:
# pandas functionally returns all False when cleansing via
# typecasting fails
return full(len(self), False, dtype="bool")
# If categorical, combine categories first
if is_categorical_dtype(lhs):
lhs_cats = lhs.cat().categories._values
rhs_cats = rhs.cat().categories._values
if not np.issubdtype(rhs_cats.dtype, lhs_cats.dtype):
# If they're not the same dtype, short-circuit if the values
# list doesn't have any nulls. If it does have nulls, make
# the values list a Categorical with a single null
if not rhs.has_nulls:
return full(len(self), False, dtype="bool")
rhs = as_column(pd.Categorical.from_codes([-1], categories=[]))
rhs = rhs.cat().set_categories(lhs_cats).astype(self.dtype)
lhs = cudf.DataFrame({"x": lhs, "orig_order": arange(len(lhs))})
rhs = cudf.DataFrame(
{"x": rhs, "bool": full(len(rhs), True, dtype="bool")}
)
res = lhs.merge(rhs, on="x", how="left").sort_values(by="orig_order")
res = res.drop_duplicates(subset="orig_order", ignore_index=True)
res = res._data["bool"].fillna(False)
return res
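    # Illustrative sketch of `isin` (not part of the original source; assumes
    # cudf is installed and a CUDA device is available):
    #
    # >>> col = as_column([1, 2, 3])
    # >>> col.isin([1, 3]).to_array()
    # array([ True, False,  True])
    # >>> col.isin("1")                      # a bare string is rejected
    # TypeError: only list-like objects are allowed to be passed ...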
def as_mask(self):
"""Convert booleans to bitmask
Returns
-------
Buffer
"""
if self.has_nulls:
raise ValueError("Column must have no nulls.")
return bools_to_mask(self)
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
return cudf.io.dlpack.to_dlpack(self)
@property
def is_unique(self):
return self.distinct_count() == len(self)
@property
def is_monotonic(self):
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
if not hasattr(self, "_is_monotonic_increasing"):
if self.has_nulls:
self._is_monotonic_increasing = False
else:
self._is_monotonic_increasing = self.as_frame()._is_sorted(
ascending=None, null_position=None
)
return self._is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
if not hasattr(self, "_is_monotonic_decreasing"):
if self.has_nulls:
self._is_monotonic_decreasing = False
else:
self._is_monotonic_decreasing = self.as_frame()._is_sorted(
ascending=[False], null_position=None
)
return self._is_monotonic_decreasing
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ["ix", "loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" % (side,)
)
# TODO: Handle errors/missing keys correctly
# Not currently using `kind` argument.
if side == "left":
return self.find_first_value(label, closest=True)
if side == "right":
return self.find_last_value(label, closest=True) + 1
def sort_by_values(self, ascending=True, na_position="last"):
col_inds = self.as_frame()._get_sorted_inds(ascending, na_position)
col_keys = self[col_inds]
return col_keys, col_inds
def distinct_count(self, method="sort", dropna=True):
if method != "sort":
msg = "non sort based distinct_count() not implemented yet"
raise NotImplementedError(msg)
return cpp_distinct_count(self, ignore_nulls=dropna)
def astype(self, dtype, **kwargs):
if is_categorical_dtype(dtype):
return self.as_categorical_column(dtype, **kwargs)
elif pd.api.types.pandas_dtype(dtype).type in {
np.str_,
np.object_,
str,
}:
return self.as_string_column(dtype, **kwargs)
elif is_list_dtype(dtype):
if not self.dtype == dtype:
raise NotImplementedError(
"Casting list columns not currently supported"
)
return self
elif np.issubdtype(dtype, np.datetime64):
return self.as_datetime_column(dtype, **kwargs)
elif np.issubdtype(dtype, np.timedelta64):
return self.as_timedelta_column(dtype, **kwargs)
else:
return self.as_numerical_column(dtype)
def as_categorical_column(self, dtype, **kwargs):
if "ordered" in kwargs:
ordered = kwargs["ordered"]
else:
ordered = False
sr = cudf.Series(self)
# Re-label self w.r.t. the provided categories
if isinstance(dtype, (cudf.CategoricalDtype, pd.CategoricalDtype)):
labels = sr.label_encoding(cats=dtype.categories)
if "ordered" in kwargs:
warnings.warn(
"Ignoring the `ordered` parameter passed in `**kwargs`, "
"will be using `ordered` parameter of CategoricalDtype"
)
return build_categorical_column(
categories=dtype.categories,
codes=labels._column,
mask=self.mask,
ordered=dtype.ordered,
)
cats = sr.unique().astype(sr.dtype)
label_dtype = min_unsigned_type(len(cats))
labels = sr.label_encoding(cats=cats, dtype=label_dtype, na_sentinel=1)
# columns include null index in factorization; remove:
if self.has_nulls:
cats = cats.dropna()
min_type = min_unsigned_type(len(cats), 8)
labels = labels - 1
if np.dtype(min_type).itemsize < labels.dtype.itemsize:
labels = labels.astype(min_type)
return build_categorical_column(
categories=cats._column,
codes=labels._column,
mask=self.mask,
ordered=ordered,
)
def as_numerical_column(self, dtype):
raise NotImplementedError
def as_datetime_column(self, dtype, **kwargs):
raise NotImplementedError
def as_timedelta_column(self, dtype, **kwargs):
raise NotImplementedError
def as_string_column(self, dtype, **kwargs):
raise NotImplementedError
def apply_boolean_mask(self, mask):
mask = as_column(mask, dtype="bool")
result = (
self.as_frame()._apply_boolean_mask(boolean_mask=mask)._as_column()
)
return result
def argsort(self, ascending=True, na_position="last"):
sorted_indices = self.as_frame()._get_sorted_inds(
ascending=ascending, na_position=na_position
)
return sorted_indices
@property
def __cuda_array_interface__(self):
output = {
"shape": (len(self),),
"strides": (self.dtype.itemsize,),
"typestr": self.dtype.str,
"data": (self.data_ptr, False),
"version": 1,
}
if self.nullable and self.has_nulls:
# Create a simple Python object that exposes the
# `__cuda_array_interface__` attribute here since we need to modify
# some of the attributes from the numba device array
mask = SimpleNamespace(
__cuda_array_interface__={
"shape": (len(self),),
"typestr": "<t1",
"data": (self.mask_ptr, True),
"version": 1,
}
)
output["mask"] = mask
return output
def __add__(self, other):
return self.binary_operator("add", other)
def __sub__(self, other):
return self.binary_operator("sub", other)
def __mul__(self, other):
return self.binary_operator("mul", other)
def __eq__(self, other):
return self.binary_operator("eq", other)
def __ne__(self, other):
return self.binary_operator("ne", other)
def __or__(self, other):
return self.binary_operator("or", other)
def __and__(self, other):
return self.binary_operator("and", other)
def __floordiv__(self, other):
return self.binary_operator("floordiv", other)
def __truediv__(self, other):
return self.binary_operator("truediv", other)
def __mod__(self, other):
return self.binary_operator("mod", other)
def __pow__(self, other):
return self.binary_operator("pow", other)
def __lt__(self, other):
return self.binary_operator("lt", other)
def __gt__(self, other):
return self.binary_operator("gt", other)
def __le__(self, other):
return self.binary_operator("le", other)
def __ge__(self, other):
return self.binary_operator("ge", other)
def searchsorted(
self, value, side="left", ascending=True, na_position="last"
):
values = as_column(value).as_frame()
return self.as_frame().searchsorted(
values, side, ascending=ascending, na_position=na_position
)
def unique(self):
"""
Get unique values in the data
"""
return (
self.as_frame()
.drop_duplicates(keep="first", ignore_index=True)
._as_column()
)
def serialize(self):
header = {}
frames = []
header["type-serialized"] = pickle.dumps(type(self))
header["dtype"] = self.dtype.str
data_header, data_frames = self.data.serialize()
header["data"] = data_header
frames.extend(data_frames)
if self.nullable:
mask_header, mask_frames = self.mask.serialize()
header["mask"] = mask_header
frames.extend(mask_frames)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
dtype = header["dtype"]
data = Buffer.deserialize(header["data"], [frames[0]])
mask = None
if "mask" in header:
mask = Buffer.deserialize(header["mask"], [frames[1]])
return build_column(data=data, dtype=dtype, mask=mask)
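    # Illustrative serialize/deserialize round trip (not part of the original
    # source; assumes cudf is installed and a CUDA device is available):
    #
    # >>> col = as_column([1, 2, None])
    # >>> header, frames = col.serialize()
    # >>> restored = ColumnBase.deserialize(header, frames)
    # >>> len(restored), restored.null_count
    # (3, 1)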
def min(self, skipna=None, dtype=None):
result_col = self._process_for_reduction(skipna=skipna)
if isinstance(result_col, ColumnBase):
return libcudf.reduce.reduce("min", result_col, dtype=dtype)
else:
return result_col
def max(self, skipna=None, dtype=None):
result_col = self._process_for_reduction(skipna=skipna)
if isinstance(result_col, ColumnBase):
return libcudf.reduce.reduce("max", result_col, dtype=dtype)
else:
return result_col
def sum(self, skipna=None, dtype=None, min_count=0):
raise TypeError(f"cannot perform sum with type {self.dtype}")
def product(self, skipna=None, dtype=None, min_count=0):
raise TypeError(f"cannot perform prod with type {self.dtype}")
def mean(self, skipna=None, dtype=None):
raise TypeError(f"cannot perform mean with type {self.dtype}")
def std(self, skipna=None, ddof=1, dtype=np.float64):
raise TypeError(f"cannot perform std with type {self.dtype}")
def var(self, skipna=None, ddof=1, dtype=np.float64):
raise TypeError(f"cannot perform var with type {self.dtype}")
def kurtosis(self, skipna=None):
raise TypeError(f"cannot perform kurt with type {self.dtype}")
def skew(self, skipna=None):
raise TypeError(f"cannot perform skew with type {self.dtype}")
def cov(self, other):
raise TypeError(
f"cannot perform covarience with types {self.dtype}, "
f"{other.dtype}"
)
def corr(self, other):
raise TypeError(
f"cannot perform corr with types {self.dtype}, {other.dtype}"
)
def nans_to_nulls(self):
if self.dtype.kind == "f":
col = self.fillna(np.nan)
newmask = libcudf.transform.nans_to_nulls(col)
return self.set_mask(newmask)
else:
return self
def _process_for_reduction(self, skipna=None, min_count=0):
skipna = True if skipna is None else skipna
if skipna:
result_col = self.nans_to_nulls()
if result_col.has_nulls:
result_col = result_col.dropna()
else:
if self.has_nulls:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
result_col = self
if min_count > 0:
valid_count = len(result_col) - result_col.null_count
if valid_count < min_count:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
elif min_count < 0:
warnings.warn(
f"min_count value cannot be negative({min_count}), will "
f"default to 0."
)
return result_col
def scatter_to_table(
self, row_indices, column_indices, names, nrows=None, ncols=None
):
"""
Scatters values from the column into a table.
Parameters
----------
row_indices
A column of the same size as `self` specifying the
row index to scatter each value to
column_indices
A column of the same size as `self` specifying the
column index to scatter each value to
names
The column names of the resulting table
Returns
-------
"""
if nrows is None:
nrows = 0
if len(row_indices) > 0:
nrows = int(row_indices.max() + 1)
if ncols is None:
ncols = 0
if len(column_indices) > 0:
ncols = int(column_indices.max() + 1)
if nrows * ncols == 0:
return cudf.core.frame.Frame({})
scatter_map = (column_indices * np.int32(nrows)) + row_indices
target = cudf.core.frame.Frame(
{None: column_empty_like(self, masked=True, newsize=nrows * ncols)}
)
target._data[None][scatter_map] = self
result_frames = target._split(range(nrows, nrows * ncols, nrows))
return cudf.core.frame.Frame(
{
name: next(iter(f._columns))
for name, f in zip(names, result_frames)
}
)
def column_empty_like(column, dtype=None, masked=False, newsize=None):
"""Allocate a new column like the given *column*
"""
if dtype is None:
dtype = column.dtype
row_count = len(column) if newsize is None else newsize
if (
hasattr(column, "dtype")
and is_categorical_dtype(column.dtype)
and dtype == column.dtype
):
codes = column_empty_like(column.codes, masked=masked, newsize=newsize)
return build_column(
data=None,
dtype=dtype,
mask=codes.base_mask,
children=(as_column(codes.base_data, dtype=codes.dtype),),
size=codes.size,
)
return column_empty(row_count, dtype, masked)
def column_empty_like_same_mask(column, dtype):
"""Create a new empty Column with the same length and the same mask.
Parameters
----------
dtype : np.dtype like
The dtype of the data buffer.
"""
result = column_empty_like(column, dtype)
if column.nullable:
result = result.set_mask(column.mask)
return result
def column_empty(row_count, dtype="object", masked=False):
"""Allocate a new column like the given row_count and dtype.
"""
dtype = pd.api.types.pandas_dtype(dtype)
children = ()
if is_categorical_dtype(dtype):
data = None
children = (
build_column(
data=Buffer.empty(row_count * np.dtype("int32").itemsize),
dtype="int32",
),
)
elif dtype.kind in "OU":
data = None
children = (
full(row_count + 1, 0, dtype="int32"),
build_column(
data=Buffer.empty(row_count * np.dtype("int8").itemsize),
dtype="int8",
),
)
else:
data = Buffer.empty(row_count * dtype.itemsize)
if masked:
mask = create_null_mask(row_count, state=MaskState.ALL_NULL)
else:
mask = None
return build_column(
data, dtype, mask=mask, size=row_count, children=children
)
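# Illustrative sketch of `column_empty` (not part of the original source;
# assumes cudf is installed and a CUDA device is available):
#
# >>> col = column_empty(3, dtype="int64", masked=True)
# >>> len(col), col.null_count               # masked=True starts out all-null
# (3, 3)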
def build_column(
data, dtype, mask=None, size=None, offset=0, null_count=None, children=()
):
"""
Build a Column of the appropriate type from the given parameters
Parameters
----------
data : Buffer
The data buffer (can be None if constructing certain Column
types like StringColumn, ListColumn, or CategoricalColumn)
dtype
The dtype associated with the Column to construct
mask : Buffer, optional
The mask buffer
size : int, optional
offset : int, optional
children : tuple, optional
"""
dtype = pd.api.types.pandas_dtype(dtype)
if is_categorical_dtype(dtype):
if not len(children) == 1:
raise ValueError(
"Must specify exactly one child column for CategoricalColumn"
)
if not isinstance(children[0], ColumnBase):
raise TypeError("children must be a tuple of Columns")
return cudf.core.column.CategoricalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
elif dtype.type is np.datetime64:
return cudf.core.column.DatetimeColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type is np.timedelta64:
return cudf.core.column.TimeDeltaColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type in (np.object_, np.str_):
return cudf.core.column.StringColumn(
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_list_dtype(dtype):
return cudf.core.column.ListColumn(
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
elif is_struct_dtype(dtype):
return cudf.core.column.StructColumn(
data=data,
size=size,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
else:
return cudf.core.column.NumericalColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
def build_categorical_column(
categories,
codes,
mask=None,
size=None,
offset=0,
null_count=None,
ordered=None,
):
"""
Build a CategoricalColumn
Parameters
----------
categories : Column
Column of categories
codes : Column
Column of codes, the size of the resulting Column will be
the size of `codes`
mask : Buffer
Null mask
size : int, optional
offset : int, optional
ordered : bool
Indicates whether the categories are ordered
"""
codes_dtype = min_unsigned_type(len(categories))
codes = as_column(codes)
if codes.dtype != codes_dtype:
codes = codes.astype(codes_dtype)
dtype = CategoricalDtype(categories=as_column(categories), ordered=ordered)
return build_column(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=(codes,),
)
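# Illustrative sketch of `build_categorical_column` (not part of the original
# source; assumes cudf is installed and a CUDA device is available):
#
# >>> cats = as_column(["a", "b", "c"])
# >>> codes = as_column([0, 2, 1, 0], dtype="uint8")
# >>> col = build_categorical_column(categories=cats, codes=codes, ordered=False)
# >>> col.dtype.ordered, len(col)
# (False, 4)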
def as_column(arbitrary, nan_as_null=None, dtype=None, length=None):
"""Create a Column from an arbitrary object
Parameters
----------
arbitrary : object
Object to construct the Column from. See *Notes*.
nan_as_null : bool, optional, default None
If None (default), treats NaN values in arbitrary as null if there is
no mask passed along with it. If True, combines the mask and NaNs to
form a new validity mask. If False, leaves NaN values as is.
dtype : optional
Optionally typecast the constructed Column to the given
dtype.
length : int, optional
If `arbitrary` is a scalar, broadcast into a Column of
the given length.
Returns
-------
A Column of the appropriate type and size.
Notes
-----
    Currently supported inputs are:
* ``Column``
* ``Series``
* ``Index``
* Scalars (can be broadcasted to a specified `length`)
* Objects exposing ``__cuda_array_interface__`` (e.g., numba device arrays)
    * Objects exposing ``__array_interface__`` (e.g., numpy arrays)
* pyarrow array
* pandas.Categorical objects
"""
if isinstance(arbitrary, ColumnBase):
if dtype is not None:
return arbitrary.astype(dtype)
else:
return arbitrary
elif isinstance(arbitrary, cudf.Series):
data = arbitrary._column
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, cudf.Index):
data = arbitrary._values
if dtype is not None:
data = data.astype(dtype)
elif type(arbitrary) is Buffer:
if dtype is None:
raise TypeError("dtype cannot be None if 'arbitrary' is a Buffer")
data = build_column(arbitrary, dtype=dtype)
elif hasattr(arbitrary, "__cuda_array_interface__"):
desc = arbitrary.__cuda_array_interface__
current_dtype = np.dtype(desc["typestr"])
arb_dtype = check_cast_unsupported_dtype(current_dtype)
if desc.get("mask", None) is not None:
# Extract and remove the mask from arbitrary before
# passing to cupy.asarray
mask = _mask_from_cuda_array_interface_desc(arbitrary)
arbitrary = SimpleNamespace(__cuda_array_interface__=desc.copy())
arbitrary.__cuda_array_interface__["mask"] = None
desc = arbitrary.__cuda_array_interface__
else:
mask = None
arbitrary = cupy.asarray(arbitrary)
if arb_dtype != current_dtype:
arbitrary = arbitrary.astype(arb_dtype)
current_dtype = arb_dtype
if (
desc["strides"] is not None
and not (arbitrary.itemsize,) == arbitrary.strides
):
arbitrary = cupy.ascontiguousarray(arbitrary)
data = _data_from_cuda_array_interface_desc(arbitrary)
col = build_column(data, dtype=current_dtype, mask=mask)
if dtype is not None:
col = col.astype(dtype)
if isinstance(col, cudf.core.column.CategoricalColumn):
return col
elif np.issubdtype(col.dtype, np.floating):
if nan_as_null or (mask is None and nan_as_null is None):
mask = libcudf.transform.nans_to_nulls(col.fillna(np.nan))
col = col.set_mask(mask)
elif np.issubdtype(col.dtype, np.datetime64):
if nan_as_null or (mask is None and nan_as_null is None):
col = utils.time_col_replace_nulls(col)
return col
elif isinstance(arbitrary, (pa.Array, pa.ChunkedArray)):
col = ColumnBase.from_arrow(arbitrary)
if isinstance(arbitrary, pa.NullArray):
if type(dtype) == str and dtype == "empty":
new_dtype = pd.api.types.pandas_dtype(
arbitrary.type.to_pandas_dtype()
)
else:
new_dtype = pd.api.types.pandas_dtype(dtype)
col = col.astype(new_dtype)
return col
elif isinstance(arbitrary, (pd.Series, pd.Categorical)):
if isinstance(arbitrary, pd.Series) and isinstance(
arbitrary.array, pd.core.arrays.masked.BaseMaskedArray
):
return as_column(arbitrary.array)
if is_categorical_dtype(arbitrary):
data = as_column(pa.array(arbitrary, from_pandas=True))
elif arbitrary.dtype == np.bool:
data = as_column(cupy.asarray(arbitrary), dtype=arbitrary.dtype)
elif arbitrary.dtype.kind in ("f"):
arb_dtype = check_cast_unsupported_dtype(arbitrary.dtype)
data = as_column(
cupy.asarray(arbitrary, dtype=arb_dtype),
nan_as_null=nan_as_null,
dtype=dtype,
)
elif arbitrary.dtype.kind in ("u", "i"):
data = as_column(
cupy.asarray(arbitrary), nan_as_null=nan_as_null, dtype=dtype
)
else:
data = as_column(
pa.array(arbitrary, from_pandas=nan_as_null),
dtype=arbitrary.dtype,
)
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, (pd.Timestamp, pd.Timedelta)):
# This will always treat NaTs as nulls since it's not technically a
# discrete value like NaN
data = as_column(pa.array(pd.Series([arbitrary]), from_pandas=True))
if dtype is not None:
data = data.astype(dtype)
elif np.isscalar(arbitrary) and not isinstance(arbitrary, memoryview):
length = length or 1
if (
(nan_as_null is True)
and isinstance(arbitrary, (np.floating, float))
and np.isnan(arbitrary)
):
arbitrary = None
if dtype is None:
dtype = np.dtype("float64")
data = as_column(
utils.scalar_broadcast_to(arbitrary, length, dtype=dtype)
)
if not nan_as_null:
if np.issubdtype(data.dtype, np.floating):
data = data.fillna(np.nan)
elif np.issubdtype(data.dtype, np.datetime64):
data = data.fillna(np.datetime64("NaT"))
elif hasattr(arbitrary, "__array_interface__"):
# CUDF assumes values are always contiguous
desc = arbitrary.__array_interface__
shape = desc["shape"]
arb_dtype = np.dtype(desc["typestr"])
if len(shape) > 1:
raise ValueError("Data must be 1-dimensional")
arbitrary = np.asarray(arbitrary)
        # Handle case that `arbitrary` elements are cupy arrays
if (
shape
and shape[0]
and hasattr(arbitrary[0], "__cuda_array_interface__")
):
return as_column(
cupy.asarray(arbitrary, dtype=arbitrary[0].dtype),
nan_as_null=nan_as_null,
dtype=dtype,
length=length,
)
if not arbitrary.flags["C_CONTIGUOUS"]:
arbitrary = np.ascontiguousarray(arbitrary)
if dtype is not None:
arbitrary = arbitrary.astype(dtype)
if arb_dtype.kind == "M":
time_unit = get_time_unit(arbitrary)
cast_dtype = time_unit in ("D", "W", "M", "Y")
if cast_dtype:
arbitrary = arbitrary.astype(np.dtype("datetime64[s]"))
buffer = Buffer(arbitrary.view("|u1"))
mask = None
if nan_as_null is None or nan_as_null is True:
data = as_column(
buffer, dtype=arbitrary.dtype, nan_as_null=nan_as_null
)
data = utils.time_col_replace_nulls(data)
mask = data.mask
data = cudf.core.column.datetime.DatetimeColumn(
data=buffer, mask=mask, dtype=arbitrary.dtype
)
elif arb_dtype.kind == "m":
time_unit = get_time_unit(arbitrary)
cast_dtype = time_unit in ("D", "W", "M", "Y")
if cast_dtype:
arbitrary = arbitrary.astype(np.dtype("timedelta64[s]"))
buffer = Buffer(arbitrary.view("|u1"))
mask = None
if nan_as_null is None or nan_as_null is True:
data = as_column(
buffer, dtype=arbitrary.dtype, nan_as_null=nan_as_null
)
data = utils.time_col_replace_nulls(data)
mask = data.mask
data = cudf.core.column.timedelta.TimeDeltaColumn(
data=buffer, mask=mask, dtype=arbitrary.dtype
)
elif arb_dtype.kind in ("O", "U"):
data = as_column(
pa.Array.from_pandas(arbitrary), dtype=arbitrary.dtype
)
# There is no cast operation available for pa.Array from int to
            # str, hence instead of handling it in the pa.Array block, we
            # have to type-cast here.
if dtype is not None:
data = data.astype(dtype)
elif arb_dtype.kind in ("f"):
arb_dtype = check_cast_unsupported_dtype(
arb_dtype if dtype is None else dtype
)
data = as_column(
cupy.asarray(arbitrary, dtype=arb_dtype),
nan_as_null=nan_as_null,
)
else:
data = as_column(cupy.asarray(arbitrary), nan_as_null=nan_as_null)
elif isinstance(arbitrary, pd.core.arrays.numpy_.PandasArray):
if is_categorical_dtype(arbitrary.dtype):
arb_dtype = arbitrary.dtype
else:
if arbitrary.dtype == pd.StringDtype():
arb_dtype = np.dtype("O")
else:
arb_dtype = check_cast_unsupported_dtype(arbitrary.dtype)
if arb_dtype != arbitrary.dtype.numpy_dtype:
arbitrary = arbitrary.astype(arb_dtype)
if arb_dtype.kind in ("O", "U"):
data = as_column(pa.Array.from_pandas(arbitrary), dtype=arb_dtype)
else:
data = as_column(
pa.array(
arbitrary,
from_pandas=True if nan_as_null is None else nan_as_null,
),
nan_as_null=nan_as_null,
)
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, memoryview):
data = as_column(
np.asarray(arbitrary), dtype=dtype, nan_as_null=nan_as_null
)
elif isinstance(arbitrary, cudf.Scalar):
data = libcudf.column.make_column_from_scalar(
arbitrary, length if length else 1
)
elif isinstance(arbitrary, pd.core.arrays.masked.BaseMaskedArray):
cudf_dtype = arbitrary._data.dtype
data = Buffer(arbitrary._data.view("|u1"))
data = as_column(data, dtype=cudf_dtype)
mask = arbitrary._mask
mask = bools_to_mask(as_column(mask).unary_operator("not"))
data = data.set_mask(mask)
else:
try:
data = as_column(
memoryview(arbitrary), dtype=dtype, nan_as_null=nan_as_null
)
except TypeError:
pa_type = None
np_type = None
try:
if dtype is not None:
if is_list_dtype(dtype):
data = pa.array(arbitrary)
if type(data) not in (pa.ListArray, pa.NullArray):
raise ValueError(
"Cannot create list column from given data"
)
return as_column(data, nan_as_null=nan_as_null)
dtype = pd.api.types.pandas_dtype(dtype)
if is_categorical_dtype(dtype):
raise TypeError
else:
np_type = np.dtype(dtype).type
if np_type == np.bool_:
pa_type = pa.bool_()
else:
pa_type = np_to_pa_dtype(np.dtype(dtype))
data = as_column(
pa.array(
arbitrary,
type=pa_type,
from_pandas=True
if nan_as_null is None
else nan_as_null,
),
dtype=dtype,
nan_as_null=nan_as_null,
)
except (pa.ArrowInvalid, pa.ArrowTypeError, TypeError):
if is_categorical_dtype(dtype):
sr = pd.Series(arbitrary, dtype="category")
data = as_column(sr, nan_as_null=nan_as_null, dtype=dtype)
elif np_type == np.str_:
sr = pd.Series(arbitrary, dtype="str")
data = as_column(sr, nan_as_null=nan_as_null)
else:
native_dtype = dtype
if dtype is None and pd.api.types.infer_dtype(
arbitrary
) in ("mixed", "mixed-integer"):
native_dtype = "object"
data = np.asarray(
arbitrary,
dtype=native_dtype
if native_dtype is None
else np.dtype(native_dtype),
)
data = as_column(
data, dtype=dtype, nan_as_null=nan_as_null
)
return data
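# Illustrative sketch of `as_column` (not part of the original source; assumes
# cudf is installed and a CUDA device is available):
#
# >>> as_column([1, 2, 3]).dtype
# dtype('int64')
# >>> as_column(np.array([1.0, np.nan]), nan_as_null=True).null_count
# 1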
def column_applymap(udf, column, out_dtype):
"""Apply an element-wise function to transform the values in the Column.
Parameters
----------
udf : function
Wrapped by numba jit for call on the GPU as a device function.
column : Column
The source column.
out_dtype : numpy.dtype
The dtype for use in the output.
Returns
-------
result : Column
"""
core = njit(udf)
results = column_empty(len(column), dtype=out_dtype)
values = column.data_array_view
if column.nullable:
# For masked columns
@cuda.jit
def kernel_masked(values, masks, results):
i = cuda.grid(1)
# in range?
if i < values.size:
# valid?
if utils.mask_get(masks, i):
# call udf
results[i] = core(values[i])
masks = column.mask_array_view
kernel_masked.forall(len(column))(values, masks, results)
else:
# For non-masked columns
@cuda.jit
def kernel_non_masked(values, results):
i = cuda.grid(1)
# in range?
if i < values.size:
# call udf
results[i] = core(values[i])
kernel_non_masked.forall(len(column))(values, results)
return as_column(results)
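# Illustrative sketch of `column_applymap` (not part of the original source;
# assumes cudf, numba and a CUDA device, since the UDF is jit-compiled and
# launched as a kernel):
#
# >>> col = as_column([1, 2, 3])
# >>> out = column_applymap(lambda x: x * 10, col, np.dtype("int64"))
# >>> out.to_array()
# array([10, 20, 30])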
def _data_from_cuda_array_interface_desc(obj):
desc = obj.__cuda_array_interface__
ptr = desc["data"][0]
nelem = desc["shape"][0] if len(desc["shape"]) > 0 else 1
dtype = np.dtype(desc["typestr"])
data = Buffer(data=ptr, size=nelem * dtype.itemsize, owner=obj)
return data
def _mask_from_cuda_array_interface_desc(obj):
desc = obj.__cuda_array_interface__
mask = desc.get("mask", None)
if mask is not None:
desc = mask.__cuda_array_interface__
ptr = desc["data"][0]
nelem = desc["shape"][0]
typestr = desc["typestr"]
typecode = typestr[1]
if typecode == "t":
mask_size = bitmask_allocation_size_bytes(nelem)
mask = Buffer(data=ptr, size=mask_size, owner=obj)
elif typecode == "b":
col = as_column(mask)
mask = bools_to_mask(col)
else:
raise NotImplementedError(
f"Cannot infer mask from typestr {typestr}"
)
return mask
def serialize_columns(columns):
"""
Return the headers and frames resulting
from serializing a list of Column
Parameters
----------
columns : list
list of Columns to serialize
Returns
-------
headers : list
list of header metadata for each Column
frames : list
list of frames
"""
headers = []
frames = []
if len(columns) > 0:
header_columns = [c.serialize() for c in columns]
headers, column_frames = zip(*header_columns)
for f in column_frames:
frames.extend(f)
return headers, frames
def deserialize_columns(headers, frames):
"""
Construct a list of Columns from a list of headers
and frames.
"""
columns = []
for meta in headers:
col_frame_count = meta["frame_count"]
col_typ = pickle.loads(meta["type-serialized"])
colobj = col_typ.deserialize(meta, frames[:col_frame_count])
columns.append(colobj)
# Advance frames
frames = frames[col_frame_count:]
return columns
def arange(start, stop=None, step=1, dtype=None):
"""
Returns a column with evenly spaced values within a given interval.
Values are generated within the half-open interval [start, stop).
The first three arguments are mapped like the range built-in function,
i.e. start and step are optional.
Parameters
----------
start : int/float
Start of the interval.
stop : int/float, default is None
Stop of the interval.
step : int/float, default 1
Step width between each pair of consecutive values.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
cudf.core.column.NumericalColumn
Examples
--------
>>> import cudf
>>> col = cudf.core.column.arange(2, 7, 1, dtype='int16')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7ff7998f8b90>
>>> cudf.Series(col)
0 2
1 3
2 4
3 5
4 6
dtype: int16
"""
if stop is None:
stop = start
start = 0
if step is None:
step = 1
size = int(np.ceil((stop - start) / step))
return libcudf.filling.sequence(
size,
as_device_scalar(start, dtype=dtype),
as_device_scalar(step, dtype=dtype),
)
def full(size, fill_value, dtype=None):
"""
Returns a column of given size and dtype, filled with a given value.
Parameters
----------
size : int
size of the expected column.
fill_value : scalar
A scalar value to fill a new array.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
Column
Examples
--------
>>> import cudf
>>> col = cudf.core.column.full(size=5, fill_value=7, dtype='int8')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7fa0912e8b90>
>>> cudf.Series(col)
0 7
1 7
2 7
3 7
4 7
dtype: int8
"""
return libcudf.column.make_column_from_scalar(
cudf.Scalar(fill_value, dtype), size
)
|
py | 1a3285483850edafd34c1a38974836e1be993a75 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ResourceInsightProjectedUtilization(object):
"""
Projected utilization(High/low) for cpu or storage
"""
def __init__(self, **kwargs):
"""
Initializes a new ResourceInsightProjectedUtilization object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param low:
The value to assign to the low property of this ResourceInsightProjectedUtilization.
:type low: list[oci.opsi.models.ResourceInsightProjectedUtilizationItem]
:param high:
The value to assign to the high property of this ResourceInsightProjectedUtilization.
:type high: list[oci.opsi.models.ResourceInsightProjectedUtilizationItem]
"""
self.swagger_types = {
'low': 'list[ResourceInsightProjectedUtilizationItem]',
'high': 'list[ResourceInsightProjectedUtilizationItem]'
}
self.attribute_map = {
'low': 'low',
'high': 'high'
}
self._low = None
self._high = None
@property
def low(self):
"""
**[Required]** Gets the low of this ResourceInsightProjectedUtilization.
List of db ids with low usage
:return: The low of this ResourceInsightProjectedUtilization.
:rtype: list[oci.opsi.models.ResourceInsightProjectedUtilizationItem]
"""
return self._low
@low.setter
def low(self, low):
"""
Sets the low of this ResourceInsightProjectedUtilization.
List of db ids with low usage
:param low: The low of this ResourceInsightProjectedUtilization.
:type: list[oci.opsi.models.ResourceInsightProjectedUtilizationItem]
"""
self._low = low
@property
def high(self):
"""
**[Required]** Gets the high of this ResourceInsightProjectedUtilization.
List of db ids with high usage
:return: The high of this ResourceInsightProjectedUtilization.
:rtype: list[oci.opsi.models.ResourceInsightProjectedUtilizationItem]
"""
return self._high
@high.setter
def high(self, high):
"""
Sets the high of this ResourceInsightProjectedUtilization.
List of db ids with high usage
:param high: The high of this ResourceInsightProjectedUtilization.
:type: list[oci.opsi.models.ResourceInsightProjectedUtilizationItem]
"""
self._high = high
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
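    # Illustrative usage (not part of the generated model; the empty lists are
    # placeholder values):
    #
    # >>> util = ResourceInsightProjectedUtilization(low=[], high=[])
    # >>> util.low, util.high
    # ([], [])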
|
py | 1a32859ea2b105f4abc46a0c032157bb4d765909 | import copy
import logging
import os.path as osp
import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from pycocotools import mask as maskUtils
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import SizeMismatchError
from detectron2.structures import BoxMode
from .augmentation import RandomCropWithInstance
from .detection_utils import (annotations_to_instances, build_augmentation,
transform_instance_annotations)
import re
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapperWithBasis"]
logger = logging.getLogger(__name__)
def save_tmp_image(image, tmp_dir="tmp", img_name=None):
import os
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
if img_name is None:
tmp_id = len(os.listdir(tmp_dir))
img_name = "%d.png" % tmp_id
import cv2
cv2.imwrite("tmp/%s" % img_name, image)
def segmToRLE(segm, img_size):
h, w = img_size
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm["counts"]) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = segm
return rle
def segmToMask(segm, img_size):
rle = segmToRLE(segm, img_size)
m = maskUtils.decode(rle)
return m
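# Illustrative sketch (not part of the original file; assumes pycocotools is
# installed; the polygon below is a made-up COCO-style segmentation):
#
# >>> segm = [[0, 0, 0, 10, 10, 10, 10, 0]]   # one polygon, xy pairs
# >>> segmToMask(segm, (20, 20)).shape        # (h, w) binary mask
# (20, 20)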
def read_image_and_resize(file_name, shape, format=None):
"""
Read an image into the given format.
Will apply rotation and flipping if the image has such exif information.
Args:
file_name (str): image file path
format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
Returns:
image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for
supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
"""
with open(file_name, "rb") as f:
image = Image.open(f)
image = image.resize(shape)
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = utils._apply_exif_orientation(image)
return utils.convert_PIL_to_numpy(image, format)
def normalization(heatmap, target_min=-1, target_max=1):
input_min = np.min(heatmap[np.nonzero(heatmap)])
heatmap[np.nonzero(heatmap)] = heatmap[np.nonzero(heatmap)] - input_min
input_max = np.max(heatmap)
heatmap = heatmap / input_max * (target_max - target_min) + target_min
return heatmap
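# Illustrative behaviour of `normalization` (not part of the original file;
# note that the final rescale is applied to the whole array, so original
# zeros also end up at target_min):
#
# >>> normalization(np.array([[0.0, 2.0], [4.0, 0.0]]))
# array([[-1., -1.],
#        [ 1., -1.]])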
class DatasetMapperWithBasis(DatasetMapper):
"""
    This mapper extends the default Detectron2 mapper to read an additional basis semantic label
"""
def __init__(self, cfg, is_train=True):
super().__init__(cfg, is_train)
# Rebuild augmentations
logger.info(
"Rebuilding the augmentations. The previous augmentations will be overridden."
)
self.augmentation = build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
self.augmentation.insert(
0,
RandomCropWithInstance(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.CROP_INSTANCE,
),
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
self.basis_loss_on = cfg.MODEL.BASIS_MODULE.LOSS_ON
self.ann_set = cfg.MODEL.BASIS_MODULE.ANN_SET
self.stcseg_enabled = cfg.MODEL.STCSEG.ENABLED
self.use_depth = cfg.MODEL.STCSEG.BOUNDARY.USE_DEPTH
self.use_optical_flow = cfg.MODEL.STCSEG.BOUNDARY.USE_OPTICAL_FLOW
if self.stcseg_enabled:
self.use_instance_mask = False
self.recompute_boxes = False
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
try:
image = utils.read_image(
dataset_dict["file_name"], format=self.image_format
)
# print("%s image shape:" % dataset_dict["file_name"], image.shape)
except Exception as e:
print(dataset_dict["file_name"])
print(e)
raise e
try:
utils.check_image_size(dataset_dict, image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
image_wh = (image.shape[1], image.shape[0])
if (image_wh[1], image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
image = image.transpose(1, 0, 2)
else:
raise e
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(
dataset_dict.pop("sem_seg_file_name"), "L"
).squeeze(2)
else:
sem_seg_gt = None
boxes = np.asarray(
[
BoxMode.convert(
instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS
)
for instance in dataset_dict["annotations"]
]
)
aug_input = T.StandardAugInput(image, boxes=boxes, sem_seg=sem_seg_gt)
transforms = aug_input.apply_augmentations(self.augmentation)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
# save_tmp_image(image, img_name=dataset_dict["file_name"].split('/')[-1].split('.')[0] + '.png')
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image.transpose(2, 0, 1))
)
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# ---------------------- Add Depth ---------------------------
if self.use_depth: # For kitti object
# print(dataset_dict["file_name"])
try:
if re.search("kitti_mot", dataset_dict["file_name"]):
# depth_image = utils.read_image(
# dataset_dict["file_name"].replace("image_02", "image_depth").replace(".png", "_disp.jpeg"), format=self.image_format
# )
# depth_image = np.load(dataset_dict["file_name"].replace("training", "Depth/training").replace(".png", "_disp.npy"))
# depth_image = depth_image[0].transpose(1,2,0) * (1, 1, 1) * 10
depth_image = utils.read_image(
dataset_dict["file_name"].replace("image_02", "depth"), format=self.image_format
)
elif re.search("kitti", dataset_dict["file_name"]):
depth_image = utils.read_image(
dataset_dict["file_name"].replace("image_2", "depth"), format=self.image_format
)
elif re.search("ytvis", dataset_dict["file_name"]):
depth_image = utils.read_image(
dataset_dict["file_name"].replace("JPEGImages", "Depth").replace(".jpg", ".png"), format=self.image_format
)
# print("%s depth shape:" % dataset_dict["file_name"], depth_image.shape)
# assert (depth_image.shape[1], depth_image.shape[0]) == (dataset_dict["width"], dataset_dict["height"]), dataset_dict["file_name"] + ": " + str(depth_image.shape)
else:
print(dataset_dict["file_name"])
print("!!!!!!! Please use kitti or ytvis !!!!!!!")
except Exception as e:
print("Depth file for ", dataset_dict["file_name"])
print(e)
raise e
try:
utils.check_image_size(dataset_dict, depth_image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
depth_image_wh = (depth_image.shape[1], depth_image.shape[0])
if (depth_image_wh[1], depth_image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
depth_image = depth_image.transpose(1, 0, 2)
else:
raise e
# aug_depth_input = T.StandardAugInput(depth_image, boxes=boxes, sem_seg=sem_seg_gt)
# depth_transforms = aug_depth_input.apply_augmentations(self.augmentation)
# depth_image = aug_depth_input.image
depth_image = transforms.apply_image(depth_image)
# save_tmp_image(depth_image, img_name=dataset_dict["file_name"].split('/')[-1].split('.')[0] + '_depth.png')
dataset_dict["depth_image"] = torch.as_tensor(
np.ascontiguousarray(depth_image.transpose(2, 0, 1))
)
# ---------------------- Add Depth ---------------------------
# ---------------------- Add Flow ---------------------------
if self.use_optical_flow: # For kitti object
# print(dataset_dict["file_name"])
try:
if re.search("kitti_mot", dataset_dict["file_name"]):
flow_image_path = dataset_dict["file_name"].replace("image_02", "optical_flow")
elif re.search("ytvis", dataset_dict["file_name"]):
flow_image_path = dataset_dict["file_name"].replace("JPEGImages", "OpticalFlow").replace(".jpg", ".png")
                else:
                    print(dataset_dict["file_name"])
                    print("!!!!!!! Please use kitti mot or ytvis !!!!!!!")
                    # Fail fast here; otherwise flow_image_path is undefined below.
                    raise ValueError(
                        "Unsupported dataset for optical flow input: " + dataset_dict["file_name"]
                    )
flow_image = read_image_and_resize(
flow_image_path, shape=(dataset_dict["width"], dataset_dict["height"]),
format=self.image_format
)
            except Exception as e:
                # flow_image_path may be unbound if the dataset check above failed,
                # so report the source image path instead.
                print("Optical flow file for ", dataset_dict["file_name"])
                print(e)
raise e
try:
utils.check_image_size(dataset_dict, flow_image)
except SizeMismatchError as e:
expected_wh = (dataset_dict["width"], dataset_dict["height"])
flow_image_wh = (flow_image.shape[1], flow_image.shape[0])
if (flow_image_wh[1], flow_image_wh[0]) == expected_wh:
print("transposing image {}".format(dataset_dict["file_name"]))
flow_image = flow_image.transpose(1, 0, 2)
else:
raise e
# aug_flow_input = T.StandardAugInput(flow_image, boxes=boxes, sem_seg=sem_seg_gt)
# flow_transforms = aug_flow_input.apply_augmentations(self.augmentation)
# flow_image = aug_flow_input.image
flow_image = transforms.apply_image(flow_image)
# save_tmp_image(flow_image, img_name=dataset_dict["file_name"].split('/')[-1].split('.')[0] + '_flow.png')
dataset_dict["flow_image"] = torch.as_tensor(
np.ascontiguousarray(flow_image.transpose(2, 0, 1))
)
# ---------------------- Add Flow ---------------------------
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
dataset_dict.pop("pano_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
        # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal
        # to the intersection of the original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
if self.basis_loss_on and self.is_train:
# load basis supervisions
if self.ann_set == "coco":
basis_sem_path = (
dataset_dict["file_name"]
.replace("train2017", "thing_train2017")
.replace("image/train", "thing_train")
)
else:
basis_sem_path = (
dataset_dict["file_name"]
.replace("coco", "lvis")
.replace("train2017", "thing_train")
)
# change extension to npz
basis_sem_path = osp.splitext(basis_sem_path)[0] + ".npz"
basis_sem_gt = np.load(basis_sem_path)["mask"]
basis_sem_gt = transforms.apply_segmentation(basis_sem_gt)
basis_sem_gt = torch.as_tensor(basis_sem_gt.astype("long"))
dataset_dict["basis_sem"] = basis_sem_gt
return dataset_dict
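# A rough usage sketch (not part of the original file). The constructor signature,
# config flags and dataset_dict keys below are assumptions for illustration only;
# "MapperCls" stands for the mapper class defined above.
#   mapper = MapperCls(cfg, is_train=True)
#   out = mapper({
#       "file_name": ".../kitti_mot/training/image_02/0000/000000.png",
#       "width": 1242, "height": 375,
#       "annotations": [...],   # detectron2-format instance annotations
#   })
#   # out["image"] is a CHW torch.Tensor; with MODEL.STCSEG.BOUNDARY.USE_DEPTH /
#   # USE_OPTICAL_FLOW enabled, out also carries "depth_image" / "flow_image".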
|
py | 1a3285ac5098326e50a4faf3e909a6fbf989693e | import re
import json
from time import time
from random import random
USER_AGENTS = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/601.1.10 (KHTML, like Gecko) Version/8.0.5 Safari/601.1.10",
"Mozilla/5.0 (Windows NT 6.3; WOW64; ; NCT50_AAP285C84A1328) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6"
]
def now():
return int(time()*1000)
def get_json(text):
return json.loads(re.sub(r"for.*(.*;.*;.*).*;", '', text.encode('utf-8').decode("unicode-escape"), 1))
def digit_to_char(digit):
if digit < 10:
return str(digit)
return chr(ord('a') + digit - 10)
def str_base(number,base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digit_to_char(m)
return digit_to_char(m)
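# Illustrative examples for the base-conversion helpers above (not part of the
# original module):
#   str_base(255, 16)  -> 'ff'
#   str_base(-10, 2)   -> '-1010'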
def generateMessageID(client_id=None):
k = now()
l = int(random() * 4294967295)
return ("<%s:%s-%[email protected]>" % (k, l, client_id));
|
py | 1a328654a091bd2f6f480e9e85ba9ad5c09f4e71 | """
Django settings for log_viewer_demo project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
from log_viewer_demo.logger import LOGGING
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-3k68@_zbsw#f$%tvg$g=h3r4_#rk8_5aamhwgz1gyf997fsye1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
LOGIN_URL = '/admin/login/'
LOGIN_REDIRECT_URL = '/'
LOGGING = LOGGING
# Django log viewer
LOG_VIEWER_FILES_DIR = 'logs'
LOG_VIEWER_PAGE_LENGTH = 25
LOG_VIEWER_MAX_READ_LINES = 1000
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'log_viewer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'log_viewer_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'log_viewer_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
py | 1a32865a4cbac71572ad76b1b5e1c84619e5647e | #Constantly Changing, just a plugin I use to debug whatever is broken atm
import sys
import threading
from spock.mcmap import mapdata
from spock.mcp import mcdata
from spock.utils import pl_announce
class DebugPlugin:
def __init__(self, ploader, settings):
for packet in mcdata.hashed_structs:
ploader.reg_event_handler(packet, self.debug)
#ploader.reg_event_handler('tick', self.tick)
#ploader.reg_event_handler('w_map_chunk', self.map)
#ploader.reg_event_handler('w_block_update', self.block_update)
def debug(self, name, packet):
if packet.ident() == (mcdata.PLAY_STATE, mcdata.SERVER_TO_CLIENT, 0x26):
packet.data['data'] = b''
#print(packet)
#print(packet)
def block_update(self, name, data):
print('Block Updated!')
print(
'Block is:', mapdata.blocks[data.id]['display_name']+',',
'Biome:', mapdata.biomes[data.biome]['display_name']
)
print('Block Light:', str(data.block_light)+',', 'Sky Light:', data.sky_light)
def map(self, name, data):
print(data)
def tick(self, name, data):
print('tick!')
print('Current threads:', threading.active_count()) |
py | 1a3286f90f8f9e3a55496b14e9fd227e036b984c | __version__ = '0.0.1'
__dev__ = True
from .AlphaVantage import AlphaVantage
|
py | 1a32889cd89e41a4cae594b62f69d1f0666220ec | # system_project_folder.py (c) 2010 Dany Lebel (Axon_D)
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Project Folder",
"author": "Dany Lebel (Axon_D), Spirou4D",
"version": (0, 3, 1),
"blender": (2, 80, 0),
"location": "Info -> File Menu -> Project Folder",
"description": "Open the project folder in a file browser",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/System/Project_Folder",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "System"}
import bpy
import os
from platform import system as currentOS
class ProjectFolder(bpy.types.Operator):
"""Open the Project Folder in a file Browser"""
bl_idname = "file.project_folder"
bl_label = "Project Folder"
def execute(self, context):
try :
path = self.path()
except ValueError:
self.report({'INFO'}, "No project folder yet")
return {'FINISHED'}
bpy.ops.wm.path_open(filepath=path)
return {'FINISHED'}
def path(self):
filepath = bpy.data.filepath
relpath = bpy.path.relpath(filepath)
path = filepath[0: -1 * (relpath.__len__() - 2)]
return path
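    # Illustrative example (paths are assumptions): with the .blend file saved at
    # /home/user/project/scenes/shot.blend, bpy.path.relpath() returns
    # '//shot.blend', so the slice above strips the file name and path() yields
    # '/home/user/project/scenes/'.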
# Registration
def menu_func(self, context):
self.layout.operator(
ProjectFolder.bl_idname,
text="Project Folder",
icon='FILEBROWSER')
def register():
bpy.utils.register_class(ProjectFolder)
bpy.types.TOPBAR_MT_file.prepend(menu_func)
def unregister():
bpy.utils.unregister_class(ProjectFolder)
bpy.types.TOPBAR_MT_file.remove(menu_func)
if __name__ == "__main__":
register()
|
py | 1a3288d32a5c697fe88de1e94d972179b61b1a84 | import os
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
config = Configuration('ndimage', parent_package, top_path)
include_dirs = ['src',
get_include(),
os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]
config.add_extension("_nd_image",
sources=["src/nd_image.c",
"src/ni_filters.c",
"src/ni_fourier.c",
"src/ni_interpolation.c",
"src/ni_measure.c",
"src/ni_morphology.c",
"src/ni_splines.c",
"src/ni_support.c"],
include_dirs=include_dirs,
**numpy_nodepr_api)
# Cython wants the .c and .pyx to have the underscore.
config.add_extension("_ni_label",
sources=["src/_ni_label.c",],
include_dirs=['src']+[get_include()])
config.add_extension("_ctest",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
**numpy_nodepr_api)
config.add_extension("_cytest",
sources=["src/_cytest.c"])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
|
py | 1a328995a8060071283ed74361b6ef5d69082753 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for allowing TF ops to work with Keras Functional API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.saving import model_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
def _single_op_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = gen_nn_ops.relu(x)
return keras.Model(inputs, outputs)
def _single_identity_op_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = array_ops.identity(x)
assert 'Identity' in outputs.name
return keras.Model(inputs, outputs)
def _multiple_ops_at_end():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
outputs = gen_nn_ops.relu(x)
return keras.Model(inputs, outputs)
def _single_op_in_middle():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _multiple_ops_in_middle():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
x = gen_nn_ops.relu(x)
x = gen_nn_ops.relu(x)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _single_standalone_branch():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10)(inputs)
outputs = x * 2
return keras.Model(inputs, outputs)
def _single_op_with_attrs():
inputs = keras.Input(shape=(10,))
x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _multiple_uses():
inputs = keras.Input(shape=(10,))
x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
x1 = keras.layers.Dense(10)(x)
x2 = keras.layers.Dense(10)(x)
outputs = x1 + x2
return keras.Model(inputs, outputs)
def _op_with_tensor_list():
inputs = keras.Input(shape=(10,))
x = array_ops.concat([inputs, inputs], axis=1)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
def _add_n():
inputs = keras.Input(shape=(10,))
outputs = math_ops.add_n([inputs, inputs, inputs])
return keras.Model(inputs, outputs)
def _reuse_op():
inputs = keras.Input(shape=(10,))
# This op needs to be checked multiple times.
x = gen_nn_ops.relu(inputs)
y = keras.layers.Dense(10)(x)
x2 = x * 2
y2 = keras.layers.Dense(10)(x2)
outputs = y + y2
return keras.Model(inputs, outputs)
def _float64_op():
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(10, dtype='float64')(inputs)
x = gen_nn_ops.relu(x)
assert x.dtype == 'float64', 'x has dtype: %s' % x.dtype
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs, outputs)
class MyAdd(keras.layers.Layer):
def call(self, x, y):
return x + y
def _layer_with_tensor_arg():
inputs = keras.Input(shape=(10,))
x = inputs * 2
outputs = MyAdd()(inputs, x)
return keras.Model(inputs, outputs)
class LayerWithLayer(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(name='bias', dtype='float32')
self.layer = keras.layers.Dense(10)
def call(self, inputs):
inputs = inputs * self.bias
# Would throw an error if Keras History was created here.
return self.layer(inputs)
def _inner_layer():
inputs = keras.Input(shape=(10,))
outputs = LayerWithLayer()(inputs)
return keras.Model(inputs, outputs)
def _reuse_ancillary_layer():
inputs = (keras.Input(shape=(5,)), keras.Input(shape=(5,)))
base_model = keras.Sequential([
keras.layers.Dense(3, input_shape=(5,)),
])
outputs = base_model(inputs[0])
model = keras.Model(inputs, outputs)
# The second input is only involved in ancillary layers.
outputs_delta = outputs - base_model(0.5 * inputs[1])
l2_loss = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(outputs_delta), -1))
model.add_loss(l2_loss)
model.add_metric(l2_loss, aggregation='mean', name='l2_loss')
l1_loss = 0.01 * math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.abs(outputs_delta), -1))
model.add_loss(l1_loss)
model.add_metric(l1_loss, aggregation='mean', name='l1_loss')
return model
@keras_parameterized.run_all_keras_modes
class AutoLambdaTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('single_op_at_end', _single_op_at_end),
('single_identity_op_at_end', _single_identity_op_at_end),
('multiple_ops_at_end', _multiple_ops_at_end),
('single_op_in_middle', _single_op_in_middle),
('multiple_ops_in_middle', _multiple_ops_in_middle),
('single_standalone_branch', _single_standalone_branch),
('single_op_with_attrs', _single_op_with_attrs),
('multiple_uses', _multiple_uses),
('op_with_tensor_list', _op_with_tensor_list),
('add_n', _add_n),
('_reuse_op', _reuse_op),
('_float64_op', _float64_op),
('_inner_layer', _inner_layer),
('_reuse_ancillary_layer', _reuse_ancillary_layer),
('_layer_with_tensor_arg', _layer_with_tensor_arg),
)
def test_autolambda(self, model_fn):
model = model_fn()
model.compile(
adam.Adam(0.001),
'mse',
run_eagerly=testing_utils.should_run_eagerly())
np_inputs = nest.map_structure(
lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.inputs)
np_outputs = nest.map_structure(
lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.outputs)
model.fit(np_inputs, np_outputs, batch_size=2)
model(np_inputs) # Test calling the model directly on inputs.
new_model = keras.Model.from_config(
model.get_config(),
custom_objects={
'LayerWithLayer': LayerWithLayer,
'MyAdd': MyAdd
})
new_model.compile(
adam.Adam(0.001),
'mse',
run_eagerly=testing_utils.should_run_eagerly())
new_model.fit(np_inputs, np_outputs, batch_size=2)
new_model(np_inputs) # Test calling the new model directly on inputs.
# Assert that metrics are preserved and in the right order.
self.assertAllEqual(model.metrics_names, new_model.metrics_names)
# Assert that layer names don't change.
self.assertAllEqual([layer.name for layer in model.layers],
[layer.name for layer in new_model.layers])
def test_numerical_correctness_simple(self):
x = ops.convert_to_tensor_v2([[-1., 0., -2., 1.]])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
y = self.evaluate(model(x))
self.assertAllClose(y, [[0., 0., 0., 1.]])
def test_numerical_correctness_with_attrs(self):
x = ops.convert_to_tensor_v2([[1.5, 1.5], [2.5, 3.5]])
inputs = keras.Input(shape=(10,))
outputs = math_ops.reduce_mean(inputs, axis=1)
model = keras.Model(inputs, outputs)
y = self.evaluate(model(x))
self.assertAllClose(y, [1.5, 3.])
def test_numerical_correctness_serialization(self):
x = ops.convert_to_tensor_v2([-1., 0., -2., 1.])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model1 = keras.Model(inputs, outputs)
y1 = self.evaluate(model1(x))
model2 = keras.Model.from_config(model1.get_config())
y2 = self.evaluate(model2(x))
self.assertAllClose(y1, y2)
def test_gradient_tape_in_function(self):
z = keras.Input((1,))
x = math_ops.matmul(z, constant_op.constant(2.0, shape=(1, 1)))
x = math_ops.reduce_mean(x, axis=0, keepdims=True)
h = gen_nn_ops.relu(x)
m = keras.Model(z, h)
@def_function.function()
def f(x):
with backprop.GradientTape() as t:
t.watch(x)
z = m(x ** 2)
grads = t.gradient(z, x)
return grads
self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
constant_op.constant(40.0, shape=(1, 1)))
f = def_function.function(f)
self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
constant_op.constant(40.0, shape=(1, 1)))
def test_no_tracking(self):
if not context.executing_eagerly():
x = constant_op.constant(1.0, shape=(10, 10))
keras.layers.Dense(1)(x)
self.assertTrue(x._keras_history_checked)
def test_timing_scales_linearly(self):
def _construct_graph_of_size(size):
start = time.time()
x = keras.backend.placeholder(shape=(10, 4))
for _ in range(size):
x = keras.layers.Dense(4)(x)
x = gen_nn_ops.relu(x)
end = time.time()
return end - start
size_50 = _construct_graph_of_size(50)
size_500 = _construct_graph_of_size(500)
# Check construction time grows approx. linearly with size.
e = 3 # Fudge factor to prevent flakiness.
self.assertLess(size_500, (10 * e) * size_50)
def test_no_mask_tracking(self):
x = keras.backend.placeholder((10, 10))
y = keras.layers.Masking(0.)(x)
self.assertTrue(y._keras_mask._keras_history_checked)
def test_built(self):
inputs = keras.Input(shape=(10,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
for layer in model.layers:
self.assertTrue(layer.built)
# Test something that requires Layers to be built.
model.summary()
def test_json_serialization(self):
inputs = keras.Input(shape=(4,), dtype='uint8')
outputs = math_ops.cast(inputs, 'float32') / 4.
model = model_config.model_from_json(keras.Model(inputs, outputs).to_json())
self.assertAllEqual(
self.evaluate(model(np.array([0, 64, 128, 192], np.uint8))),
[0., 16., 32., 48.])
model.summary()
class InputInEagerTest(test.TestCase):
"""Tests ops on graph tensors in Eager runtime.
Input returns graph/symbolic tensors in the Eager runtime (this
happens, for example, with tensors returned from Keras layers). These
should be routed to the graph-style branch of these ops (b/134715641)
"""
def test_identity(self):
with context.eager_mode():
x = keras.Input(shape=(1,))
self.assertTrue(hasattr(x, 'graph'))
ident = array_ops.identity(x)
# This is now a graph tensor, and should be able to continue in graphland
self.assertIn('Identity', ident.name)
def test_size(self):
with context.eager_mode():
x = keras.Input(shape=(3,))
self.assertTrue(hasattr(x, 'graph'))
self.assertAllEqual(x.get_shape().as_list(), [None, 3])
sz = array_ops.size(x)
# This is now a graph tensor, and should be able to continue in graphland
self.assertIn('Size', sz.name)
if __name__ == '__main__':
test.main()
|
py | 1a3289c7e3b745af65c889fcda1a93bee11de7b4 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
class SharedRunningStats(object):
def __init__(self, replicated_device=None, epsilon=1e-2, name="", create_ops=True):
self.sess = None
self.name = name
self.replicated_device = replicated_device
self.epsilon = epsilon
self.ops_were_created = False
if create_ops:
with tf.device(replicated_device):
self.create_ops()
def create_ops(self, shape=[1], clip_values=None):
self.clip_values = clip_values
with tf.variable_scope(self.name):
self._sum = tf.get_variable(
dtype=tf.float64,
initializer=tf.constant_initializer(0.0),
name="running_sum", trainable=False, shape=shape, validate_shape=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
self._sum_squared = tf.get_variable(
dtype=tf.float64,
initializer=tf.constant_initializer(self.epsilon),
name="running_sum_squared", trainable=False, shape=shape, validate_shape=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES])
self._count = tf.get_variable(
dtype=tf.float64,
shape=(),
initializer=tf.constant_initializer(self.epsilon),
name="count", trainable=False, collections=[tf.GraphKeys.GLOBAL_VARIABLES])
self._shape = None
self._mean = tf.div(self._sum, self._count, name="mean")
self._std = tf.sqrt(tf.maximum((self._sum_squared - self._count*tf.square(self._mean))
/ tf.maximum(self._count-1, 1), self.epsilon), name="stdev")
self.tf_mean = tf.cast(self._mean, 'float32')
self.tf_std = tf.cast(self._std, 'float32')
self.new_sum = tf.placeholder(dtype=tf.float64, name='sum')
self.new_sum_squared = tf.placeholder(dtype=tf.float64, name='var')
self.newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
self._inc_sum = tf.assign_add(self._sum, self.new_sum, use_locking=True)
self._inc_sum_squared = tf.assign_add(self._sum_squared, self.new_sum_squared, use_locking=True)
self._inc_count = tf.assign_add(self._count, self.newcount, use_locking=True)
self.raw_obs = tf.placeholder(dtype=tf.float64, name='raw_obs')
self.normalized_obs = (self.raw_obs - self._mean) / self._std
if self.clip_values is not None:
self.clipped_obs = tf.clip_by_value(self.normalized_obs, self.clip_values[0], self.clip_values[1])
self.ops_were_created = True
def set_session(self, sess):
self.sess = sess
def push(self, x):
x = x.astype('float64')
self.sess.run([self._inc_sum, self._inc_sum_squared, self._inc_count],
feed_dict={
self.new_sum: x.sum(axis=0).ravel(),
self.new_sum_squared: np.square(x).sum(axis=0).ravel(),
self.newcount: np.array(len(x), dtype='float64')
})
if self._shape is None:
self._shape = x.shape
@property
def n(self):
return self.sess.run(self._count)
@property
def mean(self):
return self.sess.run(self._mean)
@property
def var(self):
return self.std ** 2
@property
def std(self):
return self.sess.run(self._std)
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, val):
self._shape = val
self.new_sum.set_shape(val)
self.new_sum_squared.set_shape(val)
self.tf_mean.set_shape(val)
self.tf_std.set_shape(val)
self._sum.set_shape(val)
self._sum_squared.set_shape(val)
def normalize(self, batch):
if self.clip_values is not None:
return self.sess.run(self.clipped_obs, feed_dict={self.raw_obs: batch})
else:
return self.sess.run(self.normalized_obs, feed_dict={self.raw_obs: batch})
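# A minimal usage sketch (not part of the original file); the shape, clip range
# and TF session setup are assumptions:
#   stats = SharedRunningStats(name="obs_stats", create_ops=False)
#   stats.create_ops(shape=[4], clip_values=(-5.0, 5.0))
#   sess = tf.Session()
#   sess.run(tf.global_variables_initializer())
#   stats.set_session(sess)
#   stats.push(np.random.randn(32, 4))               # updates running sum / sum_sq / count
#   batch = stats.normalize(np.random.randn(32, 4))  # (x - mean) / std, then clipped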
|
py | 1a328bc05531f8898ff4c48312d59289a51c1eb7 | #!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from lib.utility import misc
class TemplateSnippet(object):
def __init__(self, snippet_name = None, source_url = None, order = None):
self._snippet_name = snippet_name
self._source_url = source_url
self._order = order
@property
def snippet_name(self):
return self._snippet_name
@property
def source_url(self):
return self._source_url
@property
def order(self):
return self._order
@snippet_name.setter
def snippet_name(self, snippet_name):
self._snippet_name = snippet_name
@source_url.setter
def source_url(self, source_url):
self._source_url = source_url
@order.setter
def order(self, order):
self._order = order
class Version(object):
def __init__(self, application_name = None, version_label = None):
self._application_name = application_name
self._version_label = version_label
@property
def application_name(self):
return self._application_name
@property
def version_label(self):
return self._version_label
@application_name.setter
def application_name(self, application_name):
self._application_name = application_name
@version_label.setter
def version_label(self, version_label):
self._version_label = version_label
class TemplateSpecification(object):
def __init__(self, template_source = None, template_snippets = None):
if template_source is None:
self._template_source = TemplateSource()
else:
self._template_source = template_source
if template_snippets is None:
self._template_snippets = list()
else:
self._template_snippets = template_snippets
@property
def template_source(self):
return self._template_source
@property
def template_snippets(self):
return self._template_snippets
@template_source.setter
def template_source(self, template_source):
self._template_source = template_source
@template_snippets.setter
def template_snippets(self, snippets):
self._template_snippets = snippets
class TemplateSource(object):
def __init__(self, solution_stack_name = None):
self._solution_stack_name = solution_stack_name
@property
def solution_stack_name(self):
return self._solution_stack_name
@solution_stack_name.setter
def solution_stack_name(self, solution_stack_name):
self._solution_stack_name = solution_stack_name
class Request(object):
'''
Convert and store EB request parameters
'''
def __init__(self):
self._request = dict()
def _set_to_list(self, name_set):
name_list = []
if isinstance(name_set, set):
for name in name_set:
name_list.append(str(name))
else:
name_list.append(str(name_set))
return name_list
def _check_boolean(self, switch):
if isinstance(switch, bool):
if switch:
return 'true'
else:
return 'false'
else:
return switch
def __repr__(self):
try:
text = 'Request API: {0}. \nParameters: [\n'.format(self._request['Operation'])
except:
text = 'Parameters:[\n'
for key,value in self._request.items():
text = text + ' {0} : {1}\n'.format(key, value)
text = text + ']'
return text
def get_dict(self):
return self._request
def set_operation(self, name):
self._request['Operation'] = misc.to_unicode(name)
def set_app_name(self, name):
self._request['ApplicationName'] = misc.to_unicode(name)
def set_app_names(self, name_set):
name_list = self._set_to_list(name_set)
for i in range(len(name_list)):
self._request['ApplicationNames.member.' + misc.to_unicode(i + 1)] \
= misc.to_unicode(name_list[i])
def set_version_label(self, name):
self._request['VersionLabel'] = misc.to_unicode(name)
def set_description(self, description):
self._request['Description'] = misc.to_unicode(description)
def set_s3bucket(self, bucket):
self._request['SourceBundle.S3Bucket'] = misc.to_unicode(bucket)
def set_s3key(self, key):
self._request['SourceBundle.S3Key'] = misc.to_unicode(key)
def set_auto_create_app(self, switch):
switch = self._check_boolean(switch)
self._request['AutoCreateApplication'] = misc.to_unicode(switch)
def set_env_name(self, name):
self._request['EnvironmentName'] = misc.to_unicode(name)
def set_env_id(self, env_id):
self._request['EnvironmentId'] = misc.to_unicode(env_id)
def set_env_names(self, name_set):
name_list = self._set_to_list(name_set)
for i in range(len(name_list)):
self._request['EnvironmentNames.member.' + misc.to_unicode(i + 1)] \
= misc.to_unicode(name_list[i])
def set_env_ids(self, id_set):
id_list = self._set_to_list(id_set)
for i in range(len(id_list)):
self._request['EnvironmentIds.member.' + misc.to_unicode(i + 1)] \
= misc.to_unicode(id_list[i])
def set_cname(self, name):
self._request['CNAMEPrefix'] = misc.to_unicode(name)
def set_source_configuration(self, name):
self._request['SourceConfiguration'] = misc.to_unicode(name)
def set_template(self, name):
self._request['TemplateName'] = misc.to_unicode(name)
def set_solution_stack(self, name):
self._request['SolutionStackName'] = misc.to_unicode(name)
def set_options(self, options_to_describe):
index = 1
for namespace, options in options_to_describe.items():
for option_name in options:
self._request['Options.member.' + misc.to_unicode(index) + '.Namespace'] \
= misc.to_unicode(namespace)
self._request['Options.member.' + misc.to_unicode(index) + '.OptionName'] \
= misc.to_unicode(option_name)
index = index + 1
def set_option_settings(self, option_settings):
index = 1
for namespace, options in option_settings.items():
for option_name, value in options.items():
self._request['OptionSettings.member.' + misc.to_unicode(index) + '.Namespace'] \
= misc.to_unicode(namespace)
self._request['OptionSettings.member.' + misc.to_unicode(index) + '.OptionName'] \
= misc.to_unicode(option_name)
self._request['OptionSettings.member.' + misc.to_unicode(index) + '.Value'] \
= misc.to_unicode(value)
index = index + 1
def set_options_to_remove(self, options_to_remove):
index = 1
for namespace, options in options_to_remove.items():
for option_name in options:
self._request['OptionsToRemove.member.' + misc.to_unicode(index) + '.Namespace'] \
= misc.to_unicode(namespace)
self._request['OptionsToRemove.member.' + misc.to_unicode(index) + '.OptionName'] \
= misc.to_unicode(option_name)
index = index + 1
def set_include_deleted(self, switch):
switch = self._check_boolean(switch)
self._request['IncludeDeleted'] = misc.to_unicode(switch)
def set_included_deleted_backto(self, datetime):
self._request['IncludedDeletedBackTo'] = misc.to_unicode(datetime)
def set_start_time(self, datetime):
self._request['StartTime'] = misc.to_unicode(datetime)
def set_end_time(self, datetime):
self._request['EndTime'] = misc.to_unicode(datetime)
def set_max_records(self, num):
self._request['MaxRecords'] = misc.to_unicode(num)
def set_next_token(self, token):
self._request['NextToken'] = misc.to_unicode(token)
def set_requst_id(self, request_id):
self._request['RequestId'] = misc.to_unicode(request_id)
def set_severity(self, severity):
self._request['Severity'] = misc.to_unicode(severity)
def set_terminate_env(self, switch):
self._request['TerminateEnvByForce'] = misc.to_unicode(switch)
def set_delete_source_bundle(self, switch):
self._request['DeleteSourceBundle'] = misc.to_unicode(switch)
def set_terminate_resources(self, switch):
self._request['TerminateResources'] = misc.to_unicode(switch)
def set_template_specification(self, template_spec):
#TemplateSource
if template_spec.template_source is not None:
ts = template_spec.template_source
if ts.solution_stack_name is not None:
self._request['TemplateSpecification.TemplateSource.SolutionStackName'] \
= misc.to_unicode(ts.solution_stack_name)
#Snippets
if template_spec.template_snippets is not None:
for i, snippet in enumerate(template_spec.template_snippets):
if snippet.snippet_name is not None:
self._request['TemplateSpecification.TemplateSnippets.member.' \
+ misc.to_unicode(i + 1)+'.SnippetName'] \
= misc.to_unicode(snippet.snippet_name)
if snippet.source_url is not None:
self._request['TemplateSpecification.TemplateSnippets.member.' \
+ misc.to_unicode(i + 1)+'.SourceUrl'] \
= misc.to_unicode(snippet.source_url)
if snippet.order is not None:
self._request['TemplateSpecification.TemplateSnippets.member.' \
+ misc.to_unicode(i + 1)+'.Order'] \
= misc.to_unicode(snippet.order)
def set_tier(self, environment_tier):
self._request['Tier.Name'] = misc.to_unicode(environment_tier.name)
self._request['Tier.Type'] = misc.to_unicode(environment_tier.type)
self._request['Tier.Version'] = misc.to_unicode(environment_tier.version)
def set_info_type(self, info_type):
self._request['InfoType'] = misc.to_unicode(info_type)
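    # Illustrative usage (operation and application name are assumptions):
    #   request = Request()
    #   request.set_operation('DescribeApplications')
    #   request.set_app_name('my-app')
    #   request.get_dict()
    #   # -> {'Operation': 'DescribeApplications', 'ApplicationName': 'my-app'}
    #   # (values are passed through misc.to_unicode)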
class Response(object):
def __init__(self, request_id, result = None, next_token = None):
self._request_id = request_id
self._result = result
self._next_token = next_token
def __repr__(self):
return 'API Response.\n Request ID: {0}\n Results: {1}'.\
format(self.request_id, misc.collection_to_string(self._result))
@property
def request_id(self):
return self._request_id
@property
def result(self):
return self._result
@property
def next_token(self):
return self._next_token
|
py | 1a328be2565a1ac99408d4cfd0c4b42877573179 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo
from wtforms import ValidationError
from flask_wtf.file import FileField,FileAllowed
from flask_login import current_user
from app.models import User
class LoginForm(FlaskForm):
email = StringField('Email',validators=[DataRequired(),Email()])
password = PasswordField('Password',validators=[DataRequired()])
submit = SubmitField('Log In')
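    # A minimal usage sketch inside a view function (the surrounding route is an
    # assumption, not part of this module):
    #   form = LoginForm()
    #   if form.validate_on_submit():
    #       user = User.query.filter_by(email=form.email.data).first()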
class RegistrationForm(FlaskForm):
email = StringField('Email',validators=[DataRequired(),Email()])
username = StringField('UserName',validators=[DataRequired()])
password = PasswordField('Password',validators=[DataRequired(),EqualTo('pass_confirm',message='Passwords must match!')])
pass_confirm = PasswordField('Confirm Password',validators=[DataRequired()])
submit = SubmitField('Register!')
    def validate_email(self, field):
        # WTForms only auto-runs inline validators named validate_<fieldname>,
        # so the former check_* names were never invoked.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Your email has been registered already!')
    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Your username has been registered already!')
class UpdateUserForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(),Email()])
username = StringField('Username', validators=[DataRequired()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update')
    def validate_email(self, field):
        # Renamed from check_email so WTForms actually invokes it.
        # Check if not None for that user email!
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Your email has been registered already!')
    def validate_username(self, field):
        # Check if not None for that username!
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Sorry, that username is taken!')
|
py | 1a328d2ba915c8b73e263333c77f456de0a07050 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetSnapshotResult:
"""
A collection of values returned by getSnapshot.
"""
def __init__(__self__, allocated_storage=None, availability_zone=None, db_instance_identifier=None, db_snapshot_arn=None, db_snapshot_identifier=None, encrypted=None, engine=None, engine_version=None, include_public=None, include_shared=None, iops=None, kms_key_id=None, license_model=None, most_recent=None, option_group_name=None, port=None, snapshot_create_time=None, snapshot_type=None, source_db_snapshot_identifier=None, source_region=None, status=None, storage_type=None, vpc_id=None, id=None):
if allocated_storage and not isinstance(allocated_storage, float):
raise TypeError("Expected argument 'allocated_storage' to be a float")
__self__.allocated_storage = allocated_storage
"""
Specifies the allocated storage size in gigabytes (GB).
"""
if availability_zone and not isinstance(availability_zone, str):
raise TypeError("Expected argument 'availability_zone' to be a str")
__self__.availability_zone = availability_zone
"""
Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.
"""
if db_instance_identifier and not isinstance(db_instance_identifier, str):
raise TypeError("Expected argument 'db_instance_identifier' to be a str")
__self__.db_instance_identifier = db_instance_identifier
if db_snapshot_arn and not isinstance(db_snapshot_arn, str):
raise TypeError("Expected argument 'db_snapshot_arn' to be a str")
__self__.db_snapshot_arn = db_snapshot_arn
"""
The Amazon Resource Name (ARN) for the DB snapshot.
"""
if db_snapshot_identifier and not isinstance(db_snapshot_identifier, str):
raise TypeError("Expected argument 'db_snapshot_identifier' to be a str")
__self__.db_snapshot_identifier = db_snapshot_identifier
if encrypted and not isinstance(encrypted, bool):
raise TypeError("Expected argument 'encrypted' to be a bool")
__self__.encrypted = encrypted
"""
Specifies whether the DB snapshot is encrypted.
"""
if engine and not isinstance(engine, str):
raise TypeError("Expected argument 'engine' to be a str")
__self__.engine = engine
"""
Specifies the name of the database engine.
"""
if engine_version and not isinstance(engine_version, str):
raise TypeError("Expected argument 'engine_version' to be a str")
__self__.engine_version = engine_version
"""
Specifies the version of the database engine.
"""
if include_public and not isinstance(include_public, bool):
raise TypeError("Expected argument 'include_public' to be a bool")
__self__.include_public = include_public
if include_shared and not isinstance(include_shared, bool):
raise TypeError("Expected argument 'include_shared' to be a bool")
__self__.include_shared = include_shared
if iops and not isinstance(iops, float):
raise TypeError("Expected argument 'iops' to be a float")
__self__.iops = iops
"""
Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.
"""
if kms_key_id and not isinstance(kms_key_id, str):
raise TypeError("Expected argument 'kms_key_id' to be a str")
__self__.kms_key_id = kms_key_id
"""
The ARN for the KMS encryption key.
"""
if license_model and not isinstance(license_model, str):
raise TypeError("Expected argument 'license_model' to be a str")
__self__.license_model = license_model
"""
License model information for the restored DB instance.
"""
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
__self__.most_recent = most_recent
if option_group_name and not isinstance(option_group_name, str):
raise TypeError("Expected argument 'option_group_name' to be a str")
__self__.option_group_name = option_group_name
"""
Provides the option group name for the DB snapshot.
"""
if port and not isinstance(port, float):
raise TypeError("Expected argument 'port' to be a float")
__self__.port = port
if snapshot_create_time and not isinstance(snapshot_create_time, str):
raise TypeError("Expected argument 'snapshot_create_time' to be a str")
__self__.snapshot_create_time = snapshot_create_time
"""
Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).
"""
if snapshot_type and not isinstance(snapshot_type, str):
raise TypeError("Expected argument 'snapshot_type' to be a str")
__self__.snapshot_type = snapshot_type
if source_db_snapshot_identifier and not isinstance(source_db_snapshot_identifier, str):
raise TypeError("Expected argument 'source_db_snapshot_identifier' to be a str")
__self__.source_db_snapshot_identifier = source_db_snapshot_identifier
"""
The DB snapshot Arn that the DB snapshot was copied from. It only has value in case of cross customer or cross region copy.
"""
if source_region and not isinstance(source_region, str):
raise TypeError("Expected argument 'source_region' to be a str")
__self__.source_region = source_region
"""
The region that the DB snapshot was created in or copied from.
"""
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
__self__.status = status
"""
Specifies the status of this DB snapshot.
"""
if storage_type and not isinstance(storage_type, str):
raise TypeError("Expected argument 'storage_type' to be a str")
__self__.storage_type = storage_type
"""
Specifies the storage type associated with DB snapshot.
"""
if vpc_id and not isinstance(vpc_id, str):
raise TypeError("Expected argument 'vpc_id' to be a str")
__self__.vpc_id = vpc_id
"""
Specifies the ID of the VPC associated with the DB snapshot.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
class AwaitableGetSnapshotResult(GetSnapshotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSnapshotResult(
allocated_storage=self.allocated_storage,
availability_zone=self.availability_zone,
db_instance_identifier=self.db_instance_identifier,
db_snapshot_arn=self.db_snapshot_arn,
db_snapshot_identifier=self.db_snapshot_identifier,
encrypted=self.encrypted,
engine=self.engine,
engine_version=self.engine_version,
include_public=self.include_public,
include_shared=self.include_shared,
iops=self.iops,
kms_key_id=self.kms_key_id,
license_model=self.license_model,
most_recent=self.most_recent,
option_group_name=self.option_group_name,
port=self.port,
snapshot_create_time=self.snapshot_create_time,
snapshot_type=self.snapshot_type,
source_db_snapshot_identifier=self.source_db_snapshot_identifier,
source_region=self.source_region,
status=self.status,
storage_type=self.storage_type,
vpc_id=self.vpc_id,
id=self.id)
def get_snapshot(db_instance_identifier=None,db_snapshot_identifier=None,include_public=None,include_shared=None,most_recent=None,snapshot_type=None,opts=None):
"""
Use this data source to get information about a DB Snapshot for use when provisioning DB instances
> **NOTE:** This data source does not apply to snapshots created on Aurora DB clusters.
See the [`rds.ClusterSnapshot` data source](https://www.terraform.io/docs/providers/aws/d/db_cluster_snapshot.html) for DB Cluster snapshots.
:param str db_instance_identifier: Returns the list of snapshots created by the specific db_instance
:param str db_snapshot_identifier: Returns information on a specific snapshot_id.
:param bool include_public: Set this value to true to include manual DB snapshots that are public and can be
copied or restored by any AWS account, otherwise set this value to false. The default is `false`.
:param bool include_shared: Set this value to true to include shared manual DB snapshots from other
AWS accounts that this AWS account has been given permission to copy or restore, otherwise set this value to false.
The default is `false`.
:param bool most_recent: If more than one result is returned, use the most
recent Snapshot.
:param str snapshot_type: The type of snapshots to be returned. If you don't specify a SnapshotType
value, then both automated and manual snapshots are returned. Shared and public DB snapshots are not
included in the returned results by default. Possible values are, `automated`, `manual`, `shared` and `public`.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/d/db_snapshot.html.markdown.
"""
__args__ = dict()
__args__['dbInstanceIdentifier'] = db_instance_identifier
__args__['dbSnapshotIdentifier'] = db_snapshot_identifier
__args__['includePublic'] = include_public
__args__['includeShared'] = include_shared
__args__['mostRecent'] = most_recent
__args__['snapshotType'] = snapshot_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getSnapshot:getSnapshot', __args__, opts=opts).value
return AwaitableGetSnapshotResult(
allocated_storage=__ret__.get('allocatedStorage'),
availability_zone=__ret__.get('availabilityZone'),
db_instance_identifier=__ret__.get('dbInstanceIdentifier'),
db_snapshot_arn=__ret__.get('dbSnapshotArn'),
db_snapshot_identifier=__ret__.get('dbSnapshotIdentifier'),
encrypted=__ret__.get('encrypted'),
engine=__ret__.get('engine'),
engine_version=__ret__.get('engineVersion'),
include_public=__ret__.get('includePublic'),
include_shared=__ret__.get('includeShared'),
iops=__ret__.get('iops'),
kms_key_id=__ret__.get('kmsKeyId'),
license_model=__ret__.get('licenseModel'),
most_recent=__ret__.get('mostRecent'),
option_group_name=__ret__.get('optionGroupName'),
port=__ret__.get('port'),
snapshot_create_time=__ret__.get('snapshotCreateTime'),
snapshot_type=__ret__.get('snapshotType'),
source_db_snapshot_identifier=__ret__.get('sourceDbSnapshotIdentifier'),
source_region=__ret__.get('sourceRegion'),
status=__ret__.get('status'),
storage_type=__ret__.get('storageType'),
vpc_id=__ret__.get('vpcId'),
id=__ret__.get('id'))
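# Illustrative usage (the instance identifier is an assumption):
#   snapshot = get_snapshot(db_instance_identifier="my-db-instance",
#                           most_recent=True)
#   snapshot.db_snapshot_arn   # ARN of the most recent snapshot for that instance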
|
py | 1a328e5e786ea1f700c597aacc53bf7386d0ca07 | from abc import ABC, abstractmethod
from datetime import datetime
class BasePricingGen(ABC):
def __init__(self):
pass
@abstractmethod
def gen(self):
pass
def standard_filename(from_time: datetime, to_time: datetime):
filename = from_time.strftime("%Y.%m.%dT%H.%M.%S") + "-" + to_time.strftime("%Y.%m.%dT%H.%M.%S")
filename = "LIVE-[" + filename + "]"
return filename
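# Illustrative example (times are assumptions):
#   standard_filename(datetime(2021, 1, 2, 3, 4, 5), datetime(2021, 1, 2, 4, 4, 5))
#   -> 'LIVE-[2021.01.02T03.04.05-2021.01.02T04.04.05]'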
|
py | 1a328efac15d7df4cde0d2898e382fc95d13d058 | import os
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmpose.datasets.pipelines import Compose
from mmpose.models import build_posenet
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
def init_pose_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a pose model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
model = build_posenet(config.model)
if checkpoint is not None:
# load model checkpoint
load_checkpoint(model, checkpoint, map_location=device)
# save the config in the model for convenience
model.cfg = config
model.to(device)
model.eval()
return model
def _xyxy2xywh(bbox_xyxy):
"""Transform the bbox format from x1y1x2y2 to xywh.
Args:
bbox_xyxy (np.ndarray): Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
Returns:
np.ndarray: Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
"""
bbox_xywh = bbox_xyxy.copy()
bbox_xywh[:, 2] = bbox_xywh[:, 2] - bbox_xywh[:, 0] + 1
bbox_xywh[:, 3] = bbox_xywh[:, 3] - bbox_xywh[:, 1] + 1
return bbox_xywh
def _xywh2xyxy(bbox_xywh):
"""Transform the bbox format from xywh to x1y1x2y2.
Args:
bbox_xywh (ndarray): Bounding boxes (with scores),
shaped (n, 4) or (n, 5). (left, top, width, height, [score])
Returns:
np.ndarray: Bounding boxes (with scores), shaped (n, 4) or
(n, 5). (left, top, right, bottom, [score])
"""
bbox_xyxy = bbox_xywh.copy()
bbox_xyxy[:, 2] = bbox_xyxy[:, 2] + bbox_xyxy[:, 0] - 1
bbox_xyxy[:, 3] = bbox_xyxy[:, 3] + bbox_xyxy[:, 1] - 1
return bbox_xyxy
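# Illustrative round trip for the two helpers above (values are assumptions):
#   _xyxy2xywh(np.array([[10., 20., 59., 79., 0.9]]))
#   -> [[10., 20., 50., 60., 0.9]]          # width/height use the +1 convention
#   _xywh2xyxy(np.array([[10., 20., 50., 60., 0.9]]))
#   -> [[10., 20., 59., 79., 0.9]]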
def _box2cs(cfg, box):
"""This encodes bbox(x,y,w,h) into (center, scale)
Args:
x, y, w, h
Returns:
tuple: A tuple containing center and scale.
- np.ndarray[float32](2,): Center of the bbox (x, y).
- np.ndarray[float32](2,): Scale of the bbox w & h.
"""
x, y, w, h = box[:4]
input_size = cfg.data_cfg['image_size']
aspect_ratio = input_size[0] / input_size[1]
center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
# pixel std is 200.0
scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)
scale = scale * 1.25
return center, scale
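# Illustrative example (the config image_size is an assumption): with
# cfg.data_cfg['image_size'] == [192, 256] (aspect_ratio 0.75) and
# box == [0, 0, 100, 200]:
#   center -> [ 50., 100.]
#   scale  -> [0.9375, 1.25]   # w padded to 150, then /200 and *1.25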
class LoadImage:
"""A simple pipeline to load image."""
def __init__(self, color_type='color', channel_order='rgb'):
self.color_type = color_type
self.channel_order = channel_order
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img_or_path'], str):
results['image_file'] = results['img_or_path']
else:
results['image_file'] = ''
img = mmcv.imread(results['img_or_path'], self.color_type,
self.channel_order)
results['img'] = img
return results
def _inference_single_pose_model(model, img_or_path, bbox, dataset):
"""Inference a single bbox.
num_keypoints: K
Args:
model (nn.Module): The loaded pose model.
image_name (str | np.ndarray):Image_name
bbox (list | np.ndarray): Bounding boxes (with scores),
shaped (4, ) or (5, ). (left, top, width, height, [score])
dataset (str): Dataset name.
Returns:
ndarray[Kx3]: Predicted pose x, y, score.
"""
cfg = model.cfg
device = next(model.parameters()).device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
assert len(bbox) in [4, 5]
center, scale = _box2cs(cfg, bbox)
flip_pairs = None
if dataset == 'TopDownCocoDataset' or dataset == 'TopDownOCHumanDataset':
flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
[13, 14], [15, 16]]
elif dataset == 'TopDownCocoWholeBodyDataset':
body = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16]]
foot = [[17, 20], [18, 21], [19, 22]]
face = [[23, 39], [24, 38], [25, 37], [26, 36], [27, 35], [28, 34],
[29, 33], [30, 32], [40, 49], [41, 48], [42, 47], [43, 46],
[44, 45], [54, 58], [55, 57], [59, 68], [60, 67], [61, 66],
[62, 65], [63, 70], [64, 69], [71, 77], [72, 76], [73, 75],
[78, 82], [79, 81], [83, 87], [84, 86], [88, 90]]
hand = [[91, 112], [92, 113], [93, 114], [94, 115], [95, 116],
[96, 117], [97, 118], [98, 119], [99, 120], [100, 121],
[101, 122], [102, 123], [103, 124], [104, 125], [105, 126],
[106, 127], [107, 128], [108, 129], [109, 130], [110, 131],
[111, 132]]
flip_pairs = body + foot + face + hand
elif dataset == 'TopDownAicDataset':
flip_pairs = [[0, 3], [1, 4], [2, 5], [6, 9], [7, 10], [8, 11]]
elif (dataset == 'TopDownOneHand10KDataset'
or dataset == 'TopDownFreiHandDataset'
or dataset == 'TopDownPanopticDataset'):
flip_pairs = []
else:
raise NotImplementedError()
# prepare data
data = {
'img_or_path':
img_or_path,
'center':
center,
'scale':
scale,
'bbox_score':
bbox[4] if len(bbox) == 5 else 1,
'dataset':
dataset,
'joints_3d':
np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),
'joints_3d_visible':
np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32),
'rotation':
0,
'ann_info': {
'image_size': cfg.data_cfg['image_size'],
'num_joints': cfg.data_cfg['num_joints'],
'flip_pairs': flip_pairs
}
}
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'].data[0]
# forward the model
with torch.no_grad():
all_preds, _, _ = model(
return_loss=False, img=data['img'], img_metas=data['img_metas'])
return all_preds[0]
def inference_top_down_pose_model(model,
img_or_path,
person_bboxes,
bbox_thr=None,
format='xywh',
dataset='TopDownCocoDataset'):
"""Inference a single image with a list of person bounding boxes.
num_people: P
num_keypoints: K
bbox height: H
bbox width: W
Args:
model (nn.Module): The loaded pose model.
        img_or_path (str | np.ndarray): Image filename or loaded image.
        person_bboxes (np.ndarray[P x 4] or [P x 5]): Each person bounding box
            shaped (4, ) or (5, ), contains 4 box coordinates (and score).
bbox_thr: Threshold for bounding boxes. Only bboxes with higher scores
will be fed into the pose detector. If bbox_thr is None, ignore it.
format: bbox format ('xyxy' | 'xywh'). Default: 'xywh'.
'xyxy' means (left, top, right, bottom),
'xywh' means (left, top, width, height).
dataset (str): Dataset name, e.g. 'TopDownCocoDataset'.
Returns:
list[dict]: The bbox & pose info.
Each item in the list is a dictionary,
containing the bbox: (left, top, right, bottom, [score])
and the pose (ndarray[Kx3]): x, y, score
"""
    # only two kinds of bbox formats are supported.
assert format in ['xyxy', 'xywh']
# transform the bboxes format to xywh
if format == 'xyxy':
person_bboxes = _xyxy2xywh(np.array(person_bboxes))
pose_results = []
if len(person_bboxes) > 0:
if bbox_thr is not None:
person_bboxes = person_bboxes[person_bboxes[:, 4] > bbox_thr]
for bbox in person_bboxes:
pose = _inference_single_pose_model(model, img_or_path, bbox,
dataset)
pose_results.append({
'bbox':
_xywh2xyxy(np.expand_dims(np.array(bbox), 0)),
'keypoints':
pose,
})
return pose_results
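# Illustrative usage sketch (hedged): `pose_model` is assumed to be an already
# loaded top-down pose model (an nn.Module carrying its `cfg`), which is not
# constructed in this file.
#
#   person_bboxes = [[50, 60, 100, 200, 0.9]]   # (x, y, w, h, score)
#   pose_results = inference_top_down_pose_model(
#       pose_model, 'demo.jpg', person_bboxes,
#       bbox_thr=0.3, format='xywh', dataset='TopDownCocoDataset')
#   # each item: {'bbox': ndarray in xyxy format, 'keypoints': ndarray[K x 3]}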
def inference_bottom_up_pose_model(model, img_or_path):
"""Inference a single image.
num_people: P
num_keypoints: K
bbox height: H
bbox width: W
Args:
model (nn.Module): The loaded pose model.
        img_or_path (str | np.ndarray): Image filename or loaded image.
Returns:
list[ndarray]: The predicted pose info.
The length of the list
is the number of people (P). Each item in the
list is a ndarray, containing each person's
pose (ndarray[Kx3]): x, y, score
"""
pose_results = []
cfg = model.cfg
device = next(model.parameters()).device
# build the data pipeline
test_pipeline = [LoadImage()] + cfg.test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = {
'img_or_path': img_or_path,
'dataset': 'coco',
'ann_info': {
'image_size':
cfg.data_cfg['image_size'],
'num_joints':
cfg.data_cfg['num_joints'],
'flip_index':
[0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15],
}
}
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'].data[0]
# forward the model
with torch.no_grad():
all_preds, _, _ = model(
return_loss=False, img=data['img'], img_metas=data['img_metas'])
for pred in all_preds:
pose_results.append({
'keypoints': pred[:, :3],
})
return pose_results
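# Illustrative usage sketch (hedged): bottom-up inference needs no bounding
# boxes; `pose_model` is again assumed to be a loaded (bottom-up) model.
#
#   people = inference_bottom_up_pose_model(pose_model, 'demo.jpg')
#   # each item: {'keypoints': ndarray[K x 3]} with (x, y, score) per keypoint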
def vis_pose_result(model,
img,
result,
kpt_score_thr=0.3,
dataset='TopDownCocoDataset',
show=False,
out_file=None):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str | np.ndarray): Image filename or loaded image.
result (list[dict]): The results to draw over `img`
(bbox_result, pose_result).
kpt_score_thr (float): The threshold to visualize the keypoints.
        dataset (str): Dataset name; determines the skeleton and colors used
            for drawing. Default: 'TopDownCocoDataset'.
        show (bool): Whether to show the image. Default False.
out_file (str|None): The filename of the output visualization image.
"""
if hasattr(model, 'module'):
model = model.module
palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102],
[230, 230, 0], [255, 153, 255], [153, 204, 255],
[255, 102, 255], [255, 51, 255], [102, 178, 255],
[51, 153, 255], [255, 153, 153], [255, 102, 102],
[255, 51, 51], [153, 255, 153], [102, 255, 102],
[51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0],
[255, 255, 255]])
radius = 4
if dataset == 'TopDownCocoDataset' or dataset == 'BottomUpCocoDataset' \
or dataset == 'TopDownOCHumanDataset':
# show the results
skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
[7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
[1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
pose_limb_color = palette[[
0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
]]
pose_kpt_color = palette[[
16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0
]]
elif dataset == 'TopDownCocoWholeBodyDataset':
# show the results
skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
[7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
[1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7], [16, 18],
[16, 19], [16, 20], [17, 21], [17, 22], [17, 23], [92, 93],
[93, 94], [94, 95], [95, 96], [92, 97], [97, 98], [98, 99],
[99, 100], [92, 101], [101, 102], [102, 103], [103, 104],
[92, 105], [105, 106], [106, 107], [107, 108], [92, 109],
[109, 110], [110, 111], [111, 112], [113, 114], [114, 115],
[115, 116], [116, 117], [113, 118], [118, 119], [119, 120],
[120, 121], [113, 122], [122, 123], [123, 124], [124, 125],
[113, 126], [126, 127], [127, 128], [128, 129], [113, 130],
[130, 131], [131, 132], [132, 133]]
pose_limb_color = palette[
[0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16] +
[16, 16, 16, 16, 16, 16] + [
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
] + [
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
]]
pose_kpt_color = palette[
[16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0] +
[0, 0, 0, 0, 0, 0] + [
19,
] * (68 + 42)]
radius = 1
elif dataset == 'TopDownAicDataset':
skeleton = [[3, 2], [2, 1], [1, 14], [14, 4], [4, 5], [5, 6], [9, 8],
[8, 7], [7, 10], [10, 11], [11, 12], [13, 14], [1, 7],
[4, 10]]
pose_limb_color = palette[[
9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 0, 7, 7
]]
pose_kpt_color = palette[[
9, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 0, 0
]]
elif (dataset == 'TopDownOneHand10KDataset'
or dataset == 'TopDownFreiHandDataset'
or dataset == 'TopDownPanopticDataset'):
skeleton = [[1, 2], [2, 3], [3, 4], [4, 5], [1, 6], [6, 7], [7, 8],
[8, 9], [1, 10], [10, 11], [11, 12], [12, 13], [1, 14],
[14, 15], [15, 16], [16, 17], [1, 18], [18, 19], [19, 20],
[20, 21]]
pose_limb_color = palette[[
0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16, 16
]]
pose_kpt_color = palette[[
0, 0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8, 12, 12, 12, 12, 16, 16, 16,
16
]]
else:
raise NotImplementedError()
img = model.show_result(
img,
result,
skeleton,
radius=radius,
pose_kpt_color=pose_kpt_color,
pose_limb_color=pose_limb_color,
kpt_score_thr=kpt_score_thr,
show=show,
out_file=out_file)
return img
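# Illustrative usage sketch (hedged): visualizing the output of
# inference_top_down_pose_model() above; `pose_model` and `pose_results` are
# assumed to come from that call.
#
#   vis_pose_result(pose_model, 'demo.jpg', pose_results,
#                   kpt_score_thr=0.3, dataset='TopDownCocoDataset',
#                   show=False, out_file='vis_demo.jpg')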
|
py | 1a32901dc0e9de1869c868c864e756abe0f87181 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_log import versionutils
from glance.common import wsgi
from glance.i18n import _LW
LOG = logging.getLogger(__name__)
"""v3 controller stub
Since Glance Artifact Service was released in Liberty as experimental Glance v3
API, its router was referenced in paste configuration as glance.api.v3.router
In Mitaka the Artifacts Service was moved into a standalone process and its
router was renamed to glance.api.artifacts.router.
However, in existing deployments the glance-api-paste.ini may still reference
the glance.api.v3.router. So as not to break these deployments, this stub is
included to redirect v3 requests to the glare service (if it is present) or to
return a 410 otherwise.
This stub controller should be removed in future releases.
"""
class API(wsgi.Router):
def __init__(self, mapper):
versionutils.report_deprecated_feature(
LOG,
_LW('/v3 controller is deprecated and will be removed from '
'glance-api soon. Remove the reference to it from '
'glance-api-paste.ini configuration file and use Glance '
'Artifact Service API instead'))
redirector = self._get_redirector()
mapper.connect(None, "/artifacts",
controller=redirector, action='redirect')
mapper.connect(None, "/artifacts/{path:.*}",
controller=redirector, action='redirect')
super(API, self).__init__(mapper)
def _get_redirector(self):
return wsgi.Resource(RedirectController(),
serializer=RedirectResponseSerializer())
class RedirectController(object):
def redirect(self, req, path=None):
try:
glare_endpoint = next((s['endpoints']
for s in req.context.service_catalog
if s['type'] == 'artifact'))[0]['publicURL']
if path:
path = '/' + path
return '{0}/v0.1/artifacts{1}'.format(glare_endpoint, path or "")
except StopIteration:
return None
class RedirectResponseSerializer(wsgi.JSONResponseSerializer):
def default(self, response, res):
if res:
response.location = res
response.status_int = 301
else:
response.status_int = 410
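# Illustrative behaviour (hedged sketch derived from the classes above): if the
# request context's service catalog contains an entry of type 'artifact' with
# publicURL 'http://glare.example.com', a request routed with path 'images/1'
# is answered with a 301 to 'http://glare.example.com/v0.1/artifacts/images/1';
# without such an entry, the serializer answers with 410 Gone.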
|
py | 1a329042545ad59465d11b207fcd1dd6180b7e97 | """Auto-generated file, do not edit by hand. TO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TO = PhoneMetadata(id='TO', country_code=676, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:0800|(?:[5-8]\\d\\d|999)\\d)\\d{3}|[2-8]\\d{4}', possible_length=(5, 7)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2\\d|3[0-8]|4[0-4]|50|6[09]|7[0-24-69]|8[05])\\d{3}', example_number='20123', possible_length=(5,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:55[4-6]|6(?:[09]\\d|3[02]|8[15-9])|(?:7\\d|8[46-9])\\d|999)\\d{4}', example_number='7715123', possible_length=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='0800\\d{3}', example_number='0800222', possible_length=(7,)),
voip=PhoneNumberDesc(national_number_pattern='55[0-37-9]\\d{4}', example_number='5510123', possible_length=(7,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})', format='\\1-\\2', leading_digits_pattern=['[2-4]|50|6[09]|7[0-24-69]|8[05]']),
NumberFormat(pattern='(\\d{4})(\\d{3})', format='\\1 \\2', leading_digits_pattern=['0']),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[5-9]'])])
|
py | 1a3290ffb5c10185d279f28eb481f27b04f3e2b0 | """
CITATION: The following code is a python implementation of some of the functions
taken from https://github.com/mimno/Mallet
"""
from __future__ import division
import sys, math
EULER_MASCHERONI = -0.5772156649015328606065121
PI_SQUARED_OVER_SIX = math.pi * math.pi / 6
HALF_LOG_TWO_PI = math.log(2 * math.pi) / 2
DIGAMMA_COEF_1 = 1 / 12
DIGAMMA_COEF_2 = 1 / 120
DIGAMMA_COEF_3 = 1 / 252
DIGAMMA_COEF_4 = 1 / 240
DIGAMMA_COEF_5 = 1 / 132
DIGAMMA_COEF_6 = 691 / 32760
DIGAMMA_COEF_7 = 1 / 12
DIGAMMA_COEF_8 = 3617 / 8160
DIGAMMA_COEF_9 = 43867 / 14364
DIGAMMA_COEF_10 = 174611 / 6600
DIGAMMA_LARGE = 9.5
DIGAMMA_SMALL = 0.000001
def learn_symmetric_concentration(countHistogram, observationLengths, numDimensions, currentValue):
currentDigamma = float()
largestNonZeroCount = 0
nonZeroLengthIndex = [int()]*len(observationLengths)
index = 0
while index < len(countHistogram):
if countHistogram[index] > 0:
largestNonZeroCount = index
index += 1
denseIndex = 0
index = 0
while index < len(observationLengths):
if observationLengths[index] > 0:
nonZeroLengthIndex[denseIndex] = index
denseIndex += 1
index += 1
denseIndexSize = denseIndex
iteration = 1
while iteration <= 200:
currentParameter = currentValue / numDimensions
currentDigamma = 0
numerator = 0.0
index = 1
while index <= largestNonZeroCount:
currentDigamma += 1.0 / (currentParameter + index - 1)
numerator += countHistogram[index] * currentDigamma
index += 1
currentDigamma = 0
denominator = 0.0
previousLength = 0
cachedDigamma = digamma(currentValue)
denseIndex = 0
while denseIndex < denseIndexSize:
length = nonZeroLengthIndex[denseIndex]
if length - previousLength > 20:
currentDigamma = digamma(currentValue + length) - cachedDigamma
else:
index = previousLength
while index < length:
currentDigamma += 1.0 / (currentValue + index)
index += 1
denominator += currentDigamma * observationLengths[length]
denseIndex += 1
currentValue = currentParameter * numerator / denominator
iteration += 1
return currentValue
def learn_parameters(parameters, observations, observationLengths):
return learn_params(parameters, observations, observationLengths, 1.00001, 1.0, 200)
def learn_params(parameters, observations, observationLengths, shape, scale, numIterations):
i = int()
k = int()
parametersSum = 0
k = 0
while k < len(parameters):
parametersSum += parameters[k]
k += 1
oldParametersK = float()
currentDigamma = float()
denominator = float()
nonZeroLimit = int()
nonZeroLimits = [-1]*len(observations)
histogram = []
i = 0
while i < len(observations):
histogram = observations[i]
k = 0
while k < len(histogram):
if histogram[k] > 0:
nonZeroLimits[i] = k
k += 1
i += 1
iteration = 0
while iteration < numIterations:
denominator = 0
currentDigamma = 0
i = 1
while i < len(observationLengths):
currentDigamma += 1 / (parametersSum + i - 1)
denominator += observationLengths[i] * currentDigamma
i += 1
denominator -= 1 / scale
parametersSum = 0
k = 0
while k < len(parameters):
nonZeroLimit = nonZeroLimits[k]
oldParametersK = parameters[k]
parameters[k] = 0
currentDigamma = 0
histogram = observations[k]
i = 1
while i <= nonZeroLimit:
currentDigamma += 1 / (oldParametersK + i - 1)
parameters[k] += histogram[i] * currentDigamma
i += 1
parameters[k] = oldParametersK * (parameters[k] + shape) / denominator
parametersSum += parameters[k]
k += 1
iteration += 1
if parametersSum < 0.0:
        print(parametersSum)
print("sum not valid")
sys.exit(1)
return parametersSum
def digamma(z):
psi = 0.0
if z < DIGAMMA_SMALL:
psi = EULER_MASCHERONI - (1 / z)
return psi
while z < DIGAMMA_LARGE:
psi -= 1 / z
z += 1
invZ = 1 / z
invZSquared = invZ * invZ
psi += math.log(z) - 0.5 * invZ - invZSquared * (DIGAMMA_COEF_1 - invZSquared * (DIGAMMA_COEF_2 - invZSquared * (DIGAMMA_COEF_3 - invZSquared * (DIGAMMA_COEF_4 - invZSquared * (DIGAMMA_COEF_5 - invZSquared * (DIGAMMA_COEF_6 - invZSquared * DIGAMMA_COEF_7))))))
return psi
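# Quick sanity check (illustrative): psi(1) = -gamma, so digamma(1.0) should
# return approximately -0.5772156649, i.e. the EULER_MASCHERONI constant
# defined at the top of this module.
#
#   >>> round(digamma(1.0), 9)
#   -0.577215665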
|
py | 1a32919105dd00259514a105285e374ed447411e | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import math
import os
import shutil
try:
import cudf
import dask_cudf
except ImportError:
cudf = None
dask_cudf = None
import numpy as np
import pytest
from pandas.api.types import is_integer_dtype
import nvtabular as nvt
from merlin.core.dispatch import HAS_GPU, make_df
from merlin.dag import ColumnSelector, postorder_iter_nodes
from merlin.schema import Schema, Tags
from nvtabular import Dataset, Workflow, ops
from nvtabular.utils import set_dask_client
from tests.conftest import assert_eq, get_cats, mycols_csv
def test_workflow_double_fit():
raw_df = make_df({"user_session": ["1", "2", "4", "4", "5"]})
cat_feats = ["user_session"] >> nvt.ops.Categorify()
for _ in [1, 2]:
df_event = nvt.Dataset(raw_df)
workflow = nvt.Workflow(cat_feats)
workflow.fit(df_event)
workflow.transform(df_event).to_ddf().compute()
@pytest.mark.parametrize("engine", ["parquet"])
def test_workflow_fit_op_rename(tmpdir, dataset, engine):
# NVT
schema = dataset.schema
for name in schema.column_names:
dataset.schema.column_schemas[name] = dataset.schema.column_schemas[name].with_tags(
[Tags.USER]
)
selector = nvt.ColumnSelector(tags=[Tags.USER])
workflow_ops_1 = selector >> nvt.ops.Rename(postfix="_1")
workflow_1 = nvt.Workflow(workflow_ops_1)
workflow_1.fit(dataset)
workflow_1.save(str(tmpdir / "one"))
new_dataset = workflow_1.transform(dataset).to_ddf().compute()
assert len(new_dataset.columns) > 0
assert all("_1" in col for col in new_dataset.columns)
@pytest.mark.parametrize("engine", ["parquet"])
def test_grab_additional_input_columns(dataset, engine):
schema = Schema(["x", "y"])
node1 = ["x"] >> ops.FillMissing()
node2 = node1 >> ops.Clip(min_value=0)
add_node = node2 + ["y"]
workflow = Workflow(add_node).fit_schema(schema)
output_df = workflow.transform(dataset).to_ddf().compute()
assert len(workflow.output_node.input_columns.names) == 2
assert workflow.output_node.input_columns.names == ["x", "y"]
assert len(workflow.output_node.output_columns.names) == 2
assert workflow.output_node.output_columns.names == ["x", "y"]
assert len(output_df.columns) == 2
assert output_df.columns.tolist() == ["x", "y"]
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("dump", [True, False])
@pytest.mark.parametrize("use_client", [True, False])
def test_gpu_workflow_api(tmpdir, client, df, dataset, gpu_memory_frac, engine, dump, use_client):
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
set_dask_client(client=client if use_client else None)
norms = ops.Normalize()
cat_features = cat_names >> ops.Categorify(cat_cache="host")
cont_features = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp >> norms
workflow = Workflow(cat_features + cont_features + label_name)
workflow.fit(dataset)
if dump:
workflow_dir = os.path.join(tmpdir, "workflow")
workflow.save(workflow_dir)
workflow = None
workflow = Workflow.load(workflow_dir)
def get_norms(tar):
gdf = tar.fillna(0)
gdf = gdf * (gdf >= 0).astype("int")
gdf = np.log(gdf + 1)
return gdf
# Check mean and std - No good right now we have to add all other changes; Clip, Log
assert math.isclose(get_norms(df.y).mean(), norms.means["y"], rel_tol=1e-1)
assert math.isclose(get_norms(df.y).std(), norms.stds["y"], rel_tol=1e-1)
assert math.isclose(get_norms(df.x).mean(), norms.means["x"], rel_tol=1e-1)
assert math.isclose(get_norms(df.x).std(), norms.stds["x"], rel_tol=1e-1)
# Check that categories match
if engine == "parquet":
cats_expected0 = df["name-cat"].unique().values_host if HAS_GPU else df["name-cat"].unique()
cats0 = get_cats(workflow, "name-cat")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected0.tolist()) for cat in cats0.tolist())
assert len(cats0.tolist()) == len(cats_expected0.tolist() + [None])
if HAS_GPU:
cats_expected1 = (
df["name-string"].unique().values_host if HAS_GPU else df["name-string"].unique()
)
else:
cats_expected1 = df["name-string"].unique()
cats1 = get_cats(workflow, "name-string")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected1.tolist()) for cat in cats1.tolist())
assert len(cats1.tolist()) == len(cats_expected1.tolist() + [None])
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
tmpdir,
out_files_per_proc=10,
shuffle=nvt.io.Shuffle.PER_PARTITION,
)
dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = nvt.dispatch.concat(list(dataset_2.to_iter()), axis=0)
if engine == "parquet":
assert is_integer_dtype(df_pp["name-cat"].dtype)
assert is_integer_dtype(df_pp["name-string"].dtype)
num_rows, num_row_groups, col_names = nvt.dispatch.read_parquet_metadata(
str(tmpdir) + "/_metadata"
)
assert num_rows == len(df_pp)
@pytest.mark.parametrize("engine", ["csv", "csv-no-header"])
def test_gpu_dataset_iterator_csv(df, dataset, engine):
df_itr = nvt.dispatch.concat(list(dataset.to_iter(columns=mycols_csv)), axis=0)
assert_eq(df_itr.reset_index(drop=True), df.reset_index(drop=True))
def test_spec_set(tmpdir, client):
gdf_test = make_df(
{
"ad_id": [1, 2, 2, 6, 6, 8, 3, 3],
"source_id": [2, 4, 4, 7, 5, 2, 5, 2],
"platform": [1, 2, np.nan, 2, 1, 3, 3, 1],
"cont": [1, 2, np.nan, 2, 1, 3, 3, 1],
"clicked": [1, 0, 1, 0, 0, 1, 1, 0],
}
)
cats = ColumnSelector(["ad_id", "source_id", "platform"])
cat_features = cats >> ops.Categorify
cont_features = ColumnSelector(["cont"]) >> ops.FillMissing >> ops.Normalize
te_features = cats >> ops.TargetEncoding("clicked", kfold=5, fold_seed=42, p_smooth=20)
set_dask_client(client=client)
p = Workflow(cat_features + cont_features + te_features)
p.fit_transform(nvt.Dataset(gdf_test)).to_ddf().compute()
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("dump", [True, False])
def test_gpu_workflow(tmpdir, df, dataset, gpu_memory_frac, engine, dump):
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
norms = ops.Normalize()
conts = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> norms
cats = cat_names >> ops.Categorify()
workflow = nvt.Workflow(conts + cats + label_name)
workflow.fit(dataset)
if dump:
workflow_dir = os.path.join(tmpdir, "workflow")
workflow.save(workflow_dir)
workflow = None
workflow = Workflow.load(workflow_dir)
def get_norms(tar):
gdf = tar.fillna(0)
gdf = gdf * (gdf >= 0).astype("int")
return gdf
assert math.isclose(get_norms(df.x).mean(), norms.means["x"], rel_tol=1e-4)
assert math.isclose(get_norms(df.y).mean(), norms.means["y"], rel_tol=1e-4)
assert math.isclose(get_norms(df.x).std(), norms.stds["x"], rel_tol=1e-3)
assert math.isclose(get_norms(df.y).std(), norms.stds["y"], rel_tol=1e-3)
# Check that categories match
if engine == "parquet":
cats_expected0 = df["name-cat"].unique().values_host if HAS_GPU else df["name-cat"].unique()
cats0 = get_cats(workflow, "name-cat")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected0.tolist()) for cat in cats0.tolist())
assert len(cats0.tolist()) == len(cats_expected0.tolist() + [None])
cats_expected1 = (
df["name-string"].unique().values_host if HAS_GPU else df["name-string"].unique()
)
cats1 = get_cats(workflow, "name-string")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected1.tolist()) for cat in cats1.tolist())
assert len(cats1.tolist()) == len(cats_expected1.tolist() + [None])
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
output_path=tmpdir, out_files_per_proc=10, shuffle=nvt.io.Shuffle.PER_PARTITION
)
dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = nvt.dispatch.concat(list(dataset_2.to_iter()), axis=0)
if engine == "parquet":
assert is_integer_dtype(df_pp["name-cat"].dtype)
assert is_integer_dtype(df_pp["name-string"].dtype)
num_rows, num_row_groups, col_names = nvt.dispatch.read_parquet_metadata(
str(tmpdir) + "/_metadata"
)
assert num_rows == len(df_pp)
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("dump", [True, False])
@pytest.mark.parametrize("replace", [True, False])
def test_gpu_workflow_config(tmpdir, client, df, dataset, gpu_memory_frac, engine, dump, replace):
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
norms = ops.Normalize()
cat_features = cat_names >> ops.Categorify()
if replace:
cont_features = cont_names >> ops.FillMissing() >> ops.LogOp >> norms
else:
fillmissing_logop = (
cont_names
>> ops.FillMissing()
>> ops.LogOp
>> ops.Rename(postfix="_FillMissing_1_LogOp_1")
)
cont_features = cont_names + fillmissing_logop >> norms
set_dask_client(client=client)
workflow = Workflow(cat_features + cont_features + label_name)
workflow.fit(dataset)
if dump:
workflow_dir = os.path.join(tmpdir, "workflow")
workflow.save(workflow_dir)
workflow = None
workflow = Workflow.load(workflow_dir)
def get_norms(tar):
ser_median = tar.dropna().quantile(0.5, interpolation="linear")
gdf = tar.fillna(ser_median)
gdf = np.log(gdf + 1)
return gdf
# Check mean and std - No good right now we have to add all other changes; Clip, Log
concat_ops = "_FillMissing_1_LogOp_1"
if replace:
concat_ops = ""
assert math.isclose(get_norms(df.x).mean(), norms.means["x" + concat_ops], rel_tol=1e-1)
assert math.isclose(get_norms(df.y).mean(), norms.means["y" + concat_ops], rel_tol=1e-1)
assert math.isclose(get_norms(df.x).std(), norms.stds["x" + concat_ops], rel_tol=1e-1)
assert math.isclose(get_norms(df.y).std(), norms.stds["y" + concat_ops], rel_tol=1e-1)
# Check that categories match
if engine == "parquet":
cats_expected0 = df["name-cat"].unique().values_host if HAS_GPU else df["name-cat"].unique()
cats0 = get_cats(workflow, "name-cat")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected0.tolist()) for cat in cats0.tolist())
assert len(cats0.tolist()) == len(cats_expected0.tolist() + [None])
cats_expected1 = (
df["name-string"].unique().values_host if HAS_GPU else df["name-string"].unique()
)
cats1 = get_cats(workflow, "name-string")
# adding the None entry as a string because of move from gpu
assert all(cat in [None] + sorted(cats_expected1.tolist()) for cat in cats1.tolist())
assert len(cats1.tolist()) == len(cats_expected1.tolist() + [None])
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
tmpdir,
out_files_per_proc=10,
shuffle=nvt.io.Shuffle.PER_PARTITION,
)
dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = nvt.dispatch.concat(list(dataset_2.to_iter()), axis=0)
if engine == "parquet":
assert is_integer_dtype(df_pp["name-cat"].dtype)
assert is_integer_dtype(df_pp["name-string"].dtype)
num_rows, num_row_groups, col_names = nvt.dispatch.read_parquet_metadata(
str(tmpdir) + "/_metadata"
)
assert num_rows == len(df_pp)
@pytest.mark.parametrize("shuffle", [nvt.io.Shuffle.PER_WORKER, nvt.io.Shuffle.PER_PARTITION, None])
@pytest.mark.parametrize("use_client", [True, False])
def test_parquet_output(client, use_client, tmpdir, shuffle):
out_files_per_proc = 2
set_dask_client(client=client if use_client else None)
n_workers = len(client.cluster.workers) if use_client else 1
out_path = str(tmpdir.mkdir("processed"))
path = str(tmpdir.join("simple.parquet"))
size = 25
row_group_size = 5
df = make_df({"a": np.arange(size)})
df.to_parquet(path, row_group_size=row_group_size, engine="pyarrow")
columns = ["a"]
dataset = nvt.Dataset(path, engine="parquet", row_groups_per_part=1)
workflow = nvt.Workflow(columns >> ops.Normalize())
workflow.fit_transform(dataset).to_parquet(
output_path=out_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc
)
# Check that the number of output files is correct
result = glob.glob(os.path.join(out_path, "*.parquet"))
assert len(result) == out_files_per_proc * n_workers
# Make sure _metadata exists
meta_path = os.path.join(out_path, "_metadata")
assert os.path.exists(meta_path)
# Make sure _metadata makes sense
_metadata = nvt.dispatch.read_parquet_metadata(meta_path)
assert _metadata[0] == size
assert _metadata[2] == columns
@pytest.mark.parametrize("engine", ["parquet"])
def test_join_external_workflow(tmpdir, df, dataset, engine):
# Define "external" table
how = "left"
drop_duplicates = True
cache = "device"
shift = 100
df_ext = df[["id"]].copy().sort_values("id")
df_ext["new_col"] = df_ext["id"] + shift
df_ext["new_col_2"] = "keep"
df_ext["new_col_3"] = "ignore"
df_ext_check = df_ext.copy()
# Define Op
on = "id"
columns_left = list(df.columns)
columns_ext = ["id", "new_col", "new_col_2"]
df_ext_check = df_ext_check[columns_ext]
if drop_duplicates:
df_ext_check.drop_duplicates(ignore_index=True, inplace=True)
joined = ColumnSelector(columns_left) >> nvt.ops.JoinExternal(
df_ext,
on,
how=how,
columns_ext=columns_ext,
cache=cache,
drop_duplicates_ext=drop_duplicates,
)
# Define Workflow
gdf = df.reset_index()
dataset = nvt.Dataset(gdf)
processor = nvt.Workflow(joined)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute().reset_index()
# Validate
check_gdf = gdf.merge(df_ext_check, how=how, on=on)
assert len(check_gdf) == len(new_gdf)
assert (new_gdf["id"] + shift).all() == new_gdf["new_col"].all()
assert gdf["id"].all() == new_gdf["id"].all()
assert "new_col_2" in new_gdf.columns
assert "new_col_3" not in new_gdf.columns
@pytest.mark.parametrize("shuffle", [nvt.io.Shuffle.PER_WORKER, nvt.io.Shuffle.PER_PARTITION, None])
@pytest.mark.parametrize("use_client", [True, False])
@pytest.mark.parametrize("apply_offline", [True, False])
def test_workflow_apply(client, use_client, tmpdir, shuffle, apply_offline):
set_dask_client(client=client if use_client else None)
out_files_per_proc = 2
out_path = str(tmpdir.mkdir("processed"))
path = str(tmpdir.join("simple.parquet"))
size = 25
row_group_size = 5
cont_names = ["cont1", "cont2"]
cat_names = ["cat1", "cat2"]
label_name = ["label"]
df = make_df(
{
"cont1": np.arange(size, dtype=np.float64),
"cont2": np.arange(size, dtype=np.float64),
"cat1": np.arange(size, dtype=np.int32),
"cat2": np.arange(size, dtype=np.int32),
"label": np.arange(size, dtype=np.float64),
}
)
df.to_parquet(path, row_group_size=row_group_size, engine="pyarrow")
dataset = nvt.Dataset(path, engine="parquet", row_groups_per_part=1)
cat_features = cat_names >> ops.Categorify()
cont_features = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp
workflow = Workflow(cat_features + cont_features + label_name)
workflow.fit(dataset)
# Force dtypes
dict_dtypes = {}
for col in cont_names:
dict_dtypes[col] = np.float32
for col in cat_names:
dict_dtypes[col] = np.float32
for col in label_name:
dict_dtypes[col] = np.int64
workflow.transform(dataset).to_parquet(
# apply_offline=apply_offline, Not any more?
# record_stats=apply_offline, Not any more?
output_path=out_path,
shuffle=shuffle,
out_files_per_proc=out_files_per_proc,
dtypes=dict_dtypes,
)
# Check dtypes
for filename in glob.glob(os.path.join(out_path, "*.parquet")):
gdf = nvt.dispatch.read_dispatch(filename)(filename)
assert dict(gdf.dtypes) == dict_dtypes
@pytest.mark.parametrize("use_parquet", [True, False])
def test_workflow_generate_columns(tmpdir, use_parquet):
out_path = str(tmpdir.mkdir("processed"))
path = str(tmpdir.join("simple.parquet"))
    # Stripped-down dataset with geo_location codes like in the Outbrain dataset
df = make_df({"geo_location": ["US>CA", "CA>BC", "US>TN>659"]})
# defining a simple workflow that strips out the country code from the first two digits of the
# geo_location code and sticks in a new 'geo_location_country' field
country = (
["geo_location"]
>> ops.LambdaOp(
f=lambda col: col.str.slice(0, 2),
)
>> ops.Rename(postfix="_country")
)
cat_features = ["geo_location"] + country >> ops.Categorify()
workflow = Workflow(cat_features)
if use_parquet:
df.to_parquet(path)
dataset = nvt.Dataset(path)
else:
dataset = nvt.Dataset(df)
# just make sure this works without errors
workflow.fit(dataset)
workflow.transform(dataset).to_parquet(out_path)
def test_fit_simple():
data = make_df({"x": [0, 1, 2, None, 0, 1, 2], "y": [None, 3, 4, 5, 3, 4, 5]})
dataset = Dataset(data)
workflow = Workflow(["x", "y"] >> ops.FillMedian() >> ops.LambdaOp(lambda x: x * x))
workflow.fit(dataset)
transformed = workflow.transform(dataset).to_ddf().compute()
expected = make_df({"x": [0, 1, 4, 1, 0, 1, 4], "y": [16, 9, 16, 25, 9, 16, 25]})
if not HAS_GPU:
transformed["x"] = transformed["x"].astype(expected["x"].dtype)
transformed["y"] = transformed["y"].astype(expected["y"].dtype)
assert_eq(expected, transformed)
@pytest.mark.skipif(not cudf, reason="needs cudf")
def test_transform_geolocation():
raw = """US>SC>519 US>CA>807 US>MI>505 US>CA>510 CA>NB US>CA>534""".split()
data = make_df({"geo_location": raw})
geo_location = ColumnSelector(["geo_location"])
state = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 5))
>> ops.Rename(postfix="_state")
)
country = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 2))
>> ops.Rename(postfix="_country")
)
geo_features = state + country + geo_location >> ops.HashBucket(num_buckets=100)
# for this workflow we don't have any statoperators, so we can get away without fitting
workflow = Workflow(geo_features)
transformed = workflow.transform(Dataset(data)).to_ddf().compute()
expected = make_df()
expected["geo_location_state"] = data["geo_location"].str.slice(0, 5).hash_values() % 100
expected["geo_location_country"] = data["geo_location"].str.slice(0, 2).hash_values() % 100
expected["geo_location"] = data["geo_location"].hash_values() % 100
expected = expected.astype(np.int32)
assert_eq(expected, transformed)
def test_workflow_move_saved(tmpdir):
raw = """US>SC>519 US>CA>807 US>MI>505 US>CA>510 CA>NB US>CA>534""".split()
data = make_df({"geo": raw})
geo_location = ColumnSelector(["geo"])
state = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 5))
>> ops.Rename(postfix="_state")
)
country = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 2))
>> ops.Rename(postfix="_country")
)
geo_features = state + country + geo_location >> ops.Categorify()
# create the workflow and transform the input
workflow = Workflow(geo_features)
expected = workflow.fit_transform(Dataset(data)).to_ddf().compute()
# save the workflow (including categorical mapping parquet files)
# and then verify we can load the saved workflow after moving the directory
out_path = os.path.join(tmpdir, "output", "workflow")
workflow.save(out_path)
moved_path = os.path.join(tmpdir, "output", "workflow2")
shutil.move(out_path, moved_path)
workflow2 = Workflow.load(moved_path)
# also check that when transforming our input we get the same results after loading
transformed = workflow2.transform(Dataset(data)).to_ddf().compute()
assert_eq(expected, transformed)
def test_workflow_input_output_dtypes():
df = make_df({"genre": ["drama", "comedy"], "user": ["a", "b"], "unneeded": [1, 2]})
features = [["genre", "user"], "genre"] >> ops.Categorify(encode_type="combo")
workflow = Workflow(features)
workflow.fit(Dataset(df))
assert "unneeded" not in workflow.input_dtypes
assert set(workflow.input_dtypes.keys()) == {"genre", "user"}
assert set(workflow.output_dtypes.keys()) == {"genre_user", "genre"}
@pytest.mark.skipif(not cudf, reason="needs cudf")
def test_workflow_transform_ddf_dtypes():
# Initial Dataset
dtypes = {"name": str, "id": int, "x": float, "y": float}
df = cudf.datasets.timeseries(dtypes=dtypes).reset_index()
ddf = dask_cudf.from_cudf(df, npartitions=2)
dataset = Dataset(ddf)
# Create and Execute Workflow
cols = ["name", "x", "y", "timestamp"]
cat_cols = ["id"] >> ops.Normalize()
workflow = Workflow(cols + cat_cols)
workflow.fit(dataset)
transformed_ddf = workflow.transform(dataset).to_ddf()
# no transforms on the pass through cols, should have original dtypes
for col in cols:
assert_eq(ddf.dtypes[col], transformed_ddf.dtypes[col])
# Followup dask-cudf sorting used to throw an exception because of dtype issues,
# check that it works now
transformed_ddf.sort_values(["id", "timestamp"]).compute()
def test_workflow_saved_schema(tmpdir):
raw = """US>SC>519 US>CA>807 US>MI>505 US>CA>510 CA>NB US>CA>534""".split()
data = make_df({"geo": raw})
geo_location = ColumnSelector(["geo"])
state = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 5))
>> ops.Rename(postfix="_state")
)
country = (
geo_location
>> ops.LambdaOp(lambda col: col.str.slice(0, 2))
>> ops.Rename(postfix="_country")
)
geo_features = state + country + geo_location >> ops.Categorify()
# create the workflow and transform the input
workflow = Workflow(geo_features)
workflow.fit(Dataset(data))
real_input_schema = workflow.input_schema
real_output_schema = workflow.output_schema
# save the workflow (including categorical mapping parquet files)
# and then verify we can load the saved workflow after moving the directory
out_path = os.path.join(tmpdir, "output", "workflow")
workflow.save(out_path)
workflow2 = Workflow.load(out_path)
assert workflow2.input_schema == real_input_schema
assert workflow2.output_schema == real_output_schema
for node in postorder_iter_nodes(workflow2.output_node):
assert node.input_schema is not None
assert node.output_schema is not None
|
py | 1a3291c8957fe2be282d99046a8f52b9484937ef | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Intel Corporation
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.orchestrator.heat
import unittest
import mock
from yardstick.orchestrator.kubernetes import KubernetesObject
from yardstick.orchestrator.kubernetes import KubernetesTemplate
class GetTemplateTestCase(unittest.TestCase):
def test_get_template(self):
output_t = {
"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {
"name": "host-k8s-86096c30"
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"labels": {
"app": "host-k8s-86096c30"
}
},
"spec": {
"containers": [
{
"args": [
"-c",
"chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done"
],
"command": [
"/bin/bash"
],
"image": "openretriever/yardstick",
"name": "host-k8s-86096c30-container",
"volumeMounts": [
{
"mountPath": "/root/.ssh/",
"name": "k8s-86096c30-key"
}
]
}
],
"volumes": [
{
"configMap": {
"name": "k8s-86096c30-key"
},
"name": "k8s-86096c30-key"
}
],
"nodeSelector": {
"kubernetes.io/hostname": "node-01"
}
}
}
}
}
input_s = {
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done'],
'ssh_key': 'k8s-86096c30-key',
'nodeSelector': { 'kubernetes.io/hostname': 'node-01'}
}
name = 'host-k8s-86096c30'
output_r = KubernetesObject(name, **input_s).get_template()
self.assertEqual(output_r, output_t)
class GetRcPodsTestCase(unittest.TestCase):
@mock.patch('yardstick.orchestrator.kubernetes.k8s_utils.get_pod_list')
def test_get_rc_pods(self, mock_get_pod_list):
servers = {
'host': {
'image': 'openretriever/yardstick',
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
},
'target': {
'image': 'openretriever/yardstick',
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
}
}
k8s_template = KubernetesTemplate('k8s-86096c30', servers)
mock_get_pod_list.return_value.items = []
pods = k8s_template.get_rc_pods()
self.assertEqual(pods, [])
def main():
unittest.main()
if __name__ == '__main__':
main()
|
py | 1a32928e387a0339dfe7ae1c12d1f136f926c072 | #!/usr/bin/env python
r"""Import BTi / 4D MagnesWH3600 data to fif file.
Notes
-----
1. Currently direct inclusion of reference channel weights
is not supported. Please use \'mne_create_comp_data\' to include
the weights or use the low level functions from this module to
include them yourself.
2. The informed guess for the 4D name is E31 for the ECG channel and
E63, E64 for the EOG channels. Please check and adjust if those channels
are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
appear in the channel names of the raw object.
Examples
--------
.. code-block:: console
$ mne bti2fiff --pdf C,rfDC -o my_raw.fif
"""
# Authors: Denis A. Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Yuval Harpaz <[email protected]>
#
# simplified bsd-3 license
import sys
import mne
from mne.io import read_raw_bti
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option('-p', '--pdf', dest='pdf_fname',
help='Input data file name', metavar='FILE')
parser.add_option('-c', '--config', dest='config_fname',
help='Input config file name', metavar='FILE',
default='config')
parser.add_option('--head_shape', dest='head_shape_fname',
help='Headshape file name', metavar='FILE',
default='hs_file')
parser.add_option('-o', '--out_fname', dest='out_fname',
help='Name of the resulting fiff file',
default='as_data_fname')
parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float',
help='Compensatory rotation about Neuromag x axis, deg',
default=2.0)
parser.add_option('-T', '--translation', dest='translation', type='str',
help='Default translation, meter',
default=(0.00, 0.02, 0.11))
parser.add_option('--ecg_ch', dest='ecg_ch', type='str',
help='4D ECG channel name',
default='E31')
parser.add_option('--eog_ch', dest='eog_ch', type='str',
help='4D EOG channel names',
default='E63,E64')
options, args = parser.parse_args()
pdf_fname = options.pdf_fname
if pdf_fname is None:
parser.print_help()
sys.exit(1)
config_fname = options.config_fname
head_shape_fname = options.head_shape_fname
out_fname = options.out_fname
rotation_x = options.rotation_x
translation = options.translation
ecg_ch = options.ecg_ch
    eog_ch = options.eog_ch.split(',')
if out_fname == 'as_data_fname':
out_fname = pdf_fname + '_raw.fif'
raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname,
head_shape_fname=head_shape_fname,
rotation_x=rotation_x, translation=translation,
ecg_ch=ecg_ch, eog_ch=eog_ch)
raw.save(out_fname)
raw.close()
mne.utils.run_command_if_main()
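# Illustrative CLI sketch (hedged; file names are placeholders, options as
# defined in the parser above):
#
#   $ mne bti2fiff --pdf C,rfDC --config config --head_shape hs_file -o my_raw.fif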
|
py | 1a3293c2996c600a1a94cbe7889cf4677a479c64 | '''
Copyright (C) 2021 Tayler Mauk and contributors. All rights reserved.
Licensed under the MIT license.
See LICENSE file in the project root for full license information.
'''
from app import Application
if __name__ == "__main__":
Application().Run()
|
py | 1a3294cf7baf54f2510df158e144dee556620619 | # coding=utf-8
import urllib
from bs4 import BeautifulSoup
def run(bot, chat_id, user, keyConfig, message, totalResults=1):
requestText = message.replace(bot.name, '').strip().upper()
code = urllib.urlopen('http://www.abbreviations.com/' + requestText).read()
resultsList = acronym_results_parser(code)
result = ''
if resultsList:
searchResults = acronym_results_printer(requestText, resultsList)
result = user + ', ' + searchResults
else:
result='I\'m sorry ' + (user if not user == '' else 'Dave') + \
', I\'m afraid I can\'t find the acronym *' + \
str(requestText) + '*'
try:
bot.sendMessage(chat_id=chat_id, text=result, parse_mode='Markdown')
except:
bot.sendMessage(chat_id=chat_id, text=result.replace('*', ''))
def acronym_results_parser(code):
soup = BeautifulSoup(code, 'html.parser')
resultList = []
for resultRow in soup.findAll('p', attrs={'class':'desc'}):
resultList.append(resultRow.string)
return resultList
def acronym_results_printer(request, list):
AllGameDetailsFormatted = '*' + str(request) + '* could mean:'
for item in list:
encodedItem = str(item)
if (encodedItem != 'None'):
AllGameDetailsFormatted += '\n'
for char in encodedItem.replace('Definition', '').replace('*', '\*'):
if char.isupper():
AllGameDetailsFormatted += '*' + char + '*'
else:
AllGameDetailsFormatted += char
return AllGameDetailsFormatted
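# Illustrative example (hedged): for request 'BRB' and a parsed results list
# like ['Be Right Back'], acronym_results_printer returns the Markdown string
# '*BRB* could mean:\n*B*e *R*ight *B*ack', i.e. uppercase letters are bolded
# for Telegram's Markdown parse mode.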
|
py | 1a3294d2f7166322bf2e7cdbc6549a389ad2da42 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
from pyiron.lammps.base import Input
from pyiron.lammps.interactive import LammpsInteractive
from pyiron_contrib.atomistics.mlip.mlip import read_cgfs
from pyiron_base import GenericParameters
__author__ = "Jan Janssen"
__copyright__ = "Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "[email protected]"
__status__ = "development"
__date__ = "Sep 1, 2018"
class LammpsMlip(LammpsInteractive):
def __init__(self, project, job_name):
super(LammpsMlip, self).__init__(project, job_name)
self.input = MlipInput()
self.__name__ = "LammpsMlip"
self.__version__ = None # Reset the version number to the executable is set automatically
self._executable = None
self._executable_activate()
def set_input_to_read_only(self):
"""
This function enforces read-only mode for the input classes, but it has to be implement in the individual
classes.
"""
super(LammpsMlip, self).set_input_to_read_only()
self.input.mlip.read_only = True
def write_input(self):
super(LammpsMlip, self).write_input()
if self.input.mlip['mlip:load-from'] == 'auto':
self.input.mlip['mlip:load-from'] = os.path.basename(self.potential['Filename'][0][0])
self.input.mlip.write_file(file_name="mlip.ini", cwd=self.working_directory)
def enable_active_learning(self):
self.input.mlip.load_string("""\
abinitio void
mlip mtpr
mlip:load-from Trained.mtp_
calculate-efs TRUE
fit FALSE
select TRUE
select:site-en-weight 0.0
select:energy-weight 1.0
select:force-weight 0.0
select:stress-weight 0.0
select:threshold-init 1e-5
select:threshold 2.0
select:threshold-swap 1.000001
select:threshold-break 5.0
select:save-selected selected.cfg
select:save-state selection.mvs
select:load-state state.mvs
select:efs-ignored FALSE
select:log selection.log
write-cfgs:skip 0
log lotf.log""")
def collect_output(self):
super(LammpsMlip, self).collect_output()
if 'select:save-selected' in self.input.mlip._dataset['Parameter']:
file_name = os.path.join(self.working_directory, self.input.mlip['select:save-selected'])
if os.path.exists(file_name):
cell, positions, forces, stress, energy, indicies, grades, jobids, timesteps = read_cgfs(file_name=file_name)
with self.project_hdf5.open("output/mlip") as hdf5_output:
hdf5_output['forces'] = forces
hdf5_output['energy_tot'] = energy
hdf5_output['pressures'] = stress
hdf5_output['cells'] = cell
hdf5_output['positions'] = positions
hdf5_output['indicies'] = indicies
class MlipInput(Input):
def __init__(self):
self.mlip = MlipParameter()
super(MlipInput, self).__init__()
def to_hdf(self, hdf5):
"""
Args:
hdf5:
Returns:
"""
with hdf5.open("input") as hdf5_input:
self.mlip.to_hdf(hdf5_input)
super(MlipInput, self).to_hdf(hdf5)
def from_hdf(self, hdf5):
"""
Args:
hdf5:
Returns:
"""
with hdf5.open("input") as hdf5_input:
self.mlip.from_hdf(hdf5_input)
super(MlipInput, self).from_hdf(hdf5)
class MlipParameter(GenericParameters):
def __init__(self, separator_char=' ', comment_char='#', table_name="mlip_inp"):
super(MlipParameter, self).__init__(separator_char=separator_char, comment_char=comment_char, table_name=table_name)
def load_default(self, file_content=None):
if file_content is None:
file_content = '''\
abinitio void
mlip mtpr
mlip:load-from auto
calculate-efs TRUE
fit FALSE
select FALSE
'''
self.load_string(file_content)
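# Illustrative usage sketch (hedged; assumes an existing pyiron Project `pr`, a
# structure and an MTP potential, none of which are defined in this module):
#
#   job = pr.create_job(LammpsMlip, 'lmp_mlip')
#   job.structure = structure
#   job.potential = mtp_potential
#   job.enable_active_learning()   # switch mlip.ini to selection mode
#   job.run()
#   # selected configurations are parsed into job['output/mlip'] by collect_output()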
|
py | 1a3295614c6f1fcc7a3303b89ccc650ebf4b9a9e | from torch import nn
from torchvision.models import resnet
from torch.utils import model_zoo
class ResEnc(resnet.ResNet):
def __init__(self, block, layers, url=None):
self.url = url
super().__init__(block, layers)
del self.avgpool
del self.fc
def initialize(self):
if self.url:
self.load_state_dict(model_zoo.load_url(self.url), strict=False)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x2 = self.layer1(x)
x3 = self.layer2(x2)
x4 = self.layer3(x3)
x5 = self.layer4(x4)
return [x2, x3, x4, x5]
def res18_enc():
encoder = ResEnc(resnet.BasicBlock, [2, 2, 2, 2], resnet.model_urls['resnet18'])
encoder.initialize()
return encoder
def res34_enc():
encoder = ResEnc(resnet.BasicBlock, [3, 4, 6, 3], resnet.model_urls['resnet34'])
encoder.initialize()
return encoder
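# Illustrative usage sketch (hedged): the encoder returns four feature maps
# whose channel counts match `res_inchannels` defined below.
#
#   import torch
#   enc = res18_enc()
#   feats = enc(torch.randn(1, 3, 224, 224))
#   [f.shape[1] for f in feats]   # -> [64, 128, 256, 512]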
res_inchannels = [64, 128, 256, 512]
|
py | 1a329624ccee9fa3973bd77be93293eaefb94947 | r"""
Relative finite field extensions
Considering a *absolute field* `F_{q^m}` and a *relative_field* `F_q`, with
`q = p^s`, `p` being a prime and `s, m` being integers, this file
contains a class to take care of the representation of `F_{q^m}`-elements
as `F_q`-elements.
.. WARNING::
As this code is experimental, a warning is thrown when a
relative finite field extension is created for the first time
in a session (see :class:`sage.misc.superseded.experimental`).
TESTS::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
doctest:...: FutureWarning: This class/method/function is marked as experimental. It, its functionality or its interface might change without a formal deprecation.
See http://trac.sagemath.org/20284 for details.
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
"""
# ****************************************************************************
# Copyright (C) 2016 David Lucas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.structure.sage_object import SageObject
from sage.categories.homset import Hom
from sage.matrix.constructor import column_matrix
from sage.modules.free_module_element import vector
from sage.misc.superseded import experimental
class RelativeFiniteFieldExtension(SageObject):
r"""
Considering `p` a prime number, n an integer and three finite fields
`F_p`, `F_q` and `F_{q^m}`, this class contains a set of methods
to manage the representation of elements of the relative extension
`F_{q^m}` over `F_q`.
INPUT:
- ``absolute_field``, ``relative_field`` -- two finite fields, ``relative_field``
being a subfield of ``absolute_field``
- ``embedding`` -- (default: ``None``) an homomorphism from ``relative_field`` to
``absolute_field``. If ``None`` is provided, it will default to the first
homomorphism of the list of homomorphisms Sage can build.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
It is possible to specify the embedding to use
from ``relative_field`` to ``absolute_field``::
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq, embedding=Hom(Fq, Fqm)[1])
sage: FE.embedding() == Hom(Fq, Fqm)[1]
True
"""
@experimental(trac_number=20284)
def __init__(self, absolute_field, relative_field, embedding=None):
r"""
TESTS:
If ``absolute_field`` is not a finite field, an error is raised::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm = RR
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Traceback (most recent call last):
...
ValueError: absolute_field has to be a finite field
Same for ``relative_field``::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq = RR
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Traceback (most recent call last):
...
ValueError: relative_field has to be a finite field
If ``relative_field`` is not a subfield of ``absolute_field``, an exception
is raised::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(8)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Traceback (most recent call last):
...
ValueError: relative_field has to be a subfield of absolute_field
"""
if not absolute_field.is_finite():
raise ValueError("absolute_field has to be a finite field")
if not relative_field.is_finite():
raise ValueError("relative_field has to be a finite field")
s = relative_field.degree()
sm = absolute_field.degree()
if not s.divides(sm):
raise ValueError("relative_field has to be a subfield of absolute_field")
H = Hom(relative_field, absolute_field)
if embedding is not None and embedding not in H:
raise ValueError("embedding has to be an embedding from relative_field to absolute_field")
elif embedding is not None:
self._phi = embedding
else:
self._phi = H[0]
self._prime_field = relative_field.base_ring()
self._relative_field = relative_field
self._absolute_field = absolute_field
alpha = relative_field.gen()
beta = absolute_field.gen()
self._alphas = [alpha ** i for i in range(s)]
self._betas = [beta ** i for i in range(sm)]
self._relative_field_degree = s
self._absolute_field_degree = sm
def _repr_(self):
r"""
Returns a string representation of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: RelativeFiniteFieldExtension(Fqm, Fq)
Relative field extension between Finite Field in aa of size 2^4 and Finite Field in a of size 2^2
"""
return "Relative field extension between %s and %s" % (self.absolute_field(), self.relative_field())
def _latex_(self):
r"""
Returns a latex representation of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: latex(RelativeFiniteFieldExtension(Fqm, Fq))
\textnormal{Relative field extension between \Bold{F}_{2^{4}} and \Bold{F}_{2^{2}}}
"""
return "\\textnormal{Relative field extension between %s and %s}" % (self.absolute_field()._latex_(),
self.relative_field()._latex_())
def __eq__(self, other):
r"""
Tests equality between embeddings.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fq = GF(4)
sage: FQ = GF(4**3)
sage: H = Hom(Fq, FQ)
sage: E1 = RelativeFiniteFieldExtension(FQ, Fq)
sage: E2 = RelativeFiniteFieldExtension(FQ, Fq, H[0])
sage: E3 = RelativeFiniteFieldExtension(FQ, Fq, H[1])
sage: E1 == E2
True
sage: E1 == E3
False
"""
return isinstance(other, RelativeFiniteFieldExtension) \
and self.embedding() == other.embedding()
@cached_method
def _representation_matrix(self):
r"""
Returns the matrix used to represents elements of the absolute field
as vectors in the basis of the relative field over the prime field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE._representation_matrix()
[1 0 0 0]
[0 0 1 1]
[0 1 1 1]
[0 0 0 1]
"""
s = self.relative_field_degree()
m = self.extension_degree()
betas = self.absolute_field_basis()
phi_alphas = [ self._phi(self._alphas[i]) for i in range(s) ]
A = column_matrix([vector(betas[i] * phi_alphas[j])
for i in range(m) for j in range(s)])
return A.inverse()
def _flattened_relative_field_representation(self, b):
r"""
Returns a vector representation of ``b`` in the basis of
the relative field over the prime field.
INPUT:
- ``b`` -- an element of the absolute field
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: b = aa^3 + aa^2 + aa + 1
sage: FE._flattened_relative_field_representation(b)
(1, 0, 1, 1)
"""
if b not in self.absolute_field():
raise ValueError("The input has to be an element of the absolute field")
return self._representation_matrix() * vector(b)
def relative_field_representation(self, b):
r"""
Returns a vector representation of the field element ``b`` in the basis
of the absolute field over the relative field.
INPUT:
- ``b`` -- an element of the absolute field
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: b = aa^3 + aa^2 + aa + 1
sage: FE.relative_field_representation(b)
(1, a + 1)
"""
if b not in self.absolute_field():
raise ValueError("The input has to be an element of the absolute field")
s = self.relative_field_degree()
if s == 1:
return vector(b)
Fq = self.relative_field()
vect = self._flattened_relative_field_representation(b)
sm = self.absolute_field_degree()
list_elts = []
for i in range(0, sm, s):
list_elts.append(Fq(vect[i:i + s]))
return vector(Fq, list_elts)
def absolute_field_representation(self, a):
r"""
Returns an absolute field representation of the relative field
vector ``a``.
INPUT:
- ``a`` -- a vector in the relative extension field
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: b = aa^3 + aa^2 + aa + 1
sage: rel = FE.relative_field_representation(b)
sage: FE.absolute_field_representation(rel) == b
True
"""
s = self.relative_field_degree()
m = self.extension_degree()
if len(a) != m:
raise ValueError("The input has to be a vector with length equal to the order of the absolute field")
if not a.base_ring() == self.relative_field():
raise ValueError("The input has to be over the prime field")
alphas = self.relative_field_basis()
betas = self.absolute_field_basis()
phi = self.embedding()
b = self.absolute_field().zero()
flattened_relative_field_rep_list = []
for i in a:
tmp = vector(i).list()
for j in tmp:
flattened_relative_field_rep_list.append(j)
flattened_relative_field_rep = vector(flattened_relative_field_rep_list)
for i in range(m):
b += betas[i] * phi(sum([flattened_relative_field_rep[j] * alphas[j%s] for j in range(i*s, i*s + s)]))
return b
def is_in_relative_field(self, b):
r"""
Returns ``True`` if ``b`` is in the relative field.
INPUT:
- ``b`` -- an element of the absolute field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.is_in_relative_field(aa^2 + aa)
True
sage: FE.is_in_relative_field(aa^3)
False
"""
vect = self.relative_field_representation(b)
return vect[1:vect.length()].is_zero()
def cast_into_relative_field(self, b, check=True):
r"""
Casts an absolute field element into the relative field (if possible).
This is the inverse function of the field embedding.
INPUT:
- ``b`` -- an element of the absolute field which also lies in the
relative field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: phi = FE.embedding()
sage: b = aa^2 + aa
sage: FE.is_in_relative_field(b)
True
sage: FE.cast_into_relative_field(b)
a
sage: phi(FE.cast_into_relative_field(b)) == b
True
"""
if check:
if not self.is_in_relative_field(b):
raise ValueError("%s does not belong to the relative field" % b)
return self.relative_field_representation(b)[0]
def embedding(self):
r"""
Returns the embedding which is used to go from the
relative field to the absolute field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.embedding()
Ring morphism:
From: Finite Field in a of size 2^2
To: Finite Field in aa of size 2^4
Defn: a |--> aa^2 + aa
"""
return self._phi
def relative_field_basis(self):
r"""
Returns a basis of the relative field over the prime field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.relative_field_basis()
[1, a]
"""
return self._alphas
def absolute_field_basis(self):
r"""
Returns a basis of the absolute field over the prime field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.absolute_field_basis()
[1, aa, aa^2, aa^3]
"""
return self._betas
def relative_field_degree(self):
r"""
Let `F_p` be the base field of our relative field `F_q`.
        Returns `s` where `p^s = q`.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.relative_field_degree()
2
"""
return self._relative_field_degree
def absolute_field_degree(self):
r"""
Let `F_p` be the base field of our absolute field `F_{q^m}`.
        Returns `sm` where `p^{sm} = q^{m}`.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.absolute_field_degree()
4
"""
return self._absolute_field_degree
def extension_degree(self):
r"""
Return `m`, the extension degree of the absolute field over
the relative field.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(64)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.extension_degree()
3
"""
return self.absolute_field_degree() // self.relative_field_degree()
def prime_field(self):
r"""
Returns the base field of our absolute and relative fields.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.prime_field()
Finite Field of size 2
"""
return self._prime_field
def relative_field(self):
r"""
Returns the relative field of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.relative_field()
Finite Field in a of size 2^2
"""
return self._relative_field
def absolute_field(self):
r"""
Returns the absolute field of ``self``.
EXAMPLES::
sage: from sage.coding.relative_finite_field_extension import *
sage: Fqm.<aa> = GF(16)
sage: Fq.<a> = GF(4)
sage: FE = RelativeFiniteFieldExtension(Fqm, Fq)
sage: FE.absolute_field()
Finite Field in aa of size 2^4
"""
return self._absolute_field
|
py | 1a3296716e58071473486ff3d1d3882b412502cd | from django import forms
from dal import autocomplete
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, Div, MultiField, HTML
from crispy_forms.bootstrap import Accordion, AccordionGroup
from . models import *
class PersonPersonFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(PersonPersonFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'source',
'target',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'Advanced search',
'rel_type',
css_id="more"
),
)
)
class PersonPersonForm(forms.ModelForm):
class Meta:
model = PersonPerson
fields = "__all__"
widgets = {
'source': autocomplete.ModelSelect2(
url='entities-ac:person-autocomplete'),
'target': autocomplete.ModelSelect2(url='entities-ac:person-autocomplete'),
'rel_type': autocomplete.ModelSelect2(
url='/vocabs-ac/concept-by-colleciton-ac/fam-rel'),
}
def __init__(self, *args, **kwargs):
super(PersonPersonForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
class PersonFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(PersonFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Accordion(
AccordionGroup(
'Namen, Geschlecht',
'name',
'forename',
'written_name',
'gender',
css_id="basic_search_fields"
),
AccordionGroup(
'Berufe, Orte',
'belongs_to_place',
'profession',
css_id="more"
),
AccordionGroup(
'Erwähnungen',
'is_main_vfbr',
'is_main',
'is_adm',
'is_related',
'is_other',
css_id="mentions"
),
AccordionGroup(
'Duplikate',
'dedupe_cluster_id',
css_id="duplicates"
),
)
)
class PersonForm(forms.ModelForm):
class Meta:
model = Person
fields = "__all__"
widgets = {
'belongs_to_institution': autocomplete.ModelSelect2(
url='entities-ac:institution-autocomplete'),
'place_of_birth': autocomplete.ModelSelect2(url='entities-ac:place-autocomplete'),
'alt_names': autocomplete.ModelSelect2Multiple(
url='entities-ac:altname-autocomplete'),
}
def __init__(self, *args, **kwargs):
super(PersonForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
class InstitutionFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(InstitutionFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'written_name',
'alt_names',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'Advanced search',
'authority_url',
'location',
css_id="more"
),
)
)
class InstitutionForm(forms.ModelForm):
class Meta:
model = Institution
fields = "__all__"
widgets = {
'location': autocomplete.ModelSelect2(url='entities-ac:place-autocomplete'),
'parent_institution': autocomplete.ModelSelect2(
url='entities-ac:institution-autocomplete'),
'alt_names': autocomplete.ModelSelect2Multiple(
url='entities-ac:altname-autocomplete'),
}
def __init__(self, *args, **kwargs):
super(InstitutionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
class AlternativeNameForm(forms.ModelForm):
class Meta:
model = AlternativeName
fields = "__all__"
def __init__(self, *args, **kwargs):
super(AlternativeNameForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3'
self.helper.field_class = 'col-md-9'
self.helper.add_input(Submit('submit', 'save'),)
class AlternativeNameFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(AlternativeNameFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'name',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'Advanced search',
'name',
css_id="more"
),
)
)
class AlternativeNameFormCreate(forms.ModelForm):
class Meta:
model = AlternativeName
fields = "__all__"
def __init__(self, *args, **kwargs):
super(AlternativeNameFormCreate, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
class PlaceFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(PlaceFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.helper.form_tag = False
self.add_input(Submit('Filter', 'Search'))
self.layout = Layout(
Fieldset(
'Basic search options',
'name',
css_id="basic_search_fields"
),
Accordion(
AccordionGroup(
'Advanced search',
'geonames_id',
'part_of',
css_id="more"
),
)
)
class PlaceForm(forms.ModelForm):
class Meta:
model = Place
fields = "__all__"
widgets = {
'part_of': autocomplete.ModelSelect2(url='entities-ac:place-autocomplete'),
'alt_names': autocomplete.ModelSelect2Multiple(
url='entities-ac:altname-autocomplete'),
}
def __init__(self, *args, **kwargs):
super(PlaceForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.add_input(Submit('submit', 'save'),)
class PlaceFormCreate(forms.ModelForm):
class Meta:
model = Place
fields = "__all__"
widgets = {
'part_of': autocomplete.ModelSelect2(url='entities-ac:place-autocomplete'),
'alt_names': autocomplete.ModelSelect2Multiple(
url='entities-ac:altname-autocomplete'),
}
def __init__(self, *args, **kwargs):
super(PlaceFormCreate, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
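# Usage sketch (editor's addition; the names below are illustrative, not part of this
# module): these FormHelper subclasses are typically attached to a django-filter form
# inside a list view, e.g.:
#
#     from . filters import PersonListFilter   # hypothetical FilterSet for Person
#     from . forms import PersonFilterFormHelper
#
#     f = PersonListFilter(request.GET, queryset=Person.objects.all())
#     f.form.helper = PersonFilterFormHelper()
#     # the template can then render it with {% crispy f.form f.form.helper %}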
|
py | 1a32972ed8188e65a2bb9f25ad6a0587e672db14 | from __future__ import absolute_import, division, print_function
from stripe_modern import util
from stripe_modern.api_resources.customer import Customer
from stripe_modern.api_resources.abstract import APIResource
from stripe_modern.six.moves.urllib.parse import quote_plus
class CustomerBalanceTransaction(APIResource):
OBJECT_NAME = "customer_balance_transaction"
def instance_url(self):
token = util.utf8(self.id)
customer = util.utf8(self.customer)
base = Customer.class_url()
cust_extn = quote_plus(customer)
extn = quote_plus(token)
return "%s/%s/balance_transactions/%s" % (base, cust_extn, extn)
@classmethod
def retrieve(cls, id, api_key=None, **params):
raise NotImplementedError(
"Can't retrieve a Customer Balance Transaction without a Customer ID. "
"Use Customer.retrieve_customer_balance_transaction('cus_123', 'cbtxn_123')"
)
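# Usage sketch (editor's addition): as the error message above indicates, these objects
# are fetched through the parent Customer rather than retrieved directly, e.g.:
#
#     txn = Customer.retrieve_customer_balance_transaction("cus_123", "cbtxn_123")
#     txn.instance_url()
#     # with the standard Customer.class_url() of "/v1/customers" this yields
#     # "/v1/customers/cus_123/balance_transactions/cbtxn_123"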
|
py | 1a32978910f9509018352cd3d450fe3d49bdddca | import os
import json
import base64
import random
import hashlib
import jinja2
import webapp2
from google.appengine.ext import ndb
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
autoescape=True
)
class CounterConfig(ndb.Model):
shards = ndb.IntegerProperty(default=50, indexed=False)
class CounterShard(ndb.Model):
count = ndb.IntegerProperty(default=0, indexed=False)
class Hash(ndb.Model):
pass
def _key(*parts):
hasher = hashlib.sha256()
for part in parts:
hasher.update(base64.b64encode(part) + "\x00")
return base64.b64encode(hasher.digest())
def _shard_key(team, attr, shard):
return _key(team, attr, str(shard))
@ndb.tasklet
def _shards():
cache_key = _key("shards")
context = ndb.get_context()
shards = yield context.memcache_get(cache_key)
if shards is None:
config = yield CounterConfig.get_or_insert_async("main")
shards = config.shards
yield context.memcache_add(cache_key, shards)
raise ndb.Return(shards)
@ndb.tasklet
def mark_tasklet(team, attr, value):
cache_key = _key("hash", team, attr, value)
context = ndb.get_context()
exists = yield context.memcache_get(cache_key)
if exists:
return
yield [
context.memcache_add(cache_key, True),
_mark_tasklet(team, attr, value)
]
@ndb.transactional_tasklet(xg=True)
def _mark_tasklet(team, attr, value):
key_name = _key(team, attr, value)
hash_entity = yield Hash.get_by_id_async(key_name)
if hash_entity is not None:
return
yield [
Hash(key=ndb.Key(Hash, key_name)).put_async(),
ndb.get_context().memcache_incr(_key("count", team, attr)),
_incr_tasklet(team, attr)
]
@ndb.transactional_tasklet
def _incr_tasklet(team, attr):
shards = yield _shards()
shard = random.randint(0, shards - 1)
counter = yield CounterShard.get_or_insert_async(_shard_key(team, attr, shard))
counter.count += 1
yield counter.put_async()
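# Sharded-counter pattern (editor's note): each increment above touches one randomly
# chosen CounterShard entity, so concurrent marks spread their writes across `shards`
# entities instead of contending on a single datastore entity. Reads then sum all
# shards, e.g. (synchronous sketch, `shards` being the configured shard count):
#
#     total = 0
#     for shard in range(shards):
#         counter = CounterShard.get_by_id(_shard_key(team, attr, shard))
#         if counter:
#             total += counter.count
#
# which is what count_tasklet() below does asynchronously, plus memcache caching.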
@ndb.tasklet
def count_tasklet(team, attr, force_recount=False):
cache_key = _key("count", team, attr)
context = ndb.get_context()
if not force_recount:
count = yield context.memcache_get(cache_key)
if count is not None:
raise ndb.Return((team, attr, count))
shards = yield _shards()
keys = [ndb.Key(CounterShard, _shard_key(team, attr, shard)) for shard in xrange(shards)]
results = yield ndb.get_multi_async(keys)
count = 0
for counter in results:
if counter is None:
continue
count += counter.count
cache_key = _key("count", team, attr)
context = ndb.get_context()
yield context.memcache_set(cache_key, count, random.randint(90, 120))
raise ndb.Return((team, attr, count))
@ndb.synctasklet
def scores(teams=["yellow", "blue", "red"], force_recount=False):
tasklets = []
for team in teams:
tasklets.extend([
count_tasklet(team, "user_agents", force_recount),
count_tasklet(team, "remote_addrs", force_recount)
])
results = yield tasklets
scores = {}
for team, attr, count in results:
scores.setdefault(team, {}).setdefault(attr, count)
raise ndb.Return(scores)
class TeamPage(webapp2.RequestHandler):
@ndb.synctasklet
def get(self, team):
team = team.lower()
user_agent = self.request.headers.get("user-agent", "")
remote_addr = self.request.remote_addr
yield [
mark_tasklet(team, "user_agents", user_agent),
mark_tasklet(team, "remote_addrs", remote_addr)
]
template = env.get_template("team.html")
self.response.write(template.render({
"team": team.capitalize(),
"image": {
"yellow": "/static/yellowteam.png",
"blue": "/static/blueteam.png",
"red": "/static/redteam.png"
}.get(team, "/static/unknown.png"),
"color": jinja2.Markup({
"yellow": "#FFEF00",
"red": "#53140A",
"blue": "#0056B9"
}.get(team, "#777777"))
}))
class ScorePage(webapp2.RequestHandler):
def get(self):
template = env.get_template("scores.html")
self.response.write(template.render({}))
class ScoreAPI(webapp2.RequestHandler):
def get(self):
self.response.headers["Content-Type"] = "application/json"
self.response.write(json.dumps(scores()))
class RecalcTask(webapp2.RequestHandler):
def get(self):
scores(force_recount=True)
class MainPage(webapp2.RequestHandler):
def get(self):
template = env.get_template("index.html")
self.response.write(template.render({}))
app = webapp2.WSGIApplication(routes=[
("(?i)/(yellow|blue|red)/?", TeamPage),
("(?i)/scores/api/?", ScoreAPI),
("(?i)/scores/?", ScorePage),
("/", MainPage)
])
tasks = webapp2.WSGIApplication(routes=[
("/tasks/recalc_scores", RecalcTask)
])
|
py | 1a3298cf9ce1ac91c5bb0ce83d5b3ad2d44de53c | import sys
sys.setrecursionlimit(100000)
input = sys.stdin.readline
N, T = map(int, input().split())
A = list(map(int, input().split()))
dp = [[0] * (T + 1) for i in range(N + 1)]
for i, t in enumerate(A):
for j in range(T + 1):
if j - t >= 0:
dp[i + 1][j] = max(dp[i][j], dp[i][j - t] + t)
else:
dp[i + 1][j] = dp[i][j]
print(dp[N][T]) |
py | 1a32990eeff075896a4afd4f58d48df4974e84fe | from flask import Blueprint
auth = Blueprint('auth', __name__)
from views import *
|
py | 1a3299d73ae832ce38dbdc186577bf9f99c14ef9 | import json
from controller.client import Client
def anunciarview():
isvalid, trades = _anunciar()
if isvalid:
print("--------------------- LISTA DE TROCAS -------------------")
for trade in trades:
print("Usuário {", trade.name, '} - Código da Troca: {', trade.idTrade, '}')
print("Oferece -> ID figura: ", trade.offerID, '- Nome: ', trade.offerName, ' - Raridade: ',
trade.offerRarity)
print("Deseja <- ID figura: ", trade.takingID, '- Nome: ', trade.takingName, ' - Raridade: ',
trade.takingRarity)
print('--------------------- ------*----- -------------------')
return trades
else:
print('Lamentamos, mas não foi possível exibir as trocas')
return None
def _anunciar():
client = Client()
response = client.listTrade()
isvalid = response.response
trades = response.list
return isvalid, trades
|
py | 1a329a22e407eb8eaac9a686faa439beed80fe3d | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set secure.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using secure %(secure)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"secure": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
|
py | 1a329a64802cb8e5e965355624ea621b83504b37 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a nodepay test script.
Individual nodepay test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave nodepayds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop nodepayds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing nodepayd/nodepay-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: nodepayds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
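    # A minimal subclass, as a usage sketch (editor's addition; the RPC calls shown are
    # the usual regtest ones and are illustrative only):
    #
    #     class ExampleTest(BitcoinTestFramework):
    #         def set_test_params(self):
    #             self.num_nodes = 1
    #             self.setup_clean_chain = True
    #         def run_test(self):
    #             self.nodes[0].generate(10)
    #             assert_equal(self.nodes[0].getblockcount(), 10)
    #
    #     if __name__ == '__main__':
    #         ExampleTest().main()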
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a nodepayd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple nodepayds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a nodepayd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple nodepayd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'nodepayd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "nodepayd should have exited with an error"
else:
assert_msg = "nodepayd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
        versions of the cache, this helper function sets mocktime to the
        fixed timestamp 1454124732 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as nodepayd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "nodepayd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some nodepayd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "nodepayd"),
help="nodepayd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "nodepayd"),
help="nodepayd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
|
py | 1a329b6bcb3728d93c8560c2005b6e581eedae95 | #! /usr/bin/python3
"""
The database connections are read‐only, so SQL injection attacks can’t be a
problem.
"""
import sys
import os
import threading
import decimal
import time
import json
import re
import requests
import collections
import logging
logger = logging.getLogger(__name__)
from logging import handlers as logging_handlers
D = decimal.Decimal
import binascii
import struct
import apsw
import flask
from flask.ext.httpauth import HTTPBasicAuth
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import jsonrpc
from jsonrpc import dispatcher
import inspect
from counterpartylib.lib import config
from counterpartylib.lib import exceptions
from counterpartylib.lib import util
from counterpartylib.lib import check
from counterpartylib.lib import backend
from counterpartylib.lib import database
from counterpartylib.lib import transaction
from counterpartylib.lib import blocks
from counterpartylib.lib import script
from counterpartylib.lib.messages import send
from counterpartylib.lib.messages import order
from counterpartylib.lib.messages import btcpay
from counterpartylib.lib.messages import issuance
from counterpartylib.lib.messages import broadcast
from counterpartylib.lib.messages import bet
from counterpartylib.lib.messages import dividend
from counterpartylib.lib.messages import burn
from counterpartylib.lib.messages import cancel
from counterpartylib.lib.messages import rps
from counterpartylib.lib.messages import rpsresolve
from counterpartylib.lib.messages import publish
from counterpartylib.lib.messages import execute
API_TABLES = ['assets', 'balances', 'credits', 'debits', 'bets', 'bet_matches',
'broadcasts', 'btcpays', 'burns', 'cancels',
'dividends', 'issuances', 'orders', 'order_matches', 'sends',
'bet_expirations', 'order_expirations', 'bet_match_expirations',
'order_match_expirations', 'bet_match_resolutions', 'rps',
'rpsresolves', 'rps_matches', 'rps_expirations', 'rps_match_expirations',
'mempool']
API_TRANSACTIONS = ['bet', 'broadcast', 'btcpay', 'burn', 'cancel',
'dividend', 'issuance', 'order', 'send',
'rps', 'rpsresolve', 'publish', 'execute']
COMMONS_ARGS = ['encoding', 'fee_per_kb', 'regular_dust_size',
'multisig_dust_size', 'op_return_value', 'pubkey',
'allow_unconfirmed_inputs', 'fee', 'fee_provided']
API_MAX_LOG_SIZE = 10 * 1024 * 1024 #max log size of 10 MB before rotation (make configurable later)
API_MAX_LOG_COUNT = 10
current_api_status_code = None #is updated by the APIStatusPoller
current_api_status_response_json = None #is updated by the APIStatusPoller
class APIError(Exception):
pass
# TODO: ALL queries EVERYWHERE should be done with these methods
def db_query(db, statement, bindings=(), callback=None, **callback_args):
"""Allow direct access to the database in a parametrized manner."""
cursor = db.cursor()
if hasattr(callback, '__call__'):
cursor.execute(statement, bindings)
for row in cursor:
callback(row, **callback_args)
results = None
else:
results = list(cursor.execute(statement, bindings))
cursor.close()
return results
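# Usage sketch (editor's note): without a callback the rows are returned as a list,
#
#     blocks = db_query(db, 'SELECT * FROM blocks WHERE block_index = ?', (310000,))
#
# while passing a callback streams each row into it and returns None, with any extra
# keyword arguments forwarded to the callback (handle_row is a hypothetical function):
#
#     db_query(db, 'SELECT * FROM sends', callback=handle_row, foo='bar')  # calls handle_row(row, foo='bar')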
def get_rows(db, table, filters=None, filterop='AND', order_by=None, order_dir=None, start_block=None, end_block=None,
status=None, limit=1000, offset=0, show_expired=True):
"""SELECT * FROM wrapper. Filters results based on a filter data structure (as used by the API)."""
if filters == None:
filters = []
def value_to_marker(value):
        # if value is a list, the placeholder is (?,?,?,...)
if isinstance(value, list):
return '''({})'''.format(','.join(['?' for e in range(0, len(value))]))
else:
return '''?'''
# TODO: Document that op can be anything that SQLite3 accepts.
if not table or table.lower() not in API_TABLES:
raise APIError('Unknown table')
if filterop and filterop.upper() not in ['OR', 'AND']:
raise APIError('Invalid filter operator (OR, AND)')
if order_dir and order_dir.upper() not in ['ASC', 'DESC']:
raise APIError('Invalid order direction (ASC, DESC)')
if not isinstance(limit, int):
raise APIError('Invalid limit')
elif limit > 1000:
raise APIError('Limit should be lower or equal to 1000')
if not isinstance(offset, int):
raise APIError('Invalid offset')
# TODO: accept an object: {'field1':'ASC', 'field2': 'DESC'}
if order_by and not re.compile('^[a-z0-9_]+$').match(order_by):
raise APIError('Invalid order_by, must be a field name')
if isinstance(filters, dict): #single filter entry, convert to a one entry list
filters = [filters,]
elif not isinstance(filters, list):
filters = []
# TODO: Document this! (Each filter can be an ordered list.)
new_filters = []
for filter_ in filters:
if type(filter_) in (list, tuple) and len(filter_) in [3, 4]:
new_filter = {'field': filter_[0], 'op': filter_[1], 'value': filter_[2]}
if len(filter_) == 4:
new_filter['case_sensitive'] = filter_[3]
new_filters.append(new_filter)
elif type(filter_) == dict:
new_filters.append(filter_)
else:
raise APIError('Unknown filter type')
filters = new_filters
# validate filter(s)
for filter_ in filters:
for field in ['field', 'op', 'value']: #should have all fields
if field not in filter_:
raise APIError("A specified filter is missing the '%s' field" % field)
if not isinstance(filter_['value'], (str, int, float, list)):
raise APIError("Invalid value for the field '%s'" % filter_['field'])
if isinstance(filter_['value'], list) and filter_['op'].upper() not in ['IN', 'NOT IN']:
raise APIError("Invalid value for the field '%s'" % filter_['field'])
if filter_['op'].upper() not in ['=', '==', '!=', '>', '<', '>=', '<=', 'IN', 'LIKE', 'NOT IN', 'NOT LIKE']:
raise APIError("Invalid operator for the field '%s'" % filter_['field'])
if 'case_sensitive' in filter_ and not isinstance(filter_['case_sensitive'], bool):
raise APIError("case_sensitive must be a boolean")
# SELECT
statement = '''SELECT * FROM {}'''.format(table)
# WHERE
bindings = []
conditions = []
for filter_ in filters:
case_sensitive = False if 'case_sensitive' not in filter_ else filter_['case_sensitive']
if filter_['op'] == 'LIKE' and case_sensitive == False:
filter_['field'] = '''UPPER({})'''.format(filter_['field'])
filter_['value'] = filter_['value'].upper()
marker = value_to_marker(filter_['value'])
conditions.append('''{} {} {}'''.format(filter_['field'], filter_['op'], marker))
if isinstance(filter_['value'], list):
bindings += filter_['value']
else:
bindings.append(filter_['value'])
# AND filters
more_conditions = []
if table not in ['balances', 'order_matches', 'bet_matches']:
if start_block != None:
more_conditions.append('''block_index >= ?''')
bindings.append(start_block)
if end_block != None:
more_conditions.append('''block_index <= ?''')
bindings.append(end_block)
elif table in ['order_matches', 'bet_matches']:
if start_block != None:
more_conditions.append('''tx0_block_index >= ?''')
bindings.append(start_block)
if end_block != None:
more_conditions.append('''tx1_block_index <= ?''')
bindings.append(end_block)
# status
if isinstance(status, list) and len(status) > 0:
more_conditions.append('''status IN {}'''.format(value_to_marker(status)))
bindings += status
elif isinstance(status, str) and status != '':
more_conditions.append('''status == ?''')
bindings.append(status)
# legacy filters
if not show_expired and table == 'orders':
#Ignore BTC orders one block early.
expire_index = util.CURRENT_BLOCK_INDEX + 1
more_conditions.append('''((give_asset == ? AND expire_index > ?) OR give_asset != ?)''')
bindings += [config.BTC, expire_index, config.BTC]
if (len(conditions) + len(more_conditions)) > 0:
statement += ''' WHERE'''
all_conditions = []
if len(conditions) > 0:
all_conditions.append('''({})'''.format(''' {} '''.format(filterop.upper()).join(conditions)))
if len(more_conditions) > 0:
all_conditions.append('''({})'''.format(''' AND '''.join(more_conditions)))
statement += ''' {}'''.format(''' AND '''.join(all_conditions))
# ORDER BY
if order_by != None:
statement += ''' ORDER BY {}'''.format(order_by)
if order_dir != None:
statement += ''' {}'''.format(order_dir.upper())
# LIMIT
if limit:
statement += ''' LIMIT {}'''.format(limit)
if offset:
statement += ''' OFFSET {}'''.format(offset)
return db_query(db, statement, tuple(bindings))
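# Example of the filter structure accepted by get_rows() (editor's sketch; the field
# names depend on the table being queried, and SOME_ADDRESS is a placeholder):
#
#     get_rows(db, table='sends',
#              filters=[{'field': 'source', 'op': '==', 'value': 'SOME_ADDRESS'}],
#              order_by='block_index', order_dir='DESC', limit=10)
#
# Each filter may equivalently be given as a 3- or 4-tuple, e.g. ('asset', '==', 'XCP', True),
# where the optional fourth item is the case_sensitive flag.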
def compose_transaction(db, name, params,
encoding='auto',
fee_per_kb=config.DEFAULT_FEE_PER_KB,
regular_dust_size=config.DEFAULT_REGULAR_DUST_SIZE,
multisig_dust_size=config.DEFAULT_MULTISIG_DUST_SIZE,
op_return_value=config.DEFAULT_OP_RETURN_VALUE,
pubkey=None,
allow_unconfirmed_inputs=False,
fee=None,
fee_provided=0):
"""Create and return a transaction."""
# Get provided pubkeys.
if type(pubkey) == str:
provided_pubkeys = [pubkey]
elif type(pubkey) == list:
provided_pubkeys = pubkey
elif pubkey == None:
provided_pubkeys = []
else:
assert False
# Get additional pubkeys from `source` and `destination` params.
# Convert `source` and `destination` to pubkeyhash form.
for address_name in ['source', 'destination']:
if address_name in params:
address = params[address_name]
provided_pubkeys += script.extract_pubkeys(address)
params[address_name] = script.make_pubkeyhash(address)
# Check validity of collected pubkeys.
for pubkey in provided_pubkeys:
if not script.is_fully_valid(binascii.unhexlify(pubkey)):
raise script.AddressError('invalid public key: {}'.format(pubkey))
compose_method = sys.modules['counterpartylib.lib.messages.{}'.format(name)].compose
compose_params = inspect.getargspec(compose_method)[0]
missing_params = [p for p in compose_params if p not in params and p != 'db']
for param in missing_params:
params[param] = None
# try: # NOTE: For debugging, e.g. with `Invalid Params` error.
tx_info = compose_method(db, **params)
return transaction.construct(db, tx_info, encoding=encoding,
fee_per_kb=fee_per_kb,
regular_dust_size=regular_dust_size,
multisig_dust_size=multisig_dust_size,
op_return_value=op_return_value,
provided_pubkeys=provided_pubkeys,
allow_unconfirmed_inputs=allow_unconfirmed_inputs,
exact_fee=fee,
fee_provided=fee_provided)
# except:
# import traceback
# traceback.print_exc()
def sign_transaction(unsigned_tx_hex, private_key_wif):
"""Sign the transaction."""
    return transaction.sign_tx(unsigned_tx_hex, private_key_wif=private_key_wif)
def broadcast_transaction(signed_tx_hex):
"""Broadcast a transaction."""
if not config.TESTNET and config.BROADCAST_TX_MAINNET in ['bci', 'bci-failover']:
url = "https://blockchain.info/pushtx"
params = {'tx': signed_tx_hex}
response = requests.post(url, data=params)
if response.text.lower() != 'transaction submitted' or response.status_code != 200:
if config.BROADCAST_TX_MAINNET == 'bci-failover':
return transaction.broadcast_tx(signed_tx_hex)
else:
raise APIError(response.text)
return response.text
else:
return transaction.broadcast_tx(signed_tx_hex)
def do_transaction(db, name, params, private_key_wif, **kwargs):
"""Create, sign and broadcast transaction."""
    unsigned_tx = compose_transaction(db, name, params, **kwargs)
    signed_tx = sign_transaction(unsigned_tx, private_key_wif=private_key_wif)
    return broadcast_transaction(signed_tx)
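# The three steps above can also be driven individually (editor's sketch with
# illustrative variable names; see the compose() signature of each message type for
# the exact parameters it expects):
#
#     unsigned_hex = compose_transaction(db, 'send',
#                                        {'source': source_address,
#                                         'destination': destination_address,
#                                         'asset': 'XCP', 'quantity': 100000000})
#     signed_hex = sign_transaction(unsigned_hex, private_key_wif=wif)
#     broadcast_transaction(signed_hex)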
def init_api_access_log():
"""Init API logger."""
if config.API_LOG:
api_logger = logging.getLogger("tornado")
h = logging_handlers.RotatingFileHandler(config.API_LOG, 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT)
api_logger.setLevel(logging.INFO)
api_logger.addHandler(h)
api_logger.propagate = False
class APIStatusPoller(threading.Thread):
"""Perform regular checks on the state of the backend and the database."""
def __init__(self):
self.last_database_check = 0
threading.Thread.__init__(self)
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def run(self):
logger.debug('Starting API Status Poller.')
global current_api_status_code, current_api_status_response_json
db = database.get_connection(read_only=True, integrity_check=False)
while self.stop_event.is_set() != True:
try:
# Check that bitcoind is running, communicable, and caught up with the blockchain.
# Check that the database has caught up with bitcoind.
if time.time() - self.last_database_check > 10 * 60: # Ten minutes since last check.
code = 11
logger.debug('Checking backend state.')
check.backend_state()
code = 12
logger.debug('Checking database state.')
check.database_state(db, backend.getblockcount())
self.last_database_check = time.time()
except (check.BackendError, check.DatabaseError) as e:
exception_name = e.__class__.__name__
exception_text = str(e)
logger.debug("API Status Poller: %s", exception_text)
jsonrpc_response = jsonrpc.exceptions.JSONRPCServerError(message=exception_name, data=exception_text)
current_api_status_code = code
current_api_status_response_json = jsonrpc_response.json.encode()
else:
current_api_status_code = None
current_api_status_response_json = None
time.sleep(config.BACKEND_POLL_INTERVAL)
class APIServer(threading.Thread):
"""Handle JSON-RPC API calls."""
def __init__(self):
self.is_ready = False
threading.Thread.__init__(self)
self.stop_event = threading.Event()
self.ioloop = IOLoop.instance()
def stop(self):
self.ioloop.stop()
self.join()
self.stop_event.set()
def run(self):
logger.info('Starting API Server.')
db = database.get_connection(read_only=True, integrity_check=False)
app = flask.Flask(__name__)
auth = HTTPBasicAuth()
@auth.get_password
def get_pw(username):
if username == config.RPC_USER:
return config.RPC_PASSWORD
return None
######################
#READ API
# Generate dynamically get_{table} methods
def generate_get_method(table):
def get_method(**kwargs):
try:
return get_rows(db, table=table, **kwargs)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
return get_method
for table in API_TABLES:
new_method = generate_get_method(table)
new_method.__name__ = 'get_{}'.format(table)
dispatcher.add_method(new_method)
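        # The loop above exposes one read method per entry in API_TABLES (get_balances,
        # get_sends, get_issuances, and so on). A typical JSON-RPC 2.0 request body for
        # one of them looks like (editor's sketch, SOME_ADDRESS is a placeholder):
        #
        #     {"jsonrpc": "2.0", "id": 0, "method": "get_balances",
        #      "params": {"filters": [{"field": "address", "op": "==", "value": "SOME_ADDRESS"}]}}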
@dispatcher.add_method
def sql(query, bindings=None):
if bindings == None:
bindings = []
return db_query(db, query, tuple(bindings))
######################
#WRITE/ACTION API
# Generate dynamically create_{transaction} and do_{transaction} methods
def generate_create_method(tx):
def split_params(**kwargs):
transaction_args = {}
common_args = {}
private_key_wif = None
for key in kwargs:
if key in COMMONS_ARGS:
common_args[key] = kwargs[key]
elif key == 'privkey':
private_key_wif = kwargs[key]
else:
transaction_args[key] = kwargs[key]
return transaction_args, common_args, private_key_wif
def create_method(**kwargs):
try:
transaction_args, common_args, private_key_wif = split_params(**kwargs)
return compose_transaction(db, name=tx, params=transaction_args, **common_args)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
def do_method(**kwargs):
try:
transaction_args, common_args, private_key_wif = split_params(**kwargs)
return do_transaction(db, name=tx, params=transaction_args, private_key_wif=private_key_wif, **common_args)
except TypeError as e: #TODO: generalise for all API methods
raise APIError(str(e))
return create_method, do_method
for tx in API_TRANSACTIONS:
create_method, do_method = generate_create_method(tx)
create_method.__name__ = 'create_{}'.format(tx)
do_method.__name__ = 'do_{}'.format(tx)
dispatcher.add_method(create_method)
dispatcher.add_method(do_method)
@dispatcher.add_method
def sign_tx(unsigned_tx_hex, privkey):
return sign_transaction(unsigned_tx_hex, private_key_wif=privkey)
@dispatcher.add_method
def broadcast_tx(signed_tx_hex):
return broadcast_transaction(signed_tx_hex)
@dispatcher.add_method
def get_messages(block_index):
if not isinstance(block_index, int):
raise APIError("block_index must be an integer.")
cursor = db.cursor()
cursor.execute('select * from messages where block_index = ? order by message_index asc', (block_index,))
messages = cursor.fetchall()
cursor.close()
return messages
@dispatcher.add_method
def get_messages_by_index(message_indexes):
"""Get specific messages from the feed, based on the message_index.
            @param message_indexes: A single index, or a list of one or more message indexes to retrieve.
"""
if not isinstance(message_indexes, list):
message_indexes = [message_indexes,]
for idx in message_indexes: #make sure the data is clean
if not isinstance(idx, int):
raise APIError("All items in message_indexes are not integers")
cursor = db.cursor()
cursor.execute('SELECT * FROM messages WHERE message_index IN (%s) ORDER BY message_index ASC'
% (','.join([str(x) for x in message_indexes]),))
messages = cursor.fetchall()
cursor.close()
return messages
@dispatcher.add_method
def get_xcp_supply():
return util.xcp_supply(db)
@dispatcher.add_method
def get_asset_info(assets):
if not isinstance(assets, list):
raise APIError("assets must be a list of asset names, even if it just contains one entry")
assetsInfo = []
for asset in assets:
# BTC and XCP.
if asset in [config.BTC, config.XCP]:
if asset == config.BTC:
supply = backend.get_btc_supply(normalize=False)
else:
supply = util.xcp_supply(db)
assetsInfo.append({
'asset': asset,
'owner': None,
'divisible': True,
'locked': False,
'supply': supply,
'description': '',
'issuer': None
})
continue
# User‐created asset.
cursor = db.cursor()
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY block_index ASC''', ('valid', asset)))
cursor.close()
if not issuances:
continue #asset not found, most likely
else:
last_issuance = issuances[-1]
locked = False
for e in issuances:
if e['locked']: locked = True
assetsInfo.append({
'asset': asset,
'owner': last_issuance['issuer'],
'divisible': bool(last_issuance['divisible']),
'locked': locked,
'supply': util.asset_supply(db, asset),
'description': last_issuance['description'],
'issuer': last_issuance['issuer']})
return assetsInfo
@dispatcher.add_method
def get_block_info(block_index):
assert isinstance(block_index, int)
cursor = db.cursor()
cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (block_index,))
blocks = list(cursor)
if len(blocks) == 1:
block = blocks[0]
elif len(blocks) == 0:
raise exceptions.DatabaseError('No blocks found.')
else:
assert False
cursor.close()
return block
@dispatcher.add_method
def get_blocks(block_indexes):
"""fetches block info and messages for the specified block indexes"""
if not isinstance(block_indexes, (list, tuple)):
raise APIError("block_indexes must be a list of integers.")
if len(block_indexes) >= 250:
raise APIError("can only specify up to 250 indexes at a time.")
block_indexes_str = ','.join([str(x) for x in block_indexes])
cursor = db.cursor()
cursor.execute('SELECT * FROM blocks WHERE block_index IN (%s) ORDER BY block_index ASC'
% (block_indexes_str,))
blocks = cursor.fetchall()
cursor.execute('SELECT * FROM messages WHERE block_index IN (%s) ORDER BY block_index ASC, message_index ASC'
% (block_indexes_str,))
messages = collections.deque(cursor.fetchall())
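            # Both result sets are ordered by block_index, so messages can be attached to their blocks in a single pass through the deque.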
for block in blocks:
# messages_in_block = []
block['_messages'] = []
while len(messages) and messages[0]['block_index'] == block['block_index']:
block['_messages'].append(messages.popleft())
assert not len(messages) #should have been cleared out
cursor.close()
return blocks
@dispatcher.add_method
def get_running_info():
latestBlockIndex = backend.getblockcount()
try:
check.database_state(db, latestBlockIndex)
except exceptions.DatabaseError:
caught_up = False
else:
caught_up = True
try:
last_block = util.CURRENT_BLOCK_INDEX
except:
last_block = None
try:
last_message = util.last_message(db)
except:
last_message = None
return {
'db_caught_up': caught_up,
'bitcoin_block_count': latestBlockIndex,
'last_block': last_block,
'last_message_index': last_message['message_index'] if last_message else -1,
'running_testnet': config.TESTNET,
'running_testcoin': config.TESTCOIN,
'version_major': config.VERSION_MAJOR,
'version_minor': config.VERSION_MINOR,
'version_revision': config.VERSION_REVISION
}
@dispatcher.add_method
def get_element_counts():
counts = {}
cursor = db.cursor()
for element in ['transactions', 'blocks', 'debits', 'credits', 'balances', 'sends', 'orders',
'order_matches', 'btcpays', 'issuances', 'broadcasts', 'bets', 'bet_matches', 'dividends',
'burns', 'cancels', 'order_expirations', 'bet_expirations', 'order_match_expirations',
'bet_match_expirations', 'messages']:
cursor.execute("SELECT COUNT(*) AS count FROM %s" % element)
count_list = cursor.fetchall()
assert len(count_list) == 1
counts[element] = count_list[0]['count']
cursor.close()
return counts
@dispatcher.add_method
def get_asset_names():
cursor = db.cursor()
names = [row['asset'] for row in cursor.execute("SELECT DISTINCT asset FROM issuances WHERE status = 'valid' ORDER BY asset ASC")]
cursor.close()
return names
@dispatcher.add_method
def get_holder_count(asset):
holders = util.holders(db, asset)
addresses = []
for holder in holders:
addresses.append(holder['address'])
return {asset: len(set(addresses))}
@dispatcher.add_method
def get_holders(asset):
holders = util.holders(db, asset)
return holders
@dispatcher.add_method
def search_raw_transactions(address):
return backend.searchrawtransactions(address)
@dispatcher.add_method
def get_unspent_txouts(address, return_confirmed=False):
result = backend.get_unspent_txouts(address, return_confirmed=return_confirmed)
if return_confirmed:
return {'all': result[0], 'confirmed': result[1]}
else:
return result
@dispatcher.add_method
def get_tx_info(tx_hex):
source, destination, btc_amount, fee, data = blocks.get_tx_info(tx_hex)
return source, destination, btc_amount, fee, util.hexlify(data)
@dispatcher.add_method
def unpack(data_hex):
data = binascii.unhexlify(data_hex)
message_type_id = struct.unpack(config.TXTYPE_FORMAT, data[:4])[0]
message = data[4:]
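            # The first four bytes encode the message type ID (TXTYPE_FORMAT); the remainder is the type-specific payload.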
# TODO: This works for precisely those messages for which
# `unpack()` is defined.
for message_type in API_TRANSACTIONS:
if message_type_id == sys.modules['lib.messages.{}'.format(message_type)].ID:
unpack_method = sys.modules['lib.messages.{}'.format(message_type)].unpack
unpacked = unpack_method(db, message, util.CURRENT_BLOCK_INDEX)
return message_type_id, unpacked
@dispatcher.add_method
def search_pubkey(pubkeyhash, provided_pubkeys=None):
return backend.pubkeyhash_to_pubkey(pubkeyhash, provided_pubkeys=provided_pubkeys)
def _set_cors_headers(response):
if config.RPC_ALLOW_CORS:
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'
@app.route('/', methods=["OPTIONS",])
@app.route('/api/', methods=["OPTIONS",])
def handle_options():
response = flask.Response('', 204)
_set_cors_headers(response)
return response
@app.route('/', methods=["POST",])
@app.route('/api/', methods=["POST",])
@auth.login_required
def handle_post():
try:
request_json = flask.request.get_data().decode('utf-8')
request_data = json.loads(request_json)
assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method']
# params may be omitted
except:
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
#only arguments passed as a dict are supported
if request_data.get('params', None) and not isinstance(request_data['params'], dict):
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(
data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)')
return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
#return an error if API fails checks
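            # current_api_status_code / current_api_status_response_json are set by APIStatusPoller when the backend or database checks fail.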
if not config.FORCE and current_api_status_code:
return flask.Response(current_api_status_response_json, 200, mimetype='application/json')
jsonrpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
response = flask.Response(jsonrpc_response.json.encode(), 200, mimetype='application/json')
_set_cors_headers(response)
return response
init_api_access_log()
http_server = HTTPServer(WSGIContainer(app), xheaders=True)
try:
http_server.listen(config.RPC_PORT, address=config.RPC_HOST)
self.is_ready = True
self.ioloop.start()
except OSError:
raise APIError("Cannot start the API subsystem. Is {} already running, or is something else listening on port {}?".format(config.XCP_CLIENT, config.RPC_PORT))
db.close()
http_server.stop()
self.ioloop.close()
return
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
py | 1a329ba4bd430680f6742f71f08cf89aa1caf569 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see SMlib/__init__.py for details)
"""Namespace browser widget"""
import os
import os.path as osp
import socket
from PyQt4.QtGui import (QWidget, QVBoxLayout, QHBoxLayout, QMenu,
QToolButton, QMessageBox, QApplication,
QCursor, QInputDialog)
#from PyQt4.QtCore import SIGNAL, Qt, Signal
from PyQt4.QtCore import SIGNAL, Qt, pyqtSignal
#from PyQt4.compat import getopenfilenames, getsavefilename#
# Local imports
from SMlib.widgets.externalshell.monitor import (
monitor_set_global, monitor_get_global, monitor_del_global,
monitor_copy_global, monitor_save_globals, monitor_load_globals,
communicate, REMOTE_SETTINGS)
from SMlib.widgets.dicteditor import (RemoteDictEditorTableView,
DictEditorTableView)
from SMlib.widgets.dicteditorutils import globalsfilter
from SMlib.utils import encoding
from SMlib.utils.misc import fix_reference_name
from SMlib.utils.programs import is_module_installed
from SMlib.utils.qthelpers import (get_icon, create_toolbutton,
add_actions, create_action,
getopenfilenames, getsavefilename)
from SMlib.utils.iofuncs import iofunctions
from SMlib.widgets.importwizard import ImportWizard
from SMlib.configs.baseconfig import _, get_supported_types
# added by shisj
from SMlib.widgets.treeeditor import RemoteDictEditorTreeView
SUPPORTED_TYPES = get_supported_types()
class NamespaceBrowser(QWidget):
"""Namespace browser (global variables explorer widget)"""
sig_option_changed = pyqtSignal(str, object)
def __init__(self, parent):
QWidget.__init__(self, parent)
self.shellwidget = None
self.is_internal_shell = None
self.is_ipykernel = None
self.is_visible = True # Do not modify: light mode won't work!
self.setup_in_progress = None
# Remote dict editor settings
self.check_all = None
self.exclude_private = None
self.exclude_uppercase = None
self.exclude_capitalized = None
self.exclude_unsupported = None
self.excluded_names = None
self.truncate = None
self.minmax = None
self.collvalue = None
self.inplace = None
self.remote_editing = None
self.autorefresh = None
self.editor = None
self.exclude_private_action = None
self.exclude_uppercase_action = None
self.exclude_capitalized_action = None
self.exclude_unsupported_action = None
self.filename = None
def setup(self, check_all=None, exclude_private=None,
exclude_uppercase=None, exclude_capitalized=None,
exclude_unsupported=None, excluded_names=None,
truncate=None, minmax=None, collvalue=None,
remote_editing=None, inplace=None, autorefresh=None):
"""Setup the namespace browser"""
assert self.shellwidget is not None
self.check_all = check_all
self.exclude_private = exclude_private
self.exclude_uppercase = exclude_uppercase
self.exclude_capitalized = exclude_capitalized
self.exclude_unsupported = exclude_unsupported
self.excluded_names = excluded_names
self.truncate = truncate
self.minmax = minmax
self.collvalue = collvalue
self.inplace = inplace
self.remote_editing = remote_editing
self.autorefresh = autorefresh
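        # If the editor widget already exists, only propagate the updated settings and refresh; the full layout below is built on the first call only.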
if self.editor is not None:
if self.is_internal_shell:
self.editor.setup_menu(truncate, minmax, inplace, collvalue)
else:
self.editor.setup_menu(truncate, minmax, inplace, collvalue,
remote_editing)
self.exclude_private_action.setChecked(exclude_private)
self.exclude_uppercase_action.setChecked(exclude_uppercase)
self.exclude_capitalized_action.setChecked(exclude_capitalized)
self.exclude_unsupported_action.setChecked(exclude_unsupported)
# Don't turn autorefresh on for IPython kernels
# See Issue 1450
if not self.is_ipykernel:
self.auto_refresh_button.setChecked(autorefresh)
self.refresh_table()
return
# Dict editor:
if self.is_internal_shell:
self.editor = DictEditorTableView(self, None, truncate=truncate,
inplace=inplace, minmax=minmax,
collvalue=collvalue)
else:
#self.editor = RemoteDictEditorTableView(self, None,
self.editor = RemoteDictEditorTreeView(self, None,
truncate=truncate, inplace=inplace, minmax=minmax,
collvalue=collvalue, remote_editing=remote_editing,
get_value_func=self.get_value,
set_value_func=self.set_value,
new_value_func=self.set_value,
remove_values_func=self.remove_values,
copy_value_func=self.copy_value,
is_list_func=self.is_list,
get_len_func=self.get_len,
is_array_func=self.is_array,
is_image_func=self.is_image,
is_dict_func=self.is_dict,
get_array_shape_func=self.get_array_shape,
get_array_ndim_func=self.get_array_ndim,
oedit_func=self.oedit,
plot_func=self.plot, imshow_func=self.imshow,
show_image_func=self.show_image)
self.editor.sig_option_changed.connect(self.sig_option_changed.emit)
# Setup layout
hlayout = QHBoxLayout()
vlayout = QVBoxLayout()
toolbar = self.setup_toolbar(exclude_private, exclude_uppercase,
exclude_capitalized, exclude_unsupported,
autorefresh)
vlayout.setAlignment(Qt.AlignTop)
for widget in toolbar:
vlayout.addWidget(widget)
hlayout.addWidget(self.editor)
hlayout.addLayout(vlayout)
self.setLayout(hlayout)
hlayout.setContentsMargins(0, 0, 0, 0)
self.sig_option_changed.connect(self.option_changed)
def set_shellwidget(self, shellwidget):
"""Bind shellwidget instance to namespace browser"""
self.shellwidget = shellwidget
from SMlib.widgets import internalshell
self.is_internal_shell = isinstance(self.shellwidget,
internalshell.InternalShell)
try:
self.is_ipykernel = self.shellwidget.is_ipykernel
except AttributeError:
pass
if not self.is_internal_shell:
shellwidget.set_namespacebrowser(self)
def setup_toolbar(self, exclude_private, exclude_uppercase,
exclude_capitalized, exclude_unsupported, autorefresh):
"""Setup toolbar"""
self.setup_in_progress = True
toolbar = []
refresh_button = create_toolbutton(self, text=_("Refresh"),
icon=get_icon('reload.png'),
triggered=self.refresh_table)
self.auto_refresh_button = create_toolbutton(self,
text=_("Refresh periodically"),
icon=get_icon('auto_reload.png'),
toggled=self.toggle_auto_refresh)
self.auto_refresh_button.setChecked(autorefresh)
load_button = create_toolbutton(self, text=_("Import data"),
icon=get_icon('fileimport.png'),
triggered=self.import_data)
self.save_button = create_toolbutton(self, text=_("Save data"),
icon=get_icon('filesave.png'),
triggered=lambda: self.save_data(self.filename))
self.save_button.setEnabled(False)
save_as_button = create_toolbutton(self,
text=_("Save data as..."),
icon=get_icon('filesaveas.png'),
triggered=self.save_data)
toolbar += [refresh_button, self.auto_refresh_button, load_button,
self.save_button, save_as_button]
self.exclude_private_action = create_action(self,
_("Exclude private references"),
tip=_("Exclude references which name starts"
" with an underscore"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_private', state))
self.exclude_private_action.setChecked(exclude_private)
self.exclude_uppercase_action = create_action(self,
_("Exclude all-uppercase references"),
tip=_("Exclude references which name is uppercase"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_uppercase', state))
self.exclude_uppercase_action.setChecked(exclude_uppercase)
self.exclude_capitalized_action = create_action(self,
_("Exclude capitalized references"),
tip=_("Exclude references which name starts with an "
"uppercase character"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_capitalized', state))
self.exclude_capitalized_action.setChecked(exclude_capitalized)
self.exclude_unsupported_action = create_action(self,
_("Exclude unsupported data types"),
tip=_("Exclude references to unsupported data types"
" (i.e. which won't be handled/saved correctly)"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_unsupported', state))
self.exclude_unsupported_action.setChecked(exclude_unsupported)
options_button = create_toolbutton(self, text=_("Options"),
icon=get_icon('tooloptions.png'))
toolbar.append(options_button)
options_button.setPopupMode(QToolButton.InstantPopup)
menu = QMenu(self)
editor = self.editor
actions = [self.exclude_private_action, self.exclude_uppercase_action,
self.exclude_capitalized_action,
self.exclude_unsupported_action, None,
editor.truncate_action, editor.inplace_action,
editor.collvalue_action]
if is_module_installed('numpy'):
actions.append(editor.minmax_action)
if not self.is_internal_shell:
actions.append(editor.remote_editing_action)
add_actions(menu, actions)
options_button.setMenu(menu)
self.setup_in_progress = False
return toolbar
def option_changed(self, option, value):
"""Option has changed"""
setattr(self, unicode(option), value)
if not self.is_internal_shell:
settings = self.get_view_settings()
communicate(self._get_sock(),
'set_remote_view_settings()', settings=[settings])
def visibility_changed(self, enable):
"""Notify the widget whether its container (the namespace browser
plugin is visible or not"""
self.is_visible = enable
if enable:
self.refresh_table()
def toggle_auto_refresh(self, state):
"""Toggle auto refresh state"""
self.autorefresh = state
if not self.setup_in_progress and not self.is_internal_shell:
communicate(self._get_sock(),
"set_monitor_auto_refresh(%r)" % state)
def _get_sock(self):
"""Return socket connection"""
return self.shellwidget.introspection_socket
def get_internal_shell_filter(self, mode, check_all=None):
"""
Return internal shell data types filter:
* check_all: check all elements data types for sequences
(dict, list, tuple)
* mode (string): 'editable' or 'picklable'
"""
assert mode in SUPPORTED_TYPES.keys()
if check_all is None:
check_all = self.check_all
def wsfilter(input_dict, check_all=check_all,
filters=tuple(SUPPORTED_TYPES[mode])):
"""Keep only objects that can be pickled"""
return globalsfilter(
input_dict, check_all=check_all, filters=filters,
exclude_private=self.exclude_private,
exclude_uppercase=self.exclude_uppercase,
exclude_capitalized=self.exclude_capitalized,
exclude_unsupported=self.exclude_unsupported,
excluded_names=self.excluded_names)
return wsfilter
def get_view_settings(self):
"""Return dict editor view settings"""
settings = {}
for name in REMOTE_SETTINGS:
settings[name] = getattr(self, name)
return settings
def refresh_table(self):
"""Refresh variable table"""
if self.is_visible and self.isVisible():
if self.is_internal_shell:
# Internal shell
wsfilter = self.get_internal_shell_filter('editable')
self.editor.set_filter(wsfilter)
interpreter = self.shellwidget.interpreter
if interpreter is not None:
self.editor.set_data(interpreter.namespace)
self.editor.adjust_columns()
elif self.shellwidget.is_running():
# import time; print >>STDOUT, time.ctime(time.time()), "Refreshing namespace browser"
sock = self._get_sock()
if sock is None:
return
try:
communicate(sock, "refresh()")
except socket.error:
# Process was terminated before calling this method
pass
def process_remote_view(self, remote_view):
"""Process remote view"""
if remote_view is not None:
self.set_data(remote_view)
#------ Remote Python process commands ------------------------------------
def get_value(self, name):
value = monitor_get_global(self._get_sock(), name)
if value is None:
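            # A None result is ambiguous: the value may actually be None, or the monitor may have been unable to pickle it; ask the remote process which is the case.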
if communicate(self._get_sock(), '%s is not None' % name):
import pickle
msg = unicode(_("Object <b>%s</b> is not picklable") % name)
raise pickle.PicklingError(msg)
return value
def set_value(self, name, value):
monitor_set_global(self._get_sock(), name, value)
self.refresh_table()
def remove_values(self, names):
for name in names:
monitor_del_global(self._get_sock(), name)
self.refresh_table()
def copy_value(self, orig_name, new_name):
monitor_copy_global(self._get_sock(), orig_name, new_name)
self.refresh_table()
def is_list(self, name):
"""Return True if variable is a list or a tuple"""
return communicate(self._get_sock(),
'isinstance(%s, (tuple, list))' % name)
def is_dict(self, name):
"""Return True if variable is a dictionary"""
return communicate(self._get_sock(), 'isinstance(%s, dict)' % name)
def get_len(self, name):
"""Return sequence length"""
return communicate(self._get_sock(), "len(%s)" % name)
def is_array(self, name):
"""Return True if variable is a NumPy array"""
return communicate(self._get_sock(), 'is_array("%s")' % name)
def is_image(self, name):
"""Return True if variable is a PIL.Image image"""
return communicate(self._get_sock(), 'is_image("%s")' % name)
def get_array_shape(self, name):
"""Return array's shape"""
return communicate(self._get_sock(), "%s.shape" % name)
def get_array_ndim(self, name):
"""Return array's ndim"""
return communicate(self._get_sock(), "%s.ndim" % name)
def plot(self, name, funcname):
command = "import SMlib.pyplot; "\
"__fig__ = SMlib.pyplot.figure(); "\
"__items__ = getattr(SMlib.pyplot, '%s')(%s); "\
"SMlib.pyplot.show(); "\
"del __fig__, __items__;" % (funcname, name)
self.shellwidget.send_to_process(command)
def imshow(self, name):
command = "import SMlib.pyplot; " \
"__fig__ = SMlib.pyplot.figure(); " \
"__items__ = SMlib.pyplot.imshow(%s); " \
"SMlib.pyplot.show(); del __fig__, __items__;" % name
self.shellwidget.send_to_process(command)
def show_image(self, name):
command = "%s.show()" % name
self.shellwidget.send_to_process(command)
def oedit(self, name):
command = "from SMlib.widgets.objecteditor import oedit; " \
"oedit('%s', modal=False, namespace=locals());" % name
self.shellwidget.send_to_process(command)
#------ Set, load and save data -------------------------------------------
def set_data(self, data):
"""Set data"""
if data != self.editor.model.get_data():
self.editor.set_data(data)
self.editor.adjust_columns()
def collapse(self):
"""Collapse"""
self.emit(SIGNAL('collapse()'))
def import_data(self, filenames=None):
"""Import data from text file"""
title = _("Import data")
if filenames is None:
if self.filename is None:
basedir = os.getcwdu()
else:
basedir = osp.dirname(self.filename)
filenames, _selfilter = getopenfilenames(self, title, basedir,
iofunctions.load_filters)
if not filenames:
return
elif isinstance(filenames, basestring):
filenames = [filenames]
for filename in filenames:
self.filename = unicode(filename)
ext = osp.splitext(self.filename)[1].lower()
if ext not in iofunctions.load_funcs:
buttons = QMessageBox.Yes | QMessageBox.Cancel
answer = QMessageBox.question(self, title,
_("<b>Unsupported file extension '%s'</b><br><br>"
"Would you like to import it anyway "
"(by selecting a known file format)?"
) % ext, buttons)
if answer == QMessageBox.Cancel:
return
formats = iofunctions.load_extensions.keys()
item, ok = QInputDialog.getItem(self, title,
_('Open file as:'),
formats, 0, False)
if ok:
ext = iofunctions.load_extensions[unicode(item)]
else:
return
load_func = iofunctions.load_funcs[ext]
# 'import_wizard' (self.setup_io)
if isinstance(load_func, basestring):
# Import data with import wizard
error_message = None
try:
text, _encoding = encoding.read(self.filename)
if self.is_internal_shell:
self.editor.import_from_string(text)
else:
base_name = osp.basename(self.filename)
editor = ImportWizard(self, text, title=base_name,
varname=fix_reference_name(base_name))
if editor.exec_():
var_name, clip_data = editor.get_data()
monitor_set_global(self._get_sock(),
var_name, clip_data)
except Exception, error:
error_message = str(error)
else:
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
if self.is_internal_shell:
namespace, error_message = load_func(self.filename)
interpreter = self.shellwidget.interpreter
for key in namespace.keys():
new_key = fix_reference_name(key,
blacklist=interpreter.namespace.keys())
if new_key != key:
namespace[new_key] = namespace.pop(key)
if error_message is None:
interpreter.namespace.update(namespace)
else:
error_message = monitor_load_globals(self._get_sock(),
self.filename, ext)
QApplication.restoreOverrideCursor()
QApplication.processEvents()
if error_message is not None:
QMessageBox.critical(self, title,
_("<b>Unable to load '%s'</b>"
"<br><br>Error message:<br>%s"
) % (self.filename, error_message))
self.refresh_table()
def save_data(self, filename=None):
"""Save data"""
if filename is None:
filename = self.filename
if filename is None:
filename = os.getcwdu()
filename, _selfilter = getsavefilename(self, _("Save data"),
filename,
iofunctions.save_filters)
if filename:
self.filename = filename
else:
return False
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
if self.is_internal_shell:
wsfilter = self.get_internal_shell_filter('picklable',
check_all=True)
namespace = wsfilter(self.shellwidget.interpreter.namespace).copy()
error_message = iofunctions.save(namespace, filename)
else:
settings = self.get_view_settings()
error_message = monitor_save_globals(self._get_sock(),
settings, filename)
QApplication.restoreOverrideCursor()
QApplication.processEvents()
if error_message is not None:
QMessageBox.critical(self, _("Save data"),
_("<b>Unable to save current workspace</b>"
"<br><br>Error message:<br>%s") % error_message)
self.save_button.setEnabled(self.filename is not None)
|
py | 1a329bcb61abede9205a836894b15f2dd9eb805c | from datetime import date, datetime
from functools import reduce
from typing import Any, Union
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import polars as pl
from polars import testing
from polars.datatypes import Float64, Int32, Int64, UInt32, UInt64
def _getattr_multi(obj: object, op: str) -> Any:
""" "
Allows `op` to be multiple layers deep, i.e. op="str.lengths" will mean we first
get the attribute "str", and then the attribute "lengths"
"""
op_list = op.split(".")
return reduce(lambda o, m: getattr(o, m), op_list, obj)
def verify_series_and_expr_api(
input: pl.Series, expected: pl.Series, op: str, *args: Any, **kwargs: Any
) -> None:
"""
Small helper function to test element-wise functions for both the series and expressions api.
Examples
--------
>>> s = pl.Series([1, 3, 2])
>>> expected = pl.Series([1, 2, 3])
>>> verify_series_and_expr_api(s, expected, "sort")
"""
expr = _getattr_multi(pl.col("*"), op)(*args, **kwargs)
result_expr: pl.Series = input.to_frame().select(expr)[:, 0] # type: ignore
result_series = _getattr_multi(input, op)(*args, **kwargs)
testing.assert_series_equal(result_expr, expected)
testing.assert_series_equal(result_series, expected)
def test_cum_agg() -> None:
s = pl.Series("a", [1, 2, 3, 2])
verify_series_and_expr_api(s, pl.Series("a", [1, 3, 6, 8]), "cumsum")
verify_series_and_expr_api(s, pl.Series("a", [1, 1, 1, 1]), "cummin")
verify_series_and_expr_api(s, pl.Series("a", [1, 2, 3, 3]), "cummax")
verify_series_and_expr_api(s, pl.Series("a", [1, 2, 6, 12]), "cumprod")
def test_init_inputs() -> None:
# Good inputs
pl.Series("a", [1, 2])
pl.Series("a", values=[1, 2])
pl.Series(name="a", values=[1, 2])
pl.Series(values=[1, 2], name="a")
assert pl.Series([1, 2]).dtype == pl.Int64
assert pl.Series(values=[1, 2]).dtype == pl.Int64
assert pl.Series("a").dtype == pl.Float32 # f32 type used in case of no data
assert pl.Series().dtype == pl.Float32
assert pl.Series(values=[True, False]).dtype == pl.Boolean
assert pl.Series(values=np.array([True, False])).dtype == pl.Boolean
assert pl.Series(values=np.array(["foo", "bar"])).dtype == pl.Utf8
assert pl.Series(values=["foo", "bar"]).dtype == pl.Utf8
assert pl.Series("a", [pl.Series([1, 2, 4]), pl.Series([3, 2, 1])]).dtype == pl.List
assert pl.Series(pd.Series([1, 2])).dtype == pl.Int64
assert pl.Series("a", [10000, 20000, 30000], dtype=pl.Time).dtype == pl.Time
# 2d numpy array
res = pl.Series(name="a", values=np.array([[1, 2], [3, 4]]))
assert all(res[0] == np.array([1, 2]))
assert all(res[1] == np.array([3, 4]))
assert (
pl.Series(values=np.array([["foo", "bar"], ["foo2", "bar2"]])).dtype
== pl.Object
)
# Bad inputs
with pytest.raises(ValueError):
pl.Series([1, 2, 3], [1, 2, 3])
with pytest.raises(ValueError):
pl.Series({"a": [1, 2, 3]})
with pytest.raises(OverflowError):
pl.Series("bigint", [2 ** 64])
def test_concat() -> None:
s = pl.Series("a", [2, 1, 3])
assert pl.concat([s, s]).len() == 6
# check if s remains unchanged
assert s.len() == 3
def test_to_frame() -> None:
s = pl.Series([1, 2])
assert s.to_frame().shape == (2, 1)
def test_bitwise_ops() -> None:
a = pl.Series([True, False, True])
b = pl.Series([False, True, True])
assert (a & b).series_equal(pl.Series([False, False, True]))
assert (a | b).series_equal(pl.Series([True, True, True]))
assert (a ^ b).series_equal(pl.Series([True, True, False]))
assert (~a).series_equal(pl.Series([False, True, False]))
# rand/rxor/ror we trigger by casting the left hand to a list here in the test
# Note that the type annotations only allow Series to be passed in, but there is
# specific code to deal with non-Series inputs.
assert (True & a).series_equal(pl.Series([True, False, True])) # type: ignore
assert (True | a).series_equal(pl.Series([True, True, True])) # type: ignore
assert (True ^ a).series_equal(pl.Series([False, True, False])) # type: ignore
def test_bitwise_floats_invert() -> None:
a = pl.Series([2.0, 3.0, 0.0])
assert ~a == NotImplemented
def test_equality() -> None:
a = pl.Series("a", [1, 2])
b = a
cmp = a == b
assert isinstance(cmp, pl.Series)
assert cmp.sum() == 2
assert (a != b).sum() == 0
assert (a >= b).sum() == 2
assert (a <= b).sum() == 2
assert (a > b).sum() == 0
assert (a < b).sum() == 0
assert a.sum() == 3
assert a.series_equal(b)
a = pl.Series("name", ["ham", "foo", "bar"])
testing.assert_series_equal((a == "ham"), pl.Series("name", [True, False, False]))
def test_agg() -> None:
series = pl.Series("a", [1, 2])
assert series.mean() == 1.5
assert series.min() == 1
assert series.max() == 2
@pytest.mark.parametrize(
"s", [pl.Series([1, 2], dtype=Int64), pl.Series([1, 2], dtype=Float64)]
)
def test_arithmetic(s: pl.Series) -> None:
a = s
b = s
assert ((a * b) == [1, 4]).sum() == 2
assert ((a / b) == [1.0, 1.0]).sum() == 2
assert ((a + b) == [2, 4]).sum() == 2
assert ((a - b) == [0, 0]).sum() == 2
assert ((a + 1) == [2, 3]).sum() == 2
assert ((a - 1) == [0, 1]).sum() == 2
assert ((a / 1) == [1.0, 2.0]).sum() == 2
assert ((a // 2) == [0, 1]).sum() == 2
assert ((a * 2) == [2, 4]).sum() == 2
assert ((1 + a) == [2, 3]).sum() == 2
assert ((1 - a) == [0, -1]).sum() == 2
assert ((1 * a) == [1, 2]).sum() == 2
    # true division vs. floor division
testing.assert_series_equal(1 / a, pl.Series([1.0, 0.5])) # type: ignore
if s.dtype == Int64:
expected = pl.Series([1, 0])
else:
expected = pl.Series([1.0, 0.5])
testing.assert_series_equal(1 // a, expected)
# modulo
assert ((1 % a) == [0, 1]).sum() == 2
assert ((a % 1) == [0, 0]).sum() == 2
# negate
assert (-a == [-1, -2]).sum() == 2
# wrong dtypes in rhs operands
assert ((1.0 - a) == [0, -1]).sum() == 2
assert ((1.0 / a) == [1.0, 0.5]).sum() == 2
assert ((1.0 * a) == [1, 2]).sum() == 2
assert ((1.0 + a) == [2, 3]).sum() == 2
assert ((1.0 % a) == [0, 1]).sum() == 2
a = pl.Series("a", [datetime(2021, 1, 1)])
with pytest.raises(ValueError):
a // 2
with pytest.raises(ValueError):
a / 2
with pytest.raises(ValueError):
a * 2
with pytest.raises(ValueError):
a % 2
with pytest.raises(ValueError):
a ** 2
with pytest.raises(ValueError):
2 / a
with pytest.raises(ValueError):
2 // a
with pytest.raises(ValueError):
2 * a
with pytest.raises(ValueError):
2 % a
with pytest.raises(ValueError):
2 ** a
def test_add_string() -> None:
s = pl.Series(["hello", "weird"])
result = s + " world"
testing.assert_series_equal(result, pl.Series(["hello world", "weird world"]))
def test_various() -> None:
a = pl.Series("a", [1, 2])
assert a.is_null().sum() == 0
assert a.name == "a"
a.rename("b", in_place=True)
assert a.name == "b"
assert a.len() == 2
assert len(a) == 2
b = a.slice(1, 1)
assert b.len() == 1
assert b.series_equal(pl.Series("b", [2]))
a.append(b)
assert a.series_equal(pl.Series("b", [1, 2, 2]))
a = pl.Series("a", range(20))
assert a.head(5).len() == 5
assert a.tail(5).len() == 5
assert a.head(5) != a.tail(5)
a = pl.Series("a", [2, 1, 4])
a.sort(in_place=True)
assert a.series_equal(pl.Series("a", [1, 2, 4]))
a = pl.Series("a", [2, 1, 1, 4, 4, 4])
testing.assert_series_equal(a.arg_unique(), pl.Series("a", [0, 1, 3], dtype=UInt32))
assert a.take([2, 3]).series_equal(pl.Series("a", [1, 4]))
assert a.is_numeric()
a = pl.Series("bool", [True, False])
assert not a.is_numeric()
def test_filter_ops() -> None:
a = pl.Series("a", range(20))
assert a[a > 1].len() == 18
assert a[a < 1].len() == 1
assert a[a <= 1].len() == 2
assert a[a >= 1].len() == 19
assert a[a == 1].len() == 1
assert a[a != 1].len() == 19
def test_cast() -> None:
a = pl.Series("a", range(20))
assert a.cast(pl.Float32).dtype == pl.Float32
assert a.cast(pl.Float64).dtype == pl.Float64
assert a.cast(pl.Int32).dtype == pl.Int32
assert a.cast(pl.UInt32).dtype == pl.UInt32
assert a.cast(pl.Datetime).dtype == pl.Datetime
assert a.cast(pl.Date).dtype == pl.Date
def test_to_python() -> None:
a = pl.Series("a", range(20))
b = a.to_list()
assert isinstance(b, list)
assert len(b) == 20
b = a.to_list(use_pyarrow=True)
assert isinstance(b, list)
assert len(b) == 20
a = pl.Series("a", [1, None, 2])
assert a.null_count() == 1
assert a.to_list() == [1, None, 2]
def test_sort() -> None:
a = pl.Series("a", [2, 1, 3])
testing.assert_series_equal(a.sort(), pl.Series("a", [1, 2, 3]))
testing.assert_series_equal(a.sort(reverse=True), pl.Series("a", [3, 2, 1]))
def test_rechunk() -> None:
a = pl.Series("a", [1, 2, 3])
b = pl.Series("b", [4, 5, 6])
a.append(b)
assert a.n_chunks() == 2
assert a.rechunk(in_place=False).n_chunks() == 1
a.rechunk(in_place=True)
assert a.n_chunks() == 1
def test_indexing() -> None:
a = pl.Series("a", [1, 2, None])
assert a[1] == 2
assert a[2] is None
b = pl.Series("b", [True, False])
assert b[0]
assert not b[1]
a = pl.Series("a", ["a", None])
assert a[0] == "a"
assert a[1] is None
a = pl.Series("a", [0.1, None])
assert a[0] == 0.1
assert a[1] is None
def test_arrow() -> None:
a = pl.Series("a", [1, 2, 3, None])
out = a.to_arrow()
assert out == pa.array([1, 2, 3, None])
a = pa.array(["foo", "bar"], pa.dictionary(pa.int32(), pa.utf8()))
s = pl.Series("a", a)
assert s.dtype == pl.Categorical
assert (
pl.from_arrow(pa.array([["foo"], ["foo", "bar"]], pa.list_(pa.utf8()))).dtype
== pl.List
)
def test_view() -> None:
a = pl.Series("a", [1.0, 2.0, 3.0])
assert isinstance(a.view(), np.ndarray)
assert np.all(a.view() == np.array([1, 2, 3]))
def test_ufunc() -> None:
a = pl.Series("a", [1.0, 2.0, 3.0, 4.0])
b = np.multiply(a, 4)
assert isinstance(b, pl.Series)
assert b == [4, 8, 12, 16]
# test if null bitmask is preserved
a = pl.Series("a", [1.0, None, 3.0])
b = np.exp(a)
assert b.null_count() == 1
# test if it works with chunked series.
a = pl.Series("a", [1.0, None, 3.0])
b = pl.Series("b", [4.0, 5.0, None])
a.append(b)
assert a.n_chunks() == 2
c = np.multiply(a, 3)
testing.assert_series_equal(c, pl.Series("a", [3.0, None, 9.0, 12.0, 15.0, None]))
def test_get() -> None:
a = pl.Series("a", [1, 2, 3])
assert a[0] == 1
assert a[:2] == [1, 2]
    assert a[range(2)] == [1, 2]
    assert a[range(0, 4, 2)] == [1, 3]
def test_set() -> None:
a = pl.Series("a", [True, False, True])
mask = pl.Series("msk", [True, False, True])
a[mask] = False
testing.assert_series_equal(a, pl.Series("", [False] * 3))
def test_set_value_as_list_fail() -> None:
""" " it is not allowed to use a list to set values"""
s = pl.Series("a", [1, 2, 3])
with pytest.raises(ValueError):
s[[0, 1]] = [4, 5]
@pytest.mark.parametrize("key", [True, False, 1.0])
def test_set_invalid_key(key: Any) -> None:
s = pl.Series("a", [1, 2, 3])
with pytest.raises(ValueError):
s[key] = 1
@pytest.mark.parametrize(
"key",
[
pl.Series([False, True, True]),
pl.Series([1, 2], dtype=UInt32),
pl.Series([1, 2], dtype=UInt64),
],
)
def test_set_key_series(key: pl.Series) -> None:
"""only UInt32/UInt64/bool are allowed"""
s = pl.Series("a", [1, 2, 3])
s[key] = 4
testing.assert_series_equal(s, pl.Series("a", [1, 4, 4]))
def test_set_np_array_boolean_mask() -> None:
a = pl.Series("a", [1, 2, 3])
mask = np.array([True, False, True])
a[mask] = 4
testing.assert_series_equal(a, pl.Series("a", [4, 2, 4]))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.uint32, np.uint64])
def test_set_np_array(dtype: Any) -> None:
a = pl.Series("a", [1, 2, 3])
idx = np.array([0, 2], dtype=dtype)
a[idx] = 4
testing.assert_series_equal(a, pl.Series("a", [4, 2, 4]))
@pytest.mark.parametrize("idx", [[0, 2], (0, 2)])
def test_set_list_and_tuple(idx: Union[list, tuple]) -> None:
a = pl.Series("a", [1, 2, 3])
a[idx] = 4
testing.assert_series_equal(a, pl.Series("a", [4, 2, 4]))
def test_fill_null() -> None:
a = pl.Series("a", [1, 2, None])
verify_series_and_expr_api(a, pl.Series("a", [1, 2, 2]), "fill_null", "forward")
verify_series_and_expr_api(
a, pl.Series("a", [1, 2, 14], dtype=Int64), "fill_null", 14
)
def test_apply() -> None:
a = pl.Series("a", [1, 2, None])
b = a.apply(lambda x: x ** 2)
assert b == [1, 4, None]
a = pl.Series("a", ["foo", "bar", None])
b = a.apply(lambda x: x + "py")
assert b == ["foopy", "barpy", None]
b = a.apply(lambda x: len(x), return_dtype=pl.Int32)
assert b == [3, 3, None]
b = a.apply(lambda x: len(x))
assert b == [3, 3, None]
# just check that it runs (somehow problem with conditional compilation)
a = pl.Series("a", [2, 2, 3]).cast(pl.Datetime)
a.apply(lambda x: x)
a = pl.Series("a", [2, 2, 3]).cast(pl.Date)
a.apply(lambda x: x)
def test_shift() -> None:
a = pl.Series("a", [1, 2, 3])
testing.assert_series_equal(a.shift(1), pl.Series("a", [None, 1, 2]))
testing.assert_series_equal(a.shift(-1), pl.Series("a", [2, 3, None]))
testing.assert_series_equal(a.shift(-2), pl.Series("a", [3, None, None]))
testing.assert_series_equal(a.shift_and_fill(-1, 10), pl.Series("a", [2, 3, 10]))
def test_rolling() -> None:
a = pl.Series("a", [1, 2, 3, 2, 1])
testing.assert_series_equal(a.rolling_min(2), pl.Series("a", [None, 1, 2, 2, 1]))
testing.assert_series_equal(a.rolling_max(2), pl.Series("a", [None, 2, 3, 3, 2]))
testing.assert_series_equal(a.rolling_sum(2), pl.Series("a", [None, 3, 5, 5, 3]))
testing.assert_series_equal(
a.rolling_mean(2), pl.Series("a", [None, 1.5, 2.5, 2.5, 1.5])
)
assert a.rolling_std(2).to_list()[1] == pytest.approx(0.7071067811865476)
assert a.rolling_var(2).to_list()[1] == pytest.approx(0.5)
testing.assert_series_equal(
a.rolling_median(4), pl.Series("a", [None, None, None, 2, 2], dtype=Float64)
)
testing.assert_series_equal(
a.rolling_quantile(0, "nearest", 3),
pl.Series("a", [None, None, 1, 2, 1], dtype=Float64),
)
testing.assert_series_equal(
a.rolling_quantile(0, "lower", 3),
pl.Series("a", [None, None, 1, 2, 1], dtype=Float64),
)
testing.assert_series_equal(
a.rolling_quantile(0, "higher", 3),
pl.Series("a", [None, None, 1, 2, 1], dtype=Float64),
)
assert a.rolling_skew(4).null_count() == 3
def test_object() -> None:
vals = [[12], "foo", 9]
a = pl.Series("a", vals)
assert a.dtype == pl.Object
assert a.to_list() == vals
assert a[1] == "foo"
def test_repeat() -> None:
s = pl.repeat(1, 10)
assert s.dtype == pl.Int64
assert s.len() == 10
s = pl.repeat("foo", 10)
assert s.dtype == pl.Utf8
assert s.len() == 10
s = pl.repeat(1.0, 5)
assert s.dtype == pl.Float64
assert s.len() == 5
assert s == [1.0, 1.0, 1.0, 1.0, 1.0]
s = pl.repeat(True, 5)
assert s.dtype == pl.Boolean
assert s.len() == 5
def test_median() -> None:
s = pl.Series([1, 2, 3])
assert s.median() == 2
def test_quantile() -> None:
s = pl.Series([1, 2, 3])
assert s.quantile(0.5, "nearest") == 2
assert s.quantile(0.5, "lower") == 2
assert s.quantile(0.5, "higher") == 2
def test_shape() -> None:
s = pl.Series([1, 2, 3])
assert s.shape == (3,)
@pytest.mark.parametrize("arrow_available", [True, False])
def test_create_list_series(arrow_available: bool) -> None:
pl.internals.series._PYARROW_AVAILABLE = arrow_available
a = [[1, 2], None, [None, 3]]
s = pl.Series("", a)
assert s.to_list() == a
def test_iter() -> None:
s = pl.Series("", [1, 2, 3])
itr = s.__iter__()
assert itr.__next__() == 1
assert itr.__next__() == 2
assert itr.__next__() == 3
assert sum(s) == 6
def test_empty() -> None:
a = pl.Series(dtype=pl.Int8)
assert a.dtype == pl.Int8
a = pl.Series()
assert a.dtype == pl.Float32
a = pl.Series("name", [])
assert a.dtype == pl.Float32
a = pl.Series(values=(), dtype=pl.Int8)
assert a.dtype == pl.Int8
def test_describe() -> None:
num_s = pl.Series([1, 2, 3])
float_s = pl.Series([1.3, 4.6, 8.9])
str_s = pl.Series(["abc", "pqr", "xyz"])
bool_s = pl.Series([True, False, None, True, True])
date_s = pl.Series([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3)])
empty_s = pl.Series(np.empty(0))
assert num_s.describe().shape == (6, 2)
assert float_s.describe().shape == (6, 2)
assert str_s.describe().shape == (3, 2)
assert bool_s.describe().shape == (3, 2)
assert date_s.describe().shape == (4, 2)
with pytest.raises(ValueError):
assert empty_s.describe()
def test_is_in() -> None:
s = pl.Series([1, 2, 3])
out = s.is_in([1, 2])
assert out == [True, True, False]
df = pl.DataFrame({"a": [1.0, 2.0], "b": [1, 4]})
assert df[pl.col("a").is_in(pl.col("b")).alias("mask")]["mask"] == [True, False]
def test_str_slice() -> None:
df = pl.DataFrame({"a": ["foobar", "barfoo"]})
assert df["a"].str.slice(-3) == ["bar", "foo"]
assert df[[pl.col("a").str.slice(2, 4)]]["a"] == ["obar", "rfoo"]
def test_arange_expr() -> None:
df = pl.DataFrame({"a": ["foobar", "barfoo"]})
out = df[[pl.arange(0, pl.col("a").count() * 10)]]
assert out.shape == (20, 1)
assert out.select_at_idx(0)[-1] == 19
# eager arange
out2 = pl.arange(0, 10, 2, eager=True)
    assert out2 == [0, 2, 4, 6, 8]
out3 = pl.arange(pl.Series([0, 19]), pl.Series([3, 39]), step=2, eager=True)
assert out3.dtype == pl.List # type: ignore
assert out3[0].to_list() == [0, 2] # type: ignore
def test_round() -> None:
a = pl.Series("f", [1.003, 2.003])
b = a.round(2)
assert b == [1.00, 2.00]
def test_apply_list_out() -> None:
s = pl.Series("count", [3, 2, 2])
out = s.apply(lambda val: pl.repeat(val, val))
assert out[0] == [3, 3, 3]
assert out[1] == [2, 2]
assert out[2] == [2, 2]
def test_is_first() -> None:
s = pl.Series("", [1, 1, 2])
assert s.is_first() == [True, False, True]
def test_reinterpret() -> None:
s = pl.Series("a", [1, 1, 2], dtype=pl.UInt64)
assert s.reinterpret(signed=True).dtype == pl.Int64
df = pl.DataFrame([s])
assert df[[pl.col("a").reinterpret(signed=True)]]["a"].dtype == pl.Int64
def test_mode() -> None:
s = pl.Series("a", [1, 1, 2])
assert s.mode() == [1]
df = pl.DataFrame([s])
assert df[[pl.col("a").mode()]]["a"] == [1]
def test_jsonpath_single() -> None:
s = pl.Series(['{"a":"1"}', None, '{"a":2}', '{"a":2.1}', '{"a":true}'])
expected = pl.Series(
[
"1",
None,
"2",
"2.1",
"true",
]
)
verify_series_and_expr_api(s, expected, "str.json_path_match", "$.a")
def test_extract_regex() -> None:
s = pl.Series(
[
"http://vote.com/ballon_dor?candidate=messi&ref=polars",
"http://vote.com/ballon_dor?candidat=jorginho&ref=polars",
"http://vote.com/ballon_dor?candidate=ronaldo&ref=polars",
]
)
expected = pl.Series(
[
"messi",
None,
"ronaldo",
]
)
verify_series_and_expr_api(s, expected, "str.extract", r"candidate=(\w+)", 1)
def test_rank_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
testing.assert_series_equal(
s.rank("dense"), pl.Series("a", [2, 3, 4, 3, 3, 4, 1], dtype=UInt32)
)
df = pl.DataFrame([s])
assert df.select(pl.col("a").rank("dense"))["a"] == [2, 3, 4, 3, 3, 4, 1]
testing.assert_series_equal(
s.rank("dense", reverse=True),
pl.Series("a", [3, 2, 1, 2, 2, 1, 4], dtype=UInt32),
)
def test_diff_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
expected = pl.Series("a", [1, 1, -1, 0, 1, -3])
testing.assert_series_equal(s.diff(null_behavior="drop"), expected)
df = pl.DataFrame([s])
testing.assert_series_equal(
df.select(pl.col("a").diff())["a"], pl.Series("a", [None, 1, 1, -1, 0, 1, -3])
)
def test_pct_change_dispatch() -> None:
s = pl.Series("a", [1, 2, 4, 8, 16, 32, 64])
expected = pl.Series("a", [None, None, float("inf"), 3.0, 3.0, 3.0, 3.0])
verify_series_and_expr_api(s, expected, "pct_change", 2)
def test_skew_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
assert s.skew(True) == pytest.approx(-0.5953924651018018)
assert s.skew(False) == pytest.approx(-0.7717168360221258)
df = pl.DataFrame([s])
assert np.isclose(df.select(pl.col("a").skew(False))["a"][0], -0.7717168360221258)
def test_kurtosis_dispatch() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
expected = -0.6406250000000004
assert s.kurtosis() == pytest.approx(expected)
df = pl.DataFrame([s])
assert np.isclose(df.select(pl.col("a").kurtosis())["a"][0], expected)
def test_arr_lengths_dispatch() -> None:
s = pl.Series("a", [[1, 2], [1, 2, 3]])
testing.assert_series_equal(s.arr.lengths(), pl.Series("a", [2, 3], dtype=UInt32))
df = pl.DataFrame([s])
testing.assert_series_equal(
df.select(pl.col("a").arr.lengths())["a"], pl.Series("a", [2, 3], dtype=UInt32)
)
def test_arr_arithmetic() -> None:
s = pl.Series("a", [[1, 2], [1, 2, 3]])
testing.assert_series_equal(s.arr.sum(), pl.Series("a", [3, 6]))
testing.assert_series_equal(s.arr.mean(), pl.Series("a", [1.5, 2.0]))
testing.assert_series_equal(s.arr.max(), pl.Series("a", [2, 3]))
testing.assert_series_equal(s.arr.min(), pl.Series("a", [1, 1]))
def test_arr_ordering() -> None:
s = pl.Series("a", [[2, 1], [1, 3, 2]])
testing.assert_series_equal(s.arr.sort(), pl.Series("a", [[1, 2], [1, 2, 3]]))
testing.assert_series_equal(s.arr.reverse(), pl.Series("a", [[1, 2], [2, 3, 1]]))
def test_arr_unique() -> None:
s = pl.Series("a", [[2, 1], [1, 2, 2]])
result = s.arr.unique()
assert len(result) == 2
assert sorted(result[0]) == [1, 2]
assert sorted(result[1]) == [1, 2]
def test_sqrt_dispatch() -> None:
s = pl.Series("a", [1, 2])
testing.assert_series_equal(s.sqrt(), pl.Series("a", [1.0, np.sqrt(2)]))
df = pl.DataFrame([s])
testing.assert_series_equal(
df.select(pl.col("a").sqrt())["a"], pl.Series("a", [1.0, np.sqrt(2)])
)
def test_range() -> None:
s = pl.Series("a", [1, 2, 3, 2, 2, 3, 0])
assert s[2:5].series_equal(s[range(2, 5)])
df = pl.DataFrame([s])
assert df[2:5].frame_equal(df[range(2, 5)])
def test_strict_cast() -> None:
with pytest.raises(RuntimeError):
pl.Series("a", [2 ** 16]).cast(dtype=pl.Int16, strict=True)
with pytest.raises(RuntimeError):
pl.DataFrame({"a": [2 ** 16]}).select([pl.col("a").cast(pl.Int16, strict=True)])
def test_list_concat_dispatch() -> None:
s0 = pl.Series("a", [[1, 2]])
s1 = pl.Series("b", [[3, 4, 5]])
expected = pl.Series("a", [[1, 2, 3, 4, 5]])
out = s0.arr.concat([s1])
assert out.series_equal(expected)
out = s0.arr.concat(s1)
assert out.series_equal(expected)
df = pl.DataFrame([s0, s1])
assert df.select(pl.concat_list(["a", "b"]).alias("a"))["a"].series_equal(expected)
assert df.select(pl.col("a").arr.concat("b").alias("a"))["a"].series_equal(expected)
assert df.select(pl.col("a").arr.concat(["b"]).alias("a"))["a"].series_equal(
expected
)
def test_floor_divide() -> None:
s = pl.Series("a", [1, 2, 3])
testing.assert_series_equal(s // 2, pl.Series("a", [0, 1, 1]))
testing.assert_series_equal(
pl.DataFrame([s]).select(pl.col("a") // 2)["a"], pl.Series("a", [0, 1, 1])
)
def test_true_divide() -> None:
s = pl.Series("a", [1, 2])
testing.assert_series_equal(s / 2, pl.Series("a", [0.5, 1.0]))
testing.assert_series_equal(
pl.DataFrame([s]).select(pl.col("a") / 2)["a"], pl.Series("a", [0.5, 1.0])
)
# rtruediv
testing.assert_series_equal(
pl.DataFrame([s]).select(2 / pl.col("a"))["literal"],
pl.Series("literal", [2.0, 1.0]),
)
# https://github.com/pola-rs/polars/issues/1369
vals = [3000000000, 2, 3]
foo = pl.Series(vals)
testing.assert_series_equal(foo / 1, pl.Series(vals, dtype=Float64))
testing.assert_series_equal(
pl.DataFrame({"a": vals}).select([pl.col("a") / 1])["a"],
pl.Series("a", vals, dtype=Float64),
)
def test_invalid_categorical() -> None:
s = pl.Series("cat_series", ["a", "b", "b", "c", "a"]).cast(pl.Categorical)
assert s.std() is None
assert s.var() is None
assert s.median() is None
assert s.quantile(0.5) is None
assert s.mode().to_list() == [None]
def test_bitwise() -> None:
a = pl.Series("a", [1, 2, 3])
b = pl.Series("b", [3, 4, 5])
testing.assert_series_equal(a & b, pl.Series("a", [1, 0, 1]))
testing.assert_series_equal(a | b, pl.Series("a", [3, 6, 7]))
testing.assert_series_equal(a ^ b, pl.Series("a", [2, 6, 6]))
df = pl.DataFrame([a, b])
out = df.select(
[
(pl.col("a") & pl.col("b")).alias("and"),
(pl.col("a") | pl.col("b")).alias("or"),
(pl.col("a") ^ pl.col("b")).alias("xor"),
]
)
testing.assert_series_equal(out["and"], pl.Series("and", [1, 0, 1]))
testing.assert_series_equal(out["or"], pl.Series("or", [3, 6, 7]))
testing.assert_series_equal(out["xor"], pl.Series("xor", [2, 6, 6]))
def test_to_numpy() -> None:
pl.internals.series._PYARROW_AVAILABLE = False
a = pl.Series("a", [1, 2, 3])
assert np.all(a.to_numpy() == np.array([1, 2, 3]))
a = pl.Series("a", [1, 2, None])
np.testing.assert_array_equal(a.to_numpy(), np.array([1.0, 2.0, np.nan]))
def test_from_sequences() -> None:
# test int, str, bool, flt
values = [
[[1], [None, 3]],
[["foo"], [None, "bar"]],
[[True], [None, False]],
[[1.0], [None, 3.0]],
]
for vals in values:
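        # Build the Series once with the pure-Python fallback and once with pyarrow enabled, then check that both construction paths agree.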
pl.internals.series._PYARROW_AVAILABLE = False
a = pl.Series("a", vals)
pl.internals.series._PYARROW_AVAILABLE = True
b = pl.Series("a", vals)
assert a.series_equal(b, null_equal=True)
assert a.to_list() == vals
def test_comparisons_int_series_to_float() -> None:
srs_int = pl.Series([1, 2, 3, 4])
testing.assert_series_equal(srs_int - 1.0, pl.Series([0, 1, 2, 3]))
testing.assert_series_equal(srs_int + 1.0, pl.Series([2, 3, 4, 5]))
testing.assert_series_equal(srs_int * 2.0, pl.Series([2, 4, 6, 8]))
# todo: this is inconsistent
testing.assert_series_equal(srs_int / 2.0, pl.Series([0.5, 1.0, 1.5, 2.0]))
testing.assert_series_equal(srs_int % 2.0, pl.Series([1, 0, 1, 0]))
testing.assert_series_equal(4.0 % srs_int, pl.Series([0, 0, 1, 0]))
testing.assert_series_equal(srs_int // 2.0, pl.Series([0, 1, 1, 2]))
testing.assert_series_equal(srs_int < 3.0, pl.Series([True, True, False, False]))
testing.assert_series_equal(srs_int <= 3.0, pl.Series([True, True, True, False]))
testing.assert_series_equal(srs_int > 3.0, pl.Series([False, False, False, True]))
testing.assert_series_equal(srs_int >= 3.0, pl.Series([False, False, True, True]))
testing.assert_series_equal(srs_int == 3.0, pl.Series([False, False, True, False]))
testing.assert_series_equal(srs_int - True, pl.Series([0, 1, 2, 3]))
def test_comparisons_float_series_to_int() -> None:
srs_float = pl.Series([1.0, 2.0, 3.0, 4.0])
testing.assert_series_equal(srs_float - 1, pl.Series([0.0, 1.0, 2.0, 3.0]))
testing.assert_series_equal(srs_float + 1, pl.Series([2.0, 3.0, 4.0, 5.0]))
testing.assert_series_equal(srs_float * 2, pl.Series([2.0, 4.0, 6.0, 8.0]))
testing.assert_series_equal(srs_float / 2, pl.Series([0.5, 1.0, 1.5, 2.0]))
testing.assert_series_equal(srs_float % 2, pl.Series([1.0, 0.0, 1.0, 0.0]))
testing.assert_series_equal(4 % srs_float, pl.Series([0.0, 0.0, 1.0, 0.0]))
testing.assert_series_equal(srs_float // 2, pl.Series([0.0, 1.0, 1.0, 2.0]))
testing.assert_series_equal(srs_float < 3, pl.Series([True, True, False, False]))
testing.assert_series_equal(srs_float <= 3, pl.Series([True, True, True, False]))
testing.assert_series_equal(srs_float > 3, pl.Series([False, False, False, True]))
testing.assert_series_equal(srs_float >= 3, pl.Series([False, False, True, True]))
testing.assert_series_equal(srs_float == 3, pl.Series([False, False, True, False]))
testing.assert_series_equal(srs_float - True, pl.Series([0.0, 1.0, 2.0, 3.0]))
def test_comparisons_bool_series_to_int() -> None:
srs_bool = pl.Series([True, False])
# todo: do we want this to work?
testing.assert_series_equal(srs_bool / 1, pl.Series([True, False], dtype=Float64))
with pytest.raises(TypeError, match=r"\-: 'Series' and 'int'"):
srs_bool - 1
with pytest.raises(TypeError, match=r"\+: 'Series' and 'int'"):
srs_bool + 1
with pytest.raises(TypeError, match=r"\%: 'Series' and 'int'"):
srs_bool % 2
with pytest.raises(TypeError, match=r"\*: 'Series' and 'int'"):
srs_bool * 1
with pytest.raises(
TypeError, match=r"'<' not supported between instances of 'Series' and 'int'"
):
srs_bool < 2
with pytest.raises(
TypeError, match=r"'>' not supported between instances of 'Series' and 'int'"
):
srs_bool > 2
def test_trigonometry_functions() -> None:
srs_float = pl.Series("t", [0.0, np.pi])
assert np.allclose(srs_float.sin(), np.array([0.0, 0.0]))
assert np.allclose(srs_float.cos(), np.array([1.0, -1.0]))
assert np.allclose(srs_float.tan(), np.array([0.0, -0.0]))
srs_float = pl.Series("t", [1.0, 0.0, -1])
assert np.allclose(srs_float.arcsin(), np.array([1.571, 0.0, -1.571]), atol=0.01)
assert np.allclose(srs_float.arccos(), np.array([0.0, 1.571, 3.142]), atol=0.01)
assert np.allclose(srs_float.arctan(), np.array([0.785, 0.0, -0.785]), atol=0.01)
def test_abs() -> None:
# ints
s = pl.Series([1, -2, 3, -4])
testing.assert_series_equal(s.abs(), pl.Series([1, 2, 3, 4]))
testing.assert_series_equal(np.abs(s), pl.Series([1, 2, 3, 4])) # type: ignore
# floats
s = pl.Series([1.0, -2.0, 3, -4.0])
testing.assert_series_equal(s.abs(), pl.Series([1.0, 2.0, 3.0, 4.0]))
testing.assert_series_equal(
np.abs(s), pl.Series([1.0, 2.0, 3.0, 4.0]) # type: ignore
)
testing.assert_series_equal(
pl.select(pl.lit(s).abs()).to_series(), pl.Series([1.0, 2.0, 3.0, 4.0])
)
def test_to_dummies() -> None:
s = pl.Series("a", [1, 2, 3])
result = s.to_dummies()
expected = pl.DataFrame({"a_1": [1, 0, 0], "a_2": [0, 1, 0], "a_3": [0, 0, 1]})
assert result.frame_equal(expected)
def test_value_counts() -> None:
s = pl.Series("a", [1, 2, 2, 3])
result = s.value_counts()
expected = pl.DataFrame({"a": [1, 2, 3], "counts": [1, 2, 1]})
result_sorted: pl.DataFrame = result.sort("a")
assert result_sorted.frame_equal(expected)
def test_chunk_lengths() -> None:
s = pl.Series("a", [1, 2, 2, 3])
# this is a Series with one chunk, of length 4
assert s.n_chunks() == 1
assert s.chunk_lengths() == [4]
def test_limit() -> None:
s = pl.Series("a", [1, 2, 3])
assert s.limit(2).series_equal(pl.Series("a", [1, 2]))
def test_filter() -> None:
s = pl.Series("a", [1, 2, 3])
mask = pl.Series("", [True, False, True])
assert s.filter(mask).series_equal(pl.Series("a", [1, 3]))
assert s.filter([True, False, True]).series_equal(pl.Series("a", [1, 3]))
def test_take_every() -> None:
s = pl.Series("a", [1, 2, 3, 4])
assert s.take_every(2).series_equal(pl.Series("a", [1, 3]))
def test_argsort() -> None:
s = pl.Series("a", [5, 3, 4, 1, 2])
expected = pl.Series("a", [3, 4, 1, 2, 0], dtype=UInt32)
verify_series_and_expr_api(s, expected, "argsort")
expected_reverse = pl.Series("a", [0, 2, 1, 4, 3], dtype=UInt32)
verify_series_and_expr_api(s, expected_reverse, "argsort", True)
def test_arg_min_and_arg_max() -> None:
s = pl.Series("a", [5, 3, 4, 1, 2])
assert s.arg_min() == 3
assert s.arg_max() == 0
def test_is_null_is_not_null() -> None:
s = pl.Series("a", [1.0, 2.0, 3.0, None])
assert s.is_null().series_equal(pl.Series("a", [False, False, False, True]))
assert s.is_not_null().series_equal(pl.Series("a", [True, True, True, False]))
def test_is_finite_is_infinite() -> None:
s = pl.Series("a", [1.0, 2.0, np.inf])
s.is_finite().series_equal(pl.Series("a", [True, True, False]))
s.is_infinite().series_equal(pl.Series("a", [False, False, True]))
def test_is_nan_is_not_nan() -> None:
s = pl.Series("a", [1.0, 2.0, 3.0, np.NaN])
assert s.is_nan().series_equal(pl.Series("a", [False, False, False, True]))
assert s.is_not_nan().series_equal(pl.Series("a", [True, True, True, False]))
def test_is_unique() -> None:
s = pl.Series("a", [1, 2, 2, 3])
assert s.is_unique().series_equal(pl.Series("a", [True, False, False, True]))
def test_is_duplicated() -> None:
s = pl.Series("a", [1, 2, 2, 3])
assert s.is_duplicated().series_equal(pl.Series("a", [False, True, True, False]))
def test_dot() -> None:
s = pl.Series("a", [1, 2, 3])
s2 = pl.Series("b", [4.0, 5.0, 6.0])
assert s.dot(s2) == 32
def test_sample() -> None:
s = pl.Series("a", [1, 2, 3, 4, 5])
assert len(s.sample(n=2)) == 2
assert len(s.sample(frac=0.4)) == 2
assert len(s.sample(n=2, with_replacement=True)) == 2
# on a series of length 5, you cannot sample more than 5 items
with pytest.raises(Exception):
s.sample(n=10, with_replacement=False)
# unless you use with_replacement=True
assert len(s.sample(n=10, with_replacement=True)) == 10
def test_peak_max_peak_min() -> None:
s = pl.Series("a", [4, 1, 3, 2, 5])
result = s.peak_min()
expected = pl.Series([False, True, False, True, False])
testing.assert_series_equal(result, expected)
result = s.peak_max()
expected = pl.Series([True, False, True, False, True])
testing.assert_series_equal(result, expected)
def test_shrink_to_fit() -> None:
s = pl.Series("a", [4, 1, 3, 2, 5])
assert s.shrink_to_fit(in_place=True) is None
s = pl.Series("a", [4, 1, 3, 2, 5])
assert isinstance(s.shrink_to_fit(in_place=False), pl.Series)
def test_str_concat() -> None:
s = pl.Series(["1", None, "2"])
result = s.str_concat()
expected = pl.Series(["1-null-2"])
testing.assert_series_equal(result, expected)
def test_str_lengths() -> None:
s = pl.Series(["messi", "ronaldo", None])
expected = pl.Series([5, 7, None], dtype=UInt32)
verify_series_and_expr_api(s, expected, "str.lengths")
def test_str_contains() -> None:
s = pl.Series(["messi", "ronaldo", "ibrahimovic"])
expected = pl.Series([True, False, False])
verify_series_and_expr_api(s, expected, "str.contains", "mes")
def test_str_encode() -> None:
s = pl.Series(["foo", "bar", None])
hex_encoded = pl.Series(["666f6f", "626172", None])
base64_encoded = pl.Series(["Zm9v", "YmFy", None])
verify_series_and_expr_api(s, hex_encoded, "str.encode", "hex")
verify_series_and_expr_api(s, base64_encoded, "str.encode", "base64")
with pytest.raises(ValueError):
s.str.encode("utf8")
def test_str_decode() -> None:
hex_encoded = pl.Series(["666f6f", "626172", None])
base64_encoded = pl.Series(["Zm9v", "YmFy", None])
expected = pl.Series(["foo", "bar", None])
verify_series_and_expr_api(hex_encoded, expected, "str.decode", "hex")
verify_series_and_expr_api(base64_encoded, expected, "str.decode", "base64")
def test_str_decode_exception() -> None:
s = pl.Series(["not a valid", "626172", None])
with pytest.raises(Exception):
s.str.decode(encoding="hex", strict=True)
with pytest.raises(Exception):
s.str.decode(encoding="base64", strict=True)
with pytest.raises(ValueError):
s.str.decode("utf8")
def test_str_replace_str_replace_all() -> None:
s = pl.Series(["hello", "world", "test"])
expected = pl.Series(["hell0", "w0rld", "test"])
verify_series_and_expr_api(s, expected, "str.replace", "o", "0")
s = pl.Series(["hello", "world", "test"])
expected = pl.Series(["hell0", "w0rld", "test"])
verify_series_and_expr_api(s, expected, "str.replace_all", "o", "0")
def test_str_to_lowercase() -> None:
s = pl.Series(["Hello", "WORLD"])
expected = pl.Series(["hello", "world"])
verify_series_and_expr_api(s, expected, "str.to_lowercase")
def test_str_to_uppercase() -> None:
s = pl.Series(["Hello", "WORLD"])
expected = pl.Series(["HELLO", "WORLD"])
verify_series_and_expr_api(s, expected, "str.to_uppercase")
def test_str_rstrip() -> None:
s = pl.Series([" hello ", "world\t "])
expected = pl.Series([" hello", "world"])
testing.assert_series_equal(s.str.rstrip(), expected)
def test_str_lstrip() -> None:
s = pl.Series([" hello ", "\t world"])
expected = pl.Series(["hello ", "world"])
testing.assert_series_equal(s.str.lstrip(), expected)
def test_str_strptime() -> None:
s = pl.Series(["2020-01-01", "2020-02-02"])
expected = pl.Series([date(2020, 1, 1), date(2020, 2, 2)])
verify_series_and_expr_api(s, expected, "str.strptime", pl.Date, "%Y-%m-%d")
s = pl.Series(["2020-01-01 00:00:00", "2020-02-02 03:20:10"])
expected = pl.Series(
[datetime(2020, 1, 1, 0, 0, 0), datetime(2020, 2, 2, 3, 20, 10)]
)
verify_series_and_expr_api(
s, expected, "str.strptime", pl.Datetime, "%Y-%m-%d %H:%M:%S"
)
def test_dt_strftime() -> None:
a = pl.Series("a", [10000, 20000, 30000], dtype=pl.Date)
assert a.dtype == pl.Date
expected = pl.Series("a", ["1997-05-19", "2024-10-04", "2052-02-20"])
verify_series_and_expr_api(a, expected, "dt.strftime", "%F")
def test_dt_year_month_week_day_ordinal_day() -> None:
a = pl.Series("a", [10000, 20000, 30000], dtype=pl.Date)
exp = pl.Series("a", [1997, 2024, 2052], dtype=Int32)
verify_series_and_expr_api(a, exp, "dt.year")
verify_series_and_expr_api(a, pl.Series("a", [5, 10, 2], dtype=UInt32), "dt.month")
verify_series_and_expr_api(a, pl.Series("a", [0, 4, 1], dtype=UInt32), "dt.weekday")
verify_series_and_expr_api(a, pl.Series("a", [21, 40, 8], dtype=UInt32), "dt.week")
verify_series_and_expr_api(a, pl.Series("a", [19, 4, 20], dtype=UInt32), "dt.day")
verify_series_and_expr_api(
a, pl.Series("a", [139, 278, 51], dtype=UInt32), "dt.ordinal_day"
)
assert a.dt.median() == date(2024, 10, 4)
assert a.dt.mean() == date(2024, 10, 4)
def test_dt_datetimes() -> None:
s = pl.Series(["2020-01-01 00:00:00", "2020-02-02 03:20:10"])
s = s.str.strptime(pl.Datetime, fmt="%Y-%m-%d %H:%M:%S")
# hours, minutes, seconds and nanoseconds
verify_series_and_expr_api(s, pl.Series("", [0, 3], dtype=UInt32), "dt.hour")
verify_series_and_expr_api(s, pl.Series("", [0, 20], dtype=UInt32), "dt.minute")
verify_series_and_expr_api(s, pl.Series("", [0, 10], dtype=UInt32), "dt.second")
verify_series_and_expr_api(s, pl.Series("", [0, 0], dtype=UInt32), "dt.nanosecond")
# epoch methods
verify_series_and_expr_api(
s, pl.Series("", [18262, 18294], dtype=Int32), "dt.epoch_days"
)
verify_series_and_expr_api(
s,
pl.Series("", [1_577_836_800, 1_580_613_610], dtype=Int64),
"dt.epoch_seconds",
)
verify_series_and_expr_api(
s,
pl.Series("", [1_577_836_800_000, 1_580_613_610_000], dtype=Int64),
"dt.epoch_milliseconds",
)
def test_reshape() -> None:
s = pl.Series("a", [1, 2, 3, 4])
out = s.reshape((-1, 2))
expected = pl.Series("a", [[1, 2], [3, 4]])
assert out.series_equal(expected)
out = s.reshape((2, 2))
assert out.series_equal(expected)
out = s.reshape((2, -1))
assert out.series_equal(expected)
out = s.reshape((-1, 1))
expected = pl.Series("a", [[1], [2], [3], [4]])
assert out.series_equal(expected)
# test lazy_dispatch
out = pl.select(pl.lit(s).reshape((-1, 1))).to_series()
assert out.series_equal(expected)
def test_init_categorical() -> None:
for values in [[None], ["foo", "bar"], [None, "foo", "bar"]]:
expected = pl.Series("a", values, dtype=pl.Utf8).cast(pl.Categorical)
a = pl.Series("a", values, dtype=pl.Categorical)
testing.assert_series_equal(a, expected)
def test_nested_list_types_preserved() -> None:
expected_dtype = pl.UInt32
srs1 = pl.Series([pl.Series([3, 4, 5, 6], dtype=expected_dtype) for _ in range(5)])
for srs2 in srs1:
assert srs2.dtype == expected_dtype
def test_log_exp() -> None:
a = pl.Series("a", [1, 100, 1000])
b = pl.Series("a", [0.0, 2.0, 3.0])
verify_series_and_expr_api(a, b, "log10")
expected = pl.Series("a", np.log(a.to_numpy()))
verify_series_and_expr_api(a, expected, "log")
expected = pl.Series("a", np.exp(b.to_numpy()))
verify_series_and_expr_api(b, expected, "exp")
def test_shuffle() -> None:
a = pl.Series("a", [1, 2, 3])
out = a.shuffle(2)
expected = pl.Series("a", [2, 3, 1])
testing.assert_series_equal(out, expected)
out = pl.select(pl.lit(a).shuffle(2)).to_series()
testing.assert_series_equal(out, expected)
def test_to_physical() -> None:
# casting an int result in an int
a = pl.Series("a", [1, 2, 3])
verify_series_and_expr_api(a, a, "to_physical")
# casting a date results in an Int32
a = pl.Series("a", [date(2020, 1, 1)] * 3)
expected = pl.Series("a", [18262] * 3, dtype=Int32)
verify_series_and_expr_api(a, expected, "to_physical")
def test_is_between_datetime() -> None:
s = pl.Series("a", [datetime(2020, 1, 1, 10, 0, 0), datetime(2020, 1, 1, 20, 0, 0)])
start = datetime(2020, 1, 1, 12, 0, 0)
end = datetime(2020, 1, 1, 23, 0, 0)
expected = pl.Series("a", [False, True])
# only on the expression api
result = s.to_frame().with_column(pl.col("*").is_between(start, end))["is_between"]
testing.assert_series_equal(result.rename("a"), expected)
@pytest.mark.parametrize("f", ["sin", "cos", "tan", "arcsin", "arccos", "arctan"])
def test_trigonometric(f: str) -> None:
s = pl.Series("a", [0.0])
expected = pl.Series("a", getattr(np, f)(s.to_numpy()))
verify_series_and_expr_api(s, expected, f)
def test_ewm_mean() -> None:
a = pl.Series("a", [2, 5, 3])
expected = pl.Series(
"a",
[
2.0,
4.0,
3.4285714285714284,
],
)
verify_series_and_expr_api(a, expected, "ewm_mean", alpha=0.5, adjust=True)
expected = pl.Series("a", [2.0, 3.8, 3.421053])
verify_series_and_expr_api(a, expected, "ewm_mean", com=2.0, adjust=True)
expected = pl.Series("a", [2.0, 3.5, 3.25])
verify_series_and_expr_api(a, expected, "ewm_mean", alpha=0.5, adjust=False)
a = pl.Series("a", [2, 3, 5, 7, 4])
expected = pl.Series("a", [None, 2.666667, 4.0, 5.6, 4.774194])
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=True, min_periods=2
)
expected = pl.Series("a", [None, None, 4.0, 5.6, 4.774194])
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=True, min_periods=3
)
a = pl.Series("a", [None, 1.0, 5.0, 7.0, None, 2.0, 5.0, 4])
expected = pl.Series(
"a",
[
None,
1.0,
3.6666666666666665,
5.571428571428571,
5.571428571428571,
3.6666666666666665,
4.354838709677419,
4.174603174603175,
],
)
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=True, min_periods=1
)
expected = pl.Series("a", [None, 1.0, 3.0, 5.0, 5.0, 3.5, 4.25, 4.125])
verify_series_and_expr_api(
a, expected, "ewm_mean", alpha=0.5, adjust=False, min_periods=1
)
def test_ewm_std_var() -> None:
a = pl.Series("a", [2, 5, 3])
assert (a.ewm_std(alpha=0.5) ** 2).to_list() == a.ewm_var(alpha=0.5).to_list()
def test_extend() -> None:
a = pl.Series("a", [1, 2, 3])
expected = pl.Series("a", [1, 2, 3, 1, 1, 1])
verify_series_and_expr_api(a, expected, "extend", 1, 3)
expected = pl.Series("a", [1, 2, 3, None, None, None])
verify_series_and_expr_api(a, expected, "extend", None, 3)
def test_any_all() -> None:
a = pl.Series("a", [True, False, True])
expected = pl.Series("a", [True])
verify_series_and_expr_api(a, expected, "any")
expected = pl.Series("a", [False])
verify_series_and_expr_api(a, expected, "all")
a = pl.Series("a", [True, True, True])
expected = pl.Series("a", [True])
verify_series_and_expr_api(a, expected, "any")
expected = pl.Series("a", [True])
verify_series_and_expr_api(a, expected, "all")
a = pl.Series("a", [False, False, False])
expected = pl.Series("a", [False])
verify_series_and_expr_api(a, expected, "any")
expected = pl.Series("a", [False])
verify_series_and_expr_api(a, expected, "all")
def test_product() -> None:
a = pl.Series("a", [1, 2, 3])
out = a.product()
assert out == 6
a = pl.Series("a", [1, 2, None])
out = a.product()
assert out is None
a = pl.Series("a", [None, 2, 3])
out = a.product()
assert out is None
|
py | 1a329be3a97544fb60eb7f238dbd0faf16f01b76 | import os
import sys
from RLTest import Env
from redisgraph import Graph, Node, Edge
import redis
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import FlowTestsBase
redis_con = None
redis_graph = None
class testQueryValidationFlow(FlowTestsBase):
def __init__(self):
self.env = Env()
global redis_con
global redis_graph
redis_con = self.env.getConnection()
redis_graph = Graph("G", redis_con)
self.populate_graph()
def populate_graph(self):
# Create a single graph.
global redis_graph
node = Node(properties={"age": 34})
redis_graph.add_node(node)
redis_graph.commit()
# Expect an error when trying to use a function which does not exists.
def test01_none_existing_function(self):
query = """MATCH (n) RETURN noneExistingFunc(n.age) AS cast"""
try:
redis_graph.query(query)
self.env.assertTrue(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
# Make sure function validation is type case insensitive.
def test02_case_insensitive_function_name(self):
try:
query = """MATCH (n) RETURN mAx(n.age)"""
redis_graph.query(query)
except redis.exceptions.ResponseError:
# function validation should be case insensitive.
self.env.assertTrue(False)
def test03_edge_missing_relation_type(self):
try:
query = """CREATE (n:Person {age:32})-[]->(:person {age:30})"""
redis_graph.query(query)
self.env.assertTrue(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test04_escaped_quotes(self):
query = r"CREATE (:escaped{prop1:'single \' char', prop2: 'double \" char', prop3: 'mixed \' and \" chars'})"
actual_result = redis_graph.query(query)
self.env.assertEquals(actual_result.nodes_created, 1)
self.env.assertEquals(actual_result.properties_set, 3)
query = r"MATCH (a:escaped) RETURN a.prop1, a.prop2, a.prop3"
actual_result = redis_graph.query(query)
expected_result = [["single ' char", 'double " char', 'mixed \' and " chars']]
self.env.assertEquals(actual_result.result_set, expected_result)
def test05_invalid_entity_references(self):
try:
query = """MATCH (a) RETURN e"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
try:
query = """MATCH (a) RETURN a ORDER BY e"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test06_where_references(self):
try:
query = """MATCH (a) WHERE fake = true RETURN a"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test07_with_references(self):
try:
query = """MATCH (a) WITH e RETURN e"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test08_count_distinct_star(self):
try:
query = """MATCH (a) RETURN COUNT(DISTINCT *)"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test09_invalid_apply_all(self):
try:
query = """MATCH (a) RETURN SUM(*)"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test10_missing_params(self):
try:
query = """MATCH (a {name:$name}) RETURN a"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test11_param_error(self):
try:
query = """CYPHER name=({name:'a'}) MATCH (a {name:$name}) RETURN a"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test12_invalid_query_order(self):
try:
query = """MERGE (a) MATCH (a)-[]->(b) RETURN b"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test13_create_bound_variables(self):
try:
query = """MATCH (a)-[e]->(b) CREATE (a)-[e]->(b)"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test14_treat_path_as_entity(self):
redis_graph.query("CREATE ()-[:R]->()")
try:
query= """MATCH x=()-[]->() RETURN x.name"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
def test15_dont_crash_on_multiple_errors(self):
try:
query = """MATCH (a) where id(a) IN range(0) OR id(a) in range(1)"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError:
# Expecting an error.
pass
# Run a query in which a parsed parameter introduces a type in an unsupported context.
def test16_param_introduces_unhandled_type(self):
try:
query = """CYPHER props={a:1,b:2} CREATE (a:A $props)"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("Encountered unhandled type" in e.message)
pass
# Validate that the module fails properly with incorrect argument counts.
def test17_query_arity(self):
# Call GRAPH.QUERY with a missing query argument.
try:
res = redis_con.execute_command("GRAPH.QUERY", "G")
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("wrong number of arguments" in e.message)
pass
# Run queries in which compile-time variables are accessed but not defined.
def test18_undefined_variable_access(self):
try:
query = """CREATE (:person{name:bar[1]})"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("not defined" in e.message)
pass
try:
query = """MATCH (a {val: undeclared}) RETURN a"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("not defined" in e.message)
pass
try:
query = """UNWIND [fake] AS ref RETURN ref"""
redis_graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("not defined" in e.message)
pass
|
py | 1a329cbd714743a70aeaa23288c1a0657306ff95 | import enum
from datetime import datetime as _datetime
from sqlalchemy import Column, DateTime, Enum, Float, ForeignKey, Integer, String
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from .base import Base
from .coin import Coin
class Interval(enum.Enum):
MINUTELY = "MINUTELY"
HOURLY = "HOURLY"
DAILY = "DAILY"
WEEKLY = "WEEKLY"
class CoinValue(Base):
__tablename__ = "coin_value"
id = Column(Integer, primary_key=True)
coin_id = Column(String, ForeignKey("coins.symbol"))
coin = relationship("Coin")
balance = Column(Float)
usd_price = Column(Float)
btc_price = Column(Float)
interval = Column(Enum(Interval))
datetime = Column(DateTime)
def __init__(
self,
coin: Coin,
balance: float,
usd_price: float,
btc_price: float,
interval=Interval.MINUTELY,
datetime: _datetime = None,
):
self.coin = coin
self.balance = balance
self.usd_price = usd_price
self.btc_price = btc_price
self.interval = interval
self.datetime = datetime or _datetime.now()
@hybrid_property
def usd_value(self):
if self.usd_price is None:
return None
return self.balance * self.usd_price
@usd_value.expression
def usd_value(self):
return self.balance * self.usd_price
@hybrid_property
def btc_value(self):
if self.btc_price is None:
return None
return self.balance * self.btc_price
@btc_value.expression
def btc_value(self):
return self.balance * self.btc_price
def info(self):
return {
"balance": self.balance,
"usd_value": self.usd_value,
"btc_value": self.btc_value,
"datetime": self.datetime.isoformat(),
}
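# Illustrative usage sketch (the Coin instance and the numbers below are assumptions,
# not values taken from this project):
# value = CoinValue(coin=some_coin, balance=0.5, usd_price=20000.0, btc_price=1.0)
# value.usd_value  # -> 10000.0, i.e. balance * usd_price
# value.info()     # -> dict with balance, usd_value, btc_value and an ISO-format datetime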
|
py | 1a329d1f7a1e9c48bf57c34961a27d0afa20c083 | #!/usr/bin/env python
import sys, os
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['errorbar.capsize'] = 6
matplotlib.rcParams['axes.grid'] = True
matplotlib.rcParams['font.size'] = 18
matplotlib.rcParams['figure.figsize'] = (9.75, 5.85) #(10, 6)
matplotlib.rcParams['savefig.dpi'] = 600
def main(inFile,outFile):
if not os.path.isfile(inFile):
raise ValueError('File {:} does not exist'.format(str(inFile)))
#if output exists mv to .bak
if os.path.isfile(outFile):
print('ATTENTION: {:} exists, moving to *.bak'.format(outFile))
os.rename(outFile, outFile+'.bak')
x, y = np.loadtxt(inFile, skiprows=4, usecols=(0,2), unpack=True)
plt.xlabel("Pore Diameter [Å]")
plt.ylabel("Pore-Size Distribution")
plt.xlim([min(x),max(x)])
plt.plot(x,y)
plt.tight_layout()
plt.savefig(outFile)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Plot RASPA Pore-Size Distribution')
parser.add_argument('input', type=str, help='RASPA PSD Output File')
parser.add_argument('output', type=str, help='Filename for the PNG Output')
args = parser.parse_args()
main(args.input,args.output)
|
py | 1a329d8ed2945f2e515e353df0cd85c269c1ca12 | import requests
from bs4 import BeautifulSoup as bs
def image_url_retrieval(url):
r = requests.get(url)
soup = bs(r.content, 'html.parser')
profile_image = soup.find('img', {'alt': 'Avatar'})['src']
return profile_image
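# Illustrative usage (the username is an example; the result depends on GitHub's current
# page markup, since this scrapes the <img alt="Avatar"> element):
# image_url_retrieval("https://github.com/octocat")  # -> URL of the profile image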
def main():
github_user = input('What github user would you like to find? ')
url = f"https://github.com/{github_user}"
print(image_url_retrieval(url))
if __name__ == '__main__':
main() |
py | 1a329dd895c6153db072b4c779b2e35da7124718 | import os
import sys
sys.path.append(os.path.dirname(__file__))
import ConfigParser as cp
from os import path
import argparse
from transfer.base import Transfer
from walker import Walker
def main(file_path=None, all_sync=False):
inifile = cp.SafeConfigParser()
inifile.read(os.getcwd() + "/confing.ini")
""" load config file """
host = inifile.get("receiver", "host")
port = inifile.get("receiver", "port")
user = inifile.get("receiver", "user")
passwd = inifile.get("receiver", "passwd")
header_path = inifile.get("file", "header_path")
transfer = Transfer("ftp")
transfer.inst.connect(host, port, user, passwd)
if all_sync:
syncdir = inifile.get("all_sync", "syncdir")
walker = Walker(syncdir)
w = walker.start()
while True:
try:
file_path = w.next()
remote_path = file_path.replace(header_path, "")
dirname = os.path.dirname(remote_path)
filename = os.path.basename(remote_path)
send(transfer.inst, dirname, filename, file_path)
except StopIteration:
return
if file_path:
remote_path = file_path.replace(header_path, "")
if remote_path[0] != "/":
remote_path = "/" + remote_path
dirname = os.path.dirname(remote_path)
filename = os.path.basename(remote_path)
""" Connection with remote server """
send(transfer.inst, dirname, filename, file_path)
def send(transfer, dirname, filename, file_path):
# A space must be prepended to the directory name here;
# the reason this is required is not known.
transfer.mkdir(" "+dirname)
transfer.send(dirname, filename, file_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="FTP transfer")
parser.add_argument(
"-f", "--file", dest="file_path", default=None, help="file path")
parser.add_argument(
"-a", "--all", dest="all_sync", default=None, help="all sync")
args = parser.parse_args()
main(args.file_path, args.all_sync)
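# Expected layout of the ini file read by main() (section and option names come from
# the code above; all values below are illustrative):
# [receiver]
# host = ftp.example.com
# port = 21
# user = alice
# passwd = secret
# [file]
# header_path = /local/prefix/
# [all_sync]
# syncdir = /local/prefix/sync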
|
py | 1a32a04ffea85ecc9547778d1b0da26e4a5cd188 | import sys
import numpy as np
from collections import Counter
from cuteSV.cuteSV_genotype import cal_GL, threshold_ref_count, count_coverage
'''
*******************************************
TO DO LIST
*******************************************
1. Identify DP with samfile pointer;
2. Add CIPOS, CILEN and/or CIEND;
3. Determine (IM)PRECISE type.
4. Filter DUP to improve INS FN rate.
*******************************************
'''
def resolution_DUP(path, chr, read_count, max_cluster_bias, sv_size,
bam_path, action, MaxSize, gt_round):
semi_dup_cluster = list()
semi_dup_cluster.append([0, 0, ''])
candidate_single_SV = list()
file = open(path, 'r')
for line in file:
seq = line.strip('\n').split('\t')
if seq[1] != chr:
continue
pos_1 = int(seq[2])
pos_2 = int(seq[3])
read_id = seq[4]
if pos_1 - semi_dup_cluster[-1][0] > max_cluster_bias or pos_2 - semi_dup_cluster[-1][1] > max_cluster_bias:
if len(semi_dup_cluster) >= read_count:
if semi_dup_cluster[-1][0] == semi_dup_cluster[-1][1] == 0:
pass
else:
generate_dup_cluster(semi_dup_cluster,
chr,
read_count,
max_cluster_bias,
sv_size,
candidate_single_SV,
bam_path,
action,
MaxSize,
gt_round)
semi_dup_cluster = []
semi_dup_cluster.append([pos_1, pos_2, read_id])
else:
if semi_dup_cluster[-1][0] == semi_dup_cluster[-1][1] == 0:
semi_dup_cluster = []
semi_dup_cluster.append([pos_1, pos_2, read_id])
else:
semi_dup_cluster.append([pos_1, pos_2, read_id])
if len(semi_dup_cluster) >= read_count:
if semi_dup_cluster[-1][0] == semi_dup_cluster[-1][1] == 0:
pass
else:
generate_dup_cluster(semi_dup_cluster,
chr,
read_count,
max_cluster_bias,
sv_size,
candidate_single_SV,
bam_path,
action,
MaxSize,
gt_round)
file.close()
return candidate_single_SV
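# Each line of the signature file read above is expected to be tab-separated with at
# least five fields, of which only the 2nd-5th are used here:
# <ignored> <chrom> <pos_1> <pos_2> <read_id>
# An illustrative line: DUP\tchr1\t100000\t105000\tread_001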
def generate_dup_cluster(semi_dup_cluster, chr, read_count, max_cluster_bias,
sv_size, candidate_single_SV, bam_path, action, MaxSize, gt_round):
# calculate support reads
support_read = list(set([i[2] for i in semi_dup_cluster]))
if len(support_read) < read_count:
return
low_b = int(len(semi_dup_cluster)*0.4)
up_b = int(len(semi_dup_cluster)*0.6)
if low_b == up_b:
breakpoint_1 = semi_dup_cluster[low_b][0]
breakpoint_2 = semi_dup_cluster[low_b][1]
else:
breakpoint_1 = [i[0] for i in semi_dup_cluster[low_b:up_b]]
breakpoint_2 = [i[1] for i in semi_dup_cluster[low_b:up_b]]
breakpoint_1 = int(sum(breakpoint_1)/len(semi_dup_cluster[low_b:up_b]))
breakpoint_2 = int(sum(breakpoint_2)/len(semi_dup_cluster[low_b:up_b]))
if sv_size <= breakpoint_2 - breakpoint_1 <= MaxSize or (sv_size <= breakpoint_2 - breakpoint_1 and MaxSize == -1):
if action:
import time
# time_start = time.time()
DV, DR, GT, GL, GQ, QUAL = call_gt(bam_path,
breakpoint_1,
breakpoint_2,
chr,
support_read,
min(max_cluster_bias, breakpoint_2 - breakpoint_1),
gt_round)
# print(DV, DR, GT, GL, GQ, QUAL)
# cost_time = time.time() - time_start
# print("DUP", chr, int(breakpoint_1), int(breakpoint_2), DR, DV, QUAL, "%.4f"%cost_time)
else:
DR = '.'
GT = './.'
GL = '.,.,.'
GQ = "."
QUAL = "."
candidate_single_SV.append([chr,
'DUP',
str(breakpoint_1),
str(breakpoint_2 - breakpoint_1),
str(len(support_read)),
str(DR),
str(GT),
str(GL),
str(GQ),
str(QUAL),
str(','.join(support_read))])
def run_dup(args):
return resolution_DUP(*args)
def call_gt(bam_path, pos_1, pos_2, chr, read_id_list, max_cluster_bias, gt_round):
import pysam
bamfile = pysam.AlignmentFile(bam_path)
querydata = set()
search_start = max(int(pos_1 - max_cluster_bias/2), 0)
search_end = min(int(pos_1 + max_cluster_bias/2), bamfile.get_reference_length(chr))
up_bound = threshold_ref_count(len(read_id_list))
status = count_coverage(chr,
search_start,
search_end,
bamfile,
querydata,
up_bound,
gt_round)
if status == -1:
DR = '.'
GT = "./."
GL = ".,.,."
GQ = "."
QUAL = "."
elif status == 1:
DR = 0
for query in querydata:
if query not in read_id_list:
DR += 1
GT, GL, GQ, QUAL = cal_GL(DR, len(read_id_list))
else:
search_start = max(int(pos_2 - max_cluster_bias/2), 0)
search_end = min(int(pos_2 + max_cluster_bias/2), bamfile.get_reference_length(chr))
status_2 = count_coverage(chr,
search_start,
search_end,
bamfile,
querydata,
up_bound,
gt_round)
# status_2 judgement
DR = 0
for query in querydata:
if query not in read_id_list:
DR += 1
GT, GL, GQ, QUAL = cal_GL(DR, len(read_id_list))
bamfile.close()
return len(read_id_list), DR, GT, GL, GQ, QUAL |
py | 1a32a0f2ce07e26dbe1a452582baa1551b35f990 | import torch
import torch.nn.functional as F
import torch.optim as optim
from gym.spaces import flatten
from blobrl.agents import DQN
from blobrl.memories import ExperienceReplay
from blobrl.networks import C51Network
class CategoricalDQN(DQN):
def __init__(self, observation_space, action_space, memory=ExperienceReplay(), network=None, num_atoms=51,
r_min=-10, r_max=10, step_train=1, batch_size=32, gamma=1.0,
optimizer=None, greedy_exploration=None, device=None):
"""
:param device: torch device to run agent
:type: torch.device
:param action_space:
:param observation_space:
:param memory:
:param network:
:param num_atoms:
:param r_min:
:param r_max:
:param step_train:
:param batch_size:
:param gamma:
:param optimizer:
:param greedy_exploration:
"""
if network is None and optimizer is None:
network = C51Network(observation_space=observation_space,
action_space=action_space)
num_atoms = 51
optimizer = optim.Adam(network.parameters())
super().__init__(observation_space=observation_space, action_space=action_space, memory=memory,
network=network, step_train=step_train, batch_size=batch_size, gamma=gamma,
loss=None, optimizer=optimizer, greedy_exploration=greedy_exploration, device=device)
self.num_atoms = num_atoms
self.r_min = r_min
self.r_max = r_max
self.delta_z = (r_max - r_min) / float(num_atoms - 1)
self.z = torch.tensor([r_min + i * self.delta_z for i in range(num_atoms)], device=self.device)
def get_action(self, observation):
""" Return action choice by the agents
:param observation: stat of environment
:type observation: gym.Space
"""
if not self.greedy_exploration.be_greedy(self.step) and self.with_exploration:
return self.action_space.sample()
observation = torch.tensor([flatten(self.observation_space, observation)], device=self.device).float()
prediction = self.network.forward(observation)
def return_values(values):
if isinstance(values, list):
return [return_values(v) for v in values]
q_values = values * self.z
q_values = torch.sum(q_values, dim=2)
return torch.argmax(q_values).detach().item()
return return_values(prediction)
def apply_loss(self, next_prediction, prediction, actions, rewards, next_observations, dones, len_space):
if isinstance(next_prediction, list):
[self.apply_loss(n, p, a, rewards, next_observations, dones, c) for n, p, a, c in
zip(next_prediction, prediction, actions.permute(1, 0, *[i for i in range(2, len(actions.shape))]),
len_space)]
else:
q_values_next = next_prediction * self.z
q_values_next = torch.sum(q_values_next, dim=2)
actions = F.one_hot(actions.long(), num_classes=len_space)
actions_next = torch.argmax(q_values_next, dim=1)
actions_next = F.one_hot(actions_next, num_classes=len_space)
dones = dones.view(-1, 1)
tz = rewards.view(-1, 1) + self.gamma * self.z * (1 - dones)
tz = tz.clamp(self.r_min, self.r_max)
b = (tz - self.r_min) / self.delta_z
l, u = b.floor().to(torch.int64), b.ceil().to(torch.int64)
l[(u > 0) * (l == u)] -= 1
u[(l < (self.num_atoms - 1)) * (l == u)] += 1
m_prob = torch.zeros((self.batch_size, len_space, self.num_atoms), device=self.device)
predictions_next = next_prediction[actions_next == 1, :]
offset = torch.linspace(0, (self.batch_size - 1) * self.num_atoms, self.batch_size,
device=self.device).view(-1,
1)
offset = offset.expand(self.batch_size, self.num_atoms)
u_index = (u + offset).view(-1).to(torch.int64)
l_index = (l + offset).view(-1).to(torch.int64)
predictions_next = (dones + (1 - dones) * predictions_next)
m_prob_action = m_prob[actions == 1, :].view(-1)
m_prob_action.index_add_(0, u_index, (predictions_next * (u - b)).view(-1))
m_prob_action.index_add_(0, l_index, (predictions_next * (b - l)).view(-1))
m_prob[actions == 1, :] = m_prob_action.view(-1, self.num_atoms)
self.optimizer.zero_grad()
loss = - prediction.log() * m_prob
loss.sum((1, 2)).mean().backward(retain_graph=True)
def __str__(self):
return 'CategoricalDQN-' + str(self.observation_space) + "-" + str(self.action_space) + "-" + str(
self.network) + "-" + str(self.memory) + "-" + str(self.step_train) + "-" + str(
self.step) + "-" + str(self.batch_size) + "-" + str(self.gamma) + "-" + str(self.loss) + "-" + str(
self.optimizer) + "-" + str(self.greedy_exploration) + "-" + str(self.num_atoms) + "-" + str(
self.r_min) + "-" + str(self.r_max) + "-" + str(self.delta_z) + "-" + str(self.z)
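# Minimal usage sketch (the environment and call pattern are illustrative assumptions;
# defaults for memory, network and optimizer come from the constructor above):
# import gym
# env = gym.make("CartPole-v1")
# agent = CategoricalDQN(env.observation_space, env.action_space)
# action = agent.get_action(env.reset())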
|
py | 1a32a1449b30ea76ad2929ea1e1aefc91037c77e | import torch
import numpy as np
import pandas as pd
import torch.optim as optim
import torch.nn.functional as F
def load_training_data(path='./data/training_label.txt'):
if 'training_label' in path:
with open(path, 'r', encoding='UTF-8') as f:
lines = f.readlines()
lines = [line.strip('\n').split(' ') for line in lines]
x = [line[2:] for line in lines]
y = [line[0] for line in lines]
return x, y
else:
with open(path, 'r', encoding='UTF-8') as f:
lines = f.readlines()
x = [line.strip('\n').split(' ') for line in lines]
return x
def load_testing_data(path='./data/testing_data.txt'):
with open(path, 'r', encoding='UTF-8') as f:
lines = f.readlines()
X = ["".join(line.strip('\n').split(",")[1:]).strip() for line in lines[1:]]
X = [sen.split(' ') for sen in X]
return X
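# For example, a testing_data.txt row such as "1,today is a good day" (after the header
# line, which is skipped) is parsed into the token list ["today", "is", "a", "good", "day"].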
def evaluation(outputs, labels):
outputs[outputs >= 0.5] = 1
outputs[outputs < 0.5] = 0
correct = torch.sum(torch.eq(outputs, labels)).item()
return correct
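# Illustrative check of the thresholding above:
# evaluation(torch.tensor([0.7, 0.2]), torch.tensor([1.0, 0.0]))  # -> 2 correct predictions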
|
py | 1a32a206fa12013037fd889e47c2b6aa283f0c7e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/startup.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
#
# CLEAN ROOM MODULE
#
# This module is classified as a "Clean Room" module and is subject to
# restrictions on what it may import.
#
# See: https://king-phisher.readthedocs.io/en/latest/development/modules.html#clean-room-modules
#
################################################################################
import collections
import gc
import logging
import os
import shutil
import subprocess
import sys
from king_phisher import its
from king_phisher import version
ProcessResults = collections.namedtuple('ProcessResults', ('stdout', 'stderr', 'status'))
"""
A named tuple for holding the results of an executed external process.
.. py:attribute:: stdout
A string containing the data the process wrote to stdout.
.. py:attribute:: stderr
A string containing the data the process wrote to stderr.
.. py:attribute:: status
An integer representing the process's exit code.
"""
def _run_pipenv(args, cwd=None):
"""
Execute Pipenv with the supplied arguments and return the
:py:class:`~.ProcessResults`. If the exit status is non-zero, then the
stdout buffer from the Pipenv execution will be written to stderr.
:param tuple args: The arguments for the Pipenv.
:param str cwd: An optional current working directory to use for the
process.
:return: The results of the execution.
:rtype: :py:class:`~.ProcessResults`
"""
path = which('pipenv')
if path is None:
raise RuntimeError('pipenv could not be found')
args = (path,) + tuple(args)
results = run_process(args, cwd=cwd)
if results.status:
sys.stderr.write('pipenv encountered the following error:\n')
sys.stderr.write(results.stdout)
sys.stderr.flush()
return results
def pipenv_entry(parser, entry_point):
"""
Run through startup logic for a Pipenv script (see Pipenv: `Custom Script
Shortcuts`_ for more information). This sets up a basic stream logging
configuration, establishes the Pipenv environment and finally calls the
actual entry point using :py:func:`os.execve`.
.. note::
Due to the use of :py:func:`os.execve`, this function does not return.
.. note::
Due to the use of :py:func:`os.execve` and ``os.EX_*`` exit codes, this
function is not available on Windows.
:param parser: The argument parser to use. Arguments are added to it and
extracted before passing the remainder to the entry point.
:param str entry_point: The name of the entry point using Pipenv.
.. _Custom Script Shortcuts: https://pipenv.readthedocs.io/en/latest/advanced/#custom-script-shortcuts
"""
if its.on_windows:
# this is because of the os.exec call and os.EX_* status codes
raise RuntimeError('pipenv_entry is incompatible with windows')
env_group = parser.add_argument_group('environment wrapper options')
env_group.add_argument('--env-install', dest='pipenv_install', default=False, action='store_true', help='install pipenv environment and exit')
env_group.add_argument('--env-update', dest='pipenv_update', default=False, action='store_true', help='update pipenv requirements and exit')
argp_add_default_args(parser)
arguments, _ = parser.parse_known_args()
sys_argv = sys.argv
sys_argv.pop(0)
if sys.version_info < (3, 4):
print('[-] the Python version is too old (minimum required is 3.4)')
return os.EX_SOFTWARE
# initialize basic stream logging
logger = logging.getLogger('KingPhisher.wrapper')
logger.setLevel(arguments.loglvl if arguments.loglvl else 'WARNING')
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(arguments.loglvl if arguments.loglvl else 'WARNING')
console_log_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
logger.addHandler(console_log_handler)
target_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
logger.debug("target diretory: {}".format(target_directory))
os.environ['PIPENV_VENV_IN_PROJECT'] = os.environ.get('PIPENV_VENV_IN_PROJECT', 'True')
os.environ['PIPENV_PIPFILE'] = os.environ.get('PIPENV_PIPFILE', os.path.join(target_directory, 'Pipfile'))
logger.info('checking for the pipenv environment')
if which('pipenv') is None:
logger.exception('pipenv not found, run tools/install.sh --update')
return os.EX_UNAVAILABLE
pipenv_path = which('pipenv')
logger.debug("pipenv path: {0!r}".format(pipenv_path))
if arguments.pipenv_install or not os.path.isdir(os.path.join(target_directory, '.venv')):
if arguments.pipenv_install:
logger.info('installing the pipenv environment')
else:
logger.warning('no pre-existing pipenv environment was found, installing it now')
results = _run_pipenv(('--site-packages', 'install'), cwd=target_directory)
if results.status:
logger.error('failed to install the pipenv environment')
logger.info('removing the incomplete .venv directory')
try:
shutil.rmtree(os.path.join(target_directory, '.venv'))
except OSError:
logger.error('failed to remove the incomplete .venv directory', exc_info=True)
return results.status
if arguments.pipenv_install:
return os.EX_OK
if arguments.pipenv_update:
logger.info('updating the pipenv environment')
results = _run_pipenv(('--site-packages', 'update'), cwd=target_directory)
if results.status:
logger.error('failed to update the pipenv environment')
return results.status
logger.info('the pipenv environment has been updated')
return os.EX_OK
logger.debug('pipenv Pipfile: {}'.format(os.environ['PIPENV_PIPFILE']))
# the blank arg being passed is required for pipenv
passing_argv = [' ', 'run', entry_point] + sys_argv
os.execve(pipenv_path, passing_argv, os.environ)
def run_process(process_args, cwd=None, encoding='utf-8'):
"""
Start a process, wait for it to complete and return a
:py:class:`~.ProcessResults` object.
:param process_args: The arguments for the process, including the binary.
:param cwd: An optional current working directory to use for the process.
:param str encoding: The encoding to use for strings.
:return: The results of the process including the status code and any text
printed to stdout or stderr.
:rtype: :py:class:`~.ProcessResults`
"""
cwd = cwd or os.getcwd()
process_handle = subprocess.Popen(process_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process_handle.wait()
results = ProcessResults(
process_handle.stdout.read().decode(encoding),
process_handle.stderr.read().decode(encoding),
process_handle.returncode
)
return results
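# Illustrative usage (the command is an example): run a simple command and inspect the result.
# results = run_process(('echo', 'hello'))
# results.status  # -> 0 on success
# results.stdout  # -> 'hello\n'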
def which(program):
"""
Examine the ``PATH`` environment variable to determine the location for the
specified program. If it can not be found None is returned. This is
fundamentally similar to the Unix utility of the same name.
:param str program: The name of the program to search for.
:return: The absolute path to the program if found.
:rtype: str
"""
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
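# Illustrative usage (the program name and path are examples; the result depends on PATH):
# which('python3')  # -> e.g. '/usr/bin/python3', or None if it cannot be found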
def argp_add_default_args(parser, default_root=''):
"""
Add standard arguments to a new :py:class:`argparse.ArgumentParser`
instance. Used to add the utilities argparse options to the wrapper for
display.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
:param str default_root: The default root logger to specify.
"""
parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
parser.add_argument('-L', '--log', dest='loglvl', choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL'), help='set the logging level')
parser.add_argument('--logger', default=default_root, help='specify the root logger')
gc_group = parser.add_argument_group('garbage collector options')
gc_group.add_argument('--gc-debug-leak', action='store_const', const=gc.DEBUG_LEAK, default=0, help='set the DEBUG_LEAK flag')
gc_group.add_argument('--gc-debug-stats', action='store_const', const=gc.DEBUG_STATS, default=0, help='set the DEBUG_STATS flag')
return parser
def argp_add_client(parser):
"""
Add client-specific arguments to a new :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
"""
kpc_group = parser.add_argument_group('client specific options')
kpc_group.add_argument('-c', '--config', dest='config_file', required=False, help='specify a configuration file to use')
kpc_group.add_argument('--no-plugins', dest='use_plugins', default=True, action='store_false', help='disable all plugins')
kpc_group.add_argument('--no-style', dest='use_style', default=True, action='store_false', help='disable interface styling')
return parser
def argp_add_server(parser):
"""
Add server-specific arguments to a new :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
"""
kps_group = parser.add_argument_group('server specific options')
kps_group.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='run in the foreground (do not fork)')
kps_group.add_argument('--update-geoip-db', dest='update_geoip_db', action='store_true', default=False, help='update the geoip database and exit')
kps_group.add_argument('--verify-config', dest='verify_config', action='store_true', default=False, help='verify the configuration and exit')
kps_group.add_argument('config_file', action='store', help='configuration file to use')
return parser
|
py | 1a32a2cf5cb3fa6acca0d730abb9ec86d7b1aa5e | import media
import fresh_tomatoes
import httplib # Used in Python 2
conn = httplib.HTTPSConnection("api.themoviedb.org")
payload = "{}"
# Movie IDs from themoviedb.org
movie_id = ["246655", "2080", "49538", "127585", "246655", "263115"]
release_date = []
# For each movie_id, request its release dates from the API, locate the "US" entry in
# the response text, and slice out the 10-character release date that precedes "T00".
for moviedb in movie_id:
conn.request("GET", "/3/movie/" + moviedb + "/release_dates?api_key=c744541ef0c2fd5ff5ccba678f100347", payload) # noqa
res = conn.getresponse()
data = res.read()
j = data.decode("utf-8").find("US") # find position of US release date info
US = data.decode("utf-8")[j:]
i = US.find("T00")
release_date.append(US[i-10:i])
# Creates instances of the Movie class for each movie
xmen = media.Movie("X-Men",
"https://upload.wikimedia.org/wikipedia/en/8/81/X-MenfilmPoster.jpg", # noqa
"https://www.youtube.com/watch?v=nbNcULQFojc",
release_date[0])
xmen_origins_wolverine = media.Movie("X-Men Origins: Wolverine",
"https://upload.wikimedia.org/wikipedia/en/e/ec/X-Men_Origins_Wolverine.jpg", # noqa
"https://www.youtube.com/watch?v=toLpchTUYk8", # noqa
release_date[1])
xmen_first_class = media.Movie("X-Men: First Class",
"https://upload.wikimedia.org/wikipedia/en/5/55/X-MenFirstClassMoviePoster.jpg", #noqa
"https://www.youtube.com/watch?v=UrbHykKUfTM",
release_date[2])
xmen_days_of_future_past = media.Movie("X-Men: Days of Future Past",
"https://upload.wikimedia.org/wikipedia/en/0/0c/X-Men_Days_of_Future_Past_poster.jpg", # noqa
"https://www.youtube.com/watch?v=pK2zYHWDZKo", # noqa
release_date[3])
xmen_apocalypse = media.Movie("X-Men: Apocalypse",
"https://upload.wikimedia.org/wikipedia/en/0/04/X-Men_-_Apocalypse.jpg", # noqa
"https://www.youtube.com/watch?v=Jer8XjMrUB4&spfreload=10", # noqa
release_date[4])
logan = media.Movie("Logan",
"https://upload.wikimedia.org/wikipedia/en/3/37/Logan_2017_poster.jpg", # noqa
"https://www.youtube.com/watch?v=Div0iP65aZo",
release_date[5])
# Creates a movies array to be accessed from fresh_tomatoes.py
movies = [xmen, xmen_origins_wolverine, xmen_first_class,
xmen_days_of_future_past, xmen_apocalypse, logan]
# Creates a HTML page by passing in movies
fresh_tomatoes.open_movies_page(movies)
|
py | 1a32a2e4f73f5c0c90def62e2570d7b4033d7076 | import copy
from collections import OrderedDict
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from ..utils import DeprecationHelper, EasyDict, classproperty
__all__ = ['Space', 'NestedSpace', 'AutoGluonObject', 'List', 'Dict',
'Categorical', 'Choice', 'Real', 'Int', 'Bool']
class Space(object):
"""Basic search space describing set of possible values for hyperparameter.
"""
pass
class SimpleSpace(Space):
"""Non-nested search space (i.e. corresponds to a single simple hyperparameter).
"""
def __repr__(self):
reprstr = self.__class__.__name__
if hasattr(self, 'lower') and hasattr(self, 'upper'):
reprstr += ': lower={}, upper={}'.format(self.lower, self.upper)
if hasattr(self, 'value'):
reprstr += ': value={}'.format(self.value)
return reprstr
def get_hp(self, name):
"""Fetch particular hyperparameter based on its name.
"""
raise NotImplementedError
@property
def hp(self):
""" Return hyperparameter corresponding to this search space.
"""
return self.get_hp(name='')
@property
def default(self):
"""Return default value of hyperparameter corresponding to this search space.
"""
default = self._default if self._default else self.hp.default_value
return default
@default.setter
def default(self, value):
"""Set default value for hyperparameter corresponding to this search space.
"""
self._default = value
@property
def rand(self):
"""Return randomly sampled (but valid) value from this search space.
"""
cs = CS.ConfigurationSpace()
cs.add_hyperparameter(self.hp)
return cs.sample_configuration().get_dictionary()['']
class NestedSpace(Space):
"""Nested hyperparameter search space, which is a search space that itself contains multiple search spaces.
"""
def sample(self, **config):
"""Sample a configuration from this search space.
"""
pass
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
raise NotImplementedError
@property
def kwspaces(self):
""" OrderedDict representation of this search space.
"""
raise NotImplementedError
@property
def default(self):
"""Return default value for hyperparameter corresponding to this search space.
"""
config = self.cs.get_default_configuration().get_dictionary()
return self.sample(**config)
@property
def rand(self):
"""Randomly sample configuration from this nested search space.
"""
config = self.cs.sample_configuration().get_dictionary()
return self.sample(**config)
class AutoGluonObject(NestedSpace):
r"""Searchable objects,
created by decorating a custom Python class or function using the
:func:`autogluon.obj` or :func:`autogluon.func` decorators.
"""
def __call__(self, *args, **kwargs):
"""Convenience method for interacting with AutoGluonObject.
"""
if not self._inited:
self._inited = True
self._instance = self.init()
return self._instance.__call__(*args, **kwargs)
def init(self):
"""Instantiate an actual instance of this `AutoGluonObject`.
In order to interact with such an `object`, you must always first call: `object.init()`.
"""
config = self.cs.get_default_configuration().get_dictionary()
return self.sample(**config)
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
for k, v in self.kwvars.items():
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, k)
elif isinstance(v, Space):
hp = v.get_hp(name=k)
_add_hp(cs, hp)
else:
_rm_hp(cs, k)
return cs
@classproperty
def kwspaces(cls):
""" OrderedDict representation of this search space.
"""
return cls.__init__.kwspaces
def sample(self):
"""Sample a configuration from this search space.
"""
raise NotImplementedError
def __repr__(self):
return 'AutoGluonObject'
class List(NestedSpace):
r"""Nested search space corresponding to an ordered list of hyperparameters.
Parameters
----------
args : list
a list of search spaces.
Examples
--------
>>> sequence = ag.List(
>>> ag.space.Categorical('conv3x3', 'conv5x5', 'conv7x7'),
>>> ag.space.Categorical('BatchNorm', 'InstanceNorm'),
>>> ag.space.Categorical('relu', 'sigmoid'),
>>> )
"""
def __init__(self, *args):
self.data = [*args]
def __iter__(self):
for elem in self.data:
yield elem
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, data):
self.data[index] = data
def __len__(self):
return len(self.data)
def __getstate__(self):
return self.data
def __setstate__(self, d):
self.data = d
def __getattribute__(self, s):
try:
x = super(List, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
x = self.data.__getattribute__(s)
return x
def sample(self, **config):
"""Sample a configuration from this search space.
"""
ret = []
kwspaces = self.kwspaces
striped_keys = [k.split('.')[0] for k in config.keys()]
for idx, obj in enumerate(self.data):
if isinstance(obj, NestedSpace):
sub_config = _strip_config_space(config, prefix=str(idx))
ret.append(obj.sample(**sub_config))
elif isinstance(obj, SimpleSpace):
ret.append(config[str(idx)])
else:
ret.append(obj)
return ret
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
for k, v in enumerate(self.data):
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, str(k))
elif isinstance(v, Space):
hp = v.get_hp(name=str(k))
_add_hp(cs, hp)
return cs
@property
def kwspaces(self):
""" OrderedDict representation of this search space.
"""
kw_spaces = OrderedDict()
for idx, obj in enumerate(self.data):
k = str(idx)
if isinstance(obj, NestedSpace):
kw_spaces[k] = obj
for sub_k, sub_v in obj.kwspaces.items():
new_k = '{}.{}'.format(k, sub_k)
kw_spaces[new_k] = sub_v
elif isinstance(obj, Space):
kw_spaces[k] = obj
return kw_spaces
def __repr__(self):
reprstr = self.__class__.__name__ + str(self.data)
return reprstr
class Dict(NestedSpace):
"""Nested search space for dictionary containing multiple hyperparameters.
Examples
--------
>>> g = ag.space.Dict(
>>> hyperparam1 = ag.space.Categorical('alpha', 'beta'),
>>> hyperparam2 = ag.space.Int(0, 3)
>>> )
>>> print(g)
"""
def __init__(self, **kwargs):
self.data = EasyDict(kwargs)
def __getattribute__(self, s):
try:
x = super(Dict, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
x = self.data.__getattribute__(s)
return x
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, data):
self.data[key] = data
def __getstate__(self):
return self.data
def __setstate__(self, d):
self.data = d
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
for k, v in self.data.items():
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, k)
elif isinstance(v, Space):
hp = v.get_hp(name=k)
_add_hp(cs, hp)
return cs
@property
def kwspaces(self):
""" OrderedDict representation of this search space.
"""
kw_spaces = OrderedDict()
for k, obj in self.data.items():
if isinstance(obj, NestedSpace):
kw_spaces[k] = obj
for sub_k, sub_v in obj.kwspaces.items():
new_k = '{}.{}'.format(k, sub_k)
kw_spaces[new_k] = sub_v
elif isinstance(obj, Space):
kw_spaces[k] = obj
return kw_spaces
def sample(self, **config):
"""Sample a configuration from this search space.
"""
ret = {}
ret.update(self.data)
kwspaces = self.kwspaces
kwspaces.update(config)
striped_keys = [k.split('.')[0] for k in config.keys()]
for k, v in kwspaces.items():
if k in striped_keys:
if isinstance(v, NestedSpace):
sub_config = _strip_config_space(config, prefix=k)
ret[k] = v.sample(**sub_config)
else:
ret[k] = v
return ret
def __repr__(self):
reprstr = self.__class__.__name__ + str(self.data)
return reprstr
class Categorical(NestedSpace):
"""Nested search space for hyperparameters which are categorical. Such a hyperparameter takes one value out of the discrete set of provided options.
Parameters
----------
data : Space or python built-in objects
the choice candidates
Examples
--------
>>> a = ag.space.Categorical('a', 'b', 'c', 'd')
>>> b = ag.space.Categorical('resnet50', autogluon_obj())
"""
def __init__(self, *data):
self.data = [*data]
def __iter__(self):
for elem in self.data:
yield elem
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, data):
self.data[index] = data
def __len__(self):
return len(self.data)
@property
def cs(self):
""" ConfigSpace representation of this search space.
"""
cs = CS.ConfigurationSpace()
if len(self.data) == 0:
return CS.ConfigurationSpace()
hp = CSH.CategoricalHyperparameter(name='choice', choices=range(len(self.data)))
_add_hp(cs, hp)
for i, v in enumerate(self.data):
if isinstance(v, NestedSpace):
_add_cs(cs, v.cs, str(i))
return cs
def sample(self, **config):
"""Sample a configuration from this search space.
"""
choice = config.pop('choice')
if isinstance(self.data[choice], NestedSpace):
# nested space: Categorical of AutoGluonobjects
min_config = _strip_config_space(config, prefix=str(choice))
return self.data[choice].sample(**min_config)
else:
return self.data[choice]
@property
def kwspaces(self):
"""OrderedDict representation of this search space.
"""
kw_spaces = OrderedDict()
for idx, obj in enumerate(self.data):
if isinstance(obj, NestedSpace):
for sub_k, sub_v in obj.kwspaces.items():
new_k = '{}.{}'.format(idx, sub_k)
kw_spaces[new_k] = sub_v
return kw_spaces
def __repr__(self):
reprstr = self.__class__.__name__ + str(self.data)
return reprstr
Choice = DeprecationHelper(Categorical, 'Choice')
class Real(SimpleSpace):
"""Search space for numeric hyperparameter that takes continuous values.
Parameters
----------
lower : float
the lower bound of the search space
upper : float
the upper bound of the search space
default : float (optional)
default value
log : (True/False)
Whether to search the values on a logarithmic rather than linear scale.
This is useful for numeric hyperparameters (such as learning rates) whose search space spans many orders of magnitude.
Examples
--------
>>> learning_rate = ag.Real(0.01, 0.1, log=True)
"""
def __init__(self, lower, upper, default=None, log=False):
self.lower = lower
self.upper = upper
self.log = log
self._default = default
def get_hp(self, name):
return CSH.UniformFloatHyperparameter(name=name, lower=self.lower, upper=self.upper,
default_value=self._default, log=self.log)
class Int(SimpleSpace):
"""Search space for numeric hyperparameter that takes integer values.
Parameters
----------
lower : int
The lower bound of the search space
upper : int
The upper bound of the search space
default : int (optional)
Default value
Examples
--------
>>> range = ag.space.Int(0, 100)
"""
def __init__(self, lower, upper, default=None):
self.lower = lower
self.upper = upper
self._default = default
def get_hp(self, name):
return CSH.UniformIntegerHyperparameter(name=name, lower=self.lower, upper=self.upper,
default_value=self._default)
class Bool(Int):
"""Search space for hyperparameter that is either True or False.
`ag.Bool()` serves as shorthand for: `ag.space.Categorical(True, False)`
Examples
--------
>>> pretrained = ag.space.Bool()
"""
def __init__(self):
super(Bool, self).__init__(0, 1)
def _strip_config_space(config, prefix):
# filter out the config with the corresponding prefix
new_config = {}
for k, v in config.items():
if k.startswith(prefix):
new_config[k[len(prefix)+1:]] = v
return new_config
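# Illustrative example of the prefix stripping above (keys and values are made up):
# _strip_config_space({'net.lr': 0.01, 'choice': 0}, prefix='net')  # -> {'lr': 0.01}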
def _add_hp(cs, hp):
if hp.name in cs._hyperparameters:
cs._hyperparameters[hp.name] = hp
else:
cs.add_hyperparameter(hp)
def _add_cs(master_cs, sub_cs, prefix, delimiter='.', parent_hp=None):
new_parameters = []
for hp in sub_cs.get_hyperparameters():
new_parameter = copy.deepcopy(hp)
# Allow for an empty top-level parameter
if new_parameter.name == '':
new_parameter.name = prefix
elif not prefix == '':
new_parameter.name = "%s%s%s" % (prefix, '.', new_parameter.name)
new_parameters.append(new_parameter)
for hp in new_parameters:
_add_hp(master_cs, hp)
def _rm_hp(cs, k):
if k in cs._hyperparameters:
cs._hyperparameters.pop(k)
for hp in cs.get_hyperparameters():
if hp.name.startswith("%s."%(k)):
cs._hyperparameters.pop(hp.name)
|
py | 1a32a2f05b28614d2eb95591bed9c5f28885b515 | from datetime import datetime
import pytest
from .fixtures import create_plan, get_subscription # NOQA: F401
from .test_paddle import BadPaddleDataWarning, paddle_client # NOQA: F401
def test_list_subscription_users(paddle_client, get_subscription): # NOQA: F811,E501
subscription_users = paddle_client.list_subscription_users()
for subscription in subscription_users:
assert isinstance(subscription['subscription_id'], int)
assert isinstance(subscription['plan_id'], int)
assert isinstance(subscription['user_id'], int)
assert isinstance(subscription['user_email'], str)
assert isinstance(subscription['marketing_consent'], bool)
assert isinstance(subscription['update_url'], str)
assert isinstance(subscription['update_url'], str)
assert isinstance(subscription['state'], str)
assert isinstance(subscription['cancel_url'], str)
assert isinstance(subscription['signup_date'], str)
datetime.strptime(subscription['signup_date'], '%Y-%m-%d %H:%M:%S')
assert isinstance(subscription['last_payment'], dict)
assert isinstance(subscription['payment_information'], dict)
assert isinstance(subscription['linked_subscriptions'], list)
def test_list_subscription_users_with_subscription_id(paddle_client, get_subscription): # NOQA: F811,E501
subscription_id = get_subscription['subscription_id']
subscription_users = paddle_client.list_subscription_users(
subscription_id=subscription_id,
)
for subscription in subscription_users:
assert subscription['subscription_id'] == subscription_id
def test_list_subscription_users_with_plan_id(paddle_client, get_subscription): # NOQA: F811,E501
plan_id = get_subscription['plan_id']
subscription_users = paddle_client.list_subscription_users(plan_id=plan_id)
for subscription in subscription_users:
assert subscription['plan_id'] == plan_id
def test_list_subscription_users_with_state(paddle_client, get_subscription): # NOQA: F811,E501
state = get_subscription['state']
subscription_users = paddle_client.list_subscription_users(state=state)
for subscription in subscription_users:
assert subscription['state'] == state
def test_list_subscription_users_with_page(paddle_client, get_subscription): # NOQA: F811,E501
list_one = paddle_client.list_subscription_users(
results_per_page=1, page=1,
)
list_two = paddle_client.list_subscription_users(
results_per_page=1, page=2,
)
assert list_one != list_two
def test_list_subscription_users_with_results_per_page(paddle_client, get_subscription): # NOQA: F811,E501
list_one = paddle_client.list_subscription_users(
results_per_page=1, page=1,
)
assert len(list_one) == 1
def test_list_subscription_users_invalid_state(paddle_client): # NOQA: F811
with pytest.raises(ValueError) as error:
paddle_client.list_subscription_users(state='test')
error.match('state must be one of active, past due, trialling, paused')
def test_update_subscription(paddle_client, get_subscription): # NOQA: F811
"""
If you get the error:
Unable to find subscription with id 1
You will need to manually enter a subscription_id below.
(this is why it's mocked in the first place, it's a pain sorry)
"""
subscription_id = get_subscription['subscription_id']
    # Can't update passthrough (least destructive) as 'list_subscription_users'
# does not return it in the response
started_at_paused = 'paused_at' in get_subscription
pause = not started_at_paused
response = paddle_client.update_subscription(
subscription_id=subscription_id,
pause=pause,
)
assert response['subscription_id'] == subscription_id
assert isinstance(response['user_id'], int)
assert isinstance(response['plan_id'], int)
assert isinstance(response['next_payment'], dict)
new_subscription_data = paddle_client.list_subscription_users(
subscription_id=subscription_id,
)
new_subscription_data = new_subscription_data[0]
if started_at_paused:
assert 'paused_at' not in new_subscription_data
assert 'paused_from' not in new_subscription_data
assert 'paused_reason' not in new_subscription_data
else:
assert isinstance(new_subscription_data['paused_at'], str)
datetime.strptime(new_subscription_data['paused_at'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert isinstance(new_subscription_data['paused_from'], str)
datetime.strptime(new_subscription_data['paused_from'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert new_subscription_data['paused_reason'] == 'voluntary'
# Set the pause state back to what is was before the test ran
paddle_client.update_subscription(
subscription_id=subscription_id,
pause=not pause,
)
# Test the change back worked
new_subscription_data = paddle_client.list_subscription_users(
subscription_id=subscription_id,
)
new_subscription_data = new_subscription_data[0]
if started_at_paused:
assert isinstance(new_subscription_data['paused_at'], str)
datetime.strptime(new_subscription_data['paused_at'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert isinstance(new_subscription_data['paused_from'], str)
datetime.strptime(new_subscription_data['paused_from'], '%Y-%m-%d %H:%M:%S') # NOQA: E501
assert new_subscription_data['paused_reason'] == 'voluntary'
else:
assert 'paused_at' not in new_subscription_data
assert 'paused_from' not in new_subscription_data
assert 'paused_reason' not in new_subscription_data
def test_update_subscription_invalid_currency(paddle_client): # NOQA: F811
with pytest.raises(ValueError) as error:
paddle_client.update_subscription(
subscription_id=1, currency='test'
)
error.match('currency must be one of USD, GBP, EUR')
@pytest.mark.mocked
def test_cancel_subscription(mocker, paddle_client): # NOQA: F811
"""
This test is mocked as subscriptions must be created manually (see
`Creating a subscription` in CONTRIBUTING.md) as there is no API
to do so
If this test fails it means a change has been made which has affected
the cancel subscription endpoint.
The code now needs to be run directly against Paddle's API at least once to
ensure the new code is working as expected.
    Please comment out the '@pytest.mark.skip()' line for the
    'cancel_subscription_no_mock' test to run the cancel_subscription code
against the Paddle API to check the changes work.
Once the `cancel_subscription_no_mock` test passes please update
    the mock below and restore the '@pytest.mark.skip()' line so the test is skipped again.
"""
subscription_id = 123
json = {
'subscription_id': subscription_id,
'vendor_id': paddle_client.vendor_id,
'vendor_auth_code': paddle_client.api_key,
}
url = 'https://sandbox-vendors.paddle.com/api/2.0/subscription/users_cancel' # NOQA: E501
method = 'POST'
request = mocker.patch('paddle.paddle.requests.request')
paddle_client.cancel_subscription(
subscription_id=subscription_id,
)
request.assert_called_once_with(url=url, json=json, method=method)
# Comment out '@pytest.mark.skip()' to ensure the cancel_subscription
# code is working as expected
@pytest.mark.skip()
def test_cancel_subscription_no_mock(paddle_client, get_subscription): # NOQA: F811,E501
subscription_id = get_subscription
response = paddle_client.cancel_subscription(
subscription_id=subscription_id,
)
assert response is True
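# Hedged note (added for illustration; not part of the original test module):
# the 'mocked' marker used above lets these tests be selected or skipped from
# the command line, e.g.:
#   pytest -m mocked          # run only the mocked cancel_subscription test
#   pytest -m "not mocked"    # run only the tests that hit the sandbox API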
|
py | 1a32a354fe7f67fced9b2b1e6e1733aef67edbc5 | import numpy as np
from scipy.linalg import logm
""" UTILITY FUNCTIONS """
def hat(w):
""" Function takes in a vector of size 3 and returns
its corresponding skew-symmetric matrix """
w1 = w[0]
w2 = w[1]
w3 = w[2]
what = np.array( [ [0,-w3,w2], [w3,0,-w1], [-w2,w1,0] ] )
return what
def unhat(what):
""" Function takes in a skew-symmetric matrix and returns
its corresponding vector """
w1 = what[2,1]
w2 = what[0,2]
w3 = what[1,0]
w = np.array( (w1,w2,w3) )
return w
def qmult(q1,q2):
""" Function takes in quaternions q1 and q2, and performs
quaternion multiplication: q3 = q1*q2 """
v1 = q1[0:3]
s1 = q1[3]
q3 = np.block([ [s1*np.identity(3) + hat(v1), v1[:,np.newaxis] ], [-v1, s1] ]) @ q2
return q3
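# Hedged sanity check (added for illustration; not part of the original module):
# multiplying by the identity quaternion [0, 0, 0, 1] should return the input, e.g.
#   q = phi_to_quat(np.array([0.1, -0.2, 0.3]))
#   np.allclose(qmult(q, np.array([0.0, 0.0, 0.0, 1.0])), q)   # -> True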
def qconj(q):
""" Function takes in a quaternion and returns its conjugate """
v = q[0:3]
v = -v
qplus = np.concatenate((v,q[3,np.newaxis]),axis=0)
return qplus
def phi_to_quat(phi):
""" Function takes in a rotation parameterized by
Euler Axis & Angle and returns its corresponding quaternion """
if np.linalg.norm(phi) > 10*np.pi/180:
theta = np.linalg.norm(phi)
r = phi/theta
qvec = r*np.sin(theta/2)
qsca = np.array(np.cos(theta/2))
q = np.hstack((qvec,qsca))
else:
qvec = phi/2
qsca = np.array(1-1/8*np.dot(phi,phi))
q = np.hstack((qvec,qsca))
return q
def quat_to_phi(q):
""" Function takes in a rotation parameterized by
a quaternion and returns its corresponding Euler Axis & Angle """
Q = quat_to_rot(q)
phi = unhat(logm(Q))
return phi
def quat_to_rot(q):
""" Function takes in a rotation parameterized by
a quaternion and returns its corresponding rotation matrix """
v = q[0:3]
s = q[3]
A = np.identity(3) + 2*hat(v) @ (s*np.identity(3) + hat(v))
#A = np.transpose(A)
return A
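# Hedged sanity checks (added for illustration; not part of the original module):
# for a unit quaternion the returned matrix is a proper rotation, and
# quat_to_phi inverts phi_to_quat for rotation angles below pi, e.g.
#   phi = np.array([0.3, -0.1, 0.2])
#   A = quat_to_rot(phi_to_quat(phi))
#   np.allclose(A @ A.T, np.eye(3))                      # -> True
#   np.allclose(quat_to_phi(phi_to_quat(phi)), phi)      # -> True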
""" Below is another way to convert from quaternion to rotation matrix
def quat_to_rot(q):
q1 = q[0]
q2 = q[1]
q3 = q[2]
q4 = q[3]
Q = np.array( [ [0,-q3,+q2], [+q3,0,-q1], [-q2,q1,0] ] )
A = (q4**2 - (q1**2+q2**2+q3**2))*np.identity(3) + 2*np.outer(np.array([q1,q2,q3]), np.array([q1,q2,q3])) - 2*q4 * Q
return A
""" |
py | 1a32a3c2fa08e6dc44c59ad3895130a0ee715531 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from job_spider.process import SpiderProcess, WriterProcess
from multiprocessing import Queue
import time
def main():
queue = Queue()
p1 = SpiderProcess(queue)
p2 = WriterProcess(queue)
p1.start()
p2.start()
while p2.is_alive():
if not p1.is_alive():
p1 = SpiderProcess(queue)
p1.start()
time.sleep(1)
p1.terminate()
p2.terminate()
if __name__ == '__main__':
main()
|
py | 1a32a3c759dd23078dd7e9de36c9dbd818df9a85 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.v2",
manifest={"StandardSqlDataType", "StandardSqlField", "StandardSqlStructType",},
)
class StandardSqlDataType(proto.Message):
r"""The type of a variable, e.g., a function argument. Examples: INT64:
    {type_kind="INT64"} ARRAY<STRING>: {type_kind="ARRAY",
    array_element_type="STRING"} STRUCT<x STRING, y ARRAY<DATE>>:
{type_kind="STRUCT", struct_type={fields=[ {name="x",
type={type_kind="STRING"}}, {name="y", type={type_kind="ARRAY",
array_element_type="DATE"}} ]}}
Attributes:
type_kind (~.standard_sql.StandardSqlDataType.TypeKind):
Required. The top level type of this field.
Can be any standard SQL data type (e.g.,
"INT64", "DATE", "ARRAY").
array_element_type (~.standard_sql.StandardSqlDataType):
The type of the array's elements, if type_kind = "ARRAY".
struct_type (~.standard_sql.StandardSqlStructType):
The fields of this struct, in order, if type_kind =
"STRUCT".
"""
class TypeKind(proto.Enum):
r""""""
TYPE_KIND_UNSPECIFIED = 0
INT64 = 2
BOOL = 5
FLOAT64 = 7
STRING = 8
BYTES = 9
TIMESTAMP = 19
DATE = 10
TIME = 20
DATETIME = 21
GEOGRAPHY = 22
NUMERIC = 23
BIGNUMERIC = 24
ARRAY = 16
STRUCT = 17
type_kind = proto.Field(proto.ENUM, number=1, enum=TypeKind,)
array_element_type = proto.Field(
proto.MESSAGE, number=2, oneof="sub_type", message="StandardSqlDataType",
)
struct_type = proto.Field(
proto.MESSAGE, number=3, oneof="sub_type", message="StandardSqlStructType",
)
class StandardSqlField(proto.Message):
r"""A field or a column.
Attributes:
name (str):
Optional. The name of this field. Can be
absent for struct fields.
type (~.standard_sql.StandardSqlDataType):
Optional. The type of this parameter. Absent
if not explicitly specified (e.g., CREATE
FUNCTION statement can omit the return type; in
this case the output parameter does not have
this "type" field).
"""
name = proto.Field(proto.STRING, number=1)
type = proto.Field(proto.MESSAGE, number=2, message=StandardSqlDataType,)
class StandardSqlStructType(proto.Message):
r"""
Attributes:
fields (Sequence[~.standard_sql.StandardSqlField]):
"""
fields = proto.RepeatedField(proto.MESSAGE, number=1, message=StandardSqlField,)
__all__ = tuple(sorted(__protobuf__.manifest))
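# Hedged usage sketch (added for illustration; not part of the generated module).
# Building the ARRAY<STRING> example from the StandardSqlDataType docstring:
#   array_of_string = StandardSqlDataType(
#       type_kind=StandardSqlDataType.TypeKind.ARRAY,
#       array_element_type=StandardSqlDataType(
#           type_kind=StandardSqlDataType.TypeKind.STRING,
#       ),
#   )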
|
py | 1a32a41cd676fdd7cd9811c84d56c7d6ad70d7e1 | __all__ = ["BleepService","BleepServiceError",]
import os
import urllib2
from django.contrib.sites.models import Site
from django.conf import settings
from parsers.BleepParser import BleepParser
class BleepServiceError(Exception):
"""
Bleep Service exception class.
Attributes:
message -- explanation of the error
"""
def __init__(self, *args):
"""
base constructor for error type
"""
Exception.__init__(self, *args)
class BleepServiceResult(object):
"""
    Result object that accumulates response messages while a Bleep service handles a request.
"""
def __init__(self, *args):
self.messages = []
def add_msg(self, msg):
return self.messages.append(msg)
def get_msgs(self):
return ",".join(self.messages)
msgs = property(get_msgs)
from django import forms
class BleepServiceForm(forms.Form):
"""
Form for external services
"""
class BleepService(object):
"""
Base class for all Bleep services
Attributes:
service_type -- the name for this Bleep service type
"""
def __init__(self, service_type):
self.service_type= service_type
self.result = BleepServiceResult()
@classmethod
def get_name(cls):
return cls.__name__
@classmethod
def get_doc(cls):
return cls.__doc__
def get_result(self):
return self.result
@classmethod
def get_service_type(cls):
return False
@classmethod
def is_service_type(cls, service_type):
return cls.get_service_type() == service_type
PARAMS = {}
@classmethod
def data_keys(cls):
"""
Returns a dictionary describing the data keys
"""
# return an empty dictionary by default
return dict()
@classmethod
def dispatch(cls, instance):
"""
Dispatch the bleep to its receivers
"""
if 'qued' == instance.bleep_status:
try:
print 'debuggery: looking up parser for content_type ' + instance.bleep_content_type
parser = BleepParser.get_parser(instance.bleep_content_type)
print 'debuggery: parser='+parser.get_name()
target_svc = BleepService.get_service(instance.bleep_service)
print 'debuggery: target_svc='+target_svc.get_name()
instance.bleep_status = 'dspd'
instance.save()
print 'debuggery: telling service to doit...'
result = target_svc.doit(instance, parser)
instance.bleep_status = 'comp'
instance.save()
print 'debuggery: adding comment about completion...'
instance.add_comment(result.get_msgs(),
cat=target_svc.get_name(), stat="completed")
print 'debuggery: comment added.'
            except BleepServiceError as exc:
instance.add_comment("Service request failed. Cause: "+ str(exc))
instance.bleep_status = 'fail'
instance.save()
else:
print 'debuggery: bleep not queued. status: %s' % instance.bleep_status
def perform(self, reqdata):
raise BleepServiceError('the base BleepService does nothing')
def doit(self, instance, parser):
"""
doit -- template method calling: parse_data, perform, get_results
"""
# Get the bleep instance data as a dictionary
bleep_data = instance.as_dict()
# Parse the request data into a dictionary
parsed_reqdata = parser.parse_reqdata(
instance.bleep_get_data, instance.bleep_post_data)
print 'debuggery: ... dumping out parsed_reqdata'
for key,val in parsed_reqdata.iteritems():
print "debuggery: %s=%s" % (key,val)
# Merge the two data sets into a shared context
context_data = dict(bleep_data, **parsed_reqdata)
# Perform the service request
return self.perform(context_data)
@classmethod
def get_service(cls, service_type):
"""
Factory method for BleepService subclasses
"""
for cls in BleepService.__subclasses__():
if cls.is_service_type(service_type):
return cls(service_type)
raise BleepServiceError("no service found for type: %s" % service_type)
@classmethod
def list(cls):
"""
List all subclasses that implement a service
"""
results = []
# list anything for now
for cls in BleepService.__subclasses__():
results.append(cls)
return results
@classmethod
def get_site_url(cls):
"""Returns fully qualified URL (no trailing slash) for the current site."""
current_site = Site.objects.get_current()
protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'http')
port = getattr(settings, 'MY_SITE_PORT', '')
url = '%s://%s' % (protocol, current_site.domain)
if port:
url += ':%s' % port
return url
@classmethod
def form_class(cls, service_type):
"""
Generate a new class definition for this service type
"""
cls = BleepService.get_service(service_type)
attrs = dict()
attrs['service_class'] = cls
for key,d in cls.PARAMS.iteritems():
print 'debuggery: processing param key: ', key
req = True if d.has_key('optional') else False
initial = d['default'] if d.has_key('default') else None
attrs[key] = forms.CharField(key, label=key, required=req, help_text=d['desc'],
initial=initial,
widget=forms.TextInput(attrs={'size':'40','class':'service_param'}))
print 'debuggery: added form attr: ' + key
attrs['data'] = forms.CharField('data', label='data', required=False,
help_text='extra request data', widget=forms.Textarea)
print 'debuggery: generating new form class: ' + cls.get_name()+'Form'
form_class = type(cls.get_name()+ 'Form',
(BleepServiceForm,), attrs)
return form_class
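# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A concrete service only needs to report its service type and implement perform().
# The names below (EchoService, 'echo') are hypothetical:
#
#   class EchoService(BleepService):
#       @classmethod
#       def get_service_type(cls):
#           return 'echo'
#
#       def perform(self, reqdata):
#           self.result.add_msg('echoed %d keys' % len(reqdata))
#           return self.result
#
#   svc = BleepService.get_service('echo')   # factory resolves the subclass by type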
|
py | 1a32a527e72038185dc65cab7820c153dd9436f3 | # Time: O(n)
# Space: O(n)
class Solution(object):
def maxSumOfThreeSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
n = len(nums)
accu = [0]
for num in nums:
accu.append(accu[-1]+num)
left_pos = [0] * n
total = accu[k]-accu[0]
for i in xrange(k, n):
if accu[i+1]-accu[i+1-k] > total:
left_pos[i] = i+1-k
total = accu[i+1]-accu[i+1-k]
else:
left_pos[i] = left_pos[i-1]
right_pos = [n-k] * n
total = accu[n]-accu[n-k]
for i in reversed(xrange(n-k)):
if accu[i+k]-accu[i] > total:
right_pos[i] = i
total = accu[i+k]-accu[i]
else:
right_pos[i] = right_pos[i+1]
result, max_sum = [], 0
for i in xrange(k, n-2*k+1):
left, right = left_pos[i-1], right_pos[i+k]
total = (accu[i+k]-accu[i]) + \
(accu[left+k]-accu[left]) + \
(accu[right+k]-accu[right])
if total > max_sum:
max_sum = total
result = [left, i, right]
return result
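if __name__ == '__main__':
    # Hedged usage sketch (added for illustration; not part of the original solution).
    # Classic test case: the best three length-2 windows start at indices 0, 3 and 5.
    assert Solution().maxSumOfThreeSubarrays([1, 2, 1, 2, 6, 7, 5, 1], 2) == [0, 3, 5]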
|
py | 1a32a565c28f55f5b33a2922a936ed708431785e | # -*- coding: utf-8 -*-
"""Copy of DS7_ElephantSQL
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1PoprMSkFLB1lAntnxZ0qksVgSX_otxp1
"""
#Installing psycopg2 if necessary
#!pip install psycopg2-binary
#importing libraries
import psycopg2
import sqlite3
#reviewing options and the help documentation
#dir(psycopg2)
#help(psycopg2.connect)
#DB connection info from ElephantSQL
dbname = 'qpirrfec'
user = 'qpirrfec'
password = 'PASSWORD' #update with PW here!
host = 'salt.db.elephantsql.com'
#creating the connection object from our login credentials above
pg_conn = psycopg2.connect(dbname = dbname, user = user, password = password, host = host)
#creating the cursor object to interact with the database
pg_curs = pg_conn.cursor()
#example query from the test_table we made earlier in the ElephantSQL GUI.
pg_curs.execute('SELECT * FROM test_table;')
pg_curs.fetchall()
#grabbing the rpg_db file from GitHub
#!wget https://github.com/LambdaSchool/DS-Unit-3-Sprint-2-SQL-and-Databases/blob/master/module1-introduction-to-sql/rpg_db.sqlite3?raw=true
#relabelling it properly
#!mv 'rpg_db.sqlite3?raw=true' rpg_db.sqlite3
#loading the sqlite3 file via sqlite3 connection object
sl_conn = sqlite3.connect('rpg_db.sqlite3')
#creating the sqlite3 cursor object
sl_curs = sl_conn.cursor()
#example count query against the sqlite3 database
sl_curs.execute('SELECT COUNT(*) FROM charactercreator_character').fetchall()
#getting the table schema from the sqlite3 database
sl_curs.execute('PRAGMA table_info(charactercreator_character);').fetchall()
#using the schema information above, creating the schema in ElephantSQL
#for the file transfer
create_character_table = """
CREATE TABLE charactercreator_character(
character_id SERIAL PRIMARY KEY,
name varchar(30),
level INT,
exp INT,
hp INT,
strength INT,
intelligence INT,
dexterity INT,
wisdom INT
);
"""
#executing the table creation
pg_curs.execute(create_character_table)
#the show-tables query for ElephantSQL
show_tables = """
SELECT *
FROM pg_catalog.pg_tables
WHERE schemaname != 'pg_catalog'
AND schemaname != 'information_schema';
"""
#executing the show-tables query in ElephantSQL
pg_curs.execute(show_tables)
pg_curs.fetchall()
#writing all of the character creator table to the variable characters for transfer
characters = sl_curs.execute('SELECT * from charactercreator_character;').fetchall()
#example of the first entry
characters[0]
#converting the first entry into a string, BUT cutting out the first column
#which was the ID
str(characters[0][1:])
#creating an insert command for one character as an example:
example_insert = """
INSERT INTO charactercreator_character
(name, level, exp, hp, strength, intelligence, dexterity,wisdom)
VALUES """ + str(characters[0][1:]) + ";"
#printing the example
print(example_insert)
#looping over all the characters and actually executing the inserts against ElephantSQL
for character in characters:
insert_character = """
INSERT INTO charactercreator_character
(name, level, exp, hp, strength, intelligence, dexterity,wisdom)
VALUES """ + str(character[1:]) + ";"
pg_curs.execute(insert_character)
#showing the table we just populated in ElephantSQL
pg_curs.execute('SELECT * FROM charactercreator_character;')
#example to show everything has been copied over to ElephantSQL!
pg_curs.fetchall()
#closing the cursor and committing the connection to save changes
pg_curs.close()
pg_conn.commit()
#now opening a new cursor to check the data for errors!
pg_curs = pg_conn.cursor()
pg_curs.execute('SELECT * from charactercreator_character;')
pg_characters= pg_curs.fetchall()
#first row in sqlite
characters[0]
#first row in ElephantSQL
pg_characters[0]
#verifying that all the entries copied over accurately!
for character, pg_character in zip(characters,pg_characters):
assert character == pg_character
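# Hedged alternative sketch (added for illustration; not part of the original notebook):
# psycopg2 can substitute the values itself, which also handles quoting/escaping,
# instead of building the INSERT string by hand:
#   insert_sql = """
#       INSERT INTO charactercreator_character
#       (name, level, exp, hp, strength, intelligence, dexterity, wisdom)
#       VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
#   """
#   for character in characters:
#       pg_curs.execute(insert_sql, character[1:])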
|