ext | sha | content |
---|---|---|
py | b4098c9d290093edb9342f34afcaf6809f9ff030 | import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class LinCongC(UGen):
"""
A cubic-interpolating linear congruential chaotic generator.
::
>>> lin_cong_c = supriya.ugens.LinCongC.ar(
... a=1.1,
... c=0.13,
... frequency=22050,
... m=1,
... xi=0,
... )
>>> lin_cong_c
LinCongC.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Chaos UGens"
_ordered_input_names = collections.OrderedDict(
[("frequency", 22050), ("a", 1.1), ("c", 0.13), ("m", 1), ("xi", 0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
|
py | b4098d92eca1bcaa655e76ef81582518c4130e60 | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class SoscanItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
url = scrapy.Field()
status = scrapy.Field()
time_retrieved = scrapy.Field()
time_loc = scrapy.Field()
time_modified = scrapy.Field()
jsonld = scrapy.Field() |
py | b4098dacf1749b0379c715b0f6f42d07f33fc633 | import matplotlib.pyplot as plt
import numpy as np
import codecs
# math.sqrt is faster than using ** for square roots, but both are on the same order of magnitude
import math
from PIL import Image, ImageEnhance
from config import train_params
def resize_img(img, size): # size CHW
'''
Force-resize the image to the given size (size is in CHW order).
'''
img=img.resize((size[1],size[2]),Image.BILINEAR)
return img
def random_crop(img, scale=[0.08,1.0],ratio=[3./4 , 4./3]):
'''
Randomly crop the image.
scale is the range of the crop area as a fraction of the original area.
ratio is the range of the crop's aspect ratio (width to height).
The output image is resized to the configured input size.
Size information is taken from train_params in config.py.
'''
aspect_ratio = math.sqrt(np.random.uniform(*ratio))
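# bound is the largest usable fraction of the image area for this aspect ratio:
# it caps the sampled crop area so that both the crop width and the crop height still fit inside the image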
bound = min((float(img.size[0])/img.size[1])/(aspect_ratio**2),(float(img.size[1])/img.size[0])*(aspect_ratio**2))
scale_max=min(scale[1],bound)
scale_min=min(scale[0],bound)
target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,scale_max)
target_size = math.sqrt(target_area)
w = int(target_size*aspect_ratio)
h = int(target_size/aspect_ratio)
i = np.random.randint(0,img.size[0] - w + 1)
j = np.random.randint(0,img.size[1] - h + 1)
img=img.crop((i,j,i+w,j+h))
img=img.resize((train_params['img_size'][1],train_params['img_size'][2]),Image.BILINEAR)
return img
def random_crop_scale(img,scale=[0.8,1.0],ratio=[3./4,4./3]):
'''
Randomly crop the image, driven primarily by the scale rate.
The scale rate is sampled first; the aspect ratio is then sampled within bounds derived from it.
The ratio range should be wide enough, otherwise it is ignored so that the crop can still succeed.
Size information is taken from train_params in config.py.
'''
scale[1]=min(scale[1],1.)
scale_rate=np.random.uniform(*scale)
target_area = img.size[0]*img.size[1]*scale_rate
target_size = math.sqrt(target_area)
bound_max=math.sqrt(float(img.size[0])/img.size[1]/scale_rate)
bound_min=math.sqrt(float(img.size[0])/img.size[1]*scale_rate)
aspect_ratio_max=min(ratio[1],bound_min)
aspect_ratio_min=max(ratio[0],bound_max)
if aspect_ratio_max < aspect_ratio_min:
aspect_ratio = np.random.uniform(bound_min,bound_max)
else:
aspect_ratio = np.random.uniform(aspect_ratio_min,aspect_ratio_max)
w = int(aspect_ratio * target_size)
h = int(target_size / aspect_ratio)
i = np.random.randint(0,img.size[0] - w + 1)
j = np.random.randint(0,img.size[1] - h + 1)
img = img.crop((i,j,i+w,j+h))
img=img.resize((train_params['img_size'][1],train_params['img_size'][2]),Image.BILINEAR)
return img
def rotate_img(img,angle=[-14,15]):
'''
rotate the img
'''
angle = np.random.randint(*angle)
img= img.rotate(angle)
return img
def random_brightness(img):
'''
Adjust the image brightness.
The probability and delta values are taken from train_params in config.py.
'''
# probability
prob = np.random.uniform(0,1)
if prob < train_params['img_process_method']['brightness_prob']:
brightness_delta= train_params['img_process_method']['brightness_delta']
delta = np.random.uniform(-brightness_delta,+brightness_delta)+1
img=ImageEnhance.Brightness(img).enhance(delta)
return img
def random_contrast(img):
'''
Adjust the image contrast.
The probability and delta values are taken from train_params in config.py.
'''
# probability
prob = np.random.uniform(0,1)
if prob < train_params['img_process_method']['contrast_prob']:
contrast_delta= train_params['img_process_method']['contrast_delta']
delta = np.random.uniform(-contrast_delta,+contrast_delta)+1
img=ImageEnhance.Contrast(img).enhance(delta)
return img
def random_saturation(img):
'''
Adjust the image saturation.
The probability and delta values are taken from train_params in config.py.
'''
# probability
prob = np.random.uniform(0,1)
if prob < train_params['img_process_method']['saturation_prob']:
saturation_delta= train_params['img_process_method']['saturation_delta']
delta = np.random.uniform(-saturation_delta,+saturation_delta)+1
img=ImageEnhance.Color(img).enhance(delta)
return img
def random_hue(img):
'''
Adjust the image hue.
The probability and delta values are taken from train_params in config.py.
'''
# probability
prob = np.random.uniform(0,1)
if prob < train_params['img_process_method']['hue_prob']:
hue_delta= train_params['img_process_method']['hue_delta']
delta = np.random.uniform(-hue_delta,+hue_delta)
img_hsv = np.array(img.convert('HSV'))
img_hsv[:,:,0]=img_hsv[:,:,0]+delta
img=Image.fromarray(img_hsv,mode='HSV').convert('RGB')
return img
def distort_color(img):
'''
Randomly apply the color distortions in one of several orders (or not at all).
'''
prob = np.random.uniform(0,1)
if prob< 0.35:
img=random_brightness(img)
img=random_contrast(img)
img=random_saturation(img)
img=random_hue(img)
elif prob < 0.7:
img = random_brightness(img)
img = random_saturation(img)
img = random_hue(img)
img = random_contrast(img)
return img
def custom_img_reader(file_list,mode='train'):
with codecs.open(file_list) as flist:
lines=[line.strip() for line in flist]
def reader():
# shuffle the data
np.random.shuffle(lines) ## shuffles lines in place, so no re-assignment is needed
if mode == 'train':
for line in lines:
img_path,label = line.split()
img=Image.open(img_path)
plt.imshow(img)
try:
if img.mode!='RGB':
img=img.convert('RGB')
if train_params['img_process_method']['is_distort']:
img = distort_color(img)
if train_params['img_process_method']['is_rotate']:
img = rotate_img(img)
if train_params['img_process_method']['is_crop']:
img = random_crop(img)
if train_params['img_process_method']['is_flip']:
prob = np.random.randint(0,2)
if prob == 0:
img=img.transpose(Image.FLIP_LEFT_RIGHT)
img=np.array(img).astype(np.float32)
img -= train_params['mean_rgb']
img=img.transpose((2,0,1)) # HWC to CHW: deep-learning frameworks expect the channel dimension first
img *= 0.007843
x_data = img.astype(np.float32)
y_data = np.array([label]).astype(np.int64)
yield x_data,y_data
except Exception as e:
print('failed to process %s: %r' % (img_path, e))
pass
if mode == 'eval':
for line in lines:
img_path,label = line.split()
img=Image.open(img_path)
if img.mode!='RGB':
img=img.convert('RGB')
img=resize_img(img,train_params['img_size'])
img=np.array(img).astype(np.float32)
img -= train_params['mean_rgb']
img=img.transpose((2,0,1))
img *= 2./255
yield img,int(label)
if mode == 'test':
for line in lines:
img_path = line
img=Image.open(img_path)
if img.mode!='RGB':
img=img.convert('RGB')
img=resize_img(img,train_params['img_size'])
img=np.array(img).astype(np.float32)
img -= train_params['mean_rgb']
img=img.transpose((2,0,1))
img *= 2./255
yield img
return reader
if __name__=='__main__':
## test the correctness of the augmentation helpers
test_img_path='/home/aistudio/data/data504/vegetables/train_imgs/1515827042897.jpg'
img=Image.open(test_img_path)
plt.subplot(1,2,1)
plt.imshow(img)
img=rotate_img(img) # distort(img) random_crop(img) random_crop_scale
plt.subplot(1,2,2)
plt.imshow(img)
img=img.transpose(Image.FLIP_LEFT_RIGHT)
plt.imshow(img)
img=np.array(img).astype(np.float32)
img -= train_params['mean_rgb']
img=img.transpose((2,0,1)) # HWC to CHW: deep-learning frameworks expect the channel dimension first
img *= 0.007843
## test the correctness of the reader
img_list_path='/home/aistudio/data/data504/vegetables/train_labels.txt'
reader = custom_img_reader(img_list_path)
print(next(reader())[0].shape)
|
py | b4098dd7cce0ad52f6d68a7d26bcdd258534a3e0 | import torch
from net import BiLSTM_CRF, BERT_CRF
from flair.embeddings import BertEmbeddings
from flair.data import Sentence
import pickle
import os
import re
DIR = os.path.dirname(os.path.abspath(__file__))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EMBEDDING_DIM = 256
HIDDEN_DIM = 256
num_words = 3000
START_TAG = "<START>"
STOP_TAG = "<STOP>"
tag_to_ix = {
START_TAG: 0,
STOP_TAG: 1,
'B': 2, 'M': 3, 'E': 4,
'S': 5
}
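# B, M and E mark the beginning, middle and end of a multi-character word, and S marks a
# single-character word (the standard BMES tagging scheme for Chinese word segmentation)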
ix_to_tag = {tag_to_ix[ix]: ix for ix in tag_to_ix}
with open(DIR + '/data/word_index.pkl', 'rb') as f:
word_index = pickle.load(f)
def test(dir_model, feature='LSTM'):
if feature == 'BERT':
model = BERT_CRF(tag_to_ix=tag_to_ix)
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint)
model = model.to(device)
# load the pre-trained BERT embedding model
embedding = BertEmbeddings('bert-base-chinese', '-1', 'mean')
while True:
print('Enter text (type "quit" to finish):\n')
text = input()
if text != 'quit':
with torch.no_grad():
# encode the input text
x_test = Sentence(' '.join(text.replace(' ', '|')))
embedding.embed(x_test)
x_test = torch.Tensor(([token.embedding.numpy() for token in x_test])).to(device)
# run the model and output the predicted tag sequence
score, test_tag = model(x_test.view([-1, 768]))
tag = [ix_to_tag[ix] for ix in test_tag]
print(tag)
result = re.finditer("S|BM*E", ''.join(tag))
# locate the entities, i.e. the "words"
result = [[m.start(), m.end()] for m in result]
text_cut = ''
for i in result:
text_cut += ('/' + text[i[0]:i[1]])
print('\nSegmentation result: ', text_cut, '\n')
else:
break
else:
# load the trained model
model = BiLSTM_CRF(vocab_size=num_words + 2,
tag_to_ix=tag_to_ix,
embedding_dim=EMBEDDING_DIM,
hidden_dim=HIDDEN_DIM)
checkpoint = torch.load(dir_model)
model.load_state_dict(checkpoint)
model = model.to(device)
while True:
print('Enter text (type "quit" to finish):\n')
text = input()
if text != 'quit':
with torch.no_grad():
# encode the input text
x_test = [word_index.get(char, num_words + 1) for char in text]
x_test = torch.LongTensor(x_test).to(device)
# run the model and output the predicted tag sequence
score, test_tag = model(x_test)
tag = [ix_to_tag[ix] for ix in test_tag]
result = re.finditer("S|BM*E", ''.join(tag))
# locate the entities, i.e. the "words"
result = [[m.start(), m.end()] for m in result]
text_cut = ''
for i in result:
text_cut += (' ' + text[i[0]:i[1]] + ' ')
print('\nSegmentation result: ', text_cut, '\n')
else:
break
if __name__ == '__main__':
# _dir_model = DIR + '/model/LSTM_003.pth'
# test(_dir_model, 'LSTM')
_dir_model = DIR + '/model/BERT_002.pth'
test(_dir_model, 'BERT')
|
py | b4098ecbf3e60c4fe589822abd33e9849c4f4db0 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class IncomeTaxSlabOtherCharges(Document):
pass
|
py | b4098f15ae6263e5a9846c4761a0c270ea7bf090 | # -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
def firefox_installed():
try:
Browser("firefox")
except OSError:
return False
return True
@unittest.skipIf(not firefox_installed(), 'firefox is not installed')
class FirefoxBrowserTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("firefox")
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'mockfile.txt'
)
self.browser.attach_file('file', file_path)
self.browser.find_by_name('upload').click()
html = self.browser.html
assert 'text/plain' in html
assert open(file_path).read().encode('utf-8') in html
def test_should_support_with_statement(self):
with Browser('firefox') as internet:
pass
@unittest.skipIf(not firefox_installed(), 'firefox is not installed')
class FirefoxWithExtensionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
extension_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'firebug.xpi'
)
cls.browser = Browser('firefox', extensions=[extension_path])
def test_create_a_firefox_instance_with_extension(self):
"should be able to load an extension"
self.assertIn(
'[email protected]',
os.listdir(self.browser.driver.profile.extensionsDir)
)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
@unittest.skipIf(not firefox_installed(), 'firefox is not installed')
class FirefoxBrowserProfilePreferencesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
preferences = {
'dom.max_script_run_time': 360,
'devtools.inspector.enabled': True,
}
cls.browser = Browser("firefox", profile_preferences=preferences)
def test_preference_set(self):
preferences = self.browser.driver.profile.default_preferences
self.assertIn('dom.max_script_run_time', preferences)
value = preferences.get('dom.max_script_run_time')
self.assertEqual(int(value), 360)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
@unittest.skipIf(not firefox_installed(), 'firefox is not installed')
class FirefoxBrowserFullScreenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("firefox", fullscreen=True)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
|
py | b40990aab4caf811b7e7135770dabddb8dc16ff1 | """
Django settings for Filter project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jhka^j+$_+q+240^n$t(s!^8&!7#6nuc4)5o9(%vbby6$t!o32'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'filter',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Filter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Filter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'filter/static/filter')
|
py | b409921c228c007e6af86257117bf469a7f01b38 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from typing import Any, Dict
from google.cloud.tasks_v2.types import Queue, Task
from airflow.gcp.hooks.tasks import CloudTasksHook
from tests.compat import mock
from tests.gcp.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
API_RESPONSE = {} # type: Dict[Any, Any]
PROJECT_ID = "test-project"
LOCATION = "asia-east2"
FULL_LOCATION_PATH = "projects/test-project/locations/asia-east2"
QUEUE_ID = "test-queue"
FULL_QUEUE_PATH = "projects/test-project/locations/asia-east2/queues/test-queue"
TASK_NAME = "test-task"
FULL_TASK_PATH = (
"projects/test-project/locations/asia-east2/queues/test-queue/tasks/test-task"
)
class TestCloudTasksHook(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.gcp.hooks.base.GoogleCloudBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudTasksHook(gcp_conn_id="test")
@mock.patch("airflow.gcp.hooks.tasks.CloudTasksHook.client_info", new_callable=mock.PropertyMock)
@mock.patch("airflow.gcp.hooks.tasks.CloudTasksHook._get_credentials")
@mock.patch("airflow.gcp.hooks.tasks.CloudTasksClient")
def test_cloud_tasks_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value,
client_info=mock_client_info.return_value
)
self.assertEqual(mock_client.return_value, result)
self.assertEqual(self.hook._client, result)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.create_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_create_queue(self, get_conn):
result = self.hook.create_queue(
location=LOCATION,
task_queue=Queue(),
queue_name=QUEUE_ID,
project_id=PROJECT_ID,
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.create_queue.assert_called_once_with(
parent=FULL_LOCATION_PATH,
queue=Queue(name=FULL_QUEUE_PATH),
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.update_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_update_queue(self, get_conn):
result = self.hook.update_queue(
task_queue=Queue(state=3),
location=LOCATION,
queue_name=QUEUE_ID,
project_id=PROJECT_ID,
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.update_queue.assert_called_once_with(
queue=Queue(name=FULL_QUEUE_PATH, state=3),
update_mask=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.get_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_get_queue(self, get_conn):
result = self.hook.get_queue(
location=LOCATION, queue_name=QUEUE_ID, project_id=PROJECT_ID
)
self.assertIs(result, API_RESPONSE)
get_conn.return_value.get_queue.assert_called_once_with(
name=FULL_QUEUE_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.list_queues.return_value": API_RESPONSE}, # type: ignore
)
def test_list_queues(self, get_conn):
result = self.hook.list_queues(location=LOCATION, project_id=PROJECT_ID)
self.assertEqual(result, list(API_RESPONSE))
get_conn.return_value.list_queues.assert_called_once_with(
parent=FULL_LOCATION_PATH,
filter_=None,
page_size=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.delete_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_delete_queue(self, get_conn):
result = self.hook.delete_queue(
location=LOCATION, queue_name=QUEUE_ID, project_id=PROJECT_ID
)
self.assertEqual(result, None)
get_conn.return_value.delete_queue.assert_called_once_with(
name=FULL_QUEUE_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.purge_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_purge_queue(self, get_conn):
result = self.hook.purge_queue(
location=LOCATION, queue_name=QUEUE_ID, project_id=PROJECT_ID
)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.purge_queue.assert_called_once_with(
name=FULL_QUEUE_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.pause_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_pause_queue(self, get_conn):
result = self.hook.pause_queue(
location=LOCATION, queue_name=QUEUE_ID, project_id=PROJECT_ID
)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.pause_queue.assert_called_once_with(
name=FULL_QUEUE_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.resume_queue.return_value": API_RESPONSE}, # type: ignore
)
def test_resume_queue(self, get_conn):
result = self.hook.resume_queue(
location=LOCATION, queue_name=QUEUE_ID, project_id=PROJECT_ID
)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.resume_queue.assert_called_once_with(
name=FULL_QUEUE_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.create_task.return_value": API_RESPONSE}, # type: ignore
)
def test_create_task(self, get_conn):
result = self.hook.create_task(
location=LOCATION,
queue_name=QUEUE_ID,
task=Task(),
project_id=PROJECT_ID,
task_name=TASK_NAME,
)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.create_task.assert_called_once_with(
parent=FULL_QUEUE_PATH,
task=Task(name=FULL_TASK_PATH),
response_view=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.get_task.return_value": API_RESPONSE}, # type: ignore
)
def test_get_task(self, get_conn):
result = self.hook.get_task(
location=LOCATION,
queue_name=QUEUE_ID,
task_name=TASK_NAME,
project_id=PROJECT_ID,
)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.get_task.assert_called_once_with(
name=FULL_TASK_PATH,
response_view=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.list_tasks.return_value": API_RESPONSE}, # type: ignore
)
def test_list_tasks(self, get_conn):
result = self.hook.list_tasks(
location=LOCATION, queue_name=QUEUE_ID, project_id=PROJECT_ID
)
self.assertEqual(result, list(API_RESPONSE))
get_conn.return_value.list_tasks.assert_called_once_with(
parent=FULL_QUEUE_PATH,
response_view=None,
page_size=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.delete_task.return_value": API_RESPONSE}, # type: ignore
)
def test_delete_task(self, get_conn):
result = self.hook.delete_task(
location=LOCATION,
queue_name=QUEUE_ID,
task_name=TASK_NAME,
project_id=PROJECT_ID,
)
self.assertEqual(result, None)
get_conn.return_value.delete_task.assert_called_once_with(
name=FULL_TASK_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch( # type: ignore
"airflow.gcp.hooks.tasks.CloudTasksHook.get_conn",
**{"return_value.run_task.return_value": API_RESPONSE}, # type: ignore
)
def test_run_task(self, get_conn):
result = self.hook.run_task(
location=LOCATION,
queue_name=QUEUE_ID,
task_name=TASK_NAME,
project_id=PROJECT_ID,
)
self.assertEqual(result, API_RESPONSE)
get_conn.return_value.run_task.assert_called_once_with(
name=FULL_TASK_PATH,
response_view=None,
retry=None,
timeout=None,
metadata=None,
)
|
py | b40994597095661eb986c32cc2b211975b5bb063 | import pytest
from app.utils.user_permissions import (
translate_permissions_from_db_to_ui,
translate_permissions_from_ui_to_db,
)
@pytest.mark.parametrize('db_permissions,expected_ui_permissions', [
(
['approve_broadcasts', 'reject_broadcasts', 'cancel_broadcasts'],
{'approve_broadcasts'},
),
(
['manage_templates', 'create_broadcasts', 'reject_broadcasts', 'cancel_broadcasts'],
{'create_broadcasts', 'manage_templates'},
),
(
['manage_templates'],
{'manage_templates'},
),
(
['create_broadcasts'],
set(),
),
(
['send_texts', 'send_emails', 'send_letters', 'manage_templates', 'some_unknown_permission'],
{'send_messages', 'manage_templates', 'some_unknown_permission'},
),
])
def test_translate_permissions_from_db_to_ui(
db_permissions,
expected_ui_permissions,
):
ui_permissions = translate_permissions_from_db_to_ui(db_permissions)
assert ui_permissions == expected_ui_permissions
def test_translate_permissions_from_ui_to_db():
ui_permissions = ['send_messages', 'manage_templates', 'some_unknown_permission']
db_permissions = translate_permissions_from_ui_to_db(ui_permissions)
assert db_permissions == {
'send_texts', 'send_emails', 'send_letters', 'manage_templates', 'some_unknown_permission'
}
|
py | b4099540b41cd040001b18d9569ba39de9d44821 | import os
import sys
import shlex
import click
# CREDIT: https://gist.github.com/bortzmeyer/1284249#gistcomment-3074036
def create_ssh(ip: str, port: str, user: str, debug: bool, cd_folder: str = None, shell: str = 'bash'):
"""Create a ssh session"""
ssh = f'/usr/bin/ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR -p {port} {user}@{ip}'
if cd_folder:
ssh = ssh + f' -t "cd {cd_folder}; {shell} --login"'
pid = os.fork()
if pid == 0: # a child process
if debug:
print(f"{ssh}")
cmd = shlex.split(ssh)
os.execv(cmd[0], cmd)
os.wait()
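# Hedged usage sketch (the host, port, user and folder below are placeholders, not values from this repo):
#   create_ssh('192.0.2.10', '22', 'deploy', debug=True, cd_folder='/srv/app')
# The call forks, exec's /usr/bin/ssh in the child, and waits for it in the parent process.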
def do_scp(ip: str, port: str, user: str, src: str, target: str, show: bool, debug: bool):
"""Create a ssh session"""
scp = f'/usr/bin/scp -P {port} -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR {src} {user}@{ip}:{target}'
if show:
print(scp)
return
pid = os.fork()
if pid == 0: # a child process
if debug:
print(f"{scp}")
cmd = shlex.split(scp)
os.execv(cmd[0], cmd)
os.wait() |
py | b40996907b139c78a0d7b8bd8ea01be577f78c45 | # -*- coding: utf-8 -*-
"""
Identifying function names in a script
======================================
This demonstrates how Sphinx-Gallery identifies function names to figure out
which functions are called in the script and to which module do they belong.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
import os # noqa, analysis:ignore
import matplotlib.pyplot as plt
import sphinx_gallery.backreferences as spback
filename = spback.__file__.replace('.pyc', '.py')
names = spback.identify_names(filename)
figheight = len(names) + .5
fontsize = 20
###############################################################################
# Sphinx-Gallery examines both the executed code itself, as well as the
# documentation blocks (such as this one, or the top-level one),
# to find backreferences. This means that by writing :obj:`numpy.sin`
# and :obj:`numpy.exp` here, a backreference will be created even though
# they are not explicitly used in the code. This is useful in particular when
# functions return classes -- if you add them to the documented blocks of
# examples that use them, they will be shown in the backreferences.
fig = plt.figure(figsize=(7.5, 8))
for i, (name, obj) in enumerate(names.items()):
fig.text(0.55, (float(len(names)) - 0.5 - i) / figheight,
name,
ha="right",
size=fontsize,
transform=fig.transFigure,
bbox=dict(boxstyle='square', fc="w", ec="k"))
fig.text(0.6, (float(len(names)) - 0.5 - i) / figheight,
obj["module"],
ha="left",
size=fontsize,
transform=fig.transFigure,
bbox=dict(boxstyle='larrow', fc="w", ec="k"))
#
plt.draw()
plt.show()
|
py | b40996925cb124c718d1108bbab1f5006f4e8769 | from itertools import izip
class Plug(object):
"""
Helper class to allow seamless value assignment from one plug to another,
while correctly handling and abstracting away plug type.
"self.type" returns the type of the plug.
This is necessary to determine how to read and write the plug.
"self.value" returns the value of the plug.
"self.value = otherValue" will set the value of the plug to otherValue.
This mutator assumes otherValue to be the same type as self.type
"self.overrideType" returns the type of the override that should be created to override this plug.
"""
def __init__(self, plugOrNode, attrName=None):
"""
Constructors:
Plug(MPlug)
Plug(string (full plug name))
Plug(MObject, MObject)
Plug(MObject, string (attribute name))
Plug(string (node name), string (attribute name))
"""
pass
def __str__(self):
pass
def accepts(self, other):
"""
Returns true if plug would accept a connection with other plug
i.e. plug and other plug are type compatible for connection.
"""
pass
def applyOverrideType(self, overType):
pass
def attribute(self):
"""
Returns the attribute (MFnAttribute) of the plug
"""
pass
def cloneAttribute(self, nodeObj, longName, shortName):
"""
Creates a new attribute on a node by cloning this plug's attribute.
"""
pass
def copyValue(self, other):
"""
Sets the value of plug 'self' to the value contained in plug 'other'
The 'other' plug can be either a Plug or a MPlug.
"""
pass
def createAttributeFrom(self, nodeObj, longName, shortName, limits=None):
"""
Creates a new attribute on a node by cloning this plug's attribute.
Note: None for a limit value means that there is no limit. For example,
if min is None, it means that there is no minimum limit.
"""
pass
def getAttributeLimits(self):
"""
Get the limits of the plug
"""
pass
def isOvrSupported(self):
pass
def localizedTypeString(self):
pass
def overrideType(self, overType):
pass
def setAttributeLimits(self, limits):
pass
def createAttribute(nodeObj, longName, shortName, dict):
"""
Create a new attribute on a node using the given names and properties dictionary.
Returns an MObject to the new attribute. Use MFnDependencyNode.addAttribute()
to add the returned object as a new dynamic attribute on a node.
"""
pass
def getNames(plugName):
pass
__dict__ = None
__weakref__ = None
attributeName = None
hasLimits = None
isConnectable = None
isLocked = None
isNumeric = None
isUnit = None
isValid = None
isVector = None
name = None
nodeName = None
plug = None
type = None
uiUnitValue = None
value = None
kAngle = 12
kArray = 14
kBool = 5
kByte = 4
kColor = 6
kDistance = 13
kDouble = 2
kEnum = 7
kFloat = 1
kInt = 3
kInvalid = 0
kLast = 15
kMessage = 10
kObject = 9
kString = 8
kTime = 11
def toInternalUnits(type, value):
pass
def toUiUnits(type, value):
pass
def findPlug(node, attr):
"""
Return a Plug instance if the MPlug was found, None otherwise.
"""
pass
kPlugHasConnectedParent = []
kPlugHasConnectedChild = []
kUnsupportedAttribute = []
kCompoundTypeStr = []
kUnknownType = []
kArityMismatch = []
kNotOverridablePlug = []
kPlugWithoutLimits = []
kVectorTypeStr = []
|
py | b40997258c2555737b2f8296ef6bdf687a36d675 | # -*- coding: utf-8 -*-
from collections import defaultdict, deque
import random
import select
import sys
import time
import hashlib
try:
unicode
except NameError:
unicode = str
basestring = str
from metaflow.exception import MetaflowException
from metaflow.metaflow_config import AWS_SANDBOX_ENABLED
class BatchClient(object):
def __init__(self):
from ..aws_client import get_aws_client
self._client = get_aws_client('batch')
def active_job_queues(self):
paginator = self._client.get_paginator('describe_job_queues')
return (
queue['jobQueueName']
for page in paginator.paginate()
for queue in page['jobQueues']
if queue['state'] == 'ENABLED' and queue['status'] == 'VALID'
)
def unfinished_jobs(self):
queues = self.active_job_queues()
return (
job
for queue in queues
for status in ['SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING', 'RUNNING']
for page in self._client.get_paginator('list_jobs').paginate(
jobQueue=queue, jobStatus=status
)
for job in page['jobSummaryList']
)
def describe_jobs(self, job_ids):
for jobIds in [job_ids[i:i+100] for i in range(0, len(job_ids), 100)]:
for jobs in self._client.describe_jobs(jobs=jobIds)['jobs']:
yield jobs
def describe_job_queue(self, job_queue):
pages = self._client.get_paginator('describe_job_queues').paginate(
jobQueues=[job_queue], maxResults=1)
return next(iter(pages))['jobQueues'][0]
def job(self):
return BatchJob(self._client)
def attach_job(self, job_id):
job = RunningJob(job_id, self._client)
return job.update()
def region(self):
return self._client._client_config.region_name
class BatchJobException(MetaflowException):
headline = 'AWS Batch job error'
class BatchJob(object):
def __init__(self, client):
self._client = client
tree = lambda: defaultdict(tree)
self.payload = tree()
def execute(self):
if self._image is None:
raise BatchJobException(
'Unable to launch AWS Batch job. No docker image specified.'
)
if self._iam_role is None:
raise BatchJobException(
'Unable to launch AWS Batch job. No IAM role specified.'
)
if 'jobDefinition' not in self.payload:
self.payload['jobDefinition'] = \
self._register_job_definition(self._image,
self._iam_role,
self.payload['job_queue'],
self._execution_role,
self._shared_memory,
self._max_swap,
self._swappiness,
None)  # host_volumes: none are configured on this code path
response = self._client.submit_job(**self.payload)
job = RunningJob(response['jobId'], self._client)
return job.update()
def _register_job_definition(self,
image,
job_role,
job_queue,
execution_role,
shared_memory,
max_swap,
swappiness,
host_volumes):
# identify platform from any compute environment associated with the
# queue
if AWS_SANDBOX_ENABLED:
# within the Metaflow sandbox, we can't execute the
# describe_job_queues directive for AWS Batch to detect compute
# environment platform, so let's just default to EC2 for now.
platform = "EC2"
else:
response = self._client.describe_job_queues(jobQueues=[job_queue])
if len(response['jobQueues']) == 0:
raise BatchJobException(
'AWS Batch Job Queue %s not found.' % job_queue)
compute_environment = response['jobQueues'][0] \
['computeEnvironmentOrder'][0] \
['computeEnvironment']
response = self._client.describe_compute_environments(
computeEnvironments=[compute_environment])
platform = response['computeEnvironments'][0] \
['computeResources']['type']
# compose job definition
job_definition = {
'type': 'container',
'containerProperties': {
'image': image,
'jobRoleArn': job_role,
'command': ['echo', 'hello world'],
'resourceRequirements': [
{
'value': '1',
'type': 'VCPU'
},
{
'value': '4096',
'type': 'MEMORY'
}
]
},
# This propagates the AWS Batch resource tags to the underlying
# ECS tasks.
'propagateTags': True
}
if platform == 'FARGATE' or platform == 'FARGATE_SPOT':
if execution_role is None:
raise BatchJobException(
'No AWS Fargate task execution IAM role found. Please see '
'https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html '
'and set the role as METAFLOW_ECS_FARGATE_EXECUTION_ROLE '
'environment variable.')
job_definition['containerProperties']['executionRoleArn'] = \
execution_role
job_definition['platformCapabilities'] = ['FARGATE']
job_definition['containerProperties']['networkConfiguration'] = \
{'assignPublicIp': 'ENABLED'}
if platform == 'EC2' or platform == 'SPOT':
if 'linuxParameters' not in job_definition['containerProperties']:
job_definition['containerProperties']['linuxParameters'] = {}
if shared_memory is not None:
if not (isinstance(shared_memory, (int, unicode, basestring)) and
int(shared_memory) > 0):
raise BatchJobException(
'Invalid shared memory size value ({}); '
'it should be greater than 0'.format(shared_memory))
else:
job_definition['containerProperties'] \
['linuxParameters']['sharedMemorySize'] = int(shared_memory)
if swappiness is not None:
if not (isinstance(swappiness, (int, unicode, basestring)) and
int(swappiness) >= 0 and int(swappiness) < 100):
raise BatchJobException(
'Invalid swappiness value ({}); '
'(should be 0 or greater and less than 100)'.format(swappiness))
else:
job_definition['containerProperties'] \
['linuxParameters']['swappiness'] = int(swappiness)
if max_swap is not None:
if not (isinstance(max_swap, (int, unicode, basestring)) and
int(max_swap) >= 0):
raise BatchJobException(
'Invalid swappiness value ({}); '
'(should be 0 or greater)'.format(max_swap))
else:
job_definition['containerProperties'] \
['linuxParameters']['maxSwap'] = int(max_swap)
if host_volumes:
job_definition['containerProperties']['volumes'] = []
job_definition['containerProperties']['mountPoints'] = []
for host_path in host_volumes:
name = host_path.replace('/', '_').replace('.', '_')
job_definition['containerProperties']['volumes'].append(
{'name': name, 'host': {'sourcePath': host_path}}
)
job_definition['containerProperties']['mountPoints'].append(
{"sourceVolume": name, "containerPath": host_path}
)
# check if job definition already exists
def_name = 'metaflow_%s' % \
hashlib.sha224(str(job_definition).encode('utf-8')).hexdigest()
payload = {'jobDefinitionName': def_name, 'status': 'ACTIVE'}
response = self._client.describe_job_definitions(**payload)
if len(response['jobDefinitions']) > 0:
return response['jobDefinitions'][0]['jobDefinitionArn']
# else create a job definition
job_definition['jobDefinitionName'] = def_name
try:
response = self._client.register_job_definition(**job_definition)
except Exception as ex:
if type(ex).__name__ == 'ParamValidationError' and \
(platform == 'FARGATE' or platform == 'FARGATE_SPOT'):
raise BatchJobException(
'%s \nPlease ensure you have installed boto3>=1.16.29 if '
'you intend to launch AWS Batch jobs on AWS Fargate '
'compute platform.' % ex)
else:
raise ex
return response['jobDefinitionArn']
def job_def(self,
image,
iam_role,
job_queue,
execution_role,
shared_memory,
max_swap,
swappiness,
host_volumes):
self.payload['jobDefinition'] = \
self._register_job_definition(image,
iam_role,
job_queue,
execution_role,
shared_memory,
max_swap,
swappiness,
host_volumes)
return self
def job_name(self, job_name):
self.payload['jobName'] = job_name
return self
def job_queue(self, job_queue):
self.payload['jobQueue'] = job_queue
return self
def image(self, image):
self._image = image
return self
def iam_role(self, iam_role):
self._iam_role = iam_role
return self
def execution_role(self, execution_role):
self._execution_role = execution_role
return self
def shared_memory(self, shared_memory):
self._shared_memory = shared_memory
return self
def max_swap(self, max_swap):
self._max_swap = max_swap
return self
def swappiness(self, swappiness):
self._swappiness = swappiness
return self
def command(self, command):
if 'command' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['command'] = []
self.payload['containerOverrides']['command'].extend(command)
return self
def cpu(self, cpu):
if not (isinstance(cpu, (int, unicode, basestring, float)) and float(cpu) > 0):
raise BatchJobException(
'Invalid CPU value ({}); it should be greater than 0'.format(cpu))
if 'resourceRequirements' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['resourceRequirements'] = []
self.payload['containerOverrides']['resourceRequirements'].append(
{'value' : str(cpu), 'type': 'VCPU'}
)
return self
def memory(self, mem):
if not (isinstance(mem, (int, unicode, basestring)) and int(mem) > 0):
raise BatchJobException(
'Invalid memory value ({}); it should be greater than 0'.format(mem))
if 'resourceRequirements' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['resourceRequirements'] = []
self.payload['containerOverrides']['resourceRequirements'].append(
{'value' : str(mem), 'type': 'MEMORY'}
)
return self
def gpu(self, gpu):
if not (isinstance(gpu, (int, unicode, basestring))):
raise BatchJobException(
'invalid gpu value: ({}) (should be 0 or greater)'.format(gpu))
if int(gpu) > 0:
if 'resourceRequirements' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['resourceRequirements'] = []
self.payload['containerOverrides']['resourceRequirements'].append(
{'type': 'GPU', 'value': str(gpu)}
)
return self
def environment_variable(self, name, value):
if 'environment' not in self.payload['containerOverrides']:
self.payload['containerOverrides']['environment'] = []
value = str(value)
if value.startswith("$$.") or value.startswith("$."):
# Context Object substitution for AWS Step Functions
# https://docs.aws.amazon.com/step-functions/latest/dg/input-output-contextobject.html
self.payload['containerOverrides']['environment'].append(
{'name': name, 'value.$': value}
)
else:
self.payload['containerOverrides']['environment'].append(
{'name': name, 'value': value}
)
return self
def timeout_in_secs(self, timeout_in_secs):
self.payload['timeout']['attemptDurationSeconds'] = timeout_in_secs
return self
def tag(self, key, value):
self.payload['tags'][key] = str(value)
return self
def parameter(self, key, value):
self.payload['parameters'][key] = str(value)
return self
def attempts(self, attempts):
self.payload['retryStrategy']['attempts'] = attempts
return self
class Throttle(object):
def __init__(self, delta_in_secs=1, num_tries=20):
self.delta_in_secs = delta_in_secs
self.num_tries = num_tries
self._now = None
self._reset()
def _reset(self):
self._tries_left = self.num_tries
self._wait = self.delta_in_secs
def __call__(self, func):
def wrapped(*args, **kwargs):
now = time.time()
if self._now is None or (now - self._now > self._wait):
self._now = now
try:
func(*args, **kwargs)
self._reset()
except TriableException as ex:
self._tries_left -= 1
if self._tries_left == 0:
raise ex.ex
self._wait = (self.delta_in_secs*1.2)**(self.num_tries-self._tries_left) + \
random.randint(0, 3*self.delta_in_secs)
return wrapped
class TriableException(Exception):
def __init__(self, ex):
self.ex = ex
class RunningJob(object):
NUM_RETRIES = 8
def __init__(self, id, client):
self._id = id
self._client = client
self._data = {}
def __repr__(self):
return '{}(\'{}\')'.format(self.__class__.__name__, self._id)
def _apply(self, data):
self._data = data
@Throttle()
def _update(self):
try:
data = self._client.describe_jobs(jobs=[self._id])
except self._client.exceptions.ClientError as err:
code = err.response['ResponseMetadata']['HTTPStatusCode']
if code == 429 or code >= 500:
raise TriableException(err)
raise err
# There have been sporadic reports of empty responses to the
# batch.describe_jobs API call, which can potentially happen if the
# batch.submit_job API call is not strongly consistent(¯\_(ツ)_/¯).
# We add a check here to guard against that. The `update()` call
# will ensure that we poll `batch.describe_jobs` until we get a
# satisfactory response at least once through out the lifecycle of
# the job.
if len(data['jobs']) == 1:
self._apply(data['jobs'][0])
def update(self):
self._update()
while not self._data:
self._update()
return self
@property
def id(self):
return self._id
@property
def info(self):
if not self._data:
self.update()
return self._data
@property
def job_name(self):
return self.info['jobName']
@property
def job_queue(self):
return self.info['jobQueue']
@property
def status(self):
if not self.is_done:
self.update()
return self.info['status']
@property
def status_reason(self):
return self.info.get('statusReason')
@property
def created_at(self):
return self.info['createdAt']
@property
def stopped_at(self):
return self.info.get('stoppedAt', 0)
@property
def is_done(self):
if self.stopped_at == 0:
self.update()
return self.stopped_at > 0
@property
def is_running(self):
return self.status == 'RUNNING'
@property
def is_successful(self):
return self.status == 'SUCCEEDED'
@property
def is_crashed(self):
# TODO: Check statusmessage to find if the job crashed instead of failing
return self.status == 'FAILED'
@property
def reason(self):
return self.info['container'].get('reason')
@property
def status_code(self):
if not self.is_done:
self.update()
return self.info['container'].get('exitCode')
def wait_for_running(self):
if not self.is_running and not self.is_done:
BatchWaiter(self._client).wait_for_running(self.id)
@property
def log_stream_name(self):
return self.info['container'].get('logStreamName')
def logs(self):
def get_log_stream(job):
log_stream_name = job.log_stream_name
if log_stream_name:
return BatchLogs('/aws/batch/job', log_stream_name, sleep_on_no_data=1)
else:
return None
log_stream = None
while True:
if self.is_running or self.is_done or self.is_crashed:
log_stream = get_log_stream(self)
break
elif not self.is_done:
self.wait_for_running()
if log_stream is None:
return
exception = None
for i in range(self.NUM_RETRIES + 1):
try:
check_after_done = 0
for line in log_stream:
if not line:
if self.is_done:
if check_after_done > 1:
return
check_after_done += 1
else:
pass
else:
i = 0
yield line
return
except Exception as ex:
exception = ex
if self.is_crashed:
break
#sys.stderr.write(repr(ex) + '\n')
if i < self.NUM_RETRIES:
time.sleep(2 ** i + random.randint(0, 5))
raise BatchJobException(repr(exception))
def kill(self):
if not self.is_done:
self._client.terminate_job(
jobId=self._id, reason='Metaflow initiated job termination.')
return self.update()
class BatchWaiter(object):
def __init__(self, client):
try:
from botocore import waiter
except:
raise BatchJobException(
'Could not import module \'botocore\' which '
'is required for Batch jobs. Install botocore '
'first.'
)
self._client = client
self._waiter = waiter
def wait_for_running(self, job_id):
model = self._waiter.WaiterModel(
{
'version': 2,
'waiters': {
'JobRunning': {
'delay': 1,
'operation': 'DescribeJobs',
'description': 'Wait until job starts running',
'maxAttempts': 1000000,
'acceptors': [
{
'argument': 'jobs[].status',
'expected': 'SUCCEEDED',
'matcher': 'pathAll',
'state': 'success',
},
{
'argument': 'jobs[].status',
'expected': 'FAILED',
'matcher': 'pathAny',
'state': 'success',
},
{
'argument': 'jobs[].status',
'expected': 'RUNNING',
'matcher': 'pathAny',
'state': 'success',
},
],
}
},
}
)
self._waiter.create_waiter_with_client('JobRunning', model, self._client).wait(
jobs=[job_id]
)
class BatchLogs(object):
def __init__(self, group, stream, pos=0, sleep_on_no_data=0):
from ..aws_client import get_aws_client
self._client = get_aws_client('logs')
self._group = group
self._stream = stream
self._pos = pos
self._sleep_on_no_data = sleep_on_no_data
self._buf = deque()
self._token = None
def _get_events(self):
try:
if self._token:
response = self._client.get_log_events(
logGroupName=self._group,
logStreamName=self._stream,
startTime=self._pos,
nextToken=self._token,
startFromHead=True,
)
else:
response = self._client.get_log_events(
logGroupName=self._group,
logStreamName=self._stream,
startTime=self._pos,
startFromHead=True,
)
self._token = response['nextForwardToken']
return response['events']
except self._client.exceptions.ResourceNotFoundException as e:
# The logs might be delayed by a bit, so we can simply try
# again next time.
return []
def __iter__(self):
while True:
self._fill_buf()
if len(self._buf) == 0:
yield ''
if self._sleep_on_no_data > 0:
select.poll().poll(self._sleep_on_no_data * 1000)
else:
while self._buf:
yield self._buf.popleft()
def _fill_buf(self):
events = self._get_events()
for event in events:
self._buf.append(event['message'])
self._pos = event['timestamp']
|
py | b409973349f560bd8627ade934b2fba8e9a14fe4 | import unittest
from graphs.graph import Graph
from util.file_reader import read_graph_from_file
class TestGraph(unittest.TestCase):
def test_create_directed_graph(self):
"""Create a graph."""
graph = Graph(is_directed=True)
graph.add_vertex('A')
graph.add_vertex('B')
graph.add_vertex('C')
graph.add_edge('A','B')
graph.add_edge('A','C')
graph.add_edge('B','C')
self.assertEqual(len(graph.get_vertices()), 3)
self.assertEqual(len(graph.get_neighbors('A')), 2)
self.assertEqual(len(graph.get_neighbors('B')), 1)
self.assertEqual(len(graph.get_neighbors('C')), 0)
def test_create_undirected_graph(self):
"""Create a graph."""
graph = Graph(is_directed=False)
graph.add_vertex('A')
graph.add_vertex('B')
graph.add_vertex('C')
graph.add_edge('A','B')
graph.add_edge('A','C')
graph.add_edge('B','C')
self.assertEqual(len(graph.get_vertices()), 3)
self.assertEqual(len(graph.get_neighbors('A')), 2)
self.assertEqual(len(graph.get_neighbors('B')), 2)
self.assertEqual(len(graph.get_neighbors('C')), 2)
class TestReadGraphFromFile(unittest.TestCase):
def test_read_directed_graph_from_file(self):
filename = 'test_files/graph_small_directed.txt'
graph = read_graph_from_file(filename)
self.assertEqual(len(graph.get_vertices()), 4)
self.assertEqual(len(graph.get_neighbors('1')), 1)
self.assertEqual(len(graph.get_neighbors('2')), 1)
self.assertEqual(len(graph.get_neighbors('3')), 1)
self.assertEqual(len(graph.get_neighbors('4')), 0)
def test_read_undirected_graph_from_file(self):
filename = 'test_files/graph_small_undirected.txt'
graph = read_graph_from_file(filename)
self.assertEqual(len(graph.get_vertices()), 4)
self.assertEqual(len(graph.get_neighbors('1')), 1)
self.assertEqual(len(graph.get_neighbors('2')), 2)
self.assertEqual(len(graph.get_neighbors('3')), 1)
self.assertEqual(len(graph.get_neighbors('4')), 2)
def test_improper_graph_type(self):
filename = 'test_files/improper_graph_type.txt'
with self.assertRaises(ValueError) as error:
graph = read_graph_from_file(filename)
def test_find_shortest_path(self):
filename = 'test_files/graph_medium_undirected.txt'
graph = read_graph_from_file(filename)
path_from_A_to_F = graph.find_shortest_path('A', 'F')
self.assertEqual(len(path_from_A_to_F), 4)
def test_get_all_vertices_n_away(self):
filename = 'test_files/graph_medium_undirected.txt'
graph = read_graph_from_file(filename)
vertices_1_away = graph.find_vertices_n_away('A', 1)
self.assertEqual(sorted(vertices_1_away), ['B','C'])
vertices_2_away = graph.find_vertices_n_away('A', 2)
self.assertEqual(sorted(vertices_2_away), ['D','E'])
vertices_3_away = graph.find_vertices_n_away('A', 3)
self.assertEqual(vertices_3_away, ['F'])
if __name__ == '__main__':
unittest.main() |
py | b4099868a726fd79e270ef14e943520a82a79cc7 | import pandas as pd
import numpy as np
from functools import singledispatch
import itertools
from ..siu import Symbolic, create_sym_call,Call
def register_symbolic(f):
# TODO: don't use singledispatch if it has already been done
f = singledispatch(f)
@f.register(Symbolic)
def _dispatch_symbol(__data, *args, **kwargs):
return create_sym_call(f, __data.source, *args, **kwargs)
return f
def _coerce_to_str(x):
if isinstance(x, (pd.Series, np.ndarray)):
return x.astype(str)
elif not np.ndim(x) < 2:
raise ValueError("np.ndim must be less than 2, but is %s" %np.ndim(x))
return pd.Series(x, dtype = str)
@register_symbolic
def str_c(x, *args, sep = "", collapse = None):
all_args = itertools.chain([x], args)
strings = list(map(_coerce_to_str, all_args))
return np.sum(strings, axis = 0)
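# Hedged usage sketch (illustrative values only, assuming equal-length inputs):
#   str_c(pd.Series(['a', 'b']), pd.Series(['1', '2']))  # element-wise concatenation, e.g. ['a1', 'b2']
# Note that `sep` and `collapse` are accepted but not applied by this implementation.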
|
py | b409994ed98119a1cffa04b4ec7e11fd829f8555 | import sublime_plugin
import sublime
try:
from typing import Any, List, Dict, Callable, Optional
assert Any and List and Dict and Callable and Optional
except ImportError:
pass
from .core.protocol import Request
from .core.url import filename_to_uri
from .core.registry import session_for_view, sessions_for_view, client_from_session, configs_for_scope
from .core.settings import settings, client_configs
from .core.views import range_to_region
from .core.protocol import Range
from .core.configurations import is_supported_syntax
from .core.documents import is_transient_view
color_phantoms_by_view = dict() # type: Dict[int, sublime.PhantomSet]
class LspColorListener(sublime_plugin.ViewEventListener):
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self._stored_point = -1
self.initialized = False
self.enabled = False
@classmethod
def is_applicable(cls, _settings: 'Any') -> bool:
syntax = _settings.get('syntax')
is_supported = syntax and is_supported_syntax(syntax, client_configs.all)
disabled_by_user = 'colorProvider' in settings.disabled_capabilities
return is_supported and not disabled_by_user
@property
def phantom_set(self) -> sublime.PhantomSet:
return color_phantoms_by_view.setdefault(self.view.id(), sublime.PhantomSet(self.view, "lsp_color"))
def on_activated_async(self) -> None:
if not self.initialized:
self.initialize()
def initialize(self, is_retry: bool = False) -> None:
configs = configs_for_scope(self.view)
if not configs:
self.initialized = True # no server enabled, re-open file to activate feature.
sessions = list(sessions_for_view(self.view))
if sessions:
self.initialized = True
if any(session.has_capability('colorProvider') for session in sessions):
self.enabled = True
self.send_color_request()
elif not is_retry:
# session may be starting, try again once in a second.
sublime.set_timeout_async(lambda: self.initialize(is_retry=True), 1000)
else:
self.initialized = True # we retried but still no session available.
def on_modified_async(self) -> None:
if self.enabled:
self.schedule_request()
def schedule_request(self) -> None:
sel = self.view.sel()
if len(sel) < 1:
return
current_point = sel[0].begin()
if self._stored_point != current_point:
self._stored_point = current_point
sublime.set_timeout_async(lambda: self.fire_request(current_point), 800)
def fire_request(self, current_point: int) -> None:
if current_point == self._stored_point:
self.send_color_request()
def send_color_request(self) -> None:
if is_transient_view(self.view):
return
client = client_from_session(session_for_view(self.view, 'colorProvider'))
if client:
file_path = self.view.file_name()
if file_path:
params = {
"textDocument": {
"uri": filename_to_uri(file_path)
}
}
client.send_request(
Request.documentColor(params),
self.handle_response
)
def handle_response(self, response: 'Optional[List[dict]]') -> None:
color_infos = response if response else []
phantoms = []
for color_info in color_infos:
color = color_info['color']
red = color['red'] * 255
green = color['green'] * 255
blue = color['blue'] * 255
alpha = color['alpha']
content = """
<div style='padding: 0.4em;
margin-top: 0.2em;
border: 1px solid color(var(--foreground) alpha(0.25));
background-color: rgba({}, {}, {}, {})'>
</div>""".format(red, green, blue, alpha)
range = Range.from_lsp(color_info['range'])
region = range_to_region(range, self.view)
phantoms.append(sublime.Phantom(region, content, sublime.LAYOUT_INLINE))
self.phantom_set.update(phantoms)
def remove_color_boxes(view: sublime.View) -> None:
phantom_set = color_phantoms_by_view.get(view.id())
if phantom_set:
phantom_set.update([])
|
py | b40999c3a206cd10b4a9d4e8e7cc146a04f4ba8a | # IMPORTS
import binascii
import math
from hex_functions import *
from esc_functions import *
from characters import *
import numpy as np
# SPECIFY FILENAME, PRINTERNAME AND OUTPUTFOLDER
filename = 'test3_p600'
printer = 'p600' # one of the printers for which the header and footer files are available in the 'prns' folder
outputfolder = '.'
# SET PARAMETERS
# These parameters depend on the specific printer
# printer units can be found by parsing the prn file
# Same with color codes, print a file with all colors and parse it
# other specs can be found by looking in spec sheet or service manual (if available)
# Shown parameters should work with R2400 / P600 / R3000
# unit parameters
pmgmt = 720
vert = 720
hor = 720
mbase = 2880
nozzles = 180
# set nozzle row numbers (def black = 00)
# Should work with R2400 and P600
black = b'\x00'
lightBlack = b'\x10'
lightLightBlack = b'\x30'
cyan = b'\x02'
lightCyan = b'\x12'
magenta = b'\x01'
lightMagenta = b'\x11'
yellow = b'\x04'
# select dot size
d = b'\x10'
# set page method ID
esc_m = ESC_m(b'\x20')
# set uni or bi directional mode
unim = b'\x00' # 01 uni, 00 bi
# CREATE THE RASTERDATA
# create a matrix 180 by 50, with all ones. When providing this to esc_i_matrix, this would result in very closely printed droplets, with a spacing of 1/360" (I think!?)
num_drops_hor = 50
mat = np.ones((nozzles, 50))
# set the last row to 3 for large drops
mat[-1, :] = 3
# set the third row from the bottom to 2
mat[-3, :] = 2
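# (the raster values are assumed to encode the drop size per position:
# 1 = small, 2 = medium, 3 = large, consistent with the comments above)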
# check output matrix
print(mat)
# Create the raster.
# raster1: all color channels next to each other; the printhead is expected to make only one pass
# and print all colors while holding the printhead at the same location.
allColors = [black, lightBlack, lightLightBlack,
cyan, lightCyan, magenta, lightMagenta, yellow]
# move to x 1 inch location and:
# let the printhead drop a vertical line for each color
raster1 = b''
x = 1 # one inch from left edge of paper
y = 1.345 # some inches from top edge of paper (doesn't matter)
raster1 += ESC_v(pmgmt, y)
raster1 += ESC_dollar(hor, x)
# keep spacing at 0, fan = 0 and size=1 (size=1 is default)
raster1 += ESC_i_matrix(black, mat, spacing=0, fan=0)
# put all individual rasters together and close with a 0x0c (form feed / page eject)
rasterdata = raster1 + b'\x0c'
# LOAD HEADER AND FOOTER FOR SELECTED PRINTER
header = load_prn_file('prns/' + printer + '/' + printer + '-header.prn')
footer = load_prn_file('prns/' + printer + '/' + printer + '-footer.prn')
# header = b''
# footer = b''
# COMPOSE BODY
body = ESC_Graph() + ESC_Units(pmgmt, vert, hor, mbase) + ESC_Kmode() + \
ESC_imode(n=b'\x00') + ESC_Umode(unim) + ESC_edot(d) + ESC_Dras(v=240/3, h=120/3) + \
ESC_C(pmgmt) + ESC_c(pmgmt) + ESC_S(pmgmt) # + esc_m
# COMBINE
total = header + body + rasterdata + footer
# CREATE OUTPUT DIR
filename = outputfolder + '/' + filename + '.prn'
# if not os.path.exists(outputfolder):
# os.makedirs(outputfolder)
# SAVE PRN FILE
save_prn_file(total, filename)
print('DONE!')
print('path: ' + filename)
|
py | b4099b2299a6651841ecd22e5cfffe6a77d393f7 | """Collection of Smoothing Functions"""
# global
import ivy as _ivy
import math as _math
# local
from ivy_vision import single_view_geometry as _ivy_svg
MIN_DENOMINATOR = 1e-12
# noinspection PyUnresolvedReferences
def weighted_image_smooth(mean, weights, kernel_dim):
"""Smooth an image using weight values from a weight image of the same size.
Parameters
----------
mean
Image to smooth *[batch_shape,h,w,d]*
weights
Variance image, with the variance values of each pixel in the image
*[batch_shape,h,w,d]*
kernel_dim
The dimension of the kernel
Returns
-------
ret
Image smoothed based on the weight image and smoothing kernel.
"""
# shapes as list
kernel_shape = [kernel_dim, kernel_dim]
dim = mean.shape[-1]
# KW x KW x D
kernel = _ivy.ones(kernel_shape + [dim])
# D
kernel_sum = _ivy.reduce_sum(kernel, [0, 1])[0]
# BS x H x W x D
mean_x_weights = mean * weights
mean_x_weights_sum = _ivy.abs(_ivy.depthwise_conv2d(mean_x_weights, kernel, 1, "VALID"))
sum_of_weights = _ivy.depthwise_conv2d(weights, kernel, 1, "VALID")
new_mean = mean_x_weights_sum / (sum_of_weights + MIN_DENOMINATOR)
new_weights = sum_of_weights / (kernel_sum + MIN_DENOMINATOR)
# BS x H x W x D, # BS x H x W x D
return new_mean, new_weights
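# Minimal usage sketch (not from the original module): assumes an ivy backend
# has been set and uses numpy only to build illustrative inputs.
def _example_weighted_image_smooth():
    import numpy as np
    mean = _ivy.array(np.random.uniform(size=(1, 8, 8, 3)).astype('float32'))
    weights = _ivy.ones([1, 8, 8, 3])
    # spatial dims shrink by kernel_dim - 1 because of the "VALID" convolution
    return weighted_image_smooth(mean, weights, kernel_dim=3)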
def smooth_image_fom_var_image(mean, var, kernel_dim, kernel_scale, dev_str=None):
"""Smooth an image using variance values from a variance image of the same size,
and a spatial smoothing kernel.
Parameters
----------
mean
Image to smooth *[batch_shape,h,w,d]*
var
Variance image, with the variance values of each pixel in the image
*[batch_shape,h,w,d]*
kernel_dim
The dimension of the kernel
kernel_scale
The scale of the kernel along the channel dimension *[d]*
dev_str
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
Same as x if None. (Default value = None)
Returns
-------
ret
Image smoothed based on variance image and smoothing kernel.
"""
if dev_str is None:
dev_str = _ivy.dev_str(mean)
# shapes as list
kernel_shape = [kernel_dim, kernel_dim]
kernel_size = kernel_dim ** 2
dims = mean.shape[-1]
# KH x KW x 2
uniform_pixel_coords = _ivy_svg.create_uniform_pixel_coords_image(kernel_shape, dev_str=dev_str)[..., 0:2]
# 2
kernel_central_pixel_coord = _ivy.array([float(_math.floor(kernel_shape[0] / 2)),
float(_math.floor(kernel_shape[1] / 2))], dev_str=dev_str)
# KH x KW x 2
kernel_xy_dists = kernel_central_pixel_coord - uniform_pixel_coords
kernel_xy_dists_sqrd = kernel_xy_dists ** 2
# KH x KW x D
unit_kernel = _ivy.tile(_ivy.reduce_sum(kernel_xy_dists_sqrd, -1, keepdims=True) ** 0.5, (1, 1, dims))
kernel = 1 + unit_kernel * kernel_scale
recip_kernel = 1 / (kernel + MIN_DENOMINATOR)
# D
kernel_sum = _ivy.reduce_sum(kernel, [0, 1])[0]
recip_kernel_sum = _ivy.reduce_sum(recip_kernel, [0, 1])
# BS x H x W x D
recip_var = 1 / (var + MIN_DENOMINATOR)
recip_var_scaled = recip_var + 1
recip_new_var_scaled = _ivy.depthwise_conv2d(recip_var_scaled, recip_kernel, 1, "VALID")
# This 0.99 prevents float32 rounding errors leading to -ve variances, the true equation would use 1.0
recip_new_var = recip_new_var_scaled - recip_kernel_sum * 0.99
new_var = 1 / (recip_new_var + MIN_DENOMINATOR)
mean_x_recip_var = mean * recip_var
mean_x_recip_var_sum = _ivy.abs(_ivy.depthwise_conv2d(mean_x_recip_var, recip_kernel, 1, "VALID"))
new_mean = new_var * mean_x_recip_var_sum
new_var = new_var * kernel_size ** 2 / (kernel_sum + MIN_DENOMINATOR)
# prevent overconfidence from false meas independence assumption
# BS x H x W x D, # BS x H x W x D
return new_mean, new_var
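# Companion sketch for the variance-based version (again illustrative only,
# not part of the original API); kernel_scale is per channel.
def _example_smooth_image_fom_var_image():
    import numpy as np
    mean = _ivy.array(np.random.uniform(size=(1, 8, 8, 3)).astype('float32'))
    var = _ivy.ones([1, 8, 8, 3]) * 0.1
    kernel_scale = _ivy.array([0.5, 0.5, 0.5])
    return smooth_image_fom_var_image(mean, var, kernel_dim=3, kernel_scale=kernel_scale)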
|
py | b4099b3137f01fbb2b7f3d2f5818dc79da0433cd | #
# minixsv, Release 0.9.0
# file: xsvalSchema.py
#
# Derived validator class (for validation of schema files)
#
# history:
# 2004-10-07 rl created
# 2006-08-18 rl W3C testsuite passed for supported features
# 2007-05-24 rl Features for release 0.8 added, several bugs fixed
#
# Copyright (c) 2004-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The minixsv XML schema validator is
#
# Copyright (c) 2004-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import string
import re
import os
from decimal import Decimal
from genxmlif.xmlifUtils import collapseString
from minixsv import *
from xsvalBase import XsValBase, TagException
from xsvalUtils import substituteSpecialEscChars
_localFacetDict = {(XSD_NAMESPACE,"list"): ("length", "minLength", "maxLength", "enumeration", "pattern", "whiteSpace"),
(XSD_NAMESPACE,"union"): ("enumeration", "pattern", "whiteSpace"),
(XSD_NAMESPACE,"anySimpleType"): ("whiteSpace"),}
###########################################################
# Derived validator class for validating one input schema file against the XML rules file
class XsValSchema (XsValBase):
########################################
# overloaded validate method
#
def validate (self, inputTree, xsdTree):
XsValBase.validate(self, inputTree, xsdTree)
self._initInternalAttributes (self.inputRoot)
self._updateLookupTables (self.inputRoot, self.xsdLookupDict)
self._includeAndImport (self.inputTree, self.inputTree, self.xsdIncludeDict, self.xsdLookupDict)
if not self.errorHandler.hasErrors():
# IDs must be unique within a schema file
self.xsdIdDict = {}
self._checkSchemaSecondLevel()
# FIXME: Wellknown schemas are not included in the input tree although the internal attribute has been set!
# Better solution required than this workaround!
self.inputRoot["__WellknownSchemasImported__"] = "false"
########################################
# additional checks for schema files which are not covered by "xsStructs.xsd"
#
def _checkSchemaSecondLevel(self):
targetNamespace = self.inputRoot.getAttribute("targetNamespace")
if targetNamespace == "":
self.errorHandler.raiseError("Empty string not allowed for target namespace!", self.inputRoot)
self._checkElementNodesSecondLevel()
self._checkNotationNodesSecondLevel()
self._checkAnyNodesSecondLevel()
self._checkGroupNodesSecondLevel()
self._checkAttrGroupNodesSecondLevel()
self._checkAttributeNodesSecondLevel()
self._checkAnyAttributesSecondLevel()
if self.errorHandler.hasErrors():
return
self._checkComplexTypesSecondLevel()
self._checkSimpleTypesSecondLevel()
self._checkParticlesSecondLevel()
self._checkIdentityConstraintsSecondLevel()
self._checkKeysSecondLevel()
self._checkKeyRefsSecondLevel()
########################################
# additional checks for element nodes
#
def _checkElementNodesSecondLevel(self):
elementNodes = self.inputRoot.getElementsByTagNameNS (self.inputNsURI, "element")
for elementNode in elementNodes:
if not elementNode.hasAttribute("name") and not elementNode.hasAttribute("ref"):
self._addError ("Element must have 'name' or 'ref' attribute!", elementNode)
continue
if elementNode.hasAttribute("ref"):
for attrName in ("name", "type", "form"):
if elementNode.hasAttribute(attrName):
self._addError ("Element with 'ref' attribute must not have %s attribute!" %repr(attrName), elementNode)
continue
complexTypeNode = elementNode.getFirstChildNS (self.inputNsURI, "complexType")
simpleTypeNode = elementNode.getFirstChildNS (self.inputNsURI, "simpleType")
if elementNode.hasAttribute("ref") and (complexTypeNode != None or simpleTypeNode != None):
self._addError ("Element with 'ref' attribute must not have type definition!", elementNode)
continue
if elementNode.hasAttribute("type") and (complexTypeNode != None or simpleTypeNode != None):
self._addError ("Element with 'type' attribute must not have type definition!", elementNode)
continue
if elementNode.hasAttribute("ref"):
for forbiddenAttr in ("block", "nillable", "default", "fixed"):
if elementNode.hasAttribute(forbiddenAttr):
self._addError ("Element with 'ref' attribute must not have %s attribute!" %repr(forbiddenAttr), elementNode)
self._checkReference (elementNode, self.xsdElementDict)
if elementNode.hasAttribute("type"):
self._checkType (elementNode, "type", self.xsdTypeDict)
self._checkNodeId(elementNode)
self._checkOccurs (elementNode)
self._checkFixedDefault(elementNode)
########################################
# additional checks for notation nodes
#
def _checkNotationNodesSecondLevel(self):
notationNodes = self.inputRoot.getElementsByTagNameNS (self.inputNsURI, "notation")
for notationNode in notationNodes:
if not notationNode.hasAttribute("public") and not notationNode.hasAttribute("system"):
self._addError ("Notation must have 'public' or 'system' attribute!", notationNode)
########################################
# additional checks for anyNodes
#
def _checkAnyNodesSecondLevel(self):
anyNodes = self.inputRoot.getElementsByTagNameNS (self.inputNsURI, "any")
for anyNode in anyNodes:
self._checkOccurs (anyNode)
# check for unique ID
self._checkNodeId (anyNode)
########################################
# additional checks for group nodes
#
def _checkGroupNodesSecondLevel(self):
groupNodes = self.inputRoot.getElementsByTagNameNS (self.inputNsURI, "group")
for groupNode in groupNodes:
self._checkNodeId(groupNode)
if groupNode.hasAttribute("ref"):
self._checkReference (groupNode, self.xsdGroupDict)
self._checkOccurs (groupNode)
if self.errorHandler.hasErrors():
return
# for groupNode in groupNodes:
# if groupNode.hasAttribute("name"):
# self._checkGroupNodeCircularDef(groupNode, {groupNode["name"]:1})
def _checkGroupNodeCircularDef(self, groupNode, groupNameDict):
childGroupsRefNodes, dummy, dummy = groupNode.getXPathList (".//%sgroup" %(self.inputNsPrefixString))
for childGroupRefNode in childGroupsRefNodes:
if childGroupRefNode.hasAttribute("ref"):
childGroupNode = self.xsdGroupDict[childGroupRefNode.getQNameAttribute("ref")]
if not groupNameDict.has_key(childGroupNode["name"]):
groupNameDict[childGroupNode["name"]] = 1
self._checkGroupNodeCircularDef(childGroupNode, groupNameDict)
else:
self._addError ("Circular definition of group %s!" %repr(childGroupNode["name"]), childGroupNode)
########################################
# additional checks for attributeGroup nodes
#
def _checkAttrGroupNodesSecondLevel(self):
attributeGroupNodes = self.inputRoot.getElementsByTagNameNS (self.inputNsURI, "attributeGroup")
for attributeGroupNode in attributeGroupNodes:
if attributeGroupNode.hasAttribute("ref"):
self._checkReference (attributeGroupNode, self.xsdAttrGroupDict)
self._checkNodeId(attributeGroupNode)
########################################
# additional checks for attribute nodes
#
def _checkAttributeNodesSecondLevel(self):
attributeNodes = self.inputRoot.getElementsByTagNameNS (XSD_NAMESPACE, "attribute")
for attributeNode in attributeNodes:
if os.path.basename(attributeNode.getFilePath()) != "XMLSchema-instance.xsd":
# global attributes must always be "qualified"
if (attributeNode.getParentNode() == self.inputRoot or
self._getAttributeFormDefault(attributeNode) == "qualified"):
if self._getTargetNamespace(attributeNode) == XSI_NAMESPACE:
self._addError ("Target namespace of an attribute must not match '%s'!" %XSI_NAMESPACE, attributeNode)
if not attributeNode.hasAttribute("name") and not attributeNode.hasAttribute("ref"):
self._addError ("Attribute must have 'name' or 'ref' attribute!", attributeNode)
continue
if attributeNode.getAttribute("name") == "xmlns":
self._addError ("Attribute must not match 'xmlns'!", attributeNode)
if attributeNode.hasAttribute("ref"):
if attributeNode.hasAttribute("name"):
self._addError ("Attribute may have 'name' OR 'ref' attribute!", attributeNode)
if attributeNode.hasAttribute("type"):
self._addError ("Attribute may have 'type' OR 'ref' attribute!", attributeNode)
if attributeNode.hasAttribute("form"):
self._addError ("Attribute 'form' is not allowed in this context!", attributeNode)
if attributeNode.getFirstChildNS(XSD_NAMESPACE, "simpleType") != None:
self._addError ("Attribute may only have 'ref' attribute OR 'simpleType' child!", attributeNode)
self._checkReference (attributeNode, self.xsdAttributeDict)
if attributeNode.hasAttribute("type"):
if attributeNode.getFirstChildNS(XSD_NAMESPACE, "simpleType") != None:
self._addError ("Attribute may only have 'type' attribute OR 'simpleType' child!", attributeNode)
self._checkType (attributeNode, "type", self.xsdTypeDict, (XSD_NAMESPACE, "simpleType"))
use = attributeNode.getAttribute("use")
if use in ("required", "prohibited") and attributeNode.hasAttribute("default"):
self._addError ("Attribute 'default' is not allowed, because 'use' is '%s'!" %(use), attributeNode)
self._checkNodeId(attributeNode, unambiguousPerFile=0)
self._checkFixedDefault(attributeNode)
########################################
# additional checks for attribute wildcards
#
def _checkAnyAttributesSecondLevel(self):
anyAttributeNodes, dummy, dummy = self.inputRoot.getXPathList (".//%sanyAttribute" %(self.inputNsPrefixString))
for anyAttributeNode in anyAttributeNodes:
# check for unique ID
self._checkNodeId (anyAttributeNode)
########################################
# additional checks for complex types
#
def _checkComplexTypesSecondLevel(self):
prefix = self.inputNsPrefixString
contentNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)scomplexContent/%(prefix)srestriction | .//%(prefix)scomplexContent/%(prefix)sextension" % vars())
for contentNode in contentNodes:
self._checkType(contentNode, "base", self.xsdTypeDict, (XSD_NAMESPACE, "complexType"))
contentNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)ssimpleContent/%(prefix)srestriction | .//%(prefix)ssimpleContent/%(prefix)sextension" % vars())
for contentNode in contentNodes:
baseNsName = contentNode.getQNameAttribute("base")
if baseNsName != (XSD_NAMESPACE, "anyType"):
typeNsName = contentNode.getParentNode().getNsName()
self._checkBaseType(contentNode, baseNsName, self.xsdTypeDict, typeNsName)
else:
self._addError ("Referred type must not be 'anyType'!", contentNode)
# check for unique ID
self._checkNodeId (contentNode)
complexTypeNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)scomplexType | .//%(prefix)sextension" % vars())
for complexTypeNode in complexTypeNodes:
validAttrDict = {}
# check for duplicate attributes
self._updateAttributeDict (complexTypeNode, validAttrDict, 1)
# check for duplicate ID attributes
idAttrNode = None
for key, val in validAttrDict.items():
attrType = val["RefNode"].getQNameAttribute("type")
if attrType == (XSD_NAMESPACE, "ID"):
if not idAttrNode:
idAttrNode = val["Node"]
else:
# TODO: check also if attribute has a type which is derived from ID!
self._addError ("Two attribute declarations of complex type are IDs!", val["Node"])
# check for unique ID
self._checkNodeId (complexTypeNode)
contentNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)scomplexType/%(prefix)s*" % vars())
for contentNode in contentNodes:
self._checkOccurs (contentNode)
contentNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)scomplexContent | .//%(prefix)ssimpleContent" % vars())
for contentNode in contentNodes:
# check for unique ID
self._checkNodeId (contentNode)
########################################
# additional checks for simple types
#
def _checkParticlesSecondLevel(self):
prefix = self.inputNsPrefixString
# check for duplicate element names
particleNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)sall | .//%(prefix)schoice | .//%(prefix)ssequence" % vars())
for particleNode in particleNodes:
elementTypeDict = {}
elementNameDict = {}
groupNameDict = {}
self._checkContainedElements (particleNode, particleNode.getLocalName(), elementNameDict, elementTypeDict, groupNameDict)
self._checkOccurs (particleNode)
# check for unique ID
self._checkNodeId (particleNode)
def _checkContainedElements (self, node, particleType, elementNameDict, elementTypeDict, groupNameDict):
prefix = self.inputNsPrefixString
for childNode in node.getChildren():
childParticleType = childNode.getLocalName()
if childParticleType in ("sequence", "choice", "all"):
dummy = {}
self._checkContainedElements (childNode, childParticleType, dummy, elementTypeDict, groupNameDict)
elif childParticleType in ("group"):
if childNode["ref"] != None:
childGroupNode = self.xsdGroupDict[childNode.getQNameAttribute("ref")]
if not groupNameDict.has_key(childGroupNode["name"]):
groupNameDict[childGroupNode["name"]] = 1
for cChildNode in childGroupNode.getChildren():
if cChildNode.getLocalName() != "annotation":
self._checkContainedElements (cChildNode, particleType, elementNameDict, elementTypeDict, groupNameDict)
else:
self._addError ("Circular definition of group %s!" %repr(childGroupNode["name"]), childNode)
else:
for cChildNode in childNode.getChildren():
if cChildNode.getLocalName() != "annotation":
self._checkContainedElements (cChildNode, particleType, elementNameDict, elementTypeDict, groupNameDict)
else:
if childNode.getLocalName() == "any":
elementName = childNode.getAttribute("namespace")
else:
elementName = childNode.getAttributeOrDefault("name", childNode.getAttribute("ref"))
if childNode.hasAttribute("type"):
if not elementTypeDict.has_key(elementName):
elementTypeDict[elementName] = childNode["type"]
elif childNode["type"] != elementTypeDict[elementName]:
self._addError ("Element %s has identical name and different types within %s!" %(repr(elementName), repr(particleType)), childNode)
if particleType != "sequence":
if not elementNameDict.has_key(elementName):
elementNameDict[elementName] = 1
else:
self._addError ("Element %s is not unique within %s!" %(repr(elementName), repr(particleType)), childNode)
########################################
# additional checks for simple types
#
def _checkSimpleTypesSecondLevel(self):
prefix = self.inputNsPrefixString
simpleTypeNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)ssimpleType" % vars())
for simpleTypeNode in simpleTypeNodes:
# check for unique ID
self._checkNodeId (simpleTypeNode)
restrictionNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)ssimpleType/%(prefix)srestriction" % vars())
for restrictionNode in restrictionNodes:
# check for unique ID
self._checkNodeId (restrictionNode)
if not restrictionNode.hasAttribute("base") and restrictionNode.getFirstChildNS (self.inputNsURI, "simpleType") == None:
self._addError ("Simple type restriction must have 'base' attribute or 'simpleType' child tag!", restrictionNode)
if restrictionNode.hasAttribute("base") and restrictionNode.getFirstChildNS (self.inputNsURI, "simpleType") != None:
self._addError ("Simple type restriction must not have 'base' attribute and 'simpleType' child tag!", restrictionNode)
if restrictionNode.hasAttribute("base"):
self._checkType(restrictionNode, "base", self.xsdTypeDict)
minExcl = restrictionNode.getFirstChildNS(self.inputNsURI, "minExclusive")
minIncl = restrictionNode.getFirstChildNS(self.inputNsURI, "minInclusive")
if minExcl != None and minIncl != None:
self._addError ("Restriction attributes 'minExclusive' and 'minInclusive' cannot be defined together!", restrictionNode)
maxExcl = restrictionNode.getFirstChildNS(self.inputNsURI, "maxExclusive")
maxIncl = restrictionNode.getFirstChildNS(self.inputNsURI, "maxInclusive")
if maxExcl != None and maxIncl != None:
self._addError ("Restriction attributes 'maxExclusive' and 'maxInclusive' cannot be defined together!", restrictionNode)
# check facets of associated primitive type
for restrictionNode in restrictionNodes:
try:
if restrictionNode.hasAttribute("base"):
facetNsName = self._getFacetType (restrictionNode, [restrictionNode.getParentNode(),], self.xsdTypeDict)
if not facetNsName:
continue
if _localFacetDict.has_key(facetNsName):
suppFacets = _localFacetDict[facetNsName]
else:
suppFacets, dummy, dummy = self.xsdTypeDict[facetNsName].getXPathList (".//hfp:hasFacet/@name" % vars())
specifiedFacets = {"length":None, "minLength":None, "maxLength":None,
"minExclusive":None, "minInclusive":None, "maxExclusive":None, "maxInclusive":None,
"totalDigits": None, "fractionDigits":None}
for childNode in restrictionNode.getChildren():
if childNode.getLocalName() in suppFacets:
if specifiedFacets.has_key(childNode.getLocalName()):
specifiedFacets[childNode.getLocalName()] = childNode["value"]
facetElementNode = self.xsdElementDict[childNode.getNsName()]
try:
self._checkElementTag (facetElementNode, restrictionNode, (childNode,), 0)
except TagException, errInst:
self._addError (errInst.errstr, errInst.node, errInst.endTag)
if childNode.getLocalName() in ("enumeration", "minExclusive", "minInclusive", "maxExclusive", "maxInclusive"):
simpleTypeReturnDict = self._checkSimpleType (restrictionNode, "base", childNode, "value", childNode["value"], None, checkAttribute=1)
if simpleTypeReturnDict != None and simpleTypeReturnDict.has_key("orderedValue"):
if childNode.getLocalName() != "enumeration":
specifiedFacets[childNode.getLocalName()] = simpleTypeReturnDict["orderedValue"]
elif childNode.getLocalName() == "enumeration":
self._checkSimpleType (restrictionNode, "base", childNode, "value", childNode["value"], None, checkAttribute=1)
elif childNode.getLocalName() != "annotation":
self._addError ("Facet %s not allowed for base type %s!" %(childNode.getLocalName(), repr(restrictionNode["base"])), childNode)
if specifiedFacets["length"] != None:
if specifiedFacets["minLength"] != None or specifiedFacets["maxLength"] != None:
self._addError ("Facet 'minLength' and 'maxLength' not allowed if facet 'length' is specified!", restrictionNode)
else:
if specifiedFacets["maxLength"] != None and specifiedFacets["minLength"] != None:
if int(specifiedFacets["maxLength"]) < int(specifiedFacets["minLength"]):
self._addError ("Facet 'maxLength' < facet 'minLength'!", restrictionNode)
if specifiedFacets["totalDigits"] != None and specifiedFacets["fractionDigits"] != None:
if int(specifiedFacets["totalDigits"]) < int(specifiedFacets["fractionDigits"]):
self._addError ("Facet 'totalDigits' must be >= 'fractionDigits'!", restrictionNode)
if specifiedFacets["minExclusive"] != None and specifiedFacets["minInclusive"] != None:
self._addError ("Facets 'minExclusive' and 'minInclusive' are mutually exclusive!", restrictionNode)
if specifiedFacets["maxExclusive"] != None and specifiedFacets["maxInclusive"] != None:
self._addError ("Facets 'maxExclusive' and 'maxInclusive' are mutually exclusive!", restrictionNode)
minValue = specifiedFacets["minExclusive"]
if specifiedFacets["minInclusive"] != None:
minValue = specifiedFacets["minInclusive"]
maxValue = specifiedFacets["maxExclusive"]
if specifiedFacets["maxInclusive"] != None:
maxValue = specifiedFacets["maxInclusive"]
# TODO: use orderedValue for '<' check!!
if minValue != None and maxValue != None and maxValue < minValue:
self._addError ("maxValue facet < minValue facet!", restrictionNode)
except TagException:
self._addError ("Primitive type for base type not found!", restrictionNode)
listNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)slist" % vars())
for listNode in listNodes:
# check for unique ID
self._checkNodeId (listNode)
if not listNode.hasAttribute("itemType") and listNode.getFirstChildNS (self.inputNsURI, "simpleType") == None:
self._addError ("List type must have 'itemType' attribute or 'simpleType' child tag!", listNode)
elif listNode.hasAttribute("itemType") and listNode.getFirstChildNS (self.inputNsURI, "simpleType") != None:
self._addError ("List type must not have 'itemType' attribute and 'simpleType' child tag!", listNode)
elif listNode.hasAttribute("itemType"):
itemType = self._checkType(listNode, "itemType", self.xsdTypeDict)
if self.xsdTypeDict.has_key(itemType):
if self.xsdTypeDict[itemType].getLocalName() != "simpleType":
self._addError ("ItemType %s must be a simple type!" %(repr(itemType)), listNode)
elif self.xsdTypeDict[itemType].getFirstChild().getLocalName() == "list":
self._addError ("ItemType %s must not be a list type!" %(repr(itemType)), listNode)
unionNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)ssimpleType/%(prefix)sunion" % vars())
for unionNode in unionNodes:
# check for unique ID
self._checkNodeId (unionNode)
if not unionNode.hasAttribute("memberTypes"):
for childNode in unionNode.getChildren():
if childNode.getLocalName() != "annotation":
break
else:
self._addError ("Union must not be empty!", unionNode)
else:
for memberType in string.split(unionNode["memberTypes"]):
memberNsName = unionNode.qName2NsName(memberType, 1)
self._checkBaseType(unionNode, memberNsName, self.xsdTypeDict)
if self.xsdTypeDict.has_key(memberNsName):
if self.xsdTypeDict[memberNsName].getLocalName() != "simpleType":
self._addError ("MemberType %s must be a simple type!" %(repr(memberNsName)), unionNode)
patternNodes, dummy, dummy = self.inputRoot.getXPathList (".//%(prefix)spattern" % vars())
for patternNode in patternNodes:
pattern = patternNode["value"]
try:
pattern = substituteSpecialEscChars (pattern)
try:
test = re.compile(pattern)
except Exception, errstr:
self._addError (str(errstr), patternNode)
self._addError ("%s is not a valid regular expression!" %(repr(patternNode["value"])), patternNode)
except SyntaxError, errInst:
self._addError (repr(errInst[0]), patternNode)
########################################
# additional checks for keyrefs
#
def _checkIdentityConstraintsSecondLevel(self):
identityConstraintNodes, dummy, dummy = self.inputRoot.getXPathList (".//%sunique" %(self.inputNsPrefixString))
for identityConstraintNode in identityConstraintNodes:
# check for unique ID
self._checkNodeId (identityConstraintNode)
selectorNode = identityConstraintNode.getFirstChildNS(XSD_NAMESPACE, "selector")
self._checkNodeId (selectorNode)
try:
completeChildList, attrNodeList, attrNsNameFirst = identityConstraintNode.getParentNode().getXPathList (selectorNode["xpath"], selectorNode)
if attrNsNameFirst != None:
self._addError ("Selection of attributes is not allowed for selector!", selectorNode)
except Exception, errstr:
self._addError (errstr, selectorNode)
try:
fieldNode = identityConstraintNode.getFirstChildNS(XSD_NAMESPACE, "field")
identityConstraintNode.getParentNode().getXPathList (fieldNode["xpath"], fieldNode)
self._checkNodeId (fieldNode)
except Exception, errstr:
self._addError (errstr, fieldNode)
########################################
# additional checks for keyrefs
#
def _checkKeysSecondLevel(self):
keyNodes, dummy, dummy = self.inputRoot.getXPathList (".//%skey" %(self.inputNsPrefixString))
for keyNode in keyNodes:
# check for unique ID
self._checkNodeId (keyNode)
fieldNode = keyNode.getFirstChildNS(XSD_NAMESPACE, "field")
if fieldNode != None:
self._checkNodeId (fieldNode)
########################################
# additional checks for keyrefs
#
def _checkKeyRefsSecondLevel(self):
keyrefNodes, dummy, dummy = self.inputRoot.getXPathList (".//%skeyref" %(self.inputNsPrefixString))
for keyrefNode in keyrefNodes:
# check for unique ID
self._checkNodeId (keyrefNode)
self._checkKeyRef(keyrefNode, self.xsdIdentityConstrDict)
########################################
# helper methods
#
def _checkFixedDefault(self, node):
if node.hasAttribute("default") and node.hasAttribute("fixed"):
self._addError ("%s may have 'default' OR 'fixed' attribute!" %repr(node.getLocalName()), node)
if node.hasAttribute("default"):
self._checkSimpleType (node, "type", node, "default", node["default"], None, checkAttribute=1)
if node.hasAttribute("fixed"):
self._checkSimpleType (node, "type", node, "fixed", node["fixed"], None, checkAttribute=1)
def _checkReference(self, node, dict):
baseNsName = node.getQNameAttribute("ref")
if dict.has_key(baseNsName):
refNode = dict[baseNsName]
fixedValue = node.getAttribute("fixed")
fixedRefValue = refNode.getAttribute("fixed")
if fixedValue != None and fixedRefValue != None and fixedValue != fixedRefValue:
self._addError ("Fixed value %s of attribute does not match fixed value %s of reference!" %(repr(fixedValue), repr(fixedRefValue)), node)
else:
self._addError ("Reference %s not found!" %(repr(baseNsName)), node)
def _checkType(self, node, typeAttrName, dict, typeNsName=None):
baseNsName = node.getQNameAttribute(typeAttrName)
self._checkBaseType(node, baseNsName, dict, typeNsName)
return baseNsName
def _checkBaseType(self, node, baseNsName, dict, typeNsName=None):
if not dict.has_key(baseNsName) and baseNsName != (XSD_NAMESPACE, "anySimpleType"):
self._addError ("Definition of type %s not found!" %(repr(baseNsName)), node)
elif typeNsName != None:
if typeNsName == (XSD_NAMESPACE, "simpleContent"):
if node.getNsName() == (XSD_NAMESPACE, "restriction"):
if (baseNsName != (XSD_NAMESPACE, "anySimpleType") and
dict[baseNsName].getNsName() == (XSD_NAMESPACE, "complexType") and
dict[baseNsName].getFirstChild().getNsName() == typeNsName):
pass
else:
self._addError ("Referred type %s must be a complex type with simple content!" %(repr(baseNsName)), node)
else: # extension
if (baseNsName == (XSD_NAMESPACE, "anySimpleType") or
dict[baseNsName].getNsName() == (XSD_NAMESPACE, "simpleType") or
(dict[baseNsName].getNsName() == (XSD_NAMESPACE, "complexType") and
dict[baseNsName].getFirstChild().getNsName() == typeNsName)):
pass
else:
self._addError ("Referred type %s must be a simple type or a complex type with simple content!" %(repr(baseNsName)), node)
else:
if typeNsName == (XSD_NAMESPACE, "simpleType") and baseNsName == (XSD_NAMESPACE, "anySimpleType"):
pass
elif dict[baseNsName].getNsName() != typeNsName:
self._addError ("Referred type %s must be a %s!" %(repr(baseNsName), repr(typeNsName)), node)
def _checkKeyRef(self, keyrefNode, dict):
baseNsName = keyrefNode.getQNameAttribute("refer")
if not dict.has_key(baseNsName):
self._addError ("keyref refers unknown key %s!" %(repr(baseNsName)), keyrefNode)
else:
keyNode = dict[baseNsName]["Node"]
if keyNode.getNsName() not in ((XSD_NAMESPACE, "key"), (XSD_NAMESPACE, "unique")):
self._addError ("reference to non-key constraint %s!" %(repr(baseNsName)), keyrefNode)
if len(keyrefNode.getChildrenNS(XSD_NAMESPACE, "field")) != len(keyNode.getChildrenNS(XSD_NAMESPACE, "field")):
self._addError ("key/keyref field size mismatch!", keyrefNode)
def _checkOccurs (self, node):
minOccurs = node.getAttributeOrDefault("minOccurs", "1")
maxOccurs = node.getAttributeOrDefault("maxOccurs", "1")
if maxOccurs != "unbounded":
if string.atoi(minOccurs) > string.atoi(maxOccurs):
self._addError ("Attribute minOccurs > maxOccurs!", node)
def _checkNodeId (self, node, unambiguousPerFile=1):
if node.hasAttribute("id"):
# id must only be unambiguous within one file
if unambiguousPerFile:
nodeId = (node.getAbsUrl(), collapseString(node["id"]))
else:
nodeId = collapseString(node["id"])
if not self.xsdIdDict.has_key(nodeId):
self.xsdIdDict[nodeId] = node
else:
self._addError ("There are multiple occurences of ID value %s!" %repr(nodeId), node)
def _getFacetType(self, node, parentNodeList, xsdTypeDict):
baseNsName = node.getQNameAttribute("base")
try:
baseNode = xsdTypeDict[baseNsName]
except:
self._addError ("Base type %s must be an atomic simple type definition or a builtin type!" %repr(baseNsName), node)
return None
if baseNode in parentNodeList:
self._addError ("Circular type definition (type is contained in its own type hierarchy)!", node)
return None
if baseNode.getNsName() == (XSD_NAMESPACE, "simpleType"):
if baseNode.getAttribute("facetType") != None:
facetType = baseNode.qName2NsName(baseNode["facetType"], 1)
node.getParentNode()["facetType"] = node.nsName2QName(facetType)
return facetType
else:
for baseNodeType in ("list", "union"):
if baseNode.getFirstChildNS (XSD_NAMESPACE, baseNodeType) != None:
return (XSD_NAMESPACE, baseNodeType)
else:
parentNodeList.append(node)
return self._getFacetType(baseNode.getFirstChildNS(XSD_NAMESPACE, "restriction"), parentNodeList, xsdTypeDict)
else:
self._addError ("Base type %s must be an atomic simple type definition or a builtin type!" %repr(baseNsName), node)
return None
|
py | b4099c8ab623c41521656c94501969f654c5552b | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .data_connector import DataConnector
class OfficeATPDataConnector(DataConnector):
"""Represents OfficeATP (Office 365 Advanced Threat Protection) data
connector.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id
:vartype id: str
:ivar name: Azure resource name
:vartype name: str
:ivar type: Azure resource type
:vartype type: str
:param etag: Etag of the azure resource
:type etag: str
:param kind: Required. Constant filled by server.
:type kind: str
:param tenant_id: The tenant id to connect to, and get the data from.
:type tenant_id: str
:param data_types: The available data types for the connector.
:type data_types: ~securityinsights.models.AlertsDataTypeOfDataConnector
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'tenant_id': {'key': 'properties.tenantId', 'type': 'str'},
'data_types': {'key': 'properties.dataTypes', 'type': 'AlertsDataTypeOfDataConnector'},
}
def __init__(self, **kwargs):
super(OfficeATPDataConnector, self).__init__(**kwargs)
self.tenant_id = kwargs.get('tenant_id', None)
self.data_types = kwargs.get('data_types', None)
self.kind = 'OfficeATP'
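# Example construction (a sketch only; the tenant GUID is a placeholder and
# data_types would be an AlertsDataTypeOfDataConnector instance):
#
#   connector = OfficeATPDataConnector(
#       tenant_id='00000000-0000-0000-0000-000000000000',
#       data_types=my_alerts_data_type)
#   assert connector.kind == 'OfficeATP'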
|
py | b4099cff88e189f73b5c67a542bcbaaf103cfd49 | ########################################################################
#
# Class for creating a data-set consisting of all files in a directory.
#
# Example usage is shown in the file knifey.py and Tutorial #09.
#
# Implemented in Python 3.5
#
########################################################################
#
# This file is part of the TensorFlow Tutorials available at:
#
# https://github.com/Hvass-Labs/TensorFlow-Tutorials
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2016 by Magnus Erik Hvass Pedersen
#
########################################################################
import numpy as np
import os
from cache import cache
########################################################################
def one_hot_encoded(class_numbers, num_classes=None):
"""
Generate the One-Hot encoded class-labels from an array of integers.
For example, if class_number=2 and num_classes=4 then
the one-hot encoded label is the float array: [0. 0. 1. 0.]
:param class_numbers:
Array of integers with class-numbers.
Assume the integers are from zero to num_classes-1 inclusive.
:param num_classes:
Number of classes. If None then use max(cls)+1.
:return:
2-dim array of shape: [len(cls), num_classes]
"""
# Find the number of classes if None is provided.
if num_classes is None:
num_classes = np.max(class_numbers) + 1
return np.eye(num_classes, dtype=float)[class_numbers]
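# Example:
#   one_hot_encoded(np.array([0, 2, 1]), num_classes=4)
#   # -> [[1. 0. 0. 0.]
#   #     [0. 0. 1. 0.]
#   #     [0. 1. 0. 0.]]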
########################################################################
class DataSet:
def __init__(self, in_dir, exts='.jpg'):
"""
Create a data-set consisting of the filenames in the given directory
and sub-dirs that match the given filename-extensions.
For example, the knifey-spoony data-set (see knifey.py) has the
following dir-structure:
knifey-spoony/forky/
knifey-spoony/knifey/
knifey-spoony/spoony/
knifey-spoony/forky/test/
knifey-spoony/knifey/test/
knifey-spoony/spoony/test/
This means there are 3 classes called: forky, knifey, and spoony.
If we set in_dir = "knifey-spoony/" and create a new DataSet-object
then it will scan through these directories and create a training-set
and test-set for each of these classes.
The training-set will contain a list of all the *.jpg filenames
in the following directories:
knifey-spoony/forky/
knifey-spoony/knifey/
knifey-spoony/spoony/
The test-set will contain a list of all the *.jpg filenames
in the following directories:
knifey-spoony/forky/test/
knifey-spoony/knifey/test/
knifey-spoony/spoony/test/
See the TensorFlow Tutorial #09 for a usage example.
:param in_dir:
Root-dir for the files in the data-set.
This would be 'knifey-spoony/' in the example above.
:param exts:
String or tuple of strings with valid filename-extensions.
Not case-sensitive.
:return:
Object instance.
"""
# Extend the input directory to the full path.
in_dir = os.path.abspath(in_dir)
# Input directory.
self.in_dir = in_dir
# Convert all file-extensions to lower-case. A single extension passed as a
# string is first wrapped in a tuple so it is not split into characters.
if isinstance(exts, str):
    exts = (exts,)
self.exts = tuple(ext.lower() for ext in exts)
# Names for the classes.
self.class_names = []
# Filenames for all the files in the training-set.
self.filenames = []
# Filenames for all the files in the test-set.
self.filenames_test = []
# Class-number for each file in the training-set.
self.class_numbers = []
# Class-number for each file in the test-set.
self.class_numbers_test = []
# Total number of classes in the data-set.
self.num_classes = 0
# For all files/dirs in the input directory.
for name in os.listdir(in_dir):
# Full path for the file / dir.
current_dir = os.path.join(in_dir, name)
# If it is a directory.
if os.path.isdir(current_dir):
# Add the dir-name to the list of class-names.
self.class_names.append(name)
# Training-set.
# Get all the valid filenames in the dir (not sub-dirs).
filenames = self._get_filenames(current_dir)
# Append them to the list of all filenames for the training-set.
self.filenames.extend(filenames)
# The class-number for this class.
class_number = self.num_classes
# Create an array of class-numbers.
class_numbers = [class_number] * len(filenames)
# Append them to the list of all class-numbers for the training-set.
self.class_numbers.extend(class_numbers)
# Test-set.
# Get all the valid filenames in the sub-dir named 'test'.
filenames_test = self._get_filenames(os.path.join(current_dir, 'test'))
# Append them to the list of all filenames for the test-set.
self.filenames_test.extend(filenames_test)
# Create an array of class-numbers.
class_numbers = [class_number] * len(filenames_test)
# Append them to the list of all class-numbers for the test-set.
self.class_numbers_test.extend(class_numbers)
# Increase the total number of classes in the data-set.
self.num_classes += 1
def _get_filenames(self, dir):
"""
Create and return a list of filenames with matching extensions in the given directory.
:param dir:
Directory to scan for files. Sub-dirs are not scanned.
:return:
List of filenames. Only filenames. Does not include the directory.
"""
# Initialize empty list.
filenames = []
# If the directory exists.
if os.path.exists(dir):
# Get all the filenames with matching extensions.
for filename in os.listdir(dir):
if filename.lower().endswith(self.exts):
filenames.append(filename)
return filenames
def get_paths(self, test=False):
"""
Get the full paths for the files in the data-set.
:param test:
Boolean. Return the paths for the test-set (True) or training-set (False).
:return:
Iterator with strings for the path-names.
"""
if test:
# Use the filenames and class-numbers for the test-set.
filenames = self.filenames_test
class_numbers = self.class_numbers_test
# Sub-dir for test-set.
test_dir = "test/"
else:
# Use the filenames and class-numbers for the training-set.
filenames = self.filenames
class_numbers = self.class_numbers
# Don't use a sub-dir for test-set.
test_dir = ""
for filename, cls in zip(filenames, class_numbers):
# Full path-name for the file.
path = os.path.join(self.in_dir, self.class_names[cls], test_dir, filename)
yield path
def get_training_set(self):
"""
Return the list of paths for the files in the training-set,
and the list of class-numbers as integers,
and the class-numbers as one-hot encoded arrays.
"""
return list(self.get_paths()), \
np.asarray(self.class_numbers), \
one_hot_encoded(class_numbers=self.class_numbers,
num_classes=self.num_classes)
def get_test_set(self):
"""
Return the list of paths for the files in the test-set,
and the list of class-numbers as integers,
and the class-numbers as one-hot encoded arrays.
"""
return list(self.get_paths(test=True)), \
np.asarray(self.class_numbers_test), \
one_hot_encoded(class_numbers=self.class_numbers_test,
num_classes=self.num_classes)
########################################################################
def load_cached(cache_path, in_dir):
"""
Wrapper-function for creating a DataSet-object, which will be
loaded from a cache-file if it already exists, otherwise a new
object will be created and saved to the cache-file.
This is useful if you need to ensure the ordering of the
filenames is consistent every time you load the data-set,
for example if you use the DataSet-object in combination
with Transfer Values saved to another cache-file, see e.g.
Tutorial #09 for an example of this.
:param cache_path:
File-path for the cache-file.
:param in_dir:
Root-dir for the files in the data-set.
This is an argument for the DataSet-init function.
:return:
The DataSet-object.
"""
print("Creating dataset from the files in: " + in_dir)
# If the object-instance for DataSet(in_dir=data_dir) already
# exists in the cache-file then reload it, otherwise create
# an object instance and save it to the cache-file for next time.
dataset = cache(cache_path=cache_path,
fn=DataSet, in_dir=in_dir)
return dataset
########################################################################
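########################################################################
# Example usage (a sketch following the knifey-spoony layout described
# above; the cache filename is arbitrary):
#
#   dataset = load_cached(cache_path='knifey-spoony/knifey-spoony.pkl',
#                         in_dir='knifey-spoony/')
#   image_paths_train, cls_train, labels_train = dataset.get_training_set()
#   image_paths_test, cls_test, labels_test = dataset.get_test_set()
########################################################################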
|
py | b4099d38ad5f0a66a7f97b3b6cccc431cc4521bb | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Reboiler model for distillation.
While the reboiler model is fairly simple, a major portion of this code has
gone into making it generic and able to handle different state variables and
the associated splits.
"""
__author__ = "Jaffer Ghouse"
from pandas import DataFrame
# Import Pyomo libraries
from pyomo.common.config import ConfigBlock, ConfigValue, In
from pyomo.network import Port
from pyomo.environ import Reference, Expression, Var, Constraint, \
value, Set, SolverFactory
# Import IDAES cores
import idaes.logger as idaeslog
from idaes.core import (ControlVolume0DBlock,
declare_process_block_class,
EnergyBalanceType,
MomentumBalanceType,
MaterialBalanceType,
UnitModelBlockData,
useDefault)
from idaes.core.util.config import is_physical_parameter_block
from idaes.core.util.exceptions import PropertyPackageError, \
PropertyNotSupportedError, ConfigurationError
from idaes.core.util import get_solver
from idaes.core.util.model_statistics import degrees_of_freedom
_log = idaeslog.getIdaesLogger(__name__)
@declare_process_block_class("Reboiler")
class ReboilerData(UnitModelBlockData):
"""
Reboiler unit for distillation model.
Unit model to reboil the liquid from the bottom tray of
the distillation column.
"""
CONFIG = UnitModelBlockData.CONFIG()
CONFIG.declare("has_boilup_ratio", ConfigValue(
default=False,
domain=In([True, False]),
description="Boilup ratio term construction flag",
doc="""Indicates whether terms for boilup ratio should be
constructed,
**default** - False.
**Valid values:** {
**True** - include construction of boilup ratio constraint,
**False** - exclude construction of boilup ratio constraint}"""))
CONFIG.declare("material_balance_type", ConfigValue(
default=MaterialBalanceType.useDefault,
domain=In(MaterialBalanceType),
description="Material balance construction flag",
doc="""Indicates what type of mass balance should be constructed,
**default** - MaterialBalanceType.componentPhase.
**Valid values:** {
**MaterialBalanceType.none** - exclude material balances,
**MaterialBalanceType.componentPhase** - use phase component balances,
**MaterialBalanceType.componentTotal** - use total component balances,
**MaterialBalanceType.elementTotal** - use total element balances,
**MaterialBalanceType.total** - use total material balance.}"""))
CONFIG.declare("energy_balance_type", ConfigValue(
default=EnergyBalanceType.useDefault,
domain=In(EnergyBalanceType),
description="Energy balance construction flag",
doc="""Indicates what type of energy balance should be constructed,
**default** - EnergyBalanceType.enthalpyTotal.
**Valid values:** {
**EnergyBalanceType.none** - exclude energy balances,
**EnergyBalanceType.enthalpyTotal** - single enthalpy balance for material,
**EnergyBalanceType.enthalpyPhase** - enthalpy balances for each phase,
**EnergyBalanceType.energyTotal** - single energy balance for material,
**EnergyBalanceType.energyPhase** - energy balances for each phase.}"""))
CONFIG.declare("momentum_balance_type", ConfigValue(
default=MomentumBalanceType.pressureTotal,
domain=In(MomentumBalanceType),
description="Momentum balance construction flag",
doc="""Indicates what type of momentum balance should be constructed,
**default** - MomentumBalanceType.pressureTotal.
**Valid values:** {
**MomentumBalanceType.none** - exclude momentum balances,
**MomentumBalanceType.pressureTotal** - single pressure balance for material,
**MomentumBalanceType.pressurePhase** - pressure balances for each phase,
**MomentumBalanceType.momentumTotal** - single momentum balance for material,
**MomentumBalanceType.momentumPhase** - momentum balances for each phase.}"""))
CONFIG.declare("has_pressure_change", ConfigValue(
default=False,
domain=In([True, False]),
description="Pressure change term construction flag",
doc="""Indicates whether terms for pressure change should be
constructed,
**default** - False.
**Valid values:** {
**True** - include pressure change terms,
**False** - exclude pressure change terms.}"""))
CONFIG.declare("property_package", ConfigValue(
default=useDefault,
domain=is_physical_parameter_block,
description="Property package to use for control volume",
doc="""Property parameter object used to define property calculations,
**default** - useDefault.
**Valid values:** {
**useDefault** - use default package from parent model or flowsheet,
**PropertyParameterObject** - a PropertyParameterBlock object.}"""))
CONFIG.declare("property_package_args", ConfigBlock(
implicit=True,
description="Arguments to use for constructing property packages",
doc="""A ConfigBlock with arguments to be passed to a property block(s)
and used when constructing these,
**default** - None.
**Valid values:** {
see property package for documentation.}"""))
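# A minimal construction sketch (names are illustrative; assumes an IDAES
# flowsheet m.fs with a configured property package m.fs.props):
#
#   m.fs.reboiler = Reboiler(default={
#       "property_package": m.fs.props,
#       "has_boilup_ratio": True})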
def build(self):
"""Build the model.
Args:
None
Returns:
None
"""
# Setup model build logger
model_log = idaeslog.getModelLogger(self.name, tag="unit")
# Call UnitModel.build to setup dynamics
super(ReboilerData, self).build()
# Add Control Volume for the Reboiler
self.control_volume = ControlVolume0DBlock(default={
"dynamic": self.config.dynamic,
"has_holdup": self.config.has_holdup,
"property_package": self.config.property_package,
"property_package_args": self.config.property_package_args})
self.control_volume.add_state_blocks(
has_phase_equilibrium=True)
self.control_volume.add_material_balances(
balance_type=self.config.material_balance_type,
has_phase_equilibrium=True)
self.control_volume.add_energy_balances(
balance_type=self.config.energy_balance_type,
has_heat_transfer=True)
self.control_volume.add_momentum_balances(
balance_type=self.config.momentum_balance_type,
has_pressure_change=self.config.has_pressure_change)
# Get liquid and vapor phase objects from the property package
# to be used below. Avoids repetition.
_liquid_list = []
_vapor_list = []
for p in self.config.property_package.phase_list:
pobj = self.config.property_package.get_phase(p)
if pobj.is_vapor_phase():
_vapor_list.append(p)
elif pobj.is_liquid_phase():
_liquid_list.append(p)
else:
_liquid_list.append(p)
model_log.warning(
"A non-liquid/non-vapor phase was detected but will "
"be treated as a liquid.")
# Create a pyomo set for indexing purposes. This set is appended to
# model otherwise results in an abstract set.
self._liquid_set = Set(initialize=_liquid_list)
self._vapor_set = Set(initialize=_vapor_list)
if self.config.has_boilup_ratio is True:
self.boilup_ratio = Var(initialize=0.5,
doc="Boilup ratio for reboiler")
def rule_boilup_ratio(self, t):
if hasattr(self.control_volume.properties_out[t],
"flow_mol_phase"):
return self.boilup_ratio * \
sum(self.control_volume.properties_out[t].
flow_mol_phase[p] for p in self._liquid_set) == \
sum(self.control_volume.
properties_out[t].flow_mol_phase[p]
for p in self._vapor_set)
elif hasattr(self.control_volume.properties_out[t],
"flow_mol_phase_comp"):
return self.boilup_ratio * \
sum(self.control_volume.properties_out[t].
flow_mol_phase_comp[p, i]
for p in self._liquid_set
for i in self.control_volume.properties_out[t].
params.component_list) == \
sum(self.control_volume.properties_out[t].
flow_mol_phase_comp[p, i]
for p in self._vapor_set
for i in self.control_volume.properties_out[t].
params.component_list)
else:
raise PropertyNotSupportedError(
"Unrecognized names for flow variables encountered "
"while building the constraint for reboiler.")
self.eq_boilup_ratio = Constraint(self.flowsheet().time,
rule=rule_boilup_ratio)
self._make_ports()
self._make_splits_reboiler()
# Add object reference to variables of the control volume
# Reference to the heat duty
self.heat_duty = Reference(self.control_volume.heat[:])
# Reference to the pressure drop (if set to True)
if self.config.has_pressure_change:
self.deltaP = Reference(self.control_volume.deltaP[:])
def _make_ports(self):
# Add Ports for the reboiler
# Inlet port (the vapor from the top tray)
self.add_inlet_port()
# Outlet ports that always exist irrespective of reboiler type
self.bottoms = Port(noruleinit=True, doc="Bottoms stream.")
self.vapor_reboil = Port(noruleinit=True,
doc="Vapor outlet stream that is returned to "
"to the bottom tray.")
def _make_splits_reboiler(self):
# Get dict of Port members and names
member_list = self.control_volume.\
properties_out[0].define_port_members()
# Create references and populate the bottoms and vapor_reboil ports
for k in member_list:
local_name = member_list[k].local_name
# Create references and populate the intensive variables
if "flow" not in local_name and "frac" not in local_name \
and "enth" not in local_name:
if not member_list[k].is_indexed():
var = self.control_volume.properties_out[:].\
component(local_name)
else:
var = self.control_volume.properties_out[:].\
component(local_name)[...]
# add the reference and variable name to the bottoms port
self.bottoms.add(Reference(var), k)
# add the reference and variable name to the
# vapor outlet port
self.vapor_reboil.add(Reference(var), k)
elif "frac" in local_name:
# Mole/mass frac is typically indexed
index_set = member_list[k].index_set()
# if state var is not mole/mass frac by phase
if "phase" not in local_name:
if "mole" in local_name: # check mole basis/mass basis
# The following conditionals are required when a
# mole frac or mass frac is a state var i.e. will be
# a port member. This gets a bit tricky when handling
# non-conventional systems when you have more than one
# liquid or vapor phase. Hence, the logic here is that
# the mole frac that should be present in the liquid or
# vapor port should be computed by accounting for
# multiple liquid or vapor phases if present. For the
# classical VLE system, this holds too.
if hasattr(self.control_volume.properties_out[0],
"mole_frac_phase_comp") and \
hasattr(self.control_volume.properties_out[0],
"flow_mol_phase"):
flow_phase_comp = False
local_name_frac = "mole_frac_phase_comp"
local_name_flow = "flow_mol_phase"
elif hasattr(self.control_volume.properties_out[0],
"flow_mol_phase_comp"):
flow_phase_comp = True
local_name_flow = "flow_mol_phase_comp"
else:
raise PropertyNotSupportedError(
"No mole_frac_phase_comp or flow_mol_phase or"
" flow_mol_phase_comp variables encountered "
"while building ports for the reboiler. ")
elif "mass" in local_name:
if hasattr(self.control_volume.properties_out[0],
"mass_frac_phase_comp") and \
hasattr(self.control_volume.properties_out[0],
"flow_mass_phase"):
flow_phase_comp = False
local_name_frac = "mass_frac_phase_comp"
local_name_flow = "flow_mass_phase"
elif hasattr(self.control_volume.properties_out[0],
"flow_mass_phase_comp"):
flow_phase_comp = True
local_name_flow = "flow_mass_phase_comp"
else:
raise PropertyNotSupportedError(
"No mass_frac_phase_comp or flow_mass_phase or"
" flow_mass_phase_comp variables encountered "
"while building ports for the reboiler.")
else:
raise PropertyNotSupportedError(
"No mass frac or mole frac variables encountered "
" while building ports for the reboiler. "
"phase_frac as a state variable is not "
"supported with distillation unit models."
)
# Rule for liquid phase mole fraction
def rule_liq_frac(self, t, i):
if not flow_phase_comp:
sum_flow_comp = sum(
self.control_volume.properties_out[t].
component(local_name_frac)[p, i] *
self.control_volume.properties_out[t].
component(local_name_flow)[p]
for p in self._liquid_set)
return sum_flow_comp / sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p]
for p in self._liquid_set)
else:
sum_flow_comp = sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p, i]
for p in self._liquid_set)
return sum_flow_comp / sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p, i]
for p in self._liquid_set
for i in self.config.property_package.
component_list)
self.e_liq_frac = Expression(
self.flowsheet().time, index_set,
rule=rule_liq_frac)
# Rule for vapor phase mass/mole fraction
def rule_vap_frac(self, t, i):
if not flow_phase_comp:
sum_flow_comp = sum(
self.control_volume.properties_out[t].
component(local_name_frac)[p, i] *
self.control_volume.properties_out[t].
component(local_name_flow)[p]
for p in self._vapor_set)
return sum_flow_comp / sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p]
for p in self._vapor_set)
else:
sum_flow_comp = sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p, i]
for p in self._vapor_set)
return sum_flow_comp / sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p, i]
for p in self._vapor_set
for i in self.config.property_package.
component_list)
self.e_vap_frac = Expression(
self.flowsheet().time, index_set,
rule=rule_vap_frac)
# add the reference and variable name to the
                    # bottoms port
self.bottoms.add(self.e_liq_frac, k)
# add the reference and variable name to the
# vapor port
self.vapor_reboil.add(self.e_vap_frac, k)
else:
# Assumes mole_frac_phase or mass_frac_phase exist as
# state vars in the port and therefore access directly
# from the state block.
var = self.control_volume.properties_out[:].\
component(local_name)[...]
                    # add the reference and variable name to the bottoms port
self.bottoms.add(Reference(var), k)
# add the reference and variable name to the boil up port
self.vapor_reboil.add(Reference(var), k)
elif "flow" in local_name:
if "phase" not in local_name:
# Assumes that here the var is total flow or component
# flow. However, need to extract the flow by phase from
# the state block. Expects to find the var
# flow_mol_phase or flow_mass_phase in the state block.
# Check if it is not indexed by component list and this
# is total flow
if not member_list[k].is_indexed():
# if state var is not flow_mol/flow_mass
# by phase
local_name_flow = local_name + "_phase"
# Rule for vap flow
def rule_vap_flow(self, t):
return sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p]
for p in self._vapor_set)
self.e_vap_flow = Expression(
self.flowsheet().time,
rule=rule_vap_flow)
                        # Rule to link the liq flow to the bottoms
def rule_bottoms_flow(self, t):
return sum(
self.control_volume.properties_out[t].
component(local_name_flow)[p]
for p in self._liquid_set)
self.e_bottoms_flow = Expression(
self.flowsheet().time,
rule=rule_bottoms_flow)
else:
# when it is flow comp indexed by component list
str_split = local_name.split("_")
if len(str_split) == 3 and str_split[-1] == "comp":
local_name_flow = str_split[0] + "_" + \
str_split[1] + "_phase_" + "comp"
# Get the indexing set i.e. component list
index_set = member_list[k].index_set()
# Rule for vap phase flow to the vapor outlet
def rule_vap_flow(self, t, i):
return sum(self.control_volume.properties_out[t].
component(local_name_flow)[p, i]
for p in self._vapor_set)
self.e_vap_flow = Expression(
self.flowsheet().time, index_set,
rule=rule_vap_flow)
# Rule for liq phase flow to the liquid outlet
def rule_bottoms_flow(self, t, i):
return sum(self.control_volume.properties_out[t].
component(local_name_flow)[p, i]
for p in self._liquid_set)
self.e_bottoms_flow = Expression(
self.flowsheet().time, index_set,
rule=rule_bottoms_flow)
# add the reference and variable name to the
                        # bottoms port
self.bottoms.add(self.e_bottoms_flow, k)
# add the reference and variable name to the
                        # vapor outlet port
self.vapor_reboil.add(self.e_vap_flow, k)
else:
# when it is flow indexed by phase or indexed by
# both phase and component.
var = self.control_volume.properties_out[:].\
component(local_name)[...]
# add the reference and variable name to the bottoms port
self.bottoms.add(Reference(var), k)
# add the reference and variable name to the
# vapor outlet port
self.vapor_reboil.add(Reference(var), k)
elif "enth" in local_name:
if "phase" not in local_name:
# assumes total mixture enthalpy (enth_mol or enth_mass)
if not member_list[k].is_indexed():
# if state var is not enth_mol/enth_mass
# by phase, add _phase string to extract the right
# value from the state block
local_name_enth = local_name + "_phase"
else:
raise PropertyPackageError(
"Enthalpy is indexed but the variable "
"name does not reflect the presence of an index. "
"Please follow the naming convention outlined "
"in the documentation for state variables.")
# Rule for vap enthalpy. Setting the enthalpy to the
# enth_mol_phase['Vap'] value from the state block
def rule_vap_enth(self, t):
return sum(self.control_volume.properties_out[t].
component(local_name_enth)[p]
for p in self._vapor_set)
self.e_vap_enth = Expression(
self.flowsheet().time,
rule=rule_vap_enth)
                    # Rule to link the liq flow to the bottoms.
# Setting the enthalpy to the
# enth_mol_phase['Liq'] value from the state block
def rule_bottoms_enth(self, t):
return sum(self.control_volume.properties_out[t].
component(local_name_enth)[p]
for p in self._liquid_set)
self.e_bottoms_enth = Expression(
self.flowsheet().time,
rule=rule_bottoms_enth)
# add the reference and variable name to the
# distillate port
self.bottoms.add(self.e_bottoms_enth, k)
# add the reference and variable name to the
# distillate port
self.vapor_reboil.add(self.e_vap_enth, k)
elif "phase" in local_name:
# assumes enth_mol_phase or enth_mass_phase.
                    # This is an intensive property, so create a direct
                    # reference irrespective of the bottoms and vapor
                    # outlet ports
                    if not member_list[k].is_indexed():
var = self.control_volume.properties_out[:].\
component(local_name)
else:
var = self.control_volume.properties_out[:].\
component(local_name)[...]
                    # add the reference and variable name to the bottoms port
self.bottoms.add(Reference(var), k)
# add the reference and variable name to the
# vapor outlet port
self.vapor_reboil.add(Reference(var), k)
else:
raise PropertyNotSupportedError(
"Unrecognized enthalpy state variable encountered "
"while building ports for the reboiler. Only total "
"mixture enthalpy or enthalpy by phase are supported.")
def initialize(self, state_args=None, solver=None, optarg=None,
outlvl=idaeslog.NOTSET):
init_log = idaeslog.getInitLogger(self.name, outlvl, tag="unit")
solve_log = idaeslog.getSolveLogger(self.name, outlvl, tag="unit")
solverobj = get_solver(solver, optarg)
# Initialize the inlet and outlet state blocks. Calling the state
# blocks initialize methods directly so that custom set of state args
# can be passed to the inlet and outlet state blocks as control_volume
# initialize method initializes the state blocks with the same
# state conditions.
flags = self.control_volume.properties_in. \
initialize(state_args=state_args,
solver=solver,
optarg=optarg,
outlvl=outlvl,
hold_state=True)
        # Initialize outlet state block at the same conditions as the inlet except
# the temperature. Set the temperature to a temperature guess based
# on the desired boilup_ratio.
        # Get index for bubble point temperature and assume it
# will have only a single phase equilibrium pair. This is to
# support the generic property framework where the T_bubble
# is indexed by the phases_in_equilibrium. In distillation,
# the assumption is that there will only be a single pair
# i.e. vap-liq.
idx = next(iter(self.control_volume.properties_in[0].
temperature_bubble))
temp_guess = 0.5 * (
value(self.control_volume.properties_in[0].temperature_dew[idx]) -
value(self.control_volume.properties_in[0].
temperature_bubble[idx])) + \
value(self.control_volume.properties_in[0].temperature_bubble[idx])
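        # Note: temp_guess is the midpoint between the feed bubble and dew
        # point temperatures. As a purely illustrative (made-up) example,
        # T_bubble = 350 K and T_dew = 370 K would give
        # 0.5 * (370 - 350) + 350 = 360 K.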
state_args_outlet = {}
state_dict_outlet = (
self.control_volume.properties_in[
self.flowsheet().time.first()]
.define_port_members())
for k in state_dict_outlet.keys():
if state_dict_outlet[k].is_indexed():
state_args_outlet[k] = {}
for m in state_dict_outlet[k].keys():
state_args_outlet[k][m] = value(state_dict_outlet[k][m])
else:
if k != "temperature":
state_args_outlet[k] = value(state_dict_outlet[k])
else:
state_args_outlet[k] = temp_guess
self.control_volume.properties_out.initialize(
state_args=state_args_outlet,
solver=solver,
optarg=optarg,
outlvl=outlvl,
hold_state=False)
if degrees_of_freedom(self) == 0:
with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
res = solverobj.solve(self, tee=slc.tee)
init_log.info(
"Initialization Complete, {}.".format(idaeslog.condition(res))
)
else:
raise ConfigurationError(
"State vars fixed but degrees of freedom "
"for reboiler is not zero during "
"initialization. Please ensure that the boilup_ratio "
"or the outlet temperature is fixed.")
self.control_volume.properties_in.\
release_state(flags=flags, outlvl=outlvl)
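    # Illustrative usage sketch (assumes a flowsheet model "m.fs" with a
    # reboiler block attached and a compatible property package; the
    # flowsheet names are hypothetical, not defined in this module):
    #   m.fs.reboiler.boilup_ratio.fix(1)
    #   m.fs.reboiler.initialize(outlvl=idaeslog.INFO)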
def _get_performance_contents(self, time_point=0):
var_dict = {}
if hasattr(self, "heat_duty"):
var_dict["Heat Duty"] = self.heat_duty[time_point]
return {"vars": var_dict}
def _get_stream_table_contents(self, time_point=0):
stream_attributes = {}
stream_dict = {"Inlet": "inlet",
"Vapor Reboil": "vapor_reboil",
"Bottoms": "bottoms"}
for n, v in stream_dict.items():
port_obj = getattr(self, v)
stream_attributes[n] = {}
for k in port_obj.vars:
for i in port_obj.vars[k].keys():
if isinstance(i, float):
stream_attributes[n][k] = value(
port_obj.vars[k][time_point])
else:
if len(i) == 2:
kname = str(i[1])
else:
kname = str(i[1:])
stream_attributes[n][k + " " + kname] = \
value(port_obj.vars[k][time_point, i[1:]])
return DataFrame.from_dict(stream_attributes, orient="columns")
|
py | b4099db248288c1543b15b738fb31f418f201a39 | import sys
import numpy as np
from . import _backend_tk
from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
from ._backend_tk import _BackendTk, FigureCanvasTk
class FigureCanvasTkCairo(FigureCanvasCairo, FigureCanvasTk):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._renderer = RendererCairo(self.figure.dpi)
def draw(self):
width = int(self.figure.bbox.width)
height = int(self.figure.bbox.height)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self._renderer.set_ctx_from_surface(surface)
self._renderer.set_width_height(width, height)
self._renderer.dpi = self.figure.dpi
self.figure.draw(self._renderer)
buf = np.reshape(surface.get_data(), (height, width, 4))
_backend_tk.blit(
self._tkphoto, buf,
(2, 1, 0, 3) if sys.byteorder == "little" else (1, 2, 3, 0))
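        # Note: cairo FORMAT_ARGB32 stores pixels in native byte order, so on
        # little-endian machines the bytes are laid out B, G, R, A; the offset
        # tuple above simply reorders the channels into the layout expected by
        # the Tk blit helper for each byte order.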
@_BackendTk.export
class _BackendTkCairo(_BackendTk):
FigureCanvas = FigureCanvasTkCairo
|
py | b4099df5d58d34ccbb7ca84e8f5b25d96acf1c14 | from dataclasses import dataclass, field
from functools import partial
import functools
import itertools
import json
import logging
import os
import random
import time
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import torch
from transformers import (
BertForQuestionAnswering,
AutoTokenizer,
DataCollatorForSeq2Seq,
default_data_collator,
)
import pandas as pd
from datasets import load_dataset, concatenate_datasets
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from hfutils.logger import Logger
from hfutils.pipe.bert import BertPyTorchPipeForQuestionAnswering
from hfutils.calibration import temperature_scale
from hfutils.qa import prepare_validation_features, prepare_train_features
import sys
sys.path.append(".")
from plots.thresholds.utils import *
home_dir = "/mnt/raid0nvme1"
base_dir = os.path.join(home_dir, os.path.join("model-finetune", "outputs", "google"))
model_keys = [
# "XS",
# "S",
# "M",
"L",
"XL",
]
model_names = [
# "bert-tiny-5-finetuned-squadv2",
# "bert-mini-5-finetuned-squadv2",
# "bert-small-2-finetuned-squadv2",
"bert-base-uncased",
"bert-large-uncased",
]
device_map = [
# "cuda:6",
# "cuda:6",
# "cuda:6",
"cuda:6",
"cuda:6",
]
model_paths = [
# f"{home_dir}/HuggingFace/mrm8488/bert-tiny-5-finetuned-squadv2",
# f"{home_dir}/HuggingFace/mrm8488/bert-mini-5-finetuned-squadv2",
# f"{home_dir}/HuggingFace/mrm8488/bert-small-2-finetuned-squadv2",
f"{home_dir}/HuggingFace/twmkn9/bert-base-uncased-squad2",
f"{home_dir}/HuggingFace/madlag/bert-large-uncased-squadv2",
]
tokenizer = AutoTokenizer.from_pretrained(
f"{home_dir}/HuggingFace/bert-base-uncased", use_fast=True,
)
val_dataset = concatenate_datasets(
[load_dataset("squad_v2", split="validation"),load_dataset("squad_v2", split="train")]
).shuffle()
val_dataset = val_dataset.select(range(10000))
column_names = val_dataset.column_names
dataset = val_dataset.map(
functools.partial(
prepare_train_features, column_names=column_names, tokenizer=tokenizer
),
batched=True,
num_proc=10,
remove_columns=column_names,
desc="Running tokenizer on training dataset",
)
dataloader = DataLoader(
dataset,
shuffle=False,
collate_fn=default_data_collator,
batch_size=16,
drop_last=True,
)
model_paths = dict(zip(model_keys, model_paths))
model_names = dict(zip(model_keys, model_names))
model_device = dict(zip(model_keys, device_map))
@torch.no_grad()
def model_inference(model, batch, temperature=None, device="cuda:0"):
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
token_type_ids = batch["token_type_ids"].to(device)
logits = model((input_ids, token_type_ids, attention_mask))
if temperature is not None:
logits = temperature_scale(logits, temperature)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
return start_logits, end_logits
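# Illustrative usage sketch (mirrors the calibrated call made in the loop
# below; "L" is one of the configured model keys):
#   start_logits, end_logits = model_inference(
#       models["L"], batch, device=model_device["L"],
#       temperature=model_meta[model_names["L"]]["temperature"])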
with open("tests/kernel_duration/latency.json", "r") as fp:
model_latency = json.load(fp)
with open("repository/repo_bert/meta.json", "r") as fp:
model_meta = json.load(fp)
models = load_models(
model_keys,
model_paths,
model_device,
BertForQuestionAnswering,
BertPyTorchPipeForQuestionAnswering,
)
n_models = len(model_keys)
model_probs = dict(zip(model_keys, [list() for _ in range(n_models)]))
model_ans = {}
model_outputs = dict(zip(model_keys, [list() for _ in range(n_models)]))
m = torch.nn.Softmax(dim=-1)
num_labels = len(dataset)
labels = []
for batch in tqdm(dataloader, desc="Collect Train Data"):
start_positions = batch["start_positions"].flatten()
end_positions = batch["end_positions"].flatten()
for i, key in enumerate(model_keys):
start_logits, end_logits = model_inference(
models[key],
batch,
device=model_device[key],
temperature=model_meta[model_names[key]]["temperature"],
)
model_outputs[key].append(torch.stack((start_logits, end_logits), dim=1))
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
labels.append(torch.stack((start_positions, end_positions), dim=1))
def process_func(logits):
probs = m(logits)
if torch.min(probs[:, 0]) < torch.min(probs[:, 1]):
return probs[:, 0]
return probs[:, 1]
model_probs, model_ans, model_outputs, labels = postprocessing_inference(
model_keys, model_outputs, labels, process_func
)
all_thresholds = list(
itertools.product(np.linspace(0, 1, endpoint=True, num=1000), repeat=n_models - 1)
)
max_size = 100000
if len(all_thresholds) > max_size:
rnd_idx = np.random.randint(0, len(all_thresholds), max_size)
all_thresholds = [all_thresholds[i] for i in rnd_idx]
profile_thresholds(
model_keys,
model_probs,
model_ans,
model_latency,
model_names,
all_thresholds,
"bert-2-train",
)
|
py | b4099ecdadbcf825cee2ca3090d10d9e652dbecd | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import json
import time
import re
from xml.dom.minidom import Node, Document, parseString
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
AUTH = ('super/' + USERNAME, PASSWORD)
VERIFY_SSL = not demisto.params().get('unsecure', False)
HOST = demisto.params()['host']
QUERY_URL = HOST + "/phoenix/rest/query/"
REST_ADDRESS = HOST + "/phoenix/rest/h5"
EXTENDED_KEYS = {} # type: dict
def load_extended_keys():
global EXTENDED_KEYS
if demisto.command() == 'fetch-incidents':
last_run = demisto.getLastRun()
EXTENDED_KEYS = last_run.get('extended_keys', {})
else:
integration_context = demisto.getIntegrationContext()
EXTENDED_KEYS = integration_context.get('extended_keys', {})
if not EXTENDED_KEYS:
session = login()
url = REST_ADDRESS + '/eventAttributeType/all'
response = session.get(url, verify=VERIFY_SSL, auth=AUTH)
EXTENDED_KEYS = dict((attr['attributeId'], attr['displayName']) for attr in response.json())
if demisto.command() != 'fetch-incidents':
demisto.setIntegrationContext({'extended_keys': EXTENDED_KEYS})
def parse_resource_type(resource_type):
type_to_url_path = {
'Reports': 'report',
'Rules': 'rule',
'Networks': 'resource/network',
'Watch Lists': 'rule/wl',
'Protocols': 'resource/port',
'Event Type': 'eventType',
'Malware IP': 'mal/ip',
'Malware Domains': 'mal/site',
'Malware Urls': 'mal/url',
'Malware Hash': 'mal/hash',
'Malware Processes': 'mal/proc',
'Country Groups': 'resource/geo',
'Default Password': 'mal/pwd',
'Anonymity Network': 'mal/proxy',
'User Agents': 'mal/agent',
'Remediations': 'remediation',
}
return type_to_url_path.get(resource_type, resource_type)
@logger
def validateSuccessfulResponse(resp, error_text):
if resp.status_code != 200:
return_error('Got response status {} when {}'.format(resp.status_code, error_text))
@logger
def login():
session = requests.session()
login_url = HOST + '/phoenix/login-html.jsf'
response = session.get(login_url, verify=VERIFY_SSL)
# get the VIEW_STATE from the xml returned in the UI login page.
p = re.compile('(value=".{1046}==")')
    viewState = p.findall(response.text)
VIEW_STATE = viewState[0][len('value="'):][:-1]
headers = {
'Upgrade-Insecure-Requests': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,he;q=0.8'
}
data = {
'loginHtml': 'loginHtml',
'loginHtml:username': USERNAME,
'loginHtml:password': PASSWORD,
'loginHtml:userDomain': 'Empty',
'loginHtml:loginBtn': 'Log In',
'javax.faces.ViewState': VIEW_STATE
}
response = session.post(login_url, headers=headers, data=data, verify=VERIFY_SSL) # type: ignore
return session
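# Illustrative usage sketch: the authenticated requests.Session returned by
# login() is what the REST helpers below reuse, e.g.
#   session = login()
#   resp = session.get(REST_ADDRESS + '/group/resource',
#                      verify=VERIFY_SSL, auth=AUTH)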
def clear_incident_command():
args = demisto.args()
incident_id = args['incident_id']
reason = args.get('close_reason', '')
raw_response = clear_incident(incident_id, reason)
return_outputs("Incident cleared successfully.", {}, raw_response)
@logger
def clear_incident(incident_id, reason):
session = login()
headers = {
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json'
}
response = session.put(
HOST + '/phoenix/rest/h5/incident/clear',
params={'ids': [incident_id], 'user': USERNAME},
headers=headers,
data=reason,
verify=VERIFY_SSL)
validateSuccessfulResponse(response, "triggering events report")
return response.text
@logger
def getEventsByIncident(incident_id, max_results, extended_data, max_wait_time):
session = login()
response = session.get(HOST + '/phoenix/rest/h5/report/triggerEvent?rawMsg=' + incident_id)
validateSuccessfulResponse(response, "triggering events report")
try:
jsonRes = response.json()
queryData = jsonRes[0]['right']
except (ValueError, KeyError):
return_error("Got wrong response format when triggering events report. "
"Expected a json array but got:\n" + response.text)
return getEventsByQuery(session, queryData, max_results, extended_data, max_wait_time,
"FortiSIEM events for Incident " + incident_id, incident_id=incident_id)
@logger
def getEventsByQuery(session, queryData, max_results, extended_data, max_wait_time, tableTitle, incident_id=None):
headers = {
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json'
}
response = session.post(REST_ADDRESS + '/report/run', headers=headers, data=json.dumps(queryData),
verify=VERIFY_SSL)
validateSuccessfulResponse(response, "running report")
data = response.json()
data["report"] = queryData
data = json.dumps(data)
# poll until report progress reaches 100
response = session.post(REST_ADDRESS + '/report/reportProgress', headers=headers, data=data, verify=VERIFY_SSL)
# response contain the percentage of the report loading
while response.text != "100" and max_wait_time > 0:
response = session.post(REST_ADDRESS + '/report/reportProgress', headers=headers, data=data, verify=VERIFY_SSL)
max_wait_time = int(max_wait_time) - 1
time.sleep(1)
params = {
'start': 0,
'perPage': max_results,
'allData': extended_data,
}
response = session.post(REST_ADDRESS + '/report/resultByReport', params=params, headers=headers, data=data,
verify=VERIFY_SSL)
try:
res = response.json()
eventKeys = res["headerData"]["columnNames"]
except (ValueError, KeyError):
return_error("Got wrong response format when getting report results. "
"Expected a json object but got:\n" + response.text)
# reformat results
eventData = []
md = ""
for key in res["lightValueObjects"]:
cur = {
'Event ID': key.get("naturalId", ""),
'Incident ID': incident_id,
}
for i in range(0, len(eventKeys)):
if len(key["data"]) == 0 or key["data"][0] == "No report results found.":
md = "No report results found."
break
else:
cur[eventKeys[i]] = key["data"][i]
if md != "":
# no results were found, not need to loop
break
cur["ExtendedData"] = {}
for extItem in key["extData"]:
if EXTENDED_KEYS.get(extItem["left"]) is not None:
cur[EXTENDED_KEYS.get(extItem["left"]).replace(' ', '')] = extItem["right"] # type: ignore
else:
cur["ExtendedData"][extItem["left"]] = extItem["right"]
eventData.append(cur)
md = tableToMarkdown(tableTitle, eventData, eventKeys) if md == "" else md
demisto.results({
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': res,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': {'FortiSIEM.Events(val["Event ID"] && val["Event ID"] == obj["Event ID"])': eventData}
})
@logger
def GetEventQuery():
in_xml = create_query_xml("all", interval='1')
url = QUERY_URL + "eventQuery"
headers = {'Content-Type': 'text/xml'}
resp = requests.request('POST', url, headers=headers, data=in_xml, verify=VERIFY_SSL, auth=AUTH)
validateSuccessfulResponse(resp, "fetching event query")
queryId = resp.text
if 'error code="255"' in queryId:
return_error("Got error code 255 while getting event query. Make sure the query has valid syntax")
return queryId
@logger
def GetIncidentsByOrg(queryId):
# The request will poll until the server completes the query.
url = QUERY_URL + "progress/" + queryId
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
while resp.text != '100':
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
outXML = []
if resp.text == '100':
url = QUERY_URL + 'events/' + queryId + '/0/1000'
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
content = resp.text
if content != '':
outXML.append(content)
# this code is taken directly from their documentation.
# get all results (last "page" has less than 1000 records)
p = re.compile(r'totalCount="\d+"')
mlist = p.findall(content)
if mlist and mlist[0] != '':
mm = mlist[0].replace('"', '')
m = mm.split("=")[-1]
num = 0
if int(m) > 1000:
                num = int(m) // 1000
if int(m) % 1000 > 0:
num += 1
if num > 0:
for i in range(num):
url = QUERY_URL + 'events/' + queryId + '/' + str(i * 1000 + 1) + '/1000'
resp = requests.request('GET', url, verify=VERIFY_SSL, auth=AUTH)
content = resp.text
if content != '':
outXML.append(content)
else:
sys.exit(0)
phCustId = "all"
param = dumpXML(outXML, phCustId)
return param
@logger
def create_query_xml(include_value, interval="", single_evt_value="phEventCategory=1", interval_type="Minute",
attr_list=None, limit="All"):
doc = Document()
reports = doc.createElement("Reports")
doc.appendChild(reports)
report = doc.createElement("Report")
report.setAttribute("id", "")
report.setAttribute("group", "report")
reports.appendChild(report)
name = doc.createElement("Name")
report.appendChild(name)
    name_text = doc.createTextNode("All Incidents")
    name.appendChild(name_text)
custScope = doc.createElement("CustomerScope")
custScope.setAttribute("groupByEachCustomer", "true")
report.appendChild(custScope)
include = doc.createElement("Include")
if include_value == "all":
include.setAttribute("all", "true")
custScope.appendChild(include)
else:
custScope.appendChild(include)
include_text = doc.createTextNode(include_value)
include.appendChild(include_text)
exclude = doc.createElement("Exclude")
custScope.appendChild(exclude)
description = doc.createElement("description")
report.appendChild(description)
select = doc.createElement("SelectClause")
select.setAttribute("numEntries", limit)
report.appendChild(select)
attrList = doc.createElement("AttrList")
if attr_list:
attr_text = doc.createTextNode(str(attr_list))
attrList.appendChild(attr_text)
select.appendChild(attrList)
reportInterval = doc.createElement("ReportInterval")
report.appendChild(reportInterval)
window = doc.createElement("Window")
window.setAttribute("unit", interval_type)
window.setAttribute("val", interval)
reportInterval.appendChild(window)
pattern = doc.createElement("PatternClause")
pattern.setAttribute("window", "3600")
report.appendChild(pattern)
subPattern = doc.createElement("SubPattern")
subPattern.setAttribute("displayName", "Events")
subPattern.setAttribute("name", "Events")
pattern.appendChild(subPattern)
single = doc.createElement("SingleEvtConstr")
subPattern.appendChild(single)
single_text = doc.createTextNode(single_evt_value)
single.appendChild(single_text)
_filter = doc.createElement("RelevantFilterAttr")
report.appendChild(_filter)
return doc.toxml()
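# Illustrative sketch of the XML produced above (abridged; attribute values
# depend on the arguments passed in):
#   <Reports><Report group="report" id="">
#     <Name>All Incidents</Name>
#     <CustomerScope groupByEachCustomer="true"><Include all="true"/>...</CustomerScope>
#     <SelectClause numEntries="All"><AttrList/></SelectClause>
#     <ReportInterval><Window unit="Minute" val="1"/></ReportInterval>
#     <PatternClause window="3600"><SubPattern name="Events" ...>
#       <SingleEvtConstr>phEventCategory=1</SingleEvtConstr>
#     </SubPattern></PatternClause>
#   </Report></Reports>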
@logger
def dumpXML(xmlList, phCustId):
param = []
for xml in xmlList:
doc = parseString(xml.encode('utf-8'))
for node in doc.getElementsByTagName("events"):
for node1 in node.getElementsByTagName("event"):
mapping = {}
for node2 in node1.getElementsByTagName("attributes"):
for node3 in node2.getElementsByTagName("attribute"):
item_name = node3.getAttribute("name")
for node4 in node3.childNodes:
if node4.nodeType == Node.TEXT_NODE:
mapping[item_name] = node4.data
if phCustId == "all" or mapping['phCustId'] == phCustId:
param.append(mapping)
return param
@logger
def buildQueryString(args):
res_list = []
for key in args:
if 'IpAddr' not in key:
res_list.append('{} = "{}"'.format(key, args[key]))
else:
res_list.append("{} = {}".format(key, args[key]))
return " AND ".join(res_list)
@logger
def getEventsByFilter(maxResults, extendedData, maxWaitTime, reportWindow, reportWindowUnit):
session = login()
args = demisto.args()
del args["maxResults"]
del args["extendedData"]
del args["maxWaitTime"]
del args["reportWindow"]
del args["reportWindowUnit"]
query_string = buildQueryString(args)
query_data = {
"isReportService": True,
"selectClause": "phRecvTime,reptDevIpAddr,eventType,eventName,rawEventMsg,destIpAddr",
"reportWindow": int(reportWindow),
"reportWindowUnit": reportWindowUnit,
"timeRangeRelative": True,
"eventFilters": [{
"groupBy": "",
"singleConstraint": query_string
}],
"custId": 1
}
return getEventsByQuery(
session,
query_data,
maxResults,
extendedData,
maxWaitTime,
"FortiSIEM Event Results")
def parse_cmdb_list(cmdb_device):
device_dict = {
'DiscoverMethod': cmdb_device.get('discoverMethod', 'N/A'),
'Approved': cmdb_device.get('approved', 'false'),
'CreationMethod': cmdb_device.get('creationMethod', 'N/A'),
'AccessIp': cmdb_device.get('accessIp', 'N/A'),
'Name': cmdb_device.get('name', 'N/A'),
'WinMachineGuid': cmdb_device.get('winMachineGuid', 'N/A'),
'Unmanaged': cmdb_device.get('unmanaged', 'false'),
'Version': cmdb_device.get('version', 'N/A'),
'UpdateMethod': cmdb_device.get('updateMethod', 'N/A'),
}
timestamp = cmdb_device.get('discoverTime', None)
if timestamp and timestamp.isdigit():
device_dict['DiscoverTime'] = timestamp_to_datestring(timestamp)
elif timestamp:
device_dict['DiscoverTime'] = timestamp
else:
device_dict['DiscoverTime'] = 'N/A'
device_type = cmdb_device.get('deviceType')
if device_type:
device_dict['DeviceType'] = "{} {}".format(device_type['model'], device_type['vendor'])
else:
device_dict['DeviceType'] = 'N/A'
return device_dict
def get_cmdb_devices_command():
args = demisto.args()
device_ip = args.get('device_ip')
limit = int(args.get('limit'))
raw_response = get_cmdb_devices(device_ip, limit)
list_of_devices = list(map(parse_cmdb_list, raw_response))
return_outputs(
tableToMarkdown("Devices", list_of_devices),
{'FortiSIEM.CmdbDevices': list_of_devices},
raw_response
)
@logger
def get_cmdb_devices(device_ip=None, limit=100):
cmdb_url = HOST + "/phoenix/rest/cmdbDeviceInfo/devices"
if device_ip:
cmdb_url += "?includeIps=" + device_ip
response = requests.get(cmdb_url, verify=VERIFY_SSL, auth=AUTH)
list_of_devices = json.loads(xml2json(response.text))
if 'response' in list_of_devices:
return_error(list_of_devices["response"]["error"]["description"])
elif 'devices' in list_of_devices:
list_of_devices = list_of_devices['devices']['device']
elif 'device' in list_of_devices:
list_of_devices = [list_of_devices['device']]
return list_of_devices[:limit]
@logger
def get_events_by_query(query, report_window="60", interval_type="Minute", limit="20", extended_data='false',
max_wait_time=60):
session = login()
query_data = {
"isReportService": True,
"selectClause": "phRecvTime,reptDevIpAddr,eventType,eventName,rawEventMsg,destIpAddr",
"reportWindow": int(report_window),
"reportWindowUnit": interval_type,
"timeRangeRelative": True,
"eventFilters": [{
"groupBy": "",
"singleConstraint": query
}],
"custId": 1
}
return getEventsByQuery(
session,
query_data,
limit,
extended_data,
max_wait_time,
"FortiSIEM Event Results")
def get_lists_command():
raw_resources = get_lists()
resources = []
for r in flatten_resources(raw_resources):
resources.append({
'DisplayName': r['displayName'],
'NatualID': r['naturalId'],
'ID': r['id'],
'ResourceType': r['groupType']['displayName'],
'Children': [c['displayName'] for c in r['children']],
})
return_outputs(
tableToMarkdown('Lists:', resources, removeNull=True),
{'FortiSIEM.ResourceList(val.ID && val.ID == obj.ID)': resources},
raw_response=raw_resources)
@logger
def get_lists():
session = login()
url = REST_ADDRESS + '/group/resource'
response = session.get(url, verify=VERIFY_SSL, auth=AUTH)
return response.json()
def flatten_resources(raw_resources):
for r in raw_resources:
yield r
# possible stackoverflow
for sub_resource in flatten_resources(r['children']):
yield sub_resource
def add_item_to_resource_list_command():
args = demisto.args()
resource_type = parse_resource_type(args['resource_type'])
group_id = args['group_id']
object_info = args.get('object-info', [])
object_info = dict(object_property.strip().split('=', 1) for object_property in object_info.split(','))
raw_response = add_item_to_resource_list(resource_type, group_id, object_info)
outputs = {'FortiSIEM.Resource(val.id && val.id == obj.id)': createContext(raw_response, removeNull=True)}
return_outputs(tableToMarkdown('Resource was added:', raw_response, removeNull=True), outputs, raw_response)
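# Illustrative example (hypothetical values): an object-info argument such as
#   "name=badIP,lowIp=1.1.1.1,highIp=1.1.1.1"
# is parsed by the command above into
#   {"name": "badIP", "lowIp": "1.1.1.1", "highIp": "1.1.1.1"}
# before being posted to the resource list.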
@logger
def add_item_to_resource_list(resource_type, group_id, object_info):
session = login()
url = '{}/{}/save'.format(REST_ADDRESS, resource_type)
object_info['groupId'] = group_id
object_info['active'] = True
object_info['sysDefined'] = False
response = session.post(url, data=json.dumps(object_info), verify=VERIFY_SSL, auth=AUTH)
response = response.json()
if response.get('code', 0) == -1:
return_error(response['msg'])
return response
def remove_item_from_resource_list_command():
args = demisto.args()
resource_type = parse_resource_type(args['resource_type'])
deleted_ids = args.get('ids', '').split(',')
raw_response = remove_item_from_resource_list(resource_type, deleted_ids)
return_outputs(raw_response, {}, raw_response=raw_response)
@logger
def remove_item_from_resource_list(resource_type, deleted_ids):
session = login()
url = '{}/{}/del'.format(REST_ADDRESS, resource_type)
response = session.delete(url, params={'ids': json.dumps(deleted_ids)}, verify=VERIFY_SSL, auth=AUTH)
if response.text != '"OK"':
return_error(response.text)
return 'items with id {} were removed.'.format(deleted_ids)
def get_resource_list_command():
args = demisto.args()
resource_type = parse_resource_type(args['resource_type'])
group_id = args['group_id']
raw_response = get_resource_list(resource_type, group_id)
headers = raw_response.get('headerData', {}).get('keys', [])
ec = []
for element in raw_response.get('lightValueObjects', []):
e = dict(zip(headers, element.get('data', [])))
e['id'] = element.get('objectId')
ec.append(e)
outputs = {'FortiSIEM.Resource(val.id && val.id == obj.id)': createContext(ec, removeNull=True)}
return_outputs(tableToMarkdown('Resource list:', ec, headerTransform=pascalToSpace, removeNull=True),
outputs,
raw_response)
@logger
def get_resource_list(resource_type, group_id):
session = login()
url = '{}/{}/list'.format(REST_ADDRESS, resource_type)
params = {
'groupId': group_id,
'start': 0,
'size': 50,
}
response = session.get(url, params=params, verify=VERIFY_SSL, auth=AUTH)
response = response.json()
if response.get('code', 0) == -1:
return_error(response['msg'])
return response
def convert_keys_to_snake_case(d):
d = dict((k.replace("-", "_"), v) for k, v in d.items())
return d
def test():
try:
login()
except Exception as e:
if isinstance(e, requests.exceptions.SSLError):
demisto.results("Not verified certificate")
else:
demisto.results(str(e))
demisto.results('ok')
def fetch_incidents():
query_id = GetEventQuery()
res = GetIncidentsByOrg(query_id)
known_ids = demisto.getLastRun().get('ids', None)
if known_ids is None or not known_ids:
known_ids = []
incidents = []
for inc in res:
if inc.get('incidentId') not in known_ids:
incidents.append({"name": inc.get('eventName', 'New FortiSIEM Event'), "rawJSON": json.dumps(inc)})
if len(known_ids) >= 1000:
known_ids.pop(0)
known_ids.append(inc.get('incidentId'))
demisto.setLastRun({
'ids': known_ids,
'extended_keys': EXTENDED_KEYS
})
demisto.incidents(incidents)
sys.exit(0)
def main():
try:
handle_proxy()
load_extended_keys()
if demisto.command() == 'test-module':
test()
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'fortisiem-get-events-by-incident':
args = demisto.args()
getEventsByIncident(args['incID'], args['maxResults'], args['extendedData'], args['maxWaitTime'])
elif demisto.command() == 'fortisiem-clear-incident':
clear_incident_command()
elif demisto.command() == 'fortisiem-get-events-by-filter':
args = demisto.args()
getEventsByFilter(args['maxResults'], args['extendedData'], args['maxWaitTime'], args['reportWindow'],
args['reportWindowUnit'])
elif demisto.command() == 'fortisiem-get-events-by-query':
args = convert_keys_to_snake_case(demisto.args())
get_events_by_query(**args)
elif demisto.command() == 'fortisiem-get-cmdb-devices':
get_cmdb_devices_command()
elif demisto.command() == 'fortisiem-get-lists':
get_lists_command()
elif demisto.command() == 'fortisiem-add-item-to-resource-list':
add_item_to_resource_list_command()
elif demisto.command() == 'fortisiem-remove-item-from-resource-list':
remove_item_from_resource_list_command()
elif demisto.command() == 'fortisiem-get-resource-list':
get_resource_list_command()
except Exception as e:
if demisto.command() == 'fetch-incidents':
LOG(str(e))
LOG.print_log()
raise
else:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
|
py | b4099ed71b21c8c8ecd0979372179669e4c26106 | from collections import OrderedDict
import os
from authutils.oauth2.client import OAuthClient
import flask
from flask_cors import CORS
from flask_sqlalchemy_session import flask_scoped_session, current_session
from urllib.parse import urljoin
from userdatamodel.driver import SQLAlchemyDriver
from fence.auth import logout, build_redirect_url
from fence.blueprints.data.indexd import S3IndexedFileLocation
from fence.blueprints.login.utils import allowed_login_redirects, domain
from fence.errors import UserError
from fence.jwt import keys
from fence.models import migrate
from fence.oidc.client import query_client
from fence.oidc.server import server
from fence.resources.audit_service_client import AuditServiceClient
from fence.resources.aws.boto_manager import BotoManager
from fence.resources.openid.cognito_oauth2 import CognitoOauth2Client as CognitoClient
from fence.resources.openid.google_oauth2 import GoogleOauth2Client as GoogleClient
from fence.resources.openid.microsoft_oauth2 import (
MicrosoftOauth2Client as MicrosoftClient,
)
from fence.resources.openid.okta_oauth2 import OktaOauth2Client as OktaClient
from fence.resources.openid.orcid_oauth2 import OrcidOauth2Client as ORCIDClient
from fence.resources.openid.synapse_oauth2 import SynapseOauth2Client as SynapseClient
from fence.resources.openid.ras_oauth2 import RASOauth2Client as RASClient
from fence.resources.storage import StorageManager
from fence.resources.user.user_session import UserSessionInterface
from fence.error_handler import get_error_response
from fence.utils import random_str
from fence.config import config
from fence.settings import CONFIG_SEARCH_FOLDERS
import fence.blueprints.admin
import fence.blueprints.data
import fence.blueprints.login
import fence.blueprints.oauth2
import fence.blueprints.misc
import fence.blueprints.storage_creds
import fence.blueprints.user
import fence.blueprints.well_known
import fence.blueprints.link
import fence.blueprints.google
import fence.blueprints.privacy
from cdislogging import get_logger
from cdispyutils.config import get_value
from gen3authz.client.arborist.client import ArboristClient
# Can't read config yet. Just set to debug for now, else no handlers.
# Later, in app_config(), will actually set level based on config
logger = get_logger(__name__, log_level="debug")
app = flask.Flask(__name__)
CORS(app=app, headers=["content-type", "accept"], expose_headers="*")
def warn_about_logger():
raise Exception(
"Flask 0.12 will remove and replace all of our log handlers if you call "
"app.logger anywhere. Use get_logger from cdislogging instead."
)
def app_init(
app,
settings="fence.settings",
root_dir=None,
config_path=None,
config_file_name=None,
):
app.__dict__["logger"] = warn_about_logger
app_config(
app,
settings=settings,
root_dir=root_dir,
config_path=config_path,
file_name=config_file_name,
)
app_sessions(app)
app_register_blueprints(app)
server.init_app(app, query_client=query_client)
def app_sessions(app):
app.url_map.strict_slashes = False
app.db = SQLAlchemyDriver(config["DB"])
# TODO: we will make a more robust migration system external from the application
# initialization soon
if config["ENABLE_DB_MIGRATION"]:
logger.info("Running database migration...")
migrate(app.db)
logger.info("Done running database migration.")
else:
logger.info("NOT running database migration.")
session = flask_scoped_session(app.db.Session, app) # noqa
app.session_interface = UserSessionInterface()
def app_register_blueprints(app):
app.register_blueprint(fence.blueprints.oauth2.blueprint, url_prefix="/oauth2")
app.register_blueprint(fence.blueprints.user.blueprint, url_prefix="/user")
creds_blueprint = fence.blueprints.storage_creds.make_creds_blueprint()
app.register_blueprint(creds_blueprint, url_prefix="/credentials")
app.register_blueprint(fence.blueprints.admin.blueprint, url_prefix="/admin")
app.register_blueprint(
fence.blueprints.well_known.blueprint, url_prefix="/.well-known"
)
login_blueprint = fence.blueprints.login.make_login_blueprint(app)
app.register_blueprint(login_blueprint, url_prefix="/login")
link_blueprint = fence.blueprints.link.make_link_blueprint()
app.register_blueprint(link_blueprint, url_prefix="/link")
google_blueprint = fence.blueprints.google.make_google_blueprint()
app.register_blueprint(google_blueprint, url_prefix="/google")
app.register_blueprint(
fence.blueprints.privacy.blueprint, url_prefix="/privacy-policy"
)
fence.blueprints.misc.register_misc(app)
@app.route("/")
def root():
"""
Register the root URL.
"""
endpoints = {
"oauth2 endpoint": "/oauth2",
"user endpoint": "/user",
"keypair endpoint": "/credentials",
}
return flask.jsonify(endpoints)
@app.route("/logout")
def logout_endpoint():
root = config.get("BASE_URL", "")
request_next = flask.request.args.get("next", root)
force_era_global_logout = (
flask.request.args.get("force_era_global_logout") == "true"
)
if request_next.startswith("https") or request_next.startswith("http"):
next_url = request_next
else:
next_url = build_redirect_url(config.get("ROOT_URL", ""), request_next)
if domain(next_url) not in allowed_login_redirects():
raise UserError("invalid logout redirect URL: {}".format(next_url))
return logout(
next_url=next_url, force_era_global_logout=force_era_global_logout
)
@app.route("/jwt/keys")
def public_keys():
"""
Return the public keys which can be used to verify JWTs signed by fence.
The return value should look like this:
{
"keys": [
{
"key-01": " ... [public key here] ... "
}
]
}
"""
return flask.jsonify(
{"keys": [(keypair.kid, keypair.public_key) for keypair in app.keypairs]}
)
def _check_s3_buckets(app):
"""
Function to ensure that all s3_buckets have a valid credential.
    Additionally, if there is no region it will produce a warning and then try to fetch and cache the region.
"""
buckets = config.get("S3_BUCKETS") or {}
aws_creds = config.get("AWS_CREDENTIALS") or {}
for bucket_name, bucket_details in buckets.items():
cred = bucket_details.get("cred")
region = bucket_details.get("region")
if not cred:
raise ValueError(
"No cred for S3_BUCKET: {}. cred is required.".format(bucket_name)
)
# if this is a public bucket, Fence will not try to sign the URL
# so it won't need to know the region.
if cred == "*":
continue
if cred not in aws_creds:
raise ValueError(
"Credential {} for S3_BUCKET {} is not defined in AWS_CREDENTIALS".format(
cred, bucket_name
)
)
# only require region when we're not specifying an
# s3-compatible endpoint URL (ex: no need for region when using cleversafe)
if not region and not bucket_details.get("endpoint_url"):
logger.warning(
"WARNING: no region for S3_BUCKET: {}. Providing the region will reduce"
" response time and avoid a call to GetBucketLocation which you make lack the AWS ACLs for.".format(
bucket_name
)
)
credential = S3IndexedFileLocation.get_credential_to_access_bucket(
bucket_name,
aws_creds,
config.get("MAX_PRESIGNED_URL_TTL", 3600),
app.boto,
)
if not getattr(app, "boto"):
logger.warning(
"WARNING: boto not setup for app, probably b/c "
"nothing in AWS_CREDENTIALS. Cannot attempt to get bucket "
"bucket regions."
)
return
region = app.boto.get_bucket_region(bucket_name, credential)
config["S3_BUCKETS"][bucket_name]["region"] = region
def app_config(
app, settings="fence.settings", root_dir=None, config_path=None, file_name=None
):
"""
Set up the config for the Flask app.
"""
if root_dir is None:
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
logger.info("Loading settings...")
# not using app.config.from_object because we don't want all the extra flask cfg
# vars inside our singleton when we pass these through in the next step
settings_cfg = flask.Config(app.config.root_path)
settings_cfg.from_object(settings)
# dump the settings into the config singleton before loading a configuration file
config.update(dict(settings_cfg))
# load the configuration file, this overwrites anything from settings/local_settings
config.load(
config_path=config_path,
search_folders=CONFIG_SEARCH_FOLDERS,
file_name=file_name,
)
# load all config back into flask app config for now, we should PREFER getting config
# directly from the fence config singleton in the code though.
app.config.update(**config._configs)
_setup_arborist_client(app)
_setup_audit_service_client(app)
_setup_data_endpoint_and_boto(app)
_load_keys(app, root_dir)
_set_authlib_cfgs(app)
app.storage_manager = StorageManager(config["STORAGE_CREDENTIALS"], logger=logger)
app.debug = config["DEBUG"]
# Following will update logger level, propagate, and handlers
get_logger(__name__, log_level="debug" if config["DEBUG"] == True else "info")
_setup_oidc_clients(app)
_check_s3_buckets(app)
def _setup_data_endpoint_and_boto(app):
if "AWS_CREDENTIALS" in config and len(config["AWS_CREDENTIALS"]) > 0:
value = list(config["AWS_CREDENTIALS"].values())[0]
app.boto = BotoManager(value, logger=logger)
app.register_blueprint(fence.blueprints.data.blueprint, url_prefix="/data")
def _load_keys(app, root_dir):
if root_dir is None:
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
app.keypairs = keys.load_keypairs(os.path.join(root_dir, "keys"))
app.jwt_public_keys = {
config["BASE_URL"]: OrderedDict(
[(str(keypair.kid), str(keypair.public_key)) for keypair in app.keypairs]
)
}
def _set_authlib_cfgs(app):
# authlib OIDC settings
# key will need to be added
settings = {"OAUTH2_JWT_KEY": keys.default_private_key(app)}
app.config.update(settings)
config.update(settings)
# only add the following if not already provided
config.setdefault("OAUTH2_JWT_ENABLED", True)
config.setdefault("OAUTH2_JWT_ALG", "RS256")
config.setdefault("OAUTH2_JWT_ISS", app.config["BASE_URL"])
config.setdefault("OAUTH2_PROVIDER_ERROR_URI", "/api/oauth2/errors")
app.config.setdefault("OAUTH2_JWT_ENABLED", True)
app.config.setdefault("OAUTH2_JWT_ALG", "RS256")
app.config.setdefault("OAUTH2_JWT_ISS", app.config["BASE_URL"])
app.config.setdefault("OAUTH2_PROVIDER_ERROR_URI", "/api/oauth2/errors")
def _setup_oidc_clients(app):
oidc = config.get("OPENID_CONNECT", {})
# Add OIDC client for Google if configured.
if "google" in oidc:
app.google_client = GoogleClient(
config["OPENID_CONNECT"]["google"],
HTTP_PROXY=config.get("HTTP_PROXY"),
logger=logger,
)
# Add OIDC client for ORCID if configured.
if "orcid" in oidc:
app.orcid_client = ORCIDClient(
config["OPENID_CONNECT"]["orcid"],
HTTP_PROXY=config.get("HTTP_PROXY"),
logger=logger,
)
# Add OIDC client for RAS if configured.
if "ras" in oidc:
app.ras_client = RASClient(
oidc["ras"],
HTTP_PROXY=config.get("HTTP_PROXY"),
logger=logger,
)
# Add OIDC client for Synapse if configured.
if "synapse" in oidc:
app.synapse_client = SynapseClient(
oidc["synapse"], HTTP_PROXY=config.get("HTTP_PROXY"), logger=logger
)
# Add OIDC client for Microsoft if configured.
if "microsoft" in oidc:
app.microsoft_client = MicrosoftClient(
config["OPENID_CONNECT"]["microsoft"],
HTTP_PROXY=config.get("HTTP_PROXY"),
logger=logger,
)
# Add OIDC client for Okta if configured
if "okta" in oidc:
app.okta_client = OktaClient(
config["OPENID_CONNECT"]["okta"],
HTTP_PROXY=config.get("HTTP_PROXY"),
logger=logger,
)
# Add OIDC client for Amazon Cognito if configured.
if "cognito" in oidc:
app.cognito_client = CognitoClient(
oidc["cognito"], HTTP_PROXY=config.get("HTTP_PROXY"), logger=logger
)
# Add OIDC client for multi-tenant fence if configured.
if "fence" in oidc:
app.fence_client = OAuthClient(**config["OPENID_CONNECT"]["fence"])
def _setup_arborist_client(app):
if app.config.get("ARBORIST"):
app.arborist = ArboristClient(arborist_base_url=config["ARBORIST"])
def _setup_audit_service_client(app):
# Initialize the client regardless of whether audit logs are enabled. This
# allows us to call `app.audit_service_client.create_x_log()` from
# anywhere without checking if audit logs are enabled. The client
# checks that for us.
service_url = app.config.get("AUDIT_SERVICE") or urljoin(
app.config["BASE_URL"], "/audit"
)
app.audit_service_client = AuditServiceClient(
service_url=service_url, logger=logger
)
@app.errorhandler(Exception)
def handle_error(error):
"""
Register an error handler for general exceptions.
"""
return get_error_response(error)
@app.before_request
def check_csrf():
has_auth = "Authorization" in flask.request.headers
no_username = not flask.session.get("username")
if has_auth or no_username:
return
if not config.get("ENABLE_CSRF_PROTECTION", True):
return
# cookie based authentication
if flask.request.method != "GET":
csrf_header = flask.request.headers.get("x-csrf-token")
csrf_cookie = flask.request.cookies.get("csrftoken")
referer = flask.request.headers.get("referer")
logger.debug("HTTP REFERER " + str(referer))
if not all([csrf_cookie, csrf_header, csrf_cookie == csrf_header, referer]):
raise UserError("CSRF verification failed. Request aborted")
@app.after_request
def set_csrf(response):
"""
Create a cookie for CSRF protection if one does not yet exist
"""
if not flask.request.cookies.get("csrftoken"):
secure = config.get("SESSION_COOKIE_SECURE", True)
response.set_cookie("csrftoken", random_str(40), secure=secure, httponly=True)
if flask.request.method in ["POST", "PUT", "DELETE"]:
current_session.commit()
return response
|
py | b4099f32e18d758ee010fbd040e140642cb8aade | import sys
sys.path.append('..')
from utils import *
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
class OthelloNNet(nn.Module):
def __init__(self, game, args):
# game params
self.board_x, self.board_y = game.getBoardSize()
self.action_size = game.getActionSize()
self.args = args
super(OthelloNNet, self).__init__()
self.conv1 = nn.Conv2d(1, args.num_channels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(args.num_channels, args.num_channels, 3, stride=1, padding=1)
self.conv3 = nn.Conv2d(args.num_channels, args.num_channels, 3, stride=1)
self.conv4 = nn.Conv2d(args.num_channels, args.num_channels, 3, stride=1)
self.bn1 = nn.BatchNorm2d(args.num_channels)
self.bn2 = nn.BatchNorm2d(args.num_channels)
self.bn3 = nn.BatchNorm2d(args.num_channels)
self.bn4 = nn.BatchNorm2d(args.num_channels)
self.fc1 = nn.Linear(args.num_channels*(self.board_x-4)*(self.board_y-4), 1024)
self.fc_bn1 = nn.BatchNorm1d(1024)
self.fc2 = nn.Linear(1024, 512)
self.fc_bn2 = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, self.action_size)
self.fc4 = nn.Linear(512, 1)
def forward(self, s):
# s: batch_size x board_x x board_y
s = s.view(-1, 1, self.board_x, self.board_y) # batch_size x 1 x board_x x board_y
s = F.relu(self.bn1(self.conv1(s))) # batch_size x num_channels x board_x x board_y
s = F.relu(self.bn2(self.conv2(s))) # batch_size x num_channels x board_x x board_y
s = F.relu(self.bn3(self.conv3(s))) # batch_size x num_channels x (board_x-2) x (board_y-2)
s = F.relu(self.bn4(self.conv4(s))) # batch_size x num_channels x (board_x-4) x (board_y-4)
s = s.view(-1, self.args.num_channels*(self.board_x-4)*(self.board_y-4))
s = F.dropout(F.relu(self.fc_bn1(self.fc1(s))), p=self.args.dropout, training=self.training) # batch_size x 1024
s = F.dropout(F.relu(self.fc_bn2(self.fc2(s))), p=self.args.dropout, training=self.training) # batch_size x 512
pi = self.fc3(s) # batch_size x action_size
v = self.fc4(s) # batch_size x 1
return F.log_softmax(pi, dim=1), torch.tanh(v) |
py | b4099faae9374777eec3630973e76de1b7ac7313 | """
Command-line interface implementing various MDS Provider data analytics, including:
- calculate average availability over a period of time
"""
import argparse
import datetime
import statistics
import time
import mds
import measure
import query
def setup_cli():
"""
Create the cli argument interface, and parses incoming args.
Returns a tuple:
- the argument parser
- the parsed args
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--availability",
action="store_true",
help="Run the availability calculation."
)
parser.add_argument(
"--cutoff",
type=int,
default=-1,
help="Maximum allowed length of a time-windowed event (e.g. availability window, trip), in days."
)
parser.add_argument(
"--debug",
action="store_true",
help="Print debug messages."
)
parser.add_argument(
"--duration",
type=int,
help="Number of seconds; with --start_time or --end_time, defines a time query range."
)
parser.add_argument(
"--end",
type=str,
help="The end of the time query range for this request.\
Should be either int Unix seconds or ISO-8601 datetime format.\
At least one of end or start is required."
)
parser.add_argument(
"--local",
action="store_true",
help="Input and query times are local."
)
parser.add_argument(
"--query",
action="append",
type=lambda kv: kv.split("=", 1),
dest="queries",
metavar="QUERY",
help="A {provider_name}={vehicle_type} pair; multiple pairs will be analyzed separately."
)
parser.add_argument(
"--start",
type=str,
help="The beginning of the time query range for this request.\
Should be either int Unix seconds or ISO-8601 datetime format\
At least one of end or start is required."
)
parser.add_argument(
"--version",
type=lambda v: mds.Version(v),
default=mds.Version("0.2.1"),
help="The release version at which to reference MDS, e.g. 0.3.1"
)
return parser, parser.parse_args()
def parse_time_range(start=None, end=None, duration=None, version=None):
"""
Returns a valid range tuple (start, end) given an object with some mix of:
- start
- end
- duration
If both start and end are present, use those. Otherwise, compute from duration.
"""
decoder = mds.TimestampDecoder(version=version)
if start is None and end is None:
raise ValueError("At least one of start or end is required.")
if (start is None or end is None) and duration is None:
raise ValueError("duration is required when only one of start or end is given.")
if start is not None and end is not None:
return decoder.decode(start), decoder.decode(end)
if start is not None:
start = decoder.decode(start)
return start, start + datetime.timedelta(seconds=duration)
if end is not None:
end = decoder.decode(end)
return end - datetime.timedelta(seconds=duration), end
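# Illustrative examples (hypothetical values):
#   parse_time_range(start="2019-01-01T00:00:00", duration=3600, version=v)
#     -> (decoded start, start + 1 hour)
#   parse_time_range(end=1546300800, duration=86400, version=v)
#     -> (end - 1 day, decoded end)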
def log(debug, msg):
"""
Prints the message if debugging is turned on.
"""
def _now():
return datetime.datetime.utcnow().isoformat()
if debug:
print(f"[{_now()}] {msg}")
def availability(provider_name, vehicle_type, start, end, **kwargs):
"""
Runs the availability calculation
"""
debug = kwargs.get("debug")
step = datetime.timedelta(days=1)
log(debug, f"Starting calculation for {provider_name}")
while start < end:
_end = start + step
log(debug, f"Counting {start.strftime('%Y-%m-%d')} to {_end.strftime('%Y-%m-%d')}")
q = query.Availability(
start,
_end,
table="csm_availability_windows",
provider_name=provider_name,
vehicle_types=vehicle_type,
**kwargs
)
data = q.get()
log(debug, f"{len(data)} availability records in time period")
devices = measure.DeviceCounter(start, _end, **kwargs)
yield (start, _end, devices.count(data))
start = _end
if __name__ == "__main__":
arg_parser, args = setup_cli()
try:
start, end = parse_time_range(start=args.start, end=args.end, duration=args.duration, version=args.version)
except ValueError as e:
print(e)
arg_parser.print_help()
exit(1)
queries = dict(args.queries)
kwargs = vars(args)
for key in ("start", "end", "duration", "queries"):
del kwargs[key]
if args.availability:
for provider_name, vehicle_type in queries.items():
for _start, _end, count in availability(provider_name, vehicle_type, start, end, **kwargs):
print(f"{provider_name},{vehicle_type},{_start.strftime('%Y-%m-%d')},{_end.strftime('%Y-%m-%d')},{count.average()},{args.cutoff}")
else:
arg_parser.print_help()
exit(0)
|
py | b409a23a32e0612be84cb5e620decc32638b4fd7 | """
Test for the Cloud Reco Service commands.
"""
import io
import uuid
from pathlib import Path
from textwrap import dedent
from typing import List
import yaml
from click.testing import CliRunner
from mock_vws import MockVWS
from mock_vws.database import VuforiaDatabase
from vws import VWS
from vws_cli.query import vuforia_cloud_reco
class TestQuery:
"""
Tests for making image queries.
"""
def test_no_matches(
self,
mock_database: VuforiaDatabase,
tmp_path: Path,
high_quality_image: io.BytesIO,
) -> None:
"""
An empty list is returned if there are no matches.
"""
runner = CliRunner(mix_stderr=False)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands: List[str] = [
str(new_file),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
result_data = yaml.load(result.stdout, Loader=yaml.FullLoader)
assert result_data == []
def test_matches(
self,
tmp_path: Path,
high_quality_image: io.BytesIO,
vws_client: VWS,
mock_database: VuforiaDatabase,
) -> None:
"""
Details of matching targets are shown.
"""
name = uuid.uuid4().hex
target_id = vws_client.add_target(
name=name,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
runner = CliRunner(mix_stderr=False)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
result_data = yaml.load(result.stdout, Loader=yaml.FullLoader)
[matching_target] = result_data
target_timestamp = matching_target['target_data']['target_timestamp']
expected_result_data = {
'target_data': {
'application_metadata': None,
'name': name,
'target_timestamp': target_timestamp,
},
'target_id': target_id,
}
assert matching_target == expected_result_data
def test_image_file_is_dir(
self,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
An appropriate error is given if the given image file path points to a
directory.
"""
runner = CliRunner(mix_stderr=False)
commands: List[str] = [
str(tmp_path),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 2
assert result.stdout == ''
expected_stderr = dedent(
f"""\
Usage: vuforia-cloud-reco [OPTIONS] IMAGE
Try 'vuforia-cloud-reco --help' for help.
Error: Invalid value for 'IMAGE': File '{tmp_path}' is a directory.
""", # noqa: E501
)
assert result.stderr == expected_stderr
def test_relative_path(
self,
tmp_path: Path,
mock_database: VuforiaDatabase,
high_quality_image: io.BytesIO,
) -> None:
"""
Image file paths are resolved.
"""
runner = CliRunner(mix_stderr=False)
new_filename = uuid.uuid4().hex
original_image_file = tmp_path / 'foo'
image_data = high_quality_image.getvalue()
original_image_file.write_bytes(image_data)
commands: List[str] = [
str(new_filename),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
with runner.isolated_filesystem():
new_file = Path(new_filename)
new_file.symlink_to(original_image_file)
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
result_data = yaml.load(result.stdout, Loader=yaml.FullLoader)
assert result_data == []
def test_image_file_does_not_exist(
self,
mock_database: VuforiaDatabase,
tmp_path: Path,
) -> None:
"""
An appropriate error is given if the given image file does not exist.
"""
runner = CliRunner(mix_stderr=False)
does_not_exist_file = tmp_path / uuid.uuid4().hex
commands: List[str] = [
str(does_not_exist_file),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 2
assert result.stdout == ''
expected_stderr = dedent(
f"""\
Usage: vuforia-cloud-reco [OPTIONS] IMAGE
Try 'vuforia-cloud-reco --help' for help.
Error: Invalid value for 'IMAGE': File '{does_not_exist_file}' does not exist.
""", # noqa: E501
)
assert result.stderr == expected_stderr
def test_version() -> None:
"""
``vuforia-cloud-reco --version`` shows the version.
"""
runner = CliRunner(mix_stderr=False)
commands = ['--version']
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.stdout.startswith('vuforia-cloud-reco, version ')
class TestMaxNumResults:
"""
Tests for the ``--max-num-results`` option.
"""
def test_default(
self,
vws_client: VWS,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
By default the maximum number of results is 1.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_2 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
vws_client.wait_for_target_processed(target_id=target_id_2)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
result_data = yaml.load(result.stdout, Loader=yaml.FullLoader)
assert len(result_data) == 1
def test_custom(
self,
vws_client: VWS,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
It is possible to set a custom ``--max-num-results``.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_2 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_3 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
vws_client.wait_for_target_processed(target_id=target_id_2)
vws_client.wait_for_target_processed(target_id=target_id_3)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(2),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
result_data = yaml.load(result.stdout, Loader=yaml.FullLoader)
assert len(result_data) == 2
def test_out_of_range(
self,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
``--max-num-results`` must be between 1 and 50.
"""
runner = CliRunner(mix_stderr=False)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(0),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 2
expected_stderr_substring = (
"Error: Invalid value for '--max-num-results': 0 is not in the "
'valid range of 1 to 50.'
)
assert expected_stderr_substring in result.stderr
class TestIncludeTargetData:
"""
Tests for the ``--include-target-data`` option.
"""
def test_default(
self,
vws_client: VWS,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
By default, target data is only returned in the top match.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_2 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
vws_client.wait_for_target_processed(target_id=target_id_2)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(2),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
matches = yaml.load(result.stdout, Loader=yaml.FullLoader)
top_match, second_match = matches
assert top_match['target_data'] is not None
assert second_match['target_data'] is None
def test_top(
self,
vws_client: VWS,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
When 'top' is given, target data is only returned in the top match.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_2 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
vws_client.wait_for_target_processed(target_id=target_id_2)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(2),
'--include-target-data',
'top',
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
matches = yaml.load(result.stdout, Loader=yaml.FullLoader)
top_match, second_match = matches
assert top_match['target_data'] is not None
assert second_match['target_data'] is None
def test_none(
self,
vws_client: VWS,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
When 'none' is given, target data is not returned in any match.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_2 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
vws_client.wait_for_target_processed(target_id=target_id_2)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(2),
'--include-target-data',
'none',
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
matches = yaml.load(result.stdout, Loader=yaml.FullLoader)
top_match, second_match = matches
assert top_match['target_data'] is None
assert second_match['target_data'] is None
def test_all(
self,
vws_client: VWS,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
When 'all' is given, target data is returned in all matches.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
target_id_2 = vws_client.add_target(
name=uuid.uuid4().hex,
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
vws_client.wait_for_target_processed(target_id=target_id_2)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(2),
'--include-target-data',
'all',
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
matches = yaml.load(result.stdout, Loader=yaml.FullLoader)
top_match, second_match = matches
assert top_match['target_data'] is not None
assert second_match['target_data'] is not None
def test_other(
self,
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
When a string other than 'top', 'all', or 'none' is given, an error is
shown.
"""
runner = CliRunner(mix_stderr=False)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [
str(new_file),
'--max-num-results',
str(2),
'--include-target-data',
'other',
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 2
expected_stderr = (
"'--include-target-data': invalid choice: other. (choose from "
'top, none, all)'
)
assert expected_stderr in result.stderr
def test_base_vwq_url(high_quality_image: io.BytesIO, tmp_path: Path) -> None:
"""
It is possible to query a target in a database under a custom VWQ base
URL.
"""
runner = CliRunner(mix_stderr=False)
base_vwq_url = 'http://example.com'
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
with MockVWS(base_vwq_url=base_vwq_url) as mock:
mock_database = VuforiaDatabase()
mock.add_database(database=mock_database)
vws_client = VWS(
server_access_key=mock_database.server_access_key,
server_secret_key=mock_database.server_secret_key,
)
target_id = vws_client.add_target(
name='x',
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
vws_client.wait_for_target_processed(target_id=target_id)
commands = [
str(new_file),
'--client-access-key',
mock_database.client_access_key,
'--client-secret-key',
mock_database.client_secret_key,
'--base-vwq-url',
base_vwq_url,
]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
)
assert result.exit_code == 0
[match] = yaml.load(result.stdout, Loader=yaml.FullLoader)
assert match['target_id'] == target_id
def test_env_var_credentials(
high_quality_image: io.BytesIO,
tmp_path: Path,
mock_database: VuforiaDatabase,
) -> None:
"""
It is possible to use environment variables to set the credentials.
"""
runner = CliRunner(mix_stderr=False)
new_file = tmp_path / uuid.uuid4().hex
image_data = high_quality_image.getvalue()
new_file.write_bytes(data=image_data)
commands = [str(new_file)]
result = runner.invoke(
vuforia_cloud_reco,
commands,
catch_exceptions=False,
env={
'VUFORIA_CLIENT_ACCESS_KEY': mock_database.client_access_key,
'VUFORIA_CLIENT_SECRET_KEY': mock_database.client_secret_key,
},
)
assert result.exit_code == 0
|
py | b409a25f28c89f3c33a3269f854a712ad5313deb | """
Implementation of math operations on Array objects.
"""
import math
from collections import namedtuple
from enum import IntEnum
from functools import partial
import operator
import numpy as np
import llvmlite.llvmpy.core as lc
from numba import generated_jit
from numba.core import types, cgutils
from numba.core.extending import overload, overload_method, register_jitable
from numba.np.numpy_support import as_dtype, type_can_asarray
from numba.np.numpy_support import numpy_version
from numba.np.numpy_support import is_nonelike
from numba.core.imputils import (lower_builtin, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
from numba.core.typing import signature
from numba.np.arrayobj import make_array, load_item, store_item, _empty_nd_impl
from numba.np.linalg import ensure_blas
from numba.core.extending import intrinsic
from numba.core.errors import RequireLiteralValue, TypingError
def _check_blas():
# Checks if a BLAS is available so e.g. dot will work
try:
ensure_blas()
except ImportError:
return False
return True
_HAVE_BLAS = _check_blas()
@intrinsic
def _create_tuple_result_shape(tyctx, shape_list, shape_tuple):
"""
This routine converts shape list where the axis dimension has already
been popped to a tuple for indexing of the same size. The original shape
tuple is also required because it contains a length field at compile time
whereas the shape list does not.
"""
# The new tuple's size is one less than the original tuple since the axis
# dimension has been removed.
nd = len(shape_tuple) - 1
# The return type of this intrinsic is an int tuple of length nd.
tupty = types.UniTuple(types.intp, nd)
# The function signature for this intrinsic.
function_sig = tupty(shape_list, shape_tuple)
def codegen(cgctx, builder, signature, args):
lltupty = cgctx.get_value_type(tupty)
# Create an empty int tuple.
tup = cgutils.get_null_value(lltupty)
# Get the shape list from the args and we don't need shape tuple.
[in_shape, _] = args
def array_indexer(a, i):
return a[i]
# loop to fill the tuple
for i in range(nd):
dataidx = cgctx.get_constant(types.intp, i)
# compile and call array_indexer
data = cgctx.compile_internal(builder, array_indexer,
types.intp(shape_list, types.intp),
[in_shape, dataidx])
tup = builder.insert_value(tup, data, i)
return tup
return function_sig, codegen
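# Rough pure-Python equivalent of the intrinsic above (illustration only;
# the real version builds the tuple in LLVM IR, so its length must be known
# at compile time from shape_tuple):
#
#   def _create_tuple_result_shape_py(shape_list, shape_tuple):
#       nd = len(shape_tuple) - 1
#       return tuple(shape_list[i] for i in range(nd))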
@intrinsic
def _gen_index_tuple(tyctx, shape_tuple, value, axis):
"""
Generates a tuple that can be used to index a specific slice from an
array for sum with axis. shape_tuple is the size of the dimensions of
the input array. 'value' is the value to put in the indexing tuple
in the axis dimension and 'axis' is that dimension. For this to work,
axis has to be a const.
"""
if not isinstance(axis, types.Literal):
raise RequireLiteralValue('axis argument must be a constant')
# Get the value of the axis constant.
axis_value = axis.literal_value
# The length of the indexing tuple to be output.
nd = len(shape_tuple)
# If the axis value is impossible for the given array's dimensionality,
# just fake it like it was for axis 0. This stops compile errors when it
# looks like this could be called from array_sum_axis but really can't,
# because that routine checks for the axis mismatch and raises an
# exception.
if axis_value >= nd:
axis_value = 0
# Calculate the type of the indexing tuple. All the non-axis
# dimensions have slice2 type and the axis dimension has int type.
before = axis_value
after = nd - before - 1
types_list = []
types_list += [types.slice2_type] * before
types_list += [types.intp]
types_list += [types.slice2_type] * after
# Creates the output type of the function.
tupty = types.Tuple(types_list)
# Defines the signature of the intrinsic.
function_sig = tupty(shape_tuple, value, axis)
def codegen(cgctx, builder, signature, args):
lltupty = cgctx.get_value_type(tupty)
# Create an empty indexing tuple.
tup = cgutils.get_null_value(lltupty)
# We only need value of the axis dimension here.
# The rest are constants defined above.
[_, value_arg, _] = args
def create_full_slice():
return slice(None, None)
# loop to fill the tuple with slice(None,None) before
# the axis dimension.
# compile and call create_full_slice
slice_data = cgctx.compile_internal(builder, create_full_slice,
types.slice2_type(),
[])
for i in range(0, axis_value):
tup = builder.insert_value(tup, slice_data, i)
# Add the axis dimension 'value'.
tup = builder.insert_value(tup, value_arg, axis_value)
# loop to fill the tuple with slice(None,None) after
# the axis dimension.
for i in range(axis_value + 1, nd):
tup = builder.insert_value(tup, slice_data, i)
return tup
return function_sig, codegen
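# Illustration (not part of the original module): for a 3-D array with
# axis=1 and value=k, the generated tuple is equivalent to
# (slice(None, None), k, slice(None, None)), i.e. it selects arr[:, k, :].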
#----------------------------------------------------------------------------
# Basic stats and aggregates
@lower_builtin(np.sum, types.Array)
@lower_builtin("array.sum", types.Array)
def array_sum(context, builder, sig, args):
zero = sig.return_type(0)
def array_sum_impl(arr):
c = zero
for v in np.nditer(arr):
c += v.item()
return c
res = context.compile_internal(builder, array_sum_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@register_jitable
def _array_sum_axis_nop(arr, v):
return arr
def gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero):
def inner(arr, axis):
"""
Function that performs a sum over one specific axis.
The third parameter to gen_index_tuple that generates the indexing
tuples has to be a const so we can't just pass "axis" through since
that isn't const. We can check for specific values and have
different instances that do take consts. Supporting axis summation
only up to the fourth dimension for now.
typing/arraydecl.py:sum_expand defines the return type for sum with
axis. It is one dimension less than the input array.
"""
ndim = arr.ndim
if not is_axis_const:
# Catch where axis is negative or greater than 3.
if axis < 0 or axis > 3:
raise ValueError("Numba does not support sum with axis "
"parameter outside the range 0 to 3.")
# Catch the case where the user misspecifies the axis to be
# more than the number of the array's dimensions.
if axis >= ndim:
raise ValueError("axis is out of bounds for array")
# Convert the shape of the input array to a list.
ashape = list(arr.shape)
# Get the length of the axis dimension.
axis_len = ashape[axis]
# Remove the axis dimension from the list of dimensional lengths.
ashape.pop(axis)
# Convert this shape list back to a tuple using above intrinsic.
ashape_without_axis = _create_tuple_result_shape(ashape, arr.shape)
# Tuple needed here to create output array with correct size.
result = np.full(ashape_without_axis, zero, type(zero))
# Iterate through the axis dimension.
for axis_index in range(axis_len):
if is_axis_const:
# constant specialized version works for any valid axis value
index_tuple_generic = _gen_index_tuple(arr.shape, axis_index,
const_axis_val)
result += arr[index_tuple_generic]
else:
# Generate a tuple used to index the input array.
# The tuple is ":" in all dimensions except the axis
# dimension where it is "axis_index".
if axis == 0:
index_tuple1 = _gen_index_tuple(arr.shape, axis_index, 0)
result += arr[index_tuple1]
elif axis == 1:
index_tuple2 = _gen_index_tuple(arr.shape, axis_index, 1)
result += arr[index_tuple2]
elif axis == 2:
index_tuple3 = _gen_index_tuple(arr.shape, axis_index, 2)
result += arr[index_tuple3]
elif axis == 3:
index_tuple4 = _gen_index_tuple(arr.shape, axis_index, 3)
result += arr[index_tuple4]
return op(result, 0)
return inner
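# Illustration of the accumulation strategy above (not part of the original
# module): summing a 2-D array over axis 0 by accumulating row slices gives
# the same result as NumPy's reduction, e.g.
#
#   a = np.arange(6).reshape(2, 3)
#   a[0] + a[1]              # -> array([3, 5, 7])
#   np.sum(a, axis=0)        # -> array([3, 5, 7])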
@lower_builtin(np.sum, types.Array, types.intp, types.DTypeSpec)
@lower_builtin(np.sum, types.Array, types.IntegerLiteral, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.intp, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.IntegerLiteral, types.DTypeSpec)
def array_sum_axis_dtype(context, builder, sig, args):
retty = sig.return_type
zero = getattr(retty, 'dtype', retty)(0)
# if the return is scalar in type then "take" the 0th element of the
# 0d array accumulator as the return value
if getattr(retty, 'ndim', None) is None:
op = np.take
else:
op = _array_sum_axis_nop
[ty_array, ty_axis, ty_dtype] = sig.args
is_axis_const = False
const_axis_val = 0
if isinstance(ty_axis, types.Literal):
# this special-cases for constant axis
const_axis_val = ty_axis.literal_value
# fix negative axis
if const_axis_val < 0:
const_axis_val = ty_array.ndim + const_axis_val
if const_axis_val < 0 or const_axis_val > ty_array.ndim:
raise ValueError("'axis' entry is out of bounds")
ty_axis = context.typing_context.resolve_value_type(const_axis_val)
axis_val = context.get_constant(ty_axis, const_axis_val)
# rewrite arguments
args = args[0], axis_val, args[2]
# rewrite sig
sig = sig.replace(args=[ty_array, ty_axis, ty_dtype])
is_axis_const = True
gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero)
compiled = register_jitable(gen_impl)
def array_sum_impl_axis(arr, axis, dtype):
return compiled(arr, axis)
res = context.compile_internal(builder, array_sum_impl_axis, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.sum, types.Array, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.DTypeSpec)
def array_sum_dtype(context, builder, sig, args):
zero = sig.return_type(0)
def array_sum_impl(arr, dtype):
c = zero
for v in np.nditer(arr):
c += v.item()
return c
res = context.compile_internal(builder, array_sum_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.sum, types.Array, types.intp)
@lower_builtin(np.sum, types.Array, types.IntegerLiteral)
@lower_builtin("array.sum", types.Array, types.intp)
@lower_builtin("array.sum", types.Array, types.IntegerLiteral)
def array_sum_axis(context, builder, sig, args):
retty = sig.return_type
zero = getattr(retty, 'dtype', retty)(0)
# if the return is scalar in type then "take" the 0th element of the
# 0d array accumulator as the return value
if getattr(retty, 'ndim', None) is None:
op = np.take
else:
op = _array_sum_axis_nop
[ty_array, ty_axis] = sig.args
is_axis_const = False
const_axis_val = 0
if isinstance(ty_axis, types.Literal):
# this special-cases for constant axis
const_axis_val = ty_axis.literal_value
# fix negative axis
if const_axis_val < 0:
const_axis_val = ty_array.ndim + const_axis_val
if const_axis_val < 0 or const_axis_val > ty_array.ndim:
raise ValueError("'axis' entry is out of bounds")
ty_axis = context.typing_context.resolve_value_type(const_axis_val)
axis_val = context.get_constant(ty_axis, const_axis_val)
# rewrite arguments
args = args[0], axis_val
# rewrite sig
sig = sig.replace(args=[ty_array, ty_axis])
is_axis_const = True
gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero)
compiled = register_jitable(gen_impl)
def array_sum_impl_axis(arr, axis):
return compiled(arr, axis)
res = context.compile_internal(builder, array_sum_impl_axis, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.prod, types.Array)
@lower_builtin("array.prod", types.Array)
def array_prod(context, builder, sig, args):
def array_prod_impl(arr):
c = 1
for v in np.nditer(arr):
c *= v.item()
return c
res = context.compile_internal(builder, array_prod_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.cumsum, types.Array)
@lower_builtin("array.cumsum", types.Array)
def array_cumsum(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
zero = scalar_dtype(0)
def array_cumsum_impl(arr):
out = np.empty(arr.size, dtype)
c = zero
for idx, v in enumerate(arr.flat):
c += v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumsum_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.cumprod, types.Array)
@lower_builtin("array.cumprod", types.Array)
def array_cumprod(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
def array_cumprod_impl(arr):
out = np.empty(arr.size, dtype)
c = 1
for idx, v in enumerate(arr.flat):
c *= v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumprod_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.mean, types.Array)
@lower_builtin("array.mean", types.Array)
def array_mean(context, builder, sig, args):
zero = sig.return_type(0)
def array_mean_impl(arr):
# Can't use the naive `arr.sum() / arr.size`, as it would return
# a wrong result on integer sum overflow.
c = zero
for v in np.nditer(arr):
c += v.item()
return c / arr.size
res = context.compile_internal(builder, array_mean_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.var, types.Array)
@lower_builtin("array.var", types.Array)
def array_var(context, builder, sig, args):
def array_var_impl(arr):
# Compute the mean
m = arr.mean()
# Compute the sum of square diffs
ssd = 0
for v in np.nditer(arr):
val = (v.item() - m)
ssd += np.real(val * np.conj(val))
return ssd / arr.size
res = context.compile_internal(builder, array_var_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
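# Note (illustration, not part of the original module): the implementation
# above computes the population variance (ddof=0), sum(|x - mean|**2) / n,
# matching NumPy's default, e.g. np.var(np.array([1.0, 2.0, 3.0])) -> 0.666...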
@lower_builtin(np.std, types.Array)
@lower_builtin("array.std", types.Array)
def array_std(context, builder, sig, args):
def array_std_impl(arry):
return arry.var() ** 0.5
res = context.compile_internal(builder, array_std_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def zero_dim_msg(fn_name):
msg = ("zero-size array to reduction operation "
"{0} which has no identity".format(fn_name))
return msg
def _is_nat(x):
pass
@overload(_is_nat)
def ol_is_nat(x):
if numpy_version >= (1, 18):
return lambda x: np.isnat(x)
else:
nat = x('NaT')
return lambda x: x == nat
@lower_builtin(np.min, types.Array)
@lower_builtin("array.min", types.Array)
def array_min(context, builder, sig, args):
ty = sig.args[0].dtype
MSG = zero_dim_msg('minimum')
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NP < 1.18: NaT is smaller than every other value, but it is
# ignored as far as min() is concerned.
# NP >= 1.18: NaT dominates like NaN
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
if _is_nat(min_value):
return min_value
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return v
else:
continue
if v < min_value:
min_value = v
return min_value
elif isinstance(ty, types.Complex):
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
for view in it:
v = view.item()
if v.real < min_value.real:
min_value = v
elif v.real == min_value.real:
if v.imag < min_value.imag:
min_value = v
return min_value
elif isinstance(ty, types.Float):
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
if np.isnan(min_value):
return min_value
for view in it:
v = view.item()
if np.isnan(v):
return v
if v < min_value:
min_value = v
return min_value
else:
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
for view in it:
v = view.item()
if v < min_value:
min_value = v
return min_value
res = context.compile_internal(builder, array_min_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.max, types.Array)
@lower_builtin("array.max", types.Array)
def array_max(context, builder, sig, args):
ty = sig.args[0].dtype
MSG = zero_dim_msg('maximum')
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NP < 1.18: NaT is smaller than every other value, but it is
# ignored as far as max() is concerned.
# NP >= 1.18: NaT dominates like NaN
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
if _is_nat(max_value):
return max_value
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return v
else:
continue
if v > max_value:
max_value = v
return max_value
elif isinstance(ty, types.Complex):
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
for view in it:
v = view.item()
if v.real > max_value.real:
max_value = v
elif v.real == max_value.real:
if v.imag > max_value.imag:
max_value = v
return max_value
elif isinstance(ty, types.Float):
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
if np.isnan(max_value):
return max_value
for view in it:
v = view.item()
if np.isnan(v):
return v
if v > max_value:
max_value = v
return max_value
else:
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
for view in it:
v = view.item()
if v > max_value:
max_value = v
return max_value
res = context.compile_internal(builder, array_max_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.argmin, types.Array)
@lower_builtin("array.argmin", types.Array)
def array_argmin(context, builder, sig, args):
ty = sig.args[0].dtype
if (isinstance(ty, (types.NPDatetime, types.NPTimedelta))):
def array_argmin_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
it = np.nditer(arry)
min_value = next(it).take(0)
min_idx = 0
if _is_nat(min_value):
return min_idx
idx = 1
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return idx
else:
idx += 1
continue
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
elif isinstance(ty, types.Float):
def array_argmin_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
for v in arry.flat:
min_value = v
min_idx = 0
break
if np.isnan(min_value):
return min_idx
idx = 0
for v in arry.flat:
if np.isnan(v):
return idx
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
else:
def array_argmin_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
for v in arry.flat:
min_value = v
min_idx = 0
break
else:
raise RuntimeError('unreachable')
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
res = context.compile_internal(builder, array_argmin_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.argmax, types.Array)
@lower_builtin("array.argmax", types.Array)
def array_argmax(context, builder, sig, args):
ty = sig.args[0].dtype
if (isinstance(ty, (types.NPDatetime, types.NPTimedelta))):
def array_argmax_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
it = np.nditer(arry)
max_value = next(it).take(0)
max_idx = 0
if _is_nat(max_value):
return max_idx
idx = 1
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return idx
else:
idx += 1
continue
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
elif isinstance(ty, types.Float):
def array_argmax_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
for v in arry.flat:
max_value = v
max_idx = 0
break
if np.isnan(max_value):
return max_idx
idx = 0
for v in arry.flat:
if np.isnan(v):
return idx
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
else:
def array_argmax_impl(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
res = context.compile_internal(builder, array_argmax_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@overload(np.all)
@overload_method(types.Array, "all")
def np_all(a):
def flat_all(a):
for v in np.nditer(a):
if not v.item():
return False
return True
return flat_all
@overload(np.any)
@overload_method(types.Array, "any")
def np_any(a):
def flat_any(a):
for v in np.nditer(a):
if v.item():
return True
return False
return flat_any
def get_isnan(dtype):
"""
A generic isnan() function
"""
if isinstance(dtype, (types.Float, types.Complex)):
return np.isnan
else:
@register_jitable
def _trivial_isnan(x):
return False
return _trivial_isnan
@register_jitable
def less_than(a, b):
return a < b
@register_jitable
def greater_than(a, b):
return a > b
@register_jitable
def check_array(a):
if a.size == 0:
raise ValueError('zero-size array to reduction operation not possible')
def _check_is_integer(v, name):
if not isinstance(v, (int, types.Integer)):
raise TypingError('{} must be an integer'.format(name))
def nan_min_max_factory(comparison_op, is_complex_dtype):
if is_complex_dtype:
def impl(a):
arr = np.asarray(a)
check_array(arr)
it = np.nditer(arr)
return_val = next(it).take(0)
for view in it:
v = view.item()
if np.isnan(return_val.real) and not np.isnan(v.real):
return_val = v
else:
if comparison_op(v.real, return_val.real):
return_val = v
elif v.real == return_val.real:
if comparison_op(v.imag, return_val.imag):
return_val = v
return return_val
else:
def impl(a):
arr = np.asarray(a)
check_array(arr)
it = np.nditer(arr)
return_val = next(it).take(0)
for view in it:
v = view.item()
if not np.isnan(v):
if not comparison_op(return_val, v):
return_val = v
return return_val
return impl
real_nanmin = register_jitable(
nan_min_max_factory(less_than, is_complex_dtype=False)
)
real_nanmax = register_jitable(
nan_min_max_factory(greater_than, is_complex_dtype=False)
)
complex_nanmin = register_jitable(
nan_min_max_factory(less_than, is_complex_dtype=True)
)
complex_nanmax = register_jitable(
nan_min_max_factory(greater_than, is_complex_dtype=True)
)
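# Illustrative expectations for the jitted helpers above (not part of the
# original module); NaNs are skipped unless every element is NaN:
#
#   real_nanmin(np.array([np.nan, 2.0, 1.0]))   # -> 1.0
#   real_nanmax(np.array([np.nan, 2.0, 1.0]))   # -> 2.0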
@overload(np.nanmin)
def np_nanmin(a):
dt = determine_dtype(a)
if np.issubdtype(dt, np.complexfloating):
return complex_nanmin
else:
return real_nanmin
@overload(np.nanmax)
def np_nanmax(a):
dt = determine_dtype(a)
if np.issubdtype(dt, np.complexfloating):
return complex_nanmax
else:
return real_nanmax
@overload(np.nanmean)
def np_nanmean(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanmean_impl(a):
c = 0.0
count = 0
for view in np.nditer(a):
v = view.item()
if not isnan(v):
c += v.item()
count += 1
# np.divide() doesn't raise ZeroDivisionError
return np.divide(c, count)
return nanmean_impl
@overload(np.nanvar)
def np_nanvar(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanvar_impl(a):
# Compute the mean
m = np.nanmean(a)
# Compute the sum of square diffs
ssd = 0.0
count = 0
for view in np.nditer(a):
v = view.item()
if not isnan(v):
val = (v.item() - m)
ssd += np.real(val * np.conj(val))
count += 1
# np.divide() doesn't raise ZeroDivisionError
return np.divide(ssd, count)
return nanvar_impl
@overload(np.nanstd)
def np_nanstd(a):
if not isinstance(a, types.Array):
return
def nanstd_impl(a):
return np.nanvar(a) ** 0.5
return nanstd_impl
@overload(np.nansum)
def np_nansum(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, types.Integer):
retty = types.intp
else:
retty = a.dtype
zero = retty(0)
isnan = get_isnan(a.dtype)
def nansum_impl(a):
c = zero
for view in np.nditer(a):
v = view.item()
if not isnan(v):
c += v
return c
return nansum_impl
@overload(np.nanprod)
def np_nanprod(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, types.Integer):
retty = types.intp
else:
retty = a.dtype
one = retty(1)
isnan = get_isnan(a.dtype)
def nanprod_impl(a):
c = one
for view in np.nditer(a):
v = view.item()
if not isnan(v):
c *= v
return c
return nanprod_impl
@overload(np.nancumprod)
def np_nancumprod(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, (types.Boolean, types.Integer)):
# dtype cannot possibly contain NaN
return lambda a: np.cumprod(a)
else:
retty = a.dtype
is_nan = get_isnan(retty)
one = retty(1)
def nancumprod_impl(a):
out = np.empty(a.size, retty)
c = one
for idx, v in enumerate(a.flat):
if ~is_nan(v):
c *= v
out[idx] = c
return out
return nancumprod_impl
@overload(np.nancumsum)
def np_nancumsum(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, (types.Boolean, types.Integer)):
# dtype cannot possibly contain NaN
return lambda a: np.cumsum(a)
else:
retty = a.dtype
is_nan = get_isnan(retty)
zero = retty(0)
def nancumsum_impl(a):
out = np.empty(a.size, retty)
c = zero
for idx, v in enumerate(a.flat):
if ~is_nan(v):
c += v
out[idx] = c
return out
return nancumsum_impl
@register_jitable
def prepare_ptp_input(a):
arr = _asarray(a)
if len(arr) == 0:
raise ValueError('zero-size array reduction not possible')
else:
return arr
def _compute_current_val_impl_gen(op):
def _compute_current_val_impl(current_val, val):
if isinstance(current_val, types.Complex):
# The sort order for complex numbers is lexicographic. If both the
# real and imaginary parts are non-nan then the order is determined
# by the real parts except when they are equal, in which case the
# order is determined by the imaginary parts.
# https://github.com/numpy/numpy/blob/577a86e/numpy/core/fromnumeric.py#L874-L877 # noqa: E501
def impl(current_val, val):
if op(val.real, current_val.real):
return val
elif (val.real == current_val.real
and op(val.imag, current_val.imag)):
return val
return current_val
else:
def impl(current_val, val):
return val if op(val, current_val) else current_val
return impl
return _compute_current_val_impl
_compute_a_max = generated_jit(_compute_current_val_impl_gen(greater_than))
_compute_a_min = generated_jit(_compute_current_val_impl_gen(less_than))
@generated_jit
def _early_return(val):
UNUSED = 0
if isinstance(val, types.Complex):
def impl(val):
if np.isnan(val.real):
if np.isnan(val.imag):
return True, np.nan + np.nan * 1j
else:
return True, np.nan + 0j
else:
return False, UNUSED
elif isinstance(val, types.Float):
def impl(val):
if np.isnan(val):
return True, np.nan
else:
return False, UNUSED
else:
def impl(val):
return False, UNUSED
return impl
@overload_method(types.Array, 'ptp')
@overload(np.ptp)
def np_ptp(a):
if hasattr(a, 'dtype'):
if isinstance(a.dtype, types.Boolean):
raise TypingError("Boolean dtype is unsupported (as per NumPy)")
# Numpy raises a TypeError
def np_ptp_impl(a):
arr = prepare_ptp_input(a)
a_flat = arr.flat
a_min = a_flat[0]
a_max = a_flat[0]
for i in range(arr.size):
val = a_flat[i]
take_branch, retval = _early_return(val)
if take_branch:
return retval
a_max = _compute_a_max(a_max, val)
a_min = _compute_a_min(a_min, val)
return a_max - a_min
return np_ptp_impl
#----------------------------------------------------------------------------
# Median and partitioning
@register_jitable
def nan_aware_less_than(a, b):
if np.isnan(a):
return False
else:
if np.isnan(b):
return True
else:
return a < b
def _partition_factory(pivotimpl):
def _partition(A, low, high):
mid = (low + high) >> 1
# NOTE: the pattern of swaps below for the pivot choice and the
# partitioning gives good results (i.e. regular O(n log n))
# on sorted, reverse-sorted, and uniform arrays. Subtle changes
# risk breaking this property.
# Use median of three {low, middle, high} as the pivot
if pivotimpl(A[mid], A[low]):
A[low], A[mid] = A[mid], A[low]
if pivotimpl(A[high], A[mid]):
A[high], A[mid] = A[mid], A[high]
if pivotimpl(A[mid], A[low]):
A[low], A[mid] = A[mid], A[low]
pivot = A[mid]
A[high], A[mid] = A[mid], A[high]
i = low
j = high - 1
while True:
while i < high and pivotimpl(A[i], pivot):
i += 1
while j >= low and pivotimpl(pivot, A[j]):
j -= 1
if i >= j:
break
A[i], A[j] = A[j], A[i]
i += 1
j -= 1
# Put the pivot back in its final place (all items before `i`
# are smaller than the pivot, all items at/after `i` are larger)
A[i], A[high] = A[high], A[i]
return i
return _partition
_partition = register_jitable(_partition_factory(less_than))
_partition_w_nan = register_jitable(_partition_factory(nan_aware_less_than))
def _select_factory(partitionimpl):
def _select(arry, k, low, high):
"""
Select the k'th smallest element in array[low:high + 1].
"""
i = partitionimpl(arry, low, high)
while i != k:
if i < k:
low = i + 1
i = partitionimpl(arry, low, high)
else:
high = i - 1
i = partitionimpl(arry, low, high)
return arry[k]
return _select
_select = register_jitable(_select_factory(_partition))
_select_w_nan = register_jitable(_select_factory(_partition_w_nan))
@register_jitable
def _select_two(arry, k, low, high):
"""
Select the k'th and k+1'th smallest elements in array[low:high + 1].
This is significantly faster than doing two independent selections
for k and k+1.
"""
while True:
assert high > low # by construction
i = _partition(arry, low, high)
if i < k:
low = i + 1
elif i > k + 1:
high = i - 1
elif i == k:
_select(arry, k + 1, i + 1, high)
break
else: # i == k + 1
_select(arry, k, low, i - 1)
break
return arry[k], arry[k + 1]
@register_jitable
def _median_inner(temp_arry, n):
"""
The main logic of the median() call. *temp_arry* must be disposable,
as this function will mutate it.
"""
low = 0
high = n - 1
half = n >> 1
if n & 1 == 0:
a, b = _select_two(temp_arry, half - 1, low, high)
return (a + b) / 2
else:
return _select(temp_arry, half, low, high)
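# Worked example for the helper above (illustration only): for the
# even-length input [4, 1, 3, 2], n=4, half=2, and _select_two picks the
# 2nd and 3rd smallest values (2 and 3), so the median is (2 + 3) / 2 = 2.5,
# matching np.median([4, 1, 3, 2]).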
@overload(np.median)
def np_median(a):
if not isinstance(a, types.Array):
return
def median_impl(a):
# np.median() works on the flattened array, and we need a temporary
# workspace anyway
temp_arry = a.flatten()
n = temp_arry.shape[0]
return _median_inner(temp_arry, n)
return median_impl
@register_jitable
def _collect_percentiles_inner(a, q):
n = len(a)
if n == 1:
# single element array; output same for all percentiles
out = np.full(len(q), a[0], dtype=np.float64)
else:
out = np.empty(len(q), dtype=np.float64)
for i in range(len(q)):
percentile = q[i]
# bypass pivoting where requested percentile is 100
if percentile == 100:
val = np.max(a)
# heuristics to handle infinite values a la NumPy
if ~np.all(np.isfinite(a)):
if ~np.isfinite(val):
val = np.nan
# bypass pivoting where requested percentile is 0
elif percentile == 0:
val = np.min(a)
# convoluted heuristics to handle infinite values a la NumPy
if ~np.all(np.isfinite(a)):
num_pos_inf = np.sum(a == np.inf)
num_neg_inf = np.sum(a == -np.inf)
num_finite = n - (num_neg_inf + num_pos_inf)
if num_finite == 0:
val = np.nan
if num_pos_inf == 1 and n == 2:
val = np.nan
if num_neg_inf > 1:
val = np.nan
if num_finite == 1:
if num_pos_inf > 1:
if num_neg_inf != 1:
val = np.nan
else:
# linear interp between closest ranks
rank = 1 + (n - 1) * np.true_divide(percentile, 100.0)
f = math.floor(rank)
m = rank - f
lower, upper = _select_two(a, k=int(f - 1), low=0, high=(n - 1))
val = lower * (1 - m) + upper * m
out[i] = val
return out
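# Worked example of the interpolation branch above (illustration only):
# for a = [1, 2, 3, 4] and percentile = 25, rank = 1 + 3 * 0.25 = 1.75,
# f = 1, m = 0.75, lower = 1, upper = 2, so val = 1 * 0.25 + 2 * 0.75 = 1.75,
# which matches np.percentile([1, 2, 3, 4], 25).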
@register_jitable
def _can_collect_percentiles(a, nan_mask, skip_nan):
if skip_nan:
a = a[~nan_mask]
if len(a) == 0:
return False # told to skip nan, but no elements remain
else:
if np.any(nan_mask):
return False # told *not* to skip nan, but nan encountered
if len(a) == 1: # single element array
val = a[0]
return np.isfinite(val) # can collect percentiles if element is finite
else:
return True
@register_jitable
def check_valid(q, q_upper_bound):
valid = True
# avoid expensive reductions where possible
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if q[i] < 0.0 or q[i] > q_upper_bound or np.isnan(q[i]):
valid = False
break
else:
if np.any(np.isnan(q)) or np.any(q < 0.0) or np.any(q > q_upper_bound):
valid = False
return valid
@register_jitable
def percentile_is_valid(q):
if not check_valid(q, q_upper_bound=100.0):
raise ValueError('Percentiles must be in the range [0, 100]')
@register_jitable
def quantile_is_valid(q):
if not check_valid(q, q_upper_bound=1.0):
raise ValueError('Quantiles must be in the range [0, 1]')
@register_jitable
def _collect_percentiles(a, q, check_q, factor, skip_nan):
q = np.asarray(q, dtype=np.float64).flatten()
check_q(q)
q = q * factor
temp_arry = np.asarray(a, dtype=np.float64).flatten()
nan_mask = np.isnan(temp_arry)
if _can_collect_percentiles(temp_arry, nan_mask, skip_nan):
temp_arry = temp_arry[~nan_mask]
out = _collect_percentiles_inner(temp_arry, q)
else:
out = np.full(len(q), np.nan)
return out
def _percentile_quantile_inner(a, q, skip_nan, factor, check_q):
"""
The underlying algorithm to find percentiles and quantiles
is the same, hence we converge onto the same code paths
in this inner function implementation
"""
dt = determine_dtype(a)
if np.issubdtype(dt, np.complexfloating):
raise TypingError('Not supported for complex dtype')
# this could be supported, but would require a
# lexicographic comparison
def np_percentile_q_scalar_impl(a, q):
return _collect_percentiles(a, q, check_q, factor, skip_nan)[0]
def np_percentile_impl(a, q):
return _collect_percentiles(a, q, check_q, factor, skip_nan)
if isinstance(q, (types.Number, types.Boolean)):
return np_percentile_q_scalar_impl
elif isinstance(q, types.Array) and q.ndim == 0:
return np_percentile_q_scalar_impl
else:
return np_percentile_impl
@overload(np.percentile)
def np_percentile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=False, factor=1.0, check_q=percentile_is_valid
)
@overload(np.nanpercentile)
def np_nanpercentile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=True, factor=1.0, check_q=percentile_is_valid
)
@overload(np.quantile)
def np_quantile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=False, factor=100.0, check_q=quantile_is_valid
)
@overload(np.nanquantile)
def np_nanquantile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=True, factor=100.0, check_q=quantile_is_valid
)
@overload(np.nanmedian)
def np_nanmedian(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanmedian_impl(a):
# Create a temporary workspace with only non-NaN values
temp_arry = np.empty(a.size, a.dtype)
n = 0
for view in np.nditer(a):
v = view.item()
if not isnan(v):
temp_arry[n] = v
n += 1
# all NaNs
if n == 0:
return np.nan
return _median_inner(temp_arry, n)
return nanmedian_impl
@register_jitable
def np_partition_impl_inner(a, kth_array):
# allocate and fill empty array rather than copy a and mutate in place
# as the latter approach fails to preserve strides
out = np.empty_like(a)
idx = np.ndindex(a.shape[:-1]) # Numpy default partition axis is -1
for s in idx:
arry = a[s].copy()
low = 0
high = len(arry) - 1
for kth in kth_array:
_select_w_nan(arry, kth, low, high)
low = kth # narrow span of subsequent partition
out[s] = arry
return out
@register_jitable
def valid_kths(a, kth):
"""
Returns a sorted, unique array of kth values which serve
as indexers for partitioning the input array, a.
If the absolute value of any of the provided values
is greater than or equal to a.shape[-1], an exception is raised since
we are partitioning along the last axis (per Numpy default
behaviour).
Values less than 0 are transformed to equivalent positive
index values.
"""
# cast boolean to int, where relevant
kth_array = _asarray(kth).astype(np.int64)
if kth_array.ndim != 1:
raise ValueError('kth must be scalar or 1-D')
# numpy raises ValueError: object too deep for desired array
if np.any(np.abs(kth_array) >= a.shape[-1]):
raise ValueError("kth out of bounds")
out = np.empty_like(kth_array)
for index, val in np.ndenumerate(kth_array):
if val < 0:
out[index] = val + a.shape[-1] # equivalent positive index
else:
out[index] = val
return np.unique(out)
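# Illustration (not part of the original module): for an array with
# a.shape[-1] == 5, kth = [-1, 2] normalises to the sorted unique indices
# [2, 4], while any |kth| >= 5 raises "kth out of bounds".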
@overload(np.partition)
def np_partition(a, kth):
if not isinstance(a, (types.Array, types.Sequence, types.Tuple)):
raise TypeError('The first argument must be an array-like')
if isinstance(a, types.Array) and a.ndim == 0:
raise TypeError('The first argument must be at least 1-D (found 0-D)')
kthdt = getattr(kth, 'dtype', kth)
if not isinstance(kthdt, (types.Boolean, types.Integer)):
# bool gets cast to int subsequently
raise TypeError('Partition index must be integer')
def np_partition_impl(a, kth):
a_tmp = _asarray(a)
if a_tmp.size == 0:
return a_tmp.copy()
else:
kth_array = valid_kths(a_tmp, kth)
return np_partition_impl_inner(a_tmp, kth_array)
return np_partition_impl
#----------------------------------------------------------------------------
# Building matrices
@register_jitable
def _tri_impl(N, M, k):
shape = max(0, N), max(0, M) # numpy floors each dimension at 0
out = np.empty(shape, dtype=np.float64) # numpy default dtype
for i in range(shape[0]):
m_max = min(max(0, i + k + 1), shape[1])
out[i, :m_max] = 1
out[i, m_max:] = 0
return out
@overload(np.tri)
def np_tri(N, M=None, k=0):
# we require k to be integer, unlike numpy
_check_is_integer(k, 'k')
def tri_impl(N, M=None, k=0):
if M is None:
M = N
return _tri_impl(N, M, k)
return tri_impl
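# Illustrative expected output (not part of the original module), matching
# NumPy:
#
#   np.tri(3)
#   # -> array([[1., 0., 0.],
#   #           [1., 1., 0.],
#   #           [1., 1., 1.]])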
@register_jitable
def _make_square(m):
"""
Takes a 1d array and tiles it to form a square matrix
- i.e. a facsimile of np.tile(m, (len(m), 1))
"""
assert m.ndim == 1
len_m = len(m)
out = np.empty((len_m, len_m), dtype=m.dtype)
for i in range(len_m):
out[i] = m
return out
@register_jitable
def np_tril_impl_2d(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint)
return np.where(mask, m, np.zeros_like(m, dtype=m.dtype))
@overload(np.tril)
def my_tril(m, k=0):
# we require k to be integer, unlike numpy
_check_is_integer(k, 'k')
def np_tril_impl_1d(m, k=0):
m_2d = _make_square(m)
return np_tril_impl_2d(m_2d, k)
def np_tril_impl_multi(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint)
idx = np.ndindex(m.shape[:-2])
z = np.empty_like(m)
zero_opt = np.zeros_like(mask, dtype=m.dtype)
for sel in idx:
z[sel] = np.where(mask, m[sel], zero_opt)
return z
if m.ndim == 1:
return np_tril_impl_1d
elif m.ndim == 2:
return np_tril_impl_2d
else:
return np_tril_impl_multi
@overload(np.tril_indices)
def np_tril_indices(n, k=0, m=None):
# we require integer arguments, unlike numpy
_check_is_integer(n, 'n')
_check_is_integer(k, 'k')
if not is_nonelike(m):
_check_is_integer(m, 'm')
def np_tril_indices_impl(n, k=0, m=None):
return np.nonzero(np.tri(n, m, k=k))
return np_tril_indices_impl
@overload(np.tril_indices_from)
def np_tril_indices_from(arr, k=0):
# we require k to be integer, unlike numpy
_check_is_integer(k, 'k')
if arr.ndim != 2:
raise TypingError("input array must be 2-d")
def np_tril_indices_from_impl(arr, k=0):
return np.tril_indices(arr.shape[0], k=k, m=arr.shape[1])
return np_tril_indices_from_impl
@register_jitable
def np_triu_impl_2d(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint)
return np.where(mask, np.zeros_like(m, dtype=m.dtype), m)
@overload(np.triu)
def my_triu(m, k=0):
# we require k to be integer, unlike numpy
_check_is_integer(k, 'k')
def np_triu_impl_1d(m, k=0):
m_2d = _make_square(m)
return np_triu_impl_2d(m_2d, k)
def np_triu_impl_multi(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint)
idx = np.ndindex(m.shape[:-2])
z = np.empty_like(m)
zero_opt = np.zeros_like(mask, dtype=m.dtype)
for sel in idx:
z[sel] = np.where(mask, zero_opt, m[sel])
return z
if m.ndim == 1:
return np_triu_impl_1d
elif m.ndim == 2:
return np_triu_impl_2d
else:
return np_triu_impl_multi
@overload(np.triu_indices)
def np_triu_indices(n, k=0, m=None):
# we require integer arguments, unlike numpy
_check_is_integer(n, 'n')
_check_is_integer(k, 'k')
if not is_nonelike(m):
_check_is_integer(m, 'm')
def np_triu_indices_impl(n, k=0, m=None):
return np.nonzero(1 - np.tri(n, m, k=k - 1))
return np_triu_indices_impl
@overload(np.triu_indices_from)
def np_triu_indices_from(arr, k=0):
# we require k to be integer, unlike numpy
_check_is_integer(k, 'k')
if arr.ndim != 2:
raise TypingError("input array must be 2-d")
def np_triu_indices_from_impl(arr, k=0):
return np.triu_indices(arr.shape[0], k=k, m=arr.shape[1])
return np_triu_indices_from_impl
def _prepare_array(arr):
pass
@overload(_prepare_array)
def _prepare_array_impl(arr):
if arr in (None, types.none):
return lambda arr: np.array(())
else:
return lambda arr: _asarray(arr).ravel()
def _dtype_of_compound(inobj):
obj = inobj
while True:
if isinstance(obj, (types.Number, types.Boolean)):
return as_dtype(obj)
l = getattr(obj, '__len__', None)
if l is not None and l() == 0: # empty tuple or similar
return np.float64
dt = getattr(obj, 'dtype', None)
if dt is None:
raise TypeError("type has no dtype attr")
if isinstance(obj, types.Sequence):
obj = obj.dtype
else:
return as_dtype(dt)
@overload(np.ediff1d)
def np_ediff1d(ary, to_end=None, to_begin=None):
if isinstance(ary, types.Array):
if isinstance(ary.dtype, types.Boolean):
raise TypeError("Boolean dtype is unsupported (as per NumPy)")
# Numpy tries to do this: return ary[1:] - ary[:-1] which
# results in a TypeError exception being raised
# since np 1.16 there are casting checks for to_end and to_begin to make
# sure they are compatible with the ary
if numpy_version >= (1, 16):
ary_dt = _dtype_of_compound(ary)
to_begin_dt = None
if not(is_nonelike(to_begin)):
to_begin_dt = _dtype_of_compound(to_begin)
to_end_dt = None
if not(is_nonelike(to_end)):
to_end_dt = _dtype_of_compound(to_end)
if to_begin_dt is not None and not np.can_cast(to_begin_dt, ary_dt):
msg = "dtype of to_begin must be compatible with input ary"
raise TypeError(msg)
if to_end_dt is not None and not np.can_cast(to_end_dt, ary_dt):
msg = "dtype of to_end must be compatible with input ary"
raise TypeError(msg)
def np_ediff1d_impl(ary, to_end=None, to_begin=None):
# transform each input into an equivalent 1d array
start = _prepare_array(to_begin)
mid = _prepare_array(ary)
end = _prepare_array(to_end)
out_dtype = mid.dtype
# output array dtype determined by ary dtype, per NumPy
# (for the most part); an exception to the rule is a zero length
# array-like, where NumPy falls back to np.float64; this behaviour
# is *not* replicated
if len(mid) > 0:
out = np.empty((len(start) + len(mid) + len(end) - 1),
dtype=out_dtype)
start_idx = len(start)
mid_idx = len(start) + len(mid) - 1
out[:start_idx] = start
out[start_idx:mid_idx] = np.diff(mid)
out[mid_idx:] = end
else:
out = np.empty((len(start) + len(end)), dtype=out_dtype)
start_idx = len(start)
out[:start_idx] = start
out[start_idx:] = end
return out
return np_ediff1d_impl
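# Illustrative expected behaviour (not part of the original module), matching
# NumPy:
#
#   np.ediff1d(np.array([1, 2, 4, 7]), to_begin=0, to_end=100)
#   # -> array([  0,   1,   2,   3, 100])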
def _select_element(arr):
pass
@overload(_select_element)
def _select_element_impl(arr):
zerod = getattr(arr, 'ndim', None) == 0
if zerod:
def impl(arr):
x = np.array((1,), dtype=arr.dtype)
x[:] = arr
return x[0]
return impl
else:
def impl(arr):
return arr
return impl
def _get_d(dx, x):
pass
@overload(_get_d)
def get_d_impl(x, dx):
if is_nonelike(x):
def impl(x, dx):
return np.asarray(dx)
else:
def impl(x, dx):
return np.diff(np.asarray(x))
return impl
@overload(np.trapz)
def np_trapz(y, x=None, dx=1.0):
if isinstance(y, (types.Number, types.Boolean)):
raise TypingError('y cannot be a scalar')
elif isinstance(y, types.Array) and y.ndim == 0:
raise TypingError('y cannot be 0D')
# NumPy raises IndexError: list assignment index out of range
# inspired by:
# https://github.com/numpy/numpy/blob/7ee52003/numpy/lib/function_base.py#L4040-L4065 # noqa: E501
def impl(y, x=None, dx=1.0):
yarr = np.asarray(y)
d = _get_d(x, dx)
y_ave = (yarr[..., slice(1, None)] + yarr[..., slice(None, -1)]) / 2.0
ret = np.sum(d * y_ave, -1)
processed = _select_element(ret)
return processed
return impl
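# Usage sketch (illustrative only, not part of this module): the np.trapz
# overload above supports the default unit spacing as well as an explicit
# `x`, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def trapz_demo(y):
#         return np.trapz(y)
#
#     # trapz_demo(np.array([1., 2., 3.])) -> 4.0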
@register_jitable
def _np_vander(x, N, increasing, out):
"""
Generate an N-column Vandermonde matrix from a supplied 1-dimensional
array, x. Store results in an output matrix, out, which is assumed to
be of the required dtype.
Values are accumulated using np.multiply to match the floating point
precision behaviour of numpy.vander.
"""
m, n = out.shape
assert m == len(x)
assert n == N
if increasing:
for i in range(N):
if i == 0:
out[:, i] = 1
else:
out[:, i] = np.multiply(x, out[:, (i - 1)])
else:
for i in range(N - 1, -1, -1):
if i == N - 1:
out[:, i] = 1
else:
out[:, i] = np.multiply(x, out[:, (i + 1)])
@register_jitable
def _check_vander_params(x, N):
if x.ndim > 1:
raise ValueError('x must be a one-dimensional array or sequence.')
if N < 0:
raise ValueError('Negative dimensions are not allowed')
@overload(np.vander)
def np_vander(x, N=None, increasing=False):
if N not in (None, types.none):
if not isinstance(N, types.Integer):
raise TypingError('Second argument N must be None or an integer')
def np_vander_impl(x, N=None, increasing=False):
if N is None:
N = len(x)
_check_vander_params(x, N)
# allocate output matrix using dtype determined in closure
out = np.empty((len(x), int(N)), dtype=dtype)
_np_vander(x, N, increasing, out)
return out
def np_vander_seq_impl(x, N=None, increasing=False):
if N is None:
N = len(x)
x_arr = np.array(x)
_check_vander_params(x_arr, N)
# allocate output matrix using dtype inferred when x_arr was created
out = np.empty((len(x), int(N)), dtype=x_arr.dtype)
_np_vander(x_arr, N, increasing, out)
return out
if isinstance(x, types.Array):
x_dt = as_dtype(x.dtype)
# replicate numpy behaviour w.r.t.type promotion
dtype = np.promote_types(x_dt, int)
return np_vander_impl
elif isinstance(x, (types.Tuple, types.Sequence)):
return np_vander_seq_impl
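# Usage sketch (illustrative only, not part of this module): np.vander with
# the default decreasing powers, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def vander_demo(x):
#         return np.vander(x, 3)
#
#     # vander_demo(np.array([1, 2, 3])) ->
#     # array([[1, 1, 1],
#     #        [4, 2, 1],
#     #        [9, 3, 1]])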
@overload(np.roll)
def np_roll(a, shift):
if not isinstance(shift, (types.Integer, types.Boolean)):
raise TypingError('shift must be an integer')
def np_roll_impl(a, shift):
arr = np.asarray(a)
out = np.empty(arr.shape, dtype=arr.dtype)
# empty_like might result in different contiguity vs NumPy
arr_flat = arr.flat
for i in range(arr.size):
idx = (i + shift) % arr.size
out.flat[idx] = arr_flat[i]
return out
if isinstance(a, (types.Number, types.Boolean)):
return lambda a, shift: np.asarray(a)
else:
return np_roll_impl
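# Usage sketch (illustrative only, not part of this module): np.roll shifts
# elements along the flattened array, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def roll_demo(a, shift):
#         return np.roll(a, shift)
#
#     # roll_demo(np.arange(5), 2) -> array([3, 4, 0, 1, 2])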
#----------------------------------------------------------------------------
# Mathematical functions
LIKELY_IN_CACHE_SIZE = 8
@register_jitable
def binary_search_with_guess(key, arr, length, guess):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of binary_search_with_guess prior to 1.15:
# https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L447 # noqa: E501
imin = 0
imax = length
# Handle keys outside of the arr range first
if key > arr[length - 1]:
return length
elif key < arr[0]:
return -1
# If len <= 4 use linear search.
# From above we know key >= arr[0] when we start.
if length <= 4:
i = 1
while i < length and key >= arr[i]:
i += 1
return i - 1
if guess > length - 3:
guess = length - 3
if guess < 1:
guess = 1
# check most likely values: guess - 1, guess, guess + 1
if key < arr[guess]:
if key < arr[guess - 1]:
imax = guess - 1
# last attempt to restrict search to items in cache
if guess > LIKELY_IN_CACHE_SIZE and \
key >= arr[guess - LIKELY_IN_CACHE_SIZE]:
imin = guess - LIKELY_IN_CACHE_SIZE
else:
# key >= arr[guess - 1]
return guess - 1
else:
# key >= arr[guess]
if key < arr[guess + 1]:
return guess
else:
# key >= arr[guess + 1]
if key < arr[guess + 2]:
return guess + 1
else:
# key >= arr[guess + 2]
imin = guess + 2
# last attempt to restrict search to items in cache
if (guess < (length - LIKELY_IN_CACHE_SIZE - 1)) and \
(key < arr[guess + LIKELY_IN_CACHE_SIZE]):
imax = guess + LIKELY_IN_CACHE_SIZE
# finally, find index by bisection
while imin < imax:
imid = imin + ((imax - imin) >> 1)
if key >= arr[imid]:
imin = imid + 1
else:
imax = imid
return imin - 1
@register_jitable
def np_interp_impl_complex_fp_inner(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp_complex prior to 1.16:
# https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L683 # noqa: E501
dz = np.asarray(x)
dx = np.asarray(xp)
dy = np.asarray(fp)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = np.empty((lenxp - 1), dtype=dtype)
else:
slopes = np.empty(0, dtype=dtype)
if slopes.size:
for i in range(lenxp - 1):
inv_dx = 1 / (dx[i + 1] - dx[i])
real = (dy[i + 1].real - dy[i].real) * inv_dx
imag = (dy[i + 1].imag - dy[i].imag) * inv_dx
slopes[i] = real + 1j * imag
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
real = x_val
imag = 0.0
dres.flat[i] = real + 1j * imag
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
inv_dx = 1 / (dx[j + 1] - dx[j])
real = (dy[j + 1].real - dy[j].real) * inv_dx
imag = (dy[j + 1].imag - dy[j].imag) * inv_dx
slope = real + 1j * imag
real = slope.real * (x_val - dx[j]) + dy[j].real
imag = slope.imag * (x_val - dx[j]) + dy[j].imag
dres.flat[i] = real + 1j * imag
# NOTE: there's a change in master which is not
# in any released version of 1.16.x yet... as
# per the real value implementation, but
# interpolate real and imaginary parts
# independently; this will need to be added in
# due course
return dres
def np_interp_impl_complex_fp_inner_factory(np117_nan_handling):
@register_jitable
def impl(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp_complex post 1.16 with added
# branching to support np1.17 style NaN handling. (see
# `np117_nan_handling` use)
# https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L628 # noqa: E501
dz = np.asarray(x)
dx = np.asarray(xp)
dy = np.asarray(fp)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = np.empty((lenxp - 1), dtype=dtype)
else:
slopes = np.empty(0, dtype=dtype)
if slopes.size:
for i in range(lenxp - 1):
inv_dx = 1 / (dx[i + 1] - dx[i])
real = (dy[i + 1].real - dy[i].real) * inv_dx
imag = (dy[i + 1].imag - dy[i].imag) * inv_dx
slopes[i] = real + 1j * imag
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
real = x_val
imag = 0.0
dres.flat[i] = real + 1j * imag
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
elif dx[j] == x_val:
# Avoid potential non-finite interpolation
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
inv_dx = 1 / (dx[j + 1] - dx[j])
real = (dy[j + 1].real - dy[j].real) * inv_dx
imag = (dy[j + 1].imag - dy[j].imag) * inv_dx
slope = real + 1j * imag
# The following branches mimic the behavior of
# different numpy version with regard to handling NaNs.
if np117_nan_handling:
# Numpy 1.17 handles NaN correctly
result = np_interp_impl_complex_fp_innermost_117(
x, slope, x_val, dx, dy, i, j,
)
dres.flat[i] = result
else:
                        # Numpy 1.16 does not handle NaN correctly.
real = slope.real * (x_val - dx[j]) + dy[j].real
imag = slope.imag * (x_val - dx[j]) + dy[j].imag
dres.flat[i] = real + 1j * imag
return dres
return impl
@register_jitable
def np_interp_impl_complex_fp_innermost_117(x, slope, x_val, dx, dy, i, j):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a copy of innermost part of arr_interp_complex post 1.17:
# https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L798-L812 # noqa: E501
# If we get nan in one direction, try the other
real = slope.real * (x_val - dx[j]) + dy[j].real
if np.isnan(real):
real = slope.real * (x_val - dx[j + 1]) + dy[j + 1].real
if np.isnan(real) and dy[j].real == dy[j + 1].real:
real = dy[j].real
imag = slope.imag * (x_val - dx[j]) + dy[j].imag
if np.isnan(imag):
imag = slope.imag * (x_val - dx[j + 1]) + dy[j + 1].imag
if np.isnan(imag) and dy[j].imag == dy[j + 1].imag:
imag = dy[j].imag
return real + 1j * imag
@register_jitable
def np_interp_impl_inner(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp prior to 1.16:
# https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L532 # noqa: E501
dz = np.asarray(x, dtype=np.float64)
dx = np.asarray(xp, dtype=np.float64)
dy = np.asarray(fp, dtype=np.float64)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = (dy[1:] - dy[:-1]) / (dx[1:] - dx[:-1])
else:
slopes = np.empty(0, dtype=dtype)
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
dres.flat[i] = x_val
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
slope = (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j])
dres.flat[i] = slope * (x_val - dx[j]) + dy[j]
# NOTE: this is in master but not in any released
# version of 1.16.x yet...
#
# If we get nan in one direction, try the other
# if np.isnan(dres.flat[i]):
# dres.flat[i] = slope * (x_val - dx[j + 1]) + dy[j + 1]
#
# if np.isnan(dres.flat[i]) and dy[j] == dy[j + 1]:
# dres.flat[i] = dy[j]
return dres
def np_interp_impl_inner_factory(np117_nan_handling):
def impl(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp post 1.16:
# https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L473 # noqa: E501
dz = np.asarray(x, dtype=np.float64)
dx = np.asarray(xp, dtype=np.float64)
dy = np.asarray(fp, dtype=np.float64)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = (dy[1:] - dy[:-1]) / (dx[1:] - dx[:-1])
else:
slopes = np.empty(0, dtype=dtype)
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
dres.flat[i] = x_val
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
elif dx[j] == x_val:
# Avoid potential non-finite interpolation
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
slope = (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j])
dres.flat[i] = slope * (x_val - dx[j]) + dy[j]
# NOTE: this is in np1.17
# https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L610-L616 # noqa: E501
#
# If we get nan in one direction, try the other
if np117_nan_handling:
if np.isnan(dres.flat[i]):
dres.flat[i] = slope * (x_val - dx[j + 1]) + dy[j + 1] # noqa: E501
if np.isnan(dres.flat[i]) and dy[j] == dy[j + 1]:
dres.flat[i] = dy[j]
return dres
return impl
np_interp_impl_inner_post_np117 = register_jitable(
np_interp_impl_inner_factory(np117_nan_handling=True)
)
np_interp_impl_complex_inner_post_np117 = register_jitable(
np_interp_impl_complex_fp_inner_factory(np117_nan_handling=True)
)
np_interp_impl_inner_pre_np117 = register_jitable(
np_interp_impl_inner_factory(np117_nan_handling=False)
)
np_interp_impl_complex_inner_pre_np117 = register_jitable(
np_interp_impl_complex_fp_inner_factory(np117_nan_handling=False)
)
@overload(np.interp)
def np_interp(x, xp, fp):
# NOTE: there is considerable duplication present in the functions:
    # np_interp_impl_complex_fp_inner
    # np_interp_impl_complex_fp_inner_factory
    # np_interp_impl_inner
    # np_interp_impl_inner_factory
#
# This is because:
# 1. Replicating basic interp is relatively simple, however matching the
# behaviour of NumPy for edge cases is really quite hard, after a
# couple of attempts trying to avoid translation of the C source it
# was deemed unavoidable.
# 2. Due to 1. it is much easier to keep track of changes if the Numba
# source reflects the NumPy C source, so the duplication is kept.
    # 3. There are significant changes that happened in the NumPy 1.16 and
    #    1.17 release series, hence the `pre_np117`/`post_np117` variants
    #    produced by the factories above, they behave slightly differently!
if hasattr(xp, 'ndim') and xp.ndim > 1:
raise TypingError('xp must be 1D')
if hasattr(fp, 'ndim') and fp.ndim > 1:
raise TypingError('fp must be 1D')
complex_dtype_msg = (
"Cannot cast array data from complex dtype to float64 dtype"
)
xp_dt = determine_dtype(xp)
if np.issubdtype(xp_dt, np.complexfloating):
raise TypingError(complex_dtype_msg)
if numpy_version >= (1, 17):
impl = np_interp_impl_inner_post_np117
impl_complex = np_interp_impl_complex_inner_post_np117
elif numpy_version >= (1, 16):
impl = np_interp_impl_inner_pre_np117
impl_complex = np_interp_impl_complex_inner_pre_np117
else:
impl = np_interp_impl_inner
impl_complex = np_interp_impl_complex_fp_inner
fp_dt = determine_dtype(fp)
dtype = np.result_type(fp_dt, np.float64)
if np.issubdtype(dtype, np.complexfloating):
inner = impl_complex
else:
inner = impl
def np_interp_impl(x, xp, fp):
return inner(x, xp, fp, dtype)
def np_interp_scalar_impl(x, xp, fp):
return inner(x, xp, fp, dtype).flat[0]
if isinstance(x, types.Number):
if isinstance(x, types.Complex):
raise TypingError(complex_dtype_msg)
return np_interp_scalar_impl
return np_interp_impl
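# Usage sketch (illustrative only, not part of this module): the np.interp
# overload above accepts scalar or array `x`; a scalar `x` returns a scalar,
# e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def interp_demo(x, xp, fp):
#         return np.interp(x, xp, fp)
#
#     # interp_demo(2.5, np.arange(5.), np.arange(5.) ** 2) -> 6.5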
#----------------------------------------------------------------------------
# Statistics
@register_jitable
def row_wise_average(a):
assert a.ndim == 2
m, n = a.shape
out = np.empty((m, 1), dtype=a.dtype)
for i in range(m):
out[i, 0] = np.sum(a[i, :]) / n
return out
@register_jitable
def np_cov_impl_inner(X, bias, ddof):
# determine degrees of freedom
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
# determine the normalization factor
fact = X.shape[1] - ddof
# numpy warns if less than 0 and floors at 0
fact = max(fact, 0.0)
# de-mean
X -= row_wise_average(X)
# calculate result - requires blas
c = np.dot(X, np.conj(X.T))
c *= np.true_divide(1, fact)
return c
def _prepare_cov_input_inner():
pass
@overload(_prepare_cov_input_inner)
def _prepare_cov_input_impl(m, y, rowvar, dtype):
if y in (None, types.none):
def _prepare_cov_input_inner(m, y, rowvar, dtype):
m_arr = np.atleast_2d(_asarray(m))
if not rowvar:
m_arr = m_arr.T
return m_arr
else:
def _prepare_cov_input_inner(m, y, rowvar, dtype):
m_arr = np.atleast_2d(_asarray(m))
y_arr = np.atleast_2d(_asarray(y))
# transpose if asked to and not a (1, n) vector - this looks
# wrong as you might end up transposing one and not the other,
# but it's what numpy does
if not rowvar:
if m_arr.shape[0] != 1:
m_arr = m_arr.T
if y_arr.shape[0] != 1:
y_arr = y_arr.T
m_rows, m_cols = m_arr.shape
y_rows, y_cols = y_arr.shape
if m_cols != y_cols:
raise ValueError("m and y have incompatible dimensions")
# allocate and fill output array
out = np.empty((m_rows + y_rows, m_cols), dtype=dtype)
out[:m_rows, :] = m_arr
out[-y_rows:, :] = y_arr
return out
return _prepare_cov_input_inner
@register_jitable
def _handle_m_dim_change(m):
if m.ndim == 2 and m.shape[0] == 1:
msg = ("2D array containing a single row is unsupported due to "
"ambiguity in type inference. To use numpy.cov in this case "
"simply pass the row as a 1D array, i.e. m[0].")
raise RuntimeError(msg)
_handle_m_dim_nop = register_jitable(lambda x: x)
def determine_dtype(array_like):
array_like_dt = np.float64
if isinstance(array_like, types.Array):
array_like_dt = as_dtype(array_like.dtype)
elif isinstance(array_like, (types.Number, types.Boolean)):
array_like_dt = as_dtype(array_like)
elif isinstance(array_like, (types.UniTuple, types.Tuple)):
coltypes = set()
for val in array_like:
if hasattr(val, 'count'):
[coltypes.add(v) for v in val]
else:
coltypes.add(val)
if len(coltypes) > 1:
array_like_dt = np.promote_types(*[as_dtype(ty) for ty in coltypes])
elif len(coltypes) == 1:
array_like_dt = as_dtype(coltypes.pop())
return array_like_dt
def check_dimensions(array_like, name):
if isinstance(array_like, types.Array):
if array_like.ndim > 2:
raise TypeError("{0} has more than 2 dimensions".format(name))
elif isinstance(array_like, types.Sequence):
if isinstance(array_like.key[0], types.Sequence):
if isinstance(array_like.key[0].key[0], types.Sequence):
raise TypeError("{0} has more than 2 dimensions".format(name))
@register_jitable
def _handle_ddof(ddof):
if not np.isfinite(ddof):
raise ValueError('Cannot convert non-finite ddof to integer')
if ddof - int(ddof) != 0:
raise ValueError('ddof must be integral value')
_handle_ddof_nop = register_jitable(lambda x: x)
@register_jitable
def _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER,
_M_DIM_HANDLER):
_M_DIM_HANDLER(m)
_DDOF_HANDLER(ddof)
return _prepare_cov_input_inner(m, y, rowvar, dtype)
def scalar_result_expected(mandatory_input, optional_input):
opt_is_none = optional_input in (None, types.none)
if isinstance(mandatory_input, types.Array) and mandatory_input.ndim == 1:
return opt_is_none
if isinstance(mandatory_input, types.BaseTuple):
if all(isinstance(x, (types.Number, types.Boolean))
for x in mandatory_input.types):
return opt_is_none
else:
if (len(mandatory_input.types) == 1 and
isinstance(mandatory_input.types[0], types.BaseTuple)):
return opt_is_none
if isinstance(mandatory_input, (types.Number, types.Boolean)):
return opt_is_none
if isinstance(mandatory_input, types.Sequence):
if (not isinstance(mandatory_input.key[0], types.Sequence) and
opt_is_none):
return True
return False
@register_jitable
def _clip_corr(x):
return np.where(np.fabs(x) > 1, np.sign(x), x)
@register_jitable
def _clip_complex(x):
real = _clip_corr(x.real)
imag = _clip_corr(x.imag)
return real + 1j * imag
@overload(np.cov)
def np_cov(m, y=None, rowvar=True, bias=False, ddof=None):
# reject problem if m and / or y are more than 2D
check_dimensions(m, 'm')
check_dimensions(y, 'y')
# reject problem if ddof invalid (either upfront if type is
# obviously invalid, or later if value found to be non-integral)
if ddof in (None, types.none):
_DDOF_HANDLER = _handle_ddof_nop
else:
if isinstance(ddof, (types.Integer, types.Boolean)):
_DDOF_HANDLER = _handle_ddof_nop
elif isinstance(ddof, types.Float):
_DDOF_HANDLER = _handle_ddof
else:
raise TypingError('ddof must be a real numerical scalar type')
# special case for 2D array input with 1 row of data - select
# handler function which we'll call later when we have access
# to the shape of the input array
_M_DIM_HANDLER = _handle_m_dim_nop
if isinstance(m, types.Array):
_M_DIM_HANDLER = _handle_m_dim_change
# infer result dtype
m_dt = determine_dtype(m)
y_dt = determine_dtype(y)
dtype = np.result_type(m_dt, y_dt, np.float64)
def np_cov_impl(m, y=None, rowvar=True, bias=False, ddof=None):
X = _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER,
_M_DIM_HANDLER).astype(dtype)
if np.any(np.array(X.shape) == 0):
return np.full((X.shape[0], X.shape[0]), fill_value=np.nan,
dtype=dtype)
else:
return np_cov_impl_inner(X, bias, ddof)
def np_cov_impl_single_variable(m, y=None, rowvar=True, bias=False,
ddof=None):
        X = _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER,
_M_DIM_HANDLER).astype(dtype)
if np.any(np.array(X.shape) == 0):
variance = np.nan
else:
variance = np_cov_impl_inner(X, bias, ddof).flat[0]
return np.array(variance)
if scalar_result_expected(m, y):
return np_cov_impl_single_variable
else:
return np_cov_impl
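# Usage sketch (illustrative only, not part of this module): with the default
# rowvar=True each row of a 2D input is treated as a variable, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def cov_demo(m):
#         return np.cov(m)
#
#     # cov_demo(np.array([[0., 1., 2.], [2., 1., 0.]])) ->
#     # array([[ 1., -1.],
#     #        [-1.,  1.]])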
@overload(np.corrcoef)
def np_corrcoef(x, y=None, rowvar=True):
x_dt = determine_dtype(x)
y_dt = determine_dtype(y)
dtype = np.result_type(x_dt, y_dt, np.float64)
    if np.issubdtype(dtype, np.complexfloating):
clip_fn = _clip_complex
else:
clip_fn = _clip_corr
def np_corrcoef_impl(x, y=None, rowvar=True):
c = np.cov(x, y, rowvar)
d = np.diag(c)
stddev = np.sqrt(d.real)
for i in range(c.shape[0]):
c[i, :] /= stddev
c[:, i] /= stddev
return clip_fn(c)
def np_corrcoef_impl_single_variable(x, y=None, rowvar=True):
c = np.cov(x, y, rowvar)
return c / c
if scalar_result_expected(x, y):
return np_corrcoef_impl_single_variable
else:
return np_corrcoef_impl
#----------------------------------------------------------------------------
# Element-wise computations
@overload(np.argwhere)
def np_argwhere(a):
# needs to be much more array-like for the array impl to work, Numba bug
# in one of the underlying function calls?
use_scalar = (numpy_version >= (1, 18) and
isinstance(a, (types.Number, types.Boolean)))
if type_can_asarray(a) and not use_scalar:
if numpy_version < (1, 18):
check = register_jitable(lambda x: not np.any(x))
else:
check = register_jitable(lambda x: True)
def impl(a):
arr = np.asarray(a)
if arr.shape == () and check(arr):
return np.zeros((0, 1), dtype=types.intp)
return np.transpose(np.vstack(np.nonzero(arr)))
else:
if numpy_version < (1, 18):
falseish = (0, 1)
trueish = (1, 1)
else:
falseish = (0, 0)
trueish = (1, 0)
def impl(a):
if a is not None and bool(a):
return np.zeros(trueish, dtype=types.intp)
else:
return np.zeros(falseish, dtype=types.intp)
return impl
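# Usage sketch (illustrative only, not part of this module): np.argwhere
# returns the indices of the non-zero elements, one row per element, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def argwhere_demo(a):
#         return np.argwhere(a)
#
#     # argwhere_demo(np.array([[1, 0], [0, 2]])) -> array([[0, 0], [1, 1]])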
@overload(np.flatnonzero)
def np_flatnonzero(a):
if type_can_asarray(a):
def impl(a):
arr = np.asarray(a)
return np.nonzero(np.ravel(arr))[0]
else:
def impl(a):
if a is not None and bool(a):
data = [0]
else:
data = [x for x in range(0)]
return np.array(data, dtype=types.intp)
return impl
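# Usage sketch (illustrative only, not part of this module):
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def flatnonzero_demo(a):
#         return np.flatnonzero(a)
#
#     # flatnonzero_demo(np.array([0, 3, 0, 4])) -> array([1, 3])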
@register_jitable
def _fill_diagonal_params(a, wrap):
if a.ndim == 2:
m = a.shape[0]
n = a.shape[1]
step = 1 + n
if wrap:
end = n * m
else:
end = n * min(m, n)
else:
shape = np.array(a.shape)
if not np.all(np.diff(shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (np.cumprod(shape[:-1])).sum()
end = shape.prod()
return end, step
@register_jitable
def _fill_diagonal_scalar(a, val, wrap):
end, step = _fill_diagonal_params(a, wrap)
for i in range(0, end, step):
a.flat[i] = val
@register_jitable
def _fill_diagonal(a, val, wrap):
end, step = _fill_diagonal_params(a, wrap)
ctr = 0
v_len = len(val)
for i in range(0, end, step):
a.flat[i] = val[ctr]
ctr += 1
ctr = ctr % v_len
@register_jitable
def _check_val_int(a, val):
iinfo = np.iinfo(a.dtype)
v_min = iinfo.min
v_max = iinfo.max
# check finite values are within bounds
if np.any(~np.isfinite(val)) or np.any(val < v_min) or np.any(val > v_max):
raise ValueError('Unable to safely conform val to a.dtype')
@register_jitable
def _check_val_float(a, val):
finfo = np.finfo(a.dtype)
v_min = finfo.min
v_max = finfo.max
# check finite values are within bounds
finite_vals = val[np.isfinite(val)]
if np.any(finite_vals < v_min) or np.any(finite_vals > v_max):
raise ValueError('Unable to safely conform val to a.dtype')
# no check performed, needed for pathway where no check is required
_check_nop = register_jitable(lambda x, y: x)
def _asarray(x):
pass
@overload(_asarray)
def _asarray_impl(x):
if isinstance(x, types.Array):
return lambda x: x
elif isinstance(x, (types.Sequence, types.Tuple)):
return lambda x: np.array(x)
elif isinstance(x, (types.Number, types.Boolean)):
ty = as_dtype(x)
return lambda x: np.array([x], dtype=ty)
@overload(np.fill_diagonal)
def np_fill_diagonal(a, val, wrap=False):
if a.ndim > 1:
# the following can be simplified after #3088; until then, employ
# a basic mechanism for catching cases where val is of a type/value
# which cannot safely be cast to a.dtype
if isinstance(a.dtype, types.Integer):
checker = _check_val_int
elif isinstance(a.dtype, types.Float):
checker = _check_val_float
else:
checker = _check_nop
def scalar_impl(a, val, wrap=False):
tmpval = _asarray(val).flatten()
checker(a, tmpval)
_fill_diagonal_scalar(a, val, wrap)
def non_scalar_impl(a, val, wrap=False):
tmpval = _asarray(val).flatten()
checker(a, tmpval)
_fill_diagonal(a, tmpval, wrap)
if isinstance(val, (types.Float, types.Integer, types.Boolean)):
return scalar_impl
elif isinstance(val, (types.Tuple, types.Sequence, types.Array)):
return non_scalar_impl
else:
msg = "The first argument must be at least 2-D (found %s-D)" % a.ndim
raise TypingError(msg)
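# Usage sketch (illustrative only, not part of this module): np.fill_diagonal
# mutates its first argument in place, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def fill_diagonal_demo():
#         a = np.zeros((3, 3))
#         np.fill_diagonal(a, 5.0)
#         return a
#
#     # fill_diagonal_demo() -> 3x3 array with 5.0 on the diagonal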
def _np_round_intrinsic(tp):
# np.round() always rounds half to even
return "llvm.rint.f%d" % (tp.bitwidth,)
def _np_round_float(context, builder, tp, val):
llty = context.get_value_type(tp)
module = builder.module
fnty = lc.Type.function(llty, [llty])
fn = module.get_or_insert_function(fnty, name=_np_round_intrinsic(tp))
return builder.call(fn, (val,))
@lower_builtin(np.round, types.Float)
def scalar_round_unary_float(context, builder, sig, args):
res = _np_round_float(context, builder, sig.args[0], args[0])
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.round, types.Integer)
def scalar_round_unary_integer(context, builder, sig, args):
res = args[0]
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.round, types.Complex)
def scalar_round_unary_complex(context, builder, sig, args):
fltty = sig.args[0].underlying_float
z = context.make_complex(builder, sig.args[0], args[0])
z.real = _np_round_float(context, builder, fltty, z.real)
z.imag = _np_round_float(context, builder, fltty, z.imag)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.round, types.Float, types.Integer)
@lower_builtin(np.round, types.Integer, types.Integer)
def scalar_round_binary_float(context, builder, sig, args):
def round_ndigits(x, ndigits):
if math.isinf(x) or math.isnan(x):
return x
# NOTE: this is CPython's algorithm, but perhaps this is overkill
# when emulating Numpy's behaviour.
if ndigits >= 0:
if ndigits > 22:
# pow1 and pow2 are each safe from overflow, but
# pow1*pow2 ~= pow(10.0, ndigits) might overflow.
pow1 = 10.0 ** (ndigits - 22)
pow2 = 1e22
else:
pow1 = 10.0 ** ndigits
pow2 = 1.0
y = (x * pow1) * pow2
if math.isinf(y):
return x
return (np.round(y) / pow2) / pow1
else:
pow1 = 10.0 ** (-ndigits)
y = x / pow1
return np.round(y) * pow1
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.round, types.Complex, types.Integer)
def scalar_round_binary_complex(context, builder, sig, args):
def round_ndigits(z, ndigits):
return complex(np.round(z.real, ndigits),
np.round(z.imag, ndigits))
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.round, types.Array, types.Integer, types.Array)
def array_round(context, builder, sig, args):
def array_round_impl(arr, decimals, out):
if arr.shape != out.shape:
raise ValueError("invalid output shape")
for index, val in np.ndenumerate(arr):
out[index] = np.round(val, decimals)
return out
res = context.compile_internal(builder, array_round_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.sinc, types.Array)
def array_sinc(context, builder, sig, args):
def array_sinc_impl(arr):
out = np.zeros_like(arr)
for index, val in np.ndenumerate(arr):
out[index] = np.sinc(val)
return out
res = context.compile_internal(builder, array_sinc_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.sinc, types.Number)
def scalar_sinc(context, builder, sig, args):
scalar_dtype = sig.return_type
def scalar_sinc_impl(val):
if val == 0.e0: # to match np impl
val = 1e-20
val *= np.pi # np sinc is the normalised variant
return np.sin(val) / val
res = context.compile_internal(builder, scalar_sinc_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.angle, types.Number)
@lower_builtin(np.angle, types.Number, types.Boolean)
def scalar_angle_kwarg(context, builder, sig, args):
deg_mult = sig.return_type(180 / np.pi)
def scalar_angle_impl(val, deg):
if deg:
return np.arctan2(val.imag, val.real) * deg_mult
else:
return np.arctan2(val.imag, val.real)
if len(args) == 1:
args = args + (cgutils.false_bit,)
sig = signature(sig.return_type, *(sig.args + (types.boolean,)))
res = context.compile_internal(builder, scalar_angle_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.angle, types.Array)
@lower_builtin(np.angle, types.Array, types.Boolean)
def array_angle_kwarg(context, builder, sig, args):
ret_dtype = sig.return_type.dtype
def array_angle_impl(arr, deg):
out = np.zeros_like(arr, dtype=ret_dtype)
for index, val in np.ndenumerate(arr):
out[index] = np.angle(val, deg)
return out
if len(args) == 1:
args = args + (cgutils.false_bit,)
sig = signature(sig.return_type, *(sig.args + (types.boolean,)))
res = context.compile_internal(builder, array_angle_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.nonzero, types.Array)
@lower_builtin("array.nonzero", types.Array)
@lower_builtin(np.where, types.Array)
def array_nonzero(context, builder, sig, args):
aryty = sig.args[0]
# Return type is a N-tuple of 1D C-contiguous arrays
retty = sig.return_type
outaryty = retty.dtype
nouts = retty.count
ary = make_array(aryty)(context, builder, args[0])
shape = cgutils.unpack_tuple(builder, ary.shape)
strides = cgutils.unpack_tuple(builder, ary.strides)
data = ary.data
layout = aryty.layout
# First count the number of non-zero elements
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
count = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(context, builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
builder.store(builder.add(builder.load(count), one), count)
# Then allocate output arrays of the right size
out_shape = (builder.load(count),)
outs = [_empty_nd_impl(context, builder, outaryty, out_shape)._getvalue()
for i in range(nouts)]
outarys = [make_array(outaryty)(context, builder, out) for out in outs]
out_datas = [out.data for out in outarys]
# And fill them up
index = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(context, builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
# Store element indices in output arrays
if not indices:
# For a 0-d array, store 0 in the unique output array
indices = (zero,)
cur = builder.load(index)
for i in range(nouts):
ptr = cgutils.get_item_pointer2(context, builder, out_datas[i],
out_shape, (),
'C', [cur])
store_item(context, builder, outaryty, indices[i], ptr)
builder.store(builder.add(cur, one), index)
tup = context.make_tuple(builder, sig.return_type, outs)
return impl_ret_new_ref(context, builder, sig.return_type, tup)
def array_where(context, builder, sig, args):
"""
np.where(array, array, array)
"""
layouts = set(a.layout for a in sig.args)
npty = np.promote_types(as_dtype(sig.args[1].dtype),
as_dtype(sig.args[2].dtype))
if layouts == set('C') or layouts == set('F'):
# Faster implementation for C-contiguous arrays
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x, dtype=npty)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty(cond.shape, dtype=npty)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
res = context.compile_internal(builder, where_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@register_jitable
def _where_x_y_scalar(cond, x, y, res):
for idx, c in np.ndenumerate(cond):
res[idx] = x if c else y
return res
@register_jitable
def _where_x_scalar(cond, x, y, res):
for idx, c in np.ndenumerate(cond):
res[idx] = x if c else y[idx]
return res
@register_jitable
def _where_y_scalar(cond, x, y, res):
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y
return res
def _where_inner(context, builder, sig, args, impl):
cond, x, y = sig.args
x_dt = determine_dtype(x)
y_dt = determine_dtype(y)
npty = np.promote_types(x_dt, y_dt)
if cond.layout == 'F':
def where_impl(cond, x, y):
res = np.asfortranarray(np.empty(cond.shape, dtype=npty))
return impl(cond, x, y, res)
else:
def where_impl(cond, x, y):
res = np.empty(cond.shape, dtype=npty)
return impl(cond, x, y, res)
res = context.compile_internal(builder, where_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
array_scalar_scalar_where = partial(_where_inner, impl=_where_x_y_scalar)
array_array_scalar_where = partial(_where_inner, impl=_where_y_scalar)
array_scalar_array_where = partial(_where_inner, impl=_where_x_scalar)
@lower_builtin(np.where, types.Any, types.Any, types.Any)
def any_where(context, builder, sig, args):
cond, x, y = sig.args
if isinstance(cond, types.Array):
if isinstance(x, types.Array):
if isinstance(y, types.Array):
impl = array_where
elif isinstance(y, (types.Number, types.Boolean)):
impl = array_array_scalar_where
elif isinstance(x, (types.Number, types.Boolean)):
if isinstance(y, types.Array):
impl = array_scalar_array_where
elif isinstance(y, (types.Number, types.Boolean)):
impl = array_scalar_scalar_where
return impl(context, builder, sig, args)
def scalar_where_impl(cond, x, y):
"""
np.where(scalar, scalar, scalar): return a 0-dim array
"""
scal = x if cond else y
# This is the equivalent of np.full_like(scal, scal),
# for compatibility with Numpy < 1.8
arr = np.empty_like(scal)
arr[()] = scal
return arr
res = context.compile_internal(builder, scalar_where_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@overload(np.real)
def np_real(a):
def np_real_impl(a):
return a.real
return np_real_impl
@overload(np.imag)
def np_imag(a):
def np_imag_impl(a):
return a.imag
return np_imag_impl
#----------------------------------------------------------------------------
# Misc functions
@overload(operator.contains)
def np_contains(arr, key):
if not isinstance(arr, types.Array):
return
def np_contains_impl(arr, key):
for x in np.nditer(arr):
if x == key:
return True
return False
return np_contains_impl
@overload(np.count_nonzero)
def np_count_nonzero(arr, axis=None):
if not type_can_asarray(arr):
raise TypingError("The argument to np.count_nonzero must be array-like")
if is_nonelike(axis):
def impl(arr, axis=None):
arr2 = np.ravel(arr)
return np.sum(arr2 != 0)
else:
def impl(arr, axis=None):
arr2 = arr.astype(np.bool_)
return np.sum(arr2, axis=axis)
return impl
np_delete_handler_isslice = register_jitable(lambda x : x)
np_delete_handler_isarray = register_jitable(lambda x : np.asarray(x))
@overload(np.delete)
def np_delete(arr, obj):
# Implementation based on numpy
# https://github.com/numpy/numpy/blob/af66e487a57bfd4850f4306e3b85d1dac3c70412/numpy/lib/function_base.py#L4065-L4267 # noqa: E501
if not isinstance(arr, (types.Array, types.Sequence)):
raise TypingError("arr must be either an Array or a Sequence")
if isinstance(obj, (types.Array, types.Sequence, types.SliceType)):
if isinstance(obj, (types.SliceType)):
handler = np_delete_handler_isslice
else:
if not isinstance(obj.dtype, types.Integer):
raise TypingError('obj should be of Integer dtype')
handler = np_delete_handler_isarray
def np_delete_impl(arr, obj):
arr = np.ravel(np.asarray(arr))
N = arr.size
keep = np.ones(N, dtype=np.bool_)
obj = handler(obj)
keep[obj] = False
return arr[keep]
return np_delete_impl
else: # scalar value
if not isinstance(obj, types.Integer):
raise TypingError('obj should be of Integer dtype')
def np_delete_scalar_impl(arr, obj):
arr = np.ravel(np.asarray(arr))
N = arr.size
pos = obj
if (pos < -N or pos >= N):
raise IndexError('obj must be less than the len(arr)')
# NumPy raises IndexError: index 'i' is out of
# bounds for axis 'x' with size 'n'
if (pos < 0):
pos += N
return np.concatenate((arr[:pos], arr[pos + 1:]))
return np_delete_scalar_impl
@overload(np.diff)
def np_diff_impl(a, n=1):
if not isinstance(a, types.Array) or a.ndim == 0:
return
def diff_impl(a, n=1):
if n == 0:
return a.copy()
if n < 0:
raise ValueError("diff(): order must be non-negative")
size = a.shape[-1]
out_shape = a.shape[:-1] + (max(size - n, 0),)
out = np.empty(out_shape, a.dtype)
if out.size == 0:
return out
# np.diff() works on each last dimension subarray independently.
# To make things easier, normalize input and output into 2d arrays
a2 = a.reshape((-1, size))
out2 = out.reshape((-1, out.shape[-1]))
# A scratchpad for subarrays
work = np.empty(size, a.dtype)
for major in range(a2.shape[0]):
# First iteration: diff a2 into work
for i in range(size - 1):
work[i] = a2[major, i + 1] - a2[major, i]
# Other iterations: diff work into itself
for niter in range(1, n):
for i in range(size - niter - 1):
work[i] = work[i + 1] - work[i]
# Copy final diff into out2
out2[major] = work[:size - n]
return out
return diff_impl
@overload(np.array_equal)
def np_array_equal(a, b):
if not (type_can_asarray(a) and type_can_asarray(b)):
raise TypingError('Both arguments to "array_equals" must be array-like')
accepted = (types.Boolean, types.Number)
if isinstance(a, accepted) and isinstance(b, accepted):
# special case
def impl(a, b):
return a == b
else:
def impl(a, b):
a = np.asarray(a)
b = np.asarray(b)
if a.shape == b.shape:
return np.all(a == b)
return False
return impl
def validate_1d_array_like(func_name, seq):
if isinstance(seq, types.Array):
if seq.ndim != 1:
raise TypeError("{0}(): input should have dimension 1"
.format(func_name))
elif not isinstance(seq, types.Sequence):
raise TypeError("{0}(): input should be an array or sequence"
.format(func_name))
@overload(np.bincount)
def np_bincount(a, weights=None, minlength=0):
validate_1d_array_like("bincount", a)
if not isinstance(a.dtype, types.Integer):
return
_check_is_integer(minlength, 'minlength')
if weights not in (None, types.none):
validate_1d_array_like("bincount", weights)
# weights is promoted to double in C impl
# https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c#L93-L95 # noqa: E501
out_dtype = np.float64
@register_jitable
def validate_inputs(a, weights, minlength):
if len(a) != len(weights):
raise ValueError("bincount(): weights and list don't have "
"the same length")
@register_jitable
def count_item(out, idx, val, weights):
out[val] += weights[idx]
else:
out_dtype = types.intp
@register_jitable
def validate_inputs(a, weights, minlength):
pass
@register_jitable
def count_item(out, idx, val, weights):
out[val] += 1
def bincount_impl(a, weights=None, minlength=0):
validate_inputs(a, weights, minlength)
if minlength < 0:
raise ValueError("'minlength' must not be negative")
n = len(a)
a_max = a[0] if n > 0 else -1
for i in range(1, n):
if a[i] < 0:
raise ValueError("bincount(): first argument must be "
"non-negative")
a_max = max(a_max, a[i])
out_length = max(a_max + 1, minlength)
out = np.zeros(out_length, out_dtype)
for i in range(n):
count_item(out, i, a[i], weights)
return out
return bincount_impl
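# Usage sketch (illustrative only, not part of this module): np.bincount on a
# 1D array of non-negative integers, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def bincount_demo(a):
#         return np.bincount(a)
#
#     # bincount_demo(np.array([0, 1, 1, 3])) -> array([1, 2, 0, 1])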
def _searchsorted(func):
def searchsorted_inner(a, v):
n = len(a)
if np.isnan(v):
# Find the first nan (i.e. the last from the end of a,
# since there shouldn't be many of them in practice)
for i in range(n, 0, -1):
if not np.isnan(a[i - 1]):
return i
return 0
lo = 0
hi = n
while hi > lo:
mid = (lo + hi) >> 1
if func(a[mid], (v)):
# mid is too low => go up
lo = mid + 1
else:
# mid is too high, or is a NaN => go down
hi = mid
return lo
return searchsorted_inner
_lt = less_than
_le = register_jitable(lambda x, y: x <= y)
_searchsorted_left = register_jitable(_searchsorted(_lt))
_searchsorted_right = register_jitable(_searchsorted(_le))
@overload(np.searchsorted)
def searchsorted(a, v, side='left'):
side_val = getattr(side, 'literal_value', side)
if side_val == 'left':
loop_impl = _searchsorted_left
elif side_val == 'right':
loop_impl = _searchsorted_right
else:
raise ValueError("Invalid value given for 'side': %s" % side_val)
if isinstance(v, types.Array):
# N-d array and output
def searchsorted_impl(a, v, side='left'):
out = np.empty(v.shape, np.intp)
for view, outview in np.nditer((v, out)):
index = loop_impl(a, view.item())
outview.itemset(index)
return out
elif isinstance(v, types.Sequence):
# 1-d sequence and output
def searchsorted_impl(a, v, side='left'):
out = np.empty(len(v), np.intp)
for i in range(len(v)):
out[i] = loop_impl(a, v[i])
return out
else:
# Scalar value and output
# Note: NaNs come last in Numpy-sorted arrays
def searchsorted_impl(a, v, side='left'):
return loop_impl(a, v)
return searchsorted_impl
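# Usage sketch (illustrative only, not part of this module): with the default
# side='left' the leftmost insertion point is returned; passing the literal
# string side='right' would take the `<=` comparison path instead, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def searchsorted_demo(a, v):
#         return np.searchsorted(a, v)
#
#     # searchsorted_demo(np.array([1., 2., 3., 4., 5.]), 3.0) -> 2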
@overload(np.digitize)
def np_digitize(x, bins, right=False):
@register_jitable
def are_bins_increasing(bins):
n = len(bins)
is_increasing = True
is_decreasing = True
if n > 1:
prev = bins[0]
for i in range(1, n):
cur = bins[i]
is_increasing = is_increasing and not prev > cur
is_decreasing = is_decreasing and not prev < cur
if not is_increasing and not is_decreasing:
raise ValueError("bins must be monotonically increasing "
"or decreasing")
prev = cur
return is_increasing
# NOTE: the algorithm is slightly different from searchsorted's,
# as the edge cases (bin boundaries, NaN) give different results.
@register_jitable
def digitize_scalar(x, bins, right):
# bins are monotonically-increasing
n = len(bins)
lo = 0
hi = n
if right:
if np.isnan(x):
# Find the first nan (i.e. the last from the end of bins,
# since there shouldn't be many of them in practice)
for i in range(n, 0, -1):
if not np.isnan(bins[i - 1]):
return i
return 0
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] < x:
# mid is too low => narrow to upper bins
lo = mid + 1
else:
# mid is too high, or is a NaN => narrow to lower bins
hi = mid
else:
if np.isnan(x):
# NaNs end up in the last bin
return n
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] <= x:
# mid is too low => narrow to upper bins
lo = mid + 1
else:
# mid is too high, or is a NaN => narrow to lower bins
hi = mid
return lo
@register_jitable
def digitize_scalar_decreasing(x, bins, right):
# bins are monotonically-decreasing
n = len(bins)
lo = 0
hi = n
if right:
if np.isnan(x):
# Find the last nan
for i in range(0, n):
if not np.isnan(bins[i]):
return i
return n
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] < x:
# mid is too high => narrow to lower bins
hi = mid
else:
# mid is too low, or is a NaN => narrow to upper bins
lo = mid + 1
else:
if np.isnan(x):
# NaNs end up in the first bin
return 0
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] <= x:
# mid is too high => narrow to lower bins
hi = mid
else:
# mid is too low, or is a NaN => narrow to upper bins
lo = mid + 1
return lo
if isinstance(x, types.Array):
# N-d array and output
def digitize_impl(x, bins, right=False):
is_increasing = are_bins_increasing(bins)
out = np.empty(x.shape, np.intp)
for view, outview in np.nditer((x, out)):
if is_increasing:
index = digitize_scalar(view.item(), bins, right)
else:
index = digitize_scalar_decreasing(view.item(), bins, right)
outview.itemset(index)
return out
return digitize_impl
elif isinstance(x, types.Sequence):
# 1-d sequence and output
def digitize_impl(x, bins, right=False):
is_increasing = are_bins_increasing(bins)
out = np.empty(len(x), np.intp)
for i in range(len(x)):
if is_increasing:
out[i] = digitize_scalar(x[i], bins, right)
else:
out[i] = digitize_scalar_decreasing(x[i], bins, right)
return out
return digitize_impl
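# Usage sketch (illustrative only, not part of this module): np.digitize with
# monotonically increasing bins, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def digitize_demo(x, bins):
#         return np.digitize(x, bins)
#
#     # digitize_demo(np.array([0.2, 6.4, 3.0, 1.6]),
#     #               np.array([0.0, 1.0, 2.5, 4.0, 10.0])) -> array([1, 4, 3, 2])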
_range = range
@overload(np.histogram)
def np_histogram(a, bins=10, range=None):
if isinstance(bins, (int, types.Integer)):
# With a uniform distribution of bins, use a fast algorithm
# independent of the number of bins
if range in (None, types.none):
inf = float('inf')
def histogram_impl(a, bins=10, range=None):
bin_min = inf
bin_max = -inf
for view in np.nditer(a):
v = view.item()
if bin_min > v:
bin_min = v
if bin_max < v:
bin_max = v
return np.histogram(a, bins, (bin_min, bin_max))
else:
def histogram_impl(a, bins=10, range=None):
if bins <= 0:
raise ValueError("histogram(): `bins` should be a "
"positive integer")
bin_min, bin_max = range
if not bin_min <= bin_max:
raise ValueError("histogram(): max must be larger than "
"min in range parameter")
hist = np.zeros(bins, np.intp)
if bin_max > bin_min:
bin_ratio = bins / (bin_max - bin_min)
for view in np.nditer(a):
v = view.item()
b = math.floor((v - bin_min) * bin_ratio)
if 0 <= b < bins:
hist[int(b)] += 1
elif v == bin_max:
hist[bins - 1] += 1
bins_array = np.linspace(bin_min, bin_max, bins + 1)
return hist, bins_array
else:
# With a custom bins array, use a bisection search
def histogram_impl(a, bins=10, range=None):
nbins = len(bins) - 1
for i in _range(nbins):
# Note this also catches NaNs
if not bins[i] <= bins[i + 1]:
raise ValueError("histogram(): bins must increase "
"monotonically")
bin_min = bins[0]
bin_max = bins[nbins]
hist = np.zeros(nbins, np.intp)
if nbins > 0:
for view in np.nditer(a):
v = view.item()
if not bin_min <= v <= bin_max:
# Value is out of bounds, ignore (also catches NaNs)
continue
# Bisect in bins[:-1]
lo = 0
hi = nbins - 1
while lo < hi:
# Note the `+ 1` is necessary to avoid an infinite
# loop where mid = lo => lo = mid
mid = (lo + hi + 1) >> 1
if v < bins[mid]:
hi = mid - 1
else:
lo = mid
hist[lo] += 1
return hist, bins
return histogram_impl
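# Usage sketch (illustrative only, not part of this module): with an integer
# `bins` and no explicit range, the data min/max are used as the bin range,
# e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def histogram_demo(a):
#         return np.histogram(a, 2)
#
#     # histogram_demo(np.arange(4)) -> (array([2, 2]), array([0. , 1.5, 3. ]))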
# Create np.finfo, np.iinfo and np.MachAr
# machar
_mach_ar_supported = ('ibeta', 'it', 'machep', 'eps', 'negep', 'epsneg',
'iexp', 'minexp', 'xmin', 'maxexp', 'xmax', 'irnd',
'ngrd', 'epsilon', 'tiny', 'huge', 'precision',
'resolution',)
MachAr = namedtuple('MachAr', _mach_ar_supported)
# Do not support MachAr field
# finfo
_finfo_supported = ('eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'min',
'minexp', 'negep', 'nexp', 'nmant', 'precision',
'resolution', 'tiny', 'bits',)
finfo = namedtuple('finfo', _finfo_supported)
# iinfo
_iinfo_supported = ('min', 'max', 'bits',)
iinfo = namedtuple('iinfo', _iinfo_supported)
@overload(np.MachAr)
def MachAr_impl():
f = np.MachAr()
_mach_ar_data = tuple([getattr(f, x) for x in _mach_ar_supported])
def impl():
return MachAr(*_mach_ar_data)
return impl
def generate_xinfo(np_func, container, attr):
@overload(np_func)
def xinfo_impl(arg):
nbty = getattr(arg, 'dtype', arg)
f = np_func(as_dtype(nbty))
data = tuple([getattr(f, x) for x in attr])
def impl(arg):
return container(*data)
return impl
generate_xinfo(np.finfo, finfo, _finfo_supported)
generate_xinfo(np.iinfo, iinfo, _iinfo_supported)
def _get_inner_prod(dta, dtb):
# gets an inner product implementation, if both types are float then
# BLAS is used else a local function
@register_jitable
def _innerprod(a, b):
acc = 0
for i in range(len(a)):
acc = acc + a[i] * b[i]
return acc
# no BLAS... use local function regardless
if not _HAVE_BLAS:
return _innerprod
flty = types.real_domain | types.complex_domain
floats = dta in flty and dtb in flty
if not floats:
return _innerprod
else:
a_dt = as_dtype(dta)
b_dt = as_dtype(dtb)
dt = np.promote_types(a_dt, b_dt)
@register_jitable
def _dot_wrap(a, b):
return np.dot(a.astype(dt), b.astype(dt))
return _dot_wrap
def _assert_1d(a, func_name):
if isinstance(a, types.Array):
if not a.ndim <= 1:
raise TypingError("%s() only supported on 1D arrays " % func_name)
def _np_correlate_core(ap1, ap2, mode, direction):
pass
class _corr_conv_Mode(IntEnum):
"""
Enumerated modes for correlate/convolve as per:
https://github.com/numpy/numpy/blob/ac6b1a902b99e340cf7eeeeb7392c91e38db9dd8/numpy/core/numeric.py#L862-L870 # noqa: E501
"""
VALID = 0
SAME = 1
FULL = 2
@overload(_np_correlate_core)
def _np_correlate_core_impl(ap1, ap2, mode, direction):
a_dt = as_dtype(ap1.dtype)
b_dt = as_dtype(ap2.dtype)
dt = np.promote_types(a_dt, b_dt)
innerprod = _get_inner_prod(ap1.dtype, ap2.dtype)
Mode = _corr_conv_Mode
def impl(ap1, ap2, mode, direction):
# Implementation loosely based on `_pyarray_correlate` from
# https://github.com/numpy/numpy/blob/3bce2be74f228684ca2895ad02b63953f37e2a9d/numpy/core/src/multiarray/multiarraymodule.c#L1191 # noqa: E501
# For "Mode":
# Convolve uses 'full' by default, this is denoted by the number 2
# Correlate uses 'valid' by default, this is denoted by the number 0
# For "direction", +1 to write the return values out in order 0->N
# -1 to write them out N->0.
if not (mode == Mode.VALID or mode == Mode.FULL):
raise ValueError("Invalid mode")
n1 = len(ap1)
n2 = len(ap2)
length = n1
n = n2
if mode == Mode.VALID: # mode == valid == 0, correlate default
length = length - n + 1
n_left = 0
n_right = 0
elif mode == Mode.FULL: # mode == full == 2, convolve default
n_right = n - 1
n_left = n - 1
length = length + n - 1
else:
raise ValueError("Invalid mode")
ret = np.zeros(length, dt)
n = n - n_left
if direction == 1:
idx = 0
inc = 1
elif direction == -1:
idx = length - 1
inc = -1
else:
raise ValueError("Invalid direction")
for i in range(n_left):
ret[idx] = innerprod(ap1[:idx + 1], ap2[-(idx + 1):])
idx = idx + inc
for i in range(n1 - n2 + 1):
ret[idx] = innerprod(ap1[i : i + n2], ap2)
idx = idx + inc
for i in range(n_right, 0, -1):
ret[idx] = innerprod(ap1[-i:], ap2[:i])
idx = idx + inc
return ret
return impl
@overload(np.correlate)
def _np_correlate(a, v):
_assert_1d(a, 'np.correlate')
_assert_1d(v, 'np.correlate')
@register_jitable
def op_conj(x):
return np.conj(x)
@register_jitable
def op_nop(x):
return x
Mode = _corr_conv_Mode
if a.dtype in types.complex_domain:
if v.dtype in types.complex_domain:
a_op = op_nop
b_op = op_conj
else:
a_op = op_nop
b_op = op_nop
else:
if v.dtype in types.complex_domain:
a_op = op_nop
b_op = op_conj
else:
a_op = op_conj
b_op = op_nop
_NP_PRED = numpy_version > (1, 17)
def impl(a, v):
la = len(a)
lv = len(v)
if _NP_PRED is True:
if la == 0:
raise ValueError("'a' cannot be empty")
if lv == 0:
raise ValueError("'v' cannot be empty")
if la < lv:
return _np_correlate_core(b_op(v), a_op(a), Mode.VALID, -1)
else:
return _np_correlate_core(a_op(a), b_op(v), Mode.VALID, 1)
return impl
@overload(np.convolve)
def np_convolve(a, v):
_assert_1d(a, 'np.convolve')
_assert_1d(v, 'np.convolve')
Mode = _corr_conv_Mode
def impl(a, v):
la = len(a)
lv = len(v)
if la == 0:
raise ValueError("'a' cannot be empty")
if lv == 0:
raise ValueError("'v' cannot be empty")
if la < lv:
return _np_correlate_core(v, a[::-1], Mode.FULL, 1)
else:
return _np_correlate_core(a, v[::-1], Mode.FULL, 1)
return impl
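# Usage sketch (illustrative only, not part of this module): np.convolve uses
# 'full' mode, matching NumPy's default, e.g.
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def convolve_demo(a, v):
#         return np.convolve(a, v)
#
#     # convolve_demo(np.array([1., 2., 3.]), np.array([0., 1., 0.5]))
#     # -> array([0. , 1. , 2.5, 4. , 1.5])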
@overload(np.asarray)
def np_asarray(a, dtype=None):
# developer note... keep this function (type_can_asarray) in sync with the
# accepted types implementations below!
if not type_can_asarray(a):
return None
impl = None
if isinstance(a, types.Array):
if is_nonelike(dtype) or a.dtype == dtype.dtype:
def impl(a, dtype=None):
return a
else:
def impl(a, dtype=None):
return a.astype(dtype)
elif isinstance(a, (types.Sequence, types.Tuple)):
# Nested lists cannot be unpacked, therefore only single lists are
        # permitted and these conform to Sequence and can be unpacked along
# the same path as Tuple.
if is_nonelike(dtype):
def impl(a, dtype=None):
return np.array(a)
else:
def impl(a, dtype=None):
return np.array(a, dtype)
elif isinstance(a, (types.Number, types.Boolean)):
dt_conv = a if is_nonelike(dtype) else dtype
ty = as_dtype(dt_conv)
def impl(a, dtype=None):
return np.array(a, ty)
elif isinstance(a, types.containers.ListType):
if not isinstance(a.dtype, (types.Number, types.Boolean)):
raise TypingError(
"asarray support for List is limited "
"to Boolean and Number types")
target_dtype = a.dtype if is_nonelike(dtype) else dtype
def impl(a, dtype=None):
l = len(a)
ret = np.empty(l, dtype=target_dtype)
for i, v in enumerate(a):
ret[i] = v
return ret
elif isinstance(a, types.StringLiteral):
arr = np.asarray(a.literal_value)
def impl(a, dtype=None):
return arr.copy()
return impl
@overload(np.asfarray)
def np_asfarray(a, dtype=np.float64):
# convert numba dtype types into NumPy dtype
if isinstance(dtype, types.Type):
dtype = as_dtype(dtype)
if not np.issubdtype(dtype, np.inexact):
dx = types.float64
else:
dx = dtype
def impl(a, dtype=np.float64):
return np.asarray(a, dx)
return impl
@overload(np.extract)
def np_extract(condition, arr):
def np_extract_impl(condition, arr):
cond = np.asarray(condition).flatten()
a = np.asarray(arr)
if a.size == 0:
raise ValueError('Cannot extract from an empty array')
# the following looks odd but replicates NumPy...
# https://github.com/numpy/numpy/issues/12859
if np.any(cond[a.size:]) and cond.size > a.size:
msg = 'condition shape inconsistent with arr shape'
raise ValueError(msg)
# NumPy raises IndexError: index 'm' is out of
# bounds for size 'n'
max_len = min(a.size, cond.size)
out = [a.flat[idx] for idx in range(max_len) if cond[idx]]
return np.array(out)
return np_extract_impl
@overload(np.select)
def np_select(condlist, choicelist, default=0):
def np_select_arr_impl(condlist, choicelist, default=0):
if len(condlist) != len(choicelist):
raise ValueError('list of cases must be same length as list '
'of conditions')
out = default * np.ones(choicelist[0].shape, choicelist[0].dtype)
# should use reversed+zip, but reversed is not available
for i in range(len(condlist) - 1, -1, -1):
cond = condlist[i]
choice = choicelist[i]
out = np.where(cond, choice, out)
return out
# first we check the types of the input parameters
if not isinstance(condlist, (types.List, types.UniTuple)):
raise TypeError('condlist must be a List or a Tuple')
if not isinstance(choicelist, (types.List, types.UniTuple)):
raise TypeError('choicelist must be a List or a Tuple')
if not isinstance(default, (int, types.Number, types.Boolean)):
raise TypeError('default must be a scalar (number or boolean)')
# the types of the parameters have been checked, now we test the types
# of the content of the parameters
# implementation note: if in the future numba's np.where accepts tuples
# as elements of condlist, then the check below should be extended to
# accept tuples
if not isinstance(condlist[0], types.Array):
raise TypeError('items of condlist must be arrays')
if not isinstance(choicelist[0], types.Array):
raise TypeError('items of choicelist must be arrays')
# the types of the parameters and their contents have been checked,
# now we test the dtypes of the content of parameters
if isinstance(condlist[0], types.Array):
if not isinstance(condlist[0].dtype, types.Boolean):
raise TypeError('condlist arrays must contain booleans')
if isinstance(condlist[0], types.UniTuple):
if not (isinstance(condlist[0], types.UniTuple)
and isinstance(condlist[0][0], types.Boolean)):
raise TypeError('condlist tuples must only contain booleans')
# the input types are correct, now we perform checks on the dimensions
if (isinstance(condlist[0], types.Array) and
condlist[0].ndim != choicelist[0].ndim):
raise TypeError('condlist and choicelist elements must have the '
'same number of dimensions')
if isinstance(condlist[0], types.Array) and condlist[0].ndim < 1:
raise TypeError('condlist arrays must be of at least dimension 1')
return np_select_arr_impl
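# Illustrative sketch (not from the library source): np.select with lists of
# boolean condition arrays and choice arrays, callable from nopython mode.
#
#   from numba import njit
#
#   @njit
#   def pick(x):
#       return np.select([x < 3, x > 5], [x, x ** 2], default=0)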
@overload(np.asarray_chkfinite)
def np_asarray_chkfinite(a, dtype=None):
msg = "The argument to np.asarray_chkfinite must be array-like"
if not isinstance(a, (types.Array, types.Sequence, types.Tuple)):
raise TypingError(msg)
if is_nonelike(dtype):
dt = a.dtype
else:
try:
dt = as_dtype(dtype)
except NotImplementedError:
raise TypingError('dtype must be a valid Numpy dtype')
def impl(a, dtype=None):
a = np.asarray(a, dtype=dt)
for i in np.nditer(a):
if not np.isfinite(i):
raise ValueError("array must not contain infs or NaNs")
return a
return impl
#----------------------------------------------------------------------------
# Windowing functions
# - translated from the numpy implementations found in:
# https://github.com/numpy/numpy/blob/v1.16.1/numpy/lib/function_base.py#L2543-L3233 # noqa: E501
# at commit: f1c4c758e1c24881560dd8ab1e64ae750
@register_jitable
def np_bartlett_impl(M):
n = np.arange(M)
return np.where(np.less_equal(n, (M - 1) / 2.0), 2.0 * n / (M - 1),
2.0 - 2.0 * n / (M - 1))
@register_jitable
def np_blackman_impl(M):
n = np.arange(M)
return (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
@register_jitable
def np_hamming_impl(M):
n = np.arange(M)
return 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
@register_jitable
def np_hanning_impl(M):
n = np.arange(M)
return 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
def window_generator(func):
def window_overload(M):
if not isinstance(M, types.Integer):
raise TypingError('M must be an integer')
def window_impl(M):
if M < 1:
return np.array((), dtype=np.float_)
if M == 1:
return np.ones(1, dtype=np.float_)
return func(M)
return window_impl
return window_overload
overload(np.bartlett)(window_generator(np_bartlett_impl))
overload(np.blackman)(window_generator(np_blackman_impl))
overload(np.hamming)(window_generator(np_hamming_impl))
overload(np.hanning)(window_generator(np_hanning_impl))
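# Illustrative sketch (not from the library source): once registered, the
# window functions are available inside jitted code, e.g. for tapering a frame.
#
#   from numba import njit
#
#   @njit
#   def taper(frame):
#       return frame * np.hamming(frame.shape[0])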
_i0A = np.array([
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1,
])
_i0B = np.array([
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1,
])
@register_jitable
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x * b1 - b2 + vals[i]
return 0.5 * (b0 - b2)
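# _chbevl evaluates a Chebyshev series via the Clenshaw recurrence; _i0 below
# combines it with two coefficient tables (_i0A for x <= 8, _i0B otherwise) to
# approximate the order-zero modified Bessel function of the first kind,
# mirroring the cephes implementation used by NumPy.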
@register_jitable
def _i0(x):
if x < 0:
x = -x
if x <= 8.0:
y = (0.5 * x) - 2.0
return np.exp(x) * _chbevl(y, _i0A)
return np.exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / np.sqrt(x)
@register_jitable
def _i0n(n, alpha, beta):
y = np.empty_like(n, dtype=np.float_)
t = _i0(np.float_(beta))
for i in range(len(y)):
y[i] = _i0(beta * np.sqrt(1 - ((n[i] - alpha) / alpha)**2.0)) / t
return y
@overload(np.kaiser)
def np_kaiser(M, beta):
if not isinstance(M, types.Integer):
raise TypingError('M must be an integer')
if not isinstance(beta, (types.Integer, types.Float)):
raise TypingError('beta must be an integer or float')
def np_kaiser_impl(M, beta):
if M < 1:
return np.array((), dtype=np.float_)
if M == 1:
return np.ones(1, dtype=np.float_)
n = np.arange(0, M)
alpha = (M - 1) / 2.0
return _i0n(n, alpha, beta)
return np_kaiser_impl
@register_jitable
def _cross_operation(a, b, out):
def _cross_preprocessing(x):
x0 = x[..., 0]
x1 = x[..., 1]
if x.shape[-1] == 3:
x2 = x[..., 2]
else:
x2 = np.multiply(x.dtype.type(0), x0)
return x0, x1, x2
a0, a1, a2 = _cross_preprocessing(a)
b0, b1, b2 = _cross_preprocessing(b)
cp0 = np.multiply(a1, b2) - np.multiply(a2, b1)
cp1 = np.multiply(a2, b0) - np.multiply(a0, b2)
cp2 = np.multiply(a0, b1) - np.multiply(a1, b0)
out[..., 0] = cp0
out[..., 1] = cp1
out[..., 2] = cp2
@generated_jit
def _cross_impl(a, b):
dtype = np.promote_types(as_dtype(a.dtype), as_dtype(b.dtype))
if a.ndim == 1 and b.ndim == 1:
def impl(a, b):
cp = np.empty((3,), dtype)
_cross_operation(a, b, cp)
return cp
else:
def impl(a, b):
shape = np.add(a[..., 0], b[..., 0]).shape
cp = np.empty(shape + (3,), dtype)
_cross_operation(a, b, cp)
return cp
return impl
@overload(np.cross)
def np_cross(a, b):
if not type_can_asarray(a) or not type_can_asarray(b):
raise TypingError("Inputs must be array-like.")
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3):
raise ValueError((
"Incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)"
))
if a_.shape[-1] == 3 or b_.shape[-1] == 3:
return _cross_impl(a_, b_)
else:
raise ValueError((
"Dimensions for both inputs is 2.\n"
"Please replace your numpy.cross(a, b) call with "
"a call to `cross2d(a, b)` from `numba.np.extensions`."
))
return impl
@register_jitable
def _cross2d_operation(a, b):
def _cross_preprocessing(x):
x0 = x[..., 0]
x1 = x[..., 1]
return x0, x1
a0, a1 = _cross_preprocessing(a)
b0, b1 = _cross_preprocessing(b)
cp = np.multiply(a0, b1) - np.multiply(a1, b0)
# If ndim of a and b is 1, cp is a scalar.
# In this case np.cross returns a 0-D array, containing the scalar.
# np.asarray is used to reconcile this case, without introducing
# overhead in the case where cp is an actual N-D array.
# (recall that np.asarray does not copy existing arrays)
return np.asarray(cp)
@generated_jit
def cross2d(a, b):
if not type_can_asarray(a) or not type_can_asarray(b):
raise TypingError("Inputs must be array-like.")
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] != 2 or b_.shape[-1] != 2:
raise ValueError((
"Incompatible dimensions for 2D cross product\n"
"(dimension must be 2 for both inputs)"
))
return _cross2d_operation(a_, b_)
return impl
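# Illustrative sketch (not from the library source): np.cross covers inputs
# whose last dimension is 3, while the pure-2D case is exposed as cross2d.
#
#   from numba import njit
#   from numba.np.extensions import cross2d
#
#   @njit
#   def demo(a, b):          # a, b are 3-vectors
#       return np.cross(a, b), cross2d(a[:2], b[:2])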
|
py | b409a3c0ca075d80bf46ed3ed37ef2ef8eea169b | # Base Libraries
import argparse
import csv
import sys
import pickle
import numpy as np
import os
import pandas as pd
import time
# The Models
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
import xgboost
import lightgbm
'''
We perform Stratified K fold cross validation coupled with various sampling strategies and
store the confusion matrix and the classification report.
'''
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report, confusion_matrix
from imblearn.over_sampling import SMOTE, ADASYN
from imblearn.combine import SMOTEENN, SMOTETomek
SAMPLING_METHODS = ["ADASYN"]
parser = argparse.ArgumentParser(description='Run all ML models on protein dataset.')
parser.add_argument('--smoketest', dest='smoketest', action='store_true', help='Run models on only first 100 rows of data (for testing)')
parser.set_defaults(smoketest=False)
parser.add_argument('--expensive_classifier', dest='expensive_classifier', action='store_true', help='Run models on time taking classifiers only')
parser.set_defaults(expensive_classifier=False)
args = parser.parse_args()
if not args.expensive_classifier:
ALL_MODELS = [PassiveAggressiveClassifier, Perceptron, RidgeClassifier, SGDClassifier,
LogisticRegression, LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis,
BernoulliNB, GaussianNB, KNeighborsClassifier, NearestCentroid,
DecisionTreeClassifier, AdaBoostClassifier, lightgbm.LGBMClassifier]
ALL_MODEL_NAMES = ['PassiveAggressiveClassifier', 'Perceptron', 'RidgeClassifier', 'SGDClassifier',
'LogisticRegression', 'LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis',
'BernoulliNB', 'GaussianNB', 'KNeighborsClassifier', 'NearestCentroid',
'DecisionTreeClassifier', 'AdaBoostClassifier', 'LGBM']
else:
ALL_MODELS = ["SVC", "LinearSVC", "xgboost"]
ALL_MODEL_NAMES = [SVC, LinearSVC, xgboost.XGBClassifier]
with open('../data/X.pickle','rb') as infile:
X = pickle.load(infile)
with open('../data/y.pickle','rb') as infile:
y = pickle.load(infile)
X = np.reshape(X, newshape=(127537, 300))
# removing translocales
X = X[y != 7]
y = y[y != 7]
if args.smoketest:
X = X[:100]
y = y[:100]
kf = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
if os.path.exists('results.pickle'):
with open('results.pickle', 'rb') as f:
all_results = pickle.load(f)
    # Resume bookkeeping: the last saved entry looks like
    # {'model': 'LogisticRegression', 'fold': 2, 'sampling': 'SMOTETomek', ...}
resumed_run = True
last_iter_fold = all_results[-1]['fold']
last_iter_model = all_results[-1]['model']
last_iter_sampling = all_results[-1]['sampling']
else:
all_results = []
resumed_run = False
last_iter_fold = None
last_iter_model = None
last_iter_sampling = None
fold = 1
for train_index, test_index in kf.split(X, y):
if resumed_run:
if fold < last_iter_fold:
# this fold has already been done, skip
print("K Fold Cross Validation || Fold #", fold, "already done. Skipped.")
fold+=1
continue
print("K Fold Cross Validation || Fold #", fold)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
for sampling_method in SAMPLING_METHODS:
print("K Fold", fold, "sampling methods begin")
if resumed_run:
if SAMPLING_METHODS.index(last_iter_sampling) > SAMPLING_METHODS.index(sampling_method):
# this sampling method has already been done, skip
print("Fold #", fold, ", sampling", sampling_method, "already done. Skipped.")
continue
print("Sampling strategy", sampling_method, "begun.")
start = time.time()
if sampling_method == "NONE":
X_resampled, y_resampled = X_train, y_train
elif sampling_method == "SMOTE":
X_resampled, y_resampled = SMOTE(random_state=1).fit_resample(X_train, y_train)
elif sampling_method == "ADASYN":
X_resampled, y_resampled = ADASYN(random_state=1).fit_resample(X_train, y_train)
# elif sampling_method == "SMOTEENN":
# print("SMOTEENN skipped")
# continue
# smote_enn = SMOTEENN(random_state=1)
# X_resampled, y_resampled = smote_enn.fit_resample(X_train, y_train)
# elif sampling_method == "SMOTETomek":
# print("SMOTETomek skipped")
# resumed_run = False
# continue
# smote_tomek = SMOTETomek(random_state=1)
# X_resampled, y_resampled = smote_tomek.fit_resample(X, y)
print("Sampling of", sampling_method, "done. Took %.2f"%(time.time()-start))
for (classifier, model_name) in zip(ALL_MODELS, ALL_MODEL_NAMES):
if resumed_run:
if ALL_MODEL_NAMES.index(last_iter_model) > ALL_MODEL_NAMES.index(model_name):
# this sampling method has already been done, skip
print("Fold #", fold, ", sampling", sampling_method, "classifier", model_name, "already done. Skipped.")
continue
elif ALL_MODEL_NAMES.index(last_iter_model) == ALL_MODEL_NAMES.index(model_name):
print("Fold #", fold, ", sampling", sampling_method, "classifier", model_name, "already done. Skipped.")
resumed_run = False
continue
print("Running on model: ", model_name, "with", sampling_method, "sampling method on Fold #", fold)
clf = classifier()
start_train = time.time()
clf.fit(X_resampled, y_resampled)
end_train = time.time()
y_pred = clf.predict(X_test)
end_test = time.time()
results_dict = {
"model": model_name,
"fold": fold,
"sampling": sampling_method,
"confusion_matrix": confusion_matrix(y_test, y_pred),
"report": classification_report(y_test, y_pred, output_dict = True),
"train_time": end_train - start_train,
"test_time": end_test - end_train
}
all_results.append(results_dict)
with open('results.pickle', 'wb') as handle:
pickle.dump(all_results, handle)
print("Model", model_name, "on fold", fold, "with sampling strategy", sampling_method, "completed in total of", time.time()-start_train, "seconds")
fold += 1
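# Illustrative post-processing sketch (an assumption, not part of the original
# script): results.pickle can later be summarised into a per-model table, e.g.
#
#   with open('results.pickle', 'rb') as f:
#       all_results = pickle.load(f)
#   summary = pd.DataFrame(
#       [{'model': r['model'], 'fold': r['fold'], 'sampling': r['sampling'],
#         'macro_f1': r['report']['macro avg']['f1-score']} for r in all_results])
#   print(summary.groupby(['model', 'sampling'])['macro_f1'].mean())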
print("All runs complete.") |
py | b409a48a4c80143742c5c9c79631ff7a59285cfe | import unittest
import base64
import json
from unittest.mock import MagicMock, call
from lmctl.client.api import AuthenticationAPI
from lmctl.client.exceptions import TNCOClientHttpError
from requests.auth import HTTPBasicAuth
from lmctl.client.client_request import TNCOClientRequest
class TestAuthenticationAPI(unittest.TestCase):
def setUp(self):
self.mock_client = MagicMock()
self.authentication = AuthenticationAPI(self.mock_client)
def test_request_client_access(self):
self.mock_client.make_request_for_json.return_value = {'access_token': '123', 'expires_in': 60}
response = self.authentication.request_client_access('LmClient', 'secret')
self.assertEqual(response, {'access_token': '123', 'expires_in': 60})
client_encoded = base64.b64encode('LmClient:secret'.encode('utf-8'))
auth = HTTPBasicAuth('LmClient', 'secret')
self.mock_client.make_request_for_json.assert_called_with(TNCOClientRequest(
method='POST',
endpoint='oauth/token',
additional_auth_handler=auth,
inject_current_auth=False,
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body={'grant_type': 'client_credentials'}
))
def test_request_user_access(self):
self.mock_client.make_request_for_json.return_value = {'access_token': '123', 'expires_in': 60}
response = self.authentication.request_user_access('LmClient', 'secret', 'joe', 'secretpass')
self.assertEqual(response, {'access_token': '123', 'expires_in': 60})
client_encoded = base64.b64encode('LmClient:secret'.encode('utf-8'))
auth = HTTPBasicAuth('LmClient', 'secret')
self.mock_client.make_request_for_json.assert_called_with(TNCOClientRequest(
method='POST',
endpoint='oauth/token',
additional_auth_handler=auth,
inject_current_auth=False,
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body={'username': 'joe', 'password': 'secretpass', 'grant_type': 'password'}
))
def test_legacy_login(self):
self.mock_client.make_request_for_json.return_value = {'accessToken': '123', 'expiresIn': 60}
response = self.authentication.legacy_login('joe', 'secretpass')
self.assertEqual(response, {'accessToken': '123', 'expiresIn': 60})
self.mock_client.make_request_for_json.assert_called_with(TNCOClientRequest(
method='POST',
endpoint='ui/api/login',
override_address=None,
inject_current_auth=False,
headers={'Content-Type': 'application/json'},
body=json.dumps({'username': 'joe', 'password': 'secretpass'})
))
def test_legacy_login_older_environments(self):
def request_mock(request):
if request.endpoint == 'ui/api/login':
raise TNCOClientHttpError('Mock error', cause=MagicMock(response=MagicMock(status_code=404, headers={}, body=b'')))
else:
return {'accessToken': '123', 'expiresIn': 60}
self.mock_client.make_request_for_json.side_effect = request_mock
response = self.authentication.legacy_login('joe', 'secretpass')
self.assertEqual(response, {'accessToken': '123', 'expiresIn': 60})
self.mock_client.make_request_for_json.assert_has_calls([
call(
TNCOClientRequest(
method='POST',
endpoint='ui/api/login',
override_address=None,
inject_current_auth=False,
headers={'Content-Type': 'application/json'},
body=json.dumps({'username': 'joe', 'password': 'secretpass'})
)
),
call(
TNCOClientRequest(
method='POST',
endpoint='api/login',
override_address=None,
inject_current_auth=False,
headers={'Content-Type': 'application/json'},
body=json.dumps({'username': 'joe', 'password': 'secretpass'})
)
)
]) |
py | b409a5a8b8398161ee28b8d3fe73f64f5008a92b | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 17 08:04:11 2018
@author: yiyuezhuo
"""
'''
Greeting our great elder toad!
/ \ / \
/ \ / \
| /\ | | /\ |
/\ / \ /\
/ \ _________/ \__________/ \
/ \
( O O )
\ \_ _/ /
\_ --------------------- _/
----___________________---- __-->
/ / = = |\ /| = = =\ >
/ / = = |/ \| = = = \ __-- >
/ /= = = = = = = = \
Source: http://chris.com/ascii/index.php?art=animals/frogs
'''
from .core import Parameter,Data
from .core import optimizing,vb,sampling
from .core import reset
import bayestorch.core as core
import bayestorch.utils as utils
|
py | b409a63be9b838fdd12e5db417c75aefb2f85a76 | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import lib_data
import lib_graph
import lib_hparams
import lib_util
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('data_dir', None,
'Path to the base directory for different datasets.')
flags.DEFINE_string('logdir', None,
'Path to the directory where checkpoints and '
'summary events will be saved during training and '
'evaluation. Multiple runs can be stored within the '
'parent directory of `logdir`. Point TensorBoard '
'to the parent directory of `logdir` to see all '
'your runs.')
flags.DEFINE_bool('log_progress', True,
'If False, do not log any checkpoints and summary'
'statistics.')
# Dataset.
flags.DEFINE_string('dataset', None,
'Choices: Jsb16thSeparated, MuseData, Nottingham, '
'PianoMidiDe')
flags.DEFINE_float('quantization_level', 0.125, 'Quantization duration.'
'For qpm=120, notated quarter note equals 0.5.')
flags.DEFINE_integer('num_instruments', 4,
'Maximum number of instruments that appear in this '
'dataset. Use 0 if not separating instruments and '
'hence does not matter how many there are.')
flags.DEFINE_bool('separate_instruments', True,
'Separate instruments into different input feature'
'maps or not.')
flags.DEFINE_integer('crop_piece_len', 64, 'The number of time steps '
'included in a crop')
# Model architecture.
flags.DEFINE_string('architecture', 'straight',
'Convnet style. Choices: straight')
# Hparams for depthwise separable conv.
flags.DEFINE_bool('use_sep_conv', False, 'Use depthwise separable '
'convolutions.')
flags.DEFINE_integer('sep_conv_depth_multiplier', 1, 'Depth multiplier for'
'depthwise separable convs.')
flags.DEFINE_integer('num_initial_regular_conv_layers', 2, 'The number of'
'regular convolutional layers to start with when using'
'depthwise separable convolutional layers.')
# Hparams for reducing pointwise in separable convs.
flags.DEFINE_integer('num_pointwise_splits', 1, 'Num of splits on the'
'pointwise convolution stage in depthwise separable'
'convolutions.')
flags.DEFINE_integer('interleave_split_every_n_layers', 1, 'Num of split'
'pointwise layers to interleave between full pointwise'
'layers.')
# Hparams for dilated conv.
flags.DEFINE_integer('num_dilation_blocks', 3, 'The number dilation blocks'
'that starts from dilation rate=1.')
flags.DEFINE_bool('dilate_time_only', False, 'If set, only dilates the time'
'dimension and not pitch.')
flags.DEFINE_bool('repeat_last_dilation_level', False, 'If set, repeats the'
'last dilation rate.')
flags.DEFINE_integer('num_layers', 64, 'The number of convolutional layers'
'for architectures that do not use dilated convs.')
flags.DEFINE_integer('num_filters', 128,
'The number of filters for each convolutional '
'layer.')
flags.DEFINE_bool('use_residual', True, 'Add residual connections or not.')
flags.DEFINE_integer('batch_size', 20,
'The batch size for training and validating the model.')
# Mask related.
flags.DEFINE_string('maskout_method', 'orderless',
"The choices include: 'bernoulli' "
"and 'orderless' (which "
'invokes gradient rescaling as per NADE).')
flags.DEFINE_bool(
'mask_indicates_context', True,
'Feed inverted mask into convnet so that zero-padding makes sense.')
flags.DEFINE_bool('optimize_mask_only', False,
'Optimize masked predictions only.')
flags.DEFINE_bool('rescale_loss', True, 'Rescale loss based on context size.')
flags.DEFINE_integer(
'patience', 5,
'Number of epochs to wait for improvement before decaying learning rate.')
flags.DEFINE_float('corrupt_ratio', 0.5, 'Fraction of variables to mask out.')
# Run parameters.
flags.DEFINE_integer('num_epochs', 0,
'The number of epochs to train the model. Default '
'is 0, which means to run until terminated '
'manually.')
flags.DEFINE_integer('save_model_secs', 360,
'The number of seconds between saving each '
'checkpoint.')
flags.DEFINE_integer('eval_freq', 5,
'The number of training iterations before validation.')
flags.DEFINE_string(
'run_id', '',
'A run_id to add to directory names to avoid accidentally overwriting when '
'testing same setups.')
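# Illustrative invocation sketch (paths and values are assumptions, not from
# the source):
#   python train.py --data_dir=/path/to/data --logdir=/tmp/coconet_logs \
#       --dataset=Jsb16thSeparated --num_layers=64 --num_filters=128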
def estimate_popstats(unused_sv, sess, m, dataset, unused_hparams):
"""Averages over mini batches for population statistics for batch norm."""
print('Estimating population statistics...')
tfbatchstats, tfpopstats = list(zip(*list(m.popstats_by_batchstat.items())))
nepochs = 3
nppopstats = [lib_util.AggregateMean('') for _ in tfpopstats]
for _ in range(nepochs):
batches = (
dataset.get_featuremaps().batches(size=m.batch_size, shuffle=True))
for unused_step, batch in enumerate(batches):
feed_dict = batch.get_feed_dict(m.placeholders)
npbatchstats = sess.run(tfbatchstats, feed_dict=feed_dict)
for nppopstat, npbatchstat in zip(nppopstats, npbatchstats):
nppopstat.add(npbatchstat)
nppopstats = [nppopstat.mean for nppopstat in nppopstats]
_print_popstat_info(tfpopstats, nppopstats)
# Update tfpopstat variables.
for unused_j, (tfpopstat, nppopstat) in enumerate(
zip(tfpopstats, nppopstats)):
tfpopstat.load(nppopstat)
def run_epoch(supervisor, sess, m, dataset, hparams, eval_op, experiment_type,
epoch_count):
"""Runs an epoch of training or evaluate the model on given data."""
# reduce variance in validation loss by fixing the seed
data_seed = 123 if experiment_type == 'valid' else None
with lib_util.numpy_seed(data_seed):
batches = (
dataset.get_featuremaps().batches(
size=m.batch_size, shuffle=True, shuffle_rng=data_seed))
losses = lib_util.AggregateMean('losses')
losses_total = lib_util.AggregateMean('losses_total')
losses_mask = lib_util.AggregateMean('losses_mask')
losses_unmask = lib_util.AggregateMean('losses_unmask')
start_time = time.time()
for unused_step, batch in enumerate(batches):
# Evaluate the graph and run back propagation.
fetches = [
m.loss, m.loss_total, m.loss_mask, m.loss_unmask, m.reduced_mask_size,
m.reduced_unmask_size, m.learning_rate, eval_op
]
feed_dict = batch.get_feed_dict(m.placeholders)
(loss, loss_total, loss_mask, loss_unmask, reduced_mask_size,
reduced_unmask_size, learning_rate, _) = sess.run(
fetches, feed_dict=feed_dict)
# Aggregate performances.
losses_total.add(loss_total, 1)
# Multiply the mean loss_mask by reduced_mask_size for aggregation as the
# mask size could be different for every batch.
losses_mask.add(loss_mask * reduced_mask_size, reduced_mask_size)
losses_unmask.add(loss_unmask * reduced_unmask_size, reduced_unmask_size)
if hparams.optimize_mask_only:
losses.add(loss * reduced_mask_size, reduced_mask_size)
else:
losses.add(loss, 1)
# Collect run statistics.
run_stats = dict()
run_stats['loss_mask'] = losses_mask.mean
run_stats['loss_unmask'] = losses_unmask.mean
run_stats['loss_total'] = losses_total.mean
run_stats['loss'] = losses.mean
if experiment_type == 'train':
run_stats['learning_rate'] = float(learning_rate)
# Make summaries.
if FLAGS.log_progress:
summaries = tf.Summary()
for stat_name, stat in six.iteritems(run_stats):
value = summaries.value.add()
value.tag = '%s_%s' % (stat_name, experiment_type)
value.simple_value = stat
supervisor.summary_computed(sess, summaries, epoch_count)
tf.logging.info(
'%s, epoch %d: loss (mask): %.4f, loss (unmask): %.4f, '
'loss (total): %.4f, log lr: %.4f, time taken: %.4f',
experiment_type, epoch_count, run_stats['loss_mask'],
run_stats['loss_unmask'], run_stats['loss_total'],
np.log(run_stats['learning_rate']) if 'learning_rate' in run_stats else 0,
time.time() - start_time)
return run_stats['loss']
def main(unused_argv):
"""Builds the graph and then runs training and validation."""
print('TensorFlow version:', tf.__version__)
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.data_dir is None:
tf.logging.fatal('No input directory was provided.')
print(FLAGS.maskout_method, 'separate', FLAGS.separate_instruments)
hparams = _hparams_from_flags()
# Get data.
print('dataset:', FLAGS.dataset, FLAGS.data_dir)
print('current dir:', os.path.curdir)
train_data = lib_data.get_dataset(FLAGS.data_dir, hparams, 'train')
valid_data = lib_data.get_dataset(FLAGS.data_dir, hparams, 'valid')
print('# of train_data:', train_data.num_examples)
print('# of valid_data:', valid_data.num_examples)
if train_data.num_examples < hparams.batch_size:
print('reducing batch_size to %i' % train_data.num_examples)
hparams.batch_size = train_data.num_examples
train_data.update_hparams(hparams)
# Save hparam configs.
logdir = os.path.join(FLAGS.logdir, hparams.log_subdir_str)
tf.gfile.MakeDirs(logdir)
config_fpath = os.path.join(logdir, 'config')
tf.logging.info('Writing to %s', config_fpath)
with tf.gfile.Open(config_fpath, 'w') as p:
hparams.dump(p)
# Build the graph and subsequently running it for train and validation.
with tf.Graph().as_default():
no_op = tf.no_op()
# Build placeholders and training graph, and validation graph with reuse.
m = lib_graph.build_graph(is_training=True, hparams=hparams)
tf.get_variable_scope().reuse_variables()
mvalid = lib_graph.build_graph(is_training=False, hparams=hparams)
tracker = Tracker(
label='validation loss',
patience=FLAGS.patience,
decay_op=m.decay_op,
save_path=os.path.join(FLAGS.logdir, hparams.log_subdir_str,
'best_model.ckpt'))
# Graph will be finalized after instantiating supervisor.
sv = tf.train.Supervisor(
logdir=logdir,
saver=tf.train.Supervisor.USE_DEFAULT if FLAGS.log_progress else None,
summary_op=None,
save_model_secs=FLAGS.save_model_secs)
with sv.PrepareSession() as sess:
epoch_count = 0
while epoch_count < FLAGS.num_epochs or not FLAGS.num_epochs:
if sv.should_stop():
break
# Run training.
run_epoch(sv, sess, m, train_data, hparams, m.train_op, 'train',
epoch_count)
# Run validation.
if epoch_count % hparams.eval_freq == 0:
estimate_popstats(sv, sess, m, train_data, hparams)
loss = run_epoch(sv, sess, mvalid, valid_data, hparams, no_op,
'valid', epoch_count)
tracker(loss, sess)
if tracker.should_stop():
break
epoch_count += 1
print('best', tracker.label, tracker.best)
print('Done.')
return tracker.best
class Tracker(object):
"""Tracks the progress of training and checks if training should stop."""
def __init__(self, label, save_path, sign=-1, patience=5, decay_op=None):
self.label = label
self.sign = sign
self.best = np.inf
self.saver = tf.train.Saver()
self.save_path = save_path
self.patience = patience
# NOTE: age is reset with decay, but true_age is not
self.age = 0
self.true_age = 0
self.decay_op = decay_op
def __call__(self, loss, sess):
if self.sign * loss > self.sign * self.best:
if FLAGS.log_progress:
tf.logging.info('Previous best %s: %.4f.', self.label, self.best)
tf.gfile.MakeDirs(os.path.dirname(self.save_path))
self.saver.save(sess, self.save_path)
tf.logging.info('Storing best model so far with loss %.4f at %s.' %
(loss, self.save_path))
self.best = loss
self.age = 0
self.true_age = 0
else:
self.age += 1
self.true_age += 1
if self.age > self.patience:
sess.run([self.decay_op])
self.age = 0
def should_stop(self):
return self.true_age > 5 * self.patience
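# Illustrative usage sketch (not from the source; the checkpoint path is an
# arbitrary example): the Tracker is driven once per validation pass.
#
#   tracker = Tracker(label='validation loss', save_path='/tmp/best_model.ckpt',
#                     patience=5, decay_op=m.decay_op)
#   tracker(validation_loss, sess)   # saves best model, decays lr on plateau
#   if tracker.should_stop():        # no improvement for 5 * patience epochs
#       ...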
def _print_popstat_info(tfpopstats, nppopstats):
"""Prints the average and std of population versus batch statistics."""
mean_errors = []
stdev_errors = []
for j, (tfpopstat, nppopstat) in enumerate(zip(tfpopstats, nppopstats)):
moving_average = tfpopstat.eval()
if j % 2 == 0:
mean_errors.append(abs(moving_average - nppopstat))
else:
stdev_errors.append(abs(np.sqrt(moving_average) - np.sqrt(nppopstat)))
def flatmean(xs):
return np.mean(np.concatenate([x.flatten() for x in xs]))
print('average of pop mean/stdev errors: %g %g' % (flatmean(mean_errors),
flatmean(stdev_errors)))
print('average of batch mean/stdev: %g %g' %
(flatmean(nppopstats[0::2]),
flatmean([np.sqrt(ugh) for ugh in nppopstats[1::2]])))
def _hparams_from_flags():
"""Instantiate hparams based on flags set in FLAGS."""
keys = ("""
dataset quantization_level num_instruments separate_instruments
crop_piece_len architecture use_sep_conv num_initial_regular_conv_layers
sep_conv_depth_multiplier num_dilation_blocks dilate_time_only
repeat_last_dilation_level num_layers num_filters use_residual
batch_size maskout_method mask_indicates_context optimize_mask_only
rescale_loss patience corrupt_ratio eval_freq run_id
num_pointwise_splits interleave_split_every_n_layers
""".split())
hparams = lib_hparams.Hyperparameters(**dict(
(key, getattr(FLAGS, key)) for key in keys))
return hparams
if __name__ == '__main__':
# tf.disable_v2_behavior()
tf.app.run()
|
py | b409a8378e8dad6b9cfaf98012475608646ae981 | duration_seconds = int(input())
seconds = duration_seconds % 60
temp = duration_seconds // 60
minutes = temp % 60
temp = temp // 60
hours = temp  # remaining whole hours; no wrap-around for long durations
print(f"{hours}:{minutes}:{seconds}")
|
py | b409a85312547a80aa9dcefd8443a3841eebe21e | # -*- coding: utf-8 -*-
"""
==============================
Elekta phantom data with MUSIC
==============================
"""
# authors: Amit & Alex & Eric
from __future__ import print_function
import numpy as np
import mne
from phantom_helpers import (get_data, get_fwd, plot_errors, actual_pos,
maxfilter_options, dipole_indices,
dipole_amplitudes)
errors = np.empty(
(len(maxfilter_options), len(dipole_amplitudes), len(dipole_indices)))
src, fwd = get_fwd()
for ui, use_maxwell_filter in enumerate(maxfilter_options):
for ai, dipole_amplitude in enumerate(dipole_amplitudes):
print(('Processing : %4d nAm : SSS=%s'
% (dipole_amplitude, use_maxwell_filter)).ljust(40), end='')
for di, dipole_idx in enumerate(dipole_indices):
epochs, evoked, cov, sphere = \
get_data(dipole_idx, dipole_amplitude, use_maxwell_filter)
pos = mne.beamformer.rap_music(
evoked, fwd, cov, n_dipoles=1, return_residual=False)[0].pos[0]
errors[ui, ai, di] = 1e3 * np.linalg.norm(
pos - actual_pos[dipole_idx - 1])
print(np.round(errors[ui, ai], 1))
plot_errors(errors, 'music')
|
py | b409a8786a4cbd770f7c6d13137d328063613b2f | import os
import json
import base64
import random
from mock import patch
import pytest
from mapboxgl.viz import *
from mapboxgl.errors import TokenError, LegendError
from mapboxgl.utils import create_color_stops, create_numeric_stops
from matplotlib.pyplot import imread
@pytest.fixture()
def data():
with open('tests/points.geojson') as fh:
return json.loads(fh.read())
@pytest.fixture()
def polygon_data():
with open('tests/polygons.geojson') as fh:
return json.loads(fh.read())
@pytest.fixture()
def linestring_data():
with open('tests/linestrings.geojson') as fh:
return json.loads(fh.read())
TOKEN = 'pk.abc123'
def test_secret_key_CircleViz(data):
"""Secret key raises a token error
"""
secret = 'sk.abc123'
with pytest.raises(TokenError):
CircleViz(data, access_token=secret)
def test_secret_key_GraduatedCircleViz(data):
"""Secret key raises a token error
"""
secret = 'sk.abc123'
with pytest.raises(TokenError):
GraduatedCircleViz(data, access_token=secret)
def test_secret_key_ChoroplethViz(polygon_data):
"""Secret key raises a token error
"""
secret = 'sk.abc123'
with pytest.raises(TokenError):
ChoroplethViz(polygon_data, access_token=secret)
def test_secret_key_LinestringViz(linestring_data):
"""Secret key raises a token error
"""
secret = 'sk.abc123'
with pytest.raises(TokenError):
LinestringViz(linestring_data, access_token=secret)
def test_token_env_CircleViz(monkeypatch, data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = CircleViz(data, color_property="Avg Medicare Payments")
assert TOKEN in viz.create_html()
def test_token_env_GraduatedCircleViz(monkeypatch, data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
radius_property="Avg Covered Charges")
assert TOKEN in viz.create_html()
def test_token_env_ChoroplethViz(monkeypatch, polygon_data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = ChoroplethViz(polygon_data, color_property="density")
assert TOKEN in viz.create_html()
def test_token_env_LinestringViz(monkeypatch, linestring_data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = LinestringViz(linestring_data, color_property="sample")
assert TOKEN in viz.create_html()
def test_html_color(data):
viz = CircleViz(data,
color_property="Avg Medicare Payments",
access_token=TOKEN)
assert "<html>" in viz.create_html()
def test_html_GraduatedCricleViz(data):
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
radius_property="Avg Covered Charges",
access_token=TOKEN)
assert "<html>" in viz.create_html()
def test_radius_legend_GraduatedCircleViz(data):
"""Raises a LegendError if legend is set to 'radius' legend_function and
legend_gradient is True.
"""
with pytest.raises(LegendError):
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
radius_property="Avg Covered Charges",
legend_function='radius',
legend_gradient=True,
access_token=TOKEN)
viz.create_html()
def test_html_ChoroplethViz(polygon_data):
viz = ChoroplethViz(polygon_data,
color_property="density",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
assert "<html>" in viz.create_html()
def test_html_LinestringViz(linestring_data):
viz = LinestringViz(linestring_data,
color_property="sample",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
assert "<html>" in viz.create_html()
@patch('mapboxgl.viz.display')
def test_display_CircleViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = CircleViz(data,
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_CircleViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for CircleViz.
"""
viz = CircleViz([],
vector_url='mapbox://rsbaumann.2pgmr66a',
vector_layer_name='healthcare-points-2yaw54',
vector_join_property='Provider Id',
data_join_property='Provider Id',
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_GraduatedCircleViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = GraduatedCircleViz(data,
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
radius_property='Avg Covered Charges',
radius_function_type='match',
color_function_type='match',
radius_default=2,
color_default='red',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_GraduatedCircleViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for CircleViz.
"""
viz = GraduatedCircleViz([],
vector_url='mapbox://rsbaumann.2pgmr66a',
vector_layer_name='healthcare-points-2yaw54',
vector_join_property='Provider Id',
data_join_property='Provider Id',
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
radius_property='Avg Covered Charges',
radius_function_type='match',
color_function_type='match',
radius_default=2,
color_default='red',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_HeatmapViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = HeatmapViz(data,
weight_property='Avg Medicare Payments',
weight_stops=[[10, 0], [100, 1]],
color_stops=[[0, 'red'], [0.5, 'blue'], [1, 'green']],
radius_stops=[[0, 1], [12, 30]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_HeatmapViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = HeatmapViz([],
vector_url='mapbox://rsbaumann.2pgmr66a',
vector_layer_name='healthcare-points-2yaw54',
vector_join_property='Provider Id',
data_join_property='Provider Id',
weight_property='Avg Medicare Payments',
weight_stops=[[10, 0], [100, 1]],
color_stops=[[0, 'red'], [0.5, 'blue'], [1, 'green']],
radius_stops=[[0, 1], [12, 30]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ClusteredCircleViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = ClusteredCircleViz(data,
radius_stops=[[10, 0], [100, 1]],
color_stops=[[0, "red"], [10, "blue"], [1, "green"]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ChoroplethViz(display, polygon_data):
"""Assert that show calls the mocked display function
"""
viz = ChoroplethViz(polygon_data,
color_property="density",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_ChoroplethViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for ChoroplethViz.
"""
data = [{"id": "06", "name": "California", "density": 241.7},
{"id": "11", "name": "District of Columbia", "density": 10065},
{"id": "25", "name": "Massachusetts", "density": 840.2},
{"id": "30", "name": "Montana", "density": 6.858},
{"id": "36", "name": "New York", "density": 412.3},
{"id": "49", "name": "Utah", "density": 34.3},
{"id": "72", "name": "Puerto Rico", "density": 1082}]
viz = ChoroplethViz(data,
vector_url='mapbox://mapbox.us_census_states_2015',
vector_layer_name='states',
vector_join_property='STATEFP',
data_join_property='id',
color_property='density',
color_stops=create_color_stops([0, 50, 100, 500, 1500], colors='YlOrRd'),
access_token=TOKEN
)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_extruded_ChoroplethViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for ChoroplethViz.
"""
data = [{"id": "06", "name": "California", "density": 241.7},
{"id": "11", "name": "District of Columbia", "density": 10065},
{"id": "25", "name": "Massachusetts", "density": 840.2},
{"id": "30", "name": "Montana", "density": 6.858},
{"id": "36", "name": "New York", "density": 412.3},
{"id": "49", "name": "Utah", "density": 34.3},
{"id": "72", "name": "Puerto Rico", "density": 1082}]
viz = ChoroplethViz(data,
vector_url='mapbox://mapbox.us_census_states_2015',
vector_layer_name='states',
vector_join_property='STATEFP',
data_join_property='id',
color_property='density',
color_stops=create_color_stops([0, 50, 100, 500, 1500], colors='YlOrRd'),
height_property='density',
height_stops=create_numeric_stops([0, 50, 100, 500, 1500, 10000], 0, 1000000),
access_token=TOKEN
)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_LinestringViz(display, linestring_data):
"""Assert that show calls the mocked display function
"""
viz = LinestringViz(linestring_data,
color_property="sample",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_LinestringViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for LinestringViz.
"""
data = [{"elevation": x, "weight": random.randint(0,100)} for x in range(0, 21000, 10)]
viz = LinestringViz(data,
vector_url='mapbox://mapbox.mapbox-terrain-v2',
vector_layer_name='contour',
vector_join_property='ele',
data_join_property='elevation',
color_property="elevation",
color_stops=create_color_stops([0, 50, 100, 500, 1500], colors='YlOrRd'),
line_width_property='weight',
line_width_stops=create_numeric_stops([0, 25, 50, 75, 100], 1, 6),
access_token=TOKEN
)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_min_zoom(display, data):
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
label_property="Avg Medicare Payments",
radius_property="Avg Covered Charges",
access_token=TOKEN,
min_zoom=10)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_max_zoom(display, data):
viz = HeatmapViz(data,
weight_property="Avg Medicare Payments",
weight_stops=[[10, 0], [100, 1]],
color_stops=[[0, "red"], [0.5, "blue"], [1, "green"]],
radius_stops=[[0, 1], [12, 30]],
access_token=TOKEN,
max_zoom=5)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ImageVizPath(display, data):
"""Assert that show calls the mocked display function
"""
image_path = os.path.join(os.path.dirname(__file__), 'mosaic.png')
coordinates = [
[-123.40515640309, 32.08296982365502],
[-115.92938988349292, 32.08296982365502],
[-115.92938988349292, 38.534294809274336],
[-123.40515640309, 38.534294809274336]][::-1]
viz = ImageViz(image_path, coordinates, access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ImageVizArray(display, data):
"""Assert that show calls the mocked display function
"""
image_path = os.path.join(os.path.dirname(__file__), 'mosaic.png')
image = imread(image_path)
coordinates = [
[-123.40515640309, 32.08296982365502],
[-115.92938988349292, 32.08296982365502],
[-115.92938988349292, 38.534294809274336],
[-123.40515640309, 38.534294809274336]][::-1]
viz = ImageViz(image, coordinates, access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_RasterTileViz(display, data):
"""Assert that show calls the mocked display function
"""
tiles_url = 'https://a.tile.openstreetmap.org/{z}/{x}/{y}.png'
    viz = RasterTilesViz(tiles_url, access_token=TOKEN)
    viz.show()
    display.assert_called_once() |
py | b409a903f39c3dbaa77a6c6717e9185ce35bc8e8 | """Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from tornado.platform import interface
class Waker(interface.Waker):
"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""
def __init__(self):
# Based on Zope select_trigger.py:
# https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
a.listen(1)
connect_address = a.getsockname() # assigned (host, port) pair
try:
self.writer.connect(connect_address)
break # success
except socket.error as detail:
if (not hasattr(errno, 'WSAEADDRINUSE') or
detail[0] != errno.WSAEADDRINUSE):
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.send(b"x")
except (IOError, socket.error):
pass
def consume(self):
try:
while True:
result = self.reader.recv(1024)
if not result:
break
except (IOError, socket.error):
pass
def close(self):
self.reader.close()
self.writer.close()
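# Illustrative usage sketch (not from the module): an event loop selects on
# fileno() and drains the socket after being woken from another thread.
#
#   waker = Waker()
#   waker.wake()      # other thread: sends one byte to the reader socket
#   waker.consume()   # event loop: drains any pending wake-up bytes
#   waker.close()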
|
py | b409a971cfc350fcc1371d1b9c37a6df4b8b9d0a | import json
import discord
from discord.ext import commands
from datetime import datetime
import random
def getConfig():
with open('config.json') as f:
return json.load(f)
def bot_staff(ctx):
return ctx.author.id in ctx.bot.config["owners"] or (
ctx.guild.id == ctx.bot.config["support_guild"]["id"] and ctx.bot.config["staff_role"] in [z.id for z in
ctx.author.roles])
class Embed(discord.Embed):
def __init__(self, ctx: commands.Context, **kwargs):
self.ctx = ctx
self.timestamp = datetime.now()
super().__init__(color=discord.Color.green(), **kwargs)
def author(self):
return super().set_author(name=self.ctx.author, icon_url=self.ctx.author.avatar_url)
def random_footer(self):
return super().set_footer(text=random.choice(self.ctx.bot.config["footer_text_list"]),
icon_url=self.ctx.author.avatar_url)
|
py | b409aa5a7be01ec6a48a72d60eacae9894563ba8 | #!/usr/bin/env python
from ._fitting import __fit_single_decay__, __fit_triple_decay__
from numpy import array, unique
from pandas import Series, concat
from tqdm import tqdm
def fit_relaxation(flevel, seq_time, seq, datetime, blank=0, sat_len=100, rel_len=60, sat_flashlets=0, single_decay=False, bounds=True, single_lims=[100,50000], tau1_lims=[100, 800], tau2_lims=[800, 2000], tau3_lims=[2000, 50000], method='trf', loss='soft_l1', f_scale=0.1, max_nfev=None, xtol=1e-9):
"""
Process the raw transient data and perform the Kolber et al. 1998 relaxation model.
Parameters
----------
seq_time : np.array, dtype=float, shape=[n,]
The sequence time of the flashlets in μs.
flevel : np.array, dtype=float, shape=[n,]
The fluorescence yield of the instrument.
seq : np.array, dtype=int, shape=[n,]
The measurement number.
datetime : np.array, dtype=datetime64, shape=[n,]
The date & time of each measurement in the numpy datetime64 format.
blank : np.array, dtype=float, shape=[n,]
The blank value, must be the same length as flevel.
sat_len : int, default=100
The number of flashlets in the saturation sequence.
rel_len : int, default=60
The number of flashlets in the relaxation sequence.
sat_flashlets : int, default=0
The number of saturation flashlets to include at the start.
single_decay : bool, default=False
If True, will fit a single decay relaxation.
bounds : bool, default=True
If True, will set lower and upper limit bounds for the estimation, not suitable for methods 'lm'.
single_lims : [int, int], default=[100, 50000]
The lower and upper limit bounds for fitting τ, only required if single_decay is True.
tau1_lims: [int, int], default=[100, 800]
The lower and upper limit bounds for fitting τ1.
tau2_lims: [int, int], default=[800, 2000]
The lower and upper limit bounds for fitting τ2.
tau3_lims: [int, int], default=[2000, 50000]
The lower and upper limit bounds for fitting τ3.
fit_method : str, default='trf'
The algorithm to perform minimization. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
loss_method : str, default='soft_l1'
The loss function to be used. Note: Method ‘lm’ supports only ‘linear’ loss. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
fscale : float, default=0.1
The soft margin value between inlier and outlier residuals. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
max_nfev : int, default=None
The number of iterations to perform fitting routine. If None, the value is chosen automatically. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
xtol : float, default=1e-9
The tolerance for termination by the change of the independent variables. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
Returns
-------
res: pandas.DataFrame
The results of the fitting routine with columns as below:
fo_r : np.array, dtype=float, shape=[n,]
The minimum fluorescence level of relaxation phase.
fm_r : np.array, dtype=float, shape=[n,]
The maximum fluorescence level of relaxation phase
tau : np.array, dtype=float, shape=[n,]
The rate of QA\ :sup:`-` reoxidation in μs, only returned if single_decay is True.
alpha1 : np.array, dtype=float, shape=[n,]
The decay coefficient of τ\ :sub:`1`, only returned if single_decay is False.
tau1 : np.array, dtype=float, shape=[n,]
The rate of QA\ :sup:`-` reoxidation in μs, only returned if single_decay is False.
alpha2 : np.array, dtype=float, shape=[n,]
The decay coefficient of τ\ :sub:`2`.
tau2 : np.array, dtype=float, shape=[n,]
The rate of QB\ :sup:`-` reoxidation in μs, only returned if single_decay is False.
alpha3 : np.array, dtype=float, shape=[n,]
The decay coefficient of τ\ :sub:`3`, only returned if single_decay is False.
tau3 : np.array, dtype=float, shape=[n,]
The rate of PQ reoxidation in μs, only returned if single_decay is False.
bias : np.array, dtype=float, shape=[n,]
The bias of fit in %.
rmse : np.array, dtype=float, shape=[n,]
The root mean squared error of the fit.
nrmse : np.array, dtype=float, shape=[n,]
The root mean squared error of the fit normalised to the mean of the fluorescence level.
fo_err : np.array, dtype=float, shape=[n,]
The fit error of Fo_relax in %.
fm_err : np.array, dtype=float, shape=[n,]
The fit error of Fm_relax in %.
tau_err : np.array, dtype=float, shape=[n,]
The fit error of τ, only returned if single_decay is True.
alpha1_err : np.array, dtype=float, shape=[n,]
The fit error of α\ :sub:`1`, only returned if single_decay is False.
tau1_err : np.array, dtype=float, shape=[n,]
The fit error of τ\ :sub:`1`, only returned if single_decay is False.
alpha2_err : np.array, dtype=float, shape=[n,]
The fit error of α\ :sub:`2`, only returned if single_decay is False.
tau2_err : np.array, dtype=float, shape=[n,]
The fit error of τ\ :sub:`2`, only returned if single_decay is False.
alpha3_err : np.array, dtype=float, shape=[n,]
The fit error of α\ :sub:`3`, only returned if single_decay is False.
tau3_err : np.array, dtype=float, shape=[n,]
The fit error of τ\ :sub:`3`, only returned if single_decay is False.
nfl : np.array, dtype=int, shape=[n,]
The number of flashlets used for fitting.
niters : np.array, dype=int, shape=[n,]
The number of functional evaluations done on the fitting routine.
flag : np.array, dtype=int, shape=[n,]
The code associated with the fitting routine success, positive values = SUCCESS, negative values = FAILURE.
-3 : Unable to calculate parameter errors
-2 : F\ :sub:`o` Relax > F\ :sub:`m` Relax
-1 : improper input parameters status returned from MINPACK.
0 : the maximum number of function evaluations is exceeded.
1 : gtol termination condition is satisfied.
2 : ftol termination condition is satisfied.
3 : xtol termination condition is satisfied.
4 : Both ftol and xtol termination conditions are satisfied.
success : np.array, dtype=bool, shape=[n,]
A boolean array reporting whether fit was successful (TRUE) or if not successful (FALSE)
datetime : np.array, dtype=datetime64, shape=[n,]
The date and time associated with the measurement.
Example
-------
    >>> rel = ppu.fit_relaxation(flevel, seq_time, seq, datetime, blank=0, sat_len=100, rel_len=40, single_decay=True, bounds=True, single_lims=[100, 50000])
"""
seq_time = array(seq_time)
flevel = array(flevel)
seq = array(seq)
dt = array(datetime)
if single_decay:
opts = {'sat_flashlets':sat_flashlets, 'bounds':bounds, 'single_lims':single_lims, 'method':method,'loss':loss, 'f_scale':f_scale, 'max_nfev':max_nfev, 'xtol':xtol}
else:
opts = {'sat_flashlets':sat_flashlets, 'bounds':bounds, 'tau1_lims':tau1_lims, 'tau2_lims':tau2_lims, 'tau3_lims':tau3_lims, 'method':method,'loss':loss, 'f_scale':f_scale, 'max_nfev':max_nfev, 'xtol':xtol}
res = []
for s in tqdm(unique(seq)):
i = seq == s
x = seq_time[i]
y = flevel[i]
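        # Keep only the relaxation-phase flashlets (plus any requested trailing
        # saturation flashlets) and re-zero the time axis to the start of the
        # relaxation phase.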
x_min = min(x[sat_len:])
x = x[sat_len-sat_flashlets:sat_len+rel_len] - x_min
y = y[sat_len-sat_flashlets:sat_len+rel_len]
if single_decay:
rel = __fit_single_decay__(x, y, **opts)
else:
rel = __fit_triple_decay__(x, y, **opts)
res.append(Series(rel))
res = concat(res, axis=1)
res = res.T
if res.empty:
pass
else:
if single_decay:
res.columns = ['fo_r', 'fm_r', 'tau', 'bias', 'rmse', 'nrmse', 'fo_err', 'fm_err', 'tau_err', 'nfl', 'niters', 'flag', 'success']
else:
res.columns = ['fo_r', 'fm_r', 'alpha1', 'tau1', 'alpha2','tau2', 'alpha3', 'tau3', 'bias', 'rsme', 'nrmse', 'for_err', 'fmr_err', 'alpha1_err', 'tau1_err', 'alpha2_err', 'tau2_err', 'alpha3_err', 'tau3_err', 'nfl', 'niters', 'flag', 'success']
res['datetime'] = unique(dt)
return res
|
py | b409aabaafb7ad01a093bbba99785fe4f54dc55e | ###############################################################################
# file Sensors.py
###############################################################################
# brief This module handles all low-level components and sensors
###############################################################################
# author Florian Baumgartner & Thierry Schwaller
# version 1.0
# date 2022-05-11
###############################################################################
# MIT License
#
# Copyright (c) 2022 ICAI Interdisciplinary Center for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import os
import sys
import time
import threading
import re, subprocess
import numpy as np
import psutil
DEBUG = False
LINUX = (sys.platform == 'linux')
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(0, os.path.dirname(__file__) + "/Modules")
from TempSensor import TempSensor
from ToFSensor import ToFSensor
from HMI import HMI
from RotaryEncoder import RotaryEncoder
from scipy.signal import convolve2d
from colorsys import hsv_to_rgb
class Sensors():
def __init__(self, powerSupply=None, leds=None):
self.SRC_AMBIENT = 0
self.SRC_SYSTEM = 1
self.SRC_CPU = 2
self.EVENT_ALERT = 0
self.EVENT_FREE = 1
        self.COLOR_BOOT = np.array([1.0, 1.0, 0.0])      # Yellow
self.COLOR_RUN = np.array([0.0, 1.0, 1.0]) # Cyan
self.COLOR_MUTE = np.array([1.0, 0.0, 0.0]) # Red
self.COLOR_STANDBY = np.array([0.62, 0.62, 0.0]) # Yellow (dark)
self._updateRateTemp = 2 # Update rate in Hz
self._updateRateLed = 10 # Update rate in Hz
self._updateRateToF = 3 # Update rate in Hz
self._tempSensorAmbient = TempSensor(0x48)
self._tempSensorSystem = TempSensor(0x49)
self._hmi = HMI(0x62)
self._tofSensor = ToFSensor(self._updateRateToF)
self._rotaryEncoder = RotaryEncoder(pinA=16, pinB=12, pinS=20)
self._powerSupply = powerSupply
self._leds = leds
self._initialized = False
self._runInitialization = False
self._readyState = False
self._runThread = False
self._updateRate = None
self._alertEnable = True
self._alertState = False
self._alertSensitivity = 0.5
self._distanceLevel = 0.0
self._enableMagic = False
self._ledColor = np.zeros((1, 3))
self._shutdownCallback = None
self._ambientTemp = float("NAN")
self._systemTemp = float("NAN")
self._cpuTemp = float("NAN")
self._distanceMap = self._tofSensor.getDistance()
self._timeTemp = 0
self._timeLed = 0
self._timeToF = 0
def __del__(self):
self.end()
def begin(self, updateRate=30):
if not self._initialized:
self._initialized = True
self._runInitialization = True
self._updateRate = updateRate
self._runThread = True
threading.Timer(0, self.update).start()
def end(self, shutdown=False):
self._readyState = False
self._runThread = False
if(self._initialized):
if shutdown:
self._hmi.setButtonColor(self.COLOR_BOOT)
else:
self._hmi.setButtonColor()
self._tempSensorAmbient.end()
self._tempSensorSystem.end()
self._tofSensor.end()
if shutdown:
self._hmi.setButtonColor(self.COLOR_BOOT)
self._hmi.end(not shutdown) # Turn off LED if not shutdown
self._initialized = False
def update(self):
if(self._runInitialization):
self._runInitialization = False
if DEBUG:
print("Asynchronous Sensors Initialization started...")
self._hmi.registerButtonCallback(self._shutdownEvent)
self._hmi.begin()
self._hmi.setButtonColor(self.COLOR_BOOT)
self._hmi.setFanSpeed(1.0) # Do a fan test at startup
self._rotaryEncoder.begin()
self._tempSensorAmbient.begin()
self._tempSensorSystem.begin()
self._tofSensor.begin() # This takes up to 10s
self._hmi.setButtonColor(self.COLOR_RUN)
self._readyState = True
if DEBUG:
print("Asynchronous Sensors Initialization done")
if(self._initialized):
if(self._runThread):
threading.Timer(1.0 / self._updateRate, self.update).start()
else:
return
if(time.time() - self._timeToF > 1 / self._updateRateToF):
if(self._tofSensor.update()):
self._timeToF = time.time()
self._distanceMap = self._tofSensor.getDistance()
event = self._checkDistanceMap(self._distanceMap)
if(event == self.EVENT_ALERT):
self._alertState = True
if(event == self.EVENT_FREE):
self._alertState = False
if DEBUG:
print("Updated ToF Sensor Data")
mute = self.getMute() or self.getAlertState()
if self._powerSupply:
self._powerSupply.enableOutput(not mute)
if self._leds:
self._leds.enableAlert(self.getAlertState())
if(mute):
self._ledColor = self.COLOR_MUTE
else:
self._ledColor = self.COLOR_RUN
if(time.time() - self._timeLed > 1 / self._updateRateLed):
self._timeLed = time.time()
if(self._enableMagic):
r, g, b = hsv_to_rgb(time.time() / 3, 1, 1)
self._ledColor = np.array([r, g, b])
self._hmi.setButtonColor(self._ledColor)
if(time.time() - self._timeTemp > 1 / self._updateRateTemp):
self._timeTemp = time.time()
self._ambientTemp = self._tempSensorAmbient.getTemperature()
self._systemTemp = self._tempSensorSystem.getTemperature()
self._cpuTemp = self._getCpuTemperature()
if not np.isnan(self._systemTemp):
fanSpeed = np.clip((self._systemTemp - 40) / 20, 0, 1)
self._hmi.setFanSpeed(fanSpeed) # 40°C = 0% .. 60°C = 100%
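                # e.g. a hypothetical system temperature of 50 °C maps to a fan speed of 0.5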
def getReadyState(self):
return self._readyState
def registerShutdownCallback(self, callback):
self._shutdownCallback = callback
def getTemperature(self, source):
if(source == self.SRC_AMBIENT):
return self._ambientTemp
elif(source == self.SRC_SYSTEM):
return self._systemTemp
elif(source == self.SRC_CPU):
return self._cpuTemp
return float("NAN")
def getCpuLoad(self):
return psutil.cpu_percent()
def getDistanceLevel(self):
return self._distanceLevel
def enableAlert(self, state):
self._alertEnable = state
def setAlertSensitivity(self, sensitivity):
if not(0.0 <= sensitivity <= 1.0):
raise ValueError("Sensitivity out of bound: 0.0 .. 1.0")
self._alertSensitivity = sensitivity
def getAlertState(self):
return self._alertState and self._alertEnable
def setVolume(self, volume):
if self._rotaryEncoder:
self._rotaryEncoder.setEncoderValue(volume)
if self._powerSupply:
self._powerSupply.setVolume(volume)
def getVolume(self):
return self._rotaryEncoder.getEncoderValue()
def setMaxVolume(self, maxVolume):
if self._powerSupply:
self._powerSupply.setMaxVolume(maxVolume)
def setMute(self, state):
self._rotaryEncoder.setButtonState(state)
def getMute(self):
return self._rotaryEncoder.getButtonState()
def enableMagic(self, state):
self._enableMagic = state
def _shutdownEvent(self):
if DEBUG:
print("Shutdown Event occurred")
if self._shutdownCallback:
self._shutdownCallback(True)
def _getCpuTemperature(self):
if LINUX:
err, msg = subprocess.getstatusoutput('vcgencmd measure_temp')
if not err:
                m = re.search(r'-?\d+\.?\d*', msg)
                try:
                    return float(m.group())
                except (AttributeError, ValueError):
pass
return float("NAN")
def _checkDistanceMap(self, distanceMap):
row_size = 3
column_size = 2
distance_foreground_on = 1200
distance_foreground_off = 1500
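        # The two thresholds form a simple hysteresis: an object closer than 1200 mm
        # raises the alert, and the alert is only released once nothing remains within
        # 1500 mm, so the output does not chatter around a single boundary.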
sens = max(1,(1 - self._alertSensitivity) * row_size * column_size)
mask = np.ones((row_size,column_size))
foreground_map_on = distanceMap <= distance_foreground_on
foreground_map_off = distanceMap <= distance_foreground_off
        # Convolve the map with the window so every map position counts the nearby foreground pixels
        element_foreground_on = convolve2d(foreground_map_on, mask, "same") >= sens
        element_foreground_off = convolve2d(foreground_map_off, mask, "same") >= sens
self._distanceLevel = np.mean(element_foreground_on)
mute_channel = np.any(element_foreground_on)
if not mute_channel and not np.any(element_foreground_off):
mute_channel = False
return self.EVENT_FREE
if mute_channel:
return self.EVENT_ALERT
return None
if __name__ == '__main__':
sensors = Sensors()
sensors.begin()
time.sleep(10000)
sensors.end()
|
py | b409aae5a0217ae4d0dae15d339831213f05d83a | # Generated by Django 2.0 on 2018-03-25 21:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('league', '0006_remove_leagueregistration_registered_by'),
]
operations = [
migrations.AddField(
model_name='leagueregistration',
name='registered_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
|
py | b409abe73b6d69eeebe74f0f77bd70a664ff6174 | # built-in
import sys
from argparse import ArgumentParser
from typing import Iterator, List, NamedTuple, NoReturn, TextIO
# app
from ._codes import extract
from ._plugins import Plugin, get_installed
TEMPLATE = '{c.plugin.name:20} | {c.code:8} | {c.message}'
class Code(NamedTuple):
code: str
message: str
plugin: Plugin
def normalize(name: str) -> str:
return name.replace('-', '_').lower()
def get_codes(lookup_name: str) -> Iterator[Code]:
plugins = sorted(get_installed(), key=lambda p: p.name)
if not plugins:
return
checked = set()
for plugin in plugins:
if plugin.name in checked:
continue
checked.add(plugin.name)
is_prefix = lookup_name.startswith(tuple(plugin.codes))
is_name = normalize(lookup_name) == normalize(plugin.name)
if lookup_name and not is_name and not is_prefix:
continue
try:
codes = extract(plugin.name)
except ImportError:
continue
for code in sorted(codes):
if is_prefix and not code.startswith(lookup_name):
continue
yield Code(
code=code,
message=codes[code],
plugin=plugin,
)
def print_codes(lookup_name: str, stream: TextIO) -> int:
count = 0
for code in get_codes(lookup_name):
count += 1
print(TEMPLATE.format(c=code), file=stream)
return count
def main(argv: List[str], stream: TextIO) -> int:
parser = ArgumentParser()
parser.add_argument('lookup_name', nargs='?', help='plugin name, code, or prefix')
args = parser.parse_args(argv)
lookup_name = args.lookup_name or ''
count = print_codes(lookup_name, stream=stream)
return int(count == 0)
def entrypoint() -> NoReturn:
code = main(argv=sys.argv[1:], stream=sys.stdout)
sys.exit(code)
|
py | b409ad74e32ce9f889262f0686b1c27d79f2ce85 | # -*- coding: utf-8 -*-
from tensorflow import keras
# import keras
from utils.ml_utils import MLModel
class SimpleLSTM(MLModel):
"""LSTM for sentimental analysis."""
def __init__(self, hid_dim, dropout_rate, class_dim, **kwargs):
"""Initialize LSTM model.
"""
super(SimpleLSTM, self).__init__(**kwargs)
model = keras.Sequential()
model.add(self.emb_layer)
model.add(keras.layers.LSTM(hid_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate))
model.add(keras.layers.Dense(class_dim, activation='softmax'))
model.compile(loss="categorical_crossentropy", # "binary_crossentropy"
optimizer="adam", # "adam" rmsprop
metrics=["accuracy"])
self.model = model
class BiLSTM(MLModel):
"""Bi LSTM for sentimental analysis."""
def __init__(self, hid_dim, class_dim, dropout_rate, **kwargs):
"""Initialize Bi-LSTM model.
"""
super(BiLSTM, self).__init__(**kwargs)
model = keras.Sequential()
model.add(self.emb_layer)
model.add(keras.layers.Bidirectional(keras.layers.LSTM(hid_dim)))
model.add(keras.layers.Dropout(dropout_rate))
model.add(keras.layers.Dense(class_dim, activation='softmax'))
model.compile(loss="categorical_crossentropy", # "binary_crossentropy"
optimizer="adam", # "adam" rmsprop
metrics=["accuracy"])
self.model = model
class StackedLSTM(MLModel):
"""两层LSTM 堆叠 参考腾讯公众号文章"""
def __init__(self, hid_dim, class_dim, dropout_rate, **kwargs):
"""Initialize Bi-LSTM model.
"""
super(StackedLSTM, self).__init__(**kwargs)
model = keras.Sequential()
hid_dim = 64
dropout_rate = 0.1
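        # NOTE: the hid_dim and dropout_rate arguments are overridden by the hard-coded values above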
model.add(self.emb_layer)
# l = keras.layers.Embedding(10000, 128, input_length=40)
# model.add(l)
model.add(keras.layers.LSTM(hid_dim, dropout=dropout_rate, return_sequences=True))
model.add(keras.layers.LSTM(hid_dim, return_sequences=True))
# inputs = keras.layers.Input(shape=(3, 2, 4))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(class_dim)) # , activation = 'softmax'))
model.add(keras.layers.Activation('softmax'))
model.compile(loss="categorical_crossentropy", # "binary_crossentropy"
optimizer="adam", # "adam" rmsprop
metrics=["accuracy"])
self.model = model
class GRU(MLModel):
"""LSTM for sentimental analysis."""
def __init__(self, hid_dim, dropout_rate, class_dim, **kwargs):
"""Initialize LSTM model.
"""
super(GRU, self).__init__(**kwargs)
model = keras.Sequential()
model.add(self.emb_layer)
model.add(keras.layers.GRU(hid_dim, dropout=dropout_rate,
recurrent_dropout=dropout_rate))
model.add(keras.layers.Dense(class_dim, activation='softmax'))
model.compile(loss="categorical_crossentropy", # "binary_crossentropy"
optimizer="adam", # "adam" rmsprop
metrics=["accuracy"])
self.model = model
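

# A hypothetical usage sketch (not part of the original module): the MLModel base class
# and its constructor arguments are not shown here, so they are only assumed.
#
#     clf = SimpleLSTM(hid_dim=128, dropout_rate=0.2, class_dim=2, **mlmodel_kwargs)
#     clf.model.fit(x_train, y_train_onehot, epochs=3, batch_size=64)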
|
py | b409ad97f3135c9ef3cd088addfcbe02b5fc3e0d | import tensorflow as tf
import numpy as np
import scipy.misc
import sys
from functools import reduce  # reduce is not a builtin in Python 3; used for the image-size product
import time
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
class EBGAN():
def __init__ (self, batch_size = 50, image_shape = [28,28,1], embedding_size = 128, frames=8, num_class =10, dim1 = 1024, dim2 = 128,
dim3 = 64, dim_channel = 1, dim4=16, learning_rate_1=sys.argv[1], learning_rate_2=sys.argv[2], momentum=sys.argv[3],
scale=10.0):
self.batch_size = batch_size
self.image_shape = image_shape
self.embedding_size = embedding_size
self.num_class = num_class
self.dim1 = dim1
self.dim2 = dim2
self.dim3 = dim3
self.dim4 = dim4
self.learning_rate_1 = float(learning_rate_1)
self.learning_rate_2 = float(learning_rate_2)
self.momentum = float(momentum)
self.scale = scale
self.frames = frames
self.dim_1 = self.image_shape[0]
self.dim_2 = self.image_shape[0] // 2
self.dim_4 = self.image_shape[0] // 4
self.dim_8 = self.image_shape[0] // 8
self.dim_channel = dim_channel
self.device = "/gpu:0"
self.image_size = reduce(lambda x,y : x*y, image_shape)
self.initializer = tf.random_normal_initializer(stddev=0.02)
    def normalize(self, X, eps=1e-6, flag=False):  # called as self.normalize(...) throughout this class
if flag :
if X.get_shape().ndims == 4:
mean, vari = tf.nn.moments(X, [0,1,2], keep_dims=True)
return tf.nn.batch_normalization(X,mean, vari, variance_epsilon=eps)
elif X.get_shape().ndims == 2:
mean, vari = tf.nn.moments(X, 0, keep_dims=True)
return tf.nn.batch_normalization(X, mean, vari, variance_epsilon=eps)
if X.get_shape().ndims == 4 :
mean = tf.reduce_mean(X,[0,1,2])
stddev = tf.reduce_mean(tf.square(X-mean),[0,1,2])
X = (X - mean)/tf.sqrt(stddev + eps)
elif X.get_shape().ndims == 2:
mean = tf.reduce_mean(X,[0])
stddev = tf.reduce_mean(tf.square(X-mean),[0])
X = (X - mean)/tf.sqrt(stddev + eps)
else:
            raise NotImplementedError("normalize only supports 2-D or 4-D tensors")
return X
def lrelu(self, X):
        return tf.nn.leaky_relu(X, alpha=0.2)
def generate(self, embedding, classes, scope):
with tf.device(self.device):
ystack = tf.reshape(classes, [self.batch_size,1, 1, self.num_class])
embedding = tf.concat(axis=1, values=[embedding, classes])
h1 = tf.layers.dense(embedding, units=self.dim1, activation=None,
kernel_initializer=self.initializer,
name='dense_1', reuse=scope.reuse)
h1_relu = tf.nn.relu(self.normalize(h1))
h1_concat = tf.concat(axis=1, values=[h1_relu, classes])
h2 = tf.layers.dense(h1_concat, units=self.dim_8*self.dim_8*self.dim2,
activation=None, kernel_initializer=self.initializer,
name='dense_2', reuse=scope.reuse)
h2_relu = tf.nn.relu(self.normalize(h2))
h2_concat = tf.concat(axis=3,
values=[tf.reshape(h2_relu, shape=[self.batch_size,self.dim_8,self.dim_8,self.dim2]),
ystack*tf.ones(shape=[self.batch_size, self.dim_8, self.dim_8,
self.num_class])])
h3 = tf.layers.conv2d_transpose(inputs=h2_concat, filters = 2*self.dim3,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_1')
h3_relu = tf.nn.relu(self.normalize(h3,flag=True))
# print(h3.get_shape())
h3_concat = tf.concat(axis=3,
values=[tf.reshape(h3_relu, shape=[self.batch_size,self.dim_4,self.dim_4,2*self.dim3]),
ystack*tf.ones(shape=[self.batch_size, self.dim_4, self.dim_4, self.num_class])])
h4 = tf.layers.conv2d_transpose(inputs=h3_concat, filters = 2*self.dim4,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=tf.nn.relu,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_2")
h4_relu = tf.nn.relu(self.normalize(h4,flag=True))
h4_concat = tf.concat(axis=3,
values=[tf.reshape(h4_relu, shape=[self.batch_size,self.dim_2,self.dim_2,2*self.dim4]),
ystack*tf.ones(shape=[self.batch_size, self.dim_2, self.dim_2, self.num_class])])
h5 = tf.layers.conv2d_transpose(inputs=h4_concat, filters = 4*self.dim4,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_3")
h5_relu = tf.nn.relu(self.normalize(h5, flag=True))
h5_concat = tf.concat(axis=3,
values=[h5_relu, ystack*tf.ones(shape=[self.batch_size, self.dim_1, self.dim_1, self.num_class])])
h6 = tf.layers.conv2d_transpose(inputs=h5_concat, filters = self.dim_channel*self.frames,
kernel_size=[5,5], strides=[1,1], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_4")
return tf.nn.sigmoid(h6)
def encoder_image(self, image, scope):
with tf.device(self.device):
LeakyReLU = tf.contrib.keras.layers.LeakyReLU(alpha=0.2)
image_proc = self.normalize(image,flag=True)
h1 = tf.layers.conv2d(image_proc, filters=48, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_1")
h1_relu = self.normalize(LeakyReLU(h1))
h2 = tf.layers.conv2d(h1_relu, filters=64, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_2")
h2_relu = self.normalize(LeakyReLU(h2))
h3 = tf.layers.conv2d(h2_relu, filters=16, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_3")
h3_relu = self.normalize(LeakyReLU(h3))
h3_reshape = tf.reshape(h3_relu, shape=[self.batch_size, self.dim_8[0]*self.dim_8[1]*16])
h4 = tf.layers.dense(h3_reshape, units=self.embedding_size+self.num_class_image,
activation=None,
kernel_initializer=self.initializer,
name='dense_2',
reuse=scope.reuse)
return h4 # no activation over last layer of h4
def decoder_image(self, embedding, zvalue, scope):
with tf.device(self.device):
ystack = tf.reshape(zvalue, shape=[self.batch_size, 1,1 , self.zdimension])
yneed_1 = ystack*tf.ones([self.batch_size, self.dim_4[0], self.dim_4[1], self.zdimension])
yneed_2 = ystack*tf.ones([self.batch_size, self.dim_2[0], self.dim_2[1], self.zdimension])
yneed_3 = ystack*tf.ones([self.batch_size, self.dim_8[0], self.dim_8[1], self.zdimension])
embedding = tf.concat(axis=1, values=[embedding, zvalue])
h1 = tf.layers.dense(embedding, units=1280, activation=None,
kernel_initializer=self.initializer,
name='dense_1', reuse=scope.reuse)
h1_relu = tf.nn.relu(self.normalize(h1))
h1_reshape = tf.reshape(h1_relu, shape=[self.batch_size, self.dim_8[0], self.dim_8[1], 64])
h1_concat = tf.concat(axis=3, values=[h1_reshape,yneed_3])
h2 = tf.layers.conv2d_transpose(inputs=h1_concat, filters = 64,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_1')
h2_relu = tf.nn.relu(self.normalize(h2))
h2_concat = tf.concat(axis=3, values=[h2_relu, yneed_1])
h3 = tf.layers.conv2d_transpose(inputs=h2_concat, filters = 32,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_2')
h3_relu = tf.nn.relu(self.normalize(h3))
h3_concat = tf.concat(axis=3, values=[h3_relu, yneed_2])
h4 = tf.layers.conv2d_transpose(inputs=h3_concat, filters = self.dim_channel,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_3')
return tf.nn.sigmoid(h4)
def discriminate_image(self, image, zvalue, scope):
with tf.device(self.device):
with tf.variable_scope("encoder") as scope:
embedding = self.encoder_image(image, scope)
with tf.variable_scope("decoder") as scope:
                image_reconstr = self.decoder_image(embedding, zvalue, scope)
return tf.sqrt(tf.reduce_mean(tf.square(image - image_reconstr)))
    def build_model(self):
with tf.device(self.device):
embedding = tf.placeholder(tf.float32, [self.batch_size, self.embedding_size])
classes = tf.placeholder(tf.float32, [self.batch_size,self.num_class])
r_image = tf.placeholder(tf.float32,[self.batch_size] + self.image_shape)
real_image = tf.reshape(r_image,[self.batch_size] + self.image_shape)
with tf.variable_scope("generator") as scope:
h4 = self.generate(embedding,classes,scope)
g_image = h4
with tf.variable_scope("discriminator") as scope:
real_value = self.discriminate(real_image,classes,scope)
with tf.variable_scope("discriminator") as scope:
scope.reuse_variables()
fake_value = self.discriminate(g_image,classes,scope)
d_cost = real_value - fake_value
g_cost = fake_value
return embedding, classes, r_image, d_cost, g_cost, fake_value, real_value
|
py | b409ada66cbd8e6e2f160ebbe5a81d3f5df4dd1a | """"""
# Import third-party modules
import pytest
# Import local modules
from photoshop import Session
from photoshop.api.enumerations import TextType
class TestTextItem:
"""Test the solidColor."""
# pylint: disable=attribute-defined-outside-init
@pytest.fixture(autouse=True)
def setup(self, psd_file):
"""Setup for current test."""
self.session = Session(
file_path=psd_file("textitem"), action="open", auto_close=True
)
self.session.run_action()
doc = self.session.active_document
layer = doc.activeLayer
self.text_item = layer.textItem() # -> TextItem
yield
# self.session.close()
def test_alternateLigatures(self):
assert self.text_item.alternateLigatures == 0
def test_antiAliasMethod(self):
assert self.text_item.antiAliasMethod == 3
def test_autoKerning(self):
assert self.text_item.autoKerning == 2
def test_autoLeadingAmount(self):
assert self.text_item.autoLeadingAmount == 120.00000476837158
def test_set_autoLeadingAmount(self):
self.text_item.autoLeadingAmount = 20
assert self.text_item.autoLeadingAmount == 20.000000298023224
def test_baseline_shift(self):
assert self.text_item.baselineShift == 0.0
def test_fauxBold(self):
assert not self.text_item.fauxBold
def test_set_fauxBold(self):
assert not self.text_item.fauxBold
self.text_item.fauxBold = True
assert self.text_item.fauxBold
def test_fauxItalic(self):
assert not self.text_item.fauxItalic
def test_firstLineIndent(self):
assert self.text_item.firstLineIndent == 0.0
def test_get_font(self):
assert self.text_item.font == "ArialMT"
def test_set_font(self):
self.text_item.font = "AdobeThai-Regular"
assert self.text_item.font == "AdobeThai-Regular"
def test_hangingPunctuation(self):
assert not self.text_item.hangingPunctuation
def test_hyphenateAfterFirst(self):
assert self.text_item.hyphenateAfterFirst == 2
def test_justification(self):
assert self.text_item.justification == 1
def test_set_justification(self):
self.text_item.justification = 2
assert self.text_item.justification == 2
def test_kind(self):
assert self.text_item.kind == 1
def test_set_kind(self):
self.text_item.kind = TextType.ParagraphText
assert self.text_item.kind == 2
assert self.text_item.kind == TextType.ParagraphText
def test_noBreak(self):
assert not self.text_item.noBreak
def test_position(self):
assert self.text_item.position == (5.0, 57.0)
def test_size(self):
assert self.text_item.size == 18.0
def test_change_size(self):
self.text_item.size = 20
assert self.text_item.size == 20.0
|
py | b409adf935cf43eb4bbae5c4c9a05774306d882c | """
Author : Puranjay Rajvanshi
Date : June 9th 2020
File : game_winner.py
Description : Decides the game winner.
"""
"""
Big Idea for the code written in this file:
===========================================
Assuming any go board size.
Given: given a board state, determine who's the winner
0 : blank site
1 : black piece
2 : White piece
These are the rules to calculate the winner:
First all the dead stones of both sides are removed from the board. Then one side's living stones are counted,
including the vacant points enclosed by those stones. Vacant points situated between both sides' living stones
are shared equally. A vacant point counts as one stone.
The winner is determined by comparison with 180-1/2, which is half the number of points on the board. If the total
of one side's living stones and enclosed vacant points is larger than 180-1/2, then that side is the winner. If the
total is less than 180-1/2, then that side loses. If the total is equal to 180-1/2, the game is a draw.
In games with compensation, the comparison is made with different numbers, according to separate rules.
Removal of dead stones will be a separate function since it'll be needed for both the Suicide and KO functions
"""
import numpy as np
def game_winner(board):
"""
:param: board: A 2-Dimensional numpy array
:return: 0: Draw
1: Black wins
2: White wins
Basic intuition:
if black piece is encountered increase points for black by 1
if white piece is encountered increase points for white by 1
if blank sites is encountered we try to find the size of the cluster of blank sites
if blank sites are completely surrounded by black then points for black increases by the group size
if blank sites are completely surrounded by white then points for white increases by the group size
if blank sites are completely surrounded by both then points for both increases by (group size)/2
"""
visited = set()
m = board.shape[0]
n = board.shape[1]
if m == 19:
komi = 3.75
else:
komi = (m/2) - 1
count_black = -komi
count_white = komi
offset = np.array([[1,0],[0,1],[-1,0],[0,-1]])
for i in range(m):
for j in range(n):
if (i,j) in visited:
continue
elif board[i][j] == 1:
count_black+=1
elif board[i][j] == 2:
count_white+=1
elif board[i][j] == 0:
queue = set()
queue.add((i,j))
black_neighbour = False
white_neighbour = False
group_count = 0
while queue:
node_x,node_y = queue.pop()
if (node_x,node_y) in visited:
continue
visited.add((node_x,node_y))
group_count+=1
neighbours = offset+np.array([node_x,node_y])
for neighbour in neighbours:
if (neighbour[0],neighbour[1]) in visited:
continue
elif 0<=neighbour[0]<m and 0<=neighbour[1]<n:
val = board[neighbour[0]][neighbour[1]]
if val == 1:
black_neighbour = True
elif val == 2:
white_neighbour = True
elif val == 0:
queue.add((neighbour[0],neighbour[1]))
if black_neighbour and white_neighbour:
count_black+=(group_count/2)
count_white+=(group_count/2)
elif black_neighbour:
count_black+=group_count
elif white_neighbour:
count_white+=group_count
if count_white>count_black:
return 2
elif count_black>count_white:
return 1
else:
return 0
if __name__ == "__main__":
board = np.zeros((19,19))
board[15][8] = 1
board[5][6] = 2
print(board)
print(game_winner(board))
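
    # Added illustration (hedged, not part of the original script): a lone black stone
    # on an otherwise empty board claims every vacant point, so black wins despite komi.
    board2 = np.zeros((19, 19))
    board2[3][3] = 1
    print(game_winner(board2))  # expected output: 1 (black wins)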
|
py | b409ae22cde415c6bd8450fe544b4baa3687056b | # coding: utf-8
"""
Spinnaker API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from spinnaker_swagger_client.api_client import ApiClient
class V2CanaryConfigControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_canary_config_using_post(self, config, **kwargs): # noqa: E501
"""Create a canary configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_canary_config_using_post(config, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object config: config (required)
:param str configuration_account_name: configurationAccountName
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_canary_config_using_post_with_http_info(config, **kwargs) # noqa: E501
else:
(data) = self.create_canary_config_using_post_with_http_info(config, **kwargs) # noqa: E501
return data
def create_canary_config_using_post_with_http_info(self, config, **kwargs): # noqa: E501
"""Create a canary configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_canary_config_using_post_with_http_info(config, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object config: config (required)
:param str configuration_account_name: configurationAccountName
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['config', 'configuration_account_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_canary_config_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config' is set
if ('config' not in params or
params['config'] is None):
raise ValueError("Missing the required parameter `config` when calling `create_canary_config_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'configuration_account_name' in params:
query_params.append(('configurationAccountName', params['configuration_account_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'config' in params:
body_params = params['config']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/canaryConfig', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_canary_config_using_delete(self, id, **kwargs): # noqa: E501
"""Delete a canary configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_canary_config_using_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:param str configuration_account_name: configurationAccountName
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_canary_config_using_delete_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_canary_config_using_delete_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_canary_config_using_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete a canary configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_canary_config_using_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:param str configuration_account_name: configurationAccountName
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'configuration_account_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_canary_config_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_canary_config_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'configuration_account_name' in params:
query_params.append(('configurationAccountName', params['configuration_account_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/canaryConfig/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_canary_config_using_get(self, id, **kwargs): # noqa: E501
"""Retrieve a canary configuration by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_canary_config_using_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:param str configuration_account_name: configurationAccountName
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_canary_config_using_get_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_canary_config_using_get_with_http_info(id, **kwargs) # noqa: E501
return data
def get_canary_config_using_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieve a canary configuration by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_canary_config_using_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:param str configuration_account_name: configurationAccountName
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'configuration_account_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_canary_config_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_canary_config_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'configuration_account_name' in params:
query_params.append(('configurationAccountName', params['configuration_account_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/canaryConfig/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_canary_configs_using_get(self, **kwargs): # noqa: E501
"""Retrieve a list of canary configurations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_canary_configs_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: application
:param str configuration_account_name: configurationAccountName
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_canary_configs_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_canary_configs_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_canary_configs_using_get_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve a list of canary configurations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_canary_configs_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: application
:param str configuration_account_name: configurationAccountName
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'configuration_account_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_canary_configs_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'application' in params:
query_params.append(('application', params['application'])) # noqa: E501
if 'configuration_account_name' in params:
query_params.append(('configurationAccountName', params['configuration_account_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/canaryConfig', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[object]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_canary_config_using_put(self, config, id, **kwargs): # noqa: E501
"""Update a canary configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_canary_config_using_put(config, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object config: config (required)
:param str id: id (required)
:param str configuration_account_name: configurationAccountName
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_canary_config_using_put_with_http_info(config, id, **kwargs) # noqa: E501
else:
(data) = self.update_canary_config_using_put_with_http_info(config, id, **kwargs) # noqa: E501
return data
def update_canary_config_using_put_with_http_info(self, config, id, **kwargs): # noqa: E501
"""Update a canary configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_canary_config_using_put_with_http_info(config, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object config: config (required)
:param str id: id (required)
:param str configuration_account_name: configurationAccountName
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['config', 'id', 'configuration_account_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_canary_config_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'config' is set
if ('config' not in params or
params['config'] is None):
raise ValueError("Missing the required parameter `config` when calling `update_canary_config_using_put`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_canary_config_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'configuration_account_name' in params:
query_params.append(('configurationAccountName', params['configuration_account_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'config' in params:
body_params = params['config']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v2/canaryConfig/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
py | b409af172fa80dd217a7fce60599cd7c0021c1ee | #!/usr/bin/python
import sys
import os
import sqlite3
from sqlite3 import Error
from Connect2DB import *
from tabulate import tabulate
import pandas as dp
def db_name(workspace):
global db_file
db_file = workspace
def create_connection():
""" create a database connection to a SQLite database """
try:
conn = sqlite3.connect(db_file)
conn.execute('''CREATE TABLE accessPoints
(ESSID, BSSID, VENDOR, CHAN, PWR, ENC, CIPHER, AUTH)''')
conn.execute('''CREATE TABLE ProbeRequests
(ESSID, CLIENT, VENDOR, PWR)''')
conn.execute('''CREATE TABLE ProbeResponses
(ESSID, BSSID, VENDOR, CHAN, PWR, ENC, CIPHER, AUTH, CLIENT)''')
conn.execute('''CREATE TABLE EAP
(SRC_MAC, USERNAME, BSSID)''')
conn.execute('''CREATE TABLE INSCOPE_SSIDS
(ESSID)''')
conn.execute('''CREATE TABLE LOOT
(MAC, USERNAME, PASSWORD)''')
except Error as e:
conn.close()
finally:
conn.close()
def list():
db_list = str(os.listdir('db/'))[1:-1].replace('.db','').replace(',','').replace('\'','').split()
dbl=[]
for p in db_list:
        dbl.append("workspace load "+p)
    return dbl
def display_list():
dl = str(os.listdir('db/'))[1:-1].replace('.db','').replace(',','').replace('\'','')
frame = dp.DataFrame(dl.split())
    print(tabulate(frame, showindex=False, headers=['Workspaces'], tablefmt='psql'))
def delete_workspace(workspace):
os.system('rm -rf db/'+workspace+'.db')
def connect_db():
global connection
connection = sqlite3.connect(db_file, check_same_thread=False)
connection.text_factory = str
class load():
def begin(self):
connection.execute("BEGIN TRANSACTION")
def insert_ACCESS_POINT(self, SSID, MAC, VENDOR, CHL, SIG, ENC, CHR, ATH):
connection.execute("insert into accessPoints (ESSID, BSSID, VENDOR, CHAN, PWR, ENC, CIPHER, AUTH) values (?,?,?,?,?,?,?,?)", (SSID, MAC, VENDOR, CHL, SIG, ENC, CHR, ATH))
def Insert_Probe_REQUEST(self, SSID, MAC, VENDOR, SIG):
connection.execute("insert into ProbeRequests (ESSID, CLIENT, VENDOR, PWR) values (?,?,?,?)", (SSID, MAC, VENDOR, SIG))
def Insert_Probe_RESPONSE(self, SSID, MAC, VENDOR, CHL, SIG, ENC, CHR, ATH, RPCM):
connection.execute("insert into ProbeResponses (ESSID, BSSID, VENDOR, CHAN, PWR, ENC, CIPHER, AUTH, CLIENT) values (?,?,?,?,?,?,?,?,?)", (SSID, MAC, VENDOR, CHL, SIG, ENC, CHR, ATH, RPCM))
def Insert_EAP(self, sender, user, ap):
connection.execute("insert into EAP (SRC_MAC, USERNAME, BSSID) values (?,?,?)", (sender, user, ap))
def Close(self):
connection.commit()
connection.close()
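

# A hypothetical end-to-end sketch (illustrative values only) of the workspace flow
# defined above: create the database and tables, open a connection, then insert a row
# inside a transaction.
#
#     db_name('db/demo.db')
#     create_connection()
#     connect_db()
#     session = load()
#     session.begin()
#     session.insert_ACCESS_POINT('CoffeeShop', 'AA:BB:CC:DD:EE:FF', 'VendorX',
#                                 '6', '-40', 'WPA2', 'CCMP', 'PSK')
#     session.Close()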
|
py | b409af71f9ace85f5575ad14591d390806064e6c | import torch
from torch import nn
from torch import Tensor
from typing import Any, Dict, List, Optional, Union, cast
from ..constants import CATEGORICAL, LABEL, LOGITS, FEATURES
from .ft_transformer import _TokenInitialization, CLSToken, FT_Transformer
class CategoricalFeatureTokenizer(nn.Module):
"""
Feature tokenizer for categorical features in tabular data.
It transforms the input categorical features to tokens (embeddings).
The categorical features usually refers to discrete features.
"""
def __init__(
self,
num_categories: List[int],
d_token: int,
bias: Optional[bool] = True,
initialization: Optional[str] = "normal",
) -> None:
"""
Parameters
----------
num_categories:
A list of integers. Each one is the number of categories in one categorical column.
d_token:
The size of one token.
bias:
If `True`, for each feature, an additional trainable vector will be added to the
            embedding regardless of feature value. Notably, the biases are not shared between features.
initialization:
Initialization policy for parameters. Must be one of `['uniform', 'normal']`.
References
----------
1. Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko,
"Revisiting Deep Learning Models for Tabular Data", 2021
https://arxiv.org/pdf/2106.11959.pdf
2. Code: https://github.com/Yura52/tabular-dl-revisiting-models
"""
super().__init__()
self.num_categories = num_categories
category_offsets = torch.tensor([0] + num_categories[:-1]).cumsum(0)
self.register_buffer("category_offsets", category_offsets, persistent=False)
self.embeddings = nn.Embedding(sum(num_categories), d_token)
self.bias = nn.Parameter(Tensor(len(num_categories), d_token)) if bias else None
initialization_ = _TokenInitialization.from_str(initialization)
for parameter in [self.embeddings.weight, self.bias]:
if parameter is not None:
initialization_.apply(parameter, d_token)
@property
def n_tokens(self) -> int:
"""The number of tokens."""
return len(self.num_categories)
@property
def d_token(self) -> int:
"""The size of one token."""
return self.embeddings.embedding_dim
def forward(self, x: Tensor) -> Tensor:
x = self.embeddings(x + self.category_offsets[None])
if self.bias is not None:
x = x + self.bias[None]
return x
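

# A minimal usage sketch for the tokenizer above (illustrative shapes and values,
# not part of the original module):
#
#     tokenizer = CategoricalFeatureTokenizer(num_categories=[3, 5], d_token=8)
#     x = torch.tensor([[0, 4], [2, 1]])   # 2 rows, 2 categorical columns
#     tokens = tokenizer(x)                # shape (2, 2, 8) == (batch, n_tokens, d_token)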
class CategoricalTransformer(nn.Module):
"""
FT-Transformer for categorical tabular features.
The input dimension is automatically computed based on
the number of categories in each categorical column.
"""
def __init__(
self,
prefix: str,
num_categories: List[int],
d_token: int,
cls_token: Optional[bool] = False,
out_features: Optional[int] = None,
num_classes: Optional[int] = 0,
token_bias: Optional[bool] = True,
token_initialization: Optional[str] = "normal",
n_blocks: Optional[int] = 0,
attention_n_heads: Optional[int] = 8,
attention_initialization: Optional[str] = "kaiming",
attention_normalization: Optional[str] = "layer_norm",
        attention_dropout: Optional[float] = 0.2,
        residual_dropout: Optional[float] = 0.0,
        ffn_activation: Optional[str] = "reglu",
        ffn_normalization: Optional[str] = "layer_norm",
        ffn_d_hidden: Optional[int] = 6,
        ffn_dropout: Optional[float] = 0.0,
prenormalization: Optional[bool] = True,
first_prenormalization: Optional[bool] = False,
kv_compression_ratio: Optional[float] = None,
kv_compression_sharing: Optional[str] = None,
head_activation: Optional[str] = "relu",
head_normalization: Optional[str] = "layer_norm",
) -> None:
"""
Parameters
----------
prefix
The model prefix.
num_categories
A list of integers. Each one is the number of categories in one categorical column.
d_token
            The size of one token for `CategoricalFeatureTokenizer`.
cls_token
If `True`, cls token will be added to the token embeddings.
out_features
Dimension of output features.
num_classes
Number of classes. 1 for a regression task.
token_bias
            If `True`, for each feature, an additional trainable vector will be added in `CategoricalFeatureTokenizer`
            to the embedding regardless of feature value. Notably, the biases are not shared between features.
token_initialization
            Initialization policy for parameters in `CategoricalFeatureTokenizer` and `CLSToken`.
Must be one of `['uniform', 'normal']`.
n_blocks
Number of the `FT_Transformer` blocks, which should be non-negative.
attention_n_heads
            Number of attention heads in each `FT_Transformer` block, which should be positive.
attention_initialization
            Weight initialization scheme for the Multi-Headed Attention module.
attention_dropout
Dropout ratio for the Multi Headed Attention module.
residual_dropout
Dropout ratio for the linear layers in FT_Transformer block.
ffn_activation
Activation function type for the Feed-Forward Network module.
ffn_normalization
Normalization scheme of the Feed-Forward Network module.
ffn_d_hidden
            Number of the hidden nodes of the linear layers in the Feed-Forward Network module.
        ffn_dropout
            Dropout ratio of the hidden nodes of the linear layers in the Feed-Forward Network module.
prenormalization, first_prenormalization
            Prenormalization to stabilize the training.
kv_compression_ratio
            The compression ratio to reduce the input sequence length.
kv_compression_sharing
If `true` the projections will share weights.
head_activation
Activation function type of the MLP layer.
head_normalization
Normalization scheme of the MLP layer.
References
----------
1. Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko,
"Revisiting Deep Learning Models for Tabular Data", 2021
https://arxiv.org/pdf/2106.11959.pdf
2. Code: https://github.com/Yura52/tabular-dl-revisiting-models
"""
super().__init__()
assert num_categories, "num_categories must be non-empty"
assert d_token > 0, "d_token must be positive"
assert n_blocks >= 0, "n_blocks must be non-negative"
        assert attention_n_heads > 0, "attention_n_heads must be positive"
assert token_initialization in ["uniform", "normal"], "initialization must be uniform or normal"
self.num_categories = num_categories
self.prefix = prefix
self.out_features = out_features
self.categorical_feature_tokenizer = CategoricalFeatureTokenizer(
num_categories=num_categories,
d_token=d_token,
bias=token_bias,
initialization=token_initialization,
)
self.cls_token = (
CLSToken(
d_token=d_token,
initialization=token_initialization,
)
if cls_token
else nn.Identity()
)
if kv_compression_ratio is not None:
n_tokens = self.categorical_feature_tokenizer.n_tokens + 1
else:
n_tokens = None
self.transformer = FT_Transformer(
d_token=d_token,
n_blocks=n_blocks,
attention_n_heads=attention_n_heads,
attention_dropout=attention_dropout,
attention_initialization=attention_initialization,
attention_normalization=attention_normalization,
ffn_d_hidden=ffn_d_hidden,
ffn_dropout=ffn_dropout,
ffn_activation=ffn_activation,
ffn_normalization=ffn_normalization,
residual_dropout=residual_dropout,
prenormalization=prenormalization,
first_prenormalization=first_prenormalization,
last_layer_query_idx=None,
n_tokens=n_tokens,
kv_compression_ratio=kv_compression_ratio,
kv_compression_sharing=kv_compression_sharing,
head_activation=head_activation,
head_normalization=head_normalization,
d_out=out_features,
)
self.head = FT_Transformer.Head(
d_in=d_token,
d_out=num_classes,
bias=True,
activation=head_activation,
normalization=head_normalization if prenormalization else "Identity",
)
self.name_to_id = self.get_layer_ids()
@property
def categorical_key(self):
return f"{self.prefix}_{CATEGORICAL}"
@property
def label_key(self):
return f"{self.prefix}_{LABEL}"
def forward(self, batch: dict):
"""
Parameters
----------
batch
A dictionary containing the input mini-batch data.
We need to use the keys with the model prefix to index required data.
Returns
-------
A dictionary with logits and features.
"""
categorical_features = []
for categorical_feature in batch[self.categorical_key]:
categorical_features.append(categorical_feature)
categorical_features = torch.stack(categorical_features, dim=1)
features = self.categorical_feature_tokenizer(categorical_features)
features = self.cls_token(features)
features = self.transformer(features)
logits = self.head(features)
return {
self.prefix: {
LOGITS: logits,
FEATURES: features,
}
}
def get_layer_ids(
self,
):
"""
All layers have the same id 0 since there is no pre-trained models used here.
Returns
-------
A dictionary mapping the layer names (keys) to their ids (values).
"""
name_to_id = {}
for n, _ in self.named_parameters():
name_to_id[n] = 0
return name_to_id
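

# A hedged construction sketch (hyperparameters are illustrative; d_token is kept equal
# to out_features here so the head input dimension lines up):
#
#     model = CategoricalTransformer(prefix="cat", num_categories=[3, 5], d_token=32,
#                                    out_features=32, num_classes=2, n_blocks=1)
#     batch = {model.categorical_key: [torch.tensor([0, 2]), torch.tensor([4, 1])]}
#     out = model(batch)["cat"]            # dict holding LOGITS and FEATURES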
|
py | b409b0151bd60968fd2b9dec029a72b0a2ab7071 | import numpy as np
import tensorflow as tf
import h5py
from sklearn.preprocessing import OneHotEncoder
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
startTime = time.time()
print('==> Experiment 1i')
def loadData(filepath):
print('==> Loading data from {}'.format(filepath))
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_test = np.array(f.get('validationFeatures'))
y_test = np.array(f.get('validationLabels'))
del f
print('==> Data sizes:',X_train.shape, y_train.shape, X_test.shape, y_test.shape)
    # Transform labels into one-hot encoding form
enc = OneHotEncoder()
y_train = enc.fit_transform(y_train.copy()).astype(int).toarray()
y_test = enc.fit_transform(y_test.copy()).astype(int).toarray()
return [X_train, y_train, X_test, y_test]
# Neural-network model set-up
# Functions for initializing neural nets parameters
def init_weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64)
return tf.Variable(initial)
def init_bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float64)
return tf.Variable(initial)
def runNeuralNet(num_features, hidden_layer_size, X_train, y_train, X_test, y_test, batchSize, numEpochs):
'''
NN config parameters
'''
num_classes = y_test.shape[1]
print('==> Creating Neural net with %d features, %d hidden units, and %d classes'%(num_features, hidden_layer_size, num_classes))
# Set-up NN layers
x = tf.placeholder(tf.float64, [None, num_features])
W1 = init_weight_variable([num_features, hidden_layer_size])
b1 = init_bias_variable([hidden_layer_size])
# Hidden layer activation function: ReLU
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)
W2 = init_weight_variable([hidden_layer_size, num_classes])
b2 = init_bias_variable([num_classes])
# Softmax layer (Output), dtype = float64
y = tf.matmul(h1, W2) + b2
# NN desired value (labels)
y_ = tf.placeholder(tf.float64, [None, num_classes])
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.InteractiveSession()
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
sess.run(tf.global_variables_initializer())
'''
Training config
'''
numTrainingVec = len(X_train)
print_freq = 5
train_accuracies = []
test_accuracies = []
train_costs = []
test_costs = []
print('Training with %d samples, a batch size of %d, for %d epochs'%(numTrainingVec, batchSize, numEpochs))
for epoch in range(numEpochs):
epochStart = time.time()
for i in range(0,numTrainingVec,batchSize):
# Batch Data
batchEndPoint = min(i+batchSize, numTrainingVec)
trainBatchData = X_train[i:batchEndPoint]
trainBatchLabel = y_train[i:batchEndPoint]
train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel})
epochEnd = time.time()
# calculate the accuracies and costs at this epoch
train_accuracy = accuracy.eval(feed_dict={x:X_train, y_: y_train})
test_accuracy = accuracy.eval(feed_dict={x: X_test, y_: y_test})
train_cost = cross_entropy.eval(feed_dict={x:X_train, y_: y_train})
test_cost = cross_entropy.eval(feed_dict={x: X_test, y_: y_test})
# update the lists
train_accuracies += [train_accuracy]
test_accuracies += [test_accuracy]
train_costs += [train_cost]
test_costs += [test_cost]
# Print accuracy
if (epoch + 1) % print_freq == 0:
print("epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g"%(epoch+1, epochEnd - epochStart, train_accuracy, test_accuracy, train_cost, test_cost))
# Validation
train_accuracy = accuracy.eval(feed_dict={x:X_train, y_: y_train})
test_accuracy = accuracy.eval(feed_dict={x: X_test, y_: y_test})
train_cost = cross_entropy.eval(feed_dict={x:X_train, y_: y_train})
test_cost = cross_entropy.eval(feed_dict={x:X_test, y_: y_test})
print("test accuracy %g"%(test_accuracy))
return [train_accuracies, test_accuracies, train_costs, test_costs]
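# Hedged note (illustrative, not part of the original experiment): with the
# batching loop above, each epoch performs ceil(numTrainingVec / batchSize)
# gradient updates, so larger batch sizes finish an epoch in fewer, but
# individually larger, training steps.
#
#   import math
#   updates_per_epoch = math.ceil(numTrainingVec / batchSize)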
'''
our main
'''
'''
Plot the cost at each epoch for each of these downsampling amounts
'''
[X_train, y_train, X_test, y_test] = loadData('pylon2/ci560sp/cstrong/taylorswift_smallDataset_71_7.mat')
numEpochs = 5
numTrainingSamples = X_train.shape[0]
# leave the testing data the same, downsample the training data
print("==> Starting Downsampling Tests for exp1c")
# set the rates we want to test at
batchSizes = [100, 500, 1000, 5000, 10000]
matplotlib.rcParams.update({'font.size': 8})
trainingAccuracyLists = []
testAccuracyLists = []
trainingCostLists = []
testCostLists = []
times = []
for curSize in batchSizes:
startOfLoop = time.time()
print("==> Test with Batch Size of %d"%(curSize))
[trainingAccuracies, testAccuracies, trainingCosts, testCosts] = runNeuralNet(121, 100, X_train, y_train, X_test, y_test, curSize, numEpochs)
# store the data at each epoch
trainingAccuracyLists += [trainingAccuracies]
testAccuracyLists += [testAccuracies]
trainingCostLists += [trainingCosts]
testCostLists += [testCosts]
endOfTraining = time.time()
times += [endOfTraining - startOfLoop]
endOfLoop = time.time()
print("Test with Batch Size of %d took: %g"%(curSize, endOfLoop - startOfLoop))
#track the time of the whole experiment
endTime = time.time()
print("Whole experiment Took: %g"%(endTime - startTime))
'''
Printing results
'''
print("--------------------------")
print("Summary Of Results")
print("--------------------------")
print("Batch Sizes: %s"%str(batchSizes))
print("Training Accuracy Lists: %s"%str(trainingAccuracyLists))
print("Test Accuracy Lists: %s"%str(testAccuracyLists))
print("Training Cost Lists: %s"%str(trainingCostLists))
print("Test Cost Lists: %s"%str(testCostLists))
'''
Plotting results
'''
#setup the figure, will add plots to it in the loop
fig = plt.figure(figsize=(8,11))
trainingPlot = fig.add_subplot(311)
trainingPlot.set_xlabel("Epoch Numbers")
trainingPlot.set_ylabel("Cross-Entropy Error")
trainingPlot.set_title("Error vs. Epoch Number")
for i in range(len(trainingAccuracyLists)):
curSize = batchSizes[i]
trainingCosts = trainingCostLists[i]
testCosts = testCostLists[i]
numEpochs = len(trainingCosts)
epochNumbers = range(numEpochs)
# only put on validation cost
trainingPlot.plot(epochNumbers, testCosts, label="Validation, Batchsize = %d"%(curSize), marker="o", markersize="3", ls="None")
# plots have already been added to the figure during the loop
trainingPlot.legend(loc="upper right", frameon=False)
# add in the final values
finalTrainingAccuracies = [curList[-1] for curList in trainingAccuracyLists]
finalTestAccuracies = [curList[-1] for curList in testAccuracyLists]
finalTrainingCosts = [curList[-1] for curList in trainingCostLists]
finalTestCosts = [curList[-1] for curList in testCostLists]
errPlot = fig.add_subplot(312)
errPlot.plot(batchSizes, finalTrainingCosts, label="Training", marker="o", markersize="3", ls="None")
errPlot.plot(batchSizes, finalTestCosts, label="Validation", marker="o", markersize="3", ls="None")
errPlot.set_xlabel("Batch Size")
errPlot.set_ylabel("Cross-Entropy Error")
errPlot.legend(loc="lower right", frameon=False)
errPlot.set_title("Final Error vs. Batch Size")
# plot the time versus batch size
timePlot = fig.add_subplot(313)
timePlot.plot(batchSizes, times, label="Time", marker="o", markersize="3", ls="None")
timePlot.set_xlabel("Batch Size")
timePlot.set_ylabel("Time to Make and Train NN (S)")
timePlot.set_title("Time vs. Batch Size")
fig.tight_layout()
fig.savefig('exp1k_BatchSize.png')
|
py | b409b02aafc430324582de6c2f1f079bcd4f7523 | # coding: utf-8
###
# @file krum.py
# @author Sébastien Rouault <[email protected]>
#
# @section LICENSE
#
# Copyright © 2018-2019 Sébastien ROUAULT.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# @section DESCRIPTION
#
# Multi-Krum GAR.
###
import math
import numpy as np
import tensorflow.compat.v1 as tf
import warnings
import autodist.tools as tools
import autodist.native as native
from . import _GAR, register, deprecated_native
# ---------------------------------------------------------------------------- #
# Krum GAR class
class PYKrumGAR(_GAR):
""" Full-Python/(deprecated) native Multi-Krum GAR class.
"""
def _aggregate(self, *gradients):
""" Aggregate the gradient using the associated (deprecated) native helper.
Args:
gradients List of submitted gradients, as numpy arrays
Returns:
Aggregated gradient, as a numpy array
"""
if self.__nbselected == self.__nbworkers:
# Fast path average
result = gradients[0]
for i in range(1, self.__nbworkers):
result += gradients[i]
result /= float(self.__nbworkers)
return result
else:
# Compute list of scores
scores = [list() for i in range(self.__nbworkers)]
for i in range(self.__nbworkers - 1):
score = scores[i]
for j in range(i + 1, self.__nbworkers):
# With: 0 <= i < j < nbworkers
distance = deprecated_native.squared_distance(gradients[i], gradients[j])
if math.isnan(distance):
distance = math.inf
score.append(distance)
scores[j].append(distance)
nbinscore = self.__nbworkers - self.__nbbyzwrks - 2
for i in range(self.__nbworkers):
score = scores[i]
score.sort()
scores[i] = sum(score[:nbinscore])
# Return the average of the m gradients with the smallest score
pairs = [(gradients[i], scores[i]) for i in range(self.__nbworkers)]
pairs.sort(key=lambda pair: pair[1])
result = pairs[0][0]
for i in range(1, self.__nbselected):
result += pairs[i][0]
result /= float(self.__nbselected)
return result
def __init__(self, nbworkers, nbbyzwrks, args):
warnings.warn("Python/native implementation of Krum has been deprecated in favor of the CO implementations", category=DeprecationWarning, stacklevel=3)
self.__nbworkers = nbworkers
self.__nbbyzwrks = nbbyzwrks
self.__nbselected = nbworkers - nbbyzwrks - 2
def aggregate(self, gradients):
# Assertion
assert len(gradients) > 0, "Empty list of gradient to aggregate"
# Computation
gradients = [gradients[0],gradients[1]]
return tf.py_func(self._aggregate, gradients, gradients[0].dtype, stateful=False, name="GAR_krum")
class TFKrumGAR(_GAR):
""" Full-TensorFlow Multi-Krum GAR class.
"""
def __init__(self, nbworkers, nbbyzwrks, args):
self.__nbworkers = nbworkers
self.__nbbyzwrks = nbbyzwrks
self.__nbselected = nbworkers - nbbyzwrks - 2
def aggregate(self, gradients):
with tf.name_scope("GAR_krum_tf"):
# Assertion
assert len(gradients) > 0, "Empty list of gradient to aggregate"
# Distance computations
distances = []
for i in range(self.__nbworkers - 1):
dists = list()
for j in range(i + 1, self.__nbworkers):
sqr_dst = tf.reduce_sum(tf.squared_difference(gradients[i], gradients[j]))
dists.append(tf.negative(tf.where(tf.is_finite(sqr_dst), sqr_dst, tf.constant(np.inf, dtype=sqr_dst.dtype)))) # Use of 'negative' to get the smallest distances and score indexes in 'nn.top_k'
distances.append(dists)
# Score computations
scores = []
for i in range(self.__nbworkers):
dists = []
for j in range(self.__nbworkers):
if j == i:
continue
if j < i:
dists.append(distances[j][i - j - 1])
else:
dists.append(distances[i][j - i - 1])
dists = tf.parallel_stack(dists)
dists, _ = tf.nn.top_k(dists, k=(self.__nbworkers - self.__nbbyzwrks - 2), sorted=False)
scores.append(tf.reduce_sum(dists))
# Average of the 'nbselected' smallest scoring gradients
gradients = tf.parallel_stack(gradients)
scores = tf.parallel_stack(scores)
_, indexes = tf.nn.top_k(scores, k=self.__nbselected, sorted=False)
return tf.reduce_mean(tf.gather(gradients, indexes), axis=0)
class COKrumGAR(_GAR):
""" Full-custom operation Multi-Krum GAR class.
"""
# Name of the associated custom operation
co_name = "krum"
def __init__(self, nbworkers, nbbyzwrks, args):
self.__nbworkers = nbworkers
self.__nbbyzwrks = nbbyzwrks
self.__nbselected = nbworkers - nbbyzwrks - 2
def aggregate(self, gradients):
# Assertion
assert len(gradients) > 0, "Empty list of gradient to aggregate"
# Computation
reshape_gradients = gradients
shape = gradients[0].shape
if len(shape) == 2 and shape[1] == 10:
for i in range(len(gradients)):
reshape_gradients[i] = tf.reshape(gradients[i],[54080,])
else:
for i in range(len(gradients)):
reshape_gradients[i] = tf.reshape(gradients[i],[-1])
grad_avg = native.instantiate_op(type(self).co_name, tf.parallel_stack(gradients), f=self.__nbbyzwrks, m=self.__nbselected)
# lib = tf.load_op_library('/home/starly/Desktop/Fed/autodist/autodist/native/op_krum.so')
# grad_avg = lib.Krum(gradients=tf.parallel_stack(gradients), f=self.__nbbyzwrks, m=self.__nbselected)
if len(shape) == 2 and shape[1] == 10:
#x = tf.placeholder(tf.float32, shape=[10,None], name="tmp")
#grad_avg = tf.reshape(grad_avg, shape=[tf.shape(x)[0],10])
grad_avg = tf.reshape(grad_avg, shape=[5408,10])
else:
grad_avg = tf.reshape(grad_avg, shape)
return grad_avg
# ---------------------------------------------------------------------------- #
# GAR registering
# Register aggregation rules
register("krum-py", PYKrumGAR)
register("krum-tf", TFKrumGAR)
if COKrumGAR.co_name in native.itemize_op():
register("krum-co", COKrumGAR)
else:
tools.warning("GAR 'krum-co' could not be registered since the associated custom operation " + repr(COKrumGAR.co_name) + " is unavailable")
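# Hedged illustration (not used by the GARs above): a minimal NumPy sketch of
# the Multi-Krum rule the classes in this module implement. With n workers and
# f assumed Byzantine workers, each worker's score sums its n - f - 2 smallest
# squared distances to the other gradients, and the m = n - f - 2 gradients
# with the lowest scores are averaged. Function and variable names are
# illustrative only.
def _multi_krum_numpy_demo(gradients, f):
    n = len(gradients)
    m = n - f - 2
    scores = []
    for i in range(n):
        dists = sorted(
            float(np.sum((gradients[i] - gradients[j]) ** 2))
            for j in range(n) if j != i
        )
        scores.append(sum(dists[:m]))  # keep only the m closest neighbours
    selected = np.argsort(scores)[:m]  # indexes of the m lowest-scoring workers
    return np.mean([gradients[i] for i in selected], axis=0)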
|
py | b409b092e3aa0a1ed63335a45e213b8a8748517e | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#end_pymotw_header
import imaplib
import imaplib_connect
with imaplib_connect.open_connection() as c:
# Find the "SEEN" messages in INBOX
c.select('INBOX')
typ, [response] = c.search(None, 'SEEN')
if typ != 'OK':
raise RuntimeError(response)
msg_ids = ','.join(response.decode('utf-8').split(' '))
# Create a new mailbox, "Example.Today"
typ, create_response = c.create('Example.Today')
print('CREATED Example.Today:', create_response)
# Copy the messages
print('COPYING:', msg_ids)
c.copy(msg_ids, 'Example.Today')
# Look at the results
c.select('Example.Today')
typ, [response] = c.search(None, 'ALL')
print('COPIED:', response)
|
py | b409b0e3e13fbe81f9bb59647fafe3bb5238831f | from importlib import import_module
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.urls import include
from django.views.generic.base import RedirectView
from allauth import app_settings
from allauth.socialaccount import providers
# from allauth.socialaccount.providers.google.urls import urlpatterns as allauth_aocialaccount_google_urlpatterns
from .views import SignupView, LoginView, LogoutView, PasswordChangeView, PasswordSetView, \
AccountInactiveView, EmailView, EmailVerificationSentView, ConfirmEmailView, PasswordResetView, \
PasswordResetDoneView, PasswordResetFromKeyView, PasswordResetFromKeyDoneView, \
SocialSignupView, SocialLoginCancelledView, SocialLoginErrorView, SocialConnectionsView
allauth_i18n_urlpatterns = [
url(r"^signup/$", SignupView.as_view(), name="account_signup"),
url(r"^login/$", LoginView.as_view(), name="account_login"),
url(r"^logout/$", LogoutView.as_view(), name="account_logout"),
]
allauth_urlpatterns = [
# Password
url(r"^password/change/$", login_required(PasswordChangeView.as_view()),
name="account_change_password"),
url(r"^password/set/$", login_required(PasswordSetView.as_view()), name="account_set_password"),
# password reset
url(r"^password/reset/$", PasswordResetView.as_view(),
name="account_reset_password"),
url(r"^password/reset/done/$", PasswordResetDoneView.as_view(),
name="account_reset_password_done"),
# SPA frontend required view
url(r"^password/reset/key/$", RedirectView.as_view(url='/password/reset/', permanent=False),
name="account_reset_password_key_redirect"),
url(r"^password/reset/key/(?P<uidb36>[0-9A-Za-z]+)/(?P<key>.+)/$",
PasswordResetFromKeyView.as_view(),
name="account_reset_password_from_key"),
url(r"^password/reset/key/done/$", PasswordResetFromKeyDoneView.as_view(),
name="account_reset_password_from_key_done"),
# E-mail
url(r"^email/$", login_required(EmailView.as_view()), name="account_email"),
url(r"^confirm-email/$", EmailVerificationSentView.as_view(),
name="account_email_verification_sent"),
url(r"^inactive/$", AccountInactiveView.as_view(), name="account_inactive"),
url(r"^confirm-email/(?P<key>[-:\w]+)/$", ConfirmEmailView.as_view(),
name="account_confirm_email"),
# completely overridden
# url(r'', include('allauth.urls')),
]
if app_settings.SOCIALACCOUNT_ENABLED:
allauth_socialaccount_urlpatterns = []
allauth_socialaccount_urlpatterns += [
url(r'^login/cancelled/$', SocialLoginCancelledView.as_view(),
name='socialaccount_login_cancelled'),
url(r'^login/error/$', SocialLoginErrorView.as_view(),
name='socialaccount_login_error'),
url(r'^signup/$', SocialSignupView.as_view(), name='socialaccount_signup'),
url(r'^connections/$', SocialConnectionsView.as_view(), name='socialaccount_connections'),
]
# taken from allauth.urls
# Provider urlpatterns, as separate attribute (for reusability).
provider_urlpatterns = []
for provider in providers.registry.get_list():
try:
prov_mod = import_module(provider.get_package() + '.urls')
except ImportError:
continue
prov_urlpatterns = getattr(prov_mod, 'urlpatterns', None)
if prov_urlpatterns:
allauth_socialaccount_urlpatterns += prov_urlpatterns
allauth_urlpatterns += [
url(r'^social/', include(allauth_socialaccount_urlpatterns)),
]
# completely overridden
# url(r'^social/', include('allauth.socialaccount.urls'))
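# Hedged usage sketch (mount points and the project module are assumptions):
# these pattern lists are meant to be included from a project-level urls.py, e.g.
#
#   urlpatterns = [
#       url(r'^accounts/', include(allauth_i18n_urlpatterns)),
#       url(r'^accounts/', include(allauth_urlpatterns)),
#   ]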
|
py | b409b285a8283d92f6b1a3d506c2a5b51121623a | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from enum import IntEnum
class Intents(IntEnum):
"""
Discord client intents.
These give your client more permissions.
NOTE: The given Intents must also be enabled for your client on
the discord dashboard.
"""
NONE = 0
GUILDS = 1 << 0
GUILD_MEMBERS = 1 << 1
GUILD_BANS = 1 << 2
GUILD_EMOJIS_AND_STICKERS = 1 << 3
GUILD_INTEGRATIONS = 1 << 4
GUILD_WEBHOOKS = 1 << 5
GUILD_INVITES = 1 << 6
GUILD_VOICE_STATES = 1 << 7
GUILD_PRESENCES = 1 << 8
GUILD_MESSAGES = 1 << 9
GUILD_MESSAGE_REACTIONS = 1 << 10
GUILD_MESSAGE_TYPING = 1 << 11
DIRECT_MESSAGES = 1 << 12
DIRECT_MESSAGE_REACTIONS = 1 << 13
DIRECT_MESSAGE_TYPING = 1 << 14
@staticmethod
def all():
"""Consists of all intents"""
res = 0
for intent in list(map(lambda itm: itm.value, Intents)):
res |= intent
return res
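# Hedged usage sketch (illustrative only): the members are bit flags, so they
# can be combined with bitwise OR into a plain integer mask, and Intents.all()
# returns the mask with every flag set.
#
#   mask = Intents.GUILDS | Intents.GUILD_MESSAGES
#   everything = Intents.all()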
|
py | b409b2cefb2de1ff0c254156de740182f4e85575 | import websocket_server
import websocket
import ssl
import json
ws = websocket.WebSocket(sslopt={"cert_reqs": ssl.CERT_NONE})
def sendToApi(command):
try:
ws.connect("wss://emotivcortex.com:54321")
ws.send(command)
result= ws.recv()
return result
finally:
ws.close()
def getToken():
client_id = open('D:\\noela\\Documents\\3TI\\TFE\\EmotivAPIConnection\\client_id.txt', 'r').read()
client_secret = open('D:\\noela\\Documents\\3TI\\TFE\\EmotivAPIConnection\\client_secret.txt', 'r').read()
license_key = open('D:\\noela\\Documents\\3TI\\TFE\\EmotivAPIConnection\\license_key.txt', 'r').read()
auth_json = sendToApi(authorize(client_id, client_secret, license_key))
return json.loads(auth_json)["result"]["_auth"]
def authorize(client_id, client_secret, license):
return json.dumps({ 'jsonrpc': '2.0', 'method': 'authorize', 'params': \
{ 'client_id': client_id, 'client_secret': client_secret, 'license': license, 'debit': 10 } , 'id': 1 })
def queryHeadsets():
query_json = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'queryHeadsets', 'params': \
{ } , 'id': 1 }))
return json.loads(query_json)["result"]
def createSession(token):
session_json = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'createSession', 'params': \
{ '_auth': token, 'status': 'open' } , 'id': 1 }))
return json.loads(session_json)["result"]["status"], json.loads(session_json)["result"]["id"]
def activeSession(token, sid):
activeSession_json = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'updateSession', 'params': \
{ '_auth': token, 'session': sid, 'status': 'active' } , 'id': 1 }))
return json.loads(activeSession_json)#["result"]["status"]
def startRecord(token, sid):
startRecord_json = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'updateSession', 'params': \
{ '_auth': token, 'status': 'startRecord', 'session': sid } , 'id': 1 }))
return json.loads(startRecord_json)["result"]["status"]
def stopRecord(token, sid):
stopRecord_json = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'updateSession', 'params': \
{ '_auth': token, 'status': 'stopRecord', 'session': sid } , 'id': 1 }))
return json.loads(stopRecord_json)["result"]["status"]
def closeSession(token, sid):
closeSession_json = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'updateSession', 'params': \
{ '_auth': token, 'status': 'close', 'session': sid } , 'id': 1 }))
return json.loads(closeSession_json)["result"]["status"]
def subscribe(token, sid):
data = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'subscribe', 'params': \
{ '_auth': token, 'streams': [ 'mot' ], 'session': sid } , 'id': 1 }))
return json.loads(data)#["result"]["sid"]
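# Hedged sketch of a typical call sequence against the Cortex wrappers in this
# module (illustrative only; it needs a running Emotiv Cortex service, a paired
# headset and the credential files read by getToken):
#
#   token = getToken()
#   status, sid = createSession(token)
#   activeSession(token, sid)
#   subscribe(token, sid)      # start receiving 'mot' (motion) samples
#   unsubscribe(token, sid)
#   closeSession(token, sid)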
""" , 'session': sid, 'replay': 'false' """
def unsubscribe(token, sid):
message = sendToApi(json.dumps({ 'jsonrpc': '2.0', 'method': 'unsubscribe', 'params': \
{ '_auth': token, 'streams': [ 'mot' ], 'session': sid } , 'id': 1 }))
return json.loads(message)#["result"]["message"] |
py | b409b30cb7c78426217a669e9cd2fdf4e31d73ce | ##########################################################################################
# Copyright (c) MemSQL. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
##########################################################################################
"""
non_cycle_test.py
non_cycle.py unit test
"""
from frodo.domain import DBObject, Operation, Result
from frodo.history import History, HistoryElem
from frodo.non_cycle import find_g1a, find_g1b
import unittest
class TestNonCyclicalAnomalies(unittest.TestCase):
def get_g1a_anomaly_hist(self) -> History:
obj: DBObject = DBObject(0, "tab")
hist: History = History(
[
# 0
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 0, 0.0, 0.0
),
# 1
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 0, 0.0, 0.0),
# 2
HistoryElem(Operation(Operation.Type.WRITE, obj=obj, value=0), Result(), 0, 0, 0.0, 0.0),
# 3
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 0, 0.0, 0.0),
# 4
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 1, 0.0, 0.0
),
# 5
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 1, 0.0, 0.0),
# 6
HistoryElem(Operation(Operation.Type.WRITE, obj=obj, value=1), Result(), 0, 1, 0.0, 0.0),
# 7
HistoryElem(
Operation(Operation.Type.ROLLBACK, isolation_level="serializable"), Result(), 0, 1, 0.0, 0.0
),
# 8
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 2, 0.0, 0.0
),
# 9
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 2, 0.0, 0.0),
# 10
HistoryElem(Operation(Operation.Type.READ, obj=obj), Result(value=[("0,1",)]), 0, 2, 0.0, 0.0),
# 11
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 2, 0.0, 0.0),
# 12
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 3, 0.0, 0.0
),
# 13
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 3, 0.0, 0.0),
# 14
HistoryElem(Operation(Operation.Type.READ, obj=obj), Result(value=[("0",)]), 0, 3, 0.0, 0.0),
# 15
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 8, 0.0, 0.0),
]
)
hist[2].op.stmt([])
hist[6].op.stmt([0])
return hist
def get_g1b_anomaly_hist(self) -> History:
obj: DBObject = DBObject(0, "tab")
hist: History = History(
[
# 0
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 0, 0.0, 0.0
),
# 1
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 0, 0.0, 0.0),
# 2
HistoryElem(Operation(Operation.Type.WRITE, obj=obj, value=0), Result(), 0, 0, 0.0, 0.0),
# 3
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 0, 0.0, 0.0),
# 4
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 1, 0.0, 0.0
),
# 5
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 1, 0.0, 0.0),
# 6
HistoryElem(Operation(Operation.Type.WRITE, obj=obj, value=1), Result(), 0, 1, 0.0, 0.0),
# 7
HistoryElem(Operation(Operation.Type.WRITE, obj=obj, value=2), Result(), 0, 1, 0.0, 0.0),
# 8
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 1, 0.0, 0.0),
# 9
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 2, 0.0, 0.0
),
# 10
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 2, 0.0, 0.0),
# 11
HistoryElem(Operation(Operation.Type.READ, obj=obj), Result(value=[("0,1",)]), 0, 2, 0.0, 0.0),
# 12
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 2, 0.0, 0.0),
# 13
HistoryElem(
Operation(Operation.Type.SET_ISOLATION, isolation_level="serializable"), Result(), 0, 3, 0.0, 0.0
),
# 14
HistoryElem(Operation(Operation.Type.BEGIN, isolation_level="serializable"), Result(), 0, 3, 0.0, 0.0),
# 15
HistoryElem(Operation(Operation.Type.READ, obj=obj), Result(value=[("0",)]), 0, 3, 0.0, 0.0),
# 16
HistoryElem(Operation(Operation.Type.COMMIT, isolation_level="serializable"), Result(), 0, 8, 0.0, 0.0),
]
)
hist[2].op.stmt([])
hist[6].op.stmt([0])
hist[7].op.stmt([0, 1])
return hist
def test_g1a_anomaly(self) -> None:
wrong_hist: History = self.get_g1a_anomaly_hist()
correct_hist: History = self.get_g1b_anomaly_hist()
self.assertEqual(len(find_g1a(wrong_hist)), 1)
self.assertEqual(len(find_g1a(correct_hist)), 0)
print(find_g1a(wrong_hist)[0])
def test_g1b_anomaly(self) -> None:
wrong_hist: History = self.get_g1b_anomaly_hist()
correct_hist: History = self.get_g1a_anomaly_hist()
self.assertEqual(len(find_g1b(wrong_hist)), 1)
self.assertEqual(len(find_g1b(correct_hist)), 0)
print(find_g1b(wrong_hist)[0])
|
py | b409b533c0056b2809c3ce42130c1ebb15fc4970 | import pytest
import server.integrations.slack as slack
@pytest.fixture
def anomalies():
return []
@pytest.fixture
def config():
return {'url': 'http://localhost:3000/notarealurl'}
@pytest.fixture
def empty_data():
return { "text": "", "blocks": [] }
@pytest.fixture
def data(empty_data):
empty_data['blocks'] = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "Monosi - Anomaly Detected",
}
},
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": "*Type:*\nTable Health",
},
{
"type": "mrkdwn",
"text": "*Table:*\n{}.{}.{}".format("database", "schema", "table_name")
},
],
},
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": "*Column:*\n{}".format("column_name"),
},
{
"type": "mrkdwn",
"text": "*Metric:*\n{}".format("metric"),
}
]
},
]
return empty_data
def test__create_headers_empty(empty_data):
headers = slack.SlackIntegration._create_headers(empty_data)
assert 'Content-Type' in headers
assert 'Content-Length' in headers
assert headers['Content-Type'] == 'application/json'
assert int(headers['Content-Length']) == 232
def test__create_headers_not_empty(data):
headers = slack.SlackIntegration._create_headers(data)
assert 'Content-Type' in headers
assert 'Content-Length' in headers
assert headers['Content-Type'] == 'application/json'
assert int(headers['Content-Length']) == 232
# TODO: Requires DB setup for a metric to retrieve
# def test__retrieve_metric(anomalies):
# anomaly = anomalies[0]
# metric = slack.SlackIntegration._retrieve_metric(anomaly)
# def test__append_anomaly(data, anomalies):
# anomaly = anomalies[0]
# slack.SlackIntegration._append_anomaly(data, anomaly)
# def test__create_request(anomalies, config):
# url, data, headers = slack.SlackIntegration._create_request(anomalies, config)
def test__send_empty(config):
empty_anomalies = []
slack.SlackIntegration.send(empty_anomalies, config)
# def test__send(): # TODO: Intercept request
# raise NotImplementedError
|
py | b409b5632e3bf0e1000ec30ff85ea79af3a794e9 | import logging
import pandas as pd
import xmldataset
import zipfile
logger = logging.getLogger('skaki')
def parse_xml(ratings):
'''parses ratings from an XML string as formatted by fide.com'''
profile = '''
playerslist
player
fideid = dataset:players
name = dataset:players
country = dataset:players
sex = dataset:players
title = dataset:players
w_title = dataset:players
o_title = dataset:players
foa_title = dataset:players
rating = dataset:players
games = dataset:players
k = dataset:players
rapid_rating = dataset:players
rapid_games = dataset:players
rapid_k = dataset:players
blitz_rating = dataset:players
blitz_games = dataset:players
blitz_k = dataset:players
birthday = dataset:players
flag = dataset:players
'''
records = xmldataset.parse_using_profile(ratings, profile)
df = pd.DataFrame.from_records(records['players'])
logger.info(f'parsed {len(df)} ratings from XML')
return df
def read_zipfile(path):
'''reads XML data from the zipfile at path.'''
logger.debug(f'reading zipfile {path}')
FNAME = 'players_list_xml_foa.xml'
with zipfile.ZipFile(path) as zf:
try:
ratings = zf.read(FNAME)
return ratings
except KeyError as e:
logger.error(f'could not read zipfile: {e}.')
def country_names(df):
'''Makes country human readable and creates country_code to contain
the ISO country code used by in the FIDE dataset. The mapping used is
from countrycode.org
:param: dataframe with a country column with ISO country codes
:return: transformed dataframe
'''
if df is None or df.empty:
raise ValueError('cannot transform an empty dataframe!')
# load country codes mapping
countries = pd.read_table('ioc.txt')
ioc = dict(zip(countries.IOC.values, countries.Country.values))
ioc['FID'] = 'Fide'
logger.debug(ioc.get('BAN'))
# add natural country names
df['country_code'] = df['country']
df['country'] = df.country_code.apply(lambda x: ioc.get(x, None))
logger.debug(df)
return df
def load(path):
'''loads fide ratings from a path. unzip, then read the XML format
published by FIDE since the TXT file is not in any easily parseable format
return: a pandas dataframe with all the ratings
'''
logger.debug(f'loading ratings from {path}...')
if zipfile.is_zipfile(path):
ratings = read_zipfile(path)
else:
logger.debug(f'not a zipfile, assuming XML directly')
ratings = open(path).read()
df = parse_xml(ratings)
logger.info(f'read {len(df)} ratings from {path}')
return df
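# Hedged usage sketch (the archive name is illustrative): load a FIDE rating
# dump and attach readable country names before any further analysis.
#
#   df = load('standard_rating_list_xml.zip')
#   df = country_names(df)
#   print(df[['name', 'country', 'rating']].head())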
|
py | b409b61b0887a5fa767c90d13d860219b54009a8 | # -*- coding: utf-8 -*-
import commands
import os
import re
import socket
import shutil
import threading
from hashlib import md5
from oslo.config import cfg
from dns_updater.utils.updater_util import DnsdbApi
from dns_updater.utils.updater_util import send_alarm_email
from dns_updater.utils.updater_util import backup_file
from dnsdb_common.library.exception import UpdaterErr
from dnsdb_common.library.log import getLogger
log = getLogger(__name__)
CONF = cfg.CONF
def _is_local_dns(group_name=None):
if group_name is None:
group_name = CONF.host_group
return group_name.lower().startswith('local')
def _get_named_dir():
return CONF.bind_conf.named_dir
def _get_acl_dir():
return CONF.bind_conf.acl_dir
def _get_local_hostname():
return socket.gethostname()
def _get_named_path():
named_dir = _get_named_dir()
return os.path.join(named_dir, 'named.conf')
def can_reload(group_name):
return DnsdbApi.can_reload(group_name)['data']
def update_host_md5(named_conf_md5):
try:
DnsdbApi.update_host_md5(CONF.host_ip, named_conf_md5)
except Exception as e:
send_alarm_email(u'Host %s updated named.conf successfully but failed to update the database\nReason: %s' % (_get_local_hostname(), e))
log.exception(e)
return
def get_named_md5():
name_file = _get_named_path()
with open(name_file) as f:
content = f.read()
if _is_local_dns():
res = re.findall(r'listen-on {[\s\d\.;]+};', content)[0]
content = content.replace(res, '#localdns_listen_mark')
return md5(content).hexdigest()
# Use named-checkconf to validate the configuration files that need to be reloaded
def check_named_conf(named_file):
if CONF.etc.env == 'dev':
return
status, output = commands.getstatusoutput('%s %s' % (CONF.bind_conf.named_checkconf, named_file))
if status == 0:
log.info('check %s ok' % named_file)
else:
raise UpdaterErr('check %s fail, %s' % (named_file, output))
def copy_named_conf(named_file):
named_path = _get_named_path()
# Back up the existing named.conf
backup_file('named', named_path)
status, output = commands.getstatusoutput(
'cp %s %s && chown named:named %s' % (named_file, named_path, named_path))
if status == 0:
log.info('update name.conf ok')
else:
raise UpdaterErr('copy_named_conf failed: %s' % output)
# Reload the configuration so that it takes effect
def reload_conf():
if CONF.etc.env == 'dev':
return
status, output = commands.getstatusoutput('%s reload' % CONF.bind_conf.rndc)
if status == 0:
log.info('rndc reload success')
else:
raise UpdaterErr('reload named.conf failed: %s' % output)
def update_named_conf(group_name):
named_conf = DnsdbApi.get_named_conf(group_name)['data']
named_dir = _get_named_dir()
new_name_path = os.path.join(named_dir, group_name)
to_use_file = '{0}_used'.format(new_name_path)
with open(new_name_path, 'w') as f:
f.write(named_conf)
shutil.copy(new_name_path, to_use_file)
# For local DNS, fetch the local IPs first and insert "listen-on {ip;};" into the options block before checking
if _is_local_dns():
status, output = commands.getstatusoutput(
"ip address | grep inet | awk '{print $2}' | awk -F '/' '{print $1}' | grep -E '(^127\.|^192\.|^10\.)'")
iplist = [ip.strip() for ip in output.split('\n')]
if len(iplist) <= 1:
raise UpdaterErr('listen ip %s replace failed' % ','.join(iplist))
log.info('listen ip: %s' % iplist)
with open(to_use_file) as f:
content = f.read()
content = content.replace('#localdns_listen_mark', 'listen-on {%s;};' % (';'.join(iplist)))
open(to_use_file, 'w').write(content)
check_named_conf(to_use_file)
if can_reload(group_name):
copy_named_conf(to_use_file)
reload_conf()
class UpdateConfThread(threading.Thread):
def __init__(self, update_type, kwargs):
super(UpdateConfThread, self).__init__()
self.update_type = update_type
self.group_name = kwargs['group_name']
self.kwargs = kwargs
def update_named(self):
named_conf_md5 = get_named_md5()
if named_conf_md5 == self.kwargs['group_conf_md5']:
return update_host_md5(named_conf_md5)
update_named_conf(self.group_name)
return update_host_md5(self.kwargs['group_conf_md5'])
def update_acl(self):
acl_dir = _get_acl_dir()
acl_files = self.kwargs.get('acl_files', [])
filenames = {filename: os.path.join(acl_dir, filename) for filename in acl_files}
for acl_file, acl_path in filenames.iteritems():
# Generate the new configuration file
content = DnsdbApi.get_acl_content(acl_file)['data']
with open('{}.tmp'.format(acl_path), 'w') as f:
f.write(content)
# Reload the configuration
if can_reload(self.group_name):
tmp_conf_dict = {}
for acl_file in filenames.values():
# Back up the original configuration file
backup_file('acl', acl_file)
back = acl_file + '.bak'
shutil.copy(acl_file, back)
# Copy in the new configuration file
shutil.copy('{}.tmp'.format(acl_file), acl_file)
tmp_conf_dict[acl_file] = back
# Check the configuration syntax
try:
check_named_conf(_get_named_path())
except UpdaterErr as e:
# Restore the original configuration files
for conf_file, back in tmp_conf_dict.iteritems():
shutil.copy(back, conf_file)
raise
reload_conf()
def run(self):
msg = ''
is_success = True
try:
if self.update_type == 'named.conf':
self.update_named()
elif self.update_type == 'acl':
self.update_acl()
except Exception as e:
send_alarm_email(u'Failed to update configuration file\nHost: %s\nReason: %s' % (_get_local_hostname(), e))
log.exception(e)
msg = str(e)
is_success = False
deploy_id = self.kwargs.get('deploy_id', None)
if deploy_id:
DnsdbApi.update_deploy_info(deploy_id, is_success, msg)
def start_update_thread(update_type, **kwargs):
thread = UpdateConfThread(update_type, kwargs)
thread.start()
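# Hedged usage sketch (values are illustrative): the keyword arguments mirror
# what UpdateConfThread reads above -- 'group_name' plus 'group_conf_md5' for a
# named.conf update, or 'acl_files' for an acl update, and an optional
# 'deploy_id' used to report the result back through DnsdbApi.
#
#   start_update_thread('named.conf', group_name='localdns-bj',
#                       group_conf_md5='<md5 from dnsdb>', deploy_id=42)
#   start_update_thread('acl', group_name='localdns-bj',
#                       acl_files=['office.acl'], deploy_id=43)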
|
py | b409b6481a0d90918e866c6bef9163f93cff5252 | # -*- coding: utf-8 -*-
import itsdangerous
import mock
import pytest
import unittest
from future.moves.urllib.parse import urlparse, parse_qs
from uuid import UUID
from api.base.settings.defaults import API_BASE
from framework.auth.cas import CasResponse
from osf.models import OSFUser, Session, ApiOAuth2PersonalToken
from osf_tests.factories import (
AuthUserFactory,
UserFactory,
OSFGroupFactory,
ProjectFactory,
ApiOAuth2ScopeFactory,
RegistrationFactory,
Auth,
)
from osf.utils.permissions import CREATOR_PERMISSIONS
from website import settings
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestUsers:
@pytest.fixture()
def user_one(self):
return AuthUserFactory(fullname='Freddie Mercury I')
@pytest.fixture()
def user_two(self):
return AuthUserFactory(fullname='Freddie Mercury II')
def test_returns_200(self, app):
res = app.get('/{}users/'.format(API_BASE))
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
def test_find_user_in_users(self, app, user_one, user_two):
url = '/{}users/'.format(API_BASE)
res = app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert user_two._id in ids
def test_all_users_in_users(self, app, user_one, user_two):
url = '/{}users/'.format(API_BASE)
res = app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert user_one._id in ids
assert user_two._id in ids
def test_merged_user_is_not_in_user_list_after_2point3(
self, app, user_one, user_two):
user_two.merge_user(user_one)
res = app.get('/{}users/?version=2.3'.format(API_BASE))
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert res.status_code == 200
assert user_two._id in ids
assert user_one._id not in ids
def test_merged_user_is_returned_before_2point3(
self, app, user_one, user_two):
user_two.merge_user(user_one)
res = app.get('/{}users/'.format(API_BASE))
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert res.status_code == 200
assert user_two._id in ids
assert user_one._id in ids
def test_find_multiple_in_users(self, app, user_one, user_two):
url = '/{}users/?filter[full_name]=fred'.format(API_BASE)
res = app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert user_one._id in ids
assert user_two._id in ids
def test_find_single_user_in_users(self, app, user_one, user_two):
url = '/{}users/?filter[full_name]=my'.format(API_BASE)
user_one.fullname = 'My Mom'
user_one.save()
res = app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert user_one._id in ids
assert user_two._id not in ids
def test_find_no_user_in_users(self, app, user_one, user_two):
url = '/{}users/?filter[full_name]=NotMyMom'.format(API_BASE)
res = app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert user_one._id not in ids
assert user_two._id not in ids
def test_more_than_one_projects_in_common(self, app, user_one, user_two):
group = OSFGroupFactory(creator=user_one)
group.make_member(user_two)
project1 = ProjectFactory(creator=user_one)
project1.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project1.save()
project2 = ProjectFactory(creator=user_one)
project2.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project2.save()
project3 = ProjectFactory()
project4 = ProjectFactory()
project3.add_osf_group(group)
project4.add_osf_group(group)
project4.is_deleted = True
project3.save()
project4.save()
RegistrationFactory(
project=project1,
creator=user_one,
is_public=True)
url = '/{}users/?show_projects_in_common=true'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
if user['id'] == user_two._id:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 4
def test_users_projects_in_common(self, app, user_one, user_two):
user_one.fullname = 'hello'
user_one.save()
url = '/{}users/?show_projects_in_common=true'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 0
def test_users_projects_in_common_with_embed_and_right_query(
self, app, user_one, user_two):
project = ProjectFactory(creator=user_one)
project.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project.save()
url = '/{}users/{}/nodes/?embed=contributors&show_projects_in_common=true'.format(
API_BASE, user_two._id)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data'][0]['embeds']['contributors']['data']
for user in user_json:
meta = user['embeds']['users']['data']['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 1
def test_users_projects_in_common_exclude_deleted_projects(
self, app, user_one, user_two):
project_list = []
for x in range(1, 10):
project = ProjectFactory(creator=user_one)
project.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project.save()
project_list.append(project)
for x in range(1, 5):
project = project_list[x]
project.reload()
project.remove_node(auth=Auth(user=user_one))
url = '/{}users/{}/nodes/?embed=contributors&show_projects_in_common=true'.format(
API_BASE, user_two._id)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data'][0]['embeds']['contributors']['data']
for user in user_json:
meta = user['embeds']['users']['data']['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' in meta
assert meta['projects_in_common'] == 5
def test_users_projects_in_common_with_embed_without_right_query(
self, app, user_one, user_two):
project = ProjectFactory(creator=user_one)
project.add_contributor(
contributor=user_two,
permissions=CREATOR_PERMISSIONS,
auth=Auth(user=user_one)
)
project.save()
url = '/{}users/{}/nodes/?embed=contributors'.format(
API_BASE, user_two._id)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data'][0]['embeds']['contributors']['data']
for user in user_json:
meta = user['embeds']['users']['data']['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' not in meta
def test_users_no_projects_in_common_with_wrong_query(
self, app, user_one, user_two):
user_one.fullname = 'hello'
user_one.save()
url = '/{}users/?filter[full_name]={}'.format(
API_BASE, user_one.fullname)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' not in meta
def test_users_no_projects_in_common_without_filter(
self, app, user_one, user_two):
user_one.fullname = 'hello'
user_one.save()
url = '/{}users/'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
user_json = res.json['data']
for user in user_json:
meta = user['relationships']['nodes']['links']['related']['meta']
assert 'projects_in_common' not in meta
def test_users_list_takes_profile_image_size_param(
self, app, user_one, user_two):
size = 42
url = '/{}users/?profile_image_size={}'.format(API_BASE, size)
res = app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = parse_qs(
urlparse(profile_image_url).query)
assert int(query_dict.get('s')[0]) == size
def test_users_list_filter_multiple_field(self, app, user_one, user_two):
john_doe = UserFactory(fullname='John Doe')
john_doe.given_name = 'John'
john_doe.family_name = 'Doe'
john_doe.save()
doe_jane = UserFactory(fullname='Doe Jane')
doe_jane.given_name = 'Doe'
doe_jane.family_name = 'Jane'
doe_jane.save()
url = '/{}users/?filter[given_name,family_name]=Doe'.format(API_BASE)
res = app.get(url)
data = res.json['data']
assert len(data) == 2
def test_users_list_filter_multiple_fields_with_additional_filters(
self, app, user_one, user_two):
john_doe = UserFactory(fullname='John Doe')
john_doe.given_name = 'John'
john_doe.family_name = 'Doe'
john_doe.save()
doe_jane = UserFactory(fullname='Doe Jane')
doe_jane.given_name = 'Doe'
doe_jane.family_name = 'Jane'
doe_jane.save()
url = '/{}users/?filter[given_name,family_name]=Doe&filter[id]={}'.format(
API_BASE, john_doe._id)
res = app.get(url)
data = res.json['data']
assert len(data) == 1
def test_users_list_filter_multiple_fields_with_bad_filter(
self, app, user_one, user_two):
url = '/{}users/?filter[given_name,not_a_filter]=Doe'.format(API_BASE)
res = app.get(url, expect_errors=True)
assert res.status_code == 400
@pytest.mark.django_db
class TestUsersCreate:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def email_unconfirmed(self):
return '[email protected]'
@pytest.fixture()
def url_base(self):
return '/{}users/'.format(API_BASE)
@pytest.fixture()
def data(self, email_unconfirmed):
return {
'data': {
'type': 'users',
'attributes': {
'username': email_unconfirmed,
'full_name': 'Test Account'
}
}
}
def tearDown(self, app):
super(TestUsersCreate, self).tearDown()
app.reset() # clears cookies
OSFUser.remove()
@mock.patch('framework.auth.views.mails.send_mail')
def test_logged_in_user_with_basic_auth_cannot_create_other_user_or_send_mail(
self, mock_mail, app, user, email_unconfirmed, data, url_base):
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
auth=user.auth,
expect_errors=True
)
assert res.status_code == 403
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
assert mock_mail.call_count == 0
@mock.patch('framework.auth.views.mails.send_mail')
def test_logged_out_user_cannot_create_other_user_or_send_mail(
self, mock_mail, app, email_unconfirmed, data, url_base):
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
expect_errors=True
)
assert res.status_code == 401
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
assert mock_mail.call_count == 0
@pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
def test_cookied_requests_can_create_and_email(
self, mock_mail, app, user, email_unconfirmed, data, url_base):
session = Session(data={'auth_user_id': user._id})
session.save()
cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(session._id)
app.set_cookie(settings.COOKIE_NAME, str(cookie))
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data
)
assert res.status_code == 201
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 1
@pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.users.create unavailable')
def test_properly_scoped_token_can_create_and_send_email(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Authorized Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.users.create'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
assert res.json['data']['attributes']['username'] == email_unconfirmed
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 1
@pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.users.create unavailable')
def test_properly_scoped_token_does_not_send_email_without_kwarg(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Authorized Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.users.create'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
url_base,
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
assert res.json['data']['attributes']['username'] == email_unconfirmed
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 0
@pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.users.create unavailable')
def test_properly_scoped_token_can_create_without_username_but_not_send_email(
self, mock_auth, mock_mail, app, user, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Authorized Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.users.create'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
data['data']['attributes'] = {'full_name': 'No Email'}
assert OSFUser.objects.filter(fullname='No Email').count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
username = res.json['data']['attributes']['username']
try:
UUID(username)
except ValueError:
raise AssertionError('Username is not a valid UUID')
assert OSFUser.objects.filter(fullname='No Email').count() == 1
assert mock_mail.call_count == 0
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
def test_improperly_scoped_token_can_not_create_or_email(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Unauthorized Token',
)
token.save()
scope = ApiOAuth2ScopeFactory()
scope.name = 'unauthorized scope'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)},
expect_errors=True
)
assert res.status_code == 403
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
assert mock_mail.call_count == 0
@pytest.mark.skip # failing locally post conversion
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.admin unavailable')
def test_admin_scoped_token_can_create_and_send_email(
self, mock_auth, mock_mail, app, user, email_unconfirmed, data, url_base):
token = ApiOAuth2PersonalToken(
owner=user,
name='Admin Token',
)
scope = ApiOAuth2ScopeFactory()
scope.name = 'osf.admin'
scope.save()
token.scopes.add(scope)
mock_cas_resp = CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s.name for s in token.scopes.all()]
}
)
mock_auth.return_value = user, mock_cas_resp
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 0
res = app.post_json_api(
'{}?send_email=true'.format(url_base),
data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert res.status_code == 201
assert res.json['data']['attributes']['username'] == email_unconfirmed
assert OSFUser.objects.filter(username=email_unconfirmed).count() == 1
assert mock_mail.call_count == 1
|
py | b409b689ebfca910dca4bcb1eb3221423dc5c137 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ExpandedLandingPageViewServiceTransport
from .grpc import ExpandedLandingPageViewServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ExpandedLandingPageViewServiceTransport]]
_transport_registry['grpc'] = ExpandedLandingPageViewServiceGrpcTransport
__all__ = (
'ExpandedLandingPageViewServiceTransport',
'ExpandedLandingPageViewServiceGrpcTransport',
)
|
py | b409b73c61eab28192603bdebe23cd1b4a457e01 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.resources.route53.RecordSet import RecordSet # pylint: disable=E0401
class TestRoute53RecordSets(BaseRuleTestCase):
"""Test CloudFront Aliases Configuration"""
def setUp(self):
"""Setup"""
super(TestRoute53RecordSets, self).setUp()
self.collection.register(RecordSet())
self.success_templates = [
'test/fixtures/templates/good/route53.yaml'
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/route53.yaml', 31)
|
py | b409b758a45ea369fc6b35d18de0b40fe90c1ec5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
try:
from torch.hub import _download_url_to_file
from torch.hub import urlparse
from torch.hub import HASH_REGEX
except ImportError:
from torch.utils.model_zoo import _download_url_to_file
from torch.utils.model_zoo import urlparse
from torch.utils.model_zoo import HASH_REGEX
from asynet_mask_rcnn.utils.comm import is_main_process
from asynet_mask_rcnn.utils.comm import synchronize
# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py
# but with a few improvements and modifications
def cache_url(url, model_dir=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = asynet_mask_rcnn.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
if model_dir is None:
torch_home = os.path.expanduser(os.getenv("TORCH_HOME", "~/.torch"))
model_dir = os.getenv("TORCH_MODEL_ZOO", os.path.join(torch_home, "models"))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if filename == "model_final.pkl":
# workaround as pre-trained Caffe2 models from Detectron have all the same filename
# so make the full path the filename by replacing / with _
filename = parts.path.replace("/", "_")
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file) and is_main_process():
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename)
if hash_prefix is not None:
hash_prefix = hash_prefix.group(1)
# workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
# which matches the hash PyTorch uses. So we skip the hash matching
# if the hash_prefix is less than 6 characters
if len(hash_prefix) < 6:
hash_prefix = None
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
synchronize()
return cached_file
|
py | b409b77c7d8c55eaede5addf0c402b996e4ba1e1 | import time
from selenium.webdriver.support.ui import Select
from model.project import Project
class ProjectHelper:
def __init__(self, app):
self.app = app
project_cach = None
def open_project_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/manage_proj_page.php")):
wd.find_element_by_link_text("Manage").click()
wd.find_element_by_link_text("Manage Projects").click()
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def change_list_value(self, select_name, text):
wd = self.app.wd
if text is not None:
select = Select(wd.find_element_by_name(select_name))
select.select_by_visible_text(text)
def change_checkbox_value(self, checkbox_name, bool):
wd = self.app.wd
if bool is not None:
checkbox = wd.find_element_by_name(checkbox_name)
if not bool:
checkbox.click()
def fill_project_form(self, project):
wd = self.app.wd
self.change_field_value("name", project.name)
self.change_list_value("status", project.status)
self.change_checkbox_value("inherit_global", project.inherit_gl_categories)
self.change_list_value("view_state", project.view_status)
self.change_field_value("description", project.description)
def add_project(self, project):
wd = self.app.wd
self.open_project_page()
wd.find_element_by_css_selector("input[value='Create New Project']").click()
# fill in the fields
self.fill_project_form(project)
wd.find_element_by_xpath("//input[@value='Add Project']").click()
self.project_cach = None
def get_project_list(self):
wd = self.app.wd
self.open_project_page()
if self.project_cach is None:
self.project_cach = []
table = wd.find_elements_by_css_selector(".width100")[1]
rows = table.find_elements_by_tag_name("tr")
for row in rows[2:len(rows)]:
cells = row.find_elements_by_tag_name("td")
name = cells[0].find_element_by_tag_name("a").text
status = cells[1].text
enabled = cells[2].text
view_status = cells[3].text
description = cells[4].text
id = cells[0].find_element_by_tag_name("a").get_attribute("href").split("id=")[1]
self.project_cach.append(
Project(name=name, status=status, id=id, enabled=enabled,
view_status=view_status, description=description))
return list(self.project_cach)
def delete_project_by_id(self, id):
wd = self.app.wd
self.open_project_page()
self.open_project_by_id(id)
wd.find_element_by_css_selector("input[value='Delete Project']").click()
wd.find_element_by_css_selector("input[value='Delete Project']").click()
self.open_project_page()
self.project_cach = None
def open_project_by_id(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//a[contains(@href, 'manage_proj_edit_page.php?project_id=%s')]" % id).click()
|
py | b409b860efbc7d1aaec7889b8181a956f972aa96 | # -*- coding: utf-8 -*-
import os
import errno
import numpy as np
import nibabel as nib
def float_in_filename(num):
""" Function to remove the "." of the float to avoid it in filenames.
It convert the Numeric input into a str
If the input is simply an int, it just converts it into str
Parameters
----------
num: Numeric
Returns:
str
the string "dot" will replace the dot of the float
"""
spl = str(num).split(".")
if len(spl) > 1:
return spl[0] + "dot" + spl[1]
elif len(spl) == 1:
return spl[0]
else:
print("uknown value: " + str(num))
def split_clusters(nii, res_folder, name):
""" Split a 3D image into several nifti files. Each one containing a
distinct value from the source image.
Parameters
----------
nii: nibabel.Nifti1Image
the image to split
res_folder: str
the path to the result folder. The function will create a folder
which will contain all the splitted images
name: str
suffix for the result files, it will also be used to name the result
folder
"""
# extract the needed information from the source image
data = nii.get_data()
affine = nii.affine
folder = os.path.join(res_folder, name)
# Try to create the folder and ignore the error in case it already exists
try:
os.mkdir(folder)
# note that any other error, such as a permission error, is re-raised below
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
# TODO change the amax for a walk through an array of the unique values
# find the maximum value of the source
o_max = np.amax(data)
if np.amin(data) < 0:
print("The source image contains negative values")
return
while o_max > 0:
# we create an empty (full of 0) array with the same shape as data
mask = np.zeros(data.shape)
# save the cluster in mask with its original value
mask[np.where(data == o_max)] = o_max
# we remove the cluster from data
data[np.where(data == o_max)] = 0
# we save the mask
img_ROIs = nib.Nifti1Image(mask, affine)
path = os.path.join(
folder, "clu_" + float_in_filename(o_max) + "_" + name + ".nii.gz")
nib.save(img_ROIs, path)
# The maximum of the remaining values of data
o_max = np.amax(data)
print("All the cluster has been splitted in " + folder)
return
# clu = np.array(np.where(data == i))
# mask[clu[0,], clu[1,], clu[2,]] = i
# tt.append(len(clu))
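# Minimal usage sketch (added for illustration; the file name is a placeholder
# and not part of the original module):
#   img = nib.load("example_clusters.nii.gz")
#   split_clusters(img, "/tmp/results", "example")  # one file per cluster value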
|
py | b409b8bd7a8d7426017d7dc1d57530e5e41a9e4c | import setuptools
from tflens.helper.config import VERSION
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="tflens",
version=VERSION,
author="Juan Manuel Ruiz Fernández",
description="Terraform state viewer",
keywords="terraform viewer state tfstate cli",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/neovasili/tflens",
packages=setuptools.find_packages(include=[
'tflens',
'tflens.controller',
'tflens.exception',
'tflens.helper',
'tflens.model',
'tflens.service'
]),
entry_points = {
"console_scripts": ['tflens = tflens.__main__:main']
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Documentation",
"Topic :: Terminals",
],
python_requires='>=3.6',
)
|
py | b409b91b88f93e00d109134276813500b0cfd28c | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
from mindspore.nn import Cell
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
import mindspore as ms
from mindspore.train.model import Model
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Select(Cell):
def __init__(self, dtype):
super(Select, self).__init__()
self.select = P.Select()
def construct(self, cond, inputa, inputb):
return self.select(cond, inputa, inputb)
def me_select(cond, inputa, inputb, dtype=ms.float32):
net = Select(dtype)
net.set_train()
model = Model(net)
if isinstance(inputa, np.ndarray):
inputa = Tensor(inputa)
if isinstance(inputb, np.ndarray):
inputb = Tensor(inputb)
if isinstance(cond, np.bool_):
cond = np.array(cond)
out = model.predict(Tensor(cond), inputa, inputb)
return out.asnumpy()
def cmp_select(input_cond, inputa, inputb):
cond = input_cond > 0.5
out_me = me_select(cond, inputa, inputb)
print(input_cond)
print(cond)
print(inputa)
print(inputb)
print(out_me)
def test_select_2_2():
input_cond = np.random.rand(2, 2)
inputa = np.random.randn(2, 2).astype(np.float32)
inputb = np.random.randn(2, 2).astype(np.float32)
cmp_select(input_cond, inputa, inputb)
|
py | b409b9b5510c181ad7ba1f6137d839236c4e1f14 | from mitty.lib.cigars import cigarv2_v1
def test_cigar_conversion():
"""Cigars: Converting cigar V2 to V1"""
test_cases = [
('33=1X79=1X26=1X109=', '250M'),
('1X26=1X123=1X82=1X15=', '250M'),
('89=10D161=', '89M10D161M'),
('99M1X', '100M'),
('10M10D1X9M', '10M10D10M')
]
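# Added note (not part of the original test module): the cases above show that
# cigarv2_v1 collapses the extended '=' (match) and 'X' (mismatch) operations
# into plain 'M' runs (merging with adjacent 'M') while leaving other
# operations such as 'D' unchanged.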
for tc in test_cases:
assert cigarv2_v1(tc[0]) == tc[1], cigarv2_v1(tc[0]) |
py | b409ba6815a3ed5d515bf75e29efc45b26ebebeb | from sanic import Sanic
from sanic.response import json
from sanic.views import HTTPMethodView
import jwt
from sanic_jwt import exceptions, Initialize
from sanic_jwt.decorators import protected
class User(object):
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def to_dict(self):
properties = ["user_id", "username"]
return {prop: getattr(self, prop, None) for prop in properties}
users = [User(1, "user1", "abcxyz"), User(2, "user2", "abcxyz")]
username_table = {u.username: u for u in users}
# userid_table = {u.user_id: u for u in users}
async def authenticate(request, *args, **kwargs):
username = request.json.get("username", None)
password = request.json.get("password", None)
if not username or not password:
raise exceptions.AuthenticationFailed("Missing username or password.")
user = username_table.get(username, None)
if user is None:
raise exceptions.AuthenticationFailed("User not found.")
if password != user.password:
raise exceptions.AuthenticationFailed("Password is incorrect.")
return user
sanic_app = Sanic()
sanic_jwt = Initialize(sanic_app, authenticate=authenticate)
class PublicView(HTTPMethodView):
def get(self, request):
return json({"hello": "world"})
class ProtectedView(HTTPMethodView):
decorators = [protected()]
async def get(self, request):
return json({"protected": True})
class PartiallyProtectedView(HTTPMethodView):
async def get(self, request):
return json({"protected": True})
@protected()
async def patch(self, request):
return json({"protected": True})
sanic_app.add_route(PublicView.as_view(), "/")
sanic_app.add_route(ProtectedView.as_view(), "/protected")
sanic_app.add_route(PartiallyProtectedView.as_view(), "/partially")
class TestEndpointsCBV(object):
def test_unprotected(self):
_, response = sanic_app.test_client.get("/")
assert response.status == 200
def test_protected(self):
_, response = sanic_app.test_client.get("/protected")
assert response.status == 401
assert response.json.get("exception") == "Unauthorized"
assert "Authorization header not present." in response.json.get(
"reasons"
)
def test_partially_protected(self):
_, response = sanic_app.test_client.get("/partially")
assert response.status == 200
_, response = sanic_app.test_client.patch("/partially")
assert response.status == 401
assert response.json.get("exception") == "Unauthorized"
assert "Authorization header not present." in response.json.get(
"reasons"
)
def test_auth_invalid_method(self):
_, response = sanic_app.test_client.get("/auth")
assert response.status == 405
assert b"Error: Method GET not allowed for URL /auth" in response.body
def test_auth_proper_credentials(self):
_, response = sanic_app.test_client.post(
"/auth", json={"username": "user1", "password": "abcxyz"}
)
access_token = response.json.get(
sanic_jwt.config.access_token_name(), None
)
payload = jwt.decode(
access_token,
sanic_jwt.config.secret(),
algorithms=sanic_jwt.config.algorithm(),
)
assert response.status == 200
assert access_token is not None
assert isinstance(payload, dict)
assert sanic_jwt.config.user_id() in payload
assert "exp" in payload
_, response = sanic_app.test_client.get(
"/protected",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 200
_, response = sanic_app.test_client.patch(
"/partially",
headers={"Authorization": "Bearer {}".format(access_token)},
)
assert response.status == 200
|
py | b409baac956053948bb1f5e7db2dbbe65d46f78f | import time
import types
import unittest
from test.support import cpython_only
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
def one(self, a, b):
pass
def two(self):
pass
def three(self, a=None):
pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
d = datetime.now()
class Foo(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertIn(call(1, 2, 3), [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
mock.foo(1, 2, 3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_extended_not_equal(self):
a = call(x=1).foo
b = call(x=2).foo
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertNotEqual(a, b)
def test_nested_calls_not_equal(self):
a = call(x=1).foo().bar
b = call(x=2).foo().bar
self.assertEqual(a, a)
self.assertEqual(b, b)
self.assertNotEqual(a, b)
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
def test_call_with_name(self):
self.assertEqual(_Call((), 'foo')[0], 'foo')
self.assertEqual(_Call((('bar', 'barz'),),)[0], '')
self.assertEqual(_Call((('bar', 'barz'), {'hello': 'world'}),)[0], '')
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
mock = create_autospec(SomeClass)
self._check_someclass_mock(mock)
mock = create_autospec(SomeClass())
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different we treat a list instance as the type.
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self, x):
pass
class Bar(object):
def f(self, y):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a(x=5)
this_mock.a.assert_called_with(x=5)
this_mock.a.assert_called_with(5)
self.assertRaises(TypeError, this_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a(5)
instance_mock.a.assert_called_with(5)
instance_mock.a.assert_called_with(x=5)
self.assertRaises(TypeError, instance_mock.a, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
# The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f(6)
instance_mock.Bar.f.assert_called_with(6)
instance_mock.Bar.f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f(6)
instance_mock.Bar().f.assert_called_with(6)
instance_mock.Bar().f.assert_called_with(y=6)
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
mock.assert_called_with(1, b=2)
mock.assert_called_with(a=1, b=2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
mock.f.assert_called_with(a=3, b=4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock.assert_called_once_with(a=1)
self.assertRaises(AssertionError, mock.assert_called_once_with, 2)
mock(4, 5)
mock.assert_called_with(4, 5)
mock.assert_called_with(a=4, b=5)
self.assertRaises(AssertionError, mock.assert_called_with, a=5, b=4)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self, x, y):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock(1, 2)
mock.assert_called_once_with(1, 2)
mock.assert_called_once_with(x=1, y=2)
self.assertRaises(TypeError, mock, 'a')
instance = mock(1, 2)
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with('a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
instance.assert_called_with(a='a')
mock = create_autospec(Callable(1, 2))
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
a.f(self=10)
a.f.assert_called_with(10)
a.f.assert_called_with(self=10)
def test_autospec_data_descriptor(self):
class Descriptor(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, cls=None):
if obj is None:
return self
return self.value
def __set__(self, obj, value):
pass
class MyProperty(property):
pass
class Foo(object):
__slots__ = ['slot']
@property
def prop(self):
return 3
@MyProperty
def subprop(self):
return 4
desc = Descriptor(42)
foo = create_autospec(Foo)
def check_data_descriptor(mock_attr):
# Data descriptors don't have a spec.
self.assertIsInstance(mock_attr, MagicMock)
mock_attr(1, 2, 3)
mock_attr.abc(4, 5, 6)
mock_attr.assert_called_once_with(1, 2, 3)
mock_attr.abc.assert_called_once_with(4, 5, 6)
# property
check_data_descriptor(foo.prop)
# property subclass
check_data_descriptor(foo.subprop)
# class __slot__
check_data_descriptor(foo.slot)
# plain data descriptor
check_data_descriptor(foo.desc)
@cpython_only # PyPy can easily extract a spec from a builtin function
def test_autospec_on_bound_builtin_function(self):
meth = types.MethodType(time.ctime, time.time())
self.assertIsInstance(meth(), str)
mocked = create_autospec(meth)
# no signature, so no spec to check against
mocked()
mocked.assert_called_once_with()
mocked.reset_mock()
mocked(4, 5, 6)
mocked.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertIn(kall, mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertIn(calls, mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertIn(calls, mock.call_args_list)
calls = [call(3, 4)]
self.assertIn(calls, mock.call_args_list)
self.assertNotIn(call('fish'), mock.call_args_list)
self.assertNotIn([call('fish')], mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main()
|
py | b409bb7e4fa24f5434a094b2236f772839c1ef4c | from ansiblelint import AnsibleLintRule
class InconsistentNaming(AnsibleLintRule):
id = 'ANSIBLE0014'
description = 'Roles and tasks should have a human-meaningful name that communicates their purpose'
severity = 'medium'
tags = {'clarity'}
version_added = 'v1.0.0'
# _modules = ['hosts']
shortdesc = 'InconsistentNaming'
def match(self, file, line):
if "name:" in line:
if len(line.split("name:")[1]) < 7:
return True
return False
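# Added illustration (not part of the original rule): a task line such as
#   - name: Sync
# has fewer than seven characters after "name:" and is flagged, while
#   - name: Synchronize configuration files
# passes the check.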
|
py | b409bb981a6a4a59139f1455e993eb834170e813 | #!/usr/bin/env python
#
# Copyright 2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import uuid
import subprocess
from tiny_test_fw import Utility
import ttfw_idf
from ble import lib_ble_client
# When running on local machine execute the following before running this script
# > make app bootloader
# > make print_flash_cmd | tail -n 1 > build/download.config
@ttfw_idf.idf_example_test(env_tag="Example_WIFI_BT")
def test_example_app_ble_central(env, extra_data):
"""
Steps:
1. Discover Bluetooth Adapter and Power On
"""
interface = 'hci0'
adv_host_name = "BleCentTestApp"
adv_iface_index = 0
adv_type = 'peripheral'
adv_uuid = '1811'
subprocess.check_output(['rm','-rf','/var/lib/bluetooth/*'])
subprocess.check_output(['hciconfig','hci0','reset'])
# Acquire DUT
dut = env.get_dut("blecent", "examples/bluetooth/nimble/blecent", dut_class=ttfw_idf.ESP32DUT)
# Get binary file
binary_file = os.path.join(dut.app.binary_path, "blecent.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("blecent_bin_size", "{}KB".format(bin_size // 1024))
# Upload binary and start testing
Utility.console_log("Starting blecent example test app")
dut.start_app()
dut.reset()
device_addr = ':'.join(re.findall('..', '%012x' % uuid.getnode()))
# Get BLE client module
ble_client_obj = lib_ble_client.BLE_Bluez_Client(interface)
if not ble_client_obj:
raise RuntimeError("Get DBus-Bluez object failed !!")
# Discover Bluetooth Adapter and power on
is_adapter_set = ble_client_obj.set_adapter()
if not is_adapter_set:
raise RuntimeError("Adapter Power On failed !!")
# Write device address to dut
dut.expect("BLE Host Task Started", timeout=60)
dut.write(device_addr + "\n")
'''
Blecent application run:
Create GATT data
Register GATT Application
Create Advertising data
Register advertisement
Start advertising
'''
ble_client_obj.start_advertising(adv_host_name, adv_iface_index, adv_type, adv_uuid)
# Call disconnect to perform cleanup operations before exiting application
ble_client_obj.disconnect()
# Check dut responses
dut.expect("Connection established", timeout=60)
dut.expect("Service discovery complete; status=0", timeout=60)
print("Service discovery passed\n\tService Discovery Status: 0")
dut.expect("GATT procedure initiated: read;", timeout=60)
dut.expect("Read complete; status=0", timeout=60)
print("Read passed\n\tSupportedNewAlertCategoryCharacteristic\n\tRead Status: 0")
dut.expect("GATT procedure initiated: write;", timeout=60)
dut.expect("Write complete; status=0", timeout=60)
print("Write passed\n\tAlertNotificationControlPointCharacteristic\n\tWrite Status: 0")
dut.expect("GATT procedure initiated: write;", timeout=60)
dut.expect("Subscribe complete; status=0", timeout=60)
print("Subscribe passed\n\tClientCharacteristicConfigurationDescriptor\n\tSubscribe Status: 0")
if __name__ == '__main__':
test_example_app_ble_central()
|
py | b409bbaa6745ca6dc5411e958f36fedc15617e59 | from django.shortcuts import render
def Index(request):
return render(request, 'home/home.html')
|
py | b409bbad8b22d67650623f782ad14a2dcc53c513 | import tkinter as tk # ejecutar "sudo apt-get install python3-tk" si hay problemas con la importac
from tkinter import ttk
import numpy as np
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
class Interface:
def __init__(self):
self.window = tk.Tk()
self.window.title("Fisica")
self.window.minsize(800, 600)
self.window.maxsize(1280, 960)
self.entrada_posicion_x0 = ttk.Entry()
self.entrada_posicion_y0 = ttk.Entry()
self.entrada_angulo_inicial = ttk.Entry()
self.entrada_aceleracion_inicial = ttk.Entry()
self.deslizador_posicion_x0 = ttk.Scale()
self.deslizador_posicion_y0 = ttk.Scale()
self.deslizador_angulo_inicial = ttk.Scale()
self.deslizador_aceleracion_inicial = ttk.Scale()
self.pestañas = ttk.Notebook(self.window)
self.tab_ideal = ttk.Frame(self.pestañas)
self.opciones = ttk.Frame(self.tab_ideal)
# Initialize the interface buttons
self.boton_posicion = ttk.Button(self.opciones, text="Posición", width=10, command=lambda: self.boton_posicionf())
self.boton_velocidad = ttk.Button(self.opciones, text="Velocidad", width=10, command=lambda: self.boton_velocidadf())
self.boton_aceleracion = ttk.Button(self.opciones, text="Aceleración", width=10, command=lambda: self.boton_aceleracionf())
self.boton_alcance_horizontal = ttk.Button(self.opciones, text="Alcance Horizontal", width=10, command=lambda: self.boton_alcance_horizontalf())
self.boton_altura_maxima = ttk.Button(self.opciones, text="Altura Màxima", width=10, command=lambda: self.boton_altura_maximaf())
self.boton_camino_recorrido = ttk.Button(self.opciones, text="Camino Recorrido", width=10, command=lambda: self.boton_camino_recorridof())
self.boton_radio_y_centro_de_curvatura_circulo_obsculador = ttk.Button(self.opciones, text="Radio y Centro de Curvatura y Circulo Obsculador", width=10, command=lambda: self.boton_radio_y_centro_de_curvatura_circulo_obsculadorf())
self.boton_aceleracion_normal_y_tangencial = ttk.Button(self.opciones, text="A. normal y tangencial", width=10, command=lambda: self.boton_aceleracion_normal_y_tangencialf())
self.boton_vector_normal = ttk.Button(self.opciones, text="Vector normal", width=10,
command=lambda: self.boton_vector_normalf())
#self.boton_circulo_osculador = ttk.Button(self.opciones, text="Circulo Osculador", width=10,
#command=lambda: self.boton_circulo_osculadorf())
self.create_widgets()
def create_widgets(self):
def f_posicion_x0(event):
print(posicion_x0.get())
def f_posicion_y0(event):
print(posicion_y0.get())
def f_angulo_inicial(event):
print(angulo_inicial.get())
def f_Rapidez_inicial(event):
print(Rapidez_inicial.get())
# Clear the initial Entry widgets so they are emptied when clicked
def limpiar_entrada_x0(event):
if self.entrada_posicion_x0.get() == "X0":
self.entrada_posicion_x0.delete(0,'end')
def limpiar_entrada_y0(event):
if self.entrada_posicion_y0.get() == "Y0":
self.entrada_posicion_y0.delete(0,'end')
def limpiar_entrada_angulo(event):
if self.entrada_angulo_inicial.get() == "Angulo":
self.entrada_angulo_inicial.delete(0,'end')
def limpiar_entrada_Rapidez(event):
if self.entrada_Rapidez_inicial.get() == "Rapidez Inicial":
self.entrada_Rapidez_inicial.delete(0,'end')
# Slider variables
posicion_x0 = tk.IntVar()
posicion_y0 = tk.IntVar()
angulo_inicial = tk.IntVar()
Rapidez_inicial = tk.IntVar()
self.pestañas.pack(side=tk.TOP, fill=tk.BOTH, expand=True, ipadx=10, ipady=10)
tab_balistica = tk.Frame(self.pestañas)
tab_seguridad = tk.Frame(self.pestañas)
self.pestañas.add(self.tab_ideal, text="Movimiento Ideal", compound=tk.TOP)
self.pestañas.add(tab_seguridad, text="Parabola de Seguridad", compound=tk.TOP)
self.pestañas.add(tab_balistica, text="Movimiento Balistico", compound=tk.TOP)
# tutorial = ttk.LabelFrame(self.window, text="Instrucciones")
# tutorial.pack(side=tk.RIGHT, fill=tk.X, expand=True, padx=10, pady=10)
self.opciones.pack(side=tk.RIGHT, fill=tk.BOTH, expand=False, padx=5, pady=5)
self.boton_posicion.pack(side=tk.TOP, padx=10, pady=10)
self.boton_velocidad.pack(side=tk.TOP, padx=10, pady=10)
self.boton_aceleracion.pack(side=tk.TOP, padx=10, pady=10)
self.boton_alcance_horizontal.pack(side=tk.TOP, padx=10, pady=10)
self.boton_altura_maxima.pack(side=tk.TOP, padx=10, pady=10)
self.boton_camino_recorrido.pack(side=tk.TOP, padx=10, pady=10)
self.boton_radio_y_centro_de_curvatura_circulo_obsculador.pack(side=tk.TOP, padx=10, pady=10)
self.boton_aceleracion_normal_y_tangencial.pack(side=tk.TOP, padx=10, pady=10)
self.boton_vector_normal.pack(side=tk.TOP, padx=10, pady=10)
#self.boton_circulo_osculador.pack(side=tk.TOP, padx=10, pady=10)
graphics = ttk.LabelFrame(self.tab_ideal, text="Gráfica")
graphics.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=5, pady=5)
separador = ttk.Separator(self.tab_ideal, orient="horizontal")
separador.pack(side=tk.TOP, expand=False, fill=tk.X)
variables = ttk.LabelFrame(self.tab_ideal, text="Controles")
variables.pack(side=tk.LEFT, fill=tk.X, expand=True, padx=5, pady=5)
# Containers for the controls
posicion = ttk.Frame(variables)
posicion.pack(side=tk.LEFT, expand=True, padx=5, pady=5)
Rapidez = ttk.Frame(variables)
Rapidez.pack(side=tk.LEFT, expand=True, padx=5, pady=5)
angulo = ttk.Frame(variables)
angulo.pack(side=tk.LEFT, expand=True, padx=5, pady=5)
# Add text entry elements
self.entrada_posicion_x0 = ttk.Entry(posicion, justify=tk.CENTER)
self.entrada_posicion_x0.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.entrada_posicion_x0.insert(tk.END, "X0")
self.entrada_posicion_x0.bind("<Button-1>", limpiar_entrada_x0)
self.entrada_posicion_y0 = ttk.Entry(posicion, justify=tk.CENTER)
self.entrada_posicion_y0.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.entrada_posicion_y0.insert(tk.END, "Y0")
self.entrada_posicion_y0.bind("<Button-1>", limpiar_entrada_y0)
self.entrada_Rapidez_inicial = ttk.Entry(Rapidez, justify=tk.CENTER)
self.entrada_Rapidez_inicial.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.entrada_Rapidez_inicial.insert(tk.END, "Rapidez Inicial")
self.entrada_Rapidez_inicial.bind("<Button-1>", limpiar_entrada_Rapidez)
self.entrada_angulo_inicial = ttk.Entry(angulo, justify=tk.CENTER)
self.entrada_angulo_inicial.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.entrada_angulo_inicial.insert(tk.END, "Angulo Inicial")
self.entrada_angulo_inicial.bind("<Button-1>", limpiar_entrada_angulo)
# Add slider elements to update the data
self.deslizador_posicion_x0 = ttk.Scale(posicion, variable=posicion_x0, from_=0, to=100, orient=tk.HORIZONTAL)
self.deslizador_posicion_x0.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=10, pady=10)
self.deslizador_posicion_x0.set(50)
self.deslizador_posicion_x0.bind("<B1-Motion>", f_posicion_x0)
self.deslizador_posicion_x0.bind("<ButtonRelease-1>", f_posicion_x0)
self.deslizador_posicion_y0 = ttk.Scale(posicion, variable=posicion_y0, from_=0, to=100, orient=tk.HORIZONTAL)
self.deslizador_posicion_y0.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=10, pady=10)
self.deslizador_posicion_y0.set(50)
self.deslizador_posicion_y0.bind("<B1-Motion>", f_posicion_y0)
self.deslizador_posicion_y0.bind("<ButtonRelease-1>", f_posicion_y0)
self.deslizador_angulo_inicial = ttk.Scale(angulo, variable=angulo_inicial, from_=0, to=90, orient=tk.HORIZONTAL)
self.deslizador_angulo_inicial.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=10, pady=10)
self.deslizador_angulo_inicial.set(180)
self.deslizador_angulo_inicial.bind("<B1-Motion>", f_angulo_inicial)
self.deslizador_Rapidez_inicial = ttk.Scale(Rapidez, variable=Rapidez_inicial, from_=0, to=100, orient=tk.HORIZONTAL)
self.deslizador_Rapidez_inicial.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=10, pady=10)
self.deslizador_Rapidez_inicial.set(50)
self.deslizador_Rapidez_inicial.bind("<B1-Motion>", f_Rapidez_inicial)
self.deslizador_Rapidez_inicial.bind("<ButtonRelease-1>", f_Rapidez_inicial)
# Insert the plot in the indicated area
figura = Figure(figsize=(4, 3), dpi=100) # defines the proportions of the plot
ecuacion = np.arange(0, 10, .01)
figura.add_subplot(111).plot(ecuacion, ecuacion * ecuacion)
canvas = FigureCanvasTkAgg(figura, master=graphics)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
# TODO: declare all the interface elements inside __init__
def update_position_value(self):
self.entrada_posicion_x0.insert(tk.END, self.entrada_posicion_x0.get())
# TODO: declare all the interface elements inside __init__
def update_angle_value(self):
self.entrada_posicion_x0.insert(tk.END, self.entrada_posicion_x0.get())
# TODO: declare all the interface elements inside __init__
def update_acceleration_value(self):
self.entrada_posicion_x0.insert(tk.END, self.entrada_posicion_x0.get())
# Button declarations
def boton_posicionf(self):
# Method to store the data from the entry fields
def copiar_valores(event):
self.tiempo_datos[0] = entrada_tiempo.get()
master.destroy()
# Method to validate the data entry (numbers only for now)
def check(v, p):
if p.isdigit():
return True
elif p is "":
return True
else:
return False
# Initial data
# initialize the popup window
master = tk.Tk()
master.title("Posicion")
# Create a container frame for the left and right sides
frame_arriba = ttk.Frame(master)
frame_centro = ttk.Frame(master)
frame_abajo = ttk.Frame(master)
frame_aceptar = ttk.Frame(master)
validacion_tiempo = (frame_abajo.register(check), '%v', '%P')
#validacion_y = (frame_derecha.register(check), '%v', '%P')
frame_arriba.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=5, pady=5)
frame_centro.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=5, pady=5)
frame_abajo.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=5, pady=5)
frame_aceptar.pack(side=tk.TOP, fill=tk.BOTH, expand=True, padx=5, pady=5)
# Create the titles for the data entry fields
tiempo = ttk.Label(frame_abajo, text="Tiempo: ")
aceptar = ttk.Button(frame_aceptar, text="ACEPTAR")
tiempo_init = ttk.Label(frame_arriba, text="Intervalo de tiempo")
tiempo_init_x = ttk.Entry(frame_arriba, state='readonly', justify='center')
tiempo_init_y = ttk.Entry(frame_arriba, state='readonly')
tiempo_init.pack(side=tk.TOP)
tiempo_init_x.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
tiempo_init_y.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
tiempo_init_x.configure(state='normal')
tiempo_init_x.delete(0,'end')
tiempo_init_x.insert(0,"0")
tiempo_init_x.configure(state='readonly')
# Data separator
separador = ttk.Separator(frame_centro, orient="horizontal")
separador.pack(side=tk.TOP, expand=False, fill=tk.X)
# Create forms for data entry
entrada_tiempo = ttk.Entry(frame_abajo, validate="key", validatecommand=validacion_tiempo)
#entrada_y = ttk.Entry(frame_derecha, validate="key", validatecommand=validacion_y)
tiempo.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
#posicion_y.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
entrada_tiempo.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
# entrada_y.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
aceptar.pack(fill=tk.BOTH, expand=1)
aceptar.bind("<Button-1>", copiar_valores)
def boton_velocidadf(self):
pass
def boton_aceleracionf(self):
# data entry popup
Pop_Up = tk.Tk()
Pop_Up.title("Aceleracion")
Pop_Up.minsize(400,300)
label = tk.Label(Pop_Up)
label.pack()
button = tk.Button(Pop_Up, text = 'Evaluar' , width = 10, command = Pop_Up.destroy)
button.pack(side=tk.BOTTOM)
# popup window main loop
Pop_Up.mainloop()
# generate the plot
# generate the position point to measure
# generate the vector with origin at the position point
# possible displacement with a slider
pass
def boton_alcance_horizontalf(self):
pass
def boton_altura_maximaf(self):
pass
def boton_camino_recorridof(self):
pass
def boton_radio_y_centro_de_curvatura_circulo_obsculadorf(self):
pass
def boton_aceleracion_normal_y_tangencialf(self):
pass
def boton_vector_normalf(self):
pass
# Data storage list
tiempo_datos = [0, 0]
|
py | b409bc00efa7d45869f12d6432d503de6f4f55ed | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Possível solução da questão 1 da AD1-2 do curso de
Fundamentos de Programação do CEDERJ (2022.1) """
# # # # # Subprograms # # # # #
# Function to check whether the repetition restriction is satisfied
def checaRestricao(listaInteiros, listaNumeros):
# Start by creating a decision flag
restricaoSatisfeita = True
inteiro = 1 # the first integer is always 1
# If the restriction is broken for any integer we can stop execution immediately
while (inteiro <= max(listaInteiros) and restricaoSatisfeita):
# List of the two indices where the integer appears
indicesInteiro = []
# Check whether each number in the list equals the integer
for indice in range(len(listaNumeros)):
numero = listaNumeros[indice]
if (numero == inteiro):
indicesInteiro.append(indice)
# Check the restriction for this integer
restricaoSatisfeita = (indicesInteiro[1] == indicesInteiro[0] + inteiro + 1)
inteiro = inteiro + 1
return restricaoSatisfeita
# End of the checaRestricao function
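# Added illustration (not in the original assignment): for n = 3 the sequence
# [2, 3, 1, 2, 1, 3] satisfies the restriction, since the two 1s are separated
# by one number, the two 2s by two numbers and the two 3s by three numbers:
#   checaRestricao([1, 2, 3], [2, 3, 1, 2, 1, 3]) -> True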
# Function to permute the input list
def permutaN(listaInteiros, listaNumeros, indice = 0, permutacoes = []):
# Permutation loop:
# We swap the element at the given index with every element to its right
for i in range(indice, len(listaNumeros)):
# Copy the given initial arrangement
permutacao = []
for j in range(0, len(listaNumeros)):
permutacao.append(listaNumeros[j])
# Swap the i-th element with the element at the given index
permutacao[indice], permutacao[i] = permutacao[i], permutacao[indice]
# Acceptance condition: not previously added and it obeys the restrictions
if (permutacao not in permutacoes) and (checaRestricao(listaInteiros, permutacao)):
permutacoes.append(permutacao)
# Recursively update the index and perform a new permutation
permutaN(listaInteiros, permutacao, indice + 1, permutacoes)
# Return the result:
return permutacoes
# End of the permutaN function
# # # # # Main program # # # # #
# Input of the integer value n
n = int(input())
# Generate a list with all integers from 1 to n (each appearing twice)
listaInteiros = [] # create the list
# Fill it with the first appearance of each integer
for indice in range(n):
listaInteiros.append(indice + 1)
# Repeat the numbers
listaNumeros = listaInteiros + listaInteiros
# Obtain the sequences that satisfy the restrictions
sequencias = permutaN(listaInteiros, listaNumeros)
# Return the results to the user
if (len(sequencias) == 0):
print("Não há sequências com o valor", n, "de entrada")
else:
for seq in sequencias:
print(seq)
print("Há", len(sequencias), "sequências") |
py | b409bc1025fc527f7852806b55fd67e510249336 | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for doriancoin utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini")))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test/util/data"), "doriancoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename).read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename).read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn)).read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
        # that stderr is empty if no errors are expected. However, doriancoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
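# Illustrative sketch (not part of the original script): the shape of a single
# entry in the JSON file consumed by bctester()/bctest() above. Only keys that
# bctest() actually reads are shown; the values are made-up placeholders.
#
#   {
#       "exec": "./doriancoin-tx",
#       "args": ["-create", "nversion=1"],
#       "output_cmp": "blanktxv1.hex",
#       "return_code": 0,
#       "description": "creates a blank v1 transaction"
#   }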
if __name__ == '__main__':
main()
|
py | b409bc6dd0faec7f99b17b14bff02f6900a307e1 | """Wraps a flag class."""
import logging
from ..tools import File
log = logging.getLogger("ECC")
class Flag:
"""Utility class for storing possibly separated flag.
Attributes:
PREFIXES_WITH_PATHS (str[]): Full list of prefixes that are followed
by paths.
SEPARABLE_PREFIXES (str[]): Full list of prefixes that may take a
second part as an input.
"""
def __init__(self, prefix, body, separator=' '):
"""Initialize a flag with two parts.
Args:
prefix (str): Flag's prefix. Can be empty.
body (str): The body of the flag that combined with the prefix
creates the full flag.
"""
self.__body = body
self.__prefix = prefix
self.__separator = separator
@property
def prefix(self):
"""Prefix of the flag. Empty if not separable."""
return self.__prefix
@property
def body(self):
"""Body of the flag. Full flag if not separable."""
return self.__body
def as_list(self):
"""Return flag as list of its parts."""
if self.__prefix:
return [self.__prefix] + [self.__body]
return [self.__body]
def __str__(self):
"""Return flag as a string."""
if self.__prefix:
return self.__prefix + self.__separator + self.__body
return self.__body
def __repr__(self):
"""Return flag as a printable string."""
if self.__prefix:
return '({}, {})'.format(self.__prefix, self.__body)
return '({})'.format(self.__body)
def __hash__(self):
"""Compute a hash of a flag."""
if self.__prefix:
return hash(self.__prefix + self.__body)
return hash(self.__body)
def __eq__(self, other):
"""Check if it is equal to another flag."""
return self.__prefix == other.prefix and self.__body == other.body
@staticmethod
def tokenize_list(all_split_line, current_folder=''):
"""Find flags, that need to be separated and separate them.
Args:
all_split_line (str[]): A list of all flags split.
Returns (Flag[]): A list of flags containing two parts if needed.
"""
flags = []
skip_next_entry = False
log.debug("Tokenizing: %s", all_split_line)
for i, entry in enumerate(all_split_line):
if entry.startswith("#"):
continue
if skip_next_entry:
skip_next_entry = False
continue
if entry in Flag.SEPARABLE_PREFIXES:
# add both this and next part to a flag
if (i + 1) < len(all_split_line):
flags += Flag.Builder()\
.with_prefix(all_split_line[i])\
.with_body(all_split_line[i + 1])\
.build_with_expansion(current_folder)
skip_next_entry = True
continue
flags += Flag.Builder()\
.from_unparsed_string(entry)\
.build_with_expansion(current_folder)
return flags
class Builder:
"""Builder for flags providing a nicer interface."""
def __init__(self):
"""Initialize the empty internal flag."""
self.__prefix = ''
self.__body = ''
def from_unparsed_string(self, chunk):
"""Parse an unknown string into body and prefix."""
chunk = chunk.strip()
for prefix in Flag.SEPARABLE_PREFIXES:
if chunk.startswith(prefix):
self.__prefix = prefix
self.__body = chunk[len(prefix):]
break
# We did not find any separable prefix, so it's all body.
if not self.__body:
self.__body = chunk
return self
def with_body(self, body):
"""Set the body to the internal flag."""
self.__body = body.strip()
return self
def with_prefix(self, prefix):
"""Set the prefix to the internal flag."""
self.__prefix = prefix.strip()
if self.__prefix not in Flag.SEPARABLE_PREFIXES:
log.warning("Unexpected flag prefix: '%s'", self.__prefix)
return self
def build_with_expansion(self, current_folder='', wildcard_values={}):
"""Expand all expandable entries and return a resulting list."""
if self.__prefix in Flag.PREFIXES_WITH_PATHS:
all_flags = []
for expanded_body in File.expand_all(
input_path=self.__body,
wildcard_values=wildcard_values,
current_folder=current_folder):
all_flags.append(Flag(self.__prefix, expanded_body))
return all_flags
# This does not hold a path. Therefore we don't need to expand it.
return [Flag(prefix=self.__prefix, body=self.__body)]
def build(self):
"""Create a flag."""
if self.__prefix in Flag.PREFIXES_WITH_PATHS:
self.__body = File.canonical_path(self.__body)
return Flag(self.__prefix, self.__body)
# All prefixes that denote includes.
PREFIXES_WITH_PATHS = set([
"--cuda-path",
"--ptxas-path"
"-B",
"-cxx-isystem",
"-F",
"-fmodules-cache-path",
"-fmodules-user-build-path",
"-fplugin",
"-fprebuilt-module-path"
"-fprofile-use",
"-I",
"-idirafter",
"-iframework",
"-iframeworkwithsysroot",
"-imacros",
"-include",
"-include-pch",
"-iprefix",
"-iquote",
"-isysroot",
"-isystem",
"-isystem",
"-isystem-after",
"-iwithprefix",
"-iwithprefixbefore",
"-iwithsysroot",
"-L",
"-MF",
"-module-dependency-dir",
"-msvc",
"-o"
"-objcmt-whitelist-dir-path",
"/cxx-isystem",
"/I",
"/msvc",
])
# Generated from `clang -help` with regex: ([-/][\w-]+)\s\<\w+\>\s
SEPARABLE_PREFIXES = set([
"--analyzer-output",
"--config",
"-arcmt-migrate-report-output",
"-cxx-isystem",
"-dependency-dot",
"-dependency-file",
"-F",
"-fmodules-cache-path",
"-fmodules-user-build-path",
"-I",
"-idirafter",
"-iframework",
"-imacros",
"-include",
"-include-pch",
"-iprefix",
"-iquote",
"-isysroot",
"-isystem",
"-ivfsoverlay",
"-iwithprefix",
"-iwithprefixbefore",
"-iwithsysroot",
"-meabi",
"-MF",
"-MJ",
"-mllvm",
"-module-dependency-dir",
"-MQ",
"-MT",
"-mthread-model",
"-o",
"-serialize-diagnostics",
"-T",
"-Tbss",
"-Tdata",
"-Ttext",
"-working-directory",
"-x",
"-Xanalyzer",
"-Xassembler",
"-Xclang",
"-Xlinker",
"-Xopenmp-target",
"-Xpreprocessor",
"-z",
"/FI",
"/I",
"/link",
"/Tc",
"/Tp",
"/U"
])
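# Illustrative usage (not part of the module): how tokenize_list() splits a raw
# list of compiler arguments into Flag objects. Only non-path flags are used
# here so that File.expand_all() is not involved; the flag values are made up.
#
#   flags = Flag.tokenize_list(['-x', 'c++', '-DDEBUG', '-std=c++17'])
#   [str(f) for f in flags]   # -> ['-x c++', '-DDEBUG', '-std=c++17']
#   flags[0].as_list()        # -> ['-x', 'c++']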
|
py | b409bc70f2d6160ed58a8fb62c086e7d74026e35 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
L{twisted.words} support for Instance Messenger.
"""
from __future__ import print_function
from twisted.internet import defer
from twisted.internet import error
from twisted.python import log
from twisted.python.failure import Failure
from twisted.spread import pb
from twisted.words.im.locals import ONLINE, OFFLINE, AWAY
from twisted.words.im import basesupport, interfaces
from zope.interface import implements
class TwistedWordsPerson(basesupport.AbstractPerson):
"""I a facade for a person you can talk to through a twisted.words service.
"""
def __init__(self, name, wordsAccount):
basesupport.AbstractPerson.__init__(self, name, wordsAccount)
self.status = OFFLINE
def isOnline(self):
return ((self.status == ONLINE) or
(self.status == AWAY))
def getStatus(self):
return self.status
def sendMessage(self, text, metadata):
"""Return a deferred...
"""
if metadata:
d=self.account.client.perspective.directMessage(self.name,
text, metadata)
d.addErrback(self.metadataFailed, "* "+text)
return d
else:
return self.account.client.perspective.callRemote('directMessage',self.name, text)
def metadataFailed(self, result, text):
print("result:",result,"text:",text)
return self.account.client.perspective.directMessage(self.name, text)
def setStatus(self, status):
self.status = status
self.chat.getContactsList().setContactStatus(self)
class TwistedWordsGroup(basesupport.AbstractGroup):
implements(interfaces.IGroup)
def __init__(self, name, wordsClient):
basesupport.AbstractGroup.__init__(self, name, wordsClient)
self.joined = 0
def sendGroupMessage(self, text, metadata=None):
"""Return a deferred.
"""
#for backwards compatibility with older twisted.words servers.
if metadata:
d=self.account.client.perspective.callRemote(
'groupMessage', self.name, text, metadata)
d.addErrback(self.metadataFailed, "* "+text)
return d
else:
return self.account.client.perspective.callRemote('groupMessage',
self.name, text)
def setTopic(self, text):
self.account.client.perspective.callRemote(
'setGroupMetadata',
{'topic': text, 'topic_author': self.client.name},
self.name)
def metadataFailed(self, result, text):
print("result:",result,"text:",text)
return self.account.client.perspective.callRemote('groupMessage',
self.name, text)
def joining(self):
self.joined = 1
def leaving(self):
self.joined = 0
def leave(self):
return self.account.client.perspective.callRemote('leaveGroup',
self.name)
class TwistedWordsClient(pb.Referenceable, basesupport.AbstractClientMixin):
"""In some cases, this acts as an Account, since it a source of text
messages (multiple Words instances may be on a single PB connection)
"""
def __init__(self, acct, serviceName, perspectiveName, chatui,
_logonDeferred=None):
self.accountName = "%s (%s:%s)" % (acct.accountName, serviceName, perspectiveName)
self.name = perspectiveName
print("HELLO I AM A PB SERVICE", serviceName, perspectiveName)
self.chat = chatui
self.account = acct
self._logonDeferred = _logonDeferred
def getPerson(self, name):
return self.chat.getPerson(name, self)
def getGroup(self, name):
return self.chat.getGroup(name, self)
def getGroupConversation(self, name):
return self.chat.getGroupConversation(self.getGroup(name))
def addContact(self, name):
self.perspective.callRemote('addContact', name)
def remote_receiveGroupMembers(self, names, group):
print('received group members:', names, group)
self.getGroupConversation(group).setGroupMembers(names)
def remote_receiveGroupMessage(self, sender, group, message, metadata=None):
print('received a group message', sender, group, message, metadata)
self.getGroupConversation(group).showGroupMessage(sender, message, metadata)
def remote_memberJoined(self, member, group):
print('member joined', member, group)
self.getGroupConversation(group).memberJoined(member)
def remote_memberLeft(self, member, group):
print('member left')
self.getGroupConversation(group).memberLeft(member)
def remote_notifyStatusChanged(self, name, status):
self.chat.getPerson(name, self).setStatus(status)
def remote_receiveDirectMessage(self, name, message, metadata=None):
self.chat.getConversation(self.chat.getPerson(name, self)).showMessage(message, metadata)
def remote_receiveContactList(self, clist):
for name, status in clist:
self.chat.getPerson(name, self).setStatus(status)
def remote_setGroupMetadata(self, dict_, groupName):
        if "topic" in dict_:
self.getGroupConversation(groupName).setTopic(dict_["topic"], dict_.get("topic_author", None))
def joinGroup(self, name):
self.getGroup(name).joining()
return self.perspective.callRemote('joinGroup', name).addCallback(self._cbGroupJoined, name)
def leaveGroup(self, name):
self.getGroup(name).leaving()
return self.perspective.callRemote('leaveGroup', name).addCallback(self._cbGroupLeft, name)
def _cbGroupJoined(self, result, name):
groupConv = self.chat.getGroupConversation(self.getGroup(name))
groupConv.showGroupMessage("sys", "you joined")
self.perspective.callRemote('getGroupMembers', name)
def _cbGroupLeft(self, result, name):
print('left',name)
groupConv = self.chat.getGroupConversation(self.getGroup(name), 1)
groupConv.showGroupMessage("sys", "you left")
def connected(self, perspective):
print('Connected Words Client!', perspective)
if self._logonDeferred is not None:
self._logonDeferred.callback(self)
self.perspective = perspective
self.chat.getContactsList()
pbFrontEnds = {
"twisted.words": TwistedWordsClient,
"twisted.reality": None
}
class PBAccount(basesupport.AbstractAccount):
implements(interfaces.IAccount)
gatewayType = "PB"
_groupFactory = TwistedWordsGroup
_personFactory = TwistedWordsPerson
def __init__(self, accountName, autoLogin, username, password, host, port,
services=None):
"""
@param username: The name of your PB Identity.
@type username: string
"""
basesupport.AbstractAccount.__init__(self, accountName, autoLogin,
username, password, host, port)
self.services = []
if not services:
services = [('twisted.words', 'twisted.words', username)]
for serviceType, serviceName, perspectiveName in services:
self.services.append([pbFrontEnds[serviceType], serviceName,
perspectiveName])
def logOn(self, chatui):
"""
@returns: this breaks with L{interfaces.IAccount}
@returntype: DeferredList of L{interfaces.IClient}s
"""
# Overriding basesupport's implementation on account of the
# fact that _startLogOn tends to return a deferredList rather
# than a simple Deferred, and we need to do registerAccountClient.
if (not self._isConnecting) and (not self._isOnline):
self._isConnecting = 1
d = self._startLogOn(chatui)
d.addErrback(self._loginFailed)
def registerMany(results):
for success, result in results:
if success:
chatui.registerAccountClient(result)
self._cb_logOn(result)
else:
log.err(result)
d.addCallback(registerMany)
return d
else:
raise error.ConnectionError("Connection in progress")
def _startLogOn(self, chatui):
print('Connecting...', end=' ')
d = pb.getObjectAt(self.host, self.port)
d.addCallbacks(self._cbConnected, self._ebConnected,
callbackArgs=(chatui,))
return d
def _cbConnected(self, root, chatui):
print('Connected!')
print('Identifying...', end=' ')
d = pb.authIdentity(root, self.username, self.password)
d.addCallbacks(self._cbIdent, self._ebConnected,
callbackArgs=(chatui,))
return d
def _cbIdent(self, ident, chatui):
if not ident:
print('falsely identified.')
return self._ebConnected(Failure(Exception("username or password incorrect")))
print('Identified!')
dl = []
for handlerClass, sname, pname in self.services:
d = defer.Deferred()
dl.append(d)
handler = handlerClass(self, sname, pname, chatui, d)
ident.callRemote('attach', sname, pname, handler).addCallback(handler.connected)
return defer.DeferredList(dl)
def _ebConnected(self, error):
print('Not connected.')
return error
|
py | b409bd04fb3b4aab3a7ebf67b25cd80ec2e785d4 | # BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import unittest
from benchexec.util import ProcessExitCode
import tempfile
import os
import stat
sys.dont_write_bytecode = True # prevent creation of .pyc files
from benchexec import util
class TestParse(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
def assertEqualNumberAndUnit(self, value, number, unit):
self.assertEqual(util.split_number_and_unit(value), (number, unit))
def test_split_number_and_unit(self):
self.assertEqualNumberAndUnit("1", 1, "")
self.assertEqualNumberAndUnit("1s", 1, "s")
self.assertEqualNumberAndUnit(" 1 s ", 1, "s")
self.assertEqualNumberAndUnit("-1s", -1, "s")
self.assertEqualNumberAndUnit("1abc", 1, "abc")
self.assertEqualNumberAndUnit("1 abc ", 1, "abc")
self.assertRaises(ValueError, util.split_number_and_unit, "")
self.assertRaises(ValueError, util.split_number_and_unit, "abc")
self.assertRaises(ValueError, util.split_number_and_unit, "s")
self.assertRaises(ValueError, util.split_number_and_unit, "a1a")
try:
self.assertEqualNumberAndUnit("- 1", -1, "")
except ValueError:
pass # Python 2 accepts this syntax, Python 3 does not
def test_parse_memory_value(self):
self.assertEqual(util.parse_memory_value("1"), 1)
self.assertEqual(util.parse_memory_value("1B"), 1)
self.assertEqual(util.parse_memory_value("1kB"), 1000)
self.assertEqual(util.parse_memory_value("1MB"), 1000 * 1000)
self.assertEqual(util.parse_memory_value("1GB"), 1000 * 1000 * 1000)
self.assertEqual(util.parse_memory_value("1TB"), 1000 * 1000 * 1000 * 1000)
def test_parse_timespan_value(self):
self.assertEqual(util.parse_timespan_value("1"), 1)
self.assertEqual(util.parse_timespan_value("1s"), 1)
self.assertEqual(util.parse_timespan_value("1min"), 60)
self.assertEqual(util.parse_timespan_value("1h"), 60 * 60)
self.assertEqual(util.parse_timespan_value("1d"), 24 * 60 * 60)
class TestProcessExitCode(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
def ProcessExitCode_with_value(self, value):
return ProcessExitCode(raw=value << 8, value=value, signal=None)
def ProcessExitCode_with_signal(self, signal):
return ProcessExitCode(raw=signal, value=None, signal=signal)
def test_boolness(self):
self.assertFalse(self.ProcessExitCode_with_value(0))
self.assertTrue(self.ProcessExitCode_with_value(1))
self.assertTrue(self.ProcessExitCode_with_signal(1))
def test_value(self):
self.assertEqual(self.ProcessExitCode_with_value(0).value, 0)
self.assertEqual(self.ProcessExitCode_with_value(1).value, 1)
self.assertEqual(ProcessExitCode.from_raw(0).value, 0)
self.assertEqual(ProcessExitCode.from_raw(256).value, 1)
self.assertIsNone(self.ProcessExitCode_with_signal(1).value)
self.assertIsNone(ProcessExitCode.from_raw(1).value)
def test_signal(self):
self.assertEqual(self.ProcessExitCode_with_signal(1).signal, 1)
self.assertEqual(ProcessExitCode.from_raw(1).signal, 1)
self.assertIsNone(self.ProcessExitCode_with_value(0).signal)
self.assertIsNone(self.ProcessExitCode_with_value(1).signal)
self.assertIsNone(ProcessExitCode.from_raw(0).signal)
self.assertIsNone(ProcessExitCode.from_raw(256).signal)
class TestRmtree(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.longMessage = True
cls.maxDiff = None
def setUp(self):
self.base_dir = tempfile.mkdtemp(prefix="BenchExec_test_util_rmtree")
def test_writable_file(self):
util.write_file("", self.base_dir, "tempfile")
util.rmtree(self.base_dir)
self.assertFalse(
os.path.exists(self.base_dir), "Failed to remove directory with file"
)
def test_writable_dir(self):
os.mkdir(os.path.join(self.base_dir, "tempdir"))
util.rmtree(self.base_dir)
self.assertFalse(
os.path.exists(self.base_dir),
"Failed to remove directory with child directory",
)
def test_nonwritable_file(self):
temp_file = os.path.join(self.base_dir, "tempfile")
util.write_file("", temp_file)
os.chmod(temp_file, 0)
util.rmtree(self.base_dir)
self.assertFalse(
os.path.exists(self.base_dir),
"Failed to remove directory with non-writable file",
)
def create_and_delete_directory(self, mode):
tempdir = os.path.join(self.base_dir, "tempdir")
os.mkdir(tempdir)
util.write_file("", tempdir, "tempfile")
os.chmod(tempdir, mode)
util.rmtree(self.base_dir)
self.assertFalse(os.path.exists(self.base_dir), "Failed to remove directory")
def test_nonwritable_dir(self):
self.create_and_delete_directory(stat.S_IRUSR | stat.S_IXUSR)
def test_nonexecutable_dir(self):
self.create_and_delete_directory(stat.S_IRUSR | stat.S_IWUSR)
def test_nonreadable_dir(self):
self.create_and_delete_directory(stat.S_IWUSR | stat.S_IXUSR)
def test_dir_without_any_permissions(self):
self.create_and_delete_directory(0)
|
py | b409bd5782feb1d9d644c6ffbeabc99c9267fa79 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGateway(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param ip_configurations: IP configurations for virtual network gateway.
:type ip_configurations:
list[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayIPConfiguration]
:param gateway_type: The type of this virtual network gateway. Possible
values are: 'Vpn' and 'ExpressRoute'. Possible values include: 'Vpn',
'ExpressRoute'
:type gateway_type: str or
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayType
:param vpn_type: The type of this virtual network gateway. Possible values
are: 'PolicyBased' and 'RouteBased'. Possible values include:
'PolicyBased', 'RouteBased'
:type vpn_type: str or ~azure.mgmt.network.v2017_09_01.models.VpnType
:param enable_bgp: Whether BGP is enabled for this virtual network gateway
or not.
:type enable_bgp: bool
:param active_active: ActiveActive flag
:type active_active: bool
:param gateway_default_site: The reference of the LocalNetworkGateway
resource which represents local network site having default routes. Assign
Null value in case of removing existing default site setting.
:type gateway_default_site:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param sku: The reference of the VirtualNetworkGatewaySku resource which
represents the SKU selected for Virtual network gateway.
:type sku: ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewaySku
:param vpn_client_configuration: The reference of the
VpnClientConfiguration resource which represents the P2S VpnClient
configurations.
:type vpn_client_configuration:
~azure.mgmt.network.v2017_09_01.models.VpnClientConfiguration
:param bgp_settings: Virtual network gateway's BGP speaker settings.
:type bgp_settings: ~azure.mgmt.network.v2017_09_01.models.BgpSettings
:param resource_guid: The resource GUID property of the
VirtualNetworkGateway resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGateway resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualNetworkGatewayIPConfiguration]'},
'gateway_type': {'key': 'properties.gatewayType', 'type': 'str'},
'vpn_type': {'key': 'properties.vpnType', 'type': 'str'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'active_active': {'key': 'properties.activeActive', 'type': 'bool'},
'gateway_default_site': {'key': 'properties.gatewayDefaultSite', 'type': 'SubResource'},
'sku': {'key': 'properties.sku', 'type': 'VirtualNetworkGatewaySku'},
'vpn_client_configuration': {'key': 'properties.vpnClientConfiguration', 'type': 'VpnClientConfiguration'},
'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkGateway, self).__init__(**kwargs)
self.ip_configurations = kwargs.get('ip_configurations', None)
self.gateway_type = kwargs.get('gateway_type', None)
self.vpn_type = kwargs.get('vpn_type', None)
self.enable_bgp = kwargs.get('enable_bgp', None)
self.active_active = kwargs.get('active_active', None)
self.gateway_default_site = kwargs.get('gateway_default_site', None)
self.sku = kwargs.get('sku', None)
self.vpn_client_configuration = kwargs.get('vpn_client_configuration', None)
self.bgp_settings = kwargs.get('bgp_settings', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
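# Illustrative construction (not part of the generated model): any subset of
# the keyword arguments documented above can be passed; the values here are
# placeholders only.
#
#   gateway = VirtualNetworkGateway(
#       location='westeurope',
#       gateway_type='Vpn',
#       vpn_type='RouteBased',
#       enable_bgp=False,
#   )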
|
py | b409bdbc44fbcab668c63ba1560d1d9b02d4e573 | from __future__ import absolute_import
import logging
from datetime import date, timedelta
from dateutil.parser import parse
from dexter.app import celery_app as app
from dexter.processing import DocumentProcessor, DocumentProcessorNT
# force configs for API keys to be set
import dexter.core
# This is a collection of periodic tasks for Dexter, using
# Celery to drive task completion.
log = logging.getLogger(__name__)
@app.task
def back_process_feeds():
""" Enqueue a task to fetch yesterday's feeds. """
if date.today() == date(2019, 6, 6):
d1 = date(2019, 6, 3)
d2 = date(2019, 4, 21)
# days = [d1 + timedelta(days=x) for x in range((d2 - d1).days + 1)]
days = [d1]
filter_parm = ''
for d in days:
fetch_filtered_daily_feeds.delay(d.isoformat(), filter_parm)
elif date.today() == date(2019, 6, 7):
d1 = date(2019, 6, 4)
d2 = date(2019, 4, 21)
# days = [d1 + timedelta(days=x) for x in range((d2 - d1).days + 1)]
days = [d1]
filter_parm = ''
for d in days:
fetch_filtered_daily_feeds.delay(d.isoformat(), filter_parm)
else:
        print('Already Done!')
@app.task
def fetch_yesterdays_feeds():
""" Enqueue a task to fetch yesterday's feeds. """
yesterday = date.today() - timedelta(days=1)
fetch_daily_feeds.delay(yesterday.isoformat())
# retry after 30 minutes, retry for up to 7 days
@app.task(bind=True, default_retry_delay=30*60, max_retries=7*24*2)
def fetch_filtered_daily_feeds(self, day, filter_parm):
""" Fetch feed of URLs to crawl and queue up a task to grab and process
each url. """
try:
day = parse(day)
dp = DocumentProcessorNT()
count = 0
for item in dp.fetch_filtered_daily_feed_items(day, filter_parm):
get_feed_item.delay(item)
count += 1
except Exception as e:
log.error("Error processing daily feeds for %s" % day, exc_info=e)
self.retry(exc=e)
if count == 0:
# nothing to do, retry later
self.retry()
# retry after 30 minutes, retry for up to 7 days
@app.task(bind=True, default_retry_delay=30*60, max_retries=7*24*2)
def fetch_daily_feeds(self, day):
""" Fetch feed of URLs to crawl and queue up a task to grab and process
each url. """
try:
day = parse(day)
dp = DocumentProcessorNT()
count = 0
for item in dp.fetch_daily_feed_items(day):
get_feed_item.delay(item)
count += 1
except Exception as e:
log.error("Error processing daily feeds for %s" % day, exc_info=e)
self.retry(exc=e)
if count == 0:
# nothing to do, retry later
self.retry()
# retry every minute, for up to 24 hours.
@app.task(bind=True, rate_limit="10/m", default_retry_delay=30, max_retries=2)
def get_feed_item(self, item):
""" Fetch and process a document feed item. """
try:
dp = DocumentProcessorNT()
dp.process_feed_item(item)
except Exception as e:
log.error("Error processing feed item: %s" % item, exc_info=e)
self.retry()
@app.task
def backfill_taxonomies():
""" Enqueue a task to backfill taxonomies """
try:
dp = DocumentProcessorNT()
dp.backfill_taxonomies()
except Exception as e:
log.error("Error backfilling taxonomies: %s" % e.message, exc_info=e)
|
py | b409be41b0f340f2465b9b9df82afd0cd5d1dc5e | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""XManager launcher for CIFAR10.
Usage:
xmanager launch examples/cifar10_tensorflow_tpu/launcher.py
"""
import asyncio
import itertools
import os
from absl import app
from absl import flags
from xmanager import xm
from xmanager import xm_local
from xmanager.cloud import build_image
from xmanager.cloud import caip
from xmanager.contrib import tpu
FLAGS = flags.FLAGS
flags.DEFINE_string('tensorboard', None, 'Tensorboard instance.')
def main(_):
with xm_local.create_experiment(experiment_title='cifar10') as experiment:
directory = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
# pyformat: disable
spec = xm.PythonContainer(
# Package the current directory that this script is in.
path='.',
# tpuvm requires Python3.8 and GLIBC_2.29, which requires at least
# debian:11 or ubuntu:20.04
base_image='ubuntu:20.04',
docker_instructions=(
['RUN apt-get update && apt-get install -y python-is-python3 python3-pip wget'] + # pylint: disable=line-too-long
build_image.default_steps(directory, use_deep_module=False) +
tpu.tpuvm_docker_instructions()),
entrypoint=xm.ModuleName('cifar10'),
)
# pyformat: enable
[executable] = experiment.package([
xm.Packageable(
executable_spec=spec,
executor_spec=xm_local.Caip.Spec(),
args={},
),
])
learning_rates = [0.1, 0.001]
trials = list(
dict([('learning_rate', lr)])
for (lr,) in itertools.product(learning_rates))
tensorboard = FLAGS.tensorboard
if not tensorboard:
tensorboard = caip.client().get_or_create_tensorboard('cifar10')
tensorboard = asyncio.get_event_loop().run_until_complete(tensorboard)
for i, hyperparameters in enumerate(trials):
output_dir = os.environ.get('GOOGLE_CLOUD_BUCKET_NAME', None)
if output_dir:
output_dir = os.path.join(output_dir, str(experiment.experiment_id),
str(i))
tensorboard_capability = xm_local.TensorboardCapability(
name=tensorboard, base_output_directory=output_dir)
experiment.add(
xm.Job(
executable=executable,
executor=xm_local.Caip(
requirements=xm.JobRequirements(tpu_v2=8),
tensorboard=tensorboard_capability),
args=hyperparameters,
))
if __name__ == '__main__':
app.run(main)
|
py | b409bed208f327408a61aa69062a1614917dbff5 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1.29-filter-categories
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_list_catalog_items_response import ApiListCatalogItemsResponse # noqa: E501
from swagger_client.rest import ApiException
class TestApiListCatalogItemsResponse(unittest.TestCase):
"""ApiListCatalogItemsResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testApiListCatalogItemsResponse(self):
"""Test ApiListCatalogItemsResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.api_list_catalog_items_response.ApiListCatalogItemsResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b409bee86bcf51b999ed28eb4cd20235233e0823 |
import torch
from enum import Enum
from sklearn.metrics import mean_squared_error
from torch import nn
from torch.nn.modules.loss import _Loss
import numpy as np
from datasets.radar_dataset import DataContent
from run_scripts import print_, device
class DeltaSNR(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(DeltaSNR, self).__init__(size_average, reduce, reduction)
self.data_content = DataContent.COMPLEX_PACKET_RD # extend to others?!
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
object_mask = object_mask.to(device)
noise_mask = noise_mask.to(device)
sinr_delta_mean = 0
num_packets = target_re_im.shape[0]
if self.data_content is DataContent.COMPLEX_PACKET_RD:
for p in range(num_packets):
output_re_im_packet = output_re_im[p]
target_re_im_packet = target_re_im[p]
sinr_output = sinr_from_re_im_format(output_re_im_packet, object_mask, noise_mask)
sinr_target = sinr_from_re_im_format(target_re_im_packet, object_mask, noise_mask)
sinr_delta_mean += torch.abs(sinr_target - sinr_output)
else:
print_('WARNING: Not implemented yet.')
assert False
sinr_delta_mean /= num_packets
return sinr_delta_mean
class SINRLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(SINRLoss, self).__init__(size_average, reduce, reduction)
self.data_content = DataContent.COMPLEX_PACKET_RD # extend to others?!
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
object_mask = object_mask.to(device)
noise_mask = noise_mask.to(device)
neg_sinr_mean = 0
num_packets = target_re_im.shape[0]
if self.data_content is DataContent.COMPLEX_PACKET_RD:
for p in range(num_packets):
output_re_im_packet = output_re_im[p]
neg_sinr_mean -= sinr_from_re_im_format(output_re_im_packet, object_mask, noise_mask)
else:
print_('WARNING: Not implemented yet.')
assert False
neg_sinr_mean /= num_packets
return neg_sinr_mean
class MSEWeightedMagPhase(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(MSEWeightedMagPhase, self).__init__(size_average, reduce, reduction)
self.data_content = DataContent.COMPLEX_PACKET_RD # extend to others?!
self.mse = nn.MSELoss()
self.w_mag = 0.0
self.w_phase = 0.0
self.w_re_im = 1.0
self.epoch = 0
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
object_mask = object_mask.to(device)
loss = 0
num_packets = target_re_im.shape[0]
num_re = int(target_re_im.shape[2] / 2)
if self.data_content is DataContent.COMPLEX_PACKET_RD:
for p in range(num_packets):
output_re_im_packet = output_re_im[p]
target_re_im_packet = target_re_im[p]
output_re_packet = output_re_im_packet[:, :num_re]
output_im_packet = output_re_im_packet[:, num_re:]
target_re_packet = target_re_im_packet[:, :num_re]
target_im_packet = target_re_im_packet[:, num_re:]
output_peaks_re = torch.masked_select(output_re_packet, object_mask)
output_peaks_im = torch.masked_select(output_im_packet, object_mask)
target_peaks_re = torch.masked_select(target_re_packet, object_mask)
target_peaks_im = torch.masked_select(target_im_packet, object_mask)
phase_target = torch.atan(target_peaks_im / target_peaks_re)
phase_output = torch.atan(output_peaks_im / output_peaks_re)
target_max_mag = torch.sqrt(target_re_packet ** 2 + target_im_packet ** 2).view(-1).max()
target_re_packet_log_mag = target_re_packet / target_max_mag
target_im_packet_log_mag = target_im_packet / target_max_mag
target_log_mag = 10 * torch.log10(torch.sqrt(target_re_packet_log_mag ** 2 + target_im_packet_log_mag ** 2))
target_log_mag = torch.masked_select(target_log_mag, object_mask)
output_max_mag = torch.sqrt(output_re_packet ** 2 + output_im_packet ** 2).view(-1).max()
output_re_packet_log_mag = output_re_packet / output_max_mag
output_im_packet_log_mag = output_im_packet / output_max_mag
output_log_mag = 10 * torch.log10(torch.sqrt(output_re_packet_log_mag ** 2 + output_im_packet_log_mag ** 2))
output_log_mag = torch.masked_select(output_log_mag, object_mask)
loss += self.w_re_im * self.mse(output_re_im, target_re_im) +\
self.w_mag * self.mse(output_log_mag, target_log_mag) +\
self.w_phase * self.mse(phase_output, phase_target)
else:
print_('WARNING: Not implemented yet.')
assert False
loss /= num_packets
return loss
def next_epoch(self):
self.epoch += 1
if self.epoch % 10 == 0 and self.w_re_im > 0.4:
self.w_re_im -= 0.1
self.w_mag = (1 - self.w_re_im) / 2
self.w_phase = (1 - self.w_re_im) / 2
class MSE(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(MSE, self).__init__(size_average, reduce, reduction)
self.mse = nn.MSELoss()
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
return self.mse.forward(output_re_im, target_re_im)
class ObjectiveFunction(Enum):
DELTA_SNR = DeltaSNR()
MSE = MSE()
MSE_MAG_PHASE_WEIGHTED = MSEWeightedMagPhase()
SINR = SINRLoss()
def __call__(self, *args):
return self.value(*args)
@staticmethod
def loss_to_running_loss(batch_loss, batch_size):
return batch_loss * batch_size
@staticmethod
def loss_from_running_loss(running_loss, sample_size):
return running_loss / sample_size
@staticmethod
def from_name(value):
if value == ObjectiveFunction.DELTA_SNR.name:
return ObjectiveFunction.DELTA_SNR
elif value == ObjectiveFunction.MSE.name:
return ObjectiveFunction.MSE
elif value == ObjectiveFunction.MSE_MAG_PHASE_WEIGHTED.name:
return ObjectiveFunction.MSE_MAG_PHASE_WEIGHTED
elif value == ObjectiveFunction.SINR.name:
return ObjectiveFunction.SINR
else:
return None
@staticmethod
def objective_func_name(func):
try:
if func.name in ObjectiveFunction.__members__:
return func.name
else:
return 'None'
except AttributeError:
return 'None'
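# Illustrative usage (not part of the module): resolving an objective function
# from its configured name and back, as the helpers above are meant to be used.
#
#   ObjectiveFunction.from_name('MSE') is ObjectiveFunction.MSE        # True
#   ObjectiveFunction.objective_func_name(ObjectiveFunction.SINR)      # 'SINR'
#   ObjectiveFunction.objective_func_name(None)                        # 'None'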
def sinr_log_mag(log_mag_rd_target, log_mag_rd_test, object_mask, noise_mask):
return np.average(log_mag_rd_test[object_mask]) - np.average(log_mag_rd_test[noise_mask])
def sinr(rd_target, rd_test, object_mask, noise_mask):
rd_test_mag = np.abs(rd_test)**2
obj_values = rd_test_mag[object_mask]
obj_magnitude = np.average(obj_values)
noise_values = rd_test_mag[noise_mask]
noise_magnitude = np.average(noise_values)
return 10 * np.log10(obj_magnitude / noise_magnitude)
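# Worked toy example (not part of the module) for the numpy-based sinr() above.
# The masks are boolean arrays with the same shape as the RD map; the numbers
# are made up for illustration.
#
#   rd = np.array([[10.0 + 0j, 0.1 + 0j],
#                  [0.1 + 0j, 0.1 + 0j]])
#   obj_mask = np.array([[True, False], [False, False]])
#   noise_mask = ~obj_mask
#   sinr(rd, rd, obj_mask, noise_mask)
#   # -> 10 * log10(|10|^2 / mean(|0.1|^2)) = 40.0 dB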
def sinr_1d(cr_target, cr_test, object_mask, noise_mask):
cr_test_mag = np.abs(cr_test)**2
obj_values = cr_test_mag[object_mask]
obj_magnitude = np.average(obj_values)
noise_values = cr_test_mag[noise_mask]
noise_magnitude = np.average(noise_values)
return 10 * np.log10(obj_magnitude / noise_magnitude)
def sinr_from_re_im_format(re_im_packet, obj_mask, noise_mask):
if len(re_im_packet.shape) == 3:
re_im_packet = re_im_packet[0]
num_re = int(re_im_packet.shape[1]/2)
re_packet = re_im_packet[:, :num_re]
im_packet = re_im_packet[:, num_re:]
mag = re_packet ** 2 + im_packet ** 2
obj_values = torch.masked_select(mag, obj_mask)
obj_magnitude = torch.mean(obj_values)
noise_values = torch.masked_select(mag, noise_mask)
noise_magnitude = torch.mean(noise_values)
return 10 * torch.log10(obj_magnitude / noise_magnitude)
def peak_mag_mse(log_mag_rd_target, log_mag_rd_test, object_mask, noise_mask):
obj_values_target = log_mag_rd_target[object_mask]
obj_values_test = log_mag_rd_test[object_mask]
if len(obj_values_target) == 0:
return np.nan
return mean_squared_error(obj_values_target, obj_values_test)
def evm(rd_target, rd_test, object_mask, noise_mask):
obj_values_target = rd_target[object_mask]
obj_values_test = rd_test[object_mask]
if len(obj_values_target) == 0:
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def evm_norm(rd_target, rd_test, object_mask, noise_mask):
rd_target_norm = rd_target / np.amax(np.abs(rd_target))
rd_test_norm = rd_test / np.amax(np.abs(rd_test))
obj_values_target = rd_target_norm[object_mask]
obj_values_test = rd_test_norm[object_mask]
if len(obj_values_target) == 0:
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def evm_1d(cr_target, cr_test, object_mask, noise_mask):
obj_values_target = cr_target[object_mask]
obj_values_test = cr_test[object_mask]
if len(obj_values_target) == 0:
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def evm_1d_norm(cr_target, cr_test, object_mask, noise_mask):
cr_target_norm = cr_target / np.amax(np.abs(cr_target))
cr_test_norm = cr_test / np.amax(np.abs(cr_test))
obj_values_target = cr_target_norm[object_mask]
obj_values_test = cr_test_norm[object_mask]
if len(obj_values_target) == 0:
print_('WARNING: no obj peak targets found in evm_1d_norm!')
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def rd_obj_peak_phase_mse(rd_target, rd_test, object_mask, noise_mask):
peaks_target = rd_target[object_mask]
peaks_test = rd_test[object_mask]
if len(peaks_target) == 0:
print_('WARNING: no peaks found for evaluation metric.')
return np.nan
peaks_target_imag = np.imag(peaks_target)
peaks_target_real = np.real(peaks_target)
peaks_phase_target = np.arctan(peaks_target_imag.astype('float') / peaks_target_real.astype('float'))
peaks_test_imag = np.imag(peaks_test)
peaks_test_real = np.real(peaks_test)
peaks_phase_test = np.arctan(peaks_test_imag.astype('float') / peaks_test_real.astype('float'))
phase_mse = mean_squared_error(peaks_phase_target, peaks_phase_test)
return phase_mse
def rd_obj_peak_log_mag_mse(rd_target, rd_test, object_mask, noise_mask):
peaks_target = rd_target[object_mask]
peaks_test = rd_test[object_mask]
if len(peaks_target) == 0:
print_('WARNING: no peaks found for evaluation metric.')
return np.nan
mag_target = np.abs(peaks_target)
mag_test = np.abs(peaks_test)
phase_mse = mean_squared_error(mag_target, mag_test)
return phase_mse
|
py | b409c0babad81054f53114706847a3313eac9d0c | import argparse
#import logging
import os
import time
from functools import partial
import h5py
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from .data_reader import DataReader_mseed_array, DataReader_pred
from .model_pred import ModelConfig, UNet
from .postprocess import extract_picks, extract_amplitude
tf.compat.v1.disable_eager_execution()
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
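# AttrDict simply mirrors the underlying dict into the instance __dict__, so
# configuration entries are reachable both as keys and as attributes.
# Illustrative example (not part of the original script):
#
#   cfg = AttrDict(batch_size=4, model_dir='some/model/dir')
#   cfg.batch_size == cfg['batch_size'] == 4   # True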
def pred_fn(data_reader, figure_dir=None, prob_dir=None, log_dir=None, **kwargs):
# ----------------------------------------------------------
# define default arguments
# ----------------------------------------------------------
kwargs.setdefault('batch_size', 20)
kwargs.setdefault('model_dir', '')
kwargs.setdefault('data_dir', '')
kwargs.setdefault('hdf5_file', 'data.h5')
kwargs.setdefault('hdf5_group', 'data')
kwargs.setdefault('min_p_prob', 0.6)
kwargs.setdefault('min_s_prob', 0.6)
kwargs.setdefault('mpd', 50)
kwargs.setdefault('amplitude', False)
kwargs.setdefault('format', 'hdf5')
kwargs.setdefault('s3_url', 'localhost:9000')
kwargs.setdefault('stations', '')
# ----------------------------------------------------------
# cheap trick to reuse most of the original code
# ----------------------------------------------------------
args = AttrDict(kwargs)
# ----------------------------------------------------------
current_time = time.strftime("%y%m%d-%H%M%S")
with tf.compat.v1.name_scope('Input_Batch'):
if args.format == "mseed_array":
batch_size = 1
else:
batch_size = args.batch_size
dataset = data_reader.dataset(batch_size)
batch = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
config = ModelConfig(X_shape=data_reader.X_shape)
#with open(os.path.join(log_dir, 'config.log'), 'w') as fp:
# fp.write('\n'.join("%s: %s" % item for item in vars(config).items()))
sess_config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=kwargs.get(
'inter_op_parallellism_threads', 0),
intra_op_parallelism_threads=kwargs.get(
'intra_op_parallelism_threads', 0))
#sess_config.gpu_options.allow_growth = True
# sess_config.log_device_placement = False
with tf.compat.v1.Session(config=sess_config) as sess:
model = UNet(config=config, input_batch=batch)
saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=5)
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
latest_check_point = tf.train.latest_checkpoint(args.model_dir)
#logging.info(f"restoring model {latest_check_point}")
saver.restore(sess, latest_check_point)
predictions, fnames, picks = [], [], []
amps = [] if args.amplitude else None
for _ in tqdm(range(0, data_reader.num_data, batch_size), desc="Pred"):
if args.amplitude:
pred_batch, X_batch, amp_batch, fname_batch, t0_batch = sess.run(
[model.preds, batch[0], batch[1], batch[2], batch[3]],
feed_dict={model.drop_rate: 0, model.is_training: False},
)
# X_batch, amp_batch, fname_batch, t0_batch = sess.run([batch[0], batch[1], batch[2], batch[3]])
else:
pred_batch, X_batch, fname_batch, t0_batch = sess.run(
[model.preds, batch[0], batch[1], batch[2]],
feed_dict={model.drop_rate: 0, model.is_training: False},
)
picks_ = extract_picks(
preds=pred_batch, fnames=fname_batch, t0=t0_batch,
config=args, exclusive=kwargs.get('exclusive', False))
picks.extend(picks_)
if args.amplitude:
amps_ = extract_amplitude(amp_batch, picks_)
amps.extend(amps_)
# store the batch predictions
predictions.extend(pred_batch)
fnames.extend(fname_batch)
#tf.compat.v1.get_variable_scope().reuse_variables()
# reset graph to not keep on building on the same one
tf.compat.v1.reset_default_graph()
# convert lists to numpy arrays
predictions = np.float32(predictions).squeeze()
fnames = list(np.asarray(fnames).astype('U'))
# order the outputs
ordered_proba = np.zeros_like(predictions, dtype=np.float32)
ordered_picks = np.zeros((data_reader.num_data, 2, 2), dtype=object)
for i in range(data_reader.num_data):
sample_name = f'sample{i}'
idx = fnames.index(sample_name)
ordered_proba[i, ...] = predictions[idx, ...].squeeze()
ordered_picks[i, 0, 0] = np.array(picks[idx].p_idx).squeeze()
ordered_picks[i, 0, 1] = np.array(picks[idx].p_prob).squeeze()
ordered_picks[i, 1, 0] = np.array(picks[idx].s_idx).squeeze()
ordered_picks[i, 1, 1] = np.array(picks[idx].s_prob).squeeze()
return ordered_proba, ordered_picks
|
py | b409c22fde30c71b3c73abe007e7b42b9daf955a | import unittest
import numpy as np
from test.zipload import Zipload
VERBOSE = False
class Base(unittest.TestCase):
def setUp(self):
Zipload.download()
def _log(self, r):
# Reduce test verbosity:
if not VERBOSE:
return
print(r.header)
items = ((k, v) for k, v in r.groups.items() if isinstance(k, str))
fmt = '{0.name:>14s}:{1.name:<14s} {1.desc:36s} {2}'
for _, g in sorted(items):
for _, p in sorted(g.params.items()):
value = None
if p.bytes_per_element == 4:
if p.dimensions:
value = p.float_array
else:
value = p.float_value
if p.bytes_per_element == 2:
if p.dimensions:
value = p.int16_array
else:
value = p.int16_value
if p.bytes_per_element == 1:
if p.dimensions:
value = p.int8_array
else:
value = p.int8_value
if p.bytes_per_element == -1:
if len(p.dimensions) > 1:
value = [s for s in p.string_array]
else:
value = p.string_value
print(fmt.format(g, p, value))
def load_data(reader):
''' Fetch point and analog data arrays
'''
# Fetch sample rate parameters
first_frame = reader.first_frame
nframe = reader.frame_count
npoint = reader.header.point_count
nanalog_count = reader.analog_sample_count
nanalog_channel = reader.analog_used
nanalog_per_frame_samples = reader.analog_per_frame
# Generate data arrays
point_frames = np.zeros([nframe, npoint, 5], dtype=np.float64)
analog_frames = np.zeros([nanalog_count, nanalog_channel], dtype=np.float64)
# Start reading POINT and ANALOG blocks
for i, points, analog in reader.read_frames(copy=False):
# Extract columns 0:5
index = i - first_frame
aindex = index * nanalog_per_frame_samples
point_frames[index] = points
analog_frames[aindex:aindex+nanalog_per_frame_samples] = analog.T
# Return data frames
return point_frames, analog_frames
def create_camera_mask(point_frames):
''' Create a mask for POINT data using the 4:th column.
'''
return point_frames[:, :, 4] >= 0
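# Illustrative call pattern (not part of the test suite) for the helpers above,
# assuming the c3d.Reader API used elsewhere in these tests; the file name is
# hypothetical.
#
#   with open('capture.c3d', 'rb') as handle:
#       reader = c3d.Reader(handle)
#       points, analog = load_data(reader)
#       camera_mask = create_camera_mask(points)   # True where column 4 >= 0
#       valid_points = points[camera_mask]         # -> shape (n_valid, 5)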
|
py | b409c2990a756ae0e69226914c78d053487423ad |
from flask import Flask, jsonify, session
from flask_cors import CORS
from werkzeug.contrib.fixers import ProxyFix
import logging
import os
import sys
import json
from flask import g
from tornado.web import FallbackHandler, RequestHandler, Application
from tornado.wsgi import WSGIContainer
import tornado
instance_path = os.path.dirname(os.path.realpath(__file__)) + '/config/'
app = Flask(__name__, instance_relative_config=True, instance_path=instance_path)
db_global = None
def setup_views():
from annotator_supreme.views.version_view import VersionView
from annotator_supreme.views.dataset_view import DatasetView
from annotator_supreme.views.image_view import ImageView
from annotator_supreme.views.image_edit_view import ImageEditView
from annotator_supreme.views.annotation_view import AnnoView
from annotator_supreme.views.plugins_view import PluginsView
from annotator_supreme.views.webapp.image_test import ImageTestViewWebApp
from annotator_supreme.views.webapp.annotation_view import AnnotationViewWebApp
from annotator_supreme.views.webapp.dataset_view import DatasetViewWebApp
from annotator_supreme.views.webapp.plugins_view import PluginsViewWebApp
from annotator_supreme.views.webapp.upload_video_view import UploadVideoViewWebApp
from annotator_supreme.views.webapp.upload_view import UploadViewWebApp
from annotator_supreme.views.webapp.visualize_images_view import VisualizeImagesViewWebApp
from annotator_supreme.views.webapp.login_view import LoginViewWebApp
from annotator_supreme.views.webapp.beer_view import BeerViewWebApp
VersionView.register(app)
AnnoView.register(app)
DatasetView.register(app)
ImageView.register(app)
ImageEditView.register(app)
PluginsView.register(app)
ImageTestViewWebApp.register(app)
AnnotationViewWebApp.register(app)
DatasetViewWebApp.register(app)
PluginsViewWebApp.register(app)
UploadViewWebApp.register(app)
UploadVideoViewWebApp.register(app)
VisualizeImagesViewWebApp.register(app)
LoginViewWebApp.register(app)
BeerViewWebApp.register(app)
# app.wsgi_app = ProxyFix(app.wsgi_app)
def build_app():
server_env = os.getenv('SERVER_ENV')
if server_env is not None:
server_env = server_env.lower().strip()
envs = {'production': 'production.py', 'development': 'development.py', 'on_premise': 'premise.py'}
env_file = ''
if server_env is None or server_env not in envs.keys():
logging.warning(
'No SERVER_ENV variable found. Assuming development...')
env_file = envs['development']
server_env = 'development'
else:
env_file = envs[server_env]
app.config.from_pyfile(env_file)
CORS(app)
CLUSTER_IP = os.getenv("CLUSTER_IP")
if CLUSTER_IP is None:
app.config["CLUSTER_IP"] = "127.0.0.1"
else:
app.config["CLUSTER_IP"] = CLUSTER_IP
# we define an app secret key to keep session variables secure
app.secret_key = '6869fab6ae6e276e7f6e1c3fcf5253ca'
# seting the logger using the flask
# logging.basicConfig(format='annotator-supreme: %(asctime)s - %(levelname)s - %(message)s')
app.debug = app.config["APP_DEBUG"]
fmt = '%(levelname)s - [%(asctime)s] - %(name)s : %(message)s'
formatter = logging.Formatter(fmt)
app.logger.setLevel(app.config["LOG_LEVEL"])
# Bug?? the lines bellow actually duplicate the logging
#for handler in app.logger.handlers:
# app.logger.warning("Changing handler: %s",handler)
# handler.setFormatter(formatter)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
app.logger.warning("ROOT HANDLER: %s",handler)
handler.setFormatter(formatter)
app.logger.info("Setting up database (Cassandra)...")
from annotator_supreme.controllers import database_controller
db_controller = database_controller.DatabaseController()
db_controller.setup_database()
app.logger.info('DB Controller done.')
# from annotator_supreme.controllers.base_controller import ReverseProxied
# app.wsgi_app = ReverseProxied(app.wsgi_app)
app.logger.info('Registering views')
setup_views()
app.logger.info('done.')
app.logger.info('App %s built successfully!', __name__)
app.logger.info('annotator-supreme - The most awesome annotator and provider of CV datasets.')
app.logger.info('app.debug = %s', app.debug)
return app
|
py | b409c3010f9f63b528c2519bb0fabdffa77c364e | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1ControllerRevision(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'data': 'object',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'revision': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'data': 'data',
'kind': 'kind',
'metadata': 'metadata',
'revision': 'revision'
}
def __init__(self, api_version=None, data=None, kind=None, metadata=None, revision=None, local_vars_configuration=None): # noqa: E501
"""V1ControllerRevision - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._data = None
self._kind = None
self._metadata = None
self._revision = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if data is not None:
self.data = data
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.revision = revision
@property
def api_version(self):
"""Gets the api_version of this V1ControllerRevision. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ControllerRevision. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ControllerRevision.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ControllerRevision. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def data(self):
"""Gets the data of this V1ControllerRevision. # noqa: E501
Data is the serialized representation of the state. # noqa: E501
:return: The data of this V1ControllerRevision. # noqa: E501
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this V1ControllerRevision.
Data is the serialized representation of the state. # noqa: E501
:param data: The data of this V1ControllerRevision. # noqa: E501
:type: object
"""
self._data = data
@property
def kind(self):
"""Gets the kind of this V1ControllerRevision. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ControllerRevision. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ControllerRevision.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ControllerRevision. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ControllerRevision. # noqa: E501
:return: The metadata of this V1ControllerRevision. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ControllerRevision.
:param metadata: The metadata of this V1ControllerRevision. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def revision(self):
"""Gets the revision of this V1ControllerRevision. # noqa: E501
Revision indicates the revision of the state represented by Data. # noqa: E501
:return: The revision of this V1ControllerRevision. # noqa: E501
:rtype: int
"""
return self._revision
@revision.setter
def revision(self, revision):
"""Sets the revision of this V1ControllerRevision.
Revision indicates the revision of the state represented by Data. # noqa: E501
:param revision: The revision of this V1ControllerRevision. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and revision is None: # noqa: E501
raise ValueError("Invalid value for `revision`, must not be `None`") # noqa: E501
self._revision = revision
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ControllerRevision):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ControllerRevision):
return True
return self.to_dict() != other.to_dict()
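# Illustrative usage sketch (not part of the generated model; assumes the
# standard kubernetes Python client package layout and its V1ObjectMeta helper):
#
#   from kubernetes import client
#
#   rev = client.V1ControllerRevision(
#       api_version="apps/v1",
#       kind="ControllerRevision",
#       metadata=client.V1ObjectMeta(name="web-1"),
#       data={"spec": {"replicas": 3}},
#       revision=1,
#   )
#   print(rev.to_dict())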
|
py | b409c3196808f95ee8dee734bd5999fd256a29e2 | from tests.testmodels import Event, Team, Tournament
from tortoise.aggregation import Count, Min, Sum
from tortoise.contrib import test
from tortoise.exceptions import ConfigurationError
class TestAggregation(test.TestCase):
async def test_aggregation(self):
tournament = Tournament(name="New Tournament")
await tournament.save()
await Tournament.create(name="Second tournament")
await Event(name="Without participants", tournament_id=tournament.id).save()
event = Event(name="Test", tournament_id=tournament.id)
await event.save()
participants = []
for i in range(2):
team = Team(name=f"Team {(i + 1)}")
await team.save()
participants.append(team)
await event.participants.add(participants[0], participants[1])
await event.participants.add(participants[0], participants[1])
tournaments_with_count = (
await Tournament.all()
.annotate(events_count=Count("events"))
.filter(events_count__gte=1)
)
self.assertEqual(len(tournaments_with_count), 1)
self.assertEqual(tournaments_with_count[0].events_count, 2)
event_with_lowest_team_id = (
await Event.filter(id=event.id).first().annotate(lowest_team_id=Min("participants__id"))
)
self.assertEqual(event_with_lowest_team_id.lowest_team_id, participants[0].id)
ordered_tournaments = (
await Tournament.all().annotate(events_count=Count("events")).order_by("events_count")
)
self.assertEqual(len(ordered_tournaments), 2)
self.assertEqual(ordered_tournaments[1].id, tournament.id)
event_with_annotation = (
await Event.all().annotate(tournament_test_id=Sum("tournament__id")).first()
)
self.assertEqual(
event_with_annotation.tournament_test_id, event_with_annotation.tournament_id
)
with self.assertRaisesRegex(ConfigurationError, "name__id not resolvable"):
await Event.all().annotate(tournament_test_id=Sum("name__id")).first()
|
py | b409c3f9c3d17de77b627819542dc36eb9a5873d | '''
Blind Source Separation offline example
=======================================================
Demonstrate the performance of different blind source separation (BSS) algorithms:
1) Independent Vector Analysis (IVA)
The method implemented is described in the following publication.
N. Ono, *Stable and fast update rules for independent vector analysis based
on auxiliary function technique*, Proc. IEEE, WASPAA, 2011.
2) Independent Low-Rank Matrix Analysis (ILRMA)
The method implemented is described in the following publications
D. Kitamura, N. Ono, H. Sawada, H. Kameoka, H. Saruwatari, *Determined blind
source separation unifying independent vector analysis and nonnegative matrix
factorization,* IEEE/ACM Trans. ASLP, vol. 24, no. 9, pp. 1626-1641, September 2016
D. Kitamura, N. Ono, H. Sawada, H. Kameoka, and H. Saruwatari *Determined Blind
Source Separation with Independent Low-Rank Matrix Analysis*, in Audio Source Separation,
S. Makino, Ed. Springer, 2018, pp. 125-156.
Both algorithms work in the STFT domain. The test files were extracted from the
`CMU ARCTIC <http://www.festvox.org/cmu_arctic/>`_ corpus.
Depending on the input arguments, running this script will do the following:
1. Separate the sources.
2. Show a plot of the clean and separated spectrograms
3. Show a plot of the SDR and SIR as a function of the number of iterations.
4. Create a `play(ch)` function that can be used to play the `ch` source (if you are in ipython say).
5. Save the separated sources as .wav files
6. Show a GUI where a mixed signals and the separated sources can be played
This script requires the `mir_eval` package to run, and the `tkinter` and `sounddevice` packages for the GUI option.
'''
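# Example invocations (illustrative; adjust the script path to your checkout):
#
#   python bss_example.py -a ilrma -b 2048 --save
#   python bss_example.py -a auxiva --gui
#
# The -a/--algo, -b/--block, --gui and --save flags are defined by the argparse
# setup in the __main__ block below.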
import numpy as np
from scipy.io import wavfile
from mir_eval.separation import bss_eval_sources
# We concatenate a few samples to make them long enough
wav_files = [
['examples/input_samples/cmu_arctic_us_axb_a0004.wav',
'examples/input_samples/cmu_arctic_us_axb_a0005.wav',
'examples/input_samples/cmu_arctic_us_axb_a0006.wav',],
['examples/input_samples/cmu_arctic_us_aew_a0001.wav',
'examples/input_samples/cmu_arctic_us_aew_a0002.wav',
'examples/input_samples/cmu_arctic_us_aew_a0003.wav',]
]
if __name__ == '__main__':
choices = ['ilrma', 'auxiva']
import argparse
parser = argparse.ArgumentParser(description='Demonstration of blind source separation using IVA or ILRMA.')
parser.add_argument('-b', '--block', type=int, default=2048,
help='STFT block size')
parser.add_argument('-a', '--algo', type=str, default=choices[0], choices=choices,
help='Chooses BSS method to run')
parser.add_argument('--gui', action='store_true',
help='Creates a small GUI for easy playback of the sound samples')
parser.add_argument('--save', action='store_true',
help='Saves the output of the separation to wav files')
args = parser.parse_args()
if args.gui:
# avoids a bug with tkinter and matplotlib
import matplotlib
matplotlib.use('TkAgg')
import pyroomacoustics as pra
# STFT frame length
L = args.block
# Room 4m by 6m
room_dim = [8, 9]
# source location
source = np.array([1, 4.5])
# create an anechoic room with sources and mics
room = pra.ShoeBox(
room_dim,
fs=16000,
max_order=15,
absorption=0.35,
sigma2_awgn=1e-8)
# get signals
signals = [ np.concatenate([wavfile.read(f)[1].astype(np.float32)
for f in source_files])
for source_files in wav_files ]
delays = [1., 0.]
locations = [[2.5,3], [2.5, 6]]
# add the sources to the room with silent (zero) signals for now;
# the real signals are filled in one at a time below so each source can be recorded separately
for sig, d, loc in zip(signals, delays, locations):
room.add_source(loc, signal=np.zeros_like(sig), delay=d)
# add microphone array
room.add_microphone_array(
pra.MicrophoneArray(np.c_[[6.5, 4.49], [6.5, 4.51]], fs=room.fs)
)
# compute RIRs
room.compute_rir()
# Record each source separately
separate_recordings = []
for source, signal in zip(room.sources, signals):
source.signal[:] = signal
room.simulate()
separate_recordings.append(room.mic_array.signals)
source.signal[:] = 0.
separate_recordings = np.array(separate_recordings)
# Mix down the recorded signals
mics_signals = np.sum(separate_recordings, axis=0)
# Monitor Convergence
#####################
ref = np.moveaxis(separate_recordings, 1, 2)
SDR, SIR = [], []
def convergence_callback(Y):
global SDR, SIR
from mir_eval.separation import bss_eval_sources
ref = np.moveaxis(separate_recordings, 1, 2)
y = np.array([pra.istft(Y[:,:,ch], L, L,
transform=np.fft.irfft, zp_front=L//2, zp_back=L//2) for ch in range(Y.shape[2])])
sdr, sir, sar, perm = bss_eval_sources(ref[:,:y.shape[1]-L//2,0], y[:,L//2:ref.shape[1]+L//2])
SDR.append(sdr)
SIR.append(sir)
# START BSS
###########
# The STFT needs front *and* back padding
X = np.array([pra.stft(ch, L, L, transform=np.fft.rfft, zp_front=L//2, zp_back=L//2) for ch in mics_signals])
X = np.moveaxis(X, 0, 2)
# Run BSS
bss_type = args.algo
if bss_type == 'auxiva':
# Run AuxIVA
Y = pra.bss.auxiva(X, n_iter=30, proj_back=True, callback=convergence_callback)
elif bss_type == 'ilrma':
# Run ILRMA
Y = pra.bss.ilrma(X, n_iter=30, n_components=30, proj_back=True,
callback=convergence_callback)
# Run iSTFT
y = np.array([pra.istft(Y[:,:,ch], L, L, transform=np.fft.irfft, zp_front=L//2, zp_back=L//2) for ch in range(Y.shape[2])])
# Compare SIR
#############
sdr, sir, sar, perm = bss_eval_sources(ref[:,:y.shape[1]-L//2,0], y[:,L//2:ref.shape[1]+L//2])
print('SDR:', sdr)
print('SIR:', sir)
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(2,2,1)
plt.specgram(ref[0,:,0], NFFT=1024, Fs=room.fs)
plt.title('Source 0 (clean)')
plt.subplot(2,2,2)
plt.specgram(ref[1,:,0], NFFT=1024, Fs=room.fs)
plt.title('Source 1 (clean)')
plt.subplot(2,2,3)
plt.specgram(y[perm[0],:], NFFT=1024, Fs=room.fs)
plt.title('Source 0 (separated)')
plt.subplot(2,2,4)
plt.specgram(y[perm[1],:], NFFT=1024, Fs=room.fs)
plt.title('Source 1 (separated)')
plt.tight_layout(pad=0.5)
plt.figure()
a = np.array(SDR)
b = np.array(SIR)
plt.plot(np.arange(a.shape[0]) * 10, a[:,0], label='SDR Source 0', c='r', marker='*')
plt.plot(np.arange(a.shape[0]) * 10, a[:,1], label='SDR Source 1', c='r', marker='o')
plt.plot(np.arange(b.shape[0]) * 10, b[:,0], label='SIR Source 0', c='b', marker='*')
plt.plot(np.arange(b.shape[0]) * 10, b[:,1], label='SIR Source 1', c='b', marker='o')
plt.legend()
plt.tight_layout(pad=0.5)
if not args.gui:
plt.show()
else:
plt.show(block=False)
if args.save:
from scipy.io import wavfile
wavfile.write('bss_iva_mix.wav', room.fs,
pra.normalize(mics_signals[0,:], bits=16).astype(np.int16))
for i, sig in enumerate(y):
wavfile.write('bss_iva_source{}.wav'.format(i+1), room.fs,
pra.normalize(sig, bits=16).astype(np.int16))
if args.gui:
# Make a simple GUI to listen to the separated samples
from tkinter import Tk, Button, Label
import sounddevice as sd
# Now come the GUI part
class PlaySoundGUI(object):
def __init__(self, master, fs, mix, sources):
self.master = master
self.fs = fs
self.mix = mix
self.sources = sources
master.title("A simple GUI")
self.label = Label(master, text="This is our first GUI!")
self.label.pack()
self.mix_button = Button(master, text='Mix', command=lambda: self.play(self.mix))
self.mix_button.pack()
self.buttons = []
for i, source in enumerate(self.sources):
self.buttons.append(Button(master, text='Source ' + str(i+1), command=lambda src=source : self.play(src)))
self.buttons[-1].pack()
self.stop_button = Button(master, text="Stop", command=sd.stop)
self.stop_button.pack()
self.close_button = Button(master, text="Close", command=master.quit)
self.close_button.pack()
def play(self, src):
sd.play(pra.normalize(src) * 0.75, samplerate=self.fs, blocking=False)
root = Tk()
my_gui = PlaySoundGUI(root, room.fs, mics_signals[0,:], y)
root.mainloop()
|
py | b409c4125fb36fc6a8b46f66cd8c8ffbfa2be979 | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.hcl_product import HclProduct # noqa: E501
from intersight.rest import ApiException
class TestHclProduct(unittest.TestCase):
"""HclProduct unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHclProduct(self):
"""Test HclProduct"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.hcl_product.HclProduct() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b409c425ac9573df38f43d884de318cb77ddbfea | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The VCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *
class TxnMallTest(VCoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1240 BTC to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = 1240
outputs[change_address] = 1248 - 1240 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two spends from the "foo" and "bar" accounts
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 40, minus 20, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1240 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance("foo"), 1219)
assert_equal(self.nodes[0].getbalance("bar"), 29)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-1219
- 29
-1240
+ 100
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
# Node1's "from0" account balance should be just the doublespend:
assert_equal(self.nodes[1].getbalance("from0"), 1240)
if __name__ == '__main__':
TxnMallTest().main()
|
py | b409c57967f45fa4f198445d3bcf8ab3a360a3df | import io
import os
import subprocess
from pathlib import Path
from threading import Thread
base = Path(__file__).parent.resolve()
website_path = base / 'website'
my_env = os.environ.copy()
my_env['PYTHONPATH'] = ':'.join([str(base)] + [path for path in my_env.get('PYTHONPATH', '').split(':') if path])
def run_website():
proc = subprocess.Popen(["flask", "run"], cwd=str(website_path), stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=my_env)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
if line.strip():
print("website:", line.strip('\n'))
def run_bot():
proc = subprocess.Popen(["python", "main.py"], cwd=str(base), stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=my_env)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
if line.strip():
print("bot :", line.strip('\n'))
if __name__ == '__main__':
website_thread = Thread(target=run_website)
bot_thread = Thread(target=run_bot)
website_thread.start()
bot_thread.start()
website_thread.join()
bot_thread.join()
|
py | b409c63717e488c8e08b0eb2e5bc11880a5afb06 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
__author__ = 'andyguo'
from quaternion import Vec3f |
py | b409c71429c510f250791491fd25150492fd44b1 | import numpy as np
import torch
from torch.autograd import Variable
from . import agent
from . import cnn_agent
from . import history_records
import image_preprocessing
import utils
import torch.optim as optim
class AgentBasic(agent.Agent):
def __init__(self, scenario_name, agent_identifier, temperature=1.0, image_dim=80, lr=0.001):
super(AgentBasic, self).__init__(scenario_name, agent_identifier, temperature, image_dim, lr)
self.cnn = cnn_agent.CNN(number_actions=self.nb_available_buttons, image_dim=image_dim)
self.cnn.to(utils.DEVICE_NAME)
self.ai = cnn_agent.AI(brain=self.cnn, body=self.softmax_body)
self.optimizer = optim.Adam(self.cnn.parameters(), lr=self.lr)
def read_game_data(self, game):
buffer = game.get_state().screen_buffer
self.last_image = self.screen_processing(buffer, self.image_dim, self.image_dim)
return np.array([self.last_image])
def make_action(self, state_data):
return self.ai(state_data)[0][0]
def calculate_reward(self, game_reward):
self.last_reward = game_reward
return self.last_reward
def generate_history_record(self, action, game_finished):
return history_records.BasicStep(state=self.last_image, action=action, reward=self.last_reward,
done=game_finished)
def perform_training_step(self, batch, gamma):
image_inputs, targets = self.eligibility_trace(batch=batch, gamma=gamma)
image_inputs, targets = Variable(image_inputs).to(utils.DEVICE_NAME), Variable(targets)
predictions = self.cnn(image_inputs)
loss_error = self.loss(predictions, targets)
self.optimizer.zero_grad()
loss_error.backward()
self.optimizer.step()
def eligibility_trace(self, batch, gamma=0.99):
inputs = []
targets = []
for series in batch:
input = torch.from_numpy(np.array([series[0].state, series[-1].state], dtype=np.float32))
output = self.cnn(input.to(utils.DEVICE_NAME))
cumul_reward = 0.0 if series[-1].done else output[1].data.max()
for step in reversed(series[:-1]):
cumul_reward = step.reward + gamma * cumul_reward
state = series[0].state
target = output[0].data
target[series[0].action] = cumul_reward
inputs.append(state)
targets.append(target)
return torch.from_numpy(np.array(inputs, dtype=np.float32)), torch.stack(targets)
def load_agent_optimizer(self, model_path):
self.cnn, self.optimizer = utils.load(model_path, model_used=self.cnn, optimizer_used=self.optimizer)
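# Illustrative usage sketch (assumed; the Agent base class, the ViZDoom-style
# `game` object and the replay memory live elsewhere in this project):
#
#   agent = AgentBasic(scenario_name="basic", agent_identifier=0, image_dim=80, lr=0.001)
#   state = agent.read_game_data(game)            # grab and preprocess the screen buffer
#   action = agent.make_action(state)             # sample an action index from the CNN policy
#   reward = agent.calculate_reward(game_reward)  # game_reward comes from the environment step
#   record = agent.generate_history_record(action, game_finished=False)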
|
py | b409c737695f12bde1e2075ff69d0b904177beb9 | """Packaging rewordapp."""
from setuptools import setup, find_packages
setup(
name='rewordapp',
version='0.0.4',
license='BSD-3-Clause',
license_files=['LICENSE'],
description='The application to reword text.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Tuyen Mathew Duong',
author_email='[email protected]',
maintainer='Tuyen Mathew Duong',
maintainer_email='[email protected]',
install_requires=[],
url='https://github.com/Geeks-Trident-LLC/rewordapp',
packages=find_packages(
exclude=(
'tests*', 'testing*', 'examples*',
'build*', 'dist*', 'docs*', 'venv*'
)
),
test_suite='tests',
entry_points={
'console_scripts': [
'reword-app = rewordapp.application:execute',
]
},
classifiers=[
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
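# Illustrative build/usage notes (not part of the package metadata):
#
#   python -m pip install .    # install locally
#   python -m build            # build sdist/wheel (needs the `build` package)
#   reword-app --help          # console script defined in entry_points above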
|
py | b409c7c1222634261e0936b246d5637c46b8b33f | import numpy as np
import math
from typing import Tuple, Set
def conj_grad(A: np.matrix, b: np.ndarray, x_0: np.ndarray):
k = 0
r = {}; r[0] = b - A @ x_0
x = {}; x[0] = x_0
p = {}
tau = {}
mu = {}
while not math.isclose(np.linalg.norm(r[k], ord=2), 0):  # with the default abs_tol this only stops on an exactly zero residual
k += 1
if k == 1:
p[k] = r[0]
else:
tau[k-1] = (r[k-1].transpose() @ r[k-1]) / (r[k-2].transpose() @ r[k-2])
p[k] = r[k-1] + tau[k-1] * p[k-1]
mu[k] = (r[k-1].transpose() @ r[k-1]) / (p[k].transpose() @ A @ p[k])
x[k] = x[k-1] + mu[k] * p[k]
r[k] = r[k-1] - mu[k] * (A @ p[k])
x_star = x[k]
return x_star, k
def lu_solve(L: np.matrix, R: np.matrix, b: np.array) -> np.array:
y = np.zeros(b.size)
for m in range(0, b.size):
y[m] = b[m] - sum(
L[m][i] * y[i] for i in range(0, m)
)
y[m] /= L[m][m]
x = np.zeros(b.size)
for k in reversed(range(0, b.size)):
x[k] = y[k] - sum(
R[k][i] * x[i] for i in range(k + 1, b.size)
)
x[k] /= R[k][k]
return x
def conj_grad_precond(A: np.matrix, b: np.ndarray, x_0: np.ndarray, precond_func):
k = 0
r = {}; r[0] = b - A @ x_0
x = {}; x[0] = x_0
z = {}
p = {}
L, U = precond_func(A)
z[0] = lu_solve(L, U, r[0])
while not math.isclose(np.linalg.norm(r[k], ord=2), 0):
k += 1
if k == 1:
p[k] = z[0]
else:
tau = (r[k-1].transpose() @ z[k-1]) / (r[k-2].transpose() @ z[k-2])
p[k] = z[k-1] + tau * p[k-1]
mu = (r[k-1].transpose() @ z[k-1]) / (p[k].transpose() @ A @ p[k])
x[k] = x[k-1] + mu * p[k]
r[k] = r[k-1] - mu * (A @ p[k])
z[k] = lu_solve(L, U, r[k])
x_star = x[k]
return x_star, k
def matrix_portrait(A: np.matrix, e: float = None) -> Set[Tuple[int, int]]:
if e is None:
Omega = set()
n = A.shape[0]
for i in range(0, n):
for j in range(0, n):
if not math.isclose(A[i, j], 0):
Omega.add((i, j))
return Omega
else:
Omega = set()
n = A.shape[0]
for i in range(0, n):
for j in range(0, n):
if abs(A[i, j]) > e or i==j:
Omega.add((i, j))
return Omega
def incomplete_lu(A: np.matrix, Omega: Set[Tuple[int, int]], modified: bool = False) -> Tuple[np.matrix, np.matrix]:
A = A.copy()
n = A.shape[0]
L = np.eye(n, dtype=float)
R = np.zeros(A.shape, dtype=float)
for k in range(0, n):
for i in range(k, n):
if (k, i) in Omega:
R[k, i] = A[k, i]
elif modified:
R[k, k] -= A[k, i]
R[k, i] = 0
for j in range(k + 1, n):
L[j, k] = A[j, k] / A[k, k] if (j, k) in Omega else 0
for p in range(k + 1, n):
for q in range(k + 1, n):
A[p, q] -= L[p, k]*R[k, q]
return L, R
def ilu_k(A: np.matrix, k: int, modified: bool = False, e: float = None) -> Tuple[np.matrix, np.matrix]:
Omega = matrix_portrait(A, e)
for i in range(0, k+1):
L, R = incomplete_lu(A, Omega, modified)
T = L @ R - A
Omega |= matrix_portrait(T, e) # | is set union
return L, R
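if __name__ == "__main__":
    # Minimal self-check (illustrative, not part of the original module): on a
    # tridiagonal SPD matrix ILU(0) drops no fill-in, so the incomplete factors
    # solve the system exactly up to roundoff.
    A = np.array([[4., -1., 0.],
                  [-1., 4., -1.],
                  [0., -1., 4.]])
    b = np.array([1., 2., 3.])
    L_fac, R_fac = ilu_k(A, k=0)
    x = lu_solve(L_fac, R_fac, b)
    print("ILU(0) residual norm:", np.linalg.norm(A @ x - b))  # ~ machine precision
|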
py | b409c87cb2601767f9161953928f922d0d839d0a | """empty message
Revision ID: 0002_add_content_char_count
Revises: 0001_restart_migrations
Create Date: 2016-04-15 12:12:46.383782
"""
# revision identifiers, used by Alembic.
revision = "0002_add_content_char_count"
down_revision = "0001_restart_migrations"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column, table
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column("notifications", sa.Column("content_char_count", sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column("notifications", "content_char_count")
### end Alembic commands ###
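# Typically applied / rolled back with the standard Alembic CLI, e.g.
#   alembic upgrade 0002_add_content_char_count
#   alembic downgrade 0001_restart_migrations
# (assumed invocation; the project may wrap this in its own management command)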
|
py | b409c884c969eaece7239b70da03d38371989c95 | from eth_typing import Address
from web3 import Web3
from .contracts import (
token_interface,
rootchain_interface,
)
from .token import Token
from .user import User # FIXME remove all references of User
class RootChain:
"""
Utility class to provide API for contracts/RootChain.vy
"""
def __init__(self, w3: Web3, token_address: Address, rootchain_address: Address):
self._w3 = w3
self._token = w3.eth.contract(token_address, **token_interface)
self._contract = w3.eth.contract(rootchain_address, **rootchain_interface)
self.depositors = {}
self.pending_deposits = []
self.deposits = []
self.exits = []
self.challenges = []
def deposit(self, user: User, token: Token):
self.pending_deposits.append(token)
self.depositors[token.uid] = user
def publish(self):
for token in self.pending_deposits:
self.depositors[token.uid].depositAccepted(token)
self.deposits.append(token)
self.pending_deposits = []
def withdraw(self, token: Token):
self.pending_deposits.remove(token)
def startExit(self, user: User, token: Token):
# Assert user is receiver of exit transaction
assert token.history[-1].receiver == user.acct
if len(token.history) > 1:
# Assert parent receiver is sender of exit transaction
assert token.history[-2].receiver == token.history[-1].sender
else:
# Check that sender was depositor
assert self.depositors[token.uid].acct == token.history[-1].sender
self.exits.append(token)
def challengeExit(self, token: Token):
t = self.exits[self.exits.index(token)]
if len(t.history) >= 1 and len(token.history) >= 2:
challengeAfter = t.history[-1] == token.history[-2]
elif len(token.history) >= 1:
challengeAfter = self.depositors[token.uid].acct == token.history[0].sender and \
self.depositors[token.uid].acct != token.history[-1].receiver
else:
challengeAfter = False # Has to be at least one transaction deep
if len(t.history) >= 2 and len(token.history) >= 2:
challengeBetween = t.history[-2] == token.history[-2] and \
t.history[-1] != token.history[-1]
elif len(t.history) >= 1 and len(token.history) >= 1:
challengeBetween = self.depositors[token.uid].acct == t.history[-1].sender and \
t.history[-1] != token.history[-1] and \
t.history[-1].block_number > token.history[-1].block_number
else:
challengeBetween = False # Has to be at least one transaction deep
challengeBefore = True
if challengeAfter or challengeBetween:
self.exits.remove(token)
return True
elif challengeBefore:
self.challenges.append(token)
return False
else:
raise ValueError("Challenge not accepted!")
def respondChallenge(self, token: Token):
self.challenges.remove(token)
def finalizeExit(self, token: Token):
self.exits.remove(token)
if token not in self.challenges:
self.deposits.remove(token)
return True
else:
self.challenges.remove(token)
return False
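# Illustrative off-chain flow (assumed; User and Token are the helper classes
# imported above, and the constructor needs a connected Web3 instance plus the
# deployed token and root-chain contract addresses):
#
#   chain = RootChain(w3, token_address, rootchain_address)
#   chain.deposit(alice, token)      # queue the deposit and remember the depositor
#   chain.publish()                  # notify depositors and move tokens into `deposits`
#   chain.startExit(alice, token)    # alice must be the receiver of the token's last transfer
#   if chain.finalizeExit(token):    # succeeds only if no challenge is pending
#       ...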
|