ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a51bc782834cd28f5dfd80f6409f5e4f8b6e418 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-01-30 11:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0037_merge_20180130_0822'),
('api', '0035_merge_20180129_0859'),
]
operations = [
]
|
py | 1a51bccff61dbce04de17d0b968bf005ea30676b | #!/usr/bin/python
import multiprocessing
import containerstats
import etcd
import platform
import docker
import time
import os
import requests
dockerconnection = docker.Client(base_url='unix://var/run/docker.sock', timeout=2)
dockerconnection.close()
def getstats(obj):
etcd.CreateDir(DDS_ETCD_URL, platform.node() + '/' + obj.containername, DDS_CONTAINER_TTL)
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/cpuusage',
obj.getcontainercpuusage(dockerconnection)['cpuusage'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusage',
obj.getcontainermemusage(dockerconnection)['memusage'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusagepercent',
obj.getcontainermemusage(dockerconnection)['memusagepercent'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/netrx',
obj.getcontainernetusage(dockerconnection)['netrx'])
etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/nettx',
obj.getcontainernetusage(dockerconnection)['nettx'])
return True
if __name__ == '__main__':
if 'DDS_ETCD_URL' in os.environ:
DDS_ETCD_URL = os.environ['DDS_ETCD_URL']
else:
DDS_ETCD_URL = 'http://127.0.0.1:4001/v2/keys/'
if 'DDS_CONCURRENCY_LEVEL' in os.environ:
DDS_CONCURRENCY_LEVEL = int(os.environ['DDS_CONCURRENCY_LEVEL'])  # Pool expects an int
else:
DDS_CONCURRENCY_LEVEL = 8
# start values
DDS_HOST_TTL = 120
DDS_CONTAINER_TTL = 30
while True:
newpool = multiprocessing.Pool(processes=DDS_CONCURRENCY_LEVEL)
etcd.CreateDir(DDS_ETCD_URL, platform.node(), ttl=DDS_HOST_TTL)
containerlist = containerstats.getrunningcontainers(dockerconnection)
objlist = []
for container in containerlist:
objlist.append(containerstats.ContainerStats(container))
gatherstart = time.time()
# a read timeout can be raised if e.g. a container stops during data gathering
try:
newpool.map(getstats, objlist)
except requests.packages.urllib3.exceptions.ReadTimeoutError:
pass
newpool.close()
gatherstop = time.time()
gatherduration = int(gatherstop - gatherstart)
DDS_HOST_TTL = gatherduration * 5
DDS_CONTAINER_TTL = gatherduration * 3
time.sleep(gatherduration)
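# Hedged invocation sketch (not part of the original script): the collector is
# configured entirely through environment variables; the script filename and the
# etcd address below are placeholders, not values taken from this repository.
#
#   DDS_ETCD_URL=http://etcd.internal:4001/v2/keys/ \
#   DDS_CONCURRENCY_LEVEL=4 \
#   python stats_collector.py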
|
py | 1a51bd245accfc10cac03a49e60c7a0cf5bbf847 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parsing routines for the ODB++ line record text format
according to the ODB++ 7.0 specification:
http://www.odb-sa.com/wp-content/uploads/ODB_Format_Description_v7.pdf
"""
from collections import defaultdict
from .Utils import readFileLines, readZIPFileLines
def filter_line_record_lines(lines):
"Remove empty and '#'-only lines from the given line list"
return [
line for line in lines
if line and line != "#"
]
def read_raw_linerecords(filename):
"Read a .Z line record file and return only important lines in order"
try: # Assume file-like object
return filter_line_record_lines(filename.read().split("\n"))
except AttributeError:
open_fn = readZIPFileLines if filename.endswith(".Z") else readFileLines
return filter_line_record_lines(
open_fn(filename))
def group_by_section(lines):
"Group a line record file by the section. Returns a dict containing lists."
groups = defaultdict(list)
name = None
for line in lines:
if line.startswith("#"):
name = line.strip("#").strip()
else:
groups[name].append(line)
return dict(groups)
def read_linerecords(filename):
"Read a linerecord file and return a dict grouped by section"
return group_by_section(read_raw_linerecords(filename))
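# Hedged usage sketch (not part of the original module): because of the relative
# import of .Utils this is meant to be called from within the package; the file
# name "example.Z" is an assumption, not a path shipped with the code.
#
#     sections = read_linerecords("example.Z")
#     for section_name, section_lines in sections.items():
#         print(section_name, len(section_lines))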
|
py | 1a51beab77717e48ff2db75b97028cb30123b935 | """
Django settings for test1 project.
Generated by 'django-admin startproject' using Django 4.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-q9o966xjlvrcd6u55#4mxx$#@1lea6-6q=*5z&s_z)s^mvv20h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates/'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
py | 1a51bee2d772b7f388bf859ef9cd378569e96036 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
b1 = tsds.load_ozone()
df = b1.mPastData
for k in [1 , 5]:
df[b1.mTimeVar + "_" + str(k) + '_Daily'] = pd.date_range('2000-1-1', periods=df.shape[0], freq=str(k) + 'D')
#df.to_csv("outputs/ozone_WDHMS.csv");
#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()
for k in [1 , 5]:
for timevar in [b1.mTimeVar + "_" + str(k) + '_Daily']:
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.set_active_autoregressions([]);
lEngine.train(df , timevar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_" + timevar + "apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[timevar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
# lEngine.standardPlots(name = "outputs/ozone_" + timevar)
|
py | 1a51bf14775ac3bd4a78010a886b7e06d46cb066 | from .base import *
import os
import sys
DEBUG = False
ADMINS = (("ch1huizong", "[email protected]"),)
allowed_hosts = os.getenv('ALLOWED_HOSTS')
if allowed_hosts:
ALLOWED_HOSTS = allowed_hosts.split(",")
else:
print("ERROR ! Please Input ALLOWED_HOSTS env settings !")
sys.exit(1)
db_host = os.getenv("DB_HOST")
db_name = os.getenv("DB_NAME")
db_user = os.getenv("DB_USER")
db_password = os.getenv("DB_PASSWORD")
if db_host and db_name and db_user and db_password:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": db_name,
"HOST": db_host,
"USER": db_user,
"PASSWORD": db_password,
}
}
else:
print("ERROR ! Check DB SETTINGS !")
sys.exit(1)
SECURE_SSL_REDIRECT = True
CSRF_COOKIE_SECURE = True
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
py | 1a51c03be260a7538cc2c5594a170f289b58066a | store.set_global_value('hotkey', '<super>+p')
engine.set_return_value('<up>')
engine.run_script('mapkeys')
|
py | 1a51c0eb8d0bc7ed6341005d2205dc68fc9c74c9 | AVAILABLE_DATASET = ["dataset1", "dataset2"]
AVAILABLE_STRATS = ["GMV", "GMV_long", "GMV_lin", "GMV_nlin", "GMV_robust", "MeanVar_long"]
SAVE_DIR = './log'
DATES = ["2019-12-12",
"2020-01-12",
"2020-02-12",
"2020-03-12",
"2020-04-12",
"2020-05-12",
"2020-06-12",
"2020-07-12",
"2020-08-12",
"2020-09-12",
"2020-10-12",
"2020-11-12",
"2020-12-12",
"2021-01-12",
"2021-02-12",
"2021-03-12",
"2021-04-12",
"2021-05-12",
"2021-06-12",
"2021-07-12",
"2021-08-12",
"2021-09-12"]
|
py | 1a51c0fba590cbb7feef44e2cc4aa4bd4ba820ff | import requests
import os
import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# First we want to check the robots.txt file of the target website
# A function get_robot_txt is constructed to check any URL
def get_robot_txt(url):
if url.endswith('/'):
path = url
else:
path = url + '/'
req = requests.get(path + "robots.txt", data=None)
return req.text
# Objective website
URL = "https://www.mercadolibre.com.co"
# Read robots.txt file
print('robots.txt:', get_robot_txt(URL))
sites = [
{
'country': 'Argentina',
'url': 'https://listado.mercadolibre.com.ar'
},
{
'country': 'Bolivia',
'url': 'https://listado.mercadolibre.com.bo'
},
{
'country': 'Brasil',
'url': 'https://lista.mercadolivre.com.br'
},
{
'country': 'Chile',
'url': 'https://listado.mercadolibre.cl'
},
{
'country': 'Colombia',
'url': 'https://listado.mercadolibre.com.co'
},
{
'country': 'Costa Rica',
'url': 'https://listado.mercadolibre.co.cr'
},
{
'country': 'Dominicana',
'url': 'https://listado.mercadolibre.com.do'
},
{
'country': 'Ecuador',
'url': 'https://listado.mercadolibre.com.ec'
},
{
'country': 'Guatemala',
'url': 'https://listado.mercadolibre.com.gt'
},
{
'country': 'Honduras',
'url': 'https://listado.mercadolibre.com.hn'
},
{
'country': 'México',
'url': 'https://listado.mercadolibre.com.mx'
},
{
'country': 'Nicaragua',
'url': 'https://listado.mercadolibre.com.ni'
},
{
'country': 'Panamá',
'url': 'https://listado.mercadolibre.com.pa'
},
{
'country': 'Paraguay',
'url': 'https://listado.mercadolibre.com.py'
},
{
'country': 'Perú',
'url': 'https://listado.mercadolibre.com.pe'
},
{
'country': 'El Salvador',
'url': 'https://listado.mercadolibre.com.sv'
},
{
'country': 'Uruguay',
'url': 'https://listado.mercadolibre.com.uy'
},
{
'country': 'Venezuela',
'url': 'https://listado.mercadolibre.com.ve'
},
]
products = [
{
'name': 'playstation',
'uri': 'playstation-5#D[A:playstation%205]',
},
{
'name': 'macbook pro',
'uri': 'macbook-pro-13#D[A:macbook%20pro%2013]',
},
{
'name': 'iphone',
'uri': 'iphone-11-512#D[A:iphone%2011%20512]',
},
{
'name': 'bmw s1000rr',
'uri': 'bmw-s1000rr#D[A:bmw%20s1000rr]',
},
{
'name': 'alexa echo',
'uri': 'alexa-echo-4#D[A:alexa%20echo%204]',
},
]
# Setting options for the webdriver
option = webdriver.ChromeOptions()
option.add_argument(" — incognito") # open incognito mode
# set our UserAgent name, in this case AcademicCrawler
option.add_argument("user-agent=AcademicCrawler")
# Getting current folder path
#My_path = os.path.dirname(os.path.abspath(__file__))
# Delay/Pause of download Throttling
TimeOut = 2 # sec
# Looking for the chromedriver file (Download from http://chromedriver.chromium.org/downloads)
#browser = webdriver.Chrome(executable_path=My_path + '/chromedriver', chrome_options=option)
browser = webdriver.Chrome(ChromeDriverManager().install(), options=option)
# Check if our UseraAgent is OK
agent = browser.execute_script("return navigator.userAgent")
print('agent:', agent)
def get_items_names():
elements = browser.find_elements_by_css_selector(
'#root-app > div > div > section > ol > li > div > div > div.ui-search-result__content-wrapper > div.ui-search-item__group.ui-search-item__group--title > a > h2')
if len(elements) == 0:
print('Caso 2')
elements = browser.find_elements_by_css_selector(
'#root-app > div > div.ui-search-main.ui-search-main--exhibitor.ui-search-main--only-products > section > ol > li > div > div > a > div > div.ui-search-item__group.ui-search-item__group--title > h2')
if len(elements) == 0:
print('Caso 3')
elements = browser.find_elements_by_css_selector(
'#root-app > div > div > section > ol > li > div > div > a > div > div.ui-search-item__group.ui-search-item__group--title > h2')
return elements
def get_items_prices():
prices = browser.find_elements_by_css_selector(
'.ui-search-price:not(.ui-search-price--size-x-tiny) .ui-search-price__second-line')
return prices
filename = "MercadoLibreData.csv"
current_path = os.path.dirname(os.path.abspath(__file__))
filename_path = current_path + '/' + filename
# print('current_path:', current_path)
if os.path.isfile(filename_path):
    os.remove(filename_path)  # start each run with a fresh CSV
def write_file(text):
with open(filename_path, 'a') as file:
file.write(text)
file.close()
def authenticate_user():
browser.get('https://www.mercadolibre.com/jms/mco/lgz/login?platform_id=ML&go=https%3A%2F%2Fwww.mercadolibre.com.co%2F&loginType=explicit#nav-header')
browser.find_element_by_id("user_id").send_keys(os.getenv('mluser'))
browser.find_element_by_css_selector("button.andes-button > span:nth-child(1)").click()
# not possible because the login requires a captcha and two-factor authentication
browser.find_element_by_id("password").send_keys(os.getenv('mlpass'))
try:
write_file('product,country,url,item,precio\n')
for product in products:
for site in sites:
if site['country'] == 'Colombia':
pass
#authenticate_user()
print('looking:', site['country'], ', product:', product['name'])
write_file('"' + product['name'] + '",')
write_file('"' + site['country'] + '",')
# Get content from objective website
url = site['url'] + '/' + product['uri']
# Delay calculation
t0 = time.time()
# Fetch the page with the browser
browser.get(url)
# estimated response time in seconds
response_delay = time.time() - t0
# wait 10x the response time as a politeness delay
delay_time = 10 * response_delay
print('Wait for...', delay_time, 'seconds')
time.sleep(delay_time)
# Apply delay
browser.implicitly_wait(TimeOut)
write_file('"' + url + '",')
items_names = get_items_names()
items_prices = get_items_prices()
if len(items_prices) > 0:
item_name = items_names[0].text.replace('"', "&dquo;")
print('item_name:', item_name)
write_file('"' + item_name + '",')
item_price = items_prices[0].text.split("\n")[0]
print('item_price:', item_price)
write_file('"' + item_price + '"\n')
else:
write_file('"",\n')
except Exception as e:
print(e)
finally:
pass
#browser.quit()
|
py | 1a51c13075deba4d7927d32da03fbfa2c98355ea | from typing import List, Tuple
import q_network
from q_network import Q
import numpy as np
import gym  # required below for gym.make(envname)
import tools
import torch
# buffer hyperparameters
batchsize = 200 # batchsize for buffer sampling
buffer_maxlength = 1000 # max number of tuples held by buffer
episodes_til_buffer_sample = 2
buffer = tools.ReplayBuffer(buffer_maxlength) # buffer holds the memories of the exp replay
# DQL hyperparameters
steps_til_target_update = 50 # time steps for target update
num_episodes = 500 # number of episodes to run
# initialsize = 500 # initial time steps before start training - unused
gamma = .99 # discount
# tracking important things
list_of_episode_rewards = [] # records the reward per episode
q_prime_update_counter = 0 # count the number of steps taken before updating q_prime
# initialize environment
envname = "CartPole-v0"
env = gym.make(envname)
"""
obssize
Num Observation Min Max
0 Cart Position -2.4 2.4
1 Cart Velocity -Inf Inf
2 Pole Angle -41.8° 41.8°
3 Pole Velocity At Tip -Inf Inf
"""
# initialize the principal and the target Q nets
state_dim = env.observation_space.low.size
action_dim = env.action_space.n
lr = 1e-3
q_greedy: Q = Q(state_dim, action_dim, lr)
q_prime: Q = Q(state_dim, action_dim, lr)
for episode in range(num_episodes):
# Initialize and reset environment.
s = env.reset()
d = False
reward_sum = 0
while not d:
q_vals: q_network.QValue = q_greedy.predict_state_value(state=s)
# Choose action w/ epsilon greedy approach
if np.random.rand() < tools.epsilon(episode, num_episodes):
a = torch.tensor(env.action_space.sample())
else:
a = torch.argmax(q_vals)
assert a in [0, 1]
ns, r, d, _ = env.step(int(a)) # Perform action in the env.
d_ = 1 if d else 0
experience = (s, a, r, d_, ns) # experience/memory tuple
# Append experience to the replay buffer.
buffer.append(experience)
# Shorten buffer if it's too long.
while buffer.number > buffer_maxlength:
buffer.pop()
# Training theta by gradients # train_from_buffer_sample()
if (episode % episodes_til_buffer_sample == 0
and buffer.number > batchsize):
experience_batch: List[tools.Experience] = buffer.sample(batchsize)
experience_batch: Tuple[List[torch.Tensor], List[int], List[float],
List[bool], List[torch.Tensor]
] = list(map(list, zip(*experience_batch)))
states, actions, rewards, dones, next_states = experience_batch
q_vals_ns: q_network.QValue = q_prime.predict_state_value(next_states)
max_vals = torch.max(q_vals_ns, dim=1).values # take the max along the columns
targets = torch.tensor(rewards) + torch.tensor(gamma)*max_vals
done_indices = [i for i, d in enumerate(dones) if d]
for idx in done_indices:
targets[idx] = rewards[idx]
q_greedy.train(states, actions, targets) # update_dqn_greedy()
# 5)
if q_prime_update_counter % steps_til_target_update == 0:
tools.update_q_prime(q_greedy, q_prime)
# 6)
q_prime_update_counter += 1
reward_sum += r
s = ns
list_of_episode_rewards.append(reward_sum)
tools.plot_episode_rewards(list_of_episode_rewards, "episode_rewards") |
py | 1a51c1c2208e5ed41e478b0026608a4e8a0eb088 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from mock import MagicMock
from .. import parse
from ..data import ComponentID, Component, Data
from ..subset import Subset
class TestParse(object):
def test_re_matches_valid_names(self):
reg = parse.TAG_RE
valid = ['{a}', '{ a }', '{A}', '{a }', '{ a}',
'{a_}', '{abc_1}', '{_abc_1}', '{1}', '{1_}']
invalid = ['', '{}', '{a ']
for v in valid:
assert reg.match(v) is not None
for i in invalid:
assert reg.match(i) is None
def test_group(self):
reg = parse.TAG_RE
assert reg.match('{a}').group('tag') == 'a'
assert reg.match('{ a }').group('tag') == 'a'
assert reg.match('{ A }').group('tag') == 'A'
assert reg.match('{ Abc_ }').group('tag') == 'Abc_'
def test_reference_list(self):
cmd = '{a} - {b} + {c}'
refs = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
expected = set([1, 2, 3])
result = set(parse._reference_list(cmd, refs))
assert expected == result
def test_reference_list_invalid_cmd(self):
with pytest.raises(KeyError) as exc:
parse._reference_list('{a}', {})
assert exc.value.args[0] == ("Tags from command not in "
"reference mapping")
def test_dereference(self):
c1 = ComponentID('c1')
c2 = ComponentID('c2')
s1 = Subset(None, label='s1')
s2 = Subset(None, label='s2')
refs = dict([('c1', c1), ('c2', c2), ('s1', s1), ('s2', s2)])
cmd = '({c1} > 10) and {s1}'
expected = ('(data[references["c1"], __view] > 10) and '
'references["s1"].to_mask(__view)')
result = parse._dereference(cmd, refs)
assert expected == result
def test_validate(self):
ref = {'a': ComponentID('ca'), 'b': ComponentID('cb')}
parse._validate('{a} + {b}', ref)
parse._validate('{a}', ref)
parse._validate('3 + 4', ref)
with pytest.raises(parse.InvalidTagError) as exc:
parse._validate('{c}', ref)
assert exc.value.args[0] == ("Tag c not in reference mapping: "
"['a', 'b']")
def test_ensure_only_component_references(self):
ref = {'a': 1, 'b': ComponentID('b')}
F = parse._ensure_only_component_references
F('{b} + 5', ref)
with pytest.raises(TypeError) as exc:
F('{b} + {a}', ref)
assert exc.value.args[0] == ("Reference to a, which is not a "
"ComponentID")
with pytest.raises(TypeError) as exc:
F('{b} + {d}', ref)
assert exc.value.args[0] == ("Reference to d, which is not a "
"ComponentID")
class TestParsedCommand(object):
def test_evaluate_component(self):
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 5
cmd = '{comp1} * 5'
refs = {'comp1': c1}
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 25
data.__getitem__.assert_called_once_with((c1, None))
def test_evaluate_subset(self):
data = Data(x=[1, 2, 3])
sub1 = data.new_subset(data.id['x'] > 1)
sub2 = data.new_subset(data.id['x'] < 3)
cmd = '{s1} & {s2}'
refs = {'s1': sub1, 's2': sub2}
pc = parse.ParsedCommand(cmd, refs)
np.testing.assert_equal(pc.evaluate(data), [0, 1, 0])
def test_evaluate_function(self):
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 5
cmd = 'max({comp1}, 100)'
refs = {'comp1': c1}
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 100
data.__getitem__.assert_called_once_with((c1, None))
def test_evaluate_math(self):
# If numpy, np, and math aren't defined in the config.py file, they
# are added to the local variables available.
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 10
refs = {'comp1': c1}
cmd = 'numpy.log10({comp1})'
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 1
cmd = 'np.log10({comp1})'
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 1
cmd = 'math.log10({comp1})'
pc = parse.ParsedCommand(cmd, refs)
assert pc.evaluate(data) == 1
def test_evaluate_test(self):
data = MagicMock()
c1 = ComponentID('c1')
data.__getitem__.return_value = 10
refs = {'comp1': c1}
cmd = 'numpy.log10({comp1}) + 3.4 - {comp1}'
pc = parse.ParsedCommand(cmd, refs)
pc.evaluate_test()
cmd = 'nump.log10({comp1}) + 3.4 - {comp1}'
pc = parse.ParsedCommand(cmd, refs)
with pytest.raises(NameError) as exc:
pc.evaluate_test()
assert exc.value.args[0] == "name 'nump' is not defined"
class TestParsedComponentLink(object):
def make_link(self):
data = Data()
comp = Component(np.array([1, 2, 3]))
c1 = ComponentID('c1')
c2 = ComponentID('c2')
data.add_component(comp, c1)
cmd = '{comp1} * 100'
refs = {'comp1': c1}
pc = parse.ParsedCommand(cmd, refs)
cl = parse.ParsedComponentLink(c2, pc)
data.add_component_link(cl)
return data, c2
def test(self):
data, cid = self.make_link()
result = data[cid]
expected = np.array([100, 200, 300])
np.testing.assert_array_equal(result, expected)
def test_not_identity(self):
# regression test
d = Data(x=[1, 2, 3])
c2 = ComponentID('c2')
cmd = '{x}'
refs = {'x': d.id['x']}
pc = parse.ParsedCommand(cmd, refs)
link = parse.ParsedComponentLink(c2, pc)
assert not link.identity
def test_slice(self):
data, cid = self.make_link()
result = data[cid, ::2]
np.testing.assert_array_equal(result, [100, 300])
def test_save_load(self):
from .test_state import clone
d = Data(x=[1, 2, 3])
c2 = ComponentID('c2')
cmd = '{x} + 1'
refs = {'x': d.id['x']}
pc = parse.ParsedCommand(cmd, refs)
link = parse.ParsedComponentLink(c2, pc)
d.add_component_link(link)
d2 = clone(d)
np.testing.assert_array_equal(d2['c2'], [2, 3, 4])
class TestParsedSubsetState(object):
def setup_method(self, method):
data = Data(g=[2, 4, 6, 8])
s1 = data.new_subset()
s2 = data.new_subset()
s1.subset_state = np.array([1, 1, 1, 0], dtype=bool)
s2.subset_state = np.array([0, 1, 1, 1], dtype=bool)
self.refs = {'s1': s1, 's2': s2, 'g': data.id['g']}
self.data = data
def test_two_subset(self):
cmd = '{s1} & {s2}'
s = self.data.new_subset()
p = parse.ParsedCommand(cmd, self.refs)
state = parse.ParsedSubsetState(p)
s.subset_state = state
result = s.to_mask()
expected = np.array([0, 1, 1, 0], dtype=bool)
np.testing.assert_array_equal(result, expected)
def test_two_subset_and_component(self):
cmd = '{s1} & {s2} & ({g} < 6)'
s = self.data.new_subset()
p = parse.ParsedCommand(cmd, self.refs)
state = parse.ParsedSubsetState(p)
s.subset_state = state
result = s.to_mask()
expected = np.array([0, 1, 0, 0], dtype=bool)
np.testing.assert_array_equal(result, expected)
|
py | 1a51c208a73c073a7d15f00c46a1a1fa07513174 | # -*- coding: utf-8 -*-
"""
XForm Survey element classes for different question types.
"""
import os.path
import re
from pyxform.errors import PyXFormError
from pyxform.question_type_dictionary import QUESTION_TYPE_DICT
from pyxform.survey_element import SurveyElement
from pyxform.utils import (
basestring,
node,
unicode,
default_is_dynamic,
has_dynamic_label,
)
class Question(SurveyElement):
def validate(self):
SurveyElement.validate(self)
# make sure that the type of this question exists in the
# question type dictionary.
if self.type not in QUESTION_TYPE_DICT:
raise PyXFormError("Unknown question type '%s'." % self.type)
def xml_instance(self, **kwargs):
survey = self.get_root()
attributes = {}
attributes.update(self.get("instance", {}))
for key, value in attributes.items():
attributes[key] = survey.insert_xpaths(value, self)
if self.get("default") and not default_is_dynamic(self.default, self.type):
return node(self.name, unicode(self.get("default")), **attributes)
return node(self.name, **attributes)
def xml_control(self):
if self.type == "calculate" or (
("calculate" in self.bind or self.trigger) and not (self.label or self.hint)
):
nested_setvalues = self.get_root().get_setvalues_for_question_name(
self.name
)
if nested_setvalues:
for setvalue in nested_setvalues:
msg = (
"The question ${%s} is not user-visible so it can't be used as a calculation trigger for question ${%s}."
% (self.name, setvalue[0])
)
raise PyXFormError(msg)
return None
xml_node = self.build_xml()
if xml_node:
self.nest_setvalues(xml_node)
return xml_node
def nest_setvalues(self, xml_node):
nested_setvalues = self.get_root().get_setvalues_for_question_name(self.name)
if nested_setvalues:
for setvalue in nested_setvalues:
setvalue_attrs = {
"ref": self.get_root()
.insert_xpaths("${%s}" % setvalue[0], self.get_root())
.strip(),
"event": "xforms-value-changed",
}
if not (setvalue[1] == ""):
setvalue_attrs["value"] = self.get_root().insert_xpaths(
setvalue[1], self
)
setvalue_node = node("setvalue", **setvalue_attrs)
xml_node.appendChild(setvalue_node)
def build_xml(self):
return None
class InputQuestion(Question):
"""
This control string is the same for: strings, integers, decimals,
dates, geopoints, barcodes ...
"""
def build_xml(self):
control_dict = self.control
label_and_hint = self.xml_label_and_hint()
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
result = node(**control_dict)
if label_and_hint:
for element in self.xml_label_and_hint():
result.appendChild(element)
# Input types are used for selects with external choices sheets.
if self["query"]:
choice_filter = self.get("choice_filter")
query = "instance('" + self["query"] + "')/root/item"
choice_filter = survey.insert_xpaths(choice_filter, self, True)
if choice_filter:
query += "[" + choice_filter + "]"
result.setAttribute("query", query)
return result
class TriggerQuestion(Question):
def build_xml(self):
control_dict = self.control
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
return node("trigger", *self.xml_label_and_hint(), **control_dict)
class UploadQuestion(Question):
def _get_media_type(self):
return self.control["mediatype"]
def build_xml(self):
control_dict = self.control
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
control_dict["mediatype"] = self._get_media_type()
return node("upload", *self.xml_label_and_hint(), **control_dict)
class Option(SurveyElement):
def xml_value(self):
return node("value", self.name)
def xml(self):
item = node("item")
self.xml_label()
item.appendChild(self.xml_label())
item.appendChild(self.xml_value())
return item
def validate(self):
pass
class MultipleChoiceQuestion(Question):
def __init__(self, **kwargs):
kwargs_copy = kwargs.copy()
# Notice that choices can be specified under choices or children.
# I'm going to try to stick to just choices.
# Aliases in the json format will make it more difficult
# to use going forward.
choices = list(kwargs_copy.pop("choices", [])) + list(
kwargs_copy.pop("children", [])
)
Question.__init__(self, **kwargs_copy)
for choice in choices:
self.add_choice(**choice)
def add_choice(self, **kwargs):
option = Option(**kwargs)
self.add_child(option)
def validate(self):
Question.validate(self)
descendants = self.iter_descendants()
next(descendants) # iter_descendants includes self; we need to pop it
for choice in descendants:
choice.validate()
def build_xml(self):
assert self.bind["type"] in ["string", "odk:rank"]
survey = self.get_root()
control_dict = self.control.copy()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
result = node(**control_dict)
for element in self.xml_label_and_hint():
result.appendChild(element)
choices = survey.get("choices")
multi_language = False
if choices is not None and len(choices) > 0:
first_choices = next(iter(choices.values()))
multi_language = isinstance(first_choices[0].get("label"), dict)
# itemset are only supposed to be strings,
# check to prevent the rare dicts that show up
if self["itemset"] and isinstance(self["itemset"], basestring):
choice_filter = self.get("choice_filter")
itemset_value_ref = "name"
itemset, file_extension = os.path.splitext(self["itemset"])
has_media = False
has_dyn_label = False
is_previous_question = bool(re.match(r"^\${.*}$", self.get("itemset")))
if choices.get(itemset):
has_media = bool(choices[itemset][0].get("media"))
has_dyn_label = has_dynamic_label(choices[itemset], multi_language)
if file_extension in [".csv", ".xml"]:
itemset = itemset
itemset_label_ref = "label"
else:
if not multi_language and not has_media and not has_dyn_label:
itemset = self["itemset"]
itemset_label_ref = "label"
else:
itemset = self["itemset"]
itemset_label_ref = "jr:itext(itextId)"
choice_filter = survey.insert_xpaths(
choice_filter, self, True, is_previous_question
)
if is_previous_question:
path = (
survey.insert_xpaths(self["itemset"], self, reference_parent=True)
.strip()
.split("/")
)
nodeset = "/".join(path[:-1])
itemset_value_ref = path[-1]
itemset_label_ref = path[-1]
if choice_filter:
choice_filter = choice_filter.replace(
"current()/" + nodeset, "."
).replace(nodeset, ".")
else:
# Choices must have a value. Filter out repeat instances without
# an answer for the linked question
name = path[-1]
choice_filter = f"./{name} != ''"
else:
nodeset = "instance('" + itemset + "')/root/item"
if choice_filter:
nodeset += "[" + choice_filter + "]"
if self["parameters"]:
params = self["parameters"]
if "randomize" in params and params["randomize"] == "true":
nodeset = "randomize(" + nodeset
if "seed" in params:
if params["seed"].startswith("${"):
nodeset = (
nodeset
+ ", "
+ survey.insert_xpaths(params["seed"], self).strip()
)
else:
nodeset = nodeset + ", " + params["seed"]
nodeset += ")"
itemset_children = [
node("value", ref=itemset_value_ref),
node("label", ref=itemset_label_ref),
]
result.appendChild(node("itemset", *itemset_children, nodeset=nodeset))
else:
for child in self.children:
result.appendChild(child.xml())
return result
class SelectOneQuestion(MultipleChoiceQuestion):
def __init__(self, **kwargs):
super(SelectOneQuestion, self).__init__(**kwargs)
self._dict[self.TYPE] = "select one"
class Tag(SurveyElement):
def __init__(self, **kwargs):
kwargs_copy = kwargs.copy()
choices = kwargs_copy.pop("choices", []) + kwargs_copy.pop("children", [])
super(Tag, self).__init__(**kwargs_copy)
if choices:
self.children = []
for choice in choices:
option = Option(**choice)
self.add_child(option)
def xml(self):
result = node("tag", key=self.name)
self.xml_label()
result.appendChild(self.xml_label())
for choice in self.children:
result.appendChild(choice.xml())
return result
def validate(self):
pass
class OsmUploadQuestion(UploadQuestion):
def __init__(self, **kwargs):
kwargs_copy = kwargs.copy()
tags = kwargs_copy.pop("tags", []) + kwargs_copy.pop("children", [])
super(OsmUploadQuestion, self).__init__(**kwargs_copy)
if tags:
self.children = []
for tag in tags:
self.add_tag(**tag)
def add_tag(self, **kwargs):
tag = Tag(**kwargs)
self.add_child(tag)
def build_xml(self):
control_dict = self.control
control_dict["ref"] = self.get_xpath()
control_dict["mediatype"] = self._get_media_type()
result = node("upload", *self.xml_label_and_hint(), **control_dict)
for osm_tag in self.children:
result.appendChild(osm_tag.xml())
return result
class RangeQuestion(Question):
def build_xml(self):
control_dict = self.control
label_and_hint = self.xml_label_and_hint()
survey = self.get_root()
# Resolve field references in attributes
for key, value in control_dict.items():
control_dict[key] = survey.insert_xpaths(value, self)
control_dict["ref"] = self.get_xpath()
params = self.get("parameters", {})
control_dict.update(params)
result = node(**control_dict)
if label_and_hint:
for element in self.xml_label_and_hint():
result.appendChild(element)
return result
|
py | 1a51c648d33ddc81e8e960208f117f38245eabd2 | import os
import pickle
import patoolib
import urllib.request
import numpy as np
import pandas as pd
from typing import Optional, Tuple, List, Dict
from scipy.signal import resample_poly as resample
from fractions import Fraction
from helpers import _round_up, _logging_messages
# KEY: brew install rar for macOS
# TODO: add more methods to read multi-channel EEG
def load_data_single_channel(
fs: Optional[float] = None,
path: Optional[str] = None,
verbose: Optional[int] = 1
) -> Tuple[List, Dict[str, Dict], List[float]]:
"""
Load single EEG channel (CZ-A1 or C3-A1) from DREAMS Sleep Spindles dataset
Data hosted at https://zenodo.org/record/2650142/files/DatabaseSpindles.rar
PS: mne methods do not work with the edf files provided by the dataset authors
Parameters
----------
fs : float, default=50
Sampling rate, in Hz.
If None, the data is not resampled.
path : str, default=None
Path to directory where data will be saved.
If 'None', the default path is './../datasets'.
verbose: int, default=1
Verbosity mode. 0 = silent, 1 = messages.
Returns
-------
eeg : list (length 8)
Each element (1 per subject) contains a 1-D numpy array with shape (30*60*Fs, )
expert_labels : dictionary (8 keys)
Each key-value (1 per subject) contains one dictionary with 3 keys-values: Expert1, Expert2, ExpertsUnion
where expert labels and their union are stored, respectively.
According to the datasets authors:
Only subjects 1 through 6 have all 3 possible labels, subjects 7 and 8 only have Expert1 and ExpertsUnion
Expert1 labels were cut off after 1000 seconds
new_fs : list (length 8)
Each element (1 per subject) contains the sampling frequency (float) of the corresponding eeg entries
"""
# Prefix of processed files
fname_pkl = 'DREAMS_SleepSpindles'
# Directory where to cache the dataset locally (relative to /datasets)
folder = 'DREAMS_Sleep_Spindles'
if path is None:
path = os.path.join(os.path.relpath(os.getcwd()), '..', 'datasets')
# Check if folder exists
dirname = os.path.join(path, folder)
if not os.path.isdir(dirname):
os.mkdir(dirname)
# Name of .rar file from source
fname_rar = os.path.join(dirname, 'DatabaseSpindles.rar')
ftest = os.path.join(dirname, 'excerpt1.txt')
# Check if data has already been downloaded and processed
tmp = '.pkl' if fs is None else '_Fs' + str(fs) + 'Hz.pkl'
regenerate = False
processing_flag = False
# Messages according to verbosity mode
msg = _logging_messages(verbose, {'download_msg': 'Downloading DREAMS Sleep Spindles dataset... (45 MBytes)'})
if not regenerate:
# Check for pickle file first
if not os.path.isfile(os.path.join(dirname, fname_pkl + tmp)):
# Check for .rar file then
if os.path.isfile(fname_rar):
if not os.path.isfile(ftest):
print(msg['rar_found_msg'])
patoolib.extract_archive(fname_rar, outdir=dirname, verbosity=-1)
print(msg['extract_rar_msg'])
else:
processing_flag = True
else:
print(msg['file_not_found_msg'])
regenerate = True
processing_flag = True
else:
print(msg['load_pk_msg'])
with open(os.path.join(dirname, fname_pkl + tmp), 'rb') as f:
eeg, expert_labels, new_fs = pickle.load(f)
return eeg, expert_labels, new_fs
if regenerate:
# Download dataset from https://zenodo.org/record/2650142/files/DatabaseSpindles.rar
print(msg['download_msg'])
# TODO: add download bar
url = 'https://zenodo.org/record/2650142/files/DatabaseSpindles.rar'
urllib.request.urlretrieve(url, fname_rar)
if os.path.isfile(fname_rar):
print(msg['rar_found_msg'])
patoolib.extract_archive(fname_rar, outdir=dirname, verbosity=-1)
print(msg['extract_rar_msg'])
if processing_flag:
# Number of subjects
n_sub = 8
# Duration of recordings - 30 minutes
len_record_sec = 30*60
# Initialize variables
eeg = [[] for _ in range(n_sub)]
sub_keys = ['Subject' + str(i) for i in range(1, n_sub + 1)]
expert_labels = dict(zip(sub_keys, (dict() for _ in sub_keys)))
expert_names = ['Expert1', 'Expert2']
# Resampling if fs is provided
fs_sub = np.zeros(n_sub)
if fs is not None:
for i in range(1, n_sub + 1):
df = pd.read_csv(os.path.join(dirname, "excerpt" + str(i) + ".txt"))
# Special case
if i == 6:
df = df[:360000]
# Original sampling frequency
fs_sub[i-1] = int(len(df) / len_record_sec)
# Resampling
f = Fraction(fs_sub[i-1]/fs).limit_denominator()
eeg[i-1] = resample(df.iloc[:, 0].to_numpy(), f.denominator, f.numerator)[..., None]
new_fs = [fs] * n_sub
else:
for i in range(1, n_sub + 1):
df = pd.read_csv(os.path.join(dirname, "excerpt" + str(i) + ".txt"))
# Special case
if i == 6:
df = df[:360000]
# Original sampling frequency
fs_sub[i-1] = int(len(df) / len_record_sec)
# No resampling
eeg[i-1] = df.iloc[:, 0].to_numpy()[..., None]
new_fs = fs_sub.tolist()
# Expert labels
for sub_i in range(1, n_sub + 1):
if sub_i <= 6:
aux = np.zeros((len_record_sec * round(new_fs[sub_i-1]), 2), dtype=np.uint8)
for i in range(1, 3):
df = pd.read_csv(os.path.join(dirname,
"Visual_scoring" + str(i) + "_excerpt" + str(sub_i) + ".txt"),
header=0)
df['SSloc'], df['SSdur'] = zip(*df[df.columns[0]].str.split())
df['SSloc'] = df['SSloc'].astype(float)
df['SSdur'] = df['SSdur'].astype(float)
for j in range(len(df)):
aux[_round_up(new_fs[sub_i - 1] * df['SSloc'][j]) - 1:
_round_up(new_fs[sub_i - 1] * df['SSloc'][j]) +
_round_up(new_fs[sub_i - 1] * df['SSdur'][j]), i - 1] = 1
expert_labels[sub_keys[sub_i - 1]][expert_names[i - 1]] = 1 + aux[:, i - 1]
labels = 1 + np.sum(aux, axis=1)
labels[labels > 1] = 2
expert_labels[sub_keys[sub_i - 1]]["ExpertsUnion"] = labels
else:
aux = np.zeros((len_record_sec * round(new_fs[sub_i-1])), dtype=np.uint8)
df = pd.read_csv(os.path.join(dirname,
"Visual_scoring1_excerpt" + str(sub_i) + ".txt"),
header=0)
df['SSloc'], df['SSdur'] = zip(*df[df.columns[0]].str.split())
df['SSloc'] = df['SSloc'].astype(float)
df['SSdur'] = df['SSdur'].astype(float)
for j in range(len(df)):
aux[_round_up(new_fs[sub_i - 1] * df['SSloc'][j]) - 1:
_round_up(new_fs[sub_i - 1] * df['SSloc'][j]) +
_round_up(new_fs[sub_i - 1] * df['SSdur'][j])] = 1
labels = 1 + aux
expert_labels[sub_keys[sub_i - 1]][expert_names[0]] = labels
expert_labels[sub_keys[sub_i - 1]]["ExpertsUnion"] = labels
# Save EEG, labels, sampling rates
with open(os.path.join(dirname, fname_pkl + tmp), 'wb') as f:
pickle.dump([eeg, expert_labels, new_fs], f)
print(msg['save_pk_msg'])
return eeg, expert_labels, new_fs
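# Hedged usage sketch (not part of the original module): resamples to 50 Hz and
# prints per-subject signal shapes; the first call downloads ~45 MB to ../datasets.
if __name__ == "__main__":  # pragma: no cover
    eeg, expert_labels, new_fs = load_data_single_channel(fs=50)
    for subject, signal, rate in zip(expert_labels, eeg, new_fs):
        print(subject, signal.shape, rate)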
|
py | 1a51c70c6b4cc0c523d0d4b1d16dc77e2617b478 | from __future__ import absolute_import
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint_django.__pkginfo__ import BASE_ID
class DjangoInstalledChecker(BaseChecker):
name = 'django-installed-checker'
msgs = {
'F%s01' % BASE_ID: ("Django is not available on the PYTHONPATH",
'django-not-available',
"Django could not be imported by the pylint-django plugin, so most Django related "
"improvements to pylint will fail."),
'W%s99' % BASE_ID: ('Placeholder message to prevent disabling of checker',
'django-not-available-placeholder',
'PyLint does not recognise checkers as being enabled unless they have at least'
' one message which is not fatal...')
}
@check_messages('django-not-available')
def close(self):
try:
__import__('django')
except ImportError:
self.add_message('F%s01' % BASE_ID)
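# Hedged sketch (not part of the original module): a pylint plugin module normally
# exposes a register() hook so "pylint --load-plugins=..." can attach its checkers;
# in pylint-django that wiring lives elsewhere, so this is illustrative only.
def register(linter):
    """Register the checker with the given linter (illustrative sketch)."""
    linter.register_checker(DjangoInstalledChecker(linter))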
|
py | 1a51c827b69bd52d8fbe4a2abb5339ce2866e861 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-01-16 12:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('insta', '0005_unfollow'),
]
operations = [
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='insta.Profile')),
],
),
]
|
py | 1a51cb3c3cc8de2bbf8a1ff71f4e9924f410798d | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage being that the full Kubernetes API
is supported and no serialization need be written.
"""
import copy
import kubernetes.client.models as k8s
from airflow.executors import Executors
import uuid
class PodDefaults:
"""
Static defaults for the PodGenerator
"""
XCOM_MOUNT_PATH = '/airflow/xcom'
SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
XCOM_CMD = """import time
while True:
try:
time.sleep(3600)
except KeyboardInterrupt:
exit(0)
"""
VOLUME_MOUNT = k8s.V1VolumeMount(
name='xcom',
mount_path=XCOM_MOUNT_PATH
)
VOLUME = k8s.V1Volume(
name='xcom',
empty_dir=k8s.V1EmptyDirVolumeSource()
)
SIDECAR_CONTAINER = k8s.V1Container(
name=SIDECAR_CONTAINER_NAME,
command=['python', '-c', XCOM_CMD],
image='python:3.5-alpine',
volume_mounts=[VOLUME_MOUNT]
)
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic
Represents a kubernetes pod and manages execution of a single pod.
:param image: The docker image
:type image: str
:param envs: A dict containing the environment variables
:type envs: Dict[str, str]
:param cmds: The command to be run on the pod
:type cmds: List[str]
:param secrets: Secrets to be launched to the pod
:type secrets: List[airflow.kubernetes.models.secret.Secret]
:param image_pull_policy: Specify a policy to cache or always pull an image
:type image_pull_policy: str
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a comma separated list:
secret_a,secret_b
:type image_pull_secrets: str
:param affinity: A dict containing a group of affinity scheduling rules
:type affinity: dict
:param hostnetwork: If True enable host networking on the pod
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations
:type tolerations: list
:param security_context: A dict containing the security context for the pod
:type security_context: dict
:param configmaps: Any configmap refs to envfrom.
If more than one configmap is required, provide a comma separated list
configmap_a,configmap_b
:type configmaps: str
:param dnspolicy: Specify a dnspolicy for the pod
:type dnspolicy: str
:param pod: The fully specified pod.
:type pod: kubernetes.client.models.V1Pod
"""
def __init__(
self,
image,
name=None,
namespace=None,
volume_mounts=None,
envs=None,
cmds=None,
args=None,
labels=None,
node_selectors=None,
ports=None,
volumes=None,
image_pull_policy='IfNotPresent',
restart_policy='Never',
image_pull_secrets=None,
init_containers=None,
service_account_name=None,
resources=None,
annotations=None,
affinity=None,
hostnetwork=False,
tolerations=None,
security_context=None,
configmaps=None,
dnspolicy=None,
pod=None,
extract_xcom=False,
):
self.ud_pod = pod
self.pod = k8s.V1Pod()
self.pod.api_version = 'v1'
self.pod.kind = 'Pod'
# Pod Metadata
self.metadata = k8s.V1ObjectMeta()
self.metadata.labels = labels
self.metadata.name = name + "-" + str(uuid.uuid4())[:8] if name else None
self.metadata.namespace = namespace
self.metadata.annotations = annotations
# Pod Container
self.container = k8s.V1Container(name='base')
self.container.image = image
self.container.env = []
if envs:
if isinstance(envs, dict):
for key, val in envs.items():
self.container.env.append(k8s.V1EnvVar(
name=key,
value=val
))
elif isinstance(envs, list):
self.container.env.extend(envs)
configmaps = configmaps or []
self.container.env_from = []
for configmap in configmaps:
self.container.env_from.append(k8s.V1EnvFromSource(
config_map_ref=k8s.V1ConfigMapEnvSource(
name=configmap
)
))
self.container.command = cmds or []
self.container.args = args or []
self.container.image_pull_policy = image_pull_policy
self.container.ports = ports or []
self.container.resources = resources
self.container.volume_mounts = volume_mounts or []
# Pod Spec
self.spec = k8s.V1PodSpec(containers=[])
self.spec.security_context = security_context
self.spec.tolerations = tolerations
self.spec.dns_policy = dnspolicy
self.spec.host_network = hostnetwork
self.spec.affinity = affinity
self.spec.service_account_name = service_account_name
self.spec.init_containers = init_containers
self.spec.volumes = volumes or []
self.spec.node_selector = node_selectors
self.spec.restart_policy = restart_policy
self.spec.image_pull_secrets = []
if image_pull_secrets:
for image_pull_secret in image_pull_secrets.split(','):
self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(
name=image_pull_secret
))
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
result = self.ud_pod
if result is None:
result = self.pod
result.spec = self.spec
result.metadata = self.metadata
result.spec.containers = [self.container]
if self.extract_xcom:
result = self.add_sidecar(result)
return result
@staticmethod
def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> k8s.V1Pod:
if obj is None:
return k8s.V1Pod()
if isinstance(obj, PodGenerator):
return obj.gen_pod()
if not isinstance(obj, dict):
raise TypeError(
'Cannot convert a non-dictionary or non-PodGenerator '
'object into a KubernetesExecutorConfig')
namespaced = obj.get(Executors.KubernetesExecutor, {})
resources = namespaced.get('resources')
if resources is None:
requests = {
'cpu': namespaced.get('request_cpu'),
'memory': namespaced.get('request_memory')
}
limits = {
'cpu': namespaced.get('limit_cpu'),
'memory': namespaced.get('limit_memory')
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
resources = k8s.V1ResourceRequirements(
requests=requests,
limits=limits
)
annotations = namespaced.get('annotations', {})
gcp_service_account_key = namespaced.get('gcp_service_account_key', None)
if annotations is not None and gcp_service_account_key is not None:
annotations.update({
'iam.cloud.google.com/service-account': gcp_service_account_key
})
pod_spec_generator = PodGenerator(
image=namespaced.get('image'),
envs=namespaced.get('env'),
cmds=namespaced.get('cmds'),
args=namespaced.get('args'),
labels=namespaced.get('labels'),
node_selectors=namespaced.get('node_selectors'),
name=namespaced.get('name'),
ports=namespaced.get('ports'),
volumes=namespaced.get('volumes'),
volume_mounts=namespaced.get('volume_mounts'),
namespace=namespaced.get('namespace'),
image_pull_policy=namespaced.get('image_pull_policy'),
restart_policy=namespaced.get('restart_policy'),
image_pull_secrets=namespaced.get('image_pull_secrets'),
init_containers=namespaced.get('init_containers'),
service_account_name=namespaced.get('service_account_name'),
resources=resources,
annotations=namespaced.get('annotations'),
affinity=namespaced.get('affinity'),
hostnetwork=namespaced.get('hostnetwork'),
tolerations=namespaced.get('tolerations'),
security_context=namespaced.get('security_context'),
configmaps=namespaced.get('configmaps'),
dnspolicy=namespaced.get('dnspolicy'),
pod=namespaced.get('pod'),
extract_xcom=namespaced.get('extract_xcom'),
)
return pod_spec_generator.gen_pod()
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: k8s.V1Pod) -> k8s.V1Pod:
"""
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:type base_pod: k8s.V1Pod
:param client_pod: the pod that the client wants to create.
:type client_pod: k8s.V1Pod
:return: the merged pods
This can't be done recursively as certain fields are preserved,
some overwritten, and some concatenated, e.g. The command
should be preserved from base, the volumes appended to and
the other fields overwritten.
"""
client_pod_cp = copy.deepcopy(client_pod)
def merge_objects(base_obj, client_obj):
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
setattr(client_obj, base_key, base_val)
def extend_object_field(base_obj, client_obj, field_name):
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if not base_obj_field:
return
if not client_obj_field:
setattr(client_obj, field_name, base_obj_field)
return
appended_fields = base_obj_field + client_obj_field
setattr(client_obj, field_name, appended_fields)
# Values at the pod and metadata should be overwritten where they exist,
# but certain values at the spec and container level must be conserved.
base_container = base_pod.spec.containers[0]
client_container = client_pod_cp.spec.containers[0]
extend_object_field(base_container, client_container, 'volume_mounts')
extend_object_field(base_container, client_container, 'env')
extend_object_field(base_container, client_container, 'env_from')
extend_object_field(base_container, client_container, 'ports')
extend_object_field(base_container, client_container, 'volume_devices')
client_container.command = base_container.command
client_container.args = base_container.args
merge_objects(base_pod.spec.containers[0], client_pod_cp.spec.containers[0])
# Just append any additional containers from the base pod
client_pod_cp.spec.containers.extend(base_pod.spec.containers[1:])
merge_objects(base_pod.metadata, client_pod_cp.metadata)
extend_object_field(base_pod.spec, client_pod_cp.spec, 'volumes')
merge_objects(base_pod.spec, client_pod_cp.spec)
merge_objects(base_pod, client_pod_cp)
return client_pod_cp
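# Hedged usage sketch (not part of the original module): builds a minimal pod from
# keyword arguments; the image, name, namespace and command below are illustrative
# values chosen for the example, not Airflow defaults.
if __name__ == "__main__":  # pragma: no cover
    example_pod = PodGenerator(
        image="python:3.7-slim",
        name="example-task",
        namespace="default",
        cmds=["python", "-c"],
        args=["print('hello from the pod')"],
        labels={"airflow-worker": "example"},
    ).gen_pod()
    print(example_pod.metadata.name, example_pod.spec.containers[0].image)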
|
py | 1a51cb66e2beb8454a702db357829c5ae86a97f5 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import json
from dataclasses import dataclass, field
from json import JSONEncoder
from typing import Set, Dict, Union, List, Tuple
from sqlalchemy.orm import Session
from typing_extensions import TypeAlias
from .queries import get_warning_message_range
from .sarif_types import SARIFResult
from .ui.issues import IssueQueryResult
SARIFOutput: TypeAlias = Dict[
str,
Union[
List[
Dict[
str,
Union[
Dict[str, Dict[str, Union[List[Dict[str, str]], str]]],
List[SARIFResult],
],
]
],
str,
],
]
@dataclass
class SARIF:
version: str = "2.1.0"
schema: str = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json" # noqa
_tool_warning_code_ranges: Dict[str, Tuple[int, int]] = field(default_factory=dict)
driver: Dict[str, Union[str, List[Dict[str, str]]]] = field(default_factory=dict)
results: List[SARIFResult] = field(default_factory=list)
def __init__(
self, tool: str, session: Session, filtered_issues: Set[IssueQueryResult]
) -> None:
self._tool_warning_code_ranges = {
"mariana-trench": (4000, 5000),
"pysa": (5000, 6000),
}
driver_json = {}
if tool == "pysa":
driver_json["name"] = "Pysa"
driver_json["informationUri"] = "https://github.com/facebook/pyre-check/"
tool_warning_messages = get_warning_message_range(
session,
self._tool_warning_code_ranges[tool][0],
self._tool_warning_code_ranges[tool][1],
)
rules_json = []
for rule in tool_warning_messages:
rules_json.append({"id": str(rule.code), "name": rule.message})
driver_json["rules"] = rules_json
else:
raise NotImplementedError
self.driver = driver_json
self.results = [issue.to_sarif() for issue in filtered_issues]
def to_json(self, indent: int = 2) -> str:
return json.dumps(self, cls=SARIFEncoder, indent=indent)
class SARIFEncoder(JSONEncoder):
def default(self, o: SARIF) -> SARIFOutput:
return {
"version": o.version,
"$schema": o.schema,
"runs": [
{
"tool": {"driver": o.driver},
"results": o.results,
}
],
}
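# Hedged usage sketch (not part of the original module): `session` is an open
# SQLAlchemy Session and `issues` a set of IssueQueryResult objects produced by the
# surrounding tooling; both names are assumptions made for the example.
#
#     sarif = SARIF(tool="pysa", session=session, filtered_issues=issues)
#     print(sarif.to_json(indent=2))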
|
py | 1a51cbb7bfbe897028813ca7c8fb3231a419d515 | #! /usr/bin/env py.test
import time
from mwlib import myjson as json
from mwlib import serve
def mkcolldir(tmpdir, name):
cid = serve.make_collection_id({'metabook': json.dumps({'title': name, "type": "collection"})})
d = tmpdir.join(cid[0], cid[:2], cid).ensure(dir=1)
d.join("output.rl").write("bla")
return d
def test_purge_cache(tmpdir):
d1 = mkcolldir(tmpdir, 'c1')
d2 = mkcolldir(tmpdir, 'c2')
d2.join("output.rl").setmtime(time.time() - 2)
serve.purge_cache(1, tmpdir.strpath)
assert d1.check()
assert not d2.check()
|
py | 1a51cc18a02c552e151b3c3ffcff54383cc2843a | import tensorflow as tf
from tqdm import tqdm
from ..texts._text_functions import str_idx, build_dataset
class Model:
def __init__(
self,
size_layer = 128,
num_layers = 1,
embedded_size = 128,
dict_size = 5000,
learning_rate = 1e-3,
output_size = 300,
dropout = 0.8,
):
def cells(size, reuse = False):
cell = tf.nn.rnn_cell.LSTMCell(
size, initializer = tf.orthogonal_initializer(), reuse = reuse
)
return tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob = dropout
)
def birnn(inputs, scope):
with tf.variable_scope(scope):
for n in range(num_layers):
(out_fw, out_bw), (
state_fw,
state_bw,
) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = cells(size_layer // 2),
cell_bw = cells(size_layer // 2),
inputs = inputs,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d' % (n),
)
inputs = tf.concat((out_fw, out_bw), 2)
return tf.layers.dense(inputs[:, -1], output_size)
self.X_left = tf.placeholder(tf.int32, [None, None])
self.X_right = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.float32, [None])
self.batch_size = tf.shape(self.X_left)[0]
encoder_embeddings = tf.Variable(
tf.random_uniform([dict_size, embedded_size], -1, 1)
)
embedded_left = tf.nn.embedding_lookup(encoder_embeddings, self.X_left)
embedded_right = tf.nn.embedding_lookup(
encoder_embeddings, self.X_right
)
def contrastive_loss(y, d):
tmp = y * tf.square(d)
tmp2 = (1 - y) * tf.square(tf.maximum((1 - d), 0))
return (
tf.reduce_sum(tmp + tmp2)
/ tf.cast(self.batch_size, tf.float32)
/ 2
)
self.output_left = birnn(embedded_left, 'left')
self.output_right = birnn(embedded_right, 'right')
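        # Euclidean distance between the two encodings, normalized by the sum of their norms.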
self.distance = tf.sqrt(
tf.reduce_sum(
tf.square(tf.subtract(self.output_left, self.output_right)),
1,
keepdims = True,
)
)
self.distance = tf.div(
self.distance,
tf.add(
tf.sqrt(
tf.reduce_sum(
tf.square(self.output_left), 1, keepdims = True
)
),
tf.sqrt(
tf.reduce_sum(
tf.square(self.output_right), 1, keepdims = True
)
),
),
)
self.distance = tf.reshape(self.distance, [-1])
self.cost = contrastive_loss(self.Y, self.distance)
self.temp_sim = tf.subtract(
tf.ones_like(self.distance), tf.rint(self.distance)
)
correct_predictions = tf.equal(self.temp_sim, self.Y)
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'))
self.optimizer = tf.train.AdamOptimizer(
learning_rate = learning_rate
).minimize(self.cost)
def train_model(
train_X_left,
train_X_right,
train_Y,
epoch = 10,
batch_size = 16,
embedding_size = 256,
output_size = 300,
maxlen = 100,
dropout = 0.8,
num_layers = 1,
**kwargs
):
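    # Build a shared vocabulary from both sentence sides, then construct and train the siamese graph.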
concat = (' '.join(train_X_left + train_X_right)).split()
vocabulary_size = len(list(set(concat)))
_, _, dictionary, reversed_dictionary = build_dataset(
concat, vocabulary_size
)
_graph = tf.Graph()
with _graph.as_default():
sess = tf.InteractiveSession()
model = Model(
size_layer = embedding_size,
num_layers = num_layers,
embedded_size = embedding_size,
dict_size = len(dictionary),
output_size = output_size,
dropout = dropout,
)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
vectors_left = str_idx(train_X_left, dictionary, maxlen, UNK = 3)
vectors_right = str_idx(train_X_right, dictionary, maxlen, UNK = 3)
for e in range(epoch):
pbar = tqdm(
range(0, len(vectors_left), batch_size), desc = 'minibatch loop'
)
for i in pbar:
batch_x_left = vectors_left[
i : min(i + batch_size, len(vectors_left))
]
batch_x_right = vectors_right[
i : min(i + batch_size, len(vectors_left))
]
batch_y = train_Y[i : min(i + batch_size, len(vectors_left))]
acc, loss, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict = {
model.X_left: batch_x_left,
model.X_right: batch_x_right,
model.Y: batch_y,
},
)
pbar.set_postfix(cost = loss, accuracy = acc)
return sess, model, dictionary, saver, dropout
def load_siamese(location, json):
graph = tf.Graph()
with graph.as_default():
model = Model(
size_layer = json['embedding_size'],
num_layers = json['num_layers'],
embedded_size = json['embedding_size'],
dict_size = len(json['dictionary']),
output_size = json['output_size'],
dropout = json['dropout'],
)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(sess, location + '/model.ckpt')
return sess, model, saver
|
py | 1a51cdfc61666c107c8164987f82ea1c6fae1db2 | # Generated by Django 2.2.12 on 2020-05-14 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120, verbose_name='Nom')),
('price', models.IntegerField(verbose_name='Prix')),
],
),
]
|
py | 1a51ce17e500d0f92115bda5567132b52e5563e6 | from dartsclone._dartsclone import DoubleArray, TraverseState
__all__ = [
"DoubleArray",
"TraverseState",
]
|
py | 1a51ce32453e09176bb94171f6ce4815156fcac0 | """
Union of Features
==========================
This module contains steps to perform various feature unions and model stacking, using parallelism is possible.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
from joblib import Parallel, delayed
from neuraxle.base import BaseStep, TruncableSteps, NamedTupleList, Identity, ExecutionContext, DataContainer, \
NonFittableMixin, ForceHandleOnlyMixin
from neuraxle.steps.numpy import NumpyConcatenateInnerFeatures
class FeatureUnion(ForceHandleOnlyMixin, TruncableSteps):
"""Parallelize the union of many pipeline steps."""
def __init__(
self,
steps_as_tuple: NamedTupleList,
joiner: NonFittableMixin = NumpyConcatenateInnerFeatures(),
n_jobs: int = None,
backend: str = "threading",
cache_folder_when_no_handle: str = None
):
"""
Create a feature union.
:param steps_as_tuple: the NamedTupleList of steps to process in parallel and to join.
:param joiner: What will be used to join the features. For example, ``NumpyConcatenateInnerFeatures()``.
:param n_jobs: The number of jobs for the parallelized ``joblib.Parallel`` loop in fit and in transform.
:param backend: The type of parallelization to do with ``joblib.Parallel``. Possible values: "loky", "multiprocessing", "threading", "dask" if you use dask, and more.
"""
steps_as_tuple.append(('joiner', joiner))
TruncableSteps.__init__(self, steps_as_tuple)
self.n_jobs = n_jobs
self.backend = backend
ForceHandleOnlyMixin.__init__(self, cache_folder=cache_folder_when_no_handle)
def _fit_data_container(self, data_container, context):
"""
Fit the parallel steps on the data. It will make use of some parallel processing.
:param data_container: The input data to fit onto
:param context: execution context
:return: self
"""
# Actually fit:
if self.n_jobs != 1:
fitted_steps = Parallel(backend=self.backend, n_jobs=self.n_jobs)(
delayed(step.handle_fit)(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
)
else:
fitted_steps = [
step.handle_fit(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
]
self._save_fitted_steps(fitted_steps)
return self
def _transform_data_container(self, data_container, context):
"""
Transform the data with the unions. It will make use of some parallel processing.
:param data_container: data container
:param context: execution context
:return: the transformed data_inputs.
"""
if self.n_jobs != 1:
data_containers = Parallel(backend=self.backend, n_jobs=self.n_jobs)(
delayed(step.handle_transform)(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
)
else:
data_containers = [
step.handle_transform(data_container.copy(), context)
for _, step in self.steps_as_tuple[:-1]
]
return DataContainer(
data_inputs=data_containers,
current_ids=data_container.current_ids,
summary_id=data_container.summary_id,
expected_outputs=data_container.expected_outputs,
sub_data_containers=data_container.sub_data_containers
)
def _did_transform(self, data_container, context):
data_container = self[-1].handle_transform(data_container, context)
return data_container
def _fit_transform_data_container(self, data_container, context):
"""
Transform the data with the unions. It will make use of some parallel processing.
:param data_container: data container
:param context: execution context
:return: the transformed data_inputs.
"""
new_self = self._fit_data_container(data_container, context)
data_container = self._transform_data_container(data_container, context)
return new_self, data_container
def _save_fitted_steps(self, fitted_steps):
# Save fitted steps
for i, fitted_step in enumerate(fitted_steps[:-1]):
self.steps_as_tuple[i] = (self.steps_as_tuple[i][0], fitted_step)
self._refresh_steps()
def _did_fit_transform(self, data_container, context):
data_container = self[-1].handle_transform(data_container, context)
return data_container
class AddFeatures(FeatureUnion):
"""Parallelize the union of many pipeline steps AND concatenate the new features to the received inputs using Identity."""
def __init__(self, steps_as_tuple: NamedTupleList, **kwargs):
"""
Create a ``FeatureUnion`` where ``Identity`` is the first step so as to also keep
the inputs to concatenate them to the outputs.
:param steps_as_tuple: The steps to be sent to the ``FeatureUnion``. ``Identity()`` is prepended.
:param kwargs: Other arguments to send to ``FeatureUnion``.
"""
FeatureUnion.__init__(self, [Identity()] + steps_as_tuple, **kwargs)
class ModelStacking(FeatureUnion):
"""Performs a ``FeatureUnion`` of steps, and then send the joined result to the above judge step."""
def __init__(
self,
steps_as_tuple: NamedTupleList,
judge: BaseStep,
**kwargs
):
"""
Perform model stacking. The steps will be merged with a FeatureUnion,
and the judge will recombine the predictions.
:param steps_as_tuple: the NamedTupleList of steps to process in parallel and to join.
:param judge: a BaseStep that will learn to judge the best answer and who to trust out of every parallel steps.
:param kwargs: Other arguments to send to ``FeatureUnion``.
"""
FeatureUnion.__init__(self, steps_as_tuple, **kwargs)
        self.judge: BaseStep = judge  # TODO: add "other" types of step(s) to TruncableSteps or to another intermediate class. For example, to get their hyperparameters.
    def _did_fit_transform(self, data_container, context) -> DataContainer:
data_container = super()._did_fit_transform(data_container, context)
fitted_judge, data_container = self.judge.handle_fit_transform(data_container, context)
self.judge = fitted_judge
return data_container
def _did_fit(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer:
"""
Fit the parallel steps on the data. It will make use of some parallel processing.
Also, fit the judge on the result of the parallel steps.
:param data_container: data container to fit on
:param context: execution context
:return: self
"""
data_container = super()._did_fit(data_container, context)
data_container = super()._transform_data_container(data_container, context)
data_container = super()._did_transform(data_container, context)
fitted_judge = self.judge.handle_fit(data_container, context)
self.judge = fitted_judge
return data_container
def _did_transform(self, data_container, context) -> DataContainer:
"""
Transform the data with the unions. It will make use of some parallel processing.
Then, use the judge to refine the transformations.
:param data_container: data container to transform
:param context: execution context
"""
data_container = super()._did_transform(data_container, context)
results = self.judge.handle_transform(data_container, context)
data_container.set_data_inputs(results.data_inputs)
return data_container
|
py | 1a51d01eab2c979d3041156aaec073518302c7e4 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to interact with Arrow memory allocated by Arrow Java.
These functions convert the objects holding the metadata, the actual
data is not copied at all.
This will only work with a JVM running in the same process such as provided
through jpype. Modules that talk to a remote JVM like py4j will not work as the
memory addresses reported by them are not reachable in the python process.
"""
import pyarrow as pa
def jvm_buffer(arrowbuf):
"""
Construct an Arrow buffer from io.netty.buffer.ArrowBuf
Parameters
----------
arrowbuf: io.netty.buffer.ArrowBuf
Arrow Buffer representation on the JVM
Returns
-------
pyarrow.Buffer
Python Buffer that references the JVM memory
"""
address = arrowbuf.memoryAddress()
size = arrowbuf.capacity()
return pa.foreign_buffer(address, size, arrowbuf.asNettyBuffer())
def _from_jvm_int_type(jvm_type):
"""
Convert a JVM int type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Int
Returns
-------
typ: pyarrow.DataType
"""
bit_width = jvm_type.getBitWidth()
if jvm_type.getIsSigned():
if bit_width == 8:
return pa.int8()
elif bit_width == 16:
return pa.int16()
elif bit_width == 32:
return pa.int32()
elif bit_width == 64:
return pa.int64()
else:
if bit_width == 8:
return pa.uint8()
elif bit_width == 16:
return pa.uint16()
elif bit_width == 32:
return pa.uint32()
elif bit_width == 64:
return pa.uint64()
def _from_jvm_float_type(jvm_type):
"""
Convert a JVM float type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$FloatingPoint
Returns
-------
typ: pyarrow.DataType
"""
precision = jvm_type.getPrecision().toString()
if precision == 'HALF':
return pa.float16()
elif precision == 'SINGLE':
return pa.float32()
elif precision == 'DOUBLE':
return pa.float64()
def _from_jvm_time_type(jvm_type):
"""
Convert a JVM time type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Time
Returns
-------
typ: pyarrow.DataType
"""
time_unit = jvm_type.getUnit().toString()
if time_unit == 'SECOND':
assert jvm_type.getBitWidth() == 32
return pa.time32('s')
elif time_unit == 'MILLISECOND':
assert jvm_type.getBitWidth() == 32
return pa.time32('ms')
elif time_unit == 'MICROSECOND':
assert jvm_type.getBitWidth() == 64
return pa.time64('us')
elif time_unit == 'NANOSECOND':
assert jvm_type.getBitWidth() == 64
return pa.time64('ns')
def _from_jvm_timestamp_type(jvm_type):
"""
Convert a JVM timestamp type to its Python equivalent.
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Timestamp
Returns
-------
typ: pyarrow.DataType
"""
time_unit = jvm_type.getUnit().toString()
timezone = jvm_type.getTimezone()
if timezone is not None:
timezone = str(timezone)
if time_unit == 'SECOND':
return pa.timestamp('s', tz=timezone)
elif time_unit == 'MILLISECOND':
return pa.timestamp('ms', tz=timezone)
elif time_unit == 'MICROSECOND':
return pa.timestamp('us', tz=timezone)
elif time_unit == 'NANOSECOND':
return pa.timestamp('ns', tz=timezone)
def _from_jvm_date_type(jvm_type):
"""
Convert a JVM date type to its Python equivalent
Parameters
----------
jvm_type: org.apache.arrow.vector.types.pojo.ArrowType$Date
Returns
-------
typ: pyarrow.DataType
"""
day_unit = jvm_type.getUnit().toString()
if day_unit == 'DAY':
return pa.date32()
elif day_unit == 'MILLISECOND':
return pa.date64()
def field(jvm_field):
"""
Construct a Field from a org.apache.arrow.vector.types.pojo.Field
instance.
Parameters
----------
jvm_field: org.apache.arrow.vector.types.pojo.Field
Returns
-------
pyarrow.Field
"""
name = str(jvm_field.getName())
jvm_type = jvm_field.getType()
typ = None
if not jvm_type.isComplex():
type_str = jvm_type.getTypeID().toString()
if type_str == 'Null':
typ = pa.null()
elif type_str == 'Int':
typ = _from_jvm_int_type(jvm_type)
elif type_str == 'FloatingPoint':
typ = _from_jvm_float_type(jvm_type)
elif type_str == 'Utf8':
typ = pa.string()
elif type_str == 'Binary':
typ = pa.binary()
elif type_str == 'FixedSizeBinary':
typ = pa.binary(jvm_type.getByteWidth())
elif type_str == 'Bool':
typ = pa.bool_()
elif type_str == 'Time':
typ = _from_jvm_time_type(jvm_type)
elif type_str == 'Timestamp':
typ = _from_jvm_timestamp_type(jvm_type)
elif type_str == 'Date':
typ = _from_jvm_date_type(jvm_type)
elif type_str == 'Decimal':
typ = pa.decimal128(jvm_type.getPrecision(), jvm_type.getScale())
else:
raise NotImplementedError(
"Unsupported JVM type: {}".format(type_str))
else:
# TODO: The following JVM types are not implemented:
# Struct, List, FixedSizeList, Union, Dictionary
raise NotImplementedError(
"JVM field conversion only implemented for primitive types.")
nullable = jvm_field.isNullable()
jvm_metadata = jvm_field.getMetadata()
if jvm_metadata.isEmpty():
metadata = None
else:
metadata = {str(entry.getKey()): str(entry.getValue())
for entry in jvm_metadata.entrySet()}
return pa.field(name, typ, nullable, metadata)
def schema(jvm_schema):
"""
Construct a Schema from a org.apache.arrow.vector.types.pojo.Schema
instance.
Parameters
----------
jvm_schema: org.apache.arrow.vector.types.pojo.Schema
Returns
-------
pyarrow.Schema
"""
fields = jvm_schema.getFields()
fields = [field(f) for f in fields]
jvm_metadata = jvm_schema.getCustomMetadata()
if jvm_metadata.isEmpty():
metadata = None
else:
metadata = {str(entry.getKey()): str(entry.getValue())
for entry in jvm_metadata.entrySet()}
return pa.schema(fields, metadata)
def array(jvm_array):
"""
Construct an (Python) Array from its JVM equivalent.
Parameters
----------
jvm_array : org.apache.arrow.vector.ValueVector
Returns
-------
array : Array
"""
if jvm_array.getField().getType().isComplex():
minor_type_str = jvm_array.getMinorType().toString()
raise NotImplementedError(
"Cannot convert JVM Arrow array of type {},"
" complex types not yet implemented.".format(minor_type_str))
dtype = field(jvm_array.getField()).type
length = jvm_array.getValueCount()
buffers = [jvm_buffer(buf)
for buf in list(jvm_array.getBuffers(False))]
null_count = jvm_array.getNullCount()
return pa.Array.from_buffers(dtype, length, buffers, null_count)
def record_batch(jvm_vector_schema_root):
"""
Construct a (Python) RecordBatch from a JVM VectorSchemaRoot
Parameters
----------
jvm_vector_schema_root : org.apache.arrow.vector.VectorSchemaRoot
Returns
-------
record_batch: pyarrow.RecordBatch
"""
pa_schema = schema(jvm_vector_schema_root.getSchema())
arrays = []
for name in pa_schema.names:
arrays.append(array(jvm_vector_schema_root.getVector(name)))
return pa.RecordBatch.from_arrays(
arrays,
pa_schema.names,
metadata=pa_schema.metadata
)
|
py | 1a51d09a2e02e896573f6db1dff5ac9c28752fcf | #!/usr/bin/python
import json
import tarfile
import time
import os
import shutil
import sys
import requests
import tempfile
import contextlib
import re
from requests.auth import HTTPBasicAuth
from subprocess import Popen, PIPE
class Export:
"""
This is a generic EuPathDB export tool for Galaxy. It is abstract and so must be subclassed by more
    specialized export tools that implement its abstract methods.
"""
# Names for the 2 json files and the folder containing the dataset to be included in the tarball
DATASET_JSON = "dataset.json"
META_JSON = "meta.json"
DATAFILES = "datafiles"
def __init__(self, dataset_type, version, validation_script, args):
"""
Initializes the export class with the parameters needed to accomplish the export of user
datasets on Galaxy to EuPathDB projects.
:param dataset_type: The EuPathDB type of this dataset
:param version: The version of the EuPathDB type of this dataset
:param validation_script: A script that handles the validation of this dataset
:param args: An array of the input parameters
"""
self._type = dataset_type
self._version = version
self._validation_script = validation_script
# Extract and transform the parameters as needed into member variables
self.parse_params(args)
# This msec timestamp is used to denote both the created and modified times.
self._timestamp = int(time.time() * 1000)
# This is the name of the file to be exported sans extension. It will be used to designate a unique temporary
# directory and to export both the tarball and the flag that triggers IRODS to process the tarball. By
# convention, the dataset tarball is of the form dataset_uNNNNNN_tNNNNNNN.tgz where the NNNNNN following the _u
# is the WDK user id and _t is the msec timestamp
self._export_file_root = 'dataset_u' + str(self._user_id) + '_t' + str(self._timestamp) + '_p' + str(os.getpid())
print >> sys.stdout, "Export file root is " + self._export_file_root
# Set up the configuration data
(self._url, self._user, self._pwd, self._lz_coll, self._flag_coll) = self.collect_rest_data()
def parse_params(self, args):
"""
        Salts away all generic parameters (i.e., the first 6 params) and does some initial validation.  The subclasses
will handle the other parameters.
:param args:
:return:
"""
if len(args) < 6:
raise ValidationException("The tool was passed an insufficient numbers of arguments.")
self._dataset_name = args[0]
self._summary = args[1]
self._description = args[2]
# WDK user id is derived from the user email
user_email = args[3].strip()
if not re.match(r'.+\.\[email protected]$', user_email, flags=0):
raise ValidationException(
"The user email " + str(user_email) + " is not valid for the use of this tool.")
galaxy_user = user_email.split("@")[0]
self._user_id = galaxy_user[galaxy_user.rfind(".") + 1:]
# Used to find the configuration file containing IRODS url and credentials
self._tool_directory = args[4]
# Output file
self._output = args[5]
def collect_rest_data(self):
"""
Obtains the url and credentials and relevant collections needed to run the iRODS rest service.
At some point, this information should be fished out of a configuration file.
:return: A tuple containing the url, user, and password, landing zone and flags collection,
in that order
"""
config_path = self._tool_directory + "/../../config/config.json"
# The tool directory path seems glitchy on Globus Dev Galaxy instance after service restarts.
# Uncomment to check.
#print >> sys.stdout, "self._tool_directory is " + self._tool_directory
with open(config_path, "r+") as config_file:
config_json = json.load(config_file)
return (config_json["url"], config_json["user"], config_json["password"], "lz", "flags")
def validate_datasets(self):
"""
Runs the validation script provided to the class upon initialization using the user's
dataset files as standard input.
:return:
"""
if self._validation_script == None:
return
dataset_files = self.identify_dataset_files()
validation_process = Popen(['python', self._tool_directory + "/../../bin/" + self._validation_script],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
# output is a tuple containing (stdout, stderr)
output = validation_process.communicate(json.dumps(dataset_files))
if validation_process.returncode == 1:
raise ValidationException(output[1])
def identify_dependencies(self):
"""
An abstract method to be addressed by a specialized export tool that furnishes a dependency json list.
:return: The dependency json list to be returned should look as follows:
[dependency1, dependency2, ... ]
where each dependency is written as a json object as follows:
{
"resourceIdentifier": <value>,
"resourceVersion": <value>,
"resourceDisplayName": <value
}
Where no dependencies exist, an empty list is returned
"""
raise NotImplementedError(
"The method 'identify_dependencies(self)' needs to be implemented in the specialized export module.")
def identify_projects(self):
"""
An abstract method to be addressed by a specialized export tool that furnishes a EuPathDB project list.
:return: The project list to be returned should look as follows:
[project1, project2, ... ]
At least one valid EuPathDB project must be listed
"""
raise NotImplementedError(
"The method 'identify_project(self)' needs to be implemented in the specialized export module.")
def identify_supported_projects(self):
"""
Override this method to provide a non-default list of projects.
Default is None, interpreted as all projects are ok, ie, no constraints.
"""
        return None
def identify_dataset_files(self):
"""
An abstract method to be addressed by a specialized export tool that furnishes a json list
containing the dataset data files and the EuPath file names they must have in the tarball.
:return: The dataset file list to be returned should look as follows:
[dataset file1, dataset file2, ... ]
where each dataset file is written as a json object as follows:
{
"name":<filename that EuPathDB expects>,
"path":<Galaxy path to the dataset file>
At least one valid EuPathDB dataset file must be listed
"""
raise NotImplementedError(
"The method 'identify_dataset_file(self)' needs to be implemented in the specialized export module.")
def create_dataset_json_file(self, temp_path):
""" Create and populate the dataset.json file that must be included in the tarball."""
# Get the total size of the dataset files (needed for the json file)
size = sum(os.stat(dataset_file['path']).st_size for dataset_file in self.identify_dataset_files())
if self.identify_supported_projects() != None:
for (project) in self.identify_projects():
if project not in self.identify_supported_projects():
raise ValidationException("Sorry, you cannot export this kind of data to " + project)
dataset_path = temp_path + "/" + self.DATASET_JSON
with open(dataset_path, "w+") as json_file:
json.dump({
"type": {"name": self._type, "version": self._version},
"dependencies": self.identify_dependencies(),
"projects": self.identify_projects(),
"dataFiles": self.create_data_file_metadata(),
"owner": self._user_id,
"size": size,
"created": self._timestamp
}, json_file, indent=4)
def create_metadata_json_file(self, temp_path):
"""" Create and populate the meta.json file that must be included in the tarball."""
meta_path = temp_path + "/" + self.META_JSON
with open(meta_path, "w+") as json_file:
json.dump({"name": self._dataset_name,
"summary": self._summary,
"description": self._description
}, json_file, indent=4)
def create_data_file_metadata(self):
"""
Create a json object holding metadata for an array of dataset files.
:return: json object to be inserted into dataset.json
"""
dataset_files_metadata = []
for dataset_file in self.identify_dataset_files():
dataset_file_metadata = {}
dataset_file_metadata["name"] = re.sub(r"\s+", "_", dataset_file['name'])
dataset_file_metadata["size"] = os.stat(dataset_file['path']).st_size
dataset_files_metadata.append(dataset_file_metadata)
return dataset_files_metadata
def package_data_files(self, temp_path):
"""
Copies the user's dataset files to the datafiles folder of the temporary dir and changes each
dataset filename conferred by Galaxy to a filename expected by EuPathDB
"""
os.mkdir(temp_path + "/" + self.DATAFILES)
for dataset_file in self.identify_dataset_files():
shutil.copy(dataset_file['path'], temp_path + "/" + self.DATAFILES + "/" + re.sub(r"\s+", "_", dataset_file['name']))
def create_tarball(self):
"""
Package the tarball - contains meta.json, dataset.json and a datafiles folder containing the
user's dataset files
"""
with tarfile.open(self._export_file_root + ".tgz", "w:gz") as tarball:
for item in [self.META_JSON, self.DATASET_JSON, self.DATAFILES]:
tarball.add(item)
def process_request(self, collection, source_file):
"""
        This method wraps the iRODS rest request into a try/except to ensure that bad responses are
reflected back to the user.
:param collection: the name of the workspaces collection to which the file is to be uploaded
:param source_file: the name of the file to be uploaded to iRODS
"""
rest_response = self.send_request(collection, source_file)
try:
rest_response.raise_for_status()
except requests.exceptions.HTTPError as e:
print >> sys.stderr, "Error: " + str(e)
sys.exit(1)
def send_request(self, collection, source_file):
"""
This request is intended as a multi-part form post containing one file to be uploaded. iRODS Rest
        does an iput followed by an iget, apparently.  So the response can be used to ensure proper
delivery.
:param collection: the name of the workspaces collection to which the file is to be uploaded
:param source_file: the name of the file to be uploaded to iRODS
:return: the http response from an iget of the uploaded file
"""
request = self._url + collection + "/" + source_file
headers = {"Accept": "application/json"}
upload_file = {"uploadFile": open(source_file, "rb")}
auth = HTTPBasicAuth(self._user, self._pwd)
try:
response = requests.post(request, auth=auth, headers=headers, files=upload_file)
response.raise_for_status()
except Exception as e:
print >> sys.stderr, "Error: The dataset export could not be completed at this time. The EuPathDB" \
" workspace may be unavailable presently. " + str(e)
sys.exit(2)
return response
def get_flag(self, collection, source_file):
"""
This method picks up any flag (success or failure) from the flags collection in iRODs related to the dataset
exported to determine whether the export was successful. If not, the nature of the failure is reported to the
user. The failure report will normally be very general unless the problem is one that can possibly be remedied
by the user (e.g., going over quota).
:param collection: The iRODS collection holding the status flags
:param source_file: The dataset tarball name sans extension
"""
time.sleep(5) # arbitrary wait period before one time check for a flag.
auth = HTTPBasicAuth(self._user, self._pwd)
# Look for the presence of a success flag first and if none found, check for a failure flag. If neither
# found, assume that to be a failure also.
try:
request = self._url + collection + "/" + "success_" + source_file
success = requests.get(request, auth=auth, timeout=5)
if success.status_code == 404:
request = self._url + collection + "/" + "failure_" + source_file
failure = requests.get(request, auth=auth, timeout=5)
if failure.status_code != 404:
raise TransferException(failure.content)
else:
failure.raise_for_status()
else:
self.output_success()
print >> sys.stdout, "Your dataset has been successfully exported to EuPathDB."
print >> sys.stdout, "Please visit an appropriate EuPathDB site to view your dataset."
except (requests.exceptions.ConnectionError, TransferException) as e:
print >> sys.stderr, "Error: " + str(e)
sys.exit(1)
def connection_diagnostic(self):
"""
        Used to ensure that the calling ip is the one expected (i.e., the one for which the
firewall is opened). In Globus Dev Galaxy instance calling the tool outside of Galaxy
versus inside Galaxy resulted in different calling ip addresses.
"""
request = "http://ifconfig.co"
headers = {"Accept": "application/json"}
try:
response = requests.get(request, headers=headers)
response.raise_for_status()
print >> sys.stdout, "Diagnostic Result: " + response.content
except Exception as e:
print >> sys.stderr, "Diagnostic Error: " + str(e)
def export(self):
"""
Does the work of exporting to EuPathDB, a tarball consisting of the user's dataset files along
with dataset and metadata json files.
"""
# Apply the validation first. If it fails, exit with a data error.
self.validate_datasets()
# We need to save the current working directory so we can get back to it when we are
# finished working in our temporary directory.
orig_path = os.getcwd()
# We need to create a temporary directory in which to assemble the tarball.
with self.temporary_directory(self._export_file_root) as temp_path:
# Need to temporarily work inside the temporary directory to properly construct and
# send the tarball
os.chdir(temp_path)
self.package_data_files(temp_path)
self.create_metadata_json_file(temp_path)
self.create_dataset_json_file(temp_path)
self.create_tarball()
# Uncomment to check the calling ip address for this tool.
# self.connection_diagnostic()
# Call the iRODS rest service to drop the tarball into the iRODS workspace landing zone
self.process_request(self._lz_coll, self._export_file_root + ".tgz")
# Create a empty (flag) file corresponding to the tarball
open(self._export_file_root + ".txt", "w").close()
# Call the iRODS rest service to drop a flag into the IRODS workspace flags collection. This flag
# triggers the iRODS PEP that unpacks the tarball and posts the event to Jenkins
self.process_request(self._flag_coll, self._export_file_root + ".txt")
# Look for a success/fail indication from IRODS.
self.get_flag(self._flag_coll, self._export_file_root)
# We exit the temporary directory prior to removing it, back to the original working directory.
os.chdir(orig_path)
@contextlib.contextmanager
def temporary_directory(self, dir_name):
"""
This method creates a temporary directory such that removal is assured once the
program completes.
:param dir_name: The name of the temporary directory
:return: The full path to the temporary directory
"""
temp_path = tempfile.mkdtemp(dir_name)
try:
yield temp_path
finally:
# Added the boolean arg because cannot remove top level of temp dir structure in
# Globus Dev Galaxy instance and it will throw an Exception if the boolean, 'True', is not in place.
shutil.rmtree(temp_path, True)
def output_success(self):
header = "<html><body><h1>Good news!</h1><br />"
msg = """
<h2>Results of the EuPathDB Export Tool<br />Bigwig Files to EuPathDB</h2>
<h3>Your set of bigwig files was exported from Galaxy to your account in EuPathDB.
For file access and to view in GBrowse, go to My Data Sets in the appropriate EuPathDB site:
</h3><br />
            Go to the appropriate EuPathDB site (links below) to see it (and all your User Datasets):<br />
<a href='http://amoebadb.org/amoeba/app/workspace/datasets'>AmoebaDB</a><br />
<a href='http://cryptodb.org/cryptodb/app/workspace/datasets'>CryptoDB</a><br />
<a href='http://fungidb.org/fungidb/app/workspace/datasets'>FungiDB</a><br />
<a href='http://giardiadb.org/giardiadb/app/workspace/datasets'>GiardiaDB</a><br />
<a href='http://hostdb.org/hostdb/app/workspace/datasets'>HostDB</a><br />
<a href='http://microsporidiadb.org/micro/app/workspace/datasets'>MicrosporidiaDB</a><br />
<a href='http://piroplasmadb.org/piro/app/workspace/datasets'>PiroplasmaDB</a><br />
<a href='http://plasmodb.org/plasmo/app/workspace/datasets'>PlasmoDB</a><br />
<a href='http://schistodb.net/schisto/app/workspace/datasets'>SchistoDB</a><br />
<a href='http://toxodb.org/toxo/app/workspace/datasets'>ToxoDB</a><br />
<a href='http://trichdb.org/trichdb/app/workspace/datasets'>TrichDB</a><br />
<a href='http://tritrypdb.org/tritrypdb/app/workspace/datasets'>TriTrypDB</a><br />
</body></html>
"""
with open(self._output, 'w') as file:
file.write("%s%s" % (header,msg))
class ValidationException(Exception):
"""
This represents the exception reported when a call to a validation script returns a data error.
"""
pass
class TransferException(Exception):
"""
This represents the exception reported when the export of a dataset to the iRODS system returns a failure.
"""
pass
|
py | 1a51d0adc08c0915d84421c4061ebac6450ce588 | # ---------------------------------------------------------------------
# Iskratel.MSAN.get_interface_status
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus
class Script(BaseScript):
name = "Iskratel.MSAN.get_interface_status"
interface = IGetInterfaceStatus
rx_port = re.compile(
r"^(?P<port>\d+/\d+)\s+(?:\s+|PC|PC Mbr)\s+"
r"(?P<admin_status>Enable|Disable)\s+"
r"(?:Auto|1000 Full)\s+"
r"(?:\s+|Auto|100 Full|1000 Full)\s+"
r"(?P<oper_status>Up|Down)\s+(?:Enable|Disable)\s+"
r"(?:Enable|Disable)(?P<descr>.*?)?\n",
re.MULTILINE,
)
def execute_cli(self, interface=None):
r = []
for match in self.rx_port.finditer(self.cli("show port all")):
if (interface is not None) and (interface == match.group("port")):
return [
{
"interface": match.group("port"),
"status": match.group("oper_status") != "Down",
}
]
r += [
{"interface": match.group("port"), "status": match.group("oper_status") != "Down"}
]
return r
|
py | 1a51d0f1d33a0b627d0f2c5dad83e09cfe66980a | """
File for test case generation
"""
import time
from typing import List
import kubernetes as k8s
from illuminatio.k8s_util import labels_to_string
from illuminatio.rule import Rule
from illuminatio.test_case import NetworkTestCase
from illuminatio.host import ClusterHost, GenericClusterHost
from illuminatio.util import rand_port, INVERTED_ATTRIBUTE_PREFIX
def _get_other_host_from(connection_targets, rule_namespace):
namespace_labels = "namespaceLabels"
pod_labels = "podLabels"
namespace = "namespace"
if namespace_labels in connection_targets and pod_labels in connection_targets:
return GenericClusterHost(connection_targets[namespace_labels], connection_targets[pod_labels])
if namespace in connection_targets and pod_labels in connection_targets:
return ClusterHost(connection_targets[namespace], connection_targets[pod_labels])
if namespace_labels in connection_targets: # and no podLabels included
return GenericClusterHost(connection_targets[namespace_labels], {})
if pod_labels in connection_targets:
return ClusterHost(rule_namespace, connection_targets[pod_labels])
if connection_targets == {}:
return GenericClusterHost({}, {})
raise ValueError("Unknown combination of field in connection %s" % connection_targets)
def get_namespace_label_strings(namespace_labels, namespaces):
"""
Returns a set of all stringified namespace labels
"""
# list of all namespace names with labels
return {labels_to_string(namespace_label): [namespace.metadata.name for namespace in namespaces
if namespace.metadata.labels is not None and
namespace_label.items() <= namespace.metadata.labels.items()]
for namespace_label in namespace_labels}
class NetworkTestCaseGenerator:
"""
    Class for generating test cases out of a k8s NetworkPolicy and saving them to a specified format
"""
def __init__(self, log):
self.logger = log
def generate_test_cases(self,
network_policies: List[k8s.client.V1NetworkPolicy],
namespaces: List[k8s.client.V1Namespace]):
"""
Generates positive and negative test cases, also returns measured runtimes
"""
runtimes = {}
start_time = time.time()
isolated_hosts = []
other_hosts = []
outgoing_test_cases = []
incoming_test_cases = []
self.logger.debug("Generating test cases for %s", network_policies)
rules = [Rule.from_network_policy(netPol) for netPol in network_policies]
net_pol_parsing_time = time.time()
runtimes["parse"] = net_pol_parsing_time - start_time
self.logger.debug("Rule: %s", rules)
for rule in rules:
rule_host = ClusterHost(rule.concerns["namespace"], rule.concerns["podLabels"])
if rule_host not in isolated_hosts:
isolated_hosts.append(rule_host)
if rule.allowed: # means it is NOT default deny rule
for connection in rule.allowed:
for port in connection.ports:
on_port = port
other_host = _get_other_host_from(connection.targets, rule.concerns["namespace"])
other_hosts.append(other_host)
if connection.direction == "to":
case = NetworkTestCase(rule_host, other_host, on_port, True)
outgoing_test_cases.append(case)
elif connection.direction == "from":
case = NetworkTestCase(other_host, rule_host, on_port, True)
incoming_test_cases.append(case)
else:
raise ValueError("Direction '%s' unknown!" % connection.direction)
positive_test_time = time.time()
runtimes["positiveTestGen"] = positive_test_time - net_pol_parsing_time
negative_test_cases, negative_test_gen_runtimes = self.generate_negative_cases_for_incoming_cases(
isolated_hosts,
incoming_test_cases,
other_hosts, namespaces)
runtimes["negativeTestGen"] = negative_test_gen_runtimes
return outgoing_test_cases + negative_test_cases + incoming_test_cases, runtimes
# TODO: implement it also for outgoing test cases
# TODO: divide this into submethods
def generate_negative_cases_for_incoming_cases(self, isolated_hosts, incoming_test_cases, other_hosts, namespaces):
"""
Generates negative test cases based on desired positive test cases
"""
runtimes = {}
start_time = time.time()
# list of all namespace labels set on other hosts
namespace_labels = [h.namespace_labels for h in other_hosts if isinstance(h, GenericClusterHost)]
namespaces_per_label_strings = get_namespace_label_strings(namespace_labels, namespaces)
namespace_label_resolve_time = time.time()
runtimes["nsLabelResolve"] = namespace_label_resolve_time - start_time
labels_per_namespace = {n.metadata.name: n.metadata.labels for n in namespaces}
overlaps_per_host = {
host: self.get_overlapping_hosts(host, namespaces_per_label_strings, labels_per_namespace,
isolated_hosts + other_hosts)
for host in isolated_hosts}
overlap_calc_time = time.time()
runtimes["overlapCalc"] = overlap_calc_time - namespace_label_resolve_time
cases = []
for host in isolated_hosts:
host_string = str(host)
host_start_time = time.time()
runtimes[host_string] = {}
# Check for hosts that can target these to construct negative cases from
self.logger.debug(overlaps_per_host[host])
allowed_hosts_with_ports = [(test_case.from_host, test_case.port_string)
for test_case in incoming_test_cases if
test_case.to_host in overlaps_per_host[host]]
self.logger.debug("allowed_hosts_with_ports=%s", allowed_hosts_with_ports)
reaching_host_find_time = time.time()
runtimes[host_string]["findReachingHosts"] = reaching_host_find_time - host_start_time
if allowed_hosts_with_ports:
allowed_hosts, _ = zip(*allowed_hosts_with_ports)
ports_per_host = {host: [port for _host, port in allowed_hosts_with_ports if _host == host]
for host in allowed_hosts}
match_all_host = GenericClusterHost({}, {})
if match_all_host in allowed_hosts:
# All hosts are allowed to reach (on some ports or all) => results from ALLOW all
if "*" in ports_per_host[match_all_host]:
self.logger.info("Not generating negative tests for host %s"
"as all connections to it are allowed", host)
else:
cases.append(NetworkTestCase(match_all_host, host,
rand_port(ports_per_host[match_all_host]), False))
runtimes[host_string]["matchAllCase"] = time.time() - reaching_host_find_time
else:
inverted_hosts = set([h for l in [invert_host(host) for host in allowed_hosts] for h in l])
hosts_on_inverted = {h: originalHost for l, originalHost in
[(invert_host(host), host) for host in allowed_hosts] for h in l}
host_inversion_time = time.time()
runtimes[host_string]["hostInversion"] = host_inversion_time - reaching_host_find_time
overlaps_for_inverted_hosts = {
h: self.get_overlapping_hosts(h, namespaces_per_label_strings,
labels_per_namespace, allowed_hosts)
for h in inverted_hosts}
overlap_calc_time = time.time()
runtimes[host_string]["overlapCalc"] = overlap_calc_time - host_inversion_time
self.logger.debug("InvertedHosts: %s", inverted_hosts)
negative_test_targets = [h for h in inverted_hosts if len(overlaps_for_inverted_hosts[h]) <= 1]
self.logger.debug("NegativeTestTargets: %s", negative_test_targets)
# now remove the inverted hosts that are reachable
for target in negative_test_targets:
ports_for_inverted_hosts_original_host = ports_per_host[hosts_on_inverted[target]]
if ports_for_inverted_hosts_original_host:
cases.append(
NetworkTestCase(target, host, ports_for_inverted_hosts_original_host[0], False))
else:
cases.append(NetworkTestCase(target, host, "*", False))
runtimes[host_string]["casesGen"] = time.time() - overlap_calc_time
else:
# No hosts are allowed to reach host -> it should be totally isolated
# => results from default deny policy
cases.append(NetworkTestCase(host, host, "*", False))
runtimes["all"] = time.time() - start_time
return cases, runtimes
def get_overlapping_hosts(self, host, namespaces_per_label_strings, labels_per_namespace, other_hosts):
"""
Returns a list of hosts that might be selected by the same policies
"""
out = [host]
for other in other_hosts:
if host is not other:
namespace_overlap = self.namespaces_overlap(host, namespaces_per_label_strings,
labels_per_namespace, other)
pod_label_overlap = label_selector_overlap(other.pod_labels, host.pod_labels)
if namespace_overlap and pod_label_overlap:
out.append(other)
return out
def namespaces_overlap(self, host, namespaces_per_label_strings, labels_per_namespace, other_host):
"""
Checks whether two hosts have namespaces in common
"""
host_ns = self.resolve_namespaces(host, namespaces_per_label_strings)
other_ns = self.resolve_namespaces(other_host, namespaces_per_label_strings)
if host_ns and other_ns:
return any(ns in other_ns for ns in host_ns)
ns_labels = lookup_namespace_labels(host, labels_per_namespace)
other_ns_labels = lookup_namespace_labels(other_host, labels_per_namespace)
if ns_labels is not None and other_ns_labels is not None:
return label_selector_overlap(ns_labels, other_ns_labels)
return False
def resolve_namespaces(self, host, namespaces_per_label_strings):
"""
Returns the namespace of a given host
"""
self.logger.debug(host)
if isinstance(host, ClusterHost):
return [host.namespace]
labels = labels_to_string(host.namespace_labels)
return namespaces_per_label_strings[labels] if labels in namespaces_per_label_strings else []
def invert_host(host):
"""
Returns a list of either inverted GenericClusterHosts or inverted ClusterHosts
"""
if isinstance(host, GenericClusterHost):
return invert_generic_cluster_host(host)
if isinstance(host, ClusterHost):
return invert_cluster_host(host)
raise ValueError("Host %s is of unsupported type" % host)
def invert_cluster_host(host: ClusterHost):
"""
Returns a list of ClusterHosts with
once inverted pod label selectors,
once inverted namespace label selectors
and once both
"""
if host.pod_labels == {}:
return [ClusterHost("%s%s" % (INVERTED_ATTRIBUTE_PREFIX, host.namespace), {})]
inverted_hosts = [ClusterHost("%s%s" % (INVERTED_ATTRIBUTE_PREFIX, host.namespace), host.pod_labels),
ClusterHost("%s%s" % (INVERTED_ATTRIBUTE_PREFIX, host.namespace),
invert_label_selector(host.pod_labels)),
ClusterHost(host.namespace, invert_label_selector(host.pod_labels))]
return inverted_hosts
def invert_generic_cluster_host(host: GenericClusterHost):
"""
Returns a list of GenericClusterHosts with
once inverted pod label selectors,
once inverted namespace label selectors
and once both
"""
if host == GenericClusterHost({}, {}):
raise ValueError("Cannot invert GenericClusterHost matching all hosts in cluster")
if host.namespace_labels == {}:
return [GenericClusterHost({}, invert_label_selector(host.pod_labels))]
inverted_hosts = [GenericClusterHost(host.namespace_labels, invert_label_selector(host.pod_labels)),
GenericClusterHost(invert_label_selector(host.namespace_labels), host.pod_labels),
GenericClusterHost(invert_label_selector(host.namespace_labels),
invert_label_selector(host.pod_labels))]
return inverted_hosts
def invert_label_selector(labels):
"""
Inverts a label selector
"""
return {"%s%s" % (INVERTED_ATTRIBUTE_PREFIX, k): v for k, v in labels.items()}
def label_selector_overlap(label_selector_1, label_selector_2):
"""
Returns the intersection of two label selectors
"""
if label_selector_1 and label_selector_2:
return any(item in label_selector_2.items() for item in label_selector_1.items())
# if one of the label selector dicts is empty, they always overlap, as empty label selectors select all labels
return True
def lookup_namespace_labels(host, labels_per_namespace):
"""
Returns the namespace labels of a host
"""
if isinstance(host, GenericClusterHost):
return host.namespace_labels
if host.namespace in labels_per_namespace:
return labels_per_namespace[host.namespace]
return None
|
py | 1a51d15fb63ebf8f5081cdc8ab3367de59795b28 | import os
import numpy as np
import pandas as pd
import pytest
from collie_recs.interactions import HDF5Interactions, Interactions
from collie_recs.utils import create_ratings_matrix, pandas_df_to_hdf5
@pytest.fixture()
def df_for_interactions():
# this should exactly match ``ratings_matrix_for_interactions`` below
return pd.DataFrame(data={
'user_id': [0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
'item_id': [1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 0, 3],
'ratings': [1, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 5],
})
@pytest.fixture()
def ratings_matrix_for_interactions():
# this should exactly match ``df_for_interactions`` above
return np.array([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 3, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4, 5, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 3, 4],
[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 5, 0, 0, 0, 0, 0, 0]])
@pytest.fixture()
def sparse_ratings_matrix_for_interactions(df_for_interactions):
return create_ratings_matrix(df=df_for_interactions,
user_col='user_id',
item_col='item_id',
ratings_col='ratings',
sparse=True)
@pytest.fixture()
def df_for_interactions_with_missing_ids():
# we are missing item ID 7
# this should exactly match ``ratings_matrix_for_interactions_with_missing_ids`` below
return pd.DataFrame(data={
'user_id': [0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
'item_id': [1, 2, 2, 3, 4, 5, 6, 0, 8, 9, 0, 3],
'ratings': [1, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 5],
})
@pytest.fixture()
def ratings_matrix_for_interactions_with_missing_ids():
# we are missing item ID 7
# this should exactly match ``df_for_interactions_with_missing_ids`` above
return np.array([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 3, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4, 5, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 5, 0, 0, 0, 0, 0, 0]])
@pytest.fixture()
def sparse_ratings_matrix_for_interactions_with_missing_ids(df_for_interactions_with_missing_ids):
return create_ratings_matrix(df=df_for_interactions_with_missing_ids,
user_col='user_id',
item_col='item_id',
ratings_col='ratings',
sparse=True)
@pytest.fixture()
def df_for_interactions_with_0_ratings():
# ``df_for_interactions`` but with three extra interactions with ratings of 0
return pd.DataFrame(data={
'user_id': [0, 0, 1, 1, 2, 2, 3, 1, 2, 3, 3, 3, 4, 5, 5],
'item_id': [1, 2, 2, 3, 4, 5, 2, 4, 6, 7, 8, 9, 0, 3, 4],
'ratings': [1, 1, 2, 3, 4, 5, 0, 0, 1, 2, 3, 4, 5, 5, 0],
})
@pytest.fixture()
def df_for_interactions_with_duplicates():
# this should match ``df_for_interactions`` with duplicate user/item pairs added at the
# following indices: ``0 & 1`` and ``12 & 13``
return pd.DataFrame(data={
'user_id': [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5],
'item_id': [1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 0, 3, 3],
'ratings': [1, 2, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 5, 4],
})
@pytest.fixture()
def interactions_pandas(df_for_interactions):
return Interactions(users=df_for_interactions['user_id'],
items=df_for_interactions['item_id'],
ratings=df_for_interactions['ratings'],
check_num_negative_samples_is_valid=False)
@pytest.fixture()
def interactions_matrix(ratings_matrix_for_interactions):
return Interactions(mat=ratings_matrix_for_interactions,
check_num_negative_samples_is_valid=False)
@pytest.fixture()
def interactions_sparse_matrix(sparse_ratings_matrix_for_interactions):
return Interactions(mat=sparse_ratings_matrix_for_interactions,
check_num_negative_samples_is_valid=False)
@pytest.fixture()
def hdf5_pandas_df_path(df_for_interactions, tmpdir):
hdf5_path = os.path.join(str(tmpdir), 'df_for_interactions.h5')
pandas_df_to_hdf5(df=df_for_interactions, out_path=hdf5_path, key='interactions')
return hdf5_path
@pytest.fixture()
def hdf5_pandas_df_path_with_meta(df_for_interactions, tmpdir):
hdf5_path = os.path.join(str(tmpdir), 'df_for_interactions_meta.h5')
pandas_df_to_hdf5(df=df_for_interactions, out_path=hdf5_path, key='interactions')
additional_info_df = pd.DataFrame({
'num_users': [df_for_interactions['user_id'].max() + 1],
'num_items': [df_for_interactions['item_id'].max() + 1],
})
pandas_df_to_hdf5(df=additional_info_df, out_path=hdf5_path, key='meta')
return hdf5_path
@pytest.fixture(params=['users', 'items', 'both_users_and_items'])
def hdf5_pandas_df_path_ids_start_at_1(request, df_for_interactions, tmpdir):
incremented_df_for_interactions = df_for_interactions
if 'users' in request.param:
incremented_df_for_interactions['user_id'] += 1
if 'items' in request.param:
incremented_df_for_interactions['item_id'] += 1
hdf5_path = os.path.join(str(tmpdir), 'df_for_interactions_incremented.h5')
pandas_df_to_hdf5(df=incremented_df_for_interactions, out_path=hdf5_path, key='interactions')
return hdf5_path
@pytest.fixture()
def hdf5_interactions(hdf5_pandas_df_path_with_meta):
return HDF5Interactions(hdf5_path=hdf5_pandas_df_path_with_meta,
user_col='user_id',
item_col='item_id')
|
py | 1a51d1e298384bba4f7c545abb957337db1ed259 | import glob
import os
import argparse
from mega_core.config import cfg
from predictor import VIDDemo
parser = argparse.ArgumentParser(description="PyTorch Object Detection Visualization")
parser.add_argument(
"method",
choices=["base", "dff", "fgfa", "rdn", "mega"],
default="base",
type=str,
help="which method to use",
)
parser.add_argument(
"config",
default="configs/vid_R_101_C4_1x.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"checkpoint",
default="R_101.pth",
help="The path to the checkpoint for test.",
)
parser.add_argument(
"--visualize-path",
default="datasets/ILSVRC2015/Data/VID/val/ILSVRC2015_val_00003001",
# default="datasets/ILSVRC2015/Data/VID/snippets/val/ILSVRC2015_val_00003001.mp4",
help="the folder or a video to visualize.",
)
parser.add_argument(
"--suffix",
default=".JPEG",
help="the suffix of the images in the image folder.",
)
parser.add_argument(
"--output-folder",
default="demo/visualization/base",
help="where to store the visulization result.",
)
parser.add_argument(
"--video",
action="store_true",
help="if True, input a video for visualization.",
)
parser.add_argument(
"--output-video",
action="store_true",
help="if True, output a video.",
)
args = parser.parse_args()
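# Merge the base config, the method-specific config, and the checkpoint path.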
cfg.merge_from_file("configs/BASE_RCNN_1gpu.yaml")
cfg.merge_from_file(args.config)
cfg.merge_from_list(["MODEL.WEIGHT", args.checkpoint])
vid_demo = VIDDemo(
cfg,
method=args.method,
confidence_threshold=0.1,
output_folder=args.output_folder
)
if not args.video:
visualization_results = vid_demo.run_on_image_folder(args.visualize_path, suffix=args.suffix)
else:
visualization_results = vid_demo.run_on_video(args.visualize_path)
if not args.output_video:
vid_demo.generate_images(visualization_results)
else:
vid_demo.generate_video(visualization_results) |
py | 1a51d25ecccc325b618836efd0f9f57effa0fc34 | from abc import ABC, abstractmethod
from typing import Union, IO, Sequence, Iterator, List, Any, Generator, ContextManager
from blocks.datafile import DataFile
from contextlib import contextmanager
class FileSystem(ABC):
"""The required interface for any filesystem implementation
See GCSFileSystem for a full implementation. This FileSystem is intended
to be extendable to support cloud file systems, encryption strategies, etc...
"""
@abstractmethod
def ls(self, path: str) -> Sequence[str]:
"""List files correspond to path, including glob wildcards
Parameters
----------
path : str
The path to the file or directory to list; supports wildcards
"""
pass
@abstractmethod
def access(self, paths: Sequence[str]) -> List[DataFile]:
"""Access multiple paths as file-like objects
This allows for optimization like parallel downloads
Parameters
----------
paths: list of str
The paths of the files to access
Returns
-------
files: list of DataFile
A list of datafile instances, one for each input path
"""
pass
@abstractmethod
def store(self, bucket: str, files: Sequence[str]) -> ContextManager:
"""Store multiple data objects
This allows for optimizations when storing several files
Parameters
----------
bucket : str
The GCS bucket to use to store the files
files : list of str
The file names to store
Returns
-------
datafiles : contextmanager
A contextmanager that will yield datafiles and place them
on the filesystem when finished
"""
pass
@abstractmethod
@contextmanager
def open(self, path, mode="rb"):
"""Access path as a file-like object
Parameters
----------
path: str
The path of the file to access
mode: str
The file mode for the opened file
Returns
-------
file: file
A python file opened to the provided path (uses a local temporary copy that is removed)
"""
pass
|
py | 1a51d35271378485715cbc82a80c009fb5407285 | from keras import backend as K
def coloringLoss_OneAccuracy(y_true, y_pred):
shape = K.shape(y_true)
h = K.reshape(shape[1], (1,1))
w = K.reshape(shape[2], (1,1))
denom = 1 / K.cast(K.reshape(K.dot(h, w), (1,1)), dtype = 'float32')
return K.dot(K.reshape(K.sum(K.cast(K.less_equal(K.abs(y_true - y_pred), 1), dtype = 'float32')), (1,1)), denom)
def coloringLoss_ThreeAccuracy(y_true, y_pred):
shape = K.shape(y_true)
h = K.reshape(shape[1], (1,1))
w = K.reshape(shape[2], (1,1))
denom = 1 / K.cast(K.reshape(K.dot(h, w), (1,1)), dtype = 'float32')
return K.dot(K.reshape(K.sum(K.cast(K.less_equal(K.abs(y_true - y_pred), 3), dtype = 'float32')), (1,1)), denom)
def coloringLoss_OneAccuracyYUV(y_true, y_pred):
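    # Average the chroma (U, V) channel accuracies; the luma (Y) accuracy is computed but not used.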
V_acc=coloringLoss_OneAccuracy(y_true[:,:,:,0], y_pred[:,:,:,0])
U_acc=coloringLoss_OneAccuracy(y_true[:,:,:,1], y_pred[:,:,:,1])
Y_acc=coloringLoss_OneAccuracy(y_true[:,:,:,2], y_pred[:,:,:,2])
return (V_acc+U_acc)/2.0
def coloringLoss_ThreeAccuracyYUV(y_true, y_pred):
V_acc=coloringLoss_ThreeAccuracy(y_true[:,:,:,0], y_pred[:,:,:,0])
U_acc=coloringLoss_ThreeAccuracy(y_true[:,:,:,1], y_pred[:,:,:,1])
Y_acc=coloringLoss_ThreeAccuracy(y_true[:,:,:,2], y_pred[:,:,:,2])
return (V_acc+U_acc)/2.0 |
py | 1a51d389715f280b56a1f69bbaf3e4390d8faff4 | """
This module is used to extract and preprocess data from the raw files.
N.B.: some preprocessing is done manually.
"""
import os
import json
data_path = os.path.dirname(__file__) + '/../src/bangla/data/'
def get_word_list():
with open(data_path + 'words.txt', 'r', encoding='utf-8') as file:
words_list = file.read().split('\n')
return [i for i in words_list if i != '']
def get_letters():
with open(data_path + 'bangla_letters.json', mode='r', encoding='utf-8') as file:
letters = json.loads(file.read())
return letters["letters"]
def get_numbers():
with open(data_path + 'bangla_letters.json', mode='r', encoding='utf-8') as file:
letters = json.loads(file.read())
return letters["numbers"]
def descriminate(letter, word_list = get_word_list()):
return list(set([i for i in word_list if i[0] == letter]))
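# e.g. descriminate('a', ['apple', 'ant', 'bee']) returns ['apple', 'ant']
# (built from a de-duplicated set, so the order is not guaranteed).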
"""
print([i for i in get_word_list() if i[0] == '-'])
temp_word_list = [i for i in get_word_list() if ' ' in i]
temp_word_dict = {}
for i in get_letters():
temp = [i for i in descriminate(i, temp_word_list) if len(i.split()) > 1]
temp_word_dict.update({i: temp})
json.dump(temp_word_dict, open(data_path + 'temp_bangla.json', mode='w', encoding='utf-8'), ensure_ascii = False)
"""
word_dict = {}
for i in get_letters():
descriminate_val = descriminate(i)
if len(descriminate_val) > 0: word_dict.update({i: descriminate_val})
else: word_dict.update({i: i})
for i in get_numbers():
word_dict.update({i: i})
json.dump(word_dict, open(data_path + 'bangla.json', mode='w', encoding='utf-8'), ensure_ascii = False) |
py | 1a51d43dfb94c982d446be6015a08ebf11bbf167 | import math
import pyaudio
import itertools
import numpy as np
from pygame import midi
BUFFER_SIZE = 256
SAMPLE_RATE = 44100
NOTE_AMP = 0.1
# -- HELPER FUNCTIONS --
def get_sin_oscillator(freq=55, amp=1, sample_rate=SAMPLE_RATE):
increment = (2 * math.pi * freq)/ sample_rate
return (math.sin(v) * amp * NOTE_AMP \
for v in itertools.count(start=0, step=increment))
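# For example, with freq=440 and SAMPLE_RATE=44100 the phase increment is
# 2*pi*440/44100 ~= 0.0627 radians per sample, so one full sine cycle spans
# roughly 100 samples.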
def get_samples(notes_dict, num_samples=BUFFER_SIZE):
return [sum([int(next(osc) * 32767) \
for _, osc in notes_dict.items()]) \
for _ in range(num_samples)]
# -- INITIALIZION --
midi.init()
default_id = midi.get_default_input_id()
midi_input = midi.Input(device_id=default_id)
stream = pyaudio.PyAudio().open(
rate=SAMPLE_RATE,
channels=1,
format=pyaudio.paInt16,
output=True,
frames_per_buffer=BUFFER_SIZE
)
# -- RUN THE SYNTH --
try:
print("Starting...")
notes_dict = {}
while True:
if notes_dict:
# Play the notes
samples = get_samples(notes_dict)
samples = np.int16(samples).tobytes()
stream.write(samples)
if midi_input.poll():
# Add or remove notes from notes_dict
for event in midi_input.read(num_events=16):
(status, note, vel, _), _ = event
if status == 0x80 and note in notes_dict:
del notes_dict[note]
elif status == 0x90 and note not in notes_dict:
freq = midi.midi_to_frequency(note)
notes_dict[note] = get_sin_oscillator(freq=freq, amp=vel/127)
except KeyboardInterrupt as err:
midi_input.close()
stream.close()
print("Stopping...")
|
py | 1a51d4671aeee708836fc73309e322fdbf938ca1 | import pytest
from src import create_app
@pytest.fixture
def app():
app = create_app()
return app
myapp = create_app()
|
py | 1a51d51fc814158ce8d1df8f0420856de7b4705a | # -*- coding: utf-8 -*-
"""Cisco DNA Center GetAuditlogParentRecords data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorF8E3A0674C15Fd58Cd78F42Dca37C7C(object):
"""GetAuditlogParentRecords request schema definition."""
def __init__(self):
super(JSONSchemaValidatorF8E3A0674C15Fd58Cd78F42Dca37C7C, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"items": {
"properties": {
"additionalDetails": {
"type": "object"
},
"category": {
"type": "string"
},
"childCount": {
"type": "number"
},
"ciscoDnaEventLink": {
"type": "string"
},
"context": {
"type": "string"
},
"description":
{
"type": "string"
},
"details": {
"type": "object"
},
"domain": {
"type": "string"
},
"eventHierarchy": {
"type": "string"
},
"eventId": {
"type": "string"
},
"i18n": {
"type": "string"
},
"instanceId": {
"type": "string"
},
"message": {
"type": "string"
},
"messageParams": {
"type": "string"
},
"name": {
"type": "string"
},
"namespace": {
"type": "string"
},
"network": {
"type": "string"
},
"note": {
"type": "string"
},
"parentInstanceId": {
"type": "string"
},
"severity": {
"type": "integer"
},
"source": {
"type": "string"
},
"subDomain": {
"type": "string"
},
"tags": {
"type": "array"
},
"tenantId": {
"type": "string"
},
"timestamp": {
"type": "integer"
},
"tntId": {
"type": "string"
},
"type": {
"type": "string"
},
"userId": {
"type": "string"
},
"version": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
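# Illustrative usage sketch (an assumption, not part of the generated module):
# the validator accepts a list of audit-log record objects and raises
# MalformedRequest when the payload does not match the schema above, e.g.
#   JSONSchemaValidatorF8E3A0674C15Fd58Cd78F42Dca37C7C().validate([
#       {"category": "INFO", "severity": 1, "timestamp": 1609459200}
#   ])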
|
py | 1a51d528235c5ba8f5b8e5a85782555c1f0f6f8d | import os
from os.path import exists
import pytest
from pip._internal.cli.status_codes import PREVIOUS_BUILD_DIR_ERROR
from pip._internal.utils.marker_files import write_delete_marker_file
from tests.lib import need_mercurial
from tests.lib.local_repos import local_checkout
def test_cleanup_after_install(script, data):
"""
Test clean up after installing a package.
"""
script.pip(
'install', '--no-index',
'--find-links={}'.format(data.find_links),
'simple'
)
build = script.venv_path / "build"
src = script.venv_path / "src"
assert not exists(build), "build/ dir still exists: {}".format(build)
    assert not exists(src), "unexpected src/ dir exists: {}".format(src)
script.assert_no_temp()
@pytest.mark.network
def test_no_clean_option_blocks_cleaning_after_install(script, data):
"""
Test --no-clean option blocks cleaning after install
"""
build = script.base_path / 'pip-build'
script.pip(
'install', '--no-clean', '--no-index', '--build', build,
'--find-links={}'.format(data.find_links), 'simple', expect_temp=True,
)
assert exists(build)
@pytest.mark.network
@need_mercurial
def test_cleanup_after_install_editable_from_hg(script, tmpdir):
"""
Test clean up after cloning from Mercurial.
"""
requirement = '{}#egg=ScriptTest'.format(
local_checkout('hg+https://bitbucket.org/ianb/scripttest', tmpdir)
)
script.pip('install', '-e', requirement)
build = script.venv_path / 'build'
src = script.venv_path / 'src'
assert not exists(build), "build/ dir still exists: {}".format(build)
assert exists(src), "expected src/ dir doesn't exist: {}".format(src)
script.assert_no_temp()
def test_cleanup_after_install_from_local_directory(script, data):
"""
Test clean up after installing from a local directory.
"""
to_install = data.packages.joinpath("FSPkg")
script.pip('install', to_install)
build = script.venv_path / 'build'
src = script.venv_path / 'src'
assert not exists(build), "unexpected build/ dir exists: {}".format(build)
    assert not exists(src), "unexpected src/ dir exists: {}".format(src)
script.assert_no_temp()
def test_cleanup_req_satisfied_no_name(script, data):
"""
Test cleanup when req is already satisfied, and req has no 'name'
"""
# this test confirms Issue #420 is fixed
# reqs with no 'name' that were already satisfied were leaving behind tmp
# build dirs
# 2 examples of reqs that would do this
# 1) https://bitbucket.org/ianb/initools/get/tip.zip
# 2) parent-0.1.tar.gz
dist = data.packages.joinpath("parent-0.1.tar.gz")
script.pip('install', dist)
script.pip('install', dist)
build = script.venv_path / 'build'
assert not exists(build), "unexpected build/ dir exists: %s" % build
script.assert_no_temp()
def test_cleanup_after_install_exception(script, data):
"""
Test clean up after a 'setup.py install' exception.
"""
# broken==0.2broken fails during install; see packages readme file
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.2broken',
expect_error=True,
)
build = script.venv_path / 'build'
assert not exists(build), "build/ dir still exists: %s" % result.stdout
script.assert_no_temp()
def test_cleanup_after_egg_info_exception(script, data):
"""
Test clean up after a 'setup.py egg_info' exception.
"""
# brokenegginfo fails during egg_info; see packages readme file
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'brokenegginfo==0.1',
expect_error=True,
)
build = script.venv_path / 'build'
assert not exists(build), "build/ dir still exists: %s" % result.stdout
script.assert_no_temp()
@pytest.mark.network
def test_cleanup_prevented_upon_build_dir_exception(script, data):
"""
Test no cleanup occurs after a PreviousBuildDirError
"""
build = script.venv_path / 'build'
build_simple = build / 'simple'
os.makedirs(build_simple)
write_delete_marker_file(build_simple)
build_simple.joinpath("setup.py").write_text("#")
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'simple',
'--build', build,
expect_error=True, expect_temp=True,
)
assert result.returncode == PREVIOUS_BUILD_DIR_ERROR, str(result)
assert "pip can't proceed" in result.stderr, str(result)
assert exists(build_simple), str(result)
@pytest.mark.network
def test_pep517_no_legacy_cleanup(script, data, with_wheel):
"""Test a PEP 517 failed build does not attempt a legacy cleanup"""
to_install = data.packages.joinpath('pep517_wrapper_buildsys')
script.environ["PIP_TEST_FAIL_BUILD_WHEEL"] = "1"
res = script.pip(
'install', '-f', data.find_links, to_install,
expect_error=True
)
# Must not have built the package
expected = "Failed building wheel for pep517-wrapper-buildsys"
assert expected in str(res)
# Must not have attempted legacy cleanup
assert "setup.py clean" not in str(res)
|
py | 1a51d5e4875f9b958d2d52b75b27d982ffbded45 | import numpy as np
import matplotlib.pyplot as plt
from audio import spec2wav, wav2spec, read_wav, write_wav
if __name__ == '__main__':
sr = 22050
n_fft = 512
win_length = 400
hop_length = 80
duration = 2 # sec
wav = read_wav( "H:\\cs230\\wav_x\\1_1.wav", sr, duration )
spec, _ = wav2spec(wav, n_fft, win_length, hop_length, False)
converted_wav = spec2wav(spec, n_fft, win_length, hop_length, 600)
write_wav(converted_wav, sr, 'a.wav')
plt.pcolormesh(spec)
plt.ylabel('Frequency')
plt.xlabel('Time')
plt.savefig("a.png")
|
py | 1a51d5f9a427216c4602d0f6a90763f12922f28e | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_NV_video_capture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_NV_video_capture',error_checker=_errors._error_checker)
GLX_DEVICE_ID_NV=_C('GLX_DEVICE_ID_NV',0x20CD)
GLX_NUM_VIDEO_CAPTURE_SLOTS_NV=_C('GLX_NUM_VIDEO_CAPTURE_SLOTS_NV',0x20CF)
GLX_UNIQUE_ID_NV=_C('GLX_UNIQUE_ID_NV',0x20CE)
@_f
@_p.types(_cs.c_int,ctypes.POINTER(_cs.Display),_cs.c_uint,_cs.GLXVideoCaptureDeviceNV)
def glXBindVideoCaptureDeviceNV(dpy,video_capture_slot,device):pass
@_f
@_p.types(ctypes.POINTER(_cs.GLXVideoCaptureDeviceNV),ctypes.POINTER(_cs.Display),_cs.c_int,ctypes.POINTER(_cs.c_int))
def glXEnumerateVideoCaptureDevicesNV(dpy,screen,nelements):pass
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXVideoCaptureDeviceNV)
def glXLockVideoCaptureDeviceNV(dpy,device):pass
@_f
@_p.types(_cs.c_int,ctypes.POINTER(_cs.Display),_cs.GLXVideoCaptureDeviceNV,_cs.c_int,ctypes.POINTER(_cs.c_int))
def glXQueryVideoCaptureDeviceNV(dpy,device,attribute,value):pass
@_f
@_p.types(None,ctypes.POINTER(_cs.Display),_cs.GLXVideoCaptureDeviceNV)
def glXReleaseVideoCaptureDeviceNV(dpy,device):pass
|
py | 1a51d613e851dd43727bd73fe3b3837c2cc9a648 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .rigolDS1000Z import *
class rigolDS1074Z(rigolDS1000Z):
"Rigol DS1074Z IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DS1074Z')
super(rigolDS1074Z, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 70e6
self._init_channels()
|
py | 1a51d68808b171014c776054f6bba3924cfc4202 | import contextlib
import ctypes
import json
import os
import shutil
import struct
import subprocess
import sys
import tempfile
import time
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac
from .aes import (
aes_cbc_decrypt_bytes,
aes_gcm_decrypt_and_verify_bytes,
unpad_pkcs7,
)
from .compat import compat_b64decode, compat_cookiejar_Cookie
from .dependencies import (
_SECRETSTORAGE_UNAVAILABLE_REASON,
secretstorage,
sqlite3,
)
from .minicurses import MultilinePrinter, QuietMultilinePrinter
from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
class YDLLogger:
def __init__(self, ydl=None):
self._ydl = ydl
def debug(self, message):
if self._ydl:
self._ydl.write_debug(message)
def info(self, message):
if self._ydl:
self._ydl.to_screen(f'[Cookies] {message}')
def warning(self, message, only_once=False):
if self._ydl:
self._ydl.report_warning(message, only_once)
def error(self, message):
if self._ydl:
self._ydl.report_error(message)
class ProgressBar(MultilinePrinter):
_DELAY, _timer = 0.1, 0
def print(self, message):
if time.time() - self._timer > self._DELAY:
self.print_at_line(f'[Cookies] {message}', 0)
self._timer = time.time()
def progress_bar(self):
"""Return a context manager with a print method. (Optional)"""
# Do not print to files/pipes, loggers, or when --no-progress is used
if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'):
return
file = self._ydl._out_files['error']
try:
if not file.isatty():
return
except BaseException:
return
return self.ProgressBar(file, preserve_output=False)
def _create_progress_bar(logger):
if hasattr(logger, 'progress_bar'):
printer = logger.progress_bar()
if printer:
return printer
printer = QuietMultilinePrinter()
printer.print = lambda _: None
return printer
def load_cookies(cookie_file, browser_specification, ydl):
cookie_jars = []
if browser_specification is not None:
browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
if cookie_file is not None:
is_filename = YoutubeDLCookieJar.is_path(cookie_file)
if is_filename:
cookie_file = expand_path(cookie_file)
jar = YoutubeDLCookieJar(cookie_file)
if not is_filename or os.access(cookie_file, os.R_OK):
jar.load(ignore_discard=True, ignore_expires=True)
cookie_jars.append(jar)
return _merge_cookie_jars(cookie_jars)
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
if browser_name == 'firefox':
return _extract_firefox_cookies(profile, logger)
elif browser_name == 'safari':
return _extract_safari_cookies(profile, logger)
elif browser_name in CHROMIUM_BASED_BROWSERS:
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
else:
raise ValueError(f'unknown browser: {browser_name}')
def _extract_firefox_cookies(profile, logger):
logger.info('Extracting cookies from firefox')
if not sqlite3:
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
if profile is None:
search_root = _firefox_browser_dir()
elif _is_path(profile):
search_root = profile
else:
search_root = os.path.join(_firefox_browser_dir(), profile)
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
jar = YoutubeDLCookieJar()
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
logger.info(f'Extracted {len(jar)} cookies from firefox')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _firefox_browser_dir():
if sys.platform in ('linux', 'linux2'):
return os.path.expanduser('~/.mozilla/firefox')
elif sys.platform == 'win32':
return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/Firefox')
else:
raise ValueError(f'unsupported platform: {sys.platform}')
def _get_chromium_based_browser_settings(browser_name):
# https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md
if sys.platform in ('linux', 'linux2'):
config = _config_home()
browser_dir = {
'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(config, 'google-chrome'),
'chromium': os.path.join(config, 'chromium'),
'edge': os.path.join(config, 'microsoft-edge'),
'opera': os.path.join(config, 'opera'),
'vivaldi': os.path.join(config, 'vivaldi'),
}[browser_name]
elif sys.platform == 'win32':
appdata_local = os.path.expandvars('%LOCALAPPDATA%')
appdata_roaming = os.path.expandvars('%APPDATA%')
browser_dir = {
'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'),
'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'),
'chromium': os.path.join(appdata_local, R'Chromium\User Data'),
'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
}[browser_name]
elif sys.platform == 'darwin':
appdata = os.path.expanduser('~/Library/Application Support')
browser_dir = {
'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(appdata, 'Google/Chrome'),
'chromium': os.path.join(appdata, 'Chromium'),
'edge': os.path.join(appdata, 'Microsoft Edge'),
'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
'vivaldi': os.path.join(appdata, 'Vivaldi'),
}[browser_name]
else:
raise ValueError(f'unsupported platform: {sys.platform}')
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
keyring_name = {
'brave': 'Brave',
'chrome': 'Chrome',
'chromium': 'Chromium',
'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
}[browser_name]
browsers_without_profiles = {'opera'}
return {
'browser_dir': browser_dir,
'keyring_name': keyring_name,
'supports_profiles': browser_name not in browsers_without_profiles
}
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
logger.info(f'Extracting cookies from {browser_name}')
if not sqlite3:
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
config = _get_chromium_based_browser_settings(browser_name)
if profile is None:
search_root = config['browser_dir']
elif _is_path(profile):
search_root = profile
config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile
else:
if config['supports_profiles']:
search_root = os.path.join(config['browser_dir'], profile)
else:
logger.error(f'{browser_name} does not support profiles')
search_root = config['browser_dir']
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.connection.text_factory = bytes
column_names = _get_column_names(cursor, 'cookies')
secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies')
jar = YoutubeDLCookieJar()
failed_cookies = 0
unencrypted_cookies = 0
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, line in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
is_encrypted, cookie = _process_chrome_cookie(decryptor, *line)
if not cookie:
failed_cookies += 1
continue
elif not is_encrypted:
unencrypted_cookies += 1
jar.set_cookie(cookie)
if failed_cookies > 0:
failed_message = f' ({failed_cookies} could not be decrypted)'
else:
failed_message = ''
logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
counts = decryptor.cookie_counts.copy()
counts['unencrypted'] = unencrypted_cookies
logger.debug(f'cookie version breakdown: {counts}')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure):
host_key = host_key.decode()
name = name.decode()
value = value.decode()
path = path.decode()
is_encrypted = not value and encrypted_value
if is_encrypted:
value = decryptor.decrypt(encrypted_value)
if value is None:
return is_encrypted, None
return is_encrypted, compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
comment=None, comment_url=None, rest={})
class ChromeCookieDecryptor:
"""
Overview:
Linux:
- cookies are either v10 or v11
- v10: AES-CBC encrypted with a fixed key
- v11: AES-CBC encrypted with an OS protected key (keyring)
- v11 keys can be stored in various places depending on the activate desktop environment [2]
Mac:
- cookies are either v10 or not v10
- v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux
- not v10: 'old data' stored as plaintext
Windows:
- cookies are either v10 or not v10
- v10: AES-GCM encrypted with a key which is encrypted with DPAPI
- not v10: encrypted with DPAPI
Sources:
- [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/
- [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc
- KeyStorageLinux::CreateService
"""
def decrypt(self, encrypted_value):
raise NotImplementedError('Must be implemented by sub classes')
@property
def cookie_counts(self):
raise NotImplementedError('Must be implemented by sub classes')
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
if sys.platform in ('linux', 'linux2'):
return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
elif sys.platform == 'darwin':
return MacChromeCookieDecryptor(browser_keyring_name, logger)
elif sys.platform == 'win32':
return WindowsChromeCookieDecryptor(browser_root, logger)
else:
raise NotImplementedError(f'Chrome cookie decryption is not supported on this platform: {sys.platform}')
class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger, *, keyring=None):
self._logger = logger
self._v10_key = self.derive_key(b'peanuts')
password = _get_linux_keyring_password(browser_keyring_name, keyring, logger)
self._v11_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
elif version == b'v11':
self._cookie_counts['v11'] += 1
if self._v11_key is None:
self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)
else:
self._cookie_counts['other'] += 1
return None
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger):
self._logger = logger
password = _get_mac_keyring_password(browser_keyring_name, logger)
self._v10_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
else:
self._cookie_counts['other'] += 1
# other prefixes are considered 'old data' which were stored as plaintext
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return encrypted_value
class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_root, logger):
self._logger = logger
self._v10_key = _get_windows_v10_key(browser_root, logger)
self._cookie_counts = {'v10': 0, 'other': 0}
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
# kNonceLength
nonce_length = 96 // 8
# boringssl
# EVP_AEAD_AES_GCM_TAG_LEN
authentication_tag_length = 16
raw_ciphertext = ciphertext
nonce = raw_ciphertext[:nonce_length]
ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
authentication_tag = raw_ciphertext[-authentication_tag_length:]
return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
else:
self._cookie_counts['other'] += 1
# any other prefix means the data is DPAPI encrypted
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
return _decrypt_windows_dpapi(encrypted_value, self._logger).decode()
def _extract_safari_cookies(profile, logger):
if profile is not None:
logger.error('safari does not support profiles')
if sys.platform != 'darwin':
raise ValueError(f'unsupported platform: {sys.platform}')
cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
logger.debug('Trying secondary cookie location')
cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
raise FileNotFoundError('could not find safari cookies database')
with open(cookies_path, 'rb') as f:
cookies_data = f.read()
jar = parse_safari_cookies(cookies_data, logger=logger)
logger.info(f'Extracted {len(jar)} cookies from safari')
return jar
class ParserError(Exception):
pass
class DataParser:
def __init__(self, data, logger):
self._data = data
self.cursor = 0
self._logger = logger
def read_bytes(self, num_bytes):
if num_bytes < 0:
raise ParserError(f'invalid read of {num_bytes} bytes')
end = self.cursor + num_bytes
if end > len(self._data):
raise ParserError('reached end of input')
data = self._data[self.cursor:end]
self.cursor = end
return data
def expect_bytes(self, expected_value, message):
value = self.read_bytes(len(expected_value))
if value != expected_value:
raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
def read_uint(self, big_endian=False):
data_format = '>I' if big_endian else '<I'
return struct.unpack(data_format, self.read_bytes(4))[0]
def read_double(self, big_endian=False):
data_format = '>d' if big_endian else '<d'
return struct.unpack(data_format, self.read_bytes(8))[0]
def read_cstring(self):
buffer = []
while True:
c = self.read_bytes(1)
if c == b'\x00':
return b''.join(buffer).decode()
else:
buffer.append(c)
def skip(self, num_bytes, description='unknown'):
if num_bytes > 0:
self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}')
elif num_bytes < 0:
raise ParserError(f'invalid skip of {num_bytes} bytes')
def skip_to(self, offset, description='unknown'):
self.skip(offset - self.cursor, description)
def skip_to_end(self, description='unknown'):
self.skip_to(len(self._data), description)
def _mac_absolute_time_to_posix(timestamp):
return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
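# e.g. a Mac absolute time of 0 corresponds to 2001-01-01T00:00:00Z,
# which is POSIX timestamp 978307200.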
def _parse_safari_cookies_header(data, logger):
p = DataParser(data, logger)
p.expect_bytes(b'cook', 'database signature')
number_of_pages = p.read_uint(big_endian=True)
page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)]
return page_sizes, p.cursor
def _parse_safari_cookies_page(data, jar, logger):
p = DataParser(data, logger)
p.expect_bytes(b'\x00\x00\x01\x00', 'page signature')
number_of_cookies = p.read_uint()
record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
if number_of_cookies == 0:
logger.debug(f'a cookies page of size {len(data)} has no cookies')
return
p.skip_to(record_offsets[0], 'unknown page header field')
with _create_progress_bar(logger) as progress_bar:
for i, record_offset in enumerate(record_offsets):
progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}')
p.skip_to(record_offset, 'space between records')
record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger)
p.read_bytes(record_length)
p.skip_to_end('space in between pages')
def _parse_safari_cookies_record(data, jar, logger):
p = DataParser(data, logger)
record_size = p.read_uint()
p.skip(4, 'unknown record field 1')
flags = p.read_uint()
is_secure = bool(flags & 0x0001)
p.skip(4, 'unknown record field 2')
domain_offset = p.read_uint()
name_offset = p.read_uint()
path_offset = p.read_uint()
value_offset = p.read_uint()
p.skip(8, 'unknown record field 3')
expiration_date = _mac_absolute_time_to_posix(p.read_double())
_creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841
try:
p.skip_to(domain_offset)
domain = p.read_cstring()
p.skip_to(name_offset)
name = p.read_cstring()
p.skip_to(path_offset)
path = p.read_cstring()
p.skip_to(value_offset)
value = p.read_cstring()
except UnicodeDecodeError:
logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
return record_size
p.skip_to(record_size, 'space at the end of the record')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
return record_size
def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
"""
References:
- https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
- this data appears to be out of date but the important parts of the database structure is the same
- there are a few bytes here and there which are skipped during parsing
"""
if jar is None:
jar = YoutubeDLCookieJar()
page_sizes, body_start = _parse_safari_cookies_header(data, logger)
p = DataParser(data[body_start:], logger)
for page_size in page_sizes:
_parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
p.skip_to_end('footer')
return jar
class _LinuxDesktopEnvironment(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h
DesktopEnvironment
"""
OTHER = auto()
CINNAMON = auto()
GNOME = auto()
KDE = auto()
PANTHEON = auto()
UNITY = auto()
XFCE = auto()
class _LinuxKeyring(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h
SelectedLinuxBackend
"""
KWALLET = auto()
GNOMEKEYRING = auto()
BASICTEXT = auto()
SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys()
def _get_linux_desktop_environment(env):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
GetDesktopEnvironment
"""
xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
desktop_session = env.get('DESKTOP_SESSION', None)
if xdg_current_desktop is not None:
xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()
if xdg_current_desktop == 'Unity':
if desktop_session is not None and 'gnome-fallback' in desktop_session:
return _LinuxDesktopEnvironment.GNOME
else:
return _LinuxDesktopEnvironment.UNITY
elif xdg_current_desktop == 'GNOME':
return _LinuxDesktopEnvironment.GNOME
elif xdg_current_desktop == 'X-Cinnamon':
return _LinuxDesktopEnvironment.CINNAMON
elif xdg_current_desktop == 'KDE':
return _LinuxDesktopEnvironment.KDE
elif xdg_current_desktop == 'Pantheon':
return _LinuxDesktopEnvironment.PANTHEON
elif xdg_current_desktop == 'XFCE':
return _LinuxDesktopEnvironment.XFCE
elif desktop_session is not None:
if desktop_session in ('mate', 'gnome'):
return _LinuxDesktopEnvironment.GNOME
elif 'kde' in desktop_session:
return _LinuxDesktopEnvironment.KDE
elif 'xfce' in desktop_session:
return _LinuxDesktopEnvironment.XFCE
else:
if 'GNOME_DESKTOP_SESSION_ID' in env:
return _LinuxDesktopEnvironment.GNOME
elif 'KDE_FULL_SESSION' in env:
return _LinuxDesktopEnvironment.KDE
return _LinuxDesktopEnvironment.OTHER
def _choose_linux_keyring(logger):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc
SelectBackend
"""
desktop_environment = _get_linux_desktop_environment(os.environ)
logger.debug(f'detected desktop environment: {desktop_environment.name}')
if desktop_environment == _LinuxDesktopEnvironment.KDE:
linux_keyring = _LinuxKeyring.KWALLET
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
linux_keyring = _LinuxKeyring.BASICTEXT
else:
linux_keyring = _LinuxKeyring.GNOMEKEYRING
return linux_keyring
def _get_kwallet_network_wallet(logger):
""" The name of the wallet used to store network passwords.
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc
KWalletDBus::NetworkWallet
which does a dbus call to the following function:
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
Wallet::NetworkWallet
"""
default_wallet = 'kdewallet'
try:
proc = Popen([
'dbus-send', '--session', '--print-reply=literal',
'--dest=org.kde.kwalletd5',
'/modules/kwalletd5',
'org.kde.KWallet.networkWallet'
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.warning('failed to read NetworkWallet')
return default_wallet
else:
network_wallet = stdout.decode().strip()
logger.debug(f'NetworkWallet = "{network_wallet}"')
return network_wallet
except Exception as e:
logger.warning(f'exception while obtaining NetworkWallet: {e}')
return default_wallet
def _get_kwallet_password(browser_keyring_name, logger):
logger.debug('using kwallet-query to obtain password from kwallet')
if shutil.which('kwallet-query') is None:
logger.error('kwallet-query command not found. KWallet and kwallet-query '
                     'must be installed to read from KWallet. kwallet-query should be '
'included in the kwallet package for your distribution')
return b''
network_wallet = _get_kwallet_network_wallet(logger)
try:
proc = Popen([
'kwallet-query',
'--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys',
network_wallet
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.error(f'kwallet-query failed with return code {proc.returncode}. Please consult '
'the kwallet-query man page for details')
return b''
else:
if stdout.lower().startswith(b'failed to read'):
logger.debug('failed to read password from kwallet. Using empty string instead')
# this sometimes occurs in KDE because chrome does not check hasEntry and instead
# just tries to read the value (which kwallet returns "") whereas kwallet-query
# checks hasEntry. To verify this:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
# while starting chrome.
# this may be a bug as the intended behaviour is to generate a random password and store
# it, but that doesn't matter here.
return b''
else:
logger.debug('password found')
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running kwallet-query: {error_to_str(e)}')
return b''
def _get_gnome_keyring_password(browser_keyring_name, logger):
if not secretstorage:
logger.error(f'secretstorage not available {_SECRETSTORAGE_UNAVAILABLE_REASON}')
return b''
# the Gnome keyring does not seem to organise keys in the same way as KWallet,
# using `dbus-monitor` during startup, it can be observed that chromium lists all keys
# and presumably searches for its key in the list. It appears that we must do the same.
# https://github.com/jaraco/keyring/issues/556
with contextlib.closing(secretstorage.dbus_init()) as con:
col = secretstorage.get_default_collection(con)
for item in col.get_all_items():
if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret()
else:
logger.error('failed to read from keyring')
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
# note: chrome/chromium can be run with the following flags to determine which keyring backend
# it has chosen to use
# chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_
# Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection
# will not be sufficient in all cases.
keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger)
logger.debug(f'Chosen keyring: {keyring.name}')
if keyring == _LinuxKeyring.KWALLET:
return _get_kwallet_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.GNOMEKEYRING:
return _get_gnome_keyring_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.BASICTEXT:
# when basic text is chosen, all cookies are stored as v10 (so no keyring password is required)
return None
assert False, f'Unknown keyring {keyring}'
def _get_mac_keyring_password(browser_keyring_name, logger):
logger.debug('using find-generic-password to obtain password from OSX keychain')
try:
proc = Popen(
['security', 'find-generic-password',
'-w', # write password to stdout
'-a', browser_keyring_name, # match 'account'
'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
return None
def _get_windows_v10_key(browser_root, logger):
path = _find_most_recently_used_file(browser_root, 'Local State', logger)
if path is None:
logger.error('could not find local state file')
return None
logger.debug(f'Found local state file at "{path}"')
with open(path, encoding='utf8') as f:
data = json.load(f)
try:
base64_key = data['os_crypt']['encrypted_key']
except KeyError:
logger.error('no encrypted key in Local State')
return None
encrypted_key = compat_b64decode(base64_key)
prefix = b'DPAPI'
if not encrypted_key.startswith(prefix):
logger.error('invalid key')
return None
return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)
def pbkdf2_sha1(password, salt, iterations, key_length):
return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
try:
return plaintext.decode()
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
try:
plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
except ValueError:
logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
return None
try:
return plaintext.decode()
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_windows_dpapi(ciphertext, logger):
"""
References:
- https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
"""
from ctypes.wintypes import DWORD
class DATA_BLOB(ctypes.Structure):
_fields_ = [('cbData', DWORD),
('pbData', ctypes.POINTER(ctypes.c_char))]
buffer = ctypes.create_string_buffer(ciphertext)
blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
blob_out = DATA_BLOB()
ret = ctypes.windll.crypt32.CryptUnprotectData(
ctypes.byref(blob_in), # pDataIn
None, # ppszDataDescr: human readable description of pDataIn
None, # pOptionalEntropy: salt?
None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display
0, # dwFlags
ctypes.byref(blob_out) # pDataOut
)
if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True)
return None
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
ctypes.windll.kernel32.LocalFree(blob_out.pbData)
return result
def _config_home():
return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
def _open_database_copy(database_path, tmpdir):
# cannot open sqlite databases if they are already in use (e.g. by the browser)
database_copy_path = os.path.join(tmpdir, 'temporary.sqlite')
shutil.copy(database_path, database_copy_path)
conn = sqlite3.connect(database_copy_path)
return conn.cursor()
def _get_column_names(cursor, table_name):
table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
return [row[1].decode() for row in table_info]
def _find_most_recently_used_file(root, filename, logger):
# if there are multiple browser profiles, take the most recently used one
i, paths = 0, []
with _create_progress_bar(logger) as progress_bar:
for curr_root, dirs, files in os.walk(root):
for file in files:
i += 1
progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
if file == filename:
paths.append(os.path.join(curr_root, file))
return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
def _merge_cookie_jars(jars):
output_jar = YoutubeDLCookieJar()
for jar in jars:
for cookie in jar:
output_jar.set_cookie(cookie)
if jar.filename is not None:
output_jar.filename = jar.filename
return output_jar
def _is_path(value):
return os.path.sep in value
def _parse_browser_specification(browser_name, profile=None, keyring=None):
if browser_name not in SUPPORTED_BROWSERS:
raise ValueError(f'unsupported browser: "{browser_name}"')
if keyring not in (None, *SUPPORTED_KEYRINGS):
raise ValueError(f'unsupported keyring: "{keyring}"')
if profile is not None and _is_path(profile):
profile = os.path.expanduser(profile)
return browser_name, profile, keyring
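# Illustrative usage sketch (an assumption, not part of the original module):
#   jar = extract_cookies_from_browser('firefox', logger=YDLLogger())
#   jar = extract_cookies_from_browser('chrome', profile='Default', keyring='BASICTEXT')
# Each call returns a YoutubeDLCookieJar, which _merge_cookie_jars can combine.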
|
py | 1a51d6e75021e9625433a67fdad34c8f71762d8e | import math
def aire_triangle(a : float,b : float,c : float) -> float:
"""Précondition : (a>0) and (b>0) and (c>0)
Précondition : les côtés a, b et c définissent bien un triangle.
retourne l'aire du triangle dont les côtés sont de
longueurs a, b, et c.
"""
    # Semi-perimeter
p : float = (a + b + c) / 2
return math.sqrt(p * (p - a) * (p - b) * (p - c))
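# Worked example of Heron's formula: for sides 3, 4, 5 the semi-perimeter is
# p = 6, so the area is sqrt(6 * 3 * 2 * 1) = sqrt(36) = 6 (see the first test below).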
# Test suite (Step 3)
assert aire_triangle(3, 4, 5) == 6.0
assert aire_triangle(13, 14, 15) == 84.0
assert aire_triangle(1, 1, 1) == math.sqrt(3 / 16)
assert aire_triangle(2, 3, 5) == 0.0 # this is a degenerate (flat) triangle...
|
py | 1a51d9814c9e385aeea1b44cfe5f64351acab886 | import os
import glob
import pandas as pd
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
game_frame = pd.read_csv(game_file, names=[
'type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event'])
game_frames.append(game_frame)
games = pd.concat(game_frames)
games.loc[games['multi5'] == '??', ['multi5']] = ''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
games['type'] = pd.Categorical(games['type'])  # avoid chained indexing so the assignment is not applied to a copy
|
py | 1a51da15485970e51791b381a265ee8a6247f4b1 | from collections import OrderedDict
from django.conf import settings
from systems.models.base import BaseModel
from utility.data import Collection, ensure_list, flatten, clean_dict, normalize_value, format_value, prioritize, dump_json
import re
import copy
import yaml
import logging
logger = logging.getLogger(__name__)
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
class BaseProfileComponent(object):
def __init__(self, name, profile):
self.name = name
self.profile = profile
self.command = profile.command
self.manager = self.command.manager
def priority(self):
return 10
def ensure_module_config(self):
# Override in subclass if needed
return False
def get_names(self, relation):
return [ getattr(x, x.facade.key()) for x in relation.all() ]
def get_info(self, name, config):
return self.profile.get_info(name, config)
def pop_info(self, name, config):
return self.profile.pop_info(name, config)
def get_value(self, name, config):
return self.profile.get_value(name, config)
def pop_value(self, name, config):
return self.profile.pop_value(name, config)
def get_values(self, name, config):
return self.profile.get_values(name, config)
def pop_values(self, name, config):
return self.profile.pop_values(name, config)
def interpolate(self, config, **replacements):
return self.profile.interpolate(config, replacements)
def get_variables(self, instance, variables = None):
if not variables:
variables = {}
return self.profile.get_variables(instance, variables)
def exec(self, command, **parameters):
return self.command.exec_local(command, parameters)
def run_list(self, elements, processor):
return self.command.run_list(elements, processor)
class CommandProfile(object):
def __init__(self, module, name = None, data = None):
if not data:
data = {}
self.name = name
self.module = module
self.command = module.command
self.manager = self.command.manager
self.data = data
self.components = []
self.config = Collection()
def get_component_names(self, filter_method = None):
return self.manager.index.load_component_names(self, filter_method)
def initialize(self, config, components):
self.components = components if components else []
if not config:
config = {}
self.init_config(config)
self.load_parents()
self.data = self.get_schema()
def init_config(self, dynamic_config):
self.command.options.initialize(True)
for stored_config in self.command.get_instances(self.command._config):
self.config.set(stored_config.name, stored_config.value)
if isinstance(dynamic_config, dict):
for name, value in dynamic_config.items():
self.config.set(name, value)
def get_config(self):
return self.data.get('config', {})
def set_config(self, config):
if 'config' not in self.data:
self.data['config'] = {}
for name, value in self.interpolate_config(config).items():
self.data['config'][name] = value
def interpolate_config(self, input_config, **options):
config = {}
for name, value in input_config.items():
config[name] = self.interpolate_config_value(value, **options)
if not self.config.check(name):
self.config.set(name, config[name])
return config
def interpolate_config_value(self, value, **options):
options['config_overrides'] = self.config.export()
return normalize_value(self.command.options.interpolate(value, **options))
def load_parents(self):
self.parents = []
self.set_config(self.get_config())
if 'parents' in self.data:
parents = self.data.pop('parents')
for parent in reversed(ensure_list(parents)):
module = self.module.instance
if isinstance(parent, str):
profile_name = self.interpolate_config_value(parent)
else:
profile_name = self.interpolate_config_value(parent['profile'])
if 'module' in parent:
module_name = self.interpolate_config_value(parent['module'])
if module_name != 'self':
module = self.get_module(module_name)
self.parents.insert(0,
module.provider.get_profile(profile_name)
)
for profile in reversed(self.parents):
profile.load_parents()
def get_schema(self):
schema = {'config': {}}
for profile in self.parents:
parent_schema = profile.get_schema()
self.merge_schema(schema, parent_schema)
self.merge_schema(schema, self.data)
for component in self.get_component_names('ensure_module_config'):
if component in schema:
for name, component_config in schema[component].items():
if '_module' not in component_config:
component_config['_module'] = self.module.instance.name
for name, value in schema['config'].items():
if not self.config.check(name):
self.config.set(name, value)
return schema
def merge_schema(self, schema, data):
for key, value in data.items():
if isinstance(value, dict):
schema.setdefault(key, {})
self.merge_schema(schema[key], value)
else:
schema[key] = value
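    # merge_schema deep-merges `data` into `schema` in place: merging
    # data = {'config': {'a': 1}} into schema = {'config': {'b': 2}} leaves
    # schema == {'config': {'b': 2, 'a': 1}}; non-dict values simply overwrite.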
def display_schema(self, operation):
self.command.info('')
self.process_components(operation, display_only = True)
if self.include('profile'):
component = self.manager.index.load_component(self, 'profile')
profiles = self.expand_instances(component.name, self.data)
for profile, config in profiles.items():
if self.include_instance(profile, config):
getattr(component, operation)(profile, config, True)
def run(self, components = None, config = None, display_only = False, test = False):
self.command.data("Running profile:", "{}:{}".format(self.module.instance.name, self.name), 'profile_name')
operation = 'run'
self.initialize(config, components)
if display_only:
self.display_schema(operation)
else:
self.process_components(operation,
extra_config = { 'test': test }
)
def destroy(self, components = None, config = None, display_only = False):
self.command.data("Destroying profile:", "{}:{}".format(self.module.instance.name, self.name), 'profile_name')
def remove_instance(instance_config):
return not instance_config.get('_keep', False)
operation = 'destroy'
self.initialize(config, components)
if display_only:
self.display_schema(operation)
else:
self.process_components(operation, include_method = remove_instance)
def process_components(self, operation, include_method = None, display_only = False, extra_config = None):
component_map = self.manager.index.load_components(self)
for priority, components in sorted(component_map.items()):
def process(component):
operation_method = getattr(component, operation, None)
if callable(operation_method) and self.include(component.name):
if extra_config and isinstance(extra_config, dict):
for property, value in extra_config.items():
setattr(component, property, value)
self._process_component_instances(component,
component_method = operation_method,
include_method = include_method,
display_only = display_only
)
self.command.run_list(components, process)
def _process_component_instances(self, component, component_method, include_method = None, display_only = False):
data = copy.deepcopy(self.data)
requirements = Collection()
processed = Collection()
rendered_instances = OrderedDict() if display_only else None
def get_wait_keys(_name):
wait_keys = []
if _name in requirements and requirements[_name]:
for _child_name in flatten(ensure_list(requirements[_name])):
if processed[_child_name]:
wait_keys.extend(processed[_child_name])
wait_keys.extend(get_wait_keys(_child_name))
return list(set(wait_keys))
def check_include(config):
if not callable(include_method):
return True
return include_method(self.interpolate_config_value(config))
def render_instance(name):
instance_config = copy.deepcopy(data[component.name][name])
name = self.interpolate_config_value(name)
instance_config = self.interpolate_config_value(instance_config,
config = 'query',
config_value = False,
function_suppress = '^\s*\<+[^\>]+\>+\s*$',
conditional_suppress = '\s*\<+[^\>]+\>+\s*'
)
if self.include_instance(name, instance_config):
if '_config' in instance_config:
instance_config = self.interpolate_config_value(instance_config,
function_suppress = '^\s*\<+[^\>]+\>+\s*$',
conditional_suppress = '\s*\<+[^\>]+\>+\s*'
)
component_method(name, instance_config)
rendered_instances[name] = instance_config
def process_instances(interpolate_references):
instance_map = self.order_instances(self.expand_instances(component.name, data,
interpolate_references = interpolate_references
))
for priority, names in sorted(instance_map.items()):
expansion = Collection()
def process_instance(name):
instance_config = copy.deepcopy(data[component.name][name])
name = self.interpolate_config_value(name)
if self.include_instance(name, instance_config):
if isinstance(instance_config, dict):
if '_foreach' in instance_config:
expansion[priority] = True
if priority not in expansion and \
name not in processed and \
check_include(instance_config):
instance_config = self.interpolate_config_value(instance_config)
if isinstance(instance_config, dict):
requirements[name] = instance_config.pop('_requires', [])
if requirements[name]:
instance_config['_wait_keys'] = get_wait_keys(name)
if settings.DEBUG_COMMAND_PROFILES:
self.command.info(yaml.dump(
{ name: instance_config },
Dumper = noalias_dumper
))
log_keys = component_method(name, instance_config)
processed[name] = ensure_list(log_keys) if log_keys else []
if display_only:
self.command.run_list(names, render_instance)
else:
self.command.run_list(names, process_instance)
if not display_only and priority in expansion:
return process_instances(True)
if display_only:
process_instances(True)
self.command.info(yaml.dump(
{ component.name: rendered_instances },
Dumper = noalias_dumper
))
else:
process_instances(False)
self.command.wait_for_tasks([ log_keys for name, log_keys in processed.export().items() ])
def expand_instances(self, component_name, data = None, interpolate_references = True):
instance_data = copy.deepcopy(self.data if data is None else data)
instance_map = {}
def get_replacements(info, replacements, keys = None):
if keys is None:
keys = []
tag = ".".join(keys) if keys else 'value'
if isinstance(info, dict):
replacements["<<{}>>".format(tag)] = info
replacements["<<>{}>>".format(tag)] = dump_json(info)
for key, value in info.items():
get_replacements(value, replacements, keys + [str(key)])
elif isinstance(info, (list, tuple)):
replacements["<<{}>>".format(tag)] = info
replacements["<<>{}>>".format(tag)] = dump_json(info)
for index, value in enumerate(info):
get_replacements(value, replacements, keys + [str(index)])
else:
replacements["<<{}>>".format(tag)] = info
return replacements
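# An illustrative sketch of what the helper above produces (the input is
# assumed, not taken from the original source): for a "_foreach" item such as
# {"name": "web", "ports": [80, 443]} the replacement map looks roughly like
#   "<<value>>"   -> the whole item
#   "<<>value>>"  -> the whole item serialized with dump_json
#   "<<name>>"    -> "web"
#   "<<ports>>"   -> [80, 443], plus "<<ports.0>>" -> 80 and "<<ports.1>>" -> 443
# substitute_config below then swaps these tokens into instance names and configs.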
def substitute_config(config, replacements):
if isinstance(config, dict):
config = copy.deepcopy(config)
for key in list(config.keys()):
real_key = substitute_config(key, replacements)
real_value = substitute_config(config[key], replacements)
if isinstance(real_key, (dict, list, tuple)) or real_key != key:
config.pop(key, None)
if isinstance(real_key, dict):
for sub_key, sub_value in real_key.items():
config[sub_key] = sub_value if sub_value is not None else real_value
elif isinstance(real_key, (list, tuple)):
for sub_key in real_key:
config[sub_key] = real_value
else:
config[real_key] = real_value
elif isinstance(config, (list, tuple)):
config = copy.deepcopy(config)
for index, value in enumerate(config):
config[index] = substitute_config(value, replacements)
else:
for token in replacements.keys():
if str(config) == token:
config = replacements[token]
else:
replacement = replacements[token]
if isinstance(replacements[token], (list, tuple, dict)):
replacement = dump_json(replacements[token])
if isinstance(config, str):
config = config.replace(token, str(replacement))
if isinstance(config, str) and re.match(r'^\<\<.*\>\>$', config):
config = None
return config
for name, config in instance_data[component_name].items():
if config and isinstance(config, dict):
collection = config.get('_foreach', None)
if collection and (interpolate_references or not isinstance(collection, str) or not collection.startswith('&')):
config.pop('_foreach')
collection = self.interpolate_config_value(collection)
if isinstance(collection, (list, tuple)):
for item in collection:
replacements = get_replacements(item, {})
new_name = self.interpolate_config_value(substitute_config(name, replacements))
instance_map[new_name] = substitute_config(config, replacements)
elif isinstance(collection, dict):
for key, item in collection.items():
replacements = get_replacements(item, {
"<<dict_key>>": key
})
new_name = self.interpolate_config_value(substitute_config(name, replacements))
instance_map[new_name] = substitute_config(config, replacements)
else:
self.command.error("Component instance expansions must be lists or dictionaries: {}".format(collection))
else:
instance_map[name] = config
else:
instance_map[name] = config
for name, config in instance_map.items():
if data is None:
self.data[component_name][name] = config
else:
data[component_name][name] = config
return instance_map
def order_instances(self, configs):
for name, value in configs.items():
if isinstance(value, dict) and '_requires' in value and value['_requires'] is not None:
value['_requires'] = self.interpolate_config_value(value['_requires'])
return prioritize(configs, keep_requires = True, requires_field = '_requires')
def include(self, component, force = False, check_data = True):
if component == 'profile' and 'profile' in self.data:
return True
if not force and self.components and component not in self.components:
return False
if check_data and component not in self.data:
return False
return True
def include_inner(self, component, force = False):
return self.include(component,
force = force,
check_data = False
)
def include_instance(self, name, config):
if isinstance(config, dict):
when = config.pop('_when', None)
when_not = config.pop('_when_not', None)
when_in = config.pop('_when_in', None)
when_not_in = config.pop('_when_not_in', None)
when_type = config.pop('_when_type', 'AND').upper()
if when is not None:
result = True if when_type == 'AND' else False
for variable in ensure_list(when):
value = format_value('bool', self.interpolate_config_value(variable))
if when_type == 'AND':
if not value:
return False
else:
if value:
result = True
return result
if when_not is not None:
result = True if when_type == 'AND' else False
for variable in ensure_list(when_not):
value = format_value('bool', self.interpolate_config_value(variable))
if when_type == 'AND':
if value:
return False
else:
if not value:
result = True
return result
if when_in is not None:
value = self.interpolate_config_value(when_in)
return name in ensure_list(value)
if when_not_in is not None:
value = self.interpolate_config_value(when_not_in)
return name not in ensure_list(value)
return True
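# A minimal illustration of the guard keys handled above (the YAML shape is
# assumed, not taken from the original source):
#   my-instance:
#       _when: "<<enable_feature>>"
# With the default _when_type of "AND", every value listed under _when must
# interpolate to a truthy boolean for the instance to be included. _when_not,
# _when_in and _when_not_in are checked in that order, and the first key
# present decides the result.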
def get_variables(self, instance, variables = None):
if not variables:
variables = {}
system_fields = [ x.name for x in instance.facade.system_field_instances ]
if getattr(instance, 'config', None) and isinstance(instance.config, dict):
for name, value in instance.config.items():
variables[name] = value
for field in instance.facade.fields:
value = getattr(instance, field)
if not isinstance(value, BaseModel) and field[0] != '_' and field not in system_fields:
variables[field] = value
return clean_dict(variables)
def get_instances(self, facade_name, excludes = None):
if not excludes:
excludes = []
facade_index = self.manager.index.get_facade_index()
excludes = ensure_list(excludes)
instances = []
for instance in self.command.get_instances(facade_index[facade_name]):
if not excludes or instance.name not in excludes:
instances.append(instance)
return instances
def get_module(self, name):
facade = self.command.facade(self.command._module)
return self.command.get_instance(facade, name, required = False)
def get_info(self, name, config, remove = True):
if remove:
value = config.pop(name, None)
else:
value = config.get(name, None)
return value
def pop_info(self, name, config):
return self.get_info(name, config, True)
def get_value(self, name, config, remove = False):
value = self.get_info(name, config, remove)
if value is not None:
value = self.interpolate_config_value(value)
return value
def pop_value(self, name, config):
return self.get_value(name, config, True)
def get_values(self, name, config, remove = False):
value = self.get_value(name, config, remove)
return ensure_list(value) if value is not None else []
def pop_values(self, name, config):
return self.get_values(name, config, True)
def interpolate(self, config, replacements = None):
if not replacements:
replacements = {}
def _interpolate(data):
if isinstance(data, dict):
for key, value in data.items():
data[key] = _interpolate(value)
elif isinstance(data, (list, tuple)):
for index, value in enumerate(data):
data[index] = _interpolate(value)
elif isinstance(data, str):
data = re.sub(r"([\{\}])", r"\1\1", data)
data = re.sub(r"\<([a-z][\_\-a-z0-9]+)\>", r"{\1}", data)
data = data.format(**replacements)
return data
if replacements:
return _interpolate(copy.deepcopy(config))
return config
|
py | 1a51dbdc6ae1300ef65cbac66f16ad07a9d858bb | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
import os
import re
import subprocess
import tempfile
from . import utils
def get_verbose():
"""Return if in verbose mode."""
verbose = 0
for e in ["V", "VERBOSE"]:
if e not in os.environ:
continue
verbose = int(os.environ[e])
break
return verbose > 0
def get_yosys():
"""
Searches for the Yosys binary. If the env. var. "YOSYS" is set, then it
checks if it points to a valid executable binary. Otherwise it searches
in PATH for binaries named "yosys" and returns the first one found.
"""
def is_exe(fpath):
"""
Returns True if a file exists and is executable.
"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
# The environment variable "YOSYS" is set. It should point to the Yosys
# executable.
if "YOSYS" in os.environ:
fpath = os.environ["YOSYS"]
if not is_exe(fpath):
return None
return fpath
# Look for the 'yosys' binary in the current PATH but only if the PATH
# variable is available.
elif "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
fpath = os.path.join(path, "yosys")
if is_exe(fpath):
return fpath
# Couldn't find Yosys.
return None
def determine_select_prefix():
"""
Older and newer versions of Yosys exhibit different behavior of the
'select' command regarding black/white boxes. Newer version requires a
prefix before some queries. This function determines whether the prefix
is required or not.
"""
# Query help string of the select command
cmd = ["-p", "help select"]
stdout = get_output(cmd, no_common_args=True)
# Look for the phrase. If found then the prefix is required
PHRASE = "prefix the pattern with '='"
if PHRASE in stdout:
return "="
# No prefix needed
return ""
def get_yosys_common_args():
return ["-e", "wire '[^']*' is assigned in a block", "-q"]
def get_output(params, no_common_args=False):
"""Run Yosys with given command line parameters, and return
stdout as a string. Raises CalledProcessError on a non-zero exit code."""
verbose = get_verbose()
cmd = [get_yosys()]
if not no_common_args:
cmd += get_yosys_common_args()
cmd += params
if verbose:
msg = ""
msg += "command".ljust(9).ljust(80, "=") + "\n"
msg += str(cmd)
print(msg)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get the output
stdout, stderr = p.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
retcode = p.wait()
if verbose:
msg = ""
if len(stdout):
msg += "stdout".ljust(9).ljust(80, "=") + "\n"
msg += stdout
if len(stderr):
msg += "stderr".ljust(9).ljust(80, "=") + "\n"
msg += stderr
msg += "exitcode".ljust(9).ljust(80, "=") + "\n"
msg += "{}\n".format(retcode)
msg += "=" * 80 + "\n"
print(msg)
if retcode != 0:
emsg = ""
emsg += "Yosys failed with exit code {}\n".format(retcode)
emsg += "Command: '{}'\n".format(" ".join(cmd))
emsg += "Message:\n"
emsg += "\n".join([" " + v for v in stderr.splitlines()])
raise subprocess.CalledProcessError(retcode, cmd, emsg)
return stdout
defines = []
includes = []
def add_define(defname):
"""Add a Verilog define to the list of defines to set in Yosys"""
defines.append(defname)
def get_defines():
"""Return a list of set Verilog defines, as a list of arguments
to pass to Yosys `read_verilog`"""
return " ".join(["-D" + _ for _ in defines])
def add_include(path):
""" Add a path to search when reading verilog to the list of
includes set in Yosys"""
includes.append(path)
def get_includes():
"""Return a list of include directories, as a list of arguments
to pass to Yosys `read_verilog`"""
return " ".join(["-I" + _ for _ in includes])
def commands(commands, infiles=[]):
"""Run a given string containing Yosys commands
Inputs
-------
commands : string of Yosys commands to run
infiles : list of input files
"""
commands = "read_verilog {} {} {}; ".format(
get_defines(), get_includes(), " ".join(infiles)
) + commands
params = ["-p", commands]
return get_output(params)
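# Illustrative call (the file and top module names are hypothetical):
#
#   commands("prep -top top; stat", infiles=["top.v"])
#
# which results in a single Yosys invocation roughly equivalent to
#   yosys -p "read_verilog <defines> <includes> top.v; prep -top top; stat"
# together with the common arguments from get_yosys_common_args().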
def script(script, infiles=[]):
"""Run a Yosys script given a path to the script
Inputs
-------
script : path to Yosys script to run
infiles : list of input files
"""
params = ["-s", script] + infiles
return get_output(params)
def vlog_to_json(
infiles, flatten=False, aig=False, mode=None, module_with_mode=None
):
"""
Convert Verilog to a JSON representation using Yosys
Inputs
-------
infiles : list of input files
flatten : set to flatten output hierarchy
aig : generate And-Inverter-Graph modules for gates
mode : set to a value other than None to use `chparam` to
set the value of the MODE parameter
module_with_mode : the name of the module to apply `mode` to
"""
prep_opts = "-flatten" if flatten else ""
json_opts = "-aig" if aig else ""
if mode is not None:
mode_str = 'chparam -set MODE "{}" {}; '.format(mode, module_with_mode)
else:
mode_str = ""
cmds = "{}prep {}; write_json {}".format(mode_str, prep_opts, json_opts)
try:
j = utils.strip_yosys_json(commands(cmds, infiles))
except subprocess.CalledProcessError as ex:
print(ex.output)
exit(-1)
return json.loads(j)
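# Illustrative usage (module and file names are hypothetical):
#
#   design = vlog_to_json(["mux.v"], flatten=True)
#   cells = design["modules"]["mux"]["cells"]
#
# i.e. the return value is the dictionary produced by Yosys `write_json`.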
def extract_pin(module, pstr, _regex=re.compile(r"([^/]+)/([^/]+)")):
"""
Extract the pin from a line of the result of a Yosys select command, or
None if the command result is irrelevant (e.g. does not correspond to the
correct module)
Inputs
-------
module: Name of module to extract pins from
pstr: Line from Yosys select command (`module/pin` format)
"""
m = _regex.match(pstr)
if m and m.group(1) == module:
return m.group(2)
else:
return None
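# For example, given the pattern above:
#   extract_pin("mux", "mux/sel")  -> "sel"
#   extract_pin("mux", "adder/a")  -> None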
def do_select(infiles, module, expr, prep=False, flatten=False):
"""
Run a Yosys select command (given the expression and input files)
on a module and return the result as a list of pins
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
expr: Yosys selector expression for select command
prep: Run prep command before selecting.
flatten: Flatten module when running prep.
"""
# TODO: All of these functions involve a fairly large number of calls to
# Yosys. Although performance here is unlikely to be a major priority any
# time soon, it might be worth investigating better options?
f = ""
if flatten:
f = "-flatten"
p = ""
if prep:
p = "prep -top {} {};".format(module, f)
else:
p = "proc;"
outfile = tempfile.mktemp()
sel_cmd = "{} cd {}; select -write {} {}".format(p, module, outfile, expr)
try:
commands(sel_cmd, infiles)
except subprocess.CalledProcessError as ex:
print(ex.output)
exit(-1)
pins = []
with open(outfile, 'r') as f:
for net in f:
snet = net.strip()
if (len(snet) > 0):
pin = extract_pin(module, snet)
if pin is not None:
pins.append(pin)
os.remove(outfile)
return pins
def get_combinational_sinks(infiles, module, innet):
"""Return a list of output ports which are combinational sinks of a given
input.
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
innet: Name of input net to find sinks of
"""
return do_select(
infiles, module, "={} %co* =o:* %i ={} %d".format(innet, innet)
)
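# Illustrative call (names are hypothetical): for a module "lut" with input
# "I0", get_combinational_sinks(["lut.v"], "lut", "I0") returns the output
# ports reachable from I0 through combinational logic, e.g. ["O"].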
def list_clocks(infiles, module):
"""Return a list of clocks in the module
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
"""
return do_select(
infiles, module,
"=c:* %x:+[CLK]:+[clk]:+[clock]:+[CLOCK] =c:* %d =x:* %i"
)
def get_clock_assoc_signals(infiles, module, clk):
"""Return the list of signals associated with a given clock.
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return do_select(
infiles, module,
"select -list ={} %a %co* %x =i:* =o:* %u %i =a:ASSOC_CLOCK={} %u ={} "
"%d".
format(clk, clk, clk)
)
# Find things which affect the given output
# show w:*D_IN_0 %a %ci*
# Find things which are affected by the given clock.
# show w:*INPUT_CLK %a %co*
# Find things which are affect by the given signal - combinational only.
# select -list w:*INPUT_CLK %a %co* %x x:* %i
def get_related_output_for_input(infiles, module, signal):
""".
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return do_select(
infiles, module, "select -list =w:*{} %a %co* =o:* %i".format(signal)
)
def get_related_inputs_for_input(infiles, module, signal):
""".
Inputs
-------
infiles: List of Verilog source files to pass to Yosys
module: Name of module to run command on
clk: Name of clock to find associated signals
"""
return [
x for x in do_select(
infiles, module,
"select -list =w:*{} %a %co* %x =i:* %i".format(signal)
) if x != signal
]
|
py | 1a51dc78d1adeddfd39d2359102b4dedeb6a2435 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'playground.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a51dcd1f4bad74c8b1d215db3b26aa9e599e26c | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
import numpy as np
class TestSplitOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host, [PrecisionType.FP32, PrecisionType.INT64],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
metal_places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.ARM, PrecisionType.FP32),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=metal_places)
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=[
"kunlunxin_xtcl", "nvidia_tensorrt", "intel_openvino"
])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
x_dtype = program_config.inputs["input_data"].dtype
#check config
if predictor_config.precision() == PrecisionType.INT64:
if x_dtype != np.int64:
return False
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.sampled_from([[6, 9, 24], [6, 24, 24], [6, 24], [24, 24], [24]
]))
batch = draw(st.integers(min_value=1, max_value=10))
in_shape.insert(0, batch)
sections = draw(
st.sampled_from([[], [3, 3], [2, 4], [10, 14], [2, 2, 2],
[1, 3, 2], [3, 3, 3], [3, 7, 14]]))
input_num = draw(st.sampled_from([0, 1]))
num = draw(st.sampled_from([0, 2, 3]))
input_axis = draw(st.sampled_from([0, 1, 2, 3]))
input_type = draw(st.sampled_from(["float32", "int32", "int64"]))
Out = draw(
st.sampled_from([["output_var0", "output_var1"],
["output_var0", "output_var1", "output_var2"]]))
#Sections and num cannot both be equal to 0.
assume((num != 0 and len(sections) == 0) or (num == 0 and
len(sections) != 0))
# the axis must index a valid dimension of the input
assume(input_axis < len(in_shape))
#When sections and num are not both equal to 0, sections has higher priority.
#The sum of sections should be equal to the input size.
if len(sections) != 0:
assume(len(Out) == len(sections))
assume(in_shape[input_axis] % len(sections) == 0)
sum = 0
for i in sections:
sum += i
assume(sum == in_shape[input_axis])
if num != 0:
assume(len(Out) == num)
assume(in_shape[input_axis] % num == 0)
if input_num == 0:
assume((len(in_shape) == 2) & (in_shape[1] == 24) & (
sections == [10, 14]) & (len(Out) == 2))
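# For intuition (example values only): with in_shape [batch, 24], axis=1 and
# sections=[10, 14] the split op yields outputs of shape [batch, 10] and
# [batch, 14]; with num=3 and axis=1 it yields three slices of shape [batch, 8].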
def generate_input(*args, **kwargs):
if input_type == "float32":
return np.random.normal(0.0, 1.0, in_shape).astype(np.float32)
elif input_type == "int32":
return np.random.normal(0.0, 1.0, in_shape).astype(np.int32)
elif input_type == "int64":
return np.random.normal(0.0, 1.0, in_shape).astype(np.int64)
def generate_AxisTensor(*args, **kwargs):
return np.ones([1]).astype(np.int32)
def generate_SectionsTensorList1(*args, **kwargs):
return np.array([10]).astype(np.int32)
def generate_SectionsTensorList2(*args, **kwargs):
return np.array([14]).astype(np.int32)
dics_intput = [{
"X": ["input_data"],
"AxisTensor": ["AxisTensor"],
"SectionsTensorList":
["SectionsTensorList1", "SectionsTensorList2"]
}, {
"X": ["input_data"]
}]
dics_weight = [{
"AxisTensor": TensorConfig(data_gen=partial(generate_AxisTensor)),
"SectionsTensorList1":
TensorConfig(data_gen=partial(generate_SectionsTensorList1)),
"SectionsTensorList2":
TensorConfig(data_gen=partial(generate_SectionsTensorList2))
}, {}]
ops_config = OpConfig(
type="split",
inputs=dics_intput[input_num],
outputs={"Out": Out},
attrs={"sections": sections,
"num": num,
"axis": input_axis})
program_config = ProgramConfig(
ops=[ops_config],
weights=dics_weight[input_num],
inputs={
"input_data": TensorConfig(data_gen=partial(generate_input))
},
outputs=Out)
return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
config_lists = self.get_predictor_configs()
for config in config_lists:
if config.target() in [TargetType.Metal]:
atol, rtol = 1e-3, 1e-3
return self.get_predictor_configs(), ["split"], (atol, rtol)
def add_ignore_pass_case(self):
def teller1(program_config, predictor_config):
x_shape = list(program_config.inputs["input_data"].shape)
if predictor_config.target() == TargetType.Metal:
if len(x_shape) != 4:
return True
self.add_ignore_check_case(
teller1, IgnoreReasons.ACCURACY_ERROR,
"The op output has diff in a specific case. We need to fix it as soon as possible."
)
def teller2(program_config, predictor_config):
x_dtype = program_config.inputs["input_data"].dtype
x_shape = list(program_config.inputs["input_data"].shape)
out_shape = list(program_config.outputs)
axis = program_config.ops[0].attrs["axis"]
num = program_config.ops[0].attrs["num"]
if predictor_config.target() == TargetType.OpenCL:
if num != 2 or x_dtype != np.float32:
return True
if predictor_config.target() == TargetType.Metal:
if len(x_shape) == 2 or axis == 0 or axis == 1:
return True
if x_dtype != np.float32:
return True
self.add_ignore_check_case(
teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support this op in a specific case. We need to fix it as soon as possible."
)
def _teller3(program_config, predictor_config):
if "nvidia_tensorrt" in self.get_nnadapter_device_name():
in_shape = program_config.inputs["input_data"].shape
axis = program_config.ops[0].attrs["axis"]
in_dtype = program_config.inputs["input_data"].dtype
if len(in_shape) == 1 or axis == 0 or in_dtype != np.float32:
return True
self.add_ignore_check_case(
_teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support 'in_shape_size == 1' or 'axis == 0' or 'in_dtype != float32' on NvidiaTensorrt."
)
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 50
if target_str == "OpenCL":
# Make sure to generate enough valid cases for OpenCL
max_examples = 100
if target_str == "Metal":
# Make sure to generate enough valid cases for OpenCL
max_examples = 500
self.run_and_statis(
quant=False, min_success_num=25, max_examples=max_examples)
if __name__ == "__main__":
unittest.main(argv=[''])
|
py | 1a51de20bc3fae37bf71c1e6e13ea17b914d31ab | #!/usr/bin/env python
"""Tests for `twitter_blocker` package."""
import pytest
from twitter_blocker import twitter_blocker
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
|
py | 1a51de491ce4c32f2ad3469384ac3a16eba809e5 | import asyncio
import json
import logging
import multiprocessing
import multiprocessing.context
import time
from collections import defaultdict
from pathlib import Path
from secrets import token_bytes
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import aiosqlite
from blspy import G1Element, PrivateKey
from chia.consensus.coinbase import pool_parent_id, farmer_parent_id
from chia.consensus.constants import ConsensusConstants
from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH, solution_to_pool_state
from chia.pools.pool_wallet import PoolWallet
from chia.protocols import wallet_protocol
from chia.protocols.wallet_protocol import PuzzleSolutionResponse, RespondPuzzleSolution, CoinState
from chia.server.ws_connection import WSChiaConnection
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_spend import CoinSpend
from chia.types.full_block import FullBlock
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import process_config_start_method
from chia.util.db_wrapper import DBWrapper
from chia.util.errors import Err
from chia.util.ints import uint32, uint64, uint128, uint8
from chia.util.db_synchronous import db_synchronous_on
from chia.wallet.cat_wallet.cat_utils import match_cat_puzzle, construct_cat_puzzle
from chia.wallet.cat_wallet.cat_wallet import CATWallet
from chia.wallet.cat_wallet.cat_constants import DEFAULT_CATS
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.derive_keys import master_sk_to_wallet_sk, master_sk_to_wallet_sk_unhardened
from chia.wallet.key_val_store import KeyValStore
from chia.wallet.puzzles.cat_loader import CAT_MOD
from chia.wallet.rl_wallet.rl_wallet import RLWallet
from chia.wallet.settings.user_settings import UserSettings
from chia.wallet.trade_manager import TradeManager
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.util.compute_hints import compute_coin_hints
from chia.wallet.util.transaction_type import TransactionType
from chia.wallet.util.wallet_sync_utils import last_change_height_cs
from chia.wallet.util.wallet_types import WalletType
from chia.wallet.wallet import Wallet
from chia.wallet.wallet_action import WalletAction
from chia.wallet.wallet_action_store import WalletActionStore
from chia.wallet.wallet_blockchain import WalletBlockchain
from chia.wallet.wallet_coin_record import WalletCoinRecord
from chia.wallet.wallet_coin_store import WalletCoinStore
from chia.wallet.wallet_info import WalletInfo
from chia.wallet.wallet_interested_store import WalletInterestedStore
from chia.wallet.wallet_pool_store import WalletPoolStore
from chia.wallet.wallet_puzzle_store import WalletPuzzleStore
from chia.wallet.wallet_sync_store import WalletSyncStore
from chia.wallet.wallet_transaction_store import WalletTransactionStore
from chia.wallet.wallet_user_store import WalletUserStore
from chia.server.server import ChiaServer
from chia.wallet.did_wallet.did_wallet import DIDWallet
from chia.wallet.wallet_weight_proof_handler import WalletWeightProofHandler
class WalletStateManager:
constants: ConsensusConstants
config: Dict
tx_store: WalletTransactionStore
puzzle_store: WalletPuzzleStore
user_store: WalletUserStore
action_store: WalletActionStore
basic_store: KeyValStore
start_index: int
# Makes sure only one asyncio thread is changing the blockchain state at one time
lock: asyncio.Lock
log: logging.Logger
# TODO Don't allow user to send tx until wallet is synced
sync_mode: bool
sync_target: uint32
genesis: FullBlock
state_changed_callback: Optional[Callable]
pending_tx_callback: Optional[Callable]
puzzle_hash_created_callbacks: Dict = defaultdict(lambda *x: None)
db_path: Path
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
main_wallet: Wallet
wallets: Dict[uint32, Any]
private_key: PrivateKey
trade_manager: TradeManager
new_wallet: bool
user_settings: UserSettings
blockchain: WalletBlockchain
coin_store: WalletCoinStore
sync_store: WalletSyncStore
finished_sync_up_to: uint32
interested_store: WalletInterestedStore
multiprocessing_context: multiprocessing.context.BaseContext
weight_proof_handler: WalletWeightProofHandler
server: ChiaServer
root_path: Path
wallet_node: Any
pool_store: WalletPoolStore
default_cats: Dict[str, Any]
@staticmethod
async def create(
private_key: PrivateKey,
config: Dict,
db_path: Path,
constants: ConsensusConstants,
server: ChiaServer,
root_path: Path,
wallet_node,
name: str = None,
):
self = WalletStateManager()
self.new_wallet = False
self.config = config
self.constants = constants
self.server = server
self.root_path = root_path
self.log = logging.getLogger(name if name else __name__)
self.lock = asyncio.Lock()
self.log.debug(f"Starting in db path: {db_path}")
self.db_connection = await aiosqlite.connect(db_path)
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute(
"pragma synchronous={}".format(db_synchronous_on(self.config.get("db_sync", "auto"), db_path))
)
self.db_wrapper = DBWrapper(self.db_connection)
self.coin_store = await WalletCoinStore.create(self.db_wrapper)
self.tx_store = await WalletTransactionStore.create(self.db_wrapper)
self.puzzle_store = await WalletPuzzleStore.create(self.db_wrapper)
self.user_store = await WalletUserStore.create(self.db_wrapper)
self.action_store = await WalletActionStore.create(self.db_wrapper)
self.basic_store = await KeyValStore.create(self.db_wrapper)
self.trade_manager = await TradeManager.create(self, self.db_wrapper)
self.user_settings = await UserSettings.create(self.basic_store)
self.pool_store = await WalletPoolStore.create(self.db_wrapper)
self.interested_store = await WalletInterestedStore.create(self.db_wrapper)
self.default_cats = DEFAULT_CATS
self.wallet_node = wallet_node
self.sync_mode = False
self.sync_target = uint32(0)
self.finished_sync_up_to = uint32(0)
multiprocessing_start_method = process_config_start_method(config=self.config, log=self.log)
self.multiprocessing_context = multiprocessing.get_context(method=multiprocessing_start_method)
self.weight_proof_handler = WalletWeightProofHandler(
constants=self.constants,
multiprocessing_context=self.multiprocessing_context,
)
self.blockchain = await WalletBlockchain.create(self.basic_store, self.constants, self.weight_proof_handler)
self.state_changed_callback = None
self.pending_tx_callback = None
self.db_path = db_path
main_wallet_info = await self.user_store.get_wallet_by_id(1)
assert main_wallet_info is not None
self.private_key = private_key
self.main_wallet = await Wallet.create(self, main_wallet_info)
self.wallets = {main_wallet_info.id: self.main_wallet}
wallet = None
for wallet_info in await self.get_all_wallet_info_entries():
if wallet_info.type == WalletType.STANDARD_WALLET:
if wallet_info.id == 1:
continue
wallet = await Wallet.create(self, wallet_info)
elif wallet_info.type == WalletType.CAT:
wallet = await CATWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.RATE_LIMITED:
wallet = await RLWallet.create(self, wallet_info)
elif wallet_info.type == WalletType.DISTRIBUTED_ID:
wallet = await DIDWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.POOLING_WALLET:
wallet = await PoolWallet.create_from_db(
self,
self.main_wallet,
wallet_info,
)
if wallet is not None:
self.wallets[wallet_info.id] = wallet
return self
def get_derivation_index(self, pubkey: G1Element, max_depth: int = 1000) -> int:
for i in range(0, max_depth):
derived = self.get_public_key(uint32(i))
if derived == pubkey:
return i
derived = self.get_public_key_unhardened(uint32(i))
if derived == pubkey:
return i
return -1
def get_public_key(self, index: uint32) -> G1Element:
return master_sk_to_wallet_sk(self.private_key, index).get_g1()
def get_public_key_unhardened(self, index: uint32) -> G1Element:
return master_sk_to_wallet_sk_unhardened(self.private_key, index).get_g1()
async def get_keys(self, puzzle_hash: bytes32) -> Optional[Tuple[G1Element, PrivateKey]]:
record = await self.puzzle_store.record_for_puzzle_hash(puzzle_hash)
if record is None:
raise ValueError(f"No key for this puzzlehash {puzzle_hash})")
if record.hardened:
private = master_sk_to_wallet_sk(self.private_key, record.index)
pubkey = private.get_g1()
return pubkey, private
private = master_sk_to_wallet_sk_unhardened(self.private_key, record.index)
pubkey = private.get_g1()
return pubkey, private
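# A sketch of the lookup flow above (no additional functionality implied):
# given a puzzle hash this wallet previously generated,
#   pubkey, privkey = await wallet_state_manager.get_keys(puzzle_hash)
#   assert privkey.get_g1() == pubkey
# where the key pair is re-derived from the master key using hardened or
# unhardened derivation, depending on how the derivation record was created
# in create_more_puzzle_hashes below.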
async def create_more_puzzle_hashes(self, from_zero: bool = False, in_transaction=False):
"""
For all wallets in the user store, generates the first few puzzle hashes so
that we can restore the wallet from only the private keys.
"""
targets = list(self.wallets.keys())
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
to_generate = self.config["initial_num_public_keys"]
for wallet_id in targets:
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
start_index = 0
derivation_paths: List[DerivationRecord] = []
if last is not None:
start_index = last + 1
# If the key was replaced (from_zero=True), we should generate the puzzle hashes for the new key
if from_zero:
start_index = 0
for index in range(start_index, unused + to_generate):
if WalletType(target_wallet.type()) == WalletType.POOLING_WALLET:
continue
# Hardened
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
if puzzle is None:
self.log.error(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index), puzzlehash, pubkey, target_wallet.type(), uint32(target_wallet.id()), True
)
)
# Unhardened
pubkey_unhardened: G1Element = self.get_public_key_unhardened(uint32(index))
puzzle_unhardened: Program = target_wallet.puzzle_for_pk(bytes(pubkey_unhardened))
if puzzle_unhardened is None:
self.log.error(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash_unhardened: bytes32 = puzzle_unhardened.get_tree_hash()
self.log.info(
f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash_unhardened.hex()}"
)
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash_unhardened,
pubkey_unhardened,
target_wallet.type(),
uint32(target_wallet.id()),
False,
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths, in_transaction)
await self.add_interested_puzzle_hashes(
[record.puzzle_hash for record in derivation_paths],
[record.wallet_id for record in derivation_paths],
in_transaction,
)
if unused > 0:
await self.puzzle_store.set_used_up_to(uint32(unused - 1), in_transaction)
async def update_wallet_puzzle_hashes(self, wallet_id):
derivation_paths: List[DerivationRecord] = []
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
for index in range(unused, last):
# Since DIDs are not released yet, we can assume they only use unhardened key derivation
pubkey: G1Element = self.get_public_key_unhardened(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Generating public key at index {index} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash,
pubkey,
target_wallet.wallet_info.type,
uint32(target_wallet.wallet_info.id),
False,
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths)
async def get_unused_derivation_record(
self, wallet_id: uint32, in_transaction=False, hardened=False
) -> DerivationRecord:
"""
Creates a puzzle hash for the given wallet, and then makes more puzzle hashes
for every wallet to ensure we always have more in the database. Never reuse the
same public key more than once (for privacy).
"""
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
await self.create_more_puzzle_hashes()
# Now we must have unused public keys
unused = await self.puzzle_store.get_unused_derivation_path()
assert unused is not None
record: Optional[DerivationRecord] = await self.puzzle_store.get_derivation_record(
unused, wallet_id, hardened
)
assert record is not None
# Set this key to used so we never use it again
await self.puzzle_store.set_used_up_to(record.index, in_transaction=in_transaction)
# Create more puzzle hashes / keys
await self.create_more_puzzle_hashes(in_transaction=in_transaction)
return record
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
current: Optional[DerivationRecord] = await self.puzzle_store.get_current_derivation_record_for_wallet(
wallet_id
)
return current
def set_callback(self, callback: Callable):
"""
Callback to be called when the state of the wallet changes.
"""
self.state_changed_callback = callback
def set_pending_callback(self, callback: Callable):
"""
Callback to be called when new pending transaction enters the store
"""
self.pending_tx_callback = callback
def set_coin_with_puzzlehash_created_callback(self, puzzlehash: bytes32, callback: Callable):
"""
Callback to be called when new coin is seen with specified puzzlehash
"""
self.puzzle_hash_created_callbacks[puzzlehash] = callback
async def puzzle_hash_created(self, coin: Coin):
callback = self.puzzle_hash_created_callbacks[coin.puzzle_hash]
if callback is None:
return None
await callback(coin)
def state_changed(self, state: str, wallet_id: int = None, data_object=None):
"""
Calls the callback if it's present.
"""
if data_object is None:
data_object = {}
if self.state_changed_callback is None:
return None
self.state_changed_callback(state, wallet_id, data_object)
def tx_pending_changed(self) -> None:
"""
Notifies the wallet node that there is a new pending transaction
"""
if self.pending_tx_callback is None:
return None
self.pending_tx_callback()
async def synced(self):
latest = await self.blockchain.get_peak_block()
if latest is None:
return False
if latest.height - await self.blockchain.get_finished_sync_up_to() > 1:
return False
latest_timestamp = self.blockchain.get_latest_timestamp()
if latest_timestamp > int(time.time()) - 10 * 60:
return True
return False
def set_sync_mode(self, mode: bool, sync_height: uint32 = uint32(0)):
"""
Sets the sync mode. This changes the behavior of the wallet node.
"""
self.sync_mode = mode
self.sync_target = sync_height
self.state_changed("sync_changed")
async def get_confirmed_spendable_balance_for_wallet(self, wallet_id: int, unspent_records=None) -> uint128:
"""
Returns the balance amount of all coins that are spendable.
"""
spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(wallet_id, unspent_records)
spendable_amount: uint128 = uint128(0)
for record in spendable:
spendable_amount = uint128(spendable_amount + record.coin.amount)
return spendable_amount
async def does_coin_belong_to_wallet(self, coin: Coin, wallet_id: int) -> bool:
"""
Returns true if we have the key for this coin.
"""
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is None:
return False
coin_wallet_id, wallet_type = info
if wallet_id == coin_wallet_id:
return True
return False
async def get_confirmed_balance_for_wallet(
self,
wallet_id: int,
unspent_coin_records: Optional[Set[WalletCoinRecord]] = None,
) -> uint128:
"""
Returns the confirmed balance, including coinbase rewards that are not spendable.
"""
# lock only if unspent_coin_records is None
if unspent_coin_records is None:
unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
return uint128(sum(cr.coin.amount for cr in unspent_coin_records))
async def get_unconfirmed_balance(
self, wallet_id: int, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
"""
Returns the balance, including coinbase rewards that are not spendable, and unconfirmed
transactions.
"""
# This API should change so that get_balance_from_coin_records is called for Set[WalletCoinRecord]
# and this method is called only for the unspent_coin_records==None case.
if unspent_coin_records is None:
unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
all_unspent_coins: Set[Coin] = {cr.coin for cr in unspent_coin_records}
for record in unconfirmed_tx:
for addition in record.additions:
# This is change or a transaction to ourselves
if await self.does_coin_belong_to_wallet(addition, wallet_id):
all_unspent_coins.add(addition)
for removal in record.removals:
if await self.does_coin_belong_to_wallet(removal, wallet_id) and removal in all_unspent_coins:
all_unspent_coins.remove(removal)
return uint128(sum(coin.amount for coin in all_unspent_coins))
async def unconfirmed_removals_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns new removals transactions that have not been confirmed yet.
"""
removals: Dict[bytes32, Coin] = {}
unconfirmed_tx = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
for record in unconfirmed_tx:
for coin in record.removals:
removals[coin.name()] = coin
return removals
async def fetch_parent_and_check_for_cat(
self, peer: WSChiaConnection, coin_state: CoinState, fork_height: Optional[uint32]
) -> Tuple[Optional[uint32], Optional[WalletType]]:
if self.is_pool_reward(coin_state.created_height, coin_state.coin.parent_coin_info) or self.is_farmer_reward(
coin_state.created_height, coin_state.coin.parent_coin_info
):
return None, None
response: List[CoinState] = await self.wallet_node.get_coin_state(
[coin_state.coin.parent_coin_info], fork_height, peer
)
if len(response) == 0:
self.log.warning(f"Could not find a parent coin with ID: {coin_state.coin.parent_coin_info}")
return None, None
parent_coin_state = response[0]
assert parent_coin_state.spent_height == coin_state.created_height
wallet_id = None
wallet_type = None
cs: Optional[CoinSpend] = await self.wallet_node.fetch_puzzle_solution(
peer, parent_coin_state.spent_height, parent_coin_state.coin
)
if cs is None:
return None, None
matched, curried_args = match_cat_puzzle(Program.from_bytes(bytes(cs.puzzle_reveal)))
if matched:
mod_hash, tail_hash, inner_puzzle = curried_args
inner_puzzle_hash = inner_puzzle.get_tree_hash()
self.log.info(
f"parent: {parent_coin_state.coin.name()} inner_puzzle_hash for parent is {inner_puzzle_hash}"
)
hint_list = compute_coin_hints(cs)
derivation_record = None
for hint in hint_list:
derivation_record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(bytes32(hint))
if derivation_record is not None:
break
if derivation_record is None:
self.log.info(f"Received state for the coin that doesn't belong to us {coin_state}")
else:
our_inner_puzzle: Program = self.main_wallet.puzzle_for_pk(bytes(derivation_record.pubkey))
cat_puzzle = construct_cat_puzzle(CAT_MOD, bytes32(bytes(tail_hash)[1:]), our_inner_puzzle)
if cat_puzzle.get_tree_hash() != coin_state.coin.puzzle_hash:
return None, None
if bytes(tail_hash).hex()[2:] in self.default_cats or self.config.get(
"automatically_add_unknown_cats", False
):
cat_wallet = await CATWallet.create_wallet_for_cat(
self, self.main_wallet, bytes(tail_hash).hex()[2:], in_transaction=True
)
wallet_id = cat_wallet.id()
wallet_type = WalletType(cat_wallet.type())
self.state_changed("wallet_created")
return wallet_id, wallet_type
async def new_coin_state(
self, coin_states: List[CoinState], peer: WSChiaConnection, fork_height: Optional[uint32]
) -> None:
# Applies a batch of coin state updates received from a peer: records newly
# created coins, marks spent coins, creates or confirms the matching
# transaction records, and handles CAT discovery and pool-wallet transitions
# Input states should already be sorted by cs_height, with reorgs at the beginning
curr_h = -1
for c_state in coin_states:
last_change_height = last_change_height_cs(c_state)
if last_change_height < curr_h:
raise ValueError("Input coin_states is not sorted properly")
curr_h = last_change_height
all_txs_per_wallet: Dict[int, List[TransactionRecord]] = {}
trade_removals = await self.trade_manager.get_coins_of_interest()
all_unconfirmed: List[TransactionRecord] = await self.tx_store.get_all_unconfirmed()
trade_coin_removed: List[CoinState] = []
for coin_state_idx, coin_state in enumerate(coin_states):
wallet_info: Optional[Tuple[uint32, WalletType]] = await self.get_wallet_id_for_puzzle_hash(
coin_state.coin.puzzle_hash
)
local_record: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(coin_state.coin.name())
self.log.debug(f"{coin_state.coin.name()}: {coin_state}")
# If we already have this coin, and it was spent and confirmed at the same heights, then we return (done)
if local_record is not None:
local_spent = None
if local_record.spent_block_height != 0:
local_spent = local_record.spent_block_height
if (
local_spent == coin_state.spent_height
and local_record.confirmed_block_height == coin_state.created_height
):
continue
wallet_id: Optional[uint32] = None
wallet_type: Optional[WalletType] = None
if wallet_info is not None:
wallet_id, wallet_type = wallet_info
elif local_record is not None:
wallet_id = uint32(local_record.wallet_id)
wallet_type = local_record.wallet_type
elif coin_state.created_height is not None:
wallet_id, wallet_type = await self.fetch_parent_and_check_for_cat(peer, coin_state, fork_height)
if wallet_id is None or wallet_type is None:
self.log.info(f"No wallet for coin state: {coin_state}")
continue
if wallet_id in all_txs_per_wallet:
all_txs = all_txs_per_wallet[wallet_id]
else:
all_txs = await self.tx_store.get_all_transactions_for_wallet(wallet_id)
all_txs_per_wallet[wallet_id] = all_txs
all_outgoing = [tx for tx in all_txs if "OUTGOING" in TransactionType(tx.type).name]
derivation_index = await self.puzzle_store.index_for_puzzle_hash(coin_state.coin.puzzle_hash)
if derivation_index is not None:
await self.puzzle_store.set_used_up_to(derivation_index, True)
if coin_state.created_height is None:
# TODO: implement handling for the case where this coin was reorged
# TODO: we need to potentially roll back the pool wallet here
pass
elif coin_state.created_height is not None and coin_state.spent_height is None:
await self.coin_added(coin_state.coin, coin_state.created_height, all_txs, wallet_id, wallet_type)
elif coin_state.created_height is not None and coin_state.spent_height is not None:
self.log.info(f"Coin Removed: {coin_state}")
record = await self.coin_store.get_coin_record(coin_state.coin.name())
if coin_state.coin.name() in trade_removals:
trade_coin_removed.append(coin_state)
children: Optional[List[CoinState]] = None
if record is None:
farmer_reward = False
pool_reward = False
tx_type: int
if self.is_farmer_reward(coin_state.created_height, coin_state.coin.parent_coin_info):
farmer_reward = True
tx_type = TransactionType.FEE_REWARD.value
elif self.is_pool_reward(coin_state.created_height, coin_state.coin.parent_coin_info):
pool_reward = True
tx_type = TransactionType.COINBASE_REWARD.value
else:
tx_type = TransactionType.INCOMING_TX.value
record = WalletCoinRecord(
coin_state.coin,
coin_state.created_height,
coin_state.spent_height,
True,
farmer_reward or pool_reward,
wallet_type,
wallet_id,
)
await self.coin_store.add_coin_record(record)
# Coin first received
coin_record: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(
coin_state.coin.parent_coin_info
)
if coin_record is not None and wallet_type.value == coin_record.wallet_type:
change = True
else:
change = False
if not change:
created_timestamp = await self.wallet_node.get_timestamp_for_height(coin_state.created_height)
tx_record = TransactionRecord(
confirmed_at_height=coin_state.created_height,
created_at_time=uint64(created_timestamp),
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, coin_state.coin.puzzle_hash)),
amount=uint64(coin_state.coin.amount),
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin_state.coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(tx_type),
name=bytes32(token_bytes()),
memos=[],
)
await self.tx_store.add_transaction_record(tx_record, True)
children = await self.wallet_node.fetch_children(peer, coin_state.coin.name(), fork_height)
assert children is not None
additions = [state.coin for state in children]
if len(children) > 0:
fee = 0
to_puzzle_hash = None
# Find coin that doesn't belong to us
amount = 0
for coin in additions:
derivation_record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(
coin.puzzle_hash
)
if derivation_record is None:
to_puzzle_hash = coin.puzzle_hash
amount += coin.amount
if to_puzzle_hash is None:
to_puzzle_hash = additions[0].puzzle_hash
spent_timestamp = await self.wallet_node.get_timestamp_for_height(coin_state.spent_height)
# A reorg rollback re-adds the reorged transactions, so a matching tx_record
# may already exist even though we are only adding the coin record back to the db here
tx_records: List[TransactionRecord] = []
for out_tx_record in all_outgoing:
for rem_coin in out_tx_record.removals:
if rem_coin.name() == coin_state.coin.name():
tx_records.append(out_tx_record)
if len(tx_records) > 0:
for tx_record in tx_records:
await self.tx_store.set_confirmed(tx_record.name, coin_state.spent_height)
else:
tx_record = TransactionRecord(
confirmed_at_height=coin_state.spent_height,
created_at_time=uint64(spent_timestamp),
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, to_puzzle_hash)),
amount=uint64(int(amount)),
fee_amount=uint64(fee),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=additions,
removals=[coin_state.coin],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=bytes32(token_bytes()),
memos=[],
)
await self.tx_store.add_transaction_record(tx_record, True)
else:
await self.coin_store.set_spent(coin_state.coin.name(), coin_state.spent_height)
rem_tx_records: List[TransactionRecord] = []
for out_tx_record in all_outgoing:
for rem_coin in out_tx_record.removals:
if rem_coin.name() == coin_state.coin.name():
rem_tx_records.append(out_tx_record)
for tx_record in rem_tx_records:
await self.tx_store.set_confirmed(tx_record.name, coin_state.spent_height)
for unconfirmed_record in all_unconfirmed:
for rem_coin in unconfirmed_record.removals:
if rem_coin.name() == coin_state.coin.name():
self.log.info(f"Setting tx_id: {unconfirmed_record.name} to confirmed")
await self.tx_store.set_confirmed(unconfirmed_record.name, coin_state.spent_height)
if record.wallet_type == WalletType.POOLING_WALLET:
if coin_state.spent_height is not None and coin_state.coin.amount == uint64(1):
wallet = self.wallets[uint32(record.wallet_id)]
curr_coin_state: CoinState = coin_state
while curr_coin_state.spent_height is not None:
cs: CoinSpend = await self.wallet_node.fetch_puzzle_solution(
peer, curr_coin_state.spent_height, curr_coin_state.coin
)
success = await wallet.apply_state_transition(cs, curr_coin_state.spent_height)
if not success:
break
new_singleton_coin: Optional[Coin] = wallet.get_next_interesting_coin(cs)
if new_singleton_coin is None:
# No more singleton (maybe destroyed?)
break
await self.coin_added(
new_singleton_coin,
coin_state.spent_height,
[],
uint32(record.wallet_id),
record.wallet_type,
)
await self.coin_store.set_spent(curr_coin_state.coin.name(), curr_coin_state.spent_height)
await self.add_interested_coin_ids([new_singleton_coin.name()], True)
new_coin_state: List[CoinState] = await self.wallet_node.get_coin_state(
[new_singleton_coin.name()], fork_height, peer
)
assert len(new_coin_state) == 1
curr_coin_state = new_coin_state[0]
# Check if a child is a singleton launcher
if children is None:
children = await self.wallet_node.fetch_children(peer, coin_state.coin.name(), fork_height)
assert children is not None
for child in children:
if child.coin.puzzle_hash != SINGLETON_LAUNCHER_HASH:
continue
if await self.have_a_pool_wallet_with_launched_id(child.coin.name()):
continue
if child.spent_height is None:
# TODO: handle the case where the launcher is spent in a later block
continue
launcher_spend: Optional[CoinSpend] = await self.wallet_node.fetch_puzzle_solution(
peer, coin_state.spent_height, child.coin
)
if launcher_spend is None:
continue
try:
pool_state = solution_to_pool_state(launcher_spend)
except Exception as e:
self.log.debug(f"Not a pool wallet launcher {e}")
continue
# solution_to_pool_state may return None but this may not be an error
if pool_state is None:
self.log.debug("solution_to_pool_state returned None, ignore and continue")
continue
assert child.spent_height is not None
pool_wallet = await PoolWallet.create(
self,
self.main_wallet,
child.coin.name(),
[launcher_spend],
child.spent_height,
True,
"pool_wallet",
)
launcher_spend_additions = launcher_spend.additions()
assert len(launcher_spend_additions) == 1
coin_added = launcher_spend_additions[0]
await self.coin_added(
coin_added, coin_state.spent_height, [], pool_wallet.id(), WalletType(pool_wallet.type())
)
await self.add_interested_coin_ids([coin_added.name()], True)
else:
raise RuntimeError("All cases already handled") # Logic error, all cases handled
for coin_state_removed in trade_coin_removed:
await self.trade_manager.coins_of_interest_farmed(coin_state_removed, fork_height)
async def have_a_pool_wallet_with_launched_id(self, launcher_id: bytes32) -> bool:
for wallet_id, wallet in self.wallets.items():
if (
wallet.type() == WalletType.POOLING_WALLET
and (await wallet.get_current_state()).launcher_id == launcher_id
):
self.log.warning("Already have, not recreating")
return True
return False
def is_pool_reward(self, created_height, parent_id):
for i in range(0, 30):
try_height = created_height - i
if try_height < 0:
break
calculated = pool_parent_id(try_height, self.constants.GENESIS_CHALLENGE)
if calculated == parent_id:
return True
return False
def is_farmer_reward(self, created_height, parent_id):
for i in range(0, 30):
try_height = created_height - i
if try_height < 0:
break
calculated = farmer_parent_id(try_height, self.constants.GENESIS_CHALLENGE)
if calculated == parent_id:
return True
return False
async def get_wallet_id_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[uint32, WalletType]]:
info = await self.puzzle_store.wallet_info_for_puzzle_hash(puzzle_hash)
if info is not None:
wallet_id, wallet_type = info
return uint32(wallet_id), wallet_type
interested_wallet_id = await self.interested_store.get_interested_puzzle_hash_wallet_id(puzzle_hash=puzzle_hash)
if interested_wallet_id is not None:
wallet_id = uint32(interested_wallet_id)
if wallet_id not in self.wallets.keys():
self.log.warning(f"Do not have wallet {wallet_id} for puzzle_hash {puzzle_hash}")
return None
wallet_type = WalletType(self.wallets[uint32(wallet_id)].type())
return uint32(wallet_id), wallet_type
return None
async def coin_added(
self,
coin: Coin,
height: uint32,
all_outgoing_transaction_records: List[TransactionRecord],
wallet_id: uint32,
wallet_type: WalletType,
) -> Optional[WalletCoinRecord]:
"""
Adding coin to DB, return wallet coin record if it gets added
"""
existing: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(coin.name())
if existing is not None:
return None
self.log.info(f"Adding coin: {coin} at {height} wallet_id:{wallet_id}")
farmer_reward = False
pool_reward = False
if self.is_farmer_reward(height, coin.parent_coin_info):
farmer_reward = True
elif self.is_pool_reward(height, coin.parent_coin_info):
pool_reward = True
farm_reward = False
coin_record: Optional[WalletCoinRecord] = await self.coin_store.get_coin_record(coin.parent_coin_info)
if coin_record is not None and wallet_type.value == coin_record.wallet_type:
change = True
else:
change = False
if farmer_reward or pool_reward:
farm_reward = True
if pool_reward:
tx_type: int = TransactionType.COINBASE_REWARD.value
else:
tx_type = TransactionType.FEE_REWARD.value
timestamp = await self.wallet_node.get_timestamp_for_height(height)
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=timestamp,
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, coin.puzzle_hash)),
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(tx_type),
name=coin.name(),
memos=[],
)
await self.tx_store.add_transaction_record(tx_record, True)
else:
records: List[TransactionRecord] = []
for record in all_outgoing_transaction_records:
for add_coin in record.additions:
if add_coin.name() == coin.name():
records.append(record)
if len(records) > 0:
for record in records:
if record.confirmed is False:
await self.tx_store.set_confirmed(record.name, height)
elif not change:
timestamp = await self.wallet_node.get_timestamp_for_height(height)
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=timestamp,
to_puzzle_hash=(await self.convert_puzzle_hash(wallet_id, coin.puzzle_hash)),
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=coin.name(),
memos=[],
)
if coin.amount > 0:
await self.tx_store.add_transaction_record(tx_record, True)
coin_record_1: WalletCoinRecord = WalletCoinRecord(
coin, height, uint32(0), False, farm_reward, wallet_type, wallet_id
)
await self.coin_store.add_coin_record(coin_record_1)
if wallet_type == WalletType.CAT or wallet_type == WalletType.DISTRIBUTED_ID:
wallet = self.wallets[wallet_id]
await wallet.coin_added(coin, height)
await self.create_more_puzzle_hashes(in_transaction=True)
return coin_record_1
async def add_pending_transaction(self, tx_record: TransactionRecord):
"""
Called from wallet before new transaction is sent to the full_node
"""
# Wallet node will use this queue to retry sending this transaction until the full node receives it
await self.tx_store.add_transaction_record(tx_record, False)
all_coins_names = []
all_coins_names.extend([coin.name() for coin in tx_record.additions])
all_coins_names.extend([coin.name() for coin in tx_record.removals])
await self.add_interested_coin_ids(all_coins_names, False)
self.tx_pending_changed()
self.state_changed("pending_transaction", tx_record.wallet_id)
async def add_transaction(self, tx_record: TransactionRecord, in_transaction=False):
"""
Called from wallet to add a transaction that is not being sent to the full_node
"""
await self.tx_store.add_transaction_record(tx_record, in_transaction)
self.state_changed("pending_transaction", tx_record.wallet_id)
async def remove_from_queue(
self,
spendbundle_id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
error: Optional[Err],
):
"""
Full node received our transaction, no need to keep it in queue anymore
"""
updated = await self.tx_store.increment_sent(spendbundle_id, name, send_status, error)
if updated:
tx: Optional[TransactionRecord] = await self.get_transaction(spendbundle_id)
if tx is not None:
self.state_changed("tx_update", tx.wallet_id, {"transaction": tx})
async def get_all_transactions(self, wallet_id: int) -> List[TransactionRecord]:
"""
Retrieves all confirmed and pending transactions
"""
records = await self.tx_store.get_all_transactions_for_wallet(wallet_id)
return records
async def get_transaction(self, tx_id: bytes32) -> Optional[TransactionRecord]:
return await self.tx_store.get_transaction_record(tx_id)
async def is_addition_relevant(self, addition: Coin):
"""
Check whether we care about a new addition (puzzle_hash). Returns true if we
control this puzzle hash.
"""
result = await self.puzzle_store.puzzle_hash_exists(addition.puzzle_hash)
return result
async def get_wallet_for_coin(self, coin_id: bytes32) -> Any:
coin_record = await self.coin_store.get_coin_record(coin_id)
if coin_record is None:
return None
wallet_id = uint32(coin_record.wallet_id)
wallet = self.wallets[wallet_id]
return wallet
async def reorg_rollback(self, height: int):
"""
Rolls back and updates the coin_store and transaction store. It's possible this height
is the tip, or even beyond the tip.
"""
await self.coin_store.rollback_to_block(height)
reorged: List[TransactionRecord] = await self.tx_store.get_transaction_above(height)
await self.tx_store.rollback_to_block(height)
for record in reorged:
if record.type in [
TransactionType.OUTGOING_TX,
TransactionType.OUTGOING_TRADE,
TransactionType.INCOMING_TRADE,
]:
await self.tx_store.tx_reorged(record, in_transaction=True)
self.tx_pending_changed()
# Removes wallets that were created from a blockchain transaction which got reorged.
remove_ids = []
for wallet_id, wallet in self.wallets.items():
if wallet.type() == WalletType.POOLING_WALLET.value:
remove: bool = await wallet.rewind(height, in_transaction=True)
if remove:
remove_ids.append(wallet_id)
for wallet_id in remove_ids:
await self.user_store.delete_wallet(wallet_id, in_transaction=True)
self.wallets.pop(wallet_id)
async def _await_closed(self) -> None:
await self.db_connection.close()
if self.weight_proof_handler is not None:
self.weight_proof_handler.cancel_weight_proof_tasks()
def unlink_db(self):
Path(self.db_path).unlink()
async def get_all_wallet_info_entries(self, wallet_type: Optional[WalletType] = None) -> List[WalletInfo]:
return await self.user_store.get_all_wallet_info_entries(wallet_type)
async def get_start_height(self):
"""
If we have a coin, use that as the starting height next time;
otherwise use the peak
"""
return 0
async def get_wallet_for_asset_id(self, asset_id: str):
for wallet_id in self.wallets:
wallet = self.wallets[wallet_id]
if wallet.type() == WalletType.CAT:
if bytes(wallet.cat_info.limitations_program_hash).hex() == asset_id:
return wallet
return None
async def add_new_wallet(self, wallet: Any, wallet_id: int, create_puzzle_hashes=True, in_transaction=False):
self.wallets[uint32(wallet_id)] = wallet
if create_puzzle_hashes:
await self.create_more_puzzle_hashes(in_transaction=in_transaction)
self.state_changed("wallet_created")
async def get_spendable_coins_for_wallet(self, wallet_id: int, records=None) -> Set[WalletCoinRecord]:
if records is None:
records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
# Coins that are currently part of a transaction
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_dict: Dict[bytes32, Coin] = {}
for tx in unconfirmed_tx:
for coin in tx.removals:
# TODO, "if" might not be necessary once unconfirmed tx doesn't contain coins for other wallets
if await self.does_coin_belong_to_wallet(coin, wallet_id):
removal_dict[coin.name()] = coin
# Coins that are part of the trade
offer_locked_coins: Dict[bytes32, WalletCoinRecord] = await self.trade_manager.get_locked_coins()
filtered = set()
for record in records:
if record.coin.name() in offer_locked_coins:
continue
if record.coin.name() in removal_dict:
continue
filtered.add(record)
return filtered
async def create_action(
self, name: str, wallet_id: int, wallet_type: int, callback: str, done: bool, data: str, in_transaction: bool
):
await self.action_store.create_action(name, wallet_id, wallet_type, callback, done, data, in_transaction)
self.tx_pending_changed()
async def generator_received(self, height: uint32, header_hash: uint32, program: Program):
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_generator":
stored_header_hash = bytes32(hexstr_to_bytes(action_data["header_hash"]))
stored_height = uint32(action_data["height"])
if stored_header_hash == header_hash and stored_height == height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(height, header_hash, program, action.id)
async def puzzle_solution_received(self, response: RespondPuzzleSolution):
unwrapped: PuzzleSolutionResponse = response.response
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
stored_coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
if stored_coin_name == unwrapped.coin_name and height == unwrapped.height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(unwrapped, action.id)
async def new_peak(self, peak: wallet_protocol.NewPeakWallet):
for wallet_id, wallet in self.wallets.items():
if wallet.type() == uint8(WalletType.POOLING_WALLET):
await wallet.new_peak(peak.height)
async def add_interested_puzzle_hashes(
self, puzzle_hashes: List[bytes32], wallet_ids: List[int], in_transaction: bool = False
) -> None:
for puzzle_hash, wallet_id in zip(puzzle_hashes, wallet_ids):
await self.interested_store.add_interested_puzzle_hash(puzzle_hash, wallet_id, in_transaction)
if len(puzzle_hashes) > 0:
await self.wallet_node.new_peak_queue.subscribe_to_puzzle_hashes(puzzle_hashes)
async def add_interested_coin_ids(self, coin_ids: List[bytes32], in_transaction: bool = False) -> None:
for coin_id in coin_ids:
await self.interested_store.add_interested_coin_id(coin_id, in_transaction)
if len(coin_ids) > 0:
await self.wallet_node.new_peak_queue.subscribe_to_coin_ids(coin_ids)
async def delete_trade_transactions(self, trade_id: bytes32):
txs: List[TransactionRecord] = await self.tx_store.get_transactions_by_trade_id(trade_id)
for tx in txs:
await self.tx_store.delete_transaction_record(tx.name)
async def convert_puzzle_hash(self, wallet_id: uint32, puzzle_hash: bytes32) -> bytes32:
wallet = self.wallets[wallet_id]
# This should be general to wallets but for right now this is just for CATs so we'll add this if
if wallet.type() == WalletType.CAT.value:
return await wallet.convert_puzzle_hash(puzzle_hash)
return puzzle_hash
|
py | 1a51df88fea9eed51b2fbdc788c323550cc9771e | # stdlib
# third party
import torch as th
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...lib.torch.tensor_util import tensor_deserializer
from ...lib.torch.tensor_util import tensor_serializer
from ...logger import warning
from ...proto.lib.torch.device_pb2 import Device as Device_PB
from ...proto.lib.torch.tensor_pb2 import TensorProto as Tensor_PB
torch_tensor_type = type(th.tensor([1, 2, 3]))
def object2proto(obj: object) -> Tensor_PB:
proto = Tensor_PB()
proto.tensor = tensor_serializer(obj)
proto.requires_grad = getattr(obj, "requires_grad", False)
proto.device.CopyFrom(
Device_PB(
type=obj.device.type, # type: ignore
index=obj.device.index, # type: ignore
)
)
if proto.requires_grad:
grad = getattr(obj, "grad", None)
if grad is not None:
proto.grad = tensor_serializer(grad)
return proto
def proto2object(proto: Tensor_PB) -> th.Tensor:
tensor = tensor_deserializer(proto.tensor)
if proto.requires_grad:
tensor.grad = tensor_deserializer(proto.grad)
tensor.requires_grad_(proto.requires_grad)
if proto.device.type == "cuda" and th.cuda.is_available():
cuda_index = proto.device.index
if th.cuda.device_count() < (cuda_index + 1):
cuda_index = th.cuda.device_count() - 1
warning(
f"The requested CUDA index {proto.device.index} is invalid."
+ f"Falling back to GPU index {cuda_index}.",
print=True,
)
return tensor.cuda(cuda_index)
if proto.device.type == "cuda" and not th.cuda.is_available():
warning("Cannot find any CUDA devices, falling back to CPU.", print=True)
return tensor
GenerateWrapper(
wrapped_type=torch_tensor_type,
import_path="torch.Tensor",
protobuf_scheme=Tensor_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
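# Hedged round-trip sketch (added for illustration; not part of the original file).
# The serializers above can be exercised directly, independent of the wrapper:
#   t = th.tensor([1, 2, 3])
#   assert th.equal(proto2object(object2proto(t)), t)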
|
py | 1a51e0bbb173cb8ae224210a9a6c8b3000138f90 | import os
from datetime import timedelta
class Config(object):
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOAD_FOLDER = "./media/thumbnails"
ALLOWED_EXTENSIONS = tuple({".jpeg", ".jpg", ".png", ".gif"})
SECRET_KEY = os.environ.get("SECRET_KEY")
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)
JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=30)
class DevelopmentConfig(Config):
SQLALCHEMY_DATABASE_URI = "sqlite:///database.db"
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:"
SECRET_KEY = "fjdsajfksd"
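# Hedged usage sketch (illustrative; the module/class path is an assumption): the app
# factory would typically load one of these classes, e.g.
#   app.config.from_object("config.DevelopmentConfig")
# or, for the test suite,
#   app.config.from_object(TestingConfig)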
|
py | 1a51e0df72249c510b8c50f58c3f810b3dab4514 | from skelebot.objects.component import *
import idoubtthiswilleverbealegitimatepackagename
# Just a dummy plugin for testing
class AddNumbers(Component):
activation = Activation.ALWAYS
commands = ["addNumbers"]
def addParsers(self, subparsers):
subparsers.add_parser("addNumbers", help="adds the numbers 1 and 2 to get 3!")
return subparsers
def execute(self, config, args):
a = 1
b = 2
print(a, " + ", b, " = ", a + b)
|
py | 1a51e0ecba5618849f774c0b6f5f0c4cf848ebf9 | from django import get_version
from django.template import engines
from django.test import TestCase
DJANGO3 = get_version() >= "3"
def html_39x27(html):
"""
Return HTML string with ' (Django < 3) instead of ' (Django >= 3).
See https://docs.djangoproject.com/en/dev/releases/3.0/#miscellaneous
"""
if not DJANGO3:
return html.replace("'", "'")
return html
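# Example (added for clarity): html_39x27("value='x'") returns
# "value='x'" under Django < 3 and the unchanged string under Django >= 3,
# matching how each major version escapes single quotes in rendered output.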
class BootstrapTestCase(TestCase):
"""TestCase with render function for template code."""
def render(self, text, context=None, load_bootstrap=True):
"""Return rendered result of template with given context."""
prefix = "{% load django_bootstrap5 %}" if load_bootstrap else ""
template = engines["django"].from_string(f"{prefix}{text}")
return template.render(context or {})
|
py | 1a51e1a5b5c44e364deee20732d927328ee105b6 | from models import *
import sqlalchemy as db
from sqlalchemy.orm import *
from sqlalchemy import inspect
from sqlalchemy_schemadisplay import create_schema_graph
class DatabaseHelper(object):
# params
__session = None
__engine = None
def __new__(cls, *args, **kwargs):
# use a single underscore so hasattr() checks the same attribute name that is assigned
# (double-underscore name mangling would otherwise make this check always fail)
if not hasattr(cls, '_instance'):
cls._instance = super(DatabaseHelper, cls).__new__(cls)
return cls._instance
@classmethod
def get_engine(cls):
if cls.__engine is None:
cls.__engine = engine = db.create_engine("postgresql://postgres:postgres@localhost:5432/postgres")
print("Created database engine ", cls.__engine)
return cls.__engine
@classmethod
def get_session(cls):
if cls.__session is None:
cls.__session = Session(cls.get_engine())
print("Database session opened")
return cls.__session
@classmethod
def close(cls):
if cls.__session:
cls.__session.close()
print("Database session closed")
@classmethod
def __getInspector(cls):
return inspect(cls.get_engine())
@classmethod
def rollback_session(cls):
cls.get_session().rollback()
@classmethod
def getListOfTables(cls):
inspector = cls.__getInspector()
table_list = []
for table_name in inspector.get_table_names():
if table_name is None: continue
table_list.append(table_name)
return table_list
@classmethod
def getTableColumns(cls, table_name):
inspector = cls.__getInspector()
return inspector.get_columns(table_name)
@classmethod
def get_primary_keys(cls, table_name):
inspector = cls.__getInspector()
return inspector.get_primary_keys(table_name)
@classmethod
def get_foreign_keys(cls, table_name):
inspector = cls.__getInspector()
return inspector.get_foreign_keys(table_name)
@classmethod
def fill_db_object(cls, model, inflator_object):
assert (isinstance(inflator_object, object))
db_object = model.create()
for key in inflator_object.keys():
setattr(db_object, key.name, inflator_object[key])
return db_object
@classmethod
def getTableObject(cls, table_name):
if table_name == 'promoter':
return promoter
if table_name == 'ad':
return ad
if table_name == 'theme':
return theme
if table_name == 'user':
return user
if table_name == 'user_theme':
return user_theme
if table_name == 'product':
return product
if table_name == 'session':
return session |
py | 1a51e1dc6800754db2f27bb5e81097a9dab5b610 | #Locating, Mapping, and Reaching Customers
#Keelin Haynes, Masters Student of Geography, Miami University, Oxford, OH USA
#
#The link to the original script: https://github.com/keelindhaynes/Marketing_Initiative-Target_Identification-Python-
#
#This script will locate kroger stores within a LBRS dataset, create a buffer
# around them, select all homes and bussinesses within that buffer,
# and create a mailing list containing thte addresses
#
#This script is written to be used with ArcPy
#In order for this to be as usable as possible with as little user input as possible, this will create all items with generic terms. i.e. No county identifying information will be
#contained in it. Your outputs will be: "Kroger", "Kroger_Buffer", and "Applicable_Addresses".
#Thus, if you need to run multiple counties, you will need to go into the output folder and either rename each dataset with county identifying info (e.g. "Muskingum_Kroger")
#or gather the mailing addresses of one county before moving on to the next
#This imports the necessary modules
import arcpy
import arcpy.da
#You will need to enter your workspace below!!
#^!
#^!
#^!
#This sets up the workspace location
arcpy.env.workspace = r"F:\1PythonFinalProject\WASHINGTONLBRS"
print "The workspace is " + arcpy.env.workspace + "\n"
#This allows outputs to be overwritten
arcpy.env.overwriteOutput = True
print "Outputs will be overwritten \n"
#This sets up a try block
try:
#This sets up a search cursor that will list all of the Krogers in the dataset
#SearchCursor(in_table, field_names,
#{where_clause}, {spatial_reference}, {explode_to_points}, {sql_clause})
#You will need to enter the location of your data below!!
#^!
#^!
#^!
seacurin = r"F:\1PythonFinalProject\WASHINGTONLBRS\WAS_ADDS.shp"
with arcpy.da.SearchCursor (seacurin, ("COMMENT", "LSN", "USPS_CITY", "STATE", "ZIPCODE"), '"COMMENT" = \'KROGER\'') as cursor:
#This creates a for loop to iterate through all of the files in the LBRS data
for row in cursor:
#This tells the user the address of any Krogers in the data
print ("Kroger Address: " + row[1]+ " "+ row[2]+ " "+ row[3]+ " "+ row[4])
print " "
#This creates a temporary feature layer of selected points in the data set
#MakeFeatureLayer_management (in_features, out_layer, {where_clause}, {workspace}, {field_info}
#You will need to enter the location where you either have or want your "output" folder to be located below!!
#^!
#^!
#^!
outputfeature = r"F:\1PythonFinalProject\Outputs\Kroger_location"
arcpy.MakeFeatureLayer_management(seacurin, outputfeature, '"COMMENT" = \'KROGER\'')
print "A feature layer was just created. \n"
#This creates a shapefile of the above selection
#CopyFeatures_management (in_features, out_feature_class, {config_keyword}, {spatial_grid_1}, {spatial_grid_2}, {spatial_grid_3})
arcpy.CopyFeatures_management(outputfeature, r"f:\1PythonFinalProject\Outputs\Kroger.shp")
print "A shapefile of the above feature layer was created \n"
#This sets up variables to be used for a buffer
#Buffer_analysis (in_features, out_feature_class, buffer_distance_or_field,
#{line_side}, {line_end_type}, {dissolve_option}, {dissolve_field}, {method})
buffin = r"F:\1PythonFinalProject\Outputs\Kroger.shp"
buffout = r"F:\1PythonFinalProject\Outputs\Kroger_Buffer.shp"
buffdist = "1 Mile"
#This creates a buffer of 1 mile around the shapefiles created above
arcpy.Buffer_analysis(buffin, buffout, buffdist)
print "A buffer of 1 mile was created around the above shapefile (Kroger location) \n"
#This will create a feature layer of the LBRS data to be used for a location query
#MakeFeatureLayer_management (in_features, out_layer, {where_clause}, {workspace}, {field_info}
temp_LBRS_Data_Layer = arcpy.MakeFeatureLayer_management(seacurin, r"F:\1PythonFinalProject\Outputs\LBRS_Temp_Layer.shp")
print "A a feature layer of the LBRS data to be used for a location query was just created \n"
#This sets up variables to be used for a select by location function
#SelectLayerByLocation_management (in_layer, {overlap_type}, {select_features},
#{search_distance}, {selection_type}, {invert_spatial_relationship})
sellocin = temp_LBRS_Data_Layer
selloctype = "Within"
sellocselfeature = buffout
#This performs a select by location (In this case those LBRS points within the buffer)
selloc = arcpy.SelectLayerByLocation_management(sellocin, selloctype, sellocselfeature)
print "A select by location (In this case those LBRS points within the buffer) was just performed \n"
#This creates a shapefile of the above selection
#CopyFeatures_management (in_features, out_feature_class, {config_keyword}, {spatial_grid_1}, {spatial_grid_2}, {spatial_grid_3})
arcpy.CopyFeatures_management(selloc, r"F:\1PythonFinalProject\Outputs\Applicable_Addresses")
print "A shapefile of the above selection was just created \n"
#This adds a new field to the attribute table of the above selection
#AddField_management (in_table, field_name, field_type, {field_precision}, {field_scale}, {field_length}, {field_alias},
#{field_is_nullable}, {field_is_required}, {field_domain})
arcpy.AddField_management(r"F:\1PythonFinalProject\Outputs\Applicable_Addresses.shp", "Address", "TEXT")
print "A new field to the attribute table of the above selection was just added. \n"
#This populates the newly created field
#UpdateCursor (in_table, field_names, {where_clause}, {spatial_reference}, {explode_to_points}, {sql_clause}
with arcpy.da.UpdateCursor(r"F:\1PythonFinalProject\Outputs\Applicable_Addresses.shp", ("LSN", "USPS_CITY", "STATE", "ZIPCODE", "Address")) as cursor:
#This creates a counter
cntr = 1
#This creates a for loop
for row in cursor:
#This creates an if statement that updates the Address field with the concatenation of the other selected fields
if row[4] == "":
row[4] = row[0] + " " + row[1] + ", " + row[2] + " " + row[3]
cursor.updateRow(row)
print ("Record number " +str(cntr) +" updated. It now says " + row[4])
cntr = cntr +1
print "\n \n You now a list of addresses within 1 mile of the kroger location in your chosen county. The addresses are contained in a field that is located in the attribute table of the dataset."
print "\n Have a great day"
except Exception as e:
print (e.message)
|
py | 1a51e3905237a441c13ebdffea10d69b7652821c | import macaroons_lib2 as mlib # library of macroons being tested
import hmac
import hashlib
import base64
import time
from Crypto.Cipher import AES
"""This is a test file for testing each of the functions defined in macaroons_lib2
The five functional tests are written to ensure the appliable macaroons functions operate
as defined within the paper written by Birgisson et al.'s "Macaroons: Cookies with Contextual
Caveats for Decentralized Authorization in the Cloud". The macaroons functions from the
paper's Figure 8 (Page 7) being tested within this test file include the following:
Test 2 - CreateMacaroon(k, id , L);
Test 3 - M.addCaveatHelper(cId, vId, cL)
Test 4 - M.AddFirstPartyCaveat(a)
Test 1 - M.Verify(TM , k, A, M)
The additional functions for marshalling and pasing JSONs are being also tested to support
the replication of results in Birgisson et al. Table II.
Test 5 - Mashal as JSON
Test 5 - Parse from JSON
...
Test File Development Status
-------
as of 3pm 7/20
completed so far:
create macaroon
verify 1st party caveat
to do:
add 1st party caveat
marshal and parse from json
conversion to dict and obj
not needed: 3rd party caveat since it is not in the table we are reproducing
as of 420pm 7/20
completed so far:
create macaroon,
verify 1st party caveat,
add 1st party caveat,
marshal and parse from json
to do:
conversion to dict and obj
(talk to Ali, may not need testing, since we pulled straight from online source)
not needed:
3rd party caveat since it is not in the table we are reproducing
...
Methods
-------
printTestDesc(testName)
Prints the tests name (i.e. testName) that is being attempted
printTestResult(testName, string)
Prints the test name and its reuslts (i.e. testName and string) following test completion
test1_VerifyOfFirstPartyCaveats()
Test 1 creates a macaroon and add first party caveats then tests the verify function
test2_CreateMacaroon()
Test 2 creates a simple macaroon and tests its creation
test3_addCaveatHelper():
Test 3 creates a simple macaroon and tests the caveat helper function
test4_addFirstPartyCaveat():
Test 4 tests add First Party Caveat function which is a function wrapper of addCaveatHelper
test5_marshalAndParseJSON():
Test 5 creates a macaroon and tests the marshal and parse to JSON functions
"""
def printTestDesc(testName):
"""Prints the tests name (i.e. testName) that is being attempted
Parameters
----------
testName : str
The name of the test being run
"""
print("------------------------------------------ ATTEMPTING "+ testName)
def printTestResult(testName, string):
"""Prints the test name and its reuslts (i.e. testName and string) following test completion
Parameters
----------
testName : str
The name of the test being run
string : str
The results of the test
"""
print("------------------------------------------ "+ testName + ":"+ string )
def test1_VerifyOfFirstPartyCaveats():
"""Test 1 creates a macaroon and add first party caveats then tests the verify function
"""
K_TargetService1 = "this is the secret key "
K_TargetService2 = "this is also the secret key "
random_nonce = str(433242342)
location = "Catonsville, 21228"
caveat1 = "level of coolness == high"
caveat2 = "champions == USA Women's Team"
M = mlib.CreateMacaroon(K_TargetService1, random_nonce, location)
M.addFirstPartyCaveat(caveat1)
M.addFirstPartyCaveat(caveat2)
receivedMacaroon = M
# M2 = mlib.CreateMacaroon(K_TargetService1, random_nonce, location)
# M2.addFirstPartyCaveat(caveat1)
# M2.addFirstPartyCaveat(caveat2)
assert(mlib.verify(receivedMacaroon, K_TargetService2 ) == False)
assert(mlib.verify(receivedMacaroon, K_TargetService1 ) == True)
def test2_CreateMacaroon():
"""Test 2 creates a simple macaroon and tests its creation
"""
#### Input: data
id = "abc"
key = "234324"
location = "DC"
#### Output: compute hmac on the outside
hmac_value = hmac.new(key, id, hashlib.sha256)
hmac_value_digest = hmac_value.hexdigest()
#### use library to compute HMAC
M = mlib.CreateMacaroon(key, id, location)
#### Assertion: Does the library's output equal the expected value
#print(M.sig)
#print(hmac_value_digest)
printTestDesc("CreateMacaroon")
assert(M.sig == hmac_value_digest)
assert(M.id == id)
printTestResult("CreateMacaroon" , "SUCCESS")
def test3_addCaveatHelper():
"""Test 3 creates a simple macaroon and tests the caveat helper function
"""
printTestDesc("addCaveatHelper")
id = "abc"
key = "234324"
location = "DC"
M = mlib.CreateMacaroon(key, id, location)
oldMacaroonCopy = mlib.parseFromJSON(mlib.marshalToJSON(M))
assert(M.sig == oldMacaroonCopy.sig)
caveat_cid = "123"
caveat_vid = "sdfd"
caveat_cl = "NYC"
## test addCaveatHelper
M.addCaveatHelper(caveat_cid , caveat_vid, caveat_cl)
assert(M.sig != oldMacaroonCopy.sig)
#### what to verify
#### test if the caveat was properly added
string_caveat = caveat_cid + ":" + caveat_vid + ":" + caveat_cl
assert(M.caveats[-1] == string_caveat)
#### test if the caveat signature "evolved" correctly
new_sig = hmac.new(oldMacaroonCopy.sig, caveat_vid+caveat_cid , hashlib.sha256)
assert(M.sig == new_sig.hexdigest())
printTestResult("addCaveatHelper" , "SUCCESS")
def test4_addFirstPartyCaveat():
"""Test 4 tests add First Party Caveat function which is a function wrapper of addCaveatHelper
"""
printTestDesc("addFirstPartyCaveat")
id = "abc"
key = "234324"
location = "DC"
M = mlib.CreateMacaroon(key, id, location)
caveat_cid = "123"
caveat_vid = "0"
caveat_cl = "NYC"
# snapshot the macaroon before the caveat is added so the signature evolution can be checked
oldMacaroonCopy = mlib.parseFromJSON(mlib.marshalToJSON(M))
M.addCaveatHelper(caveat_cid , caveat_vid, caveat_cl)
assert(M.sig != oldMacaroonCopy.sig)
#### what to verify
#### test if the caveat was properly added
string_caveat = caveat_cid + ":" + caveat_vid + ":" + caveat_cl
assert(M.caveats[-1] == string_caveat)
#### test if the caveat signature "evolved" correctly
new_sig = hmac.new(oldMacaroonCopy.sig, caveat_vid+caveat_cid , hashlib.sha256)
assert(M.sig == new_sig.hexdigest())
printTestResult("addFirstPartyCaveat" , "SUCCESS")
def test5_marshalAndParseJSON():
"""Test 5 creates a macaroon and tests the marshal and parse to JSON functions
"""
printTestDesc("marshalToJSON")
id = "abc"
key = "234324"
location = "DC"
M = mlib.CreateMacaroon(key, id, location)
json_string = mlib.marshalToJSON(M)
print(json_string)
printTestDesc("parseFromJSON")
M_returned = mlib.parseFromJSON(json_string)
print(M_returned)
if(__name__ == "__main__"):
# call all five tests
test1_VerifyOfFirstPartyCaveats()
test2_CreateMacaroon()
test3_addCaveatHelper()
test4_addFirstPartyCaveat()
test5_marshalAndParseJSON()
"""old code
"""
# id = "abc"
# key = "234324"
# location = "DC"
# M = CreateMacaroon(key, id, location)
# M.addCaveatHelper("123", "sdfd", "NYC")
# M.addCaveatHelper("13423", "sdfdfd", "DC")
# M.addCaveatHelper("12dfd3", "sd343fd", "Baltimore")
# json_string = marshalToJSON(M)
# M_returned = parseFromJSON(json_string)
"""old code
"""
# M.thirdPartyLocations = ["NYC" , "DC", "Baltimore"]
# json_string2 = marshalToJSON(M)
# M_returned2 = parseFromJSON(json_string2)
|
py | 1a51e3fe2459aa397b0c70f2959db5fb836352f9 | import os
import sys
import glob
import unittest
import datetime
def create_test_suite():
"""
Runs through the list of unit test modules available in the src/test folder,
collecting each into a single test suite.
"""
testFolder = sys.path[0]
testFolderTokens = testFolder.split('/')
moduleStrings = []
if testFolderTokens[-1] == 'src':
testFiles = glob.glob(testFolder+'/test/test_*.py')
for testFile in testFiles:
testFileName = testFile.split('/')[-1].replace('.py','')
moduleStrings.append('test.'+testFileName)
else:
testFiles = glob.glob(testFolder+'/test/test_*.py')
for testFile in testFiles:
testFileName = testFile.split('/')[-1].replace('.py','')
moduleStrings.append('test.'+testFileName)
print('Running Suite of Tests...',datetime.datetime.now())
suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in moduleStrings]
testSuite = unittest.TestSuite(suites)
return testSuite
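# Hedged usage sketch (not in the original file): the returned suite is typically
# handed to unittest's text runner, e.g. from a run_tests entry point:
#   runner = unittest.TextTestRunner(verbosity=2)
#   runner.run(create_test_suite())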
|
py | 1a51e56a5e44b9b3555c0cb7be8e86edf93121ae | from distutils.version import LooseVersion
import pytest
import numpy as np
from collections import defaultdict
from itertools import combinations
from opensfm import multiview
from opensfm.synthetic_data import synthetic_examples
def pytest_configure(config):
use_legacy_numpy_printoptions()
def use_legacy_numpy_printoptions():
"""Ensure numpy use legacy print formant."""
if LooseVersion(np.__version__).version[:2] > [1, 13]:
np.set_printoptions(legacy='1.13')
@pytest.fixture(scope='module')
def scene_synthetic():
np.random.seed(42)
data = synthetic_examples.synthetic_ellipse_scene()
maximum_depth = 40
projection_noise = 1.0
gps_noise = 5.0
exifs = data.get_scene_exifs(gps_noise)
features, desc, colors, graph = data.get_tracks_data(maximum_depth,
projection_noise)
return data, exifs, features, desc, colors, graph
@pytest.fixture(scope='session')
def scene_synthetic_cube():
np.random.seed(42)
data = synthetic_examples.synthetic_cube_scene()
_, _, _, tracks_manager = data.get_tracks_data(40, 0.0)
return data.get_reconstruction(), tracks_manager
@pytest.fixture(scope='module')
def pairs_and_poses():
np.random.seed(42)
data = synthetic_examples.synthetic_cube_scene()
reconstruction = data.get_reconstruction()
scale = 0.0
features, _, _, tracks_manager = data.get_tracks_data(40, scale)
points_keys = list(reconstruction.points.keys())
pairs, poses = defaultdict(list), defaultdict(list)
for im1, im2 in tracks_manager.get_all_pairs_connectivity():
tuples = tracks_manager.get_all_common_observations(im1, im2)
f1 = [p.point for k, p, _ in tuples if k in points_keys]
f2 = [p.point for k, _, p in tuples if k in points_keys]
pairs[im1, im2].append((f1, f2))
poses[im1, im2] = reconstruction.shots[im2].pose.\
compose(reconstruction.shots[im1].pose.inverse())
camera = list(reconstruction.cameras.values())[0]
return pairs, poses, camera, features, tracks_manager, reconstruction
@pytest.fixture(scope='module')
def pairs_and_their_E(pairs_and_poses):
pairs, poses, camera, _, _, _ = pairs_and_poses
pairs = list(sorted(zip(pairs.values(), poses.values()), key=lambda x: -len(x[0])))
num_pairs = 20
indices = [np.random.randint(0, len(pairs)-1) for i in range(num_pairs)]
ret_pairs = []
for idx in indices:
pair = pairs[idx]
p1 = np.array([x for x, _ in pair[0]])
p2 = np.array([x for _, x in pair[0]])
p1 = p1.reshape(-1, p1.shape[-1])
p2 = p2.reshape(-1, p2.shape[-1])
f1 = camera.pixel_bearing_many(p1)
f2 = camera.pixel_bearing_many(p2)
pose = pair[1]
R = pose.get_rotation_matrix()
t_x = multiview.cross_product_matrix(pose.get_origin())
e = R.dot(t_x)
e /= np.linalg.norm(e)
ret_pairs.append((f1, f2, e, pose))
return ret_pairs
@pytest.fixture(scope='module')
def shots_and_their_points(pairs_and_poses):
_, _, _, _, tracks_manager, reconstruction = pairs_and_poses
ret_shots = []
for shot in reconstruction.shots.values():
bearings, points = [], []
for k, obs in tracks_manager.get_shot_observations(shot.id).items():
if k not in reconstruction.points:
continue
p = reconstruction.points[k]
bearings.append(shot.camera.pixel_bearing(obs.point))
points.append(p.coordinates)
ret_shots.append((shot.pose, np.array(bearings), np.array(points)))
return ret_shots
|
py | 1a51e56df32beae169d9fc743025c4b7a39fde7e | #-*- coding: utf-8 -*-
'''
Copyright (c) 2016 NSR (National Security Research Institute)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from distutils.core import setup
setup(
name = 'lsh',
version = '0.0.4',
packages = ['lsh'],
author = 'NSR',
description = 'LSH',
) |
py | 1a51e62eb62c9016aee9608661d1b2ee60a8125e | """
Huang G. et al. "`Densely Connected Convolutional Networks
<https://arxiv.org/abs/1608.06993>`_"
"""
import tensorflow as tf
from ... import is_best_practice
from . import TFModel
from .layers import conv_block
class DenseNet(TFModel):
""" DenseNet
**Configuration**
inputs : dict
dict with 'images' and 'labels'. See :meth:`.TFModel._make_inputs`.
input_block : dict
body : dict
num_layers : list of int
number of layers in dense blocks
block : dict
parameters for dense block, including :func:`~.layers.conv_block` parameters, as well as
growth_rate : int
number of output filters in each layer (default=32)
bottleneck : bool
whether to use 1x1 convolutions in each layer (default=True)
skip : bool
whether to concatenate inputs to the output tensor
transition_layer : dict
parameters for transition layers, including :func:`~.layers.conv_block` parameters, as well as
reduction_factor : float
a multiplier for number of output filters (default=1)
"""
@classmethod
def default_config(cls):
config = TFModel.default_config()
config['common/conv/use_bias'] = False
config['input_block'].update(dict(layout='cnap', filters=16, kernel_size=7, strides=2,
pool_size=3, pool_strides=2))
config['body/block'] = dict(layout='nacd', dropout_rate=.2, growth_rate=32, bottleneck=True, skip=True)
config['body/transition_layer'] = dict(layout='nacv', kernel_size=1, strides=1,
pool_size=2, pool_strides=2, reduction_factor=1)
config['head'] = dict(layout='Vf')
config['loss'] = 'ce'
if is_best_practice('optimizer'):
config['optimizer'].update(name='Adam')
else:
lr = 1e-1
# boundaries - the number of iterations on the 150th and 225th epochs on CIFAR with batch size=64
config['decay'] = ('const', dict(boundaries=[117300, 175950], values=[lr, lr/10, lr/100]))
config['optimizer'] = ('Momentum', dict(momentum=.9))
return config
def build_config(self, names=None):
config = super().build_config(names)
if config.get('head/units') is None:
config['head/units'] = self.num_classes('targets')
if config.get('head/filters') is None:
config['head/filters'] = self.num_classes('targets')
return config
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Base layers
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
num_layers, block, transition = cls.pop(['num_layers', 'block', 'transition_layer'], kwargs)
block = {**kwargs, **block}
transition = {**kwargs, **transition}
with tf.variable_scope(name):
x, inputs = inputs, None
for i, n_layers in enumerate(num_layers):
x = cls.block(x, num_layers=n_layers, name='block-%d' % i, **block)
if i < len(num_layers) - 1:
x = cls.transition_layer(x, name='transition-%d' % i, **transition)
return x
@classmethod
def block(cls, inputs, num_layers=3, name=None, **kwargs):
""" A network building block consisting of a stack of 1x1 and 3x3 convolutions.
Parameters
----------
inputs : tf.Tensor
input tensor
num_layers : int
number of conv layers
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/block', **kwargs)
layout, growth_rate, bottleneck, skip = \
cls.pop(['layout', 'growth_rate', 'bottleneck', 'skip'], kwargs)
with tf.variable_scope(name):
axis = cls.channels_axis(kwargs['data_format'])
x = inputs
all_layers = []
for i in range(num_layers):
if len(all_layers) > 0:
x = tf.concat([inputs] + all_layers, axis=axis, name='concat-%d' % i)
if bottleneck:
x = conv_block(x, filters=growth_rate * 4, kernel_size=1, layout=layout,
name='bottleneck-%d' % i, **kwargs)
x = conv_block(x, filters=growth_rate, kernel_size=3, layout=layout,
name='conv-%d' % i, **kwargs)
all_layers.append(x)
if skip:
all_layers = [inputs] + all_layers
x = tf.concat(all_layers, axis=axis, name='concat-%d' % num_layers)
return x
@classmethod
def transition_layer(cls, inputs, name='transition_layer', **kwargs):
""" An intermediary interconnect layer between two dense blocks
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body/transition_layer', **kwargs)
reduction_factor = cls.get('reduction_factor', kwargs)
num_filters = cls.num_channels(inputs, kwargs.get('data_format'))
return conv_block(inputs, filters=num_filters * reduction_factor, name=name, **kwargs)
class DenseNet121(DenseNet):
""" The original DenseNet-121 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
config['body']['num_layers'] = [6, 12, 24, 32]
return config
class DenseNet169(DenseNet):
""" The original DenseNet-169 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
config['body']['num_layers'] = [6, 12, 32, 16]
return config
class DenseNet201(DenseNet):
""" The original DenseNet-201 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
config['body']['num_layers'] = [6, 12, 48, 32]
return config
class DenseNet264(DenseNet):
""" The original DenseNet-264 architecture """
@classmethod
def default_config(cls):
config = DenseNet.default_config()
config['body']['num_layers'] = [6, 12, 64, 48]
return config
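# Hedged usage sketch (illustrative; exact pipeline wiring depends on the surrounding
# framework): the variants above are configured through their config dict rather than
# constructor arguments, e.g.
#   config = DenseNet121.default_config()
#   config['body/block']['growth_rate'] = 12              # CIFAR-sized network
#   config['body/transition_layer']['reduction_factor'] = 0.5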
|
py | 1a51e65934db367169076783ff1d135346d02a33 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 11:13:30 2020
@author: lorenz
"""
import numpy as np
import MathUtils as mt
def quatprod(Q,P) :
if P.shape[0] == 4:
P = P.T
if Q.shape[0] == 4:
Q = Q.T
QP = np.column_stack( (P[:,0]*Q[:,0] - P[:,1]*Q[:,1] - P[:,2]*Q[:,2] - P[:,3]*Q[:,3],
P[:,0]*Q[:,1] + P[:,1]*Q[:,0] - P[:,2]*Q[:,3] + P[:,3]*Q[:,2],
P[:,0]*Q[:,2] + P[:,2]*Q[:,0] + P[:,1]*Q[:,3] - P[:,3]*Q[:,1],
P[:,0]*Q[:,3] - P[:,1]*Q[:,2] + P[:,2]*Q[:,1] + P[:,3]*Q[:,0] ) )
QP = QP / mt.vecnorm(QP)[:,None]
return QP
def quatUniInv(Q):
if Q.ndim == 1 :
Qinv = Q.copy()
Qinv[1:4] = -Qinv[1:4]
return Qinv
if Q.shape[0] == 4:
Qinv = Q.T.copy()
else :
Qinv = Q.copy()
Qinv[:,1:4] *= -1
return Qinv
def rotateVectorArray(quat,pos):
return np.block( [
[pos[0,:]*(2*quat[0,:]**2 + 2*quat[1,:]**2 - 1) - pos[1,:]*(2*quat[0,:]*quat[3,:] - 2*quat[1,:]*quat[2,:]) + pos[2,:]*(2*quat[0,:]*quat[2,:] + 2*quat[1,:]*quat[3,:])],
[pos[1,:]*(2*quat[0,:]**2 + 2*quat[2,:]**2 - 1) + pos[0,:]*(2*quat[0,:]*quat[3,:] + 2*quat[1,:]*quat[2,:]) - pos[2,:]*(2*quat[0,:]*quat[1,:] - 2*quat[2,:]*quat[3,:])],
[pos[2,:]*(2*quat[0,:]**2 + 2*quat[3,:]**2 - 1) - pos[0,:]*(2*quat[0,:]*quat[2,:] - 2*quat[1,:]*quat[3,:]) + pos[1,:]*(2*quat[0,:]*quat[1,:] + 2*quat[2,:]*quat[3,:])]
])
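# Hedged usage sketch (added for illustration): rotating the x-axis 90 degrees about z
# with a [w, x, y, z] quaternion stored column-wise, as the indexing above assumes:
#   q = np.array([[np.cos(np.pi / 4)], [0.0], [0.0], [np.sin(np.pi / 4)]])
#   v = np.array([[1.0], [0.0], [0.0]])
#   rotateVectorArray(q, v)   # approximately [[0.0], [1.0], [0.0]]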
|
py | 1a51e7906aa7790a8d243c1411ed1c81818d8c04 | """From RAW data filter out sentences not containing the word mouse or mice."""
import click
import codecs
import os
import src.data.dataset as dataset
@click.command()
@click.argument('read_directory', type=click.Path(dir_okay=True),
default=dataset.DEFAULT_RAW_DATA_DIRECTORY)
@click.argument('save_directory', type=click.Path(writable=True, dir_okay=True),
default=dataset.DEFAULT_INTERIM_DATA_DIRECTORY)
@click.argument('encoding', default=dataset.DEFAULT_ENCODING)
def filter_raw_data(read_directory, save_directory, encoding='utf-8'):
"""Filter out sentences not containing 'mouse' and change 'mice' to 'mouse'
READ_DIRECTORY is directory to read raw data from.
Default: <project_root>/data/raw
SAVE_DIRECTORY is directory to store filtered data.
Default: <project_root>/data/interim
ENCODING is the encoding used to save the filtered sentences
Default: 'utf-8'
Creates files 'animal.txt' and 'device.txt' in SAVE_DIRECTORY
"""
for context in ['animal', 'device']:
read_dir = os.path.join(read_directory, context)
save_dir = os.path.join(save_directory, '{}.txt'.format(context))
filenames = [filename for filename in os.listdir(read_dir) if filename.endswith('.txt')]
print(filenames)
with codecs.open(save_dir, 'w', encoding) as of:
for filename in filenames:
read_path = os.path.join(read_dir, filename)
with codecs.open(read_path, 'r', encoding) as rf:
text = rf.read()
processed_text = dataset.process_text(text)
for sentence in processed_text:
of.write(sentence)
of.write('\n')
if __name__ == '__main__':
filter_raw_data()
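# Hedged usage sketch (script name is an assumption): run from the project root with
# explicit arguments, e.g.
#   python filter_raw_data.py data/raw data/interim utf-8
# or rely on the click defaults by passing no arguments at all.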
|
py | 1a51e79afb41bfee3c8b66e0afe04a2ce063451e | from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
message = models.TextField(blank=True)
profile = models.ImageField(upload_to='user_image/profile/%Y/%m/%d', blank=True) |
py | 1a51e7b2f65207413390afa5bb92f71ec678620d | from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/')
def hello_world():
words = ["hello", "world"]  # placeholder word list; the original source of words is not shown
return render_template('index.html', words=words)
if __name__ == '__main__':
app.run(debug=True) |
py | 1a51e81e041e4a860879be72003c3d2e09130969 | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# spider/
APPS_DIR = ROOT_DIR / "spider"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=True)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_celery_beat",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"django_filters",
]
LOCAL_APPS = [
"spider.users.apps.UsersConfig",
"spider.products",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "spider.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"spider.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Pavel Tanchev""", "[email protected]")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "spider.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "spider.users.adapters.SocialAccountAdapter"
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_FILTER_BACKENDS": (
"django_filters.rest_framework.DjangoFilterBackend",
),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
|
py | 1a51e84ed8a4e9e8a2152fbe4fbc537a9b2ff9b4 | # Demo: (Audio) -> (Label)
import gradio as gr
import numpy as np
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from math import log2, pow
A4 = 440
C0 = A4*pow(2, -4.75)
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
def get_pitch(freq):
h = round(12*log2(freq/C0))
n = h % 12
return name[n]
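# Worked example (illustrative): with A4 = 440 Hz, get_pitch(440) computes
# h = round(12*log2(440/C0)) = 57 and 57 % 12 = 9, so name[9] == "A"; get_pitch(880)
# also returns "A" because the octave is folded away by the modulo.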
def main_note(audio):
rate, y = audio
if len(y.shape) == 2:
y = y.T[0]
N = len(y)
T = 1.0 / rate
x = np.linspace(0.0, N*T, N)
yf = fft(y)
yf2 = 2.0/N * np.abs(yf[0:N//2])
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
volume_per_pitch = {}
total_volume = np.sum(yf2)
for freq, volume in zip(xf, yf2):
if freq == 0:
continue
pitch = get_pitch(freq)
if pitch not in volume_per_pitch:
volume_per_pitch[pitch] = 0
volume_per_pitch[pitch] += 1.0 * volume / total_volume
return volume_per_pitch
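# The dict returned above maps pitch names to their share of the total spectral
# energy, e.g. {"A": 0.62, "E": 0.21, ...} (values made up for illustration);
# gr.outputs.Label accepts such label->confidence dicts directly.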
io = gr.Interface(
main_note,
"microphone",
gr.outputs.Label(num_top_classes=4),
examples=[
["audio/recording1.wav"],
["audio/cantina.wav"],
],
interpretation="default")
io.launch()
|
py | 1a51ea2690801a7f351be958e0bb8b8efc553081 | # -*- coding: utf-8 -*-
from typing import Dict
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'r2r_dac.yaml'))
# noinspection PyPep8Naming
class adc_sar_templates__r2r_dac(Module):
"""Module for library adc_sar_templates cell r2r_dac.
Fill in high level description here.
"""
def __init__(self, bag_config, parent=None, prj=None, **kwargs):
Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
def design(self, lch, pw, nw, m, num_series, num_bits, device_intent='fast'):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
self.parameters['lch'] = lch
self.parameters['pw'] = pw
self.parameters['nw'] = nw
self.parameters['m'] = m
self.parameters['num_series'] = num_series
self.parameters['num_bits'] = num_bits
self.parameters['device_intent'] = device_intent
# array generation
name_list = []
term_list = []
for i in range(num_bits):
if i == 0:
term_list.append({'O': 'out',
'EN': 'EN<%d>' %(num_bits-1-i),
'ENB': 'ENB<%d>' %(num_bits-1-i)
})
else:
term_list.append({'O': 'int%d' %(num_bits-i),
'EN': 'EN<%d>' %(num_bits-1-i),
'ENB': 'ENB<%d>' %(num_bits-1-i)
})
name_list.append('I2RVDD%d' % (num_bits-1-i))
self.array_instance('I2RVDD', name_list, term_list=term_list)
for i in range(num_bits):
self.instances['I2RVDD'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=num_series, device_intent=device_intent)
# array generation
name_list = []
term_list = []
for i in range(num_bits):
if i == 0:
term_list.append({'O': 'out',
'EN': 'ENB<%d>' %(num_bits-1-i),
'ENB': 'EN<%d>' %(num_bits-1-i)
})
else:
term_list.append({'O': 'int%d' %(num_bits-i),
'EN': 'ENB<%d>' %(num_bits-1-i),
'ENB': 'EN<%d>' %(num_bits-1-i)
})
name_list.append('I2RVSS%d' % (num_bits-1-i))
self.array_instance('I2RVSS', name_list, term_list=term_list)
for i in range(num_bits):
self.instances['I2RVSS'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=num_series, device_intent=device_intent)
# array generation
name_list = []
term_list = []
for i in range(num_bits):
if i == 0:
term_list.append({'I': 'out',
'O': 'int%d' %(num_bits-1-i),
})
elif i == num_bits-1:
term_list.append({'I': 'int%d' %(num_bits-i),
'O': 'VSS',
})
else:
term_list.append({'I': 'int%d' %(num_bits-i),
'O': 'int%d' %(num_bits-1-i),
})
            name_list.append('IR%d' % (num_bits-1-i))
self.array_instance('IR', name_list, term_list=term_list)
for i in range(num_bits):
if i == num_bits-1:
self.instances['IR'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=num_series, device_intent=device_intent)
else:
self.instances['IR'][i].design(lch=lch, pw=pw, nw=nw, m=m, num_series=int(num_series/2), device_intent=device_intent)
# inv array generation
name_list = []
term_list = []
term_list.append({'O': 'ENB<%d:0>' %(num_bits-1),
'I': 'SEL<%d:0>' %(num_bits-1),
})
name_list.append('IINV0<%d:0>' %(num_bits-1))
self.array_instance('IINV0', name_list, term_list=term_list)
self.instances['IINV0'][0].design(lch=lch, pw=pw, nw=nw, m=2, device_intent=device_intent)
name_list = []
term_list = []
term_list.append({'O': 'EN<%d:0>' %(num_bits-1),
'I': 'ENB<%d:0>' %(num_bits-1),
})
name_list.append('IINV1<%d:0>' %(num_bits-1))
self.array_instance('IINV1', name_list, term_list=term_list)
self.instances['IINV1'][0].design(lch=lch, pw=pw, nw=nw, m=2, device_intent=device_intent)
self.rename_pin('SEL', 'SEL<%d:0>' % (num_bits - 1))
# self.rename_pin('ZP<0>', 'ZP<%d:0>' % (num_bits - 1))
# self.rename_pin('ZMID<0>', 'ZMID<%d:0>' % (num_bits - 1))
# self.rename_pin('ZM<0>', 'ZM<%d:0>' % (num_bits - 1))
# self.rename_pin('RETO<0>', 'RETO<%d:0>' % (num_bits - 1))
#
# name_list_p = []
# name_list_n = []
# term_list = []
# for i in range(num_bits):
# for j in range(num_inv_bb):
# if j == (num_inv_bb - 1):
# term_list.append({'G': 'ZP%d' % (j) + '<%d>' % (i),
# 'D': 'ZP<%d>' % (i),
# })
# else:
# term_list.append({'G': 'ZP%d' % (j) + '<%d>' % (i),
# 'D': 'ZP%d' % (j + 1) + '<%d>' % (i),
# })
# name_list_p.append('IBUFP0%d' % (j) + '<%d>' % (i))
# name_list_n.append('IBUFN0%d' % (j) + '<%d>' % (i))
# self.array_instance('IBUFP0', name_list_p, term_list=term_list)
# self.array_instance('IBUFN0', name_list_n, term_list=term_list)
# for i in range(num_bits * num_inv_bb):
# self.instances['IBUFP0'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# self.instances['IBUFN0'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# name_list_p = []
# name_list_n = []
# term_list = []
# for i in range(num_bits):
# for j in range(num_inv_bb):
# if j == (num_inv_bb - 1):
# term_list.append({'G': 'ZMID%d' % (j) + '<%d>' % (i),
# 'D': 'ZMID<%d>' % (i),
# })
# else:
# term_list.append({'G': 'ZMID%d' % (j) + '<%d>' % (i),
# 'D': 'ZMID%d' % (j + 1) + '<%d>' % (i),
# })
# name_list_p.append('IBUFP1%d' % (j) + '<%d>' % (i))
# name_list_n.append('IBUFN1%d' % (j) + '<%d>' % (i))
# self.array_instance('IBUFP1', name_list_p, term_list=term_list)
# self.array_instance('IBUFN1', name_list_n, term_list=term_list)
# for i in range(num_bits * num_inv_bb):
# self.instances['IBUFP1'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# self.instances['IBUFN1'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# name_list_p = []
# name_list_n = []
# term_list = []
# for i in range(num_bits):
# for j in range(num_inv_bb):
# if j == (num_inv_bb - 1):
# term_list.append({'G': 'ZM%d' % (j) + '<%d>' % (i),
# 'D': 'ZM<%d>' % (i),
# })
# else:
# term_list.append({'G': 'ZM%d' % (j) + '<%d>' % (i),
# 'D': 'ZM%d' % (j + 1) + '<%d>' % (i),
# })
# name_list_p.append('IBUFP2%d' % (j) + '<%d>' % (i))
# name_list_n.append('IBUFN2%d' % (j) + '<%d>' % (i))
# self.array_instance('IBUFP2', name_list_p, term_list=term_list)
# self.array_instance('IBUFN2', name_list_n, term_list=term_list)
# for i in range(num_bits * num_inv_bb):
# self.instances['IBUFP2'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
# self.instances['IBUFN2'][i].design(w=pw, l=lch, nf=m, intent=device_intent)
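    # Illustrative usage sketch (not part of the original cell): the parameter values
    # and the project handle `prj` are assumptions. A BAG generator script would
    # normally obtain this module from the design database and size it per bit, e.g.:
    #   dsn = prj.create_design_module('adc_sar_templates', 'r2r_dac')
    #   dsn.design(lch=16e-9, pw=4, nw=4, m=2, num_series=4, num_bits=8,
    #              device_intent='fast')
    # design() then arrays the I2RVDD/I2RVSS switch cells and IR resistors per bit
    # and renames SEL to SEL<num_bits-1:0>.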
def get_layout_params(self, **kwargs):
"""Returns a dictionary with layout parameters.
This method computes the layout parameters used to generate implementation's
layout. Subclasses should override this method if you need to run post-extraction
layout.
Parameters
----------
kwargs :
any extra parameters you need to generate the layout parameters dictionary.
Usually you specify layout-specific parameters here, like metal layers of
input/output, customizable wire sizes, and so on.
Returns
-------
params : dict[str, any]
the layout parameters dictionary.
"""
return {}
def get_layout_pin_mapping(self):
"""Returns the layout pin mapping dictionary.
This method returns a dictionary used to rename the layout pins, in case they are different
than the schematic pins.
Returns
-------
pin_mapping : dict[str, str]
a dictionary from layout pin names to schematic pin names.
"""
return {} |
py | 1a51eaa80dedf3885d2eff7b0fd8429c48b847f9 | # -*- coding:UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''Wrapper around ADB functionality
'''
from __future__ import unicode_literals
from __future__ import print_function
import os
import re
import six
import subprocess
import sys
import threading
import time
from pkg_resources import iter_entry_points
from qt4a.androiddriver.adbclient import ADBClient
from qt4a.androiddriver.util import Singleton, Deprecated, logger, ThreadEx, TimeoutError, InstallPackageFailedError, PermissionError, is_int, encode_wrap, enforce_utf8_decode
try:
import _strptime # time.strptime() is not thread-safed, so import _strptime first, otherwise it raises an AttributeError: _strptime_time
except:
pass
cur_path = os.path.dirname(os.path.abspath(__file__))
def get_adb_path():
if sys.platform == 'win32':
sep = ';'
file_name = 'adb.exe'
else:
sep = ':'
file_name = 'adb'
for root in os.environ.get('PATH').split(sep):
adb_path = os.path.join(root, file_name)
if os.path.exists(adb_path): # 优先使用环境变量中指定的 adb
return adb_path
return os.path.join(cur_path, 'tools', 'adb', sys.platform, file_name)
adb_path = get_adb_path()
def is_adb_server_opend():
    '''Check whether the ADB server is already running
'''
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(('localhost', 5037))
sock.close()
return False
except:
return True
class EnumRootState(object):
    '''Root state of the device
'''
    Unknown = 0  # unknown
    NonRoot = 1  # not rooted
    AdbdRoot = 2  # adbd runs with root privileges (after "adb root")
    SuRoot = 3  # root is obtained through su
class IADBBackend(object):
    '''Interface definition for ADB backends
'''
@staticmethod
def list_device():
        '''Enumerate connected devices
'''
pass
@staticmethod
def open_device(name):
        '''Open the specified device
        :param name: device name
        :type name: str
        :return: an IADBBackend instance
'''
pass
@property
def device_host(self):
        '''Device host
'''
pass
@property
def device_name(self):
        '''Device name
'''
pass
def run_adb_cmd(self, cmd, *args, **kwargs):
        '''Run an adb command
'''
pass
class LocalADBBackend(IADBBackend):
    '''ADB backend that talks to a local adb server
'''
@staticmethod
def start():
if is_adb_server_opend():
return False
subprocess.call([adb_path, 'start-server'])
return True
@staticmethod
def close():
subprocess.call([adb_path, 'kill-server'])
@staticmethod
def list_device(device_host='127.0.0.1'):
        '''Enumerate connected devices
'''
if not is_adb_server_opend():
return []
result = ADBClient.get_client(device_host).call(
'devices', retry_count=3)[0]
result = result.split('\n')
device_list = []
for device in result:
if len(device) <= 1 or not '\t' in device:
continue
device_name, status = device.split('\t')
if status != 'device':
continue
device_list.append(device_name)
return device_list
@staticmethod
def open_device(name):
        '''Open the specified device
        :param name: device name
        :type name: str
        :return: an IADBBackend instance
'''
device_host = '127.0.0.1'
if ':' in name:
pattern = re.compile(r'^\d{3,5}$')
pos = name.find(':')
hostname = name[:pos]
if not pattern.match(name[pos + 1:]):
# adb connect device
name = name[pos + 1:]
device_host = hostname
if name not in LocalADBBackend.list_device(device_host):
raise RuntimeError('Device %s not exist in host %s' %
(name, device_host))
return LocalADBBackend(device_host, name)
def __init__(self, device_host, device_name, port=5037):
self._device_host = device_host
self._device_host_port = port
self._device_name = device_name
self._adb_client = ADBClient.get_client(
self._device_host, self._device_host_port)
@property
def device_host(self):
        '''Device host
'''
return self._device_host
@property
def device_name(self):
        '''Device name
'''
return self._device_name
def run_adb_cmd(self, cmd, *args, **kwargs):
        '''Run an adb command
'''
timeout = kwargs.pop('timeout')
sync = kwargs.pop('sync')
return self._adb_client.call(cmd, self._device_name, *args, sync=sync, retry_count=1, timeout=timeout)
def static_result(func):
    '''Decorator that caches and reuses a function's first result
'''
def _wrap_func(self):
attr = '_%s_result' % func.__name__
if not hasattr(self, attr):
result = func(self)
setattr(self, attr, result)
return getattr(self, attr)
return _wrap_func
class ADB(object):
    '''Wrapper around ADB functionality
'''
armeabi = 'armeabi'
x86 = 'x86'
    connect_timeout = 300  # timeout in seconds for connecting to a device
def __init__(self, backend):
self._backend = backend
self._device_name = self._backend.device_name
self._root_state = EnumRootState.Unknown
        self._need_quote = None  # some phones require quotes around shell commands, others do not
        self._log_filter_thread_list = []  # ids of threads whose output is not logged
        self._shell_prefix = None  # some devices prepend fixed output to every shell result
        self._logcat_callbacks = []
        self._newline = None  # the newline sequence differs between phones
@property
def device_host(self):
        '''Device host name
'''
return self._backend.device_host
@property
def device_name(self):
        '''Device name
'''
return self._backend.device_name
def add_no_log_thread(self, thread):
        '''Add a thread to the list of threads whose output is not logged
'''
if not thread.ident in self._log_filter_thread_list:
self._log_filter_thread_list.append(thread.ident)
def remove_no_log_thread(self, thread):
        '''Remove a thread from the list of threads whose output is not logged
'''
if thread.ident in self._log_filter_thread_list:
self._log_filter_thread_list.remove(thread.ident)
def run_adb_cmd(self, cmd, *args, **kwargs):
        '''Run an adb command
'''
        retry_count = 3  # retry at most 3 times by default
if 'retry_count' in kwargs:
retry_count = kwargs.pop('retry_count')
timeout = 20
if 'timeout' in kwargs:
timeout = kwargs.pop('timeout')
sync = True
if 'sync' in kwargs:
sync = kwargs.pop('sync')
for _ in range(retry_count):
if not threading.current_thread().ident in self._log_filter_thread_list:
logger.info('adb %s:%s %s %s' % (
self._backend.device_host, self._backend.device_name, cmd, ' '.join(args)))
            time0 = time.time()
try:
result = self._backend.run_adb_cmd(
cmd, *args, sync=sync, timeout=timeout, **kwargs)
except Exception as e:
logger.exception('Exec adb %s failed: %s' % (cmd, e))
continue
if not isinstance(result, tuple):
return result
if not threading.current_thread().ident in self._log_filter_thread_list:
                logger.info('adb command took %ss' % (time.time() - time0))
out, err = result
if err:
if b'error: device not found' in err:
self.run_adb_cmd('wait-for-device', retry_count=1,
timeout=self.connect_timeout) # 等待设备连接正常
return self.run_adb_cmd(cmd, *args, **kwargs)
return err
if isinstance(out, (bytes, str)):
out = out.strip()
return out
def run_shell_cmd(self, cmd_line, root=False, **kwds):
        '''Run a shell command
        :param cmd_line: command line to run
        :param root: whether to run with root privileges
'''
if not self._newline:
result = self.run_adb_cmd('shell', 'echo "1\n2"')
if b'\r\n' in result:
self._newline = b'\r\n'
else:
self._newline = b'\n'
binary_output = False
if 'binary_output' in kwds:
binary_output = kwds.pop('binary_output')
def _handle_result(result):
if not isinstance(result, (bytes, str)):
return result
if self._newline != b'\n':
result = result.replace(self._newline, b'\n')
if binary_output:
return result
else:
result = result.decode('utf8')
if self._shell_prefix != None and self._shell_prefix > 0:
result = '\n'.join(result.split('\n')[self._shell_prefix:])
if result.startswith('WARNING: linker:'):
# 虚拟机上可能会有这种错误:WARNING: linker: libdvm.so has text relocations. This is wasting memory and is a security risk. Please fix.
lines = result.split('\n')
idx = 1
while idx < len(lines):
if not lines[idx].startswith('WARNING: linker:'):
break
idx += 1
return '\n'.join(lines[idx:]).strip()
else:
return result
if root:
need_su = True
if self._root_state == EnumRootState.Unknown:
self._root_state = self.get_root_state()
if self._root_state == EnumRootState.AdbdRoot:
need_su = False
elif self._root_state == EnumRootState.NonRoot:
raise RuntimeError('device is not rooted')
if not need_su:
return self.run_shell_cmd(cmd_line, **kwds)
if self._need_quote == None:
self._check_need_quote()
if self._need_quote:
cmd_line = 'su -c \'%s\'' % cmd_line
else:
cmd_line = 'su -c %s' % cmd_line
return _handle_result(self.run_adb_cmd('shell', '%s' % cmd_line, **kwds))
def reboot(self, _timeout=180):
'''重启手机'''
try:
self.run_adb_cmd('reboot', retry_count=1, timeout=30)
except TimeoutError:
# 使用强杀init进程方式重启手机
self.kill_process(1)
time.sleep(10) # 等待手机重启
if _timeout > 0:
self.wait_for_boot_complete(_timeout)
def wait_for_boot_complete(self, _timeout=180):
'''等待手机启动完成'''
# 手机重启完后 adbd Insecure 启动时会导致adb断开重连,qt4a框架己经实现了adb root权限功能,测试手机请不要安装 adbd Insecure
import time
print('等待手机启动完成...')
self.run_adb_cmd('wait-for-device', timeout=_timeout)
boot_complete = False
attempts = 0
wait_period = 5
while not boot_complete and (attempts * wait_period) < _timeout:
output = self.run_shell_cmd(
"getprop sys.boot_completed", retry_count=1)
output = output.strip()
if output == "1":
boot_complete = True
else:
time.sleep(wait_period)
attempts += 1
if not boot_complete:
raise RuntimeError(
"dev.bootcomplete 标志在 %s 秒后仍未设置,手机重启失败" % _timeout)
def start_logcat(self, process_list=[], params=''):
        '''Start the logcat process
        :param process_list: process names or pids whose logs should be captured; empty means all processes
:type process_list: list
'''
if not hasattr(self, '_start_count'):
self._start_count = 0
self._start_count += 1
if self._start_count > 1:
return
logger.debug('[ADB] start logcat')
self.run_shell_cmd('logcat -c ' + params) # 清除缓冲区
if not hasattr(self, '_log_list'):
self._log_list = []
self._logcat_running = True
self._log_pipe = self.run_shell_cmd(
'logcat -v threadtime ' + params, sync=False)
# self._logcat_thread_func(process_list)
self._logcat_thread = ThreadEx(
target=self._logcat_thread_func, args=[process_list, params])
self._logcat_thread.setDaemon(True)
self._logcat_thread.start()
self._log_filter_thread_list.append(self._logcat_thread.ident)
def stop_logcat(self):
'''停止logcat
'''
if not hasattr(self, '_start_count') or self._start_count <= 0:
logger.warn('[ADB] logcat not start')
return
self._start_count -= 1
if self._start_count > 0:
return
logger.debug('[ADB] stop logcat')
self._logcat_running = False
if hasattr(self, '_log_pipe'):
if self._log_pipe.poll() == None: # 判断logcat进程是否存在
try:
self._log_pipe.terminate()
                except OSError as e:  # WindowsError does not exist on non-Windows hosts
logger.warn('terminate logcat process failed: %s' % e)
if hasattr(self, '_logcat_thread'):
if self._logcat_thread.ident in self._log_filter_thread_list:
self._log_filter_thread_list.remove(self._logcat_thread.ident)
else:
logger.warn('%s not in %s' % (
self._logcat_thread.ident, self._log_filter_thread_list))
def get_log(self, clear=True):
'''获取已经保存的log
'''
if not hasattr(self, '_log_list'):
return []
result = self._log_list
if clear:
self._log_list = []
return result
def save_log(self, save_path):
'''保存log
'''
if not hasattr(self, '_log_list'):
return
log_list = self.get_log()
if six.PY2:
for i in range(len(log_list)):
log = log_list[i]
if not isinstance(log, unicode):
# 先编码为unicode
for code in ['utf8', 'gbk']:
try:
log = log.decode(code)
break
except UnicodeDecodeError as e:
# logger.warn('decode with %s error: %s' % (code, e))
pass
else:
log = repr(log)
log_list[i] = log.encode('utf8') if isinstance(
log, unicode) else log
f = open(save_path, 'w')
f.write('\n'.join(log_list))
f.close()
def add_logcat_callback(self, callback):
'''添加logcat回调
'''
if not callback in self._logcat_callbacks:
self._logcat_callbacks.append(callback)
def remove_logcat_callback(self, callback):
'''移除logcat回调
'''
if callback in self._logcat_callbacks:
self._logcat_callbacks.remove(callback)
def insert_logcat(self, process_name, year, month_day, timestamp, level, tag, tid, content):
self._log_list.append('[%s] [%s-%s %s] %s/%s(%s): %s' % (process_name,
year, month_day, timestamp,
level,
tag,
tid,
content))
pid = 0
pattern = re.compile(r'^(.+)\((\d+)\)$')
ret = pattern.match(process_name)
if ret:
process_name = ret.group(1)
pid = int(ret.group(2))
for callback in self._logcat_callbacks:
callback(pid, process_name, '%s-%s' % (year, month_day),
timestamp, level, tag, int(tid), content)
def _logcat_thread_func(self, process_list, params=''):
'''获取logcat线程
'''
import re
# pattern = re.compile(r'([A-Z])/([\w|.]+)\s*\(\s*(\d+)\):.+') #标准格式
# pattern = re.compile(r'([\d|-]+)\s+([\d|:|\.]+)\s+(\d+)\s+(\d+)\s+(\w)\s+(\S+)\s*:\s+(.+)') # [^:]
# 会过滤掉只有内容和内容为空的情况:--------- beginning of /dev/log/main not match pattern;04-16 10:09:25.170 2183 2183 D AndroidRuntime:
pattern = re.compile(
r'([\d|-]+)\s+([\d|:|\.]+)\s+(\d+)\s+(\d+)\s+(\w)\s+(.*?)\s*:\s*(.*)')
# Date Time PID TID Level Tag Content
pid_dict = {}
filter_pid_list = [] # 没有找到匹配进程的列表
zygote_pid = 0 # zygote进程ID
while self._logcat_running:
log = self._log_pipe.stdout.readline()
log = enforce_utf8_decode(log).strip()
if not log:
if self._log_pipe.poll() != None:
logger.debug('logcat进程:%s 已退出' % self._log_pipe.pid)
# 进程已退出
# TODO: 解决logcat重复问题
if not self._logcat_running:
logger.info('logcat线程停止运行')
return
self._log_pipe = self.run_shell_cmd(
'logcat -v threadtime ' + params, sync=False)
else:
continue
ret = pattern.match(log)
if not ret:
logger.info('log: %s not match pattern' % log)
continue
tag = ret.group(6).strip()
if tag in ['inject', 'dexloader', 'ActivityInspect', 'MethodHook', 'androidhook']:
logger.info(log) # 测试桩日志加入到qt4a日志中
continue
if tag in ['Web Console']:
if ret.group(7).startswith('[ClickListener]'):
logger.info(log) # WebView的控件点击信息
continue
pid = int(ret.group(3))
if pid in filter_pid_list:
continue
init_process_list = ['<pre-initialized>', 'zygote']
if not pid in pid_dict.keys():
for item in self.list_process():
if zygote_pid == 0 and item['proc_name'] == 'zygote' and item['ppid'] == 1:
# zygote父进程ID为1
zygote_pid = item['pid']
for init_process in init_process_list:
if item['pid'] in pid_dict and pid_dict[item['pid']].startswith(init_process) and not item['proc_name'].startswith(init_process):
for i in range(len(self._log_list) - 1, -1, -1):
# 修复之前记录的“<pre-initialized>”进程
pre_process_name = '[%s(%d)]' % (
init_process, item['pid'])
if not pre_process_name in self._log_list[i]:
continue
if process_list:
del_flag = True
for process in process_list:
if pid == process or item['proc_name'].startswith(process):
# 替换为真实进程名
self._log_list[i] = self._log_list[i].replace(
pre_process_name, ('[%s(%d)]' % (item['proc_name'], item['pid'])))
del_flag = False
break
if del_flag:
# 不在需要记录的进程列表中
del self._log_list[i]
else:
# 直接替换
self._log_list[i] = self._log_list[i].replace(
pre_process_name, ('[%s(%d)]' % (item['proc_name'], item['pid'])))
pid_dict[item['pid']] = item['proc_name']
# if item['proc_name'] in init_process_list and item['pid'] != zygote_pid:
# pid_dict[item['pid']] += '(%d)' % item['pid']
if not pid in pid_dict.keys():
filter_pid_list.append(pid)
continue
found = False
if not process_list:
found = True # 不指定进程列表则捕获所有进程
else:
for process in process_list:
if pid == process or (pid in pid_dict and (pid_dict[pid].startswith(process) or pid_dict[pid].startswith('<pre-initialized>')
or (pid_dict[pid].startswith('zygote') and pid != zygote_pid))): # 进程初始化中
found = True
break
if found:
import datetime
if not hasattr(self, '_year'):
self._year = datetime.date.today().year
try:
self.insert_logcat('%s(%d)' % (pid_dict.get(pid), pid), self._year, ret.group(
1), ret.group(2), ret.group(5), ret.group(6), ret.group(4), ret.group(7))
except:
logger.exception('Insert logcat failed: %r' % log)
@static_result
def get_root_state(self):
'''获取Root状态
'''
if self.is_adbd_root():
return EnumRootState.AdbdRoot
result = self.run_shell_cmd('su -c id')
if 'su: not found' in result:
return EnumRootState.NonRoot
elif 'uid=0(root)' in result:
return EnumRootState.SuRoot
return EnumRootState.NonRoot
@static_result
def is_adbd_root(self):
'''adbd是否以root权限运行
'''
result = self.run_shell_cmd('id')
logger.debug('is_adbd_root: %s' % result)
return 'uid=0(root)' in result
def is_rooted(self):
return self.get_root_state() in (EnumRootState.AdbdRoot, EnumRootState.SuRoot)
def _check_need_quote(self, timeout=20):
'''
'''
cmd = "su -c 'ls -l /data/data'" # 默认方式为加引号,避免有些手机上对于存在空格的命令容易出错
# 联想S899T上发现不加引号返回结果为空
result = self.run_shell_cmd(cmd, timeout=timeout)
if result.find('com.android.phone') >= 0:
self._need_quote = True
else:
logger.debug(result)
self._need_quote = False
# ifndef __RELEASE__
def _set_system_writable(self):
'''修改system分区可写
'''
result = self.run_shell_cmd('mount', True)
for line in result.split('\n'):
if line.find('/system') >= 0:
block = line.split(' ')[0]
print(block)
self.run_shell_cmd('mount -o remount %s /system' % block, True)
return True
return False
# endif
def forward(self, port1, port2, type='tcp'):
        '''Set up port forwarding
        :param port1: TCP port on the PC
        :type port1: int
        :param port2: port or LocalSocket address on the phone
        :type port2: int or string
        :param type: type of the endpoint on the phone
        :type type: string, use "localabstract" for a LocalSocket address
'''
while 1:
ret = self.run_adb_cmd('forward', 'tcp:%d' %
(port1), '%s:%s' % (type, port2))
if not 'cannot bind socket' in ret and not 'cannot bind to socket' in ret:
return port1
port1 += 1
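    # Usage sketch (the socket name is illustrative): map a local TCP port to the
    # device's abstract unix socket and talk to it via 127.0.0.1:
    #   local_port = adb.forward(12305, 'qt4a-driver', type='localabstract')
    #   ...
    #   adb.remove_forward(local_port)
    # forward() keeps incrementing the local port until "adb forward" no longer
    # reports a bind error, and returns the port actually used.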
def remove_forward(self, port):
'''移除指定的端口映射
'''
return 'cannot remove listener' in self.run_adb_cmd('forward', '--remove', 'tcp:%d' % (port))
def create_tunnel(self, addr, type='tcp'):
'''直接创建与手机中socket服务端的连接
'''
sock = self.run_adb_cmd('create_tunnel', '%s:%s' % (type, addr))
if sock == '':
return None
return sock
def _push_file(self, src_path, dst_path):
'''以指定身份拷贝文件到手机中
'''
result = self.run_adb_cmd('push', src_path, dst_path, timeout=None)
if 'No space left on device' in result or 'No such file or directory' in result:
# 如果源文件不存在不会执行到这里
raise RuntimeError('设备存储空间不足')
return result
def push_file(self, src_path, dst_path, uid=None):
'''以指定身份拷贝文件到手机中
'''
if six.PY2 and isinstance(dst_path, unicode):
dst_path = dst_path.encode('utf8')
file_size = 0
for _ in range(3):
file_size = os.path.getsize(src_path) # 防止取到的文件大小不正确
result = self._push_file(src_path, dst_path)
if file_size == 0:
logger.warn('文件大小为0')
return result
if ('%d' % file_size) in result:
try:
_, file_list = self.list_dir(dst_path)
if len(file_list) == 0:
logger.warn('push file failed: file not exist')
elif file_list[0]['size'] != file_size:
logger.warn('push file failed: file size error, expect %d, actual is %d' % (
file_size, file_list[0]['size']))
self.delete_file(dst_path)
else:
logger.debug(repr(file_list[0]))
if uid:
self.chown(dst_path, uid, uid)
return result
except RuntimeError as e:
err_msg = e.args[0]
if six.PY2 and (not isinstance(err_msg, unicode)):
err_msg = err_msg.decode('utf8')
logger.warn(err_msg)
else:
logger.warn('push file failed: %s' % result)
raise RuntimeError('Push file [%d]%s to device [%r] failed: %s' % (
file_size, src_path, self._device_name, result))
def pull_file(self, src_path, dst_path):
'''从手机中拉取文件
'''
result = self.run_adb_cmd('pull', src_path, dst_path, timeout=600)
if 'failed to copy' in result:
raise RuntimeError(result)
if not 'bytes in' in result:
logger.warn(repr(result))
logger.debug(self.run_shell_cmd('ls -l %s' % src_path, True))
return result
@staticmethod
def _get_package_name(apk_path):
'''获取安装包名
'''
import zipfile
from ._axmlparser import AXMLPrinter
package_name = ''
zf = zipfile.ZipFile(apk_path, mode='r')
for i in zf.namelist():
if i == "AndroidManifest.xml":
printer = AXMLPrinter(zf.read(i))
package_name = printer.get_xml_obj().getElementsByTagName('manifest')[
0].getAttribute('package')
break
if not package_name:
raise RuntimeError('获取安装包中的包名信息失败')
return package_name
def _install_apk(self, apk_path, package_name, reinstall=False):
'''
'''
if self.get_sdk_version() <= 19:
timeout = 3 * 60
else:
timeout = 6 * 60 # TODO: 9100安装5.0系统后安装应用超过3分钟
cmdline = 'pm install %s %s' % ('-r' if reinstall else '', apk_path)
ret = ''
for i in range(3):
# 处理一些必然会失败的情况,如方法数超标之类的问题
try:
if not self.is_rooted():
# 通知QT4A助手开始监控应用安装
self.run_shell_cmd('am broadcast -a startInstallMonitor')
ret = self.run_shell_cmd(
cmdline, retry_count=1, timeout=timeout)
else:
proc = self.run_shell_cmd(
cmdline, True, sync=False) # 使用root权限安装
time0 = time.time()
close_popup_count = 0
while time.time() - time0 < timeout:
if proc.poll() != None:
ret = proc.communicate()[0]
break
elif time.time() - time0 > 10 and close_popup_count < 2:
# 有些系统上弹窗会出现很久,关掉弹窗可以避免超时
self.run_shell_cmd('input keyevent 4')
close_popup_count += 1
time.sleep(1)
else:
raise TimeoutError('Install package timeout')
if not b'Success' in ret:
logger.warn('install with root failed: %s' % ret)
if not b'INSTALL_' in ret.strip().split(b'\n')[-1]:
# 权限弹窗导致的安装失败
ret = self.run_as(
'system', cmdline, retry_count=1, timeout=timeout)
logger.debug(ret)
if b'Success' in ret:
return True, ret
elif i > 1 and b'INSTALL_FAILED_ALREADY_EXISTS' in ret:
# 出现至少一次超时,认为安装完成
return True, 'Success'
elif b'INSTALL_FAILED_ALREADY_EXISTS' in ret:
# 尝试覆盖安装
return self._install_apk(apk_path, package_name, True)
elif b'INSTALL_PARSE_FAILED_NO_CERTIFICATES' in ret or b'INSTALL_PARSE_FAILED_UNEXPECTED_EXCEPTION' in ret:
if i >= 2:
return False, ret
time.sleep(10)
continue
elif b'INSTALL_PARSE_FAILED_INCONSISTENT_CERTIFICATES' in ret or b'INSTALL_FAILED_DEXOPT' in ret or b'INSTALL_FAILED_UPDATE_INCOMPATIBLE' in ret:
# 必须卸载安装
if not reinstall:
return False, ret
self.uninstall_app(package_name)
return self._install_apk(apk_path, package_name, False)
elif b'INSTALL_FAILED_INSUFFICIENT_STORAGE' in ret:
# 有可能是存在/data/app-lib/packagename-1目录导致的
for i in (1, 2):
dir_path = '/data/app-lib/%s-%d' % (package_name, i)
if 'No such file or directory' in self.run_shell_cmd('ls -l %s' % dir_path, True):
continue
else:
self.delete_folder(dir_path)
break
else:
return False, ret
elif b'INSTALL_FAILED_UID_CHANGED' in ret or b'INSTALL_FAILED_INTERNAL_ERROR' in ret:
# /data/data目录下存在文件夹没有删除
dir_path = '/data/data/%s' % package_name
for _ in range(3):
# 防止删除没有成功
self.delete_folder(dir_path)
if b'No such file or directory' in self.run_shell_cmd('ls -l %s' % dir_path, True):
break
continue
elif b'INSTALL_FAILED_CANCELLED_BY_USER' in ret:
# 一般是ROM需要手动确认安装,改用system权限安装
ret = self.run_shell_cmd(
'su system %s' % cmdline, timeout=timeout)
if b'Success' in ret:
return True, ret
elif b'Error: Could not access the Package Manager' in ret:
# 设备出现问题,等待监控程序重启设备
time.sleep(30)
else:
return False, ret
except TimeoutError as e:
logger.warn('install app timeout: %r' % e)
else:
logger.warn('install app failed')
ret = self.run_shell_cmd(cmdline, timeout=timeout) # 改用非root权限安装
logger.debug(ret)
if b'Success' in ret or b'INSTALL_FAILED_ALREADY_EXISTS' in ret:
return True, 'Success'
return False, ret
def install_apk(self, apk_path, reinstall=False):
'''安装应用
'''
if not os.path.exists(apk_path):
raise RuntimeError('APK: %s 不存在' % apk_path)
package_name = self._get_package_name(apk_path)
tmp_path = '/data/local/tmp/%s.apk' % package_name
self.push_file(apk_path, tmp_path)
if not reinstall:
self.uninstall_app(package_name) # 先卸载,再安装
result = self._install_apk(tmp_path, package_name, reinstall)
else:
result = self._install_apk(tmp_path, package_name, reinstall)
# logger.debug(result)
if result[0] == False:
if not b'Failure' in result[1]:
# 一般这种情况都是由于adb server意外退出导致,此时安装过程还会继续
logger.warn('install app: %r' % result[1])
timeout = 30
time0 = time.time()
while time.time() - time0 < timeout:
# 等待应用安装完成
if self.get_package_path(package_name):
break
time.sleep(1)
else:
result = self._install_apk(
tmp_path, package_name, reinstall)
else:
err_msg = result[1]
if six.PY2:
if isinstance(err_msg, unicode):
err_msg = err_msg.encode('utf8')
if isinstance(package_name, unicode):
package_name = package_name.encode('utf8')
raise InstallPackageFailedError(
'安装应用%s失败:%s' % (package_name, err_msg))
try:
self.delete_file('/data/local/tmp/*.apk')
except TimeoutError:
pass
def uninstall_app(self, pkg_name):
'''卸载应用
'''
result = ''
if not self.get_package_path(pkg_name):
return True
for _ in range(5):
try:
result = self.run_adb_cmd(
'uninstall', pkg_name, retry_count=1, timeout=30)
break
except RuntimeError:
logger.exception('uninstall %s failed' % pkg_name)
time.sleep(10)
        else:
            raise RuntimeError('uninstall %s failed' % pkg_name)
logger.debug('uninstall %s result: %r' % (pkg_name, result))
if self.is_rooted():
# 清理卸载可能遗留的cache文件
cpu_abi = 'arm'
if self.get_cpu_abi() == 'x86':
cpu_abi = 'x86' # TODO: 支持64位CPU
self.delete_file('/data/dalvik-cache/%s/data@app@%s-*' %
(cpu_abi, pkg_name))
return 'Success' in result
# ifndef __RELEASE__
@Deprecated('uninstall_app')
def uninstall_apk(self, pkg_name):
'''卸载应用
'''
return self.uninstall_app(pkg_name)
# endif
@encode_wrap
def get_package_path(self, pkg_name):
'''获取应用安装包路径
'''
for _ in range(3):
# 为避免某些情况下获取不到应用安装包路径,重试多次
result = self.run_shell_cmd('pm path %s' % pkg_name)
logger.debug('get_package_path: %r' % result)
pos = result.find('package:')
if pos >= 0:
return result[pos + 8:]
time.sleep(1)
return ''
@encode_wrap
def get_package_version(self, pkg_name):
'''获取应用版本
'''
result = self.run_shell_cmd('dumpsys package %s' % pkg_name)
for line in result.split('\n'):
line = line.strip()
if line.startswith('versionName='):
return line[12:]
@encode_wrap
def _build_intent_extra_string(self, extra):
        '''Build the "am" extra-argument string for an intent
'''
extra_str = ''
for key in extra: # 指定额外参数
p_type = ''
value = extra[key]
if isinstance(value, bytes):
value = value.decode('utf8')
if value in ['true', 'false']:
p_type = 'z' # EXTRA_BOOLEAN_VALUE
elif isinstance(value, int):
if is_int(value):
p_type = 'i' # EXTRA_INT_VALUE
else:
p_type = 'l' # EXTRA_LONG_VALUE
elif isinstance(value, float):
p_type = 'f' # EXTRA_FLOAT_VALUE
elif value.startswith('file://'): # EXTRA_URI_VALUE
p_type = 'u'
param = '-e%s %s %s ' % (p_type, key,
('"%s"' % value) if not p_type else value)
if p_type:
param = u'-' + param
extra_str += param
if len(extra_str) > 0:
extra_str = extra_str[:-1]
return extra_str
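    # Worked example (illustrative values): each extra becomes an "am" -e flag whose
    # type prefix is derived from the value, e.g.
    #   _build_intent_extra_string({'cmdline': 'ls /sdcard', 'timeout': 30})
    # yields, up to dict ordering:
    #   -e cmdline "ls /sdcard" --ei timeout 30
    # Booleans must be passed as the strings 'true'/'false' to get the --ez form.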
@encode_wrap
def start_activity(self, activity_name, action='', type='', data_uri='', extra={}, wait=True):
        '''Launch an Activity. A typical "am start -W" output looks like:
Warning: Activity not started, intent has been delivered to currently running top-most instance.
Status: ok
ThisTime: 0
TotalTime: 0
WaitTime: 2
Complete
'''
if activity_name:
activity_name = '-n %s' % activity_name
if action: # 指定Action
action = '-a %s ' % action
if type:
type = '-t %s ' % type
if data_uri:
data_uri = '-d "%s" ' % data_uri
extra_str = self._build_intent_extra_string(extra)
W = u''
if wait:
W = '-W' # 等待启动完成才返回
# 如果/sbin/sh指向busybox,就会返回“/sbin/sh: am: not found”错误
# 返回am找不到是因为am缺少“#!/system/bin/sh”
command = 'am start %s %s %s%s%s%s' % (
W, activity_name, action, type, data_uri, extra_str)
if command[-1] == ' ':
command = command[:-1]
result = self.run_shell_cmd(command, timeout=15, retry_count=3)
if 'Permission Denial' in result or (wait and (not 'Activity:' in result or not 'Complete' in result)):
# 使用root权限运行
if self.is_rooted():
result = self.run_shell_cmd(
command, True, timeout=15, retry_count=3)
else:
package_name = activity_name.split('/')[0].split()[1]
result = self.run_as(package_name, command,
timeout=15, retry_count=3)
# raise RuntimeError('打开Activity失败:\n%s' % result)
if 'startActivityAndWait asks to run as user -2 but is calling from user 0' in result:
command += ' --user 0'
result = self.run_as(package_name, command,
timeout=15, retry_count=3)
logger.info('start activity command:%s' % command)
if 'Permission Denial' in result or ('run as user -2 but is calling from user 0' in result) or (wait and not 'Complete' in result):
raise RuntimeError('start activity failed: %s' % result)
ret_dict = {}
for line in result.split('\n'):
if ': ' in line:
key, value = line.split(': ')
ret_dict[key] = value
if 'Error' in ret_dict:
raise RuntimeError(ret_dict['Error'])
return ret_dict
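    # Usage sketch (the component name is illustrative):
    #   adb.start_activity('com.example.app/.MainActivity',
    #                      action='android.intent.action.VIEW',
    #                      extra={'from_qt4a': 'true'})
    # With wait=True the "am start -W" key/value output shown in the docstring is
    # parsed into a dict (e.g. ret['TotalTime']); on Permission Denial the command is
    # retried with root or run-as before an error is raised.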
def start_service(self, service_name, extra={}):
'''启动服务
'''
extra_str = self._build_intent_extra_string(extra)
command = 'am startservice -n %s %s' % (service_name, extra_str)
if command[-1] == ' ':
command = command[:-1]
result = self.run_shell_cmd(command, timeout=15, retry_count=3)
if 'no service started' in result or 'java.lang.SecurityException' in result:
raise RuntimeError('start service %s failed: %s' %
(service_name, result))
def stop_service(self, service_name):
'''停止服务
'''
result = self.run_shell_cmd(
'am stopservice -n %s' % service_name, timeout=15, retry_count=3)
if not 'Service stopped' in result and not 'was not running' in result:
raise RuntimeError('stop service failed: %s' % result)
def send_broadcast(self, action, extra={}):
'''发送广播
:param action: 广播使用的ACTION
:type action: string
:param extra: 额外参数
:type extra: dict
'''
extra_str = self._build_intent_extra_string(extra)
command = 'am broadcast -a %s %s' % (action, extra_str)
result = self.run_shell_cmd(command)
if not 'Broadcast completed: result=0' in result:
raise RuntimeError('Send broadcast failed: %s' % result)
def get_property(self, prop):
'''读取属性
'''
return self.run_shell_cmd('getprop %s' % prop)
def set_property(self, prop, value):
'''设置属性
'''
self.run_shell_cmd('setprop %s %s' % (prop, value), self.is_rooted())
@static_result
def get_cpu_abi(self):
'''获取系统的CPU架构信息
'''
ret = self.run_shell_cmd('getprop ro.product.cpu.abi')
if not ret:
ret = 'armeabi' # 有些手机可能没有这个系统属性
return ret
@static_result
def get_device_model(self):
'''获取设备型号
'''
model = self.run_shell_cmd('getprop ro.product.model')
brand = self.run_shell_cmd('getprop ro.product.brand')
if model.find(brand) >= 0:
return model
return '%s %s' % (brand, model)
@static_result
def get_system_version(self):
'''获取系统版本
'''
return self.run_shell_cmd('getprop ro.build.version.release')
@static_result
def get_sdk_version(self):
'''获取SDK版本
'''
return int(self.run_shell_cmd('getprop ro.build.version.sdk'))
def get_uid(self, app_name):
'''获取APP的uid
'''
result = self.run_shell_cmd('ls -l /data/data', True)
for line in result.split('\n'):
items = line.split(' ')
for item in items:
if not item:
continue
if item == app_name:
return items[1]
return None
def is_selinux_opened(self):
'''selinux是否是enforcing状态
'''
if self.get_sdk_version() < 18:
return False
return 'Enforcing' in self.run_shell_cmd('getenforce', True)
def close_selinux(self):
'''关闭selinux
'''
result = self.run_shell_cmd('setenforce 0', True)
if 'Permission denied' in result:
return False
return True
def chmod(self, file_path, attr):
        '''Change the attributes of a file or directory
        :param file_path: path of the file or directory
        :type file_path: string
        :param attr: attribute value to set, e.g. 777
        :type attr: int
'''
def _parse(num):
num = str(num)
attr = ''
su_flag = ''
if len(num) == 4:
su_flag = int(num[0])
num = num[1:]
for c in num:
c = int(c)
if c & 4:
attr += 'r'
else:
attr += '-'
if c & 2:
attr += 'w'
else:
attr += '-'
if c & 1:
attr += 'x'
else:
attr += '-'
if su_flag and su_flag == 4:
attr = attr[:2] + 's' + attr[3:]
return attr
ret = self.run_shell_cmd('chmod %s %s' %
(attr, file_path), self.is_rooted())
dir_list, file_list = self.list_dir(file_path)
if len(dir_list) == 0 and len(file_list) == 1 and file_path.endswith('/' + file_list[0]['name']):
# 这是一个文件
new_attr = file_list[0]['attr']
else:
# 目录
dir_name = file_path.split('/')[-1]
parent_path = '/'.join(file_path.split('/')[:-1])
dir_list, _ = self.list_dir(parent_path)
for dir in dir_list:
if dir['name'] == dir_name:
new_attr = dir['attr']
break
if new_attr != _parse(attr):
logger.warn('chmod failed: %r(%s)' % (ret, new_attr))
return self.chmod(file_path, attr)
return new_attr
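    # Worked example for _parse (illustrative): _parse(755) -> 'rwxr-xr-x' and
    # _parse(4755) -> 'rwsr-xr-x' (a leading 4 marks the setuid bit). chmod() retries
    # until the attribute string reported by list_dir() matches this expected value.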
def chown(self, file_path, uid, gid):
'''修改文件的拥有者和群组
:param file_path: 文件路径
:type file_path: string
:param uid: 拥有者
:type uid: string
:param gid: 群组
:type gid: string
'''
self.run_shell_cmd('chown %s:%s %s' % (uid, gid, file_path), True)
def mkdir(self, dir_path, mod=None):
'''创建目录
'''
cmd = 'mkdir %s' % (dir_path)
ret = self.run_shell_cmd(cmd, self.is_rooted())
# if not 'File exists' in ret:
# #加了-p参数貌似不会返回这个提示信息
try:
self.list_dir(dir_path)
except RuntimeError as e:
logger.warn('mkdir %s failed: %s(%s)' % (dir_path, ret, e))
return self.mkdir(dir_path, mod)
# 修改权限
if mod != None:
self.chmod(dir_path, mod)
def list_dir(self, dir_path):
        '''List a directory
'''
if ' ' in dir_path:
dir_path = '"%s"' % dir_path
use_root = self.is_rooted()
if use_root and dir_path.startswith('/sdcard') or dir_path.startswith('/storage/') or dir_path.startswith('/mnt/'):
# 部分手机上发现用root权限访问/sdcard路径不一致
use_root = False
result = self.run_shell_cmd('ls -l %s' % dir_path, use_root)
if 'Permission denied' in result:
raise PermissionError(result)
if 'No such file or directory' in result:
raise RuntimeError('file or directory %s not exist' % dir_path)
if 'Not a directory' in result:
raise RuntimeError(u'%s %s' % (dir_path, result))
dir_list = []
file_list = []
def _handle_name(name):
return name.split('/')[-1]
is_toybox = self.get_sdk_version() >= 24
is_busybox = None
# busybox格式 -rwxrwxrwx 1 shell shell 13652 Jun 3 10:56 /data/local/tmp/qt4a/inject
for line in result.split('\n'):
items = line.split()
if len(items) < 6:
continue # (6, 7, 9)
if not line[0] in ('-', 'd', 'l'):
continue
is_dir = items[0][0] == 'd' # 是否是目录
is_link = items[0][0] == 'l' # 软链
if is_busybox == None:
if is_toybox:
item = items[5] # 日期字段
else:
item = items[4] # 日期字段
if is_dir or is_link:
item = items[3] # 目录和软链没有size字段
pattern = re.compile(r'\d{4}-\d{2}-\d{2}')
if pattern.match(item):
is_busybox = False
else:
is_busybox = True
if not is_busybox:
# 防止文件名称中有空格
if not is_toybox:
if not is_dir and not is_link and len(items) > 7:
items[6] = line[line.find(items[6]):].strip()
elif is_dir and len(items) > 6:
items[5] = line[line.find(items[5]):].strip()
else:
if not is_dir and not is_link and len(items) > 8:
items[7] = line[line.find(items[7]):].strip()
elif is_dir and len(items) > 7:
items[6] = line[line.find(items[6]):].strip()
attrs = items[0]
if attrs[0] == 'd':
if is_busybox:
name = _handle_name(items[8])
elif is_toybox:
name = items[7]
else:
name = items[5]
dir_list.append({'name': name, 'attr': attrs[1:]})
elif attrs[0] == '-':
if is_busybox:
name = _handle_name(items[8])
size = int(items[4])
last_modify_time = items[7]
elif is_toybox:
name = _handle_name(items[7])
size = int(items[4])
last_modify_time = time.strptime(
'%s %s:00' % (items[5], items[6]), "%Y-%m-%d %X")
else:
name = items[6]
size = int(items[3])
try:
last_modify_time = time.strptime(
'%s %s:00' % (items[4], items[5]), "%Y-%m-%d %X")
except:
# TODO: 即将删掉,调试用
logger.info('line=%s' % line)
raise
file_list.append(
{'name': name, 'attr': attrs[1:], 'size': size, 'last_modify_time': last_modify_time})
elif attrs[0] == 'l': # link
if is_busybox:
name = _handle_name(items[8])
last_modify_time = items[7]
link = items[10]
elif is_toybox:
name = items[7]
last_modify_time = time.strptime(
'%s %s:00' % (items[5], items[6]), "%Y-%m-%d %X")
link = items[9]
else:
name = items[5]
last_modify_time = time.strptime(
'%s %s:00' % (items[3], items[4]), "%Y-%m-%d %X")
link = items[7]
file_list.append(
{'name': name, 'attr': attrs[1:], 'link': link, 'last_modify_time': last_modify_time})
return dir_list, file_list
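    # Return value sketch (illustrative values): list_dir('/data/local/tmp') returns
    # (dir_list, file_list), e.g.
    #   dir_list  = [{'name': 'qt4a', 'attr': 'rwxrwxrwx'}]
    #   file_list = [{'name': 'inject', 'attr': 'rwxrwxrwx', 'size': 13652,
    #                 'last_modify_time': <time.struct_time>}]
    # Symlink entries carry a 'link' key instead of 'size'.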
def get_sdcard_path(self):
'''获取sdcard路径
'''
path = '/sdcard'
while True:
dir_list, file_list = self.list_dir(path)
if len(dir_list) == 0 and len(file_list) == 1 and 'link' in file_list[0]:
# another link
path = file_list[0]['link']
else:
break
return path
def get_file_info(self, file_path):
'''获取文件信息
'''
return self.list_dir(file_path)[1][0]
def copy_file(self, src_path, dst_path):
'''在手机上拷贝文件
'''
if not hasattr(self, '_has_cp'):
self._has_cp = 'not found' not in self.run_shell_cmd('cp')
if self._has_cp: # 不是所有的ROM都有cp命令
self.run_shell_cmd('cp %s %s' %
(src_path, dst_path), self.is_rooted())
else:
self.run_shell_cmd('cat %s > %s' % (
src_path, dst_path), self.is_rooted(), timeout=30) # 部分手机上发现此方法耗时较多
def delete_file(self, file_path):
'''删除手机上文件
'''
if '*' in file_path:
# 使用通配符时不能使用引号
self.run_shell_cmd('rm -f %s' % file_path, self.is_rooted())
else:
file_path = file_path.replace('"', r'\"')
self.run_shell_cmd('rm -f "%s"' % file_path, self.is_rooted())
def delete_folder(self, folder_path):
'''删除手机上的目录
'''
folder_path = folder_path.replace('"', r'\"')
self.run_shell_cmd('rm -R "%s"' % folder_path, self.is_rooted())
def run_as_by_app(self, package_name, cmdline, **kwargs):
'''在app中执行命令
'''
cmd_res_path = '/sdcard/qt4a_cmd_res.txt'
self.delete_file(cmd_res_path)
timeout = 30
if 'timeout' in kwargs:
timeout = kwargs['timeout']
try:
self.start_activity('%s/com.test.androidspy.inject.CmdExecuteActivity' %
package_name, extra={'cmdline': cmdline, 'timeout': timeout}, wait=False)
except Exception as e:
if 'com.test.androidspy.inject.CmdExecuteActivity} does not exist' in e.args[0]:
raise RuntimeError(
'该命令需要对apk重打包才能执行,请使用`qt4a-manage repack-apk -p /path/to/apk`命令进行重打包并安装后重试!')
raise
cmd_argv_list = cmdline.split()
if len(cmd_argv_list) > 1 and cmd_argv_list[0] == 'pm' and cmd_argv_list[1] == 'clear':
logger.info('run cmd:%s,return Success' % cmdline)
time.sleep(2)
return 'Success'
time0 = time.time()
while time.time() - time0 < timeout:
try:
self.list_dir(cmd_res_path)
result = self.run_shell_cmd("cat %s" % cmd_res_path)
return result
except RuntimeError as e:
logger.info('run_as_by_app exception:%s' % e)
time.sleep(1)
raise TimeoutError("run_as_by_app timeout:%d" % timeout)
def run_as(self, package_name, cmdline, **kwargs):
'''以package_name权限执行命令
'''
if self.is_rooted():
if self._need_quote:
cmdline = '"%s"' % cmdline
cmdline = 'su %s %s' % (package_name, cmdline)
return self.run_shell_cmd(cmdline, False, **kwargs)
if ':' in package_name:
package_name = package_name.split(':')[0] # 允许传入进程名
if '&&' in cmdline:
cmndline = 'run-as %s sh -c "%s"' % (package_name, cmdline)
else:
cmndline = 'run-as %s %s' % (package_name, cmdline)
result = self.run_shell_cmd(cmndline, **kwargs)
run_as_succ = False
if 'is unknown' in result:
logger.info('Package %s not installed' % package_name)
elif 'not debuggable' in result:
logger.info('Package %s is not debuggable' % package_name)
elif 'Could not set capabilities: Operation not permitted' in result:
logger.info('Samsung device has bug with run-as command')
elif 'run-as: exec failed for' in result:
raise RuntimeError(result)
else:
run_as_succ = True
if not run_as_succ:
try:
result = self.run_as_by_app(package_name, cmdline, **kwargs)
except RuntimeError:
logger.exception('run %s as %s by app failed' %
(cmdline, package_name))
raise PermissionError('run %s as %s failed' %
(cmdline, package_name))
return result
def is_app_process64(self, process):
'''是否是64位应用进程
:param process: 进程名或进程ID
:tytpe process: string/int
'''
process_name = ''
if isinstance(process, six.string_types) and not process.isdigit():
process_name = process
pid = self.get_pid(process)
else:
pid = int(process)
if pid <= 0:
raise ValueError('process %s not exist' % process)
if self.is_rooted():
return 'app_process64' in self.run_shell_cmd('ls -l /proc/%d/exe' % pid, True)
elif process_name:
return 'app_process64' in self.run_as(process_name, 'ls -l /proc/%d/exe' % pid)
else:
raise ValueError('Non root device must pass process name')
def _list_process(self):
'''获取进程列表
'''
cmdline = 'ps'
if self.get_sdk_version() >= 26:
cmdline += ' -A'
result = self.run_shell_cmd(cmdline).strip()
lines = result.split('\n')
busybox = False
if lines[0].startswith('PID'):
busybox = True
result_list = []
for i in range(1, len(lines)):
lines[i] = lines[i].strip()
if not lines[i]:
continue
items = lines[i].split()
if not busybox:
if len(items) < 9:
err_msg = "ps命令返回格式错误:\n%s" % lines[i]
if len(items) == 8:
result_list.append(
{'pid': int(items[1]), 'ppid': int(items[2]), 'proc_name': items[7]})
else:
raise RuntimeError(err_msg)
else:
proc_name = items[8]
if len(proc_name) <= 1 and len(items) > 9:
proc_name = items[9]
result_list.append(
{'pid': int(items[1]), 'ppid': int(items[2]), 'proc_name': proc_name})
else:
idx = 4
cmd = items[idx]
if len(cmd) == 1:
# 有时候发现此处会有“N”
idx += 1
cmd = items[idx]
idx += 1
if cmd[0] == '{' and cmd[-1] == '}':
cmd = items[idx]
ppid = 0
if items[1].isdigit():
ppid = int(items[1]) # 有些版本中没有ppid
result_list.append(
{'pid': int(items[0]), 'ppid': ppid, 'proc_name': cmd})
return result_list
def list_process(self):
'''获取进程列表
'''
for _ in range(3):
try:
return self._list_process()
except RuntimeError as e:
logger.warn('%s' % e)
else:
raise RuntimeError('获取进程列表失败')
def get_pid(self, proc_name):
'''获取进程ID
'''
process_list = self.list_process()
for process in process_list:
if process['proc_name'] == proc_name:
return process['pid']
return 0
def get_process_status(self, pid):
'''获取进程状态信息
'''
ret = self.run_shell_cmd('cat /proc/%d/status' % pid, True)
result = {}
for line in ret.split('\n'):
if not line:
continue
if not ':' in line:
logger.warn('get_process_status line error: %r' % line)
continue
key, value = line.split(':')
result[key] = value.strip()
return result
def get_process_user(self, pid):
'''get procees user name
:param pid: process id
:type pid: int
'''
uid = -1
cmdline = 'cat /proc/%d/status' % pid
result = self.run_shell_cmd(cmdline).strip()
for line in result.split('\n'):
line = line.strip()
if line.startswith('Uid:'):
uid = int(line.split()[1])
break
if uid < 0:
raise RuntimeError('get uid of process %d failed' % pid)
if uid < 10000:
return uid
cmdline = 'cat /proc/%d/cmdline' % pid
result = self.run_shell_cmd(cmdline).strip().split('\x00')[0]
if ':' in result:
result = result.split(':')[0]
return result
def kill_process(self, proc_name_or_pid):
'''杀进程
'''
kill_list = []
package_name = None
process_list = self.list_process()
for process in process_list:
if isinstance(proc_name_or_pid, six.string_types) and proc_name_or_pid in process['proc_name']:
if process['proc_name'] == proc_name_or_pid:
# 保证主进程首先被杀
kill_list.insert(0, process['pid'])
else:
kill_list.append(process['pid'])
elif process['pid'] == proc_name_or_pid:
kill_list.append(process['pid'])
if not kill_list:
return None # 没有找到对应的进程
if package_name == None and not self.is_rooted():
package_name = self.get_process_user(kill_list[0])
for i, pid in enumerate(kill_list):
kill_list[i] = 'kill -9 %d' % pid
cmd_line = ' && '.join(kill_list)
if package_name == 2000:
# shell process
result = self.run_shell_cmd(cmd_line)
elif self.is_rooted():
result = self.run_shell_cmd(cmd_line, True)
elif isinstance(package_name, six.string_types):
# package
result = self.run_as(package_name, cmd_line)
else:
raise PermissionError(
'can\'t kill uid=%s process in non-root device' % package_name)
if 'Operation not permitted' in result:
raise PermissionError('run %s failed: %s' % (cmd_line, result))
return True
def get_device_imei(self):
'''获取手机串号
'''
result = self.run_shell_cmd('dumpsys iphonesubinfo', self.is_rooted())
for line in result.split('\n'):
if line.find('Device ID') >= 0:
return line.split('=')[1].strip()
raise RuntimeError('获取imei号失败:%r' % result)
def get_cpu_total_time(self):
cpu_time = 0
result = self.run_shell_cmd('cat /proc/stat')
result = result.split('\n')[0]
for item in result.split(' '):
item = item.strip()
if not item:
continue
if item == 'cpu':
continue
cpu_time += int(item)
return cpu_time
def get_process_cpu_time(self, pid):
result = self.run_shell_cmd('cat /proc/%d/stat' % pid)
result = result.split(' ')
utime = int(result[13])
stime = int(result[14])
cutime = int(result[15])
cstime = int(result[16])
return utime + stime + cutime + cstime
def get_thread_cpu_time(self, pid, tid):
result = self.run_shell_cmd('cat /proc/%d/task/%d/stat' % (pid, tid))
result = result.split(' ')
utime = int(result[13])
stime = int(result[14])
cutime = int(result[15])
cstime = int(result[16])
return utime + stime + cutime + cstime
def get_process_cpu(self, proc_name, interval=0.1):
        '''Get the CPU usage of the process and of its main thread
'''
pid = self.get_pid(proc_name)
# print (pid)
if not pid:
return None
total_cpu1 = self.get_cpu_total_time()
process_cpu1 = self.get_process_cpu_time(pid)
thread_cpu1 = self.get_thread_cpu_time(pid, pid)
time.sleep(interval)
total_cpu2 = self.get_cpu_total_time()
process_cpu2 = self.get_process_cpu_time(pid)
thread_cpu2 = self.get_thread_cpu_time(pid, pid)
total_cpu = total_cpu2 - total_cpu1
process_cpu = process_cpu2 - process_cpu1
thread_cpu = thread_cpu2 - thread_cpu1
return process_cpu * 100 // total_cpu, thread_cpu * 100 // total_cpu
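    # The returned pair is a plain tick ratio over the sampling interval, i.e.
    #   process% = 100 * (process_ticks_after - process_ticks_before) // (total_ticks_after - total_ticks_before)
    # Usage sketch (the package name is illustrative):
    #   proc_pct, main_thread_pct = adb.get_process_cpu('com.example.app', interval=0.5)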
@staticmethod
def list_device():
'''获取设备列表
'''
return LocalADBBackend.list_device()
@staticmethod
def is_local_device(device_id):
'''是否是本地设备
'''
pattern = re.compile(r'([\w|\-|\.]+):(.+)')
mat = pattern.match(device_id)
if not mat or (mat.group(2).isdigit() and int(mat.group(2)) > 1024 and int(mat.group(2)) < 65536):
return True
else:
return False
@staticmethod
def open_device(name_or_backend=None):
'''打开设备
'''
if isinstance(name_or_backend, six.string_types):
adb_backend = LocalADBBackend.open_device(name_or_backend)
else:
adb_backend = name_or_backend
adb = ADB(adb_backend)
if adb.is_rooted() and adb.is_selinux_opened():
if not adb.close_selinux():
logger.warn('Close selinux failed')
# raise RuntimeError('关闭selinux失败,确认手机是否完美Root')
return adb
@staticmethod
def connect_device(name):
'''使用TCP连接设备
'''
proc = subprocess.Popen(
[adb_path, 'connect', name], stdout=subprocess.PIPE)
result = proc.stdout.read()
if result.find('unable to connect to') >= 0:
print(result, file=sys.stderr)
return False
return True
def get_cpu_time(self):
'''获取手机全局总时间片和空闲时间片
'''
import re
cpu_time = 0
result = self.run_shell_cmd('cat /proc/stat')
result = result.split('\n')[0]
result, num = re.subn(r'\s+', ' ', result) # 将字符串中多个相连的空白字符合并成一个空白字符
results = result.split(' ')
if len(results) < 5:
logger.warn('无法取得CPU时间片统计,请确保手机正常链接,并已启动!')
return 0, 0
idle_time = int(results[4])
for item in results:
item = item.strip()
if not item:
continue
if item == 'cpu':
continue
cpu_time += int(item)
return cpu_time, idle_time
def get_cpu_usage(self, interval=0.5):
'''获取手机全局CPU使用率
'''
total_time1, idle_time1 = self.get_cpu_time()
time.sleep(interval)
total_time2, idle_time2 = self.get_cpu_time()
total_time = total_time2 - total_time1
idle_time = idle_time2 - idle_time1
if total_time == 0:
return -1
return (total_time - idle_time) * 100 // total_time
@static_result
def is_art(self):
'''是否是art虚拟机
'''
ret = self.get_property('persist.sys.dalvik.vm.lib')
if not ret:
ret = self.get_property('persist.sys.dalvik.vm.lib.2')
return 'libart.so' in ret
def dump_stack(self, pid_or_procname):
'''获取进程调用堆栈
'''
if isinstance(pid_or_procname, six.string_types):
pid = self.get_pid(pid_or_procname)
else:
pid = pid_or_procname
anr_dir = '/data/anr'
try:
self.list_dir(anr_dir)
except RuntimeError:
self.mkdir(anr_dir)
self.chmod(anr_dir, 777)
cmd = 'kill -3 %d' % pid
self.run_shell_cmd(cmd, True)
return self.run_shell_cmd('cat %s/traces.txt' % anr_dir, True)
def get_state(self):
'''获取设备状态
'''
return self.run_adb_cmd('get-state')
if __name__ == '__main__':
pass
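    # Minimal usage sketch (the serial number is illustrative):
    #   adb = ADB.open_device('emulator-5554')
    #   print(adb.get_device_model(), adb.get_system_version())
    #   print(adb.run_shell_cmd('ls /sdcard'))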
|
py | 1a51eb3803257469bdc8d3899956c301a9faf0aa | """
Introduction to conditional blocks.
A conditional block can test several different conditions
"""
import sys
try:
    numero1 = int(input("Enter a first number: "))
    numero2 = int(input("Enter a second number: "))
except ValueError as e:
    print("The conversion of at least one of the numbers failed",
          file=sys.stderr)
    sys.exit()
# Make the comparison
if numero1 < numero2:
    print(numero1, "<", numero2)
elif numero1 > numero2:
    print(numero1, ">", numero2)
else:
    print(numero1, "==", numero2)
|
py | 1a51eb580b3e32af25228914002c3747002fd1ee | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ec2_lc_info
version_added: 1.0.0
short_description: Gather information about AWS Autoscaling Launch Configurations.
description:
- Gather information about AWS Autoscaling Launch Configurations.
- This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change.
author: "Loïc Latreille (@psykotox)"
requirements: [ boto3 ]
options:
name:
description:
- A name or a list of name to match.
default: []
type: list
elements: str
sort:
description:
- Optional attribute which with to sort the results.
choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
type: str
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
type: str
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
type: int
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
type: int
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all launch configurations
community.aws.ec2_lc_info:
- name: Gather information about launch configuration with name "example"
community.aws.ec2_lc_info:
name: example
- name: Gather information sorted by created_time from most recent to least recent
community.aws.ec2_lc_info:
sort: created_time
sort_order: descending
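# Illustrative addition (not from the upstream examples); sort_end is assumed to act
# like Python slice notation, as described in the options above.
- name: Gather information about the three oldest launch configurations
  community.aws.ec2_lc_info:
    sort: created_time
    sort_order: ascending
    sort_end: 3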
'''
RETURN = r'''
block_device_mapping:
description: Block device mapping for the instances of launch configuration
type: list
returned: always
sample: "[{
        'device_name': '/dev/xvda',
'ebs': {
'delete_on_termination': true,
'volume_size': 8,
'volume_type': 'gp2'
}]"
classic_link_vpc_security_groups:
description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
type: str
returned: always
sample:
created_time:
description: The creation date and time for the launch configuration
type: str
returned: always
sample: "2016-05-27T13:47:44.216000+00:00"
ebs_optimized:
description: EBS I/O optimized (true ) or not (false )
type: bool
returned: always
    sample: true
image_id:
description: ID of the Amazon Machine Image (AMI)
type: str
returned: always
sample: "ami-12345678"
instance_monitoring:
description: Launched with detailed monitoring or not
type: dict
returned: always
sample: "{
'enabled': true
}"
instance_type:
description: Instance type
type: str
returned: always
sample: "t2.micro"
kernel_id:
description: ID of the kernel associated with the AMI
type: str
returned: always
sample:
key_name:
description: Name of the key pair
type: str
returned: always
sample: "user_app"
launch_configuration_arn:
description: Amazon Resource Name (ARN) of the launch configuration
type: str
returned: always
sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
launch_configuration_name:
description: Name of the launch configuration
type: str
returned: always
sample: "lc-app"
ramdisk_id:
description: ID of the RAM disk associated with the AMI
type: str
returned: always
sample:
security_groups:
description: Security groups to associated
type: list
returned: always
sample: "[
'web'
]"
user_data:
description: User data available
type: str
returned: always
sample:
'''
try:
import botocore
from botocore.exceptions import ClientError
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
def list_launch_configs(connection, module):
launch_config_name = module.params.get("name")
sort = module.params.get('sort')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
try:
pg = connection.get_paginator('describe_launch_configurations')
launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
except ClientError as e:
module.fail_json_aws(e, msg="Failed to list launch configs")
snaked_launch_configs = []
for launch_config in launch_configs['LaunchConfigurations']:
snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
for launch_config in snaked_launch_configs:
if 'CreatedTime' in launch_config:
launch_config['CreatedTime'] = str(launch_config['CreatedTime'])
if sort:
snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
if sort and sort_start and sort_end:
snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
elif sort and sort_start:
snaked_launch_configs = snaked_launch_configs[sort_start:]
elif sort and sort_end:
snaked_launch_configs = snaked_launch_configs[:sort_end]
module.exit_json(launch_configurations=snaked_launch_configs)
def main():
argument_spec = dict(
name=dict(required=False, default=[], type='list', elements='str'),
sort=dict(required=False, default=None,
choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start=dict(required=False, type='int'),
sort_end=dict(required=False, type='int'),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if module._name == 'ec2_lc_facts':
module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", date='2021-12-01', collection_name='community.aws')
try:
connection = module.client('autoscaling')
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Failed to connect to AWS')
list_launch_configs(connection, module)
if __name__ == '__main__':
main()
|
py | 1a51ed321680f4b8f4901f8063a7caf8ff74293a | import json
import logging
import flask
from nest_py.core.flask.nest_endpoints.nest_endpoint import NestEndpoint
# create a logger named 'client'
logger = logging.getLogger('client')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('client-debug.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.addHandler(fh)
# define a convenience wrapper for the convenience wrappers
LOGMUX = { "debug": logger.debug, "info": logger.info, "warn": logger.warn, "error": logger.error, "critical": logger.critical }
class LoggingEndpoint(NestEndpoint):
def __init__(self, authenticator):
self.flask_ep = 'logs'
self.flask_rule = 'logs'
#TODO:We aren't requiring the user be logged in so we can
#capture errors at the login/logout screens, but that might be a
#bad idea if we ever let people outside the firewall access
#a nest app
require_auth = False
super(LoggingEndpoint, self).__init__(self.flask_ep,
authenticator, require_auth=require_auth)
return
def get_flask_rule(self):
return self.flask_rule
def get_flask_endpoint(self):
return self.flask_ep
def do_POST(self, request, requesting_user):
"""
called when someone POSTs a message to the 'logs' endpoint
"""
# Scrape some relevant fields from the request data
# TODO: Scrape other useful things? UserAgent? Duration?
request_data = json.loads(request.data)
level = request_data.get('level', 'debug')
message = request_data.get('message', 'blank')
stack = request_data.get('stack')
sent = request_data.get('sent')
ip = request.remote_addr
if requesting_user is None:
username = 'NOT_LOGGED_IN'
else:
username = requesting_user.get_username()
# Format and log the message to filehandler and consolehandler, as defined above
message_prefix = ip + " (" + username +") - "
LOGMUX[level](message_prefix + message)
# Log stack trace, if one was given
if stack is not None:
LOGMUX[level](message_prefix + "Stack Trace:\n" + stack)
#pylint: enable=unused-argument
resp = flask.make_response('logged ok', 200)
return resp
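# Illustrative client-side sketch (not part of the original endpoint). It shows the JSON
# body that do_POST() above expects; the URL and field values are placeholders.
#
#     import requests
#     requests.post('http://localhost:5000/logs', json={
#         'level': 'error',
#         'message': 'failed to load dashboard',
#         'stack': 'TypeError: x is undefined\n    at render()',
#         'sent': '2020-01-01T00:00:00Z',
#     })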
|
py | 1a51edc34c036dae1134929f0391bef82df82dfe | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python configuration for SetInput interaction."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from extensions.interactions import base
class SetInput(base.BaseInteraction):
"""Interaction for input of an unordered set of strings."""
name = 'Set Input'
description = 'Allows learners to enter an unordered set of strings.'
display_mode = base.DISPLAY_MODE_INLINE
_dependency_ids = []
answer_type = 'SetOfUnicodeString'
instructions = None
narrow_instructions = None
needs_summary = False
can_have_solution = True
show_generic_submit_button = True
# NB: There used to be a UnicodeString-typed parameter here called
# 'element_type'. This has since been removed.
_customization_arg_specs = []
_answer_visualization_specs = [{
# Table with answer counts for top N answers.
'id': 'FrequencyTable',
'options': {
'column_headers': ['Answer', 'Count'],
'title': 'Top 10 answers',
},
'calculation_id': 'Top10AnswerFrequencies',
'addressed_info_is_supported': True,
}, {
# Table with most commonly submitted elements of set.
'id': 'FrequencyTable',
'options': {
'column_headers': ['Element', 'Count'],
'title': 'Commonly submitted elements',
},
'calculation_id': 'FrequencyCommonlySubmittedElements',
# Since individual answer elements are not generally intended to be
# used as a single response to SetInput interactions, we omit the
# addressed column entirely.
'addressed_info_is_supported': False,
}]
|
py | 1a51edfbe41e25bf090bb21d6fc009d8256c7a7e | import json
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
vmanage_session = requests.session()
# Reserved sandbox
vmanage_node = {
"host": "10.10.20.90",
"username": "admin",
"password": "C1sco12345",
"verify": False
}
data = {
"j_username": vmanage_node["username"],
"j_password": vmanage_node["password"]
}
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
URI = f"https://{vmanage_node['host']}"
def authenticate():
response = vmanage_session.post(url=f"{URI}/j_security_check", data=data, headers=headers, verify=vmanage_node.get("verify"))
if response.status_code != 200 or "html" in response.text:
print(f"Could not authenticate: {response.status_code}\nExiting")
exit()
# Get XSRF TOKEN
response = vmanage_session.get(f"{URI}/dataservice/client/token")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get token: {response.status_code}\nExiting")
exit()
else:
vmanage_session.headers["X-XSRF-TOKEN"] = response.text
# TODO: User and Group
def get_admin_user():
response = vmanage_session.get(f"{URI}/dataservice/admin/user")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin users: {response.status_code}")
return None
return response.json()["data"]
def post_admin_user(username: str, fullname: str, group: list, password: str):
data = {
"userName": username,
"description": fullname,
"group": group,
"password": password
}
headers = {"Content-Type": "application/json"}
response = vmanage_session.post(f"{URI}/dataservice/admin/user", data=json.dumps(data), headers=headers)
if response.status_code != 200 or "html" in response.text:
print(f"Could not add user: {response.status_code}")
def delete_admin_user(username: str):
response = vmanage_session.delete(f"{URI}/dataservice/admin/user/{username}")
if response.status_code != 200 or "html" in response.text:
print(f"Could not delete user: {response.status_code}")
print(response.text)
def get_admin_user_activeSessions():
# TODO: Response code is 403, why?
response = vmanage_session.get(f"{URI}/dataservice/admin/user/activeSessions")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin users active sessions: {response.status_code}")
return None
return response.json()["data"]
def get_admin_user_role():
response = vmanage_session.get(f"{URI}/dataservice/admin/user/role")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin user role: {response.status_code}")
return None
return response.json()
def get_admin_usergroup():
response = vmanage_session.get(f"{URI}/dataservice/admin/usergroup")
if response.status_code != 200 or "html" in response.text:
print(f"Could not get admin usergroup: {response.status_code}")
return None
return response.json()["data"]
# TODO: Audit Log
# TODO: Tenant Management
# TODO: Tenant Backup Restore
# TODO: Utility - Logging
# TODO: Utility - Security
if __name__ == "__main__":
authenticate()
###### GET ADMIN USERS AND PRINT THEM
print("=" * 80)
print("----- USERS")
print("=" * 80)
users = get_admin_user()
for user in users:
print(f" Username: {user['userName']}")
print(f" Full name: {user.get('description')}")
# Each user can be in multiple groups
print(" Groups: ", end="")
for group in user["group"]:
print(group, end=" ")
print()
print(" " + "=" * 75)
print()
##### GET ADMIN USER ACTIVE SESSIONS
print("=" * 80)
print("----- ACTIVE USER SESSIONS")
print("=" * 80)
user_sessions = get_admin_user_activeSessions()
print(f" {user_sessions}")
print()
##### CHECK IF THIS USER SESSION HAS ADMIN PRIVILEGES
print("=" * 80)
print("----- USER ADMIN ROLE")
print("=" * 80)
user_role = get_admin_user_role()
print(f" Username: {vmanage_node['username']} is admin: {user_role['isAdmin']}")
print()
##### GET USERGROUPS AND PRINT PRIVILEGES
print("=" * 80)
print("----- USERGROUP PRIVILEGES")
print("=" * 80)
usergroups = get_admin_usergroup()
for group in usergroups:
print(" " + "=" * 75)
print(f" Group: {group['groupName']}")
print(" " + "=" * 75)
print(" Tasks")
print(" " + "=" * 70)
for task in group["tasks"]:
if task.get("enabled"):
print(f" {task['feature']}:", end=" ")
if task.get("read"):
print("r", end="")
if task.get("write"):
print("w", end="")
print()
print()
##### ADD ADMIN USER
username = "pythonuser"
print("=" * 80)
print(f"ADDING USER: {username}")
print("=" * 80)
post_admin_user(username, "Python Automation", ["netadmin"], "cisco")
##### VERIFY
print("=" * 80)
print(f"VERIFYING {username} EXISTS")
print("=" * 80)
users = get_admin_user()
found = None
for user in users:
if user["userName"] == username:
print(f" Found user: {username}")
found = True
break
##### DELETE USER
if found:
print("=" * 80)
print(f"DELETING USER: {username}")
delete_admin_user(username)
##### VERIFY
print("=" * 80)
print(f"VERIFYING {username} DOESN'T EXISTS")
print("=" * 80)
users = get_admin_user()
found = None
for user in users:
if user["userName"] == username:
print(f" Found user: {username}")
found = True
break
if not found:
print(f" {username} not found")
vmanage_session.close()
|
py | 1a51efb1e49f26c01e4601cd91a1f5f298458bd9 | # Advice: use repr(our_file.read()) to print the full output of tqdm
# (else '\r' will replace the previous lines and you'll see only the latest.)
from __future__ import unicode_literals
import sys
import csv
import re
import os
from nose import with_setup
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
from time import sleep
from tqdm import tqdm
from tqdm import trange
from tqdm import TqdmDeprecationWarning
from tqdm._tqdm import TMonitor
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from io import IOBase # to support unicode strings
class DeprecationError(Exception):
pass
# Ensure we can use `with closing(...) as ... :` syntax
if getattr(StringIO, '__exit__', False) and \
getattr(StringIO, '__enter__', False):
def closing(arg):
return arg
else:
from contextlib import closing
try:
_range = xrange
except NameError:
_range = range
try:
_unicode = unicode
except NameError:
_unicode = str
nt_and_no_colorama = False
if os.name == 'nt':
try:
import colorama # NOQA
except ImportError:
nt_and_no_colorama = True
# Regex definitions
# List of control characters
CTRLCHR = [r'\r', r'\n', r'\x1b\[A'] # Need to escape [ for regex
# Regular expressions compilation
RE_rate = re.compile(r'(\d+\.\d+)it/s')
RE_ctrlchr = re.compile("(%s)" % '|'.join(CTRLCHR)) # Match control chars
RE_ctrlchr_excl = re.compile('|'.join(CTRLCHR)) # Match and exclude ctrl chars
RE_pos = re.compile(r'((\x1b\[A|\r|\n)+((pos\d+) bar:\s+\d+%|\s{3,6})?)') # NOQA
class DiscreteTimer(object):
'''Virtual discrete time manager, to precisely control time for tests'''
def __init__(self):
self.t = 0.0
def sleep(self, t):
'''Sleep = increment the time counter (almost no CPU used)'''
self.t += t
def time(self):
'''Get the current time'''
return self.t
class FakeSleep(object):
'''Wait until the discrete timer reached the required time'''
def __init__(self, dtimer):
self.dtimer = dtimer
def sleep(self, t):
end = t + self.dtimer.t
while(self.dtimer.t < end):
sleep(0.0000001) # sleep a bit to interrupt (instead of pass)
def cpu_timify(t, timer=None):
'''Force tqdm to use the specified timer instead of system-wide time()'''
if timer is None:
timer = DiscreteTimer()
t._time = timer.time
t._sleep = timer.sleep
t.start_t = t.last_print_t = t._time()
return timer
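# Illustrative note (not part of the original helpers): DiscreteTimer, FakeSleep and
# cpu_timify let the tests advance time deterministically instead of really sleeping,
# roughly like this:
#
#     timer = DiscreteTimer()
#     with closing(StringIO()) as out:
#         t = tqdm(total=10, file=out, mininterval=0)
#         cpu_timify(t, timer)
#         timer.sleep(1.0)   # one virtual second passes instantly
#         t.update(5)        # the displayed rate uses the virtual clock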
def pretest():
if getattr(tqdm, "_instances", False):
n = len(tqdm._instances)
if n:
tqdm._instances.clear()
raise EnvironmentError(
"{0} `tqdm` instances still in existence PRE-test".format(n))
def posttest():
if getattr(tqdm, "_instances", False):
n = len(tqdm._instances)
if n:
tqdm._instances.clear()
raise EnvironmentError(
"{0} `tqdm` instances still in existence POST-test".format(n))
class UnicodeIO(IOBase):
''' Unicode version of StringIO '''
def __init__(self, *args, **kwargs):
super(UnicodeIO, self).__init__(*args, **kwargs)
self.encoding = 'U8' # io.StringIO supports unicode, but no encoding
self.text = ''
self.cursor = 0
def __len__(self):
return len(self.text)
def seek(self, offset):
self.cursor = offset
def tell(self):
return self.cursor
def write(self, s):
self.text = self.text[:self.cursor] + s + \
self.text[self.cursor + len(s):]
self.cursor += len(s)
def read(self, n=-1):
_cur = self.cursor
self.cursor = len(self) if n < 0 \
else min(_cur + n, len(self))
return self.text[_cur:self.cursor]
def getvalue(self):
return self.text
def get_bar(all_bars, i):
""" Get a specific update from a whole bar traceback """
# Split according to any used control characters
bars_split = RE_ctrlchr_excl.split(all_bars)
bars_split = list(filter(None, bars_split)) # filter out empty splits
return bars_split[i]
def progressbar_rate(bar_str):
return float(RE_rate.search(bar_str).group(1))
def squash_ctrlchars(s):
""" Apply control characters in a string just like a terminal display """
# List of supported control codes
ctrlcodes = [r'\r', r'\n', r'\x1b\[A']
# Init variables
curline = 0 # current line in our fake terminal
lines = [''] # state of our fake terminal
# Split input string by control codes
RE_ctrl = re.compile("(%s)" % ("|".join(ctrlcodes)), flags=re.DOTALL)
s_split = RE_ctrl.split(s)
s_split = filter(None, s_split) # filter out empty splits
# For each control character or message
for nextctrl in s_split:
# If it's a control character, apply it
if nextctrl == '\r':
# Carriage return
# Go to the beginning of the line
# simplified here: we just empty the string
lines[curline] = ''
elif nextctrl == '\n':
# Newline
# Go to the next line
if curline < (len(lines) - 1):
# If already exists, just move cursor
curline += 1
else:
# Else the new line is created
lines.append('')
curline += 1
elif nextctrl == '\x1b[A':
# Move cursor up
if curline > 0:
curline -= 1
else:
raise ValueError("Cannot go up, anymore!")
# Else, it is a message, we print it on current line
else:
lines[curline] += nextctrl
return lines
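# For example (illustrative, not an original doctest):
#     squash_ctrlchars('a\rb\nc')  ->  ['b', 'c']
# '\r' wipes the current line, '\n' moves to a new one, and '\x1b[A' moves back up.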
def test_format_interval():
""" Test time interval format """
format_interval = tqdm.format_interval
assert format_interval(60) == '01:00'
assert format_interval(6160) == '1:42:40'
assert format_interval(238113) == '66:08:33'
def test_format_meter():
""" Test statistics and progress bar formatting """
try:
unich = unichr
except NameError:
unich = chr
format_meter = tqdm.format_meter
assert format_meter(0, 1000, 13) == \
" 0%| | 0/1000 [00:13<?, ?it/s]"
assert format_meter(0, 1000, 13, ncols=68, prefix='desc: ') == \
"desc: 0%| | 0/1000 [00:13<?, ?it/s]"
assert format_meter(231, 1000, 392) == \
" 23%|" + unich(0x2588) * 2 + unich(0x258e) + \
" | 231/1000 [06:32<21:44, 1.70s/it]"
assert format_meter(10000, 1000, 13) == \
"10000it [00:13, 769.23it/s]"
assert format_meter(231, 1000, 392, ncols=56, ascii=True) == \
" 23%|" + '#' * 3 + '6' + \
" | 231/1000 [06:32<21:44, 1.70s/it]"
assert format_meter(100000, 1000, 13, unit_scale=True, unit='iB') == \
"100KiB [00:13, 7.69KiB/s]"
assert format_meter(100, 1000, 12, ncols=0, rate=7.33) == \
" 10% 100/1000 [00:12<02:02, 7.33it/s]"
# Check that bar_format correctly adapts {bar} size to the rest
assert format_meter(20, 100, 12, ncols=13, rate=8.1,
bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
" 20%|" + unich(0x258f) + "|20/100"
assert format_meter(20, 100, 12, ncols=14, rate=8.1,
bar_format=r'{l_bar}{bar}|{n_fmt}/{total_fmt}') == \
" 20%|" + unich(0x258d) + " |20/100"
# Check that bar_format can print only {bar} or just one side
assert format_meter(20, 100, 12, ncols=2, rate=8.1,
bar_format=r'{bar}') == \
unich(0x258d) + " "
assert format_meter(20, 100, 12, ncols=7, rate=8.1,
bar_format=r'{l_bar}{bar}') == \
" 20%|" + unich(0x258d) + " "
assert format_meter(20, 100, 12, ncols=6, rate=8.1,
bar_format=r'{bar}|test') == \
unich(0x258f) + "|test"
def test_si_format():
""" Test SI unit prefixes """
format_meter = tqdm.format_meter
assert '9.00 ' in format_meter(1, 9, 1, unit_scale=True, unit='B')
assert '99.0 ' in format_meter(1, 99, 1, unit_scale=True)
assert '999 ' in format_meter(1, 999, 1, unit_scale=True)
assert '9.99K ' in format_meter(1, 9994, 1, unit_scale=True)
assert '10.0K ' in format_meter(1, 9999, 1, unit_scale=True)
assert '99.5K ' in format_meter(1, 99499, 1, unit_scale=True)
assert '100K ' in format_meter(1, 99999, 1, unit_scale=True)
assert '1.00M ' in format_meter(1, 999999, 1, unit_scale=True)
assert '1.00G ' in format_meter(1, 999999999, 1, unit_scale=True)
assert '1.00T ' in format_meter(1, 999999999999, 1, unit_scale=True)
assert '1.00P ' in format_meter(1, 999999999999999, 1, unit_scale=True)
assert '1.00E ' in format_meter(1, 999999999999999999, 1, unit_scale=True)
assert '1.00Z ' in format_meter(1, 999999999999999999999, 1,
unit_scale=True)
assert '1.0Y ' in format_meter(1, 999999999999999999999999, 1,
unit_scale=True)
assert '10.0Y ' in format_meter(1, 9999999999999999999999999, 1,
unit_scale=True)
assert '100.0Y ' in format_meter(1, 99999999999999999999999999, 1,
unit_scale=True)
assert '1000.0Y ' in format_meter(1, 999999999999999999999999999, 1,
unit_scale=True)
@with_setup(pretest, posttest)
def test_all_defaults():
""" Test default kwargs """
with closing(UnicodeIO()) as our_file:
with tqdm(range(10), file=our_file) as progressbar:
assert len(progressbar) == 10
for _ in progressbar:
pass
# restore stdout/stderr output for `nosetest` interface
# try:
# sys.stderr.write('\x1b[A')
# except:
# pass
sys.stderr.write('\rTest default kwargs ... ')
@with_setup(pretest, posttest)
def test_iterate_over_csv_rows():
""" Test csv iterator """
# Create a test csv pseudo file
with closing(StringIO()) as test_csv_file:
writer = csv.writer(test_csv_file)
for _ in _range(3):
writer.writerow(['test'] * 3)
test_csv_file.seek(0)
# Test that nothing fails if we iterate over rows
reader = csv.DictReader(test_csv_file,
fieldnames=('row1', 'row2', 'row3'))
with closing(StringIO()) as our_file:
for _ in tqdm(reader, file=our_file):
pass
@with_setup(pretest, posttest)
def test_file_output():
""" Test output to arbitrary file-like objects """
with closing(StringIO()) as our_file:
for i in tqdm(_range(3), file=our_file):
if i == 1:
our_file.seek(0)
assert '0/3' in our_file.read()
@with_setup(pretest, posttest)
def test_leave_option():
""" Test `leave=True` always prints info about the last iteration """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, leave=True):
pass
our_file.seek(0)
assert '| 3/3 ' in our_file.read()
our_file.seek(0)
assert '\n' == our_file.read()[-1] # not '\r'
with closing(StringIO()) as our_file2:
for _ in tqdm(_range(3), file=our_file2, leave=False):
pass
our_file2.seek(0)
assert '| 3/3 ' not in our_file2.read()
@with_setup(pretest, posttest)
def test_trange():
""" Test trange """
with closing(StringIO()) as our_file:
for _ in trange(3, file=our_file, leave=True):
pass
our_file.seek(0)
assert '| 3/3 ' in our_file.read()
with closing(StringIO()) as our_file2:
for _ in trange(3, file=our_file2, leave=False):
pass
our_file2.seek(0)
assert '| 3/3 ' not in our_file2.read()
@with_setup(pretest, posttest)
def test_min_interval():
""" Test mininterval """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, mininterval=1e-10):
pass
our_file.seek(0)
assert " 0%| | 0/3 [00:00<" in our_file.read()
@with_setup(pretest, posttest)
def test_max_interval():
""" Test maxinterval """
total = 100
bigstep = 10
smallstep = 5
# Test without maxinterval
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with closing(StringIO()) as our_file2:
# with maxinterval but higher than loop sleep time
t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=1, maxinterval=1e-2)
cpu_timify(t, timer)
# without maxinterval
t2 = tqdm(total=total, file=our_file2, miniters=None,
mininterval=0, smoothing=1, maxinterval=None)
cpu_timify(t2, timer)
assert t.dynamic_miniters
assert t2.dynamic_miniters
# Increase 10 iterations at once
t.update(bigstep)
t2.update(bigstep)
# The next iterations should not trigger maxinterval (step 10)
for _ in _range(4):
t.update(smallstep)
t2.update(smallstep)
timer.sleep(1e-5)
t.close() # because PyPy doesn't gc immediately
t2.close() # as above
our_file2.seek(0)
assert "25%" not in our_file2.read()
our_file.seek(0)
assert "25%" not in our_file.read()
# Test with maxinterval effect
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=1, maxinterval=1e-4) as t:
cpu_timify(t, timer)
# Increase 10 iterations at once
t.update(bigstep)
# The next iterations should trigger maxinterval (step 5)
for _ in _range(4):
t.update(smallstep)
timer.sleep(1e-2)
our_file.seek(0)
assert "25%" in our_file.read()
# Test iteration based tqdm with maxinterval effect
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with tqdm(_range(total), file=our_file, miniters=None,
mininterval=1e-5, smoothing=1, maxinterval=1e-4) as t2:
cpu_timify(t2, timer)
for i in t2:
if i >= (bigstep - 1) and \
((i - (bigstep - 1)) % smallstep) == 0:
timer.sleep(1e-2)
if i >= 3 * bigstep:
break
our_file.seek(0)
assert "15%" in our_file.read()
# Test different behavior with and without mininterval
timer = DiscreteTimer()
total = 1000
mininterval = 0.1
maxinterval = 10
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
mininterval=mininterval, maxinterval=maxinterval) as tm1:
with tqdm(total=total, file=our_file, miniters=None, smoothing=1,
mininterval=0, maxinterval=maxinterval) as tm2:
cpu_timify(tm1, timer)
cpu_timify(tm2, timer)
# Fast iterations, check if dynamic_miniters triggers
timer.sleep(mininterval) # to force update for t1
tm1.update(total/2)
tm2.update(total/2)
assert int(tm1.miniters) == tm2.miniters == total/2
# Slow iterations, check different miniters if mininterval
timer.sleep(maxinterval*2)
tm1.update(total/2)
tm2.update(total/2)
res = [tm1.miniters, tm2.miniters]
assert res == [
(total/2)*mininterval/(maxinterval*2),
(total/2)*maxinterval/(maxinterval*2)
]
# Same with iterable based tqdm
timer1 = DiscreteTimer() # need 2 timers for each bar because zip not work
timer2 = DiscreteTimer()
total = 100
mininterval = 0.1
maxinterval = 10
with closing(StringIO()) as our_file:
t1 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
mininterval=mininterval, maxinterval=maxinterval)
t2 = tqdm(_range(total), file=our_file, miniters=None, smoothing=1,
mininterval=0, maxinterval=maxinterval)
cpu_timify(t1, timer1)
cpu_timify(t2, timer2)
for i in t1:
if i == ((total/2)-2):
timer1.sleep(mininterval)
if i == (total-1):
timer1.sleep(maxinterval*2)
for i in t2:
if i == ((total/2)-2):
timer2.sleep(mininterval)
if i == (total-1):
timer2.sleep(maxinterval*2)
assert t1.miniters == 0.255
assert t2.miniters == 0.5
t1.close()
t2.close()
@with_setup(pretest, posttest)
def test_min_iters():
""" Test miniters """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, leave=True, miniters=4):
our_file.write('blank\n')
our_file.seek(0)
assert '\nblank\nblank\n' in our_file.read()
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, leave=True, miniters=1):
our_file.write('blank\n')
our_file.seek(0)
# assume automatic mininterval = 0 means intermediate output
assert '| 3/3 ' in our_file.read()
@with_setup(pretest, posttest)
def test_dynamic_min_iters():
""" Test purely dynamic miniters (and manual updates and __del__) """
with closing(StringIO()) as our_file:
total = 10
t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=1)
t.update()
# Increase 3 iterations
t.update(3)
# The next two iterations should be skipped because of dynamic_miniters
t.update()
t.update()
# The third iteration should be displayed
t.update()
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters
t.__del__() # simulate immediate del gc
assert ' 0%| | 0/10 [00:00<' in out
assert '40%' in out
assert '50%' not in out
assert '60%' not in out
assert '70%' in out
# Check with smoothing=0, miniters should be set to max update seen so far
with closing(StringIO()) as our_file:
total = 10
t = tqdm(total=total, file=our_file, miniters=None, mininterval=0,
smoothing=0)
t.update()
t.update(2)
t.update(5) # this should be stored as miniters
t.update(1)
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters and not t.smoothing
assert t.miniters == 5
t.close()
# Check iterable based tqdm
with closing(StringIO()) as our_file:
t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
smoothing=0.5)
for _ in t:
pass
assert t.dynamic_miniters
# No smoothing
with closing(StringIO()) as our_file:
t = tqdm(_range(10), file=our_file, miniters=None, mininterval=None,
smoothing=0)
for _ in t:
pass
assert t.dynamic_miniters
# No dynamic_miniters (miniters is fixed manually)
with closing(StringIO()) as our_file:
t = tqdm(_range(10), file=our_file, miniters=1, mininterval=None)
for _ in t:
pass
assert not t.dynamic_miniters
@with_setup(pretest, posttest)
def test_big_min_interval():
""" Test large mininterval """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(2), file=our_file, mininterval=1E10):
pass
our_file.seek(0)
assert '50%' not in our_file.read()
with closing(StringIO()) as our_file:
with tqdm(_range(2), file=our_file, mininterval=1E10) as t:
t.update()
t.update()
our_file.seek(0)
assert '50%' not in our_file.read()
@with_setup(pretest, posttest)
def test_smoothed_dynamic_min_iters():
""" Test smoothed dynamic miniters """
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
with tqdm(total=100, file=our_file, miniters=None, mininterval=0,
smoothing=0.5, maxinterval=0) as t:
cpu_timify(t, timer)
# Increase 10 iterations at once
t.update(10)
# The next iterations should be partially skipped
for _ in _range(2):
t.update(4)
for _ in _range(20):
t.update()
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters
assert ' 0%| | 0/100 [00:00<' in out
assert '10%' in out
assert '14%' not in out
assert '18%' in out
assert '20%' not in out
assert '25%' in out
assert '30%' not in out
assert '32%' in out
@with_setup(pretest, posttest)
def test_smoothed_dynamic_min_iters_with_min_interval():
""" Test smoothed dynamic miniters with mininterval """
timer = DiscreteTimer()
# In this test, `miniters` should gradually decline
total = 100
with closing(StringIO()) as our_file:
# Test manual updating tqdm
with tqdm(total=total, file=our_file, miniters=None, mininterval=1e-3,
smoothing=1, maxinterval=0) as t:
cpu_timify(t, timer)
t.update(10)
timer.sleep(1e-2)
for _ in _range(4):
t.update()
timer.sleep(1e-2)
our_file.seek(0)
out = our_file.read()
assert t.dynamic_miniters
with closing(StringIO()) as our_file:
# Test iteration-based tqdm
with tqdm(_range(total), file=our_file, miniters=None,
mininterval=0.01, smoothing=1, maxinterval=0) as t2:
cpu_timify(t2, timer)
for i in t2:
if i >= 10:
timer.sleep(0.1)
if i >= 14:
break
our_file.seek(0)
out2 = our_file.read()
assert t.dynamic_miniters
assert ' 0%| | 0/100 [00:00<' in out
assert '11%' in out and '11%' in out2
# assert '12%' not in out and '12%' in out2
assert '13%' in out and '13%' in out2
assert '14%' in out and '14%' in out2
@with_setup(pretest, posttest)
def test_disable():
""" Test disable """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, disable=True):
pass
our_file.seek(0)
assert our_file.read() == ''
with closing(StringIO()) as our_file:
progressbar = tqdm(total=3, file=our_file, miniters=1, disable=True)
progressbar.update(3)
progressbar.close()
our_file.seek(0)
assert our_file.read() == ''
@with_setup(pretest, posttest)
def test_unit():
""" Test SI unit prefix """
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), file=our_file, miniters=1, unit="bytes"):
pass
our_file.seek(0)
assert 'bytes/s' in our_file.read()
@with_setup(pretest, posttest)
def test_ascii():
""" Test ascii/unicode bar """
# Test ascii autodetection
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, ascii=None) as t:
assert t.ascii # TODO: this may fail in the future
# Test ascii bar
with closing(StringIO()) as our_file:
for _ in tqdm(_range(3), total=15, file=our_file, miniters=1,
mininterval=0, ascii=True):
pass
our_file.seek(0)
res = our_file.read().strip("\r").split("\r")
assert '7%|6' in res[1]
assert '13%|#3' in res[2]
assert '20%|##' in res[3]
# Test unicode bar
with closing(UnicodeIO()) as our_file:
with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t:
for _ in _range(3):
t.update()
our_file.seek(0)
res = our_file.read().strip("\r").split("\r")
assert "7%|\u258b" in res[1]
assert "13%|\u2588\u258e" in res[2]
assert "20%|\u2588\u2588" in res[3]
@with_setup(pretest, posttest)
def test_update():
""" Test manual creation and updates """
with closing(StringIO()) as our_file:
with tqdm(total=2, file=our_file, miniters=1, mininterval=0) \
as progressbar:
assert len(progressbar) == 2
progressbar.update(2)
our_file.seek(0)
assert '| 2/2' in our_file.read()
progressbar.desc = 'dynamically notify of 4 increments in total'
progressbar.total = 4
try:
progressbar.update(-10)
except ValueError as e:
if str(e) != "n (-10) cannot be negative":
raise
progressbar.update() # should default to +1
else:
raise ValueError("Should not support negative updates")
our_file.seek(0)
res = our_file.read()
assert '| 3/4 ' in res
assert 'dynamically notify of 4 increments in total' in res
@with_setup(pretest, posttest)
def test_close():
""" Test manual creation and closure and n_instances """
# With `leave` option
with closing(StringIO()) as our_file:
progressbar = tqdm(total=3, file=our_file, miniters=10)
progressbar.update(3)
assert '| 3/3 ' not in our_file.getvalue() # Should be blank
assert len(tqdm._instances) == 1
progressbar.close()
assert len(tqdm._instances) == 0
assert '| 3/3 ' in our_file.getvalue()
# Without `leave` option
with closing(StringIO()) as our_file:
progressbar = tqdm(total=3, file=our_file, miniters=10, leave=False)
progressbar.update(3)
progressbar.close()
assert '| 3/3 ' not in our_file.getvalue() # Should be blank
# With all updates
with closing(StringIO()) as our_file:
assert len(tqdm._instances) == 0
with tqdm(total=3, file=our_file, miniters=0, mininterval=0,
leave=True) as progressbar:
assert len(tqdm._instances) == 1
progressbar.update(3)
res = our_file.getvalue()
assert '| 3/3 ' in res # Should be blank
# close() called
assert len(tqdm._instances) == 0
our_file.seek(0)
exres = res + '\n'
if exres != our_file.read():
our_file.seek(0)
raise AssertionError("\nExpected:\n{0}\nGot:{1}\n".format(
exres, our_file.read()))
# Closing after the output stream has closed
with closing(StringIO()) as our_file:
t = tqdm(total=2, file=our_file)
t.update()
t.update()
t.close()
@with_setup(pretest, posttest)
def test_smoothing():
""" Test exponential weighted average smoothing """
timer = DiscreteTimer()
# -- Test disabling smoothing
with closing(StringIO()) as our_file:
with tqdm(_range(3), file=our_file, smoothing=None, leave=True) as t:
cpu_timify(t, timer)
for _ in t:
pass
our_file.seek(0)
assert '| 3/3 ' in our_file.read()
# -- Test smoothing
# Compile the regex to find the rate
# 1st case: no smoothing (only use average)
with closing(StringIO()) as our_file2:
with closing(StringIO()) as our_file:
t = tqdm(_range(3), file=our_file2, smoothing=None, leave=True,
miniters=1, mininterval=0)
cpu_timify(t, timer)
with tqdm(_range(3), file=our_file, smoothing=None, leave=True,
miniters=1, mininterval=0) as t2:
cpu_timify(t2, timer)
for i in t2:
# Sleep more for first iteration and
# see how quickly rate is updated
if i == 0:
timer.sleep(0.01)
else:
# Need to sleep in all iterations
# to calculate smoothed rate
# (else delta_t is 0!)
timer.sleep(0.001)
t.update()
n_old = len(tqdm._instances)
t.close()
assert len(tqdm._instances) == n_old - 1
# Get result for iter-based bar
a = progressbar_rate(get_bar(our_file.getvalue(), 3))
# Get result for manually updated bar
a2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
# 2nd case: use max smoothing (= instant rate)
with closing(StringIO()) as our_file2:
with closing(StringIO()) as our_file:
t = tqdm(_range(3), file=our_file2, smoothing=1, leave=True,
miniters=1, mininterval=0)
cpu_timify(t, timer)
with tqdm(_range(3), file=our_file, smoothing=1, leave=True,
miniters=1, mininterval=0) as t2:
cpu_timify(t2, timer)
for i in t2:
if i == 0:
timer.sleep(0.01)
else:
timer.sleep(0.001)
t.update()
t.close()
# Get result for iter-based bar
b = progressbar_rate(get_bar(our_file.getvalue(), 3))
# Get result for manually updated bar
b2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
# 3rd case: use medium smoothing
with closing(StringIO()) as our_file2:
with closing(StringIO()) as our_file:
t = tqdm(_range(3), file=our_file2, smoothing=0.5, leave=True,
miniters=1, mininterval=0)
cpu_timify(t, timer)
t2 = tqdm(_range(3), file=our_file, smoothing=0.5, leave=True,
miniters=1, mininterval=0)
cpu_timify(t2, timer)
for i in t2:
if i == 0:
timer.sleep(0.01)
else:
timer.sleep(0.001)
t.update()
t2.close()
t.close()
# Get result for iter-based bar
c = progressbar_rate(get_bar(our_file.getvalue(), 3))
# Get result for manually updated bar
c2 = progressbar_rate(get_bar(our_file2.getvalue(), 3))
# Check that medium smoothing's rate is between no and max smoothing rates
assert a < c < b
assert a2 < c2 < b2
@with_setup(pretest, posttest)
def test_deprecated_nested():
""" Test nested progress bars """
if nt_and_no_colorama:
raise SkipTest
# TODO: test degradation on windows without colorama?
# Artificially test nested loop printing
# Without leave
our_file = StringIO()
try:
tqdm(total=2, file=our_file, nested=True)
except TqdmDeprecationWarning:
if """`nested` is deprecated and automated.\
Use position instead for manual control.""" not in our_file.getvalue():
raise
else:
raise DeprecationError("Should not allow nested kwarg")
@with_setup(pretest, posttest)
def test_bar_format():
""" Test custom bar formatting """
with closing(StringIO()) as our_file:
bar_format = r'{l_bar}{bar}|{n_fmt}/{total_fmt}-{n}/{total}{percentage}{rate}{rate_fmt}{elapsed}{remaining}' # NOQA
for _ in trange(2, file=our_file, leave=True, bar_format=bar_format):
pass
out = our_file.getvalue()
assert "\r 0%| |0/2-0/20.0None?it/s00:00?\r" in out
# Test unicode string auto conversion
with closing(StringIO()) as our_file:
bar_format = r'hello world'
with tqdm(ascii=False, bar_format=bar_format, file=our_file) as t:
assert isinstance(t.bar_format, _unicode)
@with_setup(pretest, posttest)
def test_unpause():
""" Test unpause """
timer = DiscreteTimer()
with closing(StringIO()) as our_file:
t = trange(10, file=our_file, leave=True, mininterval=0)
cpu_timify(t, timer)
timer.sleep(0.01)
t.update()
timer.sleep(0.01)
t.update()
timer.sleep(0.1) # longer wait time
t.unpause()
timer.sleep(0.01)
t.update()
timer.sleep(0.01)
t.update()
t.close()
r_before = progressbar_rate(get_bar(our_file.getvalue(), 2))
r_after = progressbar_rate(get_bar(our_file.getvalue(), 3))
assert r_before == r_after
@with_setup(pretest, posttest)
def test_position():
""" Test positioned progress bars """
if nt_and_no_colorama:
raise SkipTest
# Artificially test nested loop printing
# Without leave
our_file = StringIO()
t = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos2 bar', leave=False, position=2)
t.update()
t.close()
our_file.seek(0)
out = our_file.read()
res = [m[0] for m in RE_pos.findall(out)]
exres = ['\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\r ',
'\x1b[A\x1b[A']
if res != exres:
raise AssertionError("\nExpected:\n{0}\nGot:\n{1}\nRaw:\n{2}\n".format(
str(exres), str(res), str([out])))
# Test iteration-based tqdm positioning
our_file = StringIO()
for _ in trange(2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos0 bar', position=0):
for _ in trange(2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos1 bar', position=1):
for _ in trange(2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos2 bar', position=2):
pass
our_file.seek(0)
out = our_file.read()
res = [m[0] for m in RE_pos.findall(out)]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 50%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 100%',
'\x1b[A\n\x1b[A\rpos0 bar: 50%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 50%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\n\x1b[A\x1b[A\n\rpos1 bar: 100%',
'\x1b[A\n\x1b[A\rpos0 bar: 100%',
'\n']
if res != exres:
raise AssertionError("\nExpected:\n{0}\nGot:\n{1}\nRaw:\n{2}\n".format(
str(exres), str(res), str([out])))
# Test manual tqdm positioning
our_file = StringIO()
t1 = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos0 bar', position=0)
t2 = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos1 bar', position=1)
t3 = tqdm(total=2, file=our_file, miniters=1, mininterval=0,
maxinterval=0, desc='pos2 bar', position=2)
for _ in _range(2):
t1.update()
t3.update()
t2.update()
our_file.seek(0)
out = our_file.read()
res = [m[0] for m in RE_pos.findall(out)]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\rpos0 bar: 50%',
'\n\n\rpos2 bar: 50%',
'\x1b[A\x1b[A\n\rpos1 bar: 50%',
'\x1b[A\rpos0 bar: 100%',
'\n\n\rpos2 bar: 100%',
'\x1b[A\x1b[A\n\rpos1 bar: 100%',
'\x1b[A']
if res != exres:
raise AssertionError("\nExpected:\n{0}\nGot:\n{1}\nRaw:\n{2}\n".format(
str(exres), str(res), str([out])))
t1.close()
t2.close()
t3.close()
    # Test auto repositioning of bars when a bar is prematurely closed
# tqdm._instances.clear() # reset number of instances
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar', mininterval=0)
t2 = tqdm(total=10, file=our_file, desc='pos1 bar', mininterval=0)
t3 = tqdm(total=10, file=our_file, desc='pos2 bar', mininterval=0)
res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A']
if res != exres:
raise AssertionError(
"\nExpected:\n{0}\nGot:\n{1}\n".format(
str(exres), str(res)))
t2.close()
t4 = tqdm(total=10, file=our_file, desc='pos3 bar', mininterval=0)
t1.update(1)
t3.update(1)
t4.update(1)
res = [m[0] for m in RE_pos.findall(our_file.getvalue())]
exres = ['\rpos0 bar: 0%',
'\n\rpos1 bar: 0%',
'\x1b[A\n\n\rpos2 bar: 0%',
'\x1b[A\x1b[A\n\x1b[A\n\n\rpos3 bar: 0%',
'\x1b[A\x1b[A\rpos0 bar: 10%',
'\n\rpos2 bar: 10%',
'\x1b[A\n\n\rpos3 bar: 10%',
'\x1b[A\x1b[A']
if res != exres:
raise AssertionError(
"\nExpected:\n{0}\nGot:\n{1}\n".format(
str(exres), str(res)))
t4.close()
t3.close()
t1.close()
@with_setup(pretest, posttest)
def test_set_description():
""" Test set description """
with closing(StringIO()) as our_file:
with tqdm(desc='Hello', file=our_file) as t:
assert t.desc == 'Hello: '
t.set_description('World')
assert t.desc == 'World: '
t.set_description()
assert t.desc == ''
@with_setup(pretest, posttest)
def test_deprecated_gui():
""" Test internal GUI properties """
# Check: StatusPrinter iff gui is disabled
with closing(StringIO()) as our_file:
t = tqdm(total=2, gui=True, file=our_file, miniters=1, mininterval=0)
assert not hasattr(t, "sp")
try:
t.update(1)
except TqdmDeprecationWarning as e:
if 'Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`' \
not in our_file.getvalue():
raise
else:
raise DeprecationError('Should not allow manual gui=True without'
' overriding __iter__() and update()')
finally:
t._instances.clear()
# t.close()
# len(tqdm._instances) += 1 # undo the close() decrement
t = tqdm(_range(3), gui=True, file=our_file,
miniters=1, mininterval=0)
try:
for _ in t:
pass
except TqdmDeprecationWarning as e:
if 'Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`' \
not in our_file.getvalue():
raise e
else:
raise DeprecationError('Should not allow manual gui=True without'
' overriding __iter__() and update()')
finally:
t._instances.clear()
# t.close()
# len(tqdm._instances) += 1 # undo the close() decrement
with tqdm(total=1, gui=False, file=our_file) as t:
assert hasattr(t, "sp")
@with_setup(pretest, posttest)
def test_cmp():
""" Test comparison functions """
with closing(StringIO()) as our_file:
t0 = tqdm(total=10, file=our_file)
t1 = tqdm(total=10, file=our_file)
t2 = tqdm(total=10, file=our_file)
assert t0 < t1
assert t2 >= t0
assert t0 <= t2
t3 = tqdm(total=10, file=our_file)
t4 = tqdm(total=10, file=our_file)
t5 = tqdm(total=10, file=our_file)
t5.close()
t6 = tqdm(total=10, file=our_file)
assert t3 != t4
assert t3 > t2
assert t5 == t6
t6.close()
t4.close()
t3.close()
t2.close()
t1.close()
t0.close()
@with_setup(pretest, posttest)
def test_repr():
""" Test representation """
with closing(StringIO()) as our_file:
with tqdm(total=10, ascii=True, file=our_file) as t:
assert str(t) == ' 0%| | 0/10 [00:00<?, ?it/s]'
@with_setup(pretest, posttest)
def test_clear():
""" Test clearing bar display """
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}')
t2 = trange(10, file=our_file, desc='pos1 bar',
bar_format='{l_bar}')
before = squash_ctrlchars(our_file.getvalue())
t2.clear()
t1.clear()
after = squash_ctrlchars(our_file.getvalue())
t1.close()
t2.close()
assert before == ['pos0 bar: 0%|', 'pos1 bar: 0%|']
assert after == ['', '']
@with_setup(pretest, posttest)
def test_clear_disabled():
""" Test clearing bar display """
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
bar_format='{l_bar}') as t:
t.clear()
assert our_file.getvalue() == ''
@with_setup(pretest, posttest)
def test_refresh():
""" Test refresh bar display """
with closing(StringIO()) as our_file:
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}', mininterval=999, miniters=999)
t2 = tqdm(total=10, file=our_file, desc='pos1 bar',
bar_format='{l_bar}', mininterval=999, miniters=999)
t1.update()
t2.update()
before = squash_ctrlchars(our_file.getvalue())
t1.refresh()
t2.refresh()
after = squash_ctrlchars(our_file.getvalue())
t1.close()
t2.close()
# Check that refreshing indeed forced the display to use realtime state
assert before == [u'pos0 bar: 0%|', u'pos1 bar: 0%|']
assert after == [u'pos0 bar: 10%|', u'pos1 bar: 10%|']
@with_setup(pretest, posttest)
def test_disabled_refresh():
""" Test refresh bar display """
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar', disable=True,
bar_format='{l_bar}', mininterval=999, miniters=999) as t:
t.update()
t.refresh()
assert our_file.getvalue() == ''
@with_setup(pretest, posttest)
def test_write():
""" Test write messages """
s = "Hello world"
with closing(StringIO()) as our_file:
# Change format to keep only left part w/o bar and it/s rate
t1 = tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t2 = trange(10, file=our_file, desc='pos1 bar', bar_format='{l_bar}',
mininterval=0, miniters=1)
t3 = tqdm(total=10, file=our_file, desc='pos2 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
t2.update()
t3.update()
before = our_file.getvalue()
# Write msg and see if bars are correctly redrawn below the msg
t1.write(s, file=our_file) # call as an instance method
tqdm.write(s, file=our_file) # call as a class method
after = our_file.getvalue()
t1.close()
t2.close()
t3.close()
before_squashed = squash_ctrlchars(before)
after_squashed = squash_ctrlchars(after)
assert after_squashed == [s, s] + before_squashed
# Check that no bar clearing if different file
with closing(StringIO()) as our_file_bar:
with closing(StringIO()) as our_file_write:
t1 = tqdm(total=10, file=our_file_bar, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
before_bar = our_file_bar.getvalue()
tqdm.write(s, file=our_file_write)
after_bar = our_file_bar.getvalue()
t1.close()
assert before_bar == after_bar
# Test stdout/stderr anti-mixup strategy
# Backup stdout/stderr
stde = sys.stderr
stdo = sys.stdout
# Mock stdout/stderr
with closing(StringIO()) as our_stderr:
with closing(StringIO()) as our_stdout:
sys.stderr = our_stderr
sys.stdout = our_stdout
t1 = tqdm(total=10, file=sys.stderr, desc='pos0 bar',
bar_format='{l_bar}', mininterval=0, miniters=1)
t1.update()
before_err = sys.stderr.getvalue()
before_out = sys.stdout.getvalue()
tqdm.write(s, file=sys.stdout)
after_err = sys.stderr.getvalue()
after_out = sys.stdout.getvalue()
t1.close()
assert before_err == '\rpos0 bar: 0%|\rpos0 bar: 10%|'
assert before_out == ''
after_err_res = [m[0] for m in RE_pos.findall(after_err)]
assert after_err_res == [u'\rpos0 bar: 0%',
u'\rpos0 bar: 10%',
u'\r ',
u'\r\r ',
u'\rpos0 bar: 10%']
assert after_out == s + '\n'
# Restore stdout and stderr
sys.stderr = stde
sys.stdout = stdo
@with_setup(pretest, posttest)
def test_len():
"""Test advance len (numpy array shape)"""
try:
import numpy as np
except:
raise SkipTest
with closing(StringIO()) as f:
with tqdm(np.zeros((3, 4)), file=f) as t:
assert len(t) == 3
@with_setup(pretest, posttest)
def test_autodisable_disable():
"""Test autodisable will disable on non-TTY"""
with closing(StringIO()) as our_file:
with tqdm(total=10, disable=None, file=our_file) as t:
t.update(3)
assert our_file.getvalue() == ''
@with_setup(pretest, posttest)
def test_autodisable_enable():
"""Test autodisable will not disable on TTY"""
with closing(StringIO()) as our_file:
setattr(our_file, "isatty", lambda: True)
with tqdm(total=10, disable=None, file=our_file) as t:
t.update()
assert our_file.getvalue() != ''
@with_setup(pretest, posttest)
def test_deprecation_exception():
def test_TqdmDeprecationWarning():
with closing(StringIO()) as our_file:
raise (TqdmDeprecationWarning('Test!',
fp_write=getattr(our_file, 'write',
sys.stderr.write)))
def test_TqdmDeprecationWarning_nofpwrite():
raise (TqdmDeprecationWarning('Test!', fp_write=None))
assert_raises(TqdmDeprecationWarning, test_TqdmDeprecationWarning)
assert_raises(Exception, test_TqdmDeprecationWarning_nofpwrite)
@with_setup(pretest, posttest)
def test_monitoring_thread():
# Note: should fix miniters for these tests, else with dynamic_miniters
# it's too complicated to handle with monitoring update and maxinterval...
maxinterval = 10
# 1- Configure and test the thread alone
# Setup a discrete timer
timer = DiscreteTimer()
TMonitor._time = timer.time
# And a fake sleeper
sleeper = FakeSleep(timer)
TMonitor._sleep = sleeper.sleep
# And a fake tqdm
class fake_tqdm(object):
_instances = []
    # Instantiate the monitor
monitor = TMonitor(fake_tqdm, maxinterval)
# Test if alive, then killed
assert monitor.report()
monitor.exit()
timer.sleep(maxinterval*2) # need to go out of the sleep to die
assert not monitor.report()
    # assert not monitor.is_alive()  # not working for an unknown reason; the thread is not killed
del monitor
# 2- Test for real with a tqdm instance that takes too long
total = 1000
# Setup a discrete timer
timer = DiscreteTimer()
# And a fake sleeper
sleeper = FakeSleep(timer)
# Setup TMonitor to use the timer
TMonitor._time = timer.time
TMonitor._sleep = sleeper.sleep
# Set monitor interval
tqdm.monitor_interval = maxinterval
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=500,
mininterval=0.1, maxinterval=maxinterval) as t:
cpu_timify(t, timer)
# Do a lot of iterations in a small timeframe
# (smaller than monitor interval)
timer.sleep(maxinterval/2) # monitor won't wake up
t.update(500)
# check that our fixed miniters is still there
assert t.miniters == 500
# Then do 1 it after monitor interval, so that monitor kicks in
timer.sleep(maxinterval*2)
t.update(1)
# Wait for the monitor to get out of sleep's loop and update tqdm..
timeend = timer.time()
while not (t.monitor.woken >= timeend and t.miniters == 1):
timer.sleep(1) # Force monitor to wake up if it woken too soon
sleep(0.000001) # sleep to allow interrupt (instead of pass)
assert t.miniters == 1 # check that monitor corrected miniters
            # Note: at this point, there may be a race condition: the monitor saved the
            # current woken time, but timer.sleep() may happen just before the monitor
            # sleeps. To work around that, either sleep here or increase time in a loop
            # to ensure that the monitor wakes up at some point.
# Try again but already at miniters = 1 so nothing will be done
timer.sleep(maxinterval*2)
t.update(2)
timeend = timer.time()
while not (t.monitor.woken >= timeend):
timer.sleep(1) # Force monitor to wake up if it woken too soon
sleep(0.000001)
# Wait for the monitor to get out of sleep's loop and update tqdm..
assert t.miniters == 1 # check that monitor corrected miniters
# 3- Check that class var monitor is deleted if no instance left
assert tqdm.monitor is None
# 4- Test on multiple bars, one not needing miniters adjustment
total = 1000
# Setup a discrete timer
timer = DiscreteTimer()
# And a fake sleeper
sleeper = FakeSleep(timer)
# Setup TMonitor to use the timer
TMonitor._time = timer.time
TMonitor._sleep = sleeper.sleep
with closing(StringIO()) as our_file:
with tqdm(total=total, file=our_file, miniters=500,
mininterval=0.1, maxinterval=maxinterval) as t1:
# Set high maxinterval for t2 so monitor does not need to adjust it
with tqdm(total=total, file=our_file, miniters=500,
mininterval=0.1, maxinterval=1E5) as t2:
cpu_timify(t1, timer)
cpu_timify(t2, timer)
# Do a lot of iterations in a small timeframe
timer.sleep(5)
t1.update(500)
t2.update(500)
assert t1.miniters == 500
assert t2.miniters == 500
# Then do 1 it after monitor interval, so that monitor kicks in
timer.sleep(maxinterval*2)
t1.update(1)
t2.update(1)
# Wait for the monitor to get out of sleep and update tqdm
timeend = timer.time()
                while not (t1.monitor.woken >= timeend and t1.miniters == 1):
timer.sleep(1)
sleep(0.000001)
assert t1.miniters == 1 # check that monitor corrected miniters
assert t2.miniters == 500 # check that t2 was not adjusted
@with_setup(pretest, posttest)
def test_postfix():
"""Test postfix"""
postfix = {'float': 0.321034, 'gen': 543, 'str': 'h', 'lst': [2]}
    postfix_order = (('w', 'w'), ('a', 0))  # no need for an OrderedDict, a tuple of pairs is fine
expected = ['float=0.321', 'gen=543', 'lst=[2]', 'str=h']
expected_order = ['w=w', 'a=0', 'float=0.321', 'gen=543', 'lst=[2]', 'str=h']
# Test postfix set at init
with closing(StringIO()) as our_file:
with tqdm(total=10, file=our_file, desc='pos0 bar',
bar_format='{r_bar}', postfix=postfix) as t1:
t1.refresh()
out = our_file.getvalue()
# Test postfix set after init
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos1 bar',
bar_format='{r_bar}', postfix=None) as t2:
t2.set_postfix(**postfix)
t2.refresh()
out2 = our_file.getvalue()
# Order of items in dict may change, so need a loop to check per item
for res in expected:
assert res in out
assert res in out2
# Test postfix set after init and with ordered dict
with closing(StringIO()) as our_file:
with trange(10, file=our_file, desc='pos2 bar',
bar_format='{r_bar}', postfix=None) as t3:
t3.set_postfix(postfix_order, **postfix)
t3.refresh()
out3 = our_file.getvalue()
out3 = out3[1:-1].split(', ')[3:]
assert out3 == expected_order
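

# Hedged usage sketch of the set_postfix behaviour exercised above (kept as a
# comment so the test module is unchanged at import time; exact rendering can
# vary slightly between tqdm versions):
#
#   from tqdm import tqdm
#   with tqdm(total=10, bar_format='{r_bar}') as t:
#       t.set_postfix(loss=0.321034, step=543)  # rendered as "loss=0.321, step=543"
#       t.refresh()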
|
py | 1a51f2cb715e6e7995e23e426d401a56cb1b4aa5 | from google.cloud import storage
import googleapiclient.discovery
import shutil
import os
import time
from .function import port_open, post_slack
from .libraries import extra_libraries, important_libraries
from herpetologist import check_type
import subprocess
import cloudpickle
from typing import Callable, List
additional_command = [
'gsutil cp gs://general-bucket/dask.zip dask.zip',
'unzip dask.zip',
'worker_size=1 name=a project=a zone=a expired=99999 docker-compose -f docker-compose.yaml up --build',
]
dask_network = {
'allowed': [{'IPProtocol': 'tcp', 'ports': ['8787', '8786']}],
'description': '',
'direction': 'INGRESS',
'kind': 'compute#firewall',
'name': 'dask-network',
'priority': 1000.0,
'sourceRanges': ['0.0.0.0/0'],
'targetTags': ['dask'],
}
@check_type
def build_image(
project: str,
zone: str,
bucket_name: str,
image_name: str,
family: str,
instance_name: str = 'build-dask-instance',
source_image: dict = {
'project': 'ubuntu-os-cloud',
'family': 'ubuntu-1804-lts',
},
storage_image: str = 'asia-southeast1',
webhook_function: Callable = post_slack,
validate_webhook: bool = True,
additional_libraries: List[str] = extra_libraries,
install_bash: str = None,
dockerfile: str = None,
**kwargs,
):
"""
Parameters
----------
project: str
project id
zone: str
bucket_name: str
bucket name to upload dask code, can be private.
image_name: str
image name for dask bootloader.
family: str
family name for built image
instance_name: str (default='build-dask-instance')
Start-up instance to build the image
source_image: dict (default={'project': 'ubuntu-os-cloud', 'family': 'ubuntu-1804-lts'})
Source image to start the instance for building the image
storage_image: str, (default='asia-southeast1')
storage location for dask image.
    webhook_function: Callable, (default=post_slack)
        Callable used to send an alert during graceful delete; default is post_slack.
    validate_webhook: bool, (default=True)
        if True, will validate `webhook_function`.
        Setting this to False is not recommended because `webhook_function` is used during graceful delete.
    additional_libraries: List[str], (default=extra_libraries)
        add more libraries from PyPI. This is necessary if you want the dask cluster to have the required libraries.
    install_bash: str, (default=None)
        File path to a custom start-up script used to build the disk image.
    dockerfile: str, (default=None)
        File path to a custom Dockerfile used to build the docker image.
**kwargs:
Keyword arguments to pass to webhook_function.
"""
def nested_post(msg):
return webhook_function(msg, **kwargs)
if validate_webhook:
if nested_post('Testing from ondemand-dask') != 200:
            raise Exception('`webhook_function` must return 200.')
compute = googleapiclient.discovery.build('compute', 'v1')
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
this_dir = os.path.dirname(__file__)
pkl = os.path.join(this_dir, 'image', 'dask', 'post.pkl')
with open(pkl, 'wb') as fopen:
cloudpickle.dump(nested_post, fopen)
reqs = important_libraries + additional_libraries
reqs = list(set(reqs))
req = os.path.join(this_dir, 'image', 'dask', 'requirements.txt')
with open(req, 'w') as fopen:
fopen.write('\n'.join(reqs))
if dockerfile:
with open(dockerfile, 'r') as fopen:
script = fopen.read()
dockerfile_path = os.path.join(this_dir, 'image', 'dask', 'Dockerfile')
with open(dockerfile_path, 'w') as fopen:
fopen.write(script)
image = os.path.join(this_dir, 'image')
shutil.make_archive('dask', 'zip', image)
blob = bucket.blob('dask.zip')
blob.upload_from_filename('dask.zip')
os.remove('dask.zip')
image_response = (
compute.images()
.getFromFamily(**source_image)
.execute()
)
source_disk_image = image_response['selfLink']
try:
print('Creating `dask-network` firewall rule.')
compute.firewalls().insert(
project = project, body = dask_network
).execute()
print('Done.')
except:
print('`dask-network` exists.')
machine_type = f'zones/{zone}/machineTypes/n1-standard-1'
if install_bash is None:
install_bash = 'install.sh'
install_bash = os.path.join(this_dir, install_bash)
startup_script = open(install_bash).read()
startup_script = '\n'.join(
startup_script.split('\n') + additional_command
).replace('general-bucket', bucket_name)
config = {
'name': instance_name,
'tags': {'items': ['dask']},
'machineType': machine_type,
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {'sourceImage': source_disk_image},
}
],
'networkInterfaces': [
{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
],
}
],
'serviceAccounts': [
{
'email': 'default',
'scopes': [
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/compute',
],
}
],
'metadata': {
'items': [
{'key': 'startup-script', 'value': startup_script},
{'key': 'bucket', 'value': bucket_name},
]
},
}
operation = (
compute.instances()
.insert(project = project, zone = zone, body = config)
.execute()
)
    print(f'Waiting for instance `{instance_name}` to run.')
while True:
result = (
compute.zoneOperations()
.get(project = project, zone = zone, operation = operation['name'])
.execute()
)
if result['status'] == 'DONE':
if 'error' in result:
raise Exception(result['error'])
else:
print('Done.')
break
time.sleep(1)
    print('Waiting for the IP address to check health.')
while True:
result = (
compute.instances().list(project = project, zone = zone).execute()
)
results = result['items'] if 'items' in result else None
dask = [r for r in results if r['name'] == instance_name]
if len(dask) > 0:
dask = dask[0]
ip_address = dask['networkInterfaces'][0]['accessConfigs'][0][
'natIP'
]
print(f'Got it, Public IP: {ip_address}')
break
time.sleep(2)
    print('Waiting for the Dask cluster to run.')
while True:
if port_open(ip_address, 8786) and port_open(ip_address, 8787):
print('Done.')
break
time.sleep(5)
compute = googleapiclient.discovery.build('compute', 'v1')
print(f'Deleting image `{image_name}` if exists.')
try:
compute.images().delete(project = project, image = image_name).execute()
print('Done.')
except:
pass
    # give the GCP API a rest before building the image.
time.sleep(20)
print(f'Building image `{image_name}`.')
try:
o = subprocess.check_output(
[
'gcloud',
'compute',
'images',
'create',
image_name,
'--source-disk',
instance_name,
'--source-disk-zone',
zone,
'--family',
family,
'--storage-location',
storage_image,
'--force',
],
stderr = subprocess.STDOUT,
)
print('Done.')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise
print(f'Deleting instance `{instance_name}`.')
compute = googleapiclient.discovery.build('compute', 'v1')
compute.instances().delete(
project = project, zone = zone, instance = instance_name
).execute()
print('Done.')
return True
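

# A minimal usage sketch (not part of the library): all values below are
# placeholders/assumptions about a typical call, and the import paths are
# assumed from the package layout rather than documented.
#
#   from ondemand_dask import build_image          # assumed import path
#   from ondemand_dask.function import post_slack  # assumed import path
#
#   build_image(
#       project='my-gcp-project',      # assumed GCP project id
#       zone='asia-southeast1-a',      # assumed zone
#       bucket_name='my-dask-bucket',  # assumed bucket that will hold dask.zip
#       image_name='dask-image',
#       family='dask',
#       webhook_function=post_slack,
#   )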
|
py | 1a51f2d795b53627b761ebdbdc3f8260cb11a851 | T = int(input())
for x in range(1, T + 1):
N, M, Q = map(int, input().split())
P = map(int, input().split())
R = map(int, input().split())
pages = [True] * (N + 1)
for P_i in P:
pages[P_i] = False
readers = {}
y = 0
for R_i in R:
try:
y += readers[R_i]
except KeyError:
read = sum(pages[page] for page in range(R_i, N + 1, R_i))
readers[R_i] = read
y += read
print("Case #{}: {}".format(x, y), flush = True)
|
py | 1a51f45924ac6279cee67622f1da69f909796a7f | import contextlib
import glob
import json
import os
import sys
from sqlalchemy.orm import scoped_session
import get
import db
from flask import *
app = Flask(__name__)
@contextlib.contextmanager
def working_directory(path):
prev_cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(prev_cwd)
# Get files
#with working_directory('./static/pixiv'):
# posts = glob.glob('*.png') + glob.glob('*.jpg')
dbsession = scoped_session(db.Session())
if db.isempty():
print("downloading")
get.main()
with working_directory('./static/pixiv'):
ids = glob.glob('*.json')
print(str(ids))
ids = map(lambda x : x.split('.')[0], ids)
for id in ids:
db.addpixiv(id)
print(db.session.query(db.Posts).all())
#posts = map(addition, numbers)
@app.route('/')
def hello_world():
return render_template("hello.html", posts=map(lambda x : x.file_name,db.session.query(db.Posts).all()))
@app.route('/download_pixiv', methods=['GET'])
def presentpixiv():
return """<!DOCTYPE html>
<html>
<body>
<form action="/download_pixiv" method="post" >
<label for="fname">pixiv id:</label><br>
<input type="text" id="pixiv_id" name="pixiv_id" value="20"><br>
<input type="submit" value="Submit">
</form>
<p>If you click the "Submit" button, the form-data will be sent to a page called "/download_pixiv".</p>
</body>
</html>"""
@app.route('/download_pixiv', methods=['POST'])
def downloadpixiv():
id = request.form.get('pixiv_id')
get.download(id);
db.addpixiv(id)
return "done"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080)
|
py | 1a51f475bc489f6ffe3949bc8c703fdb55093e91 |
# pylint: disable-msg=F0401
import os
import sys
from setuptools import setup, find_packages
here = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(here,
'src',
'openmdao',
'main')))
import releaseinfo
version = releaseinfo.__version__
setup(name='openmdao.main',
version=version,
description="OpenMDAO framework infrastructure",
long_description="""\
""",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
],
keywords='optimization multidisciplinary multi-disciplinary analysis',
author='',
author_email='',
url='http://openmdao.org',
license='Apache License, Version 2.0',
namespace_packages=["openmdao"],
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
package_data={
'openmdao.main.test': ['src/doubler.py',
'load_test/_macros/default',
'load_test/_settings.cfg',
'load_test/simple.py'],
'openmdao.main': ['src/openmdao/main/docs/*']
},
test_suite='nose.collector',
zip_safe=False,
install_requires=[
'argparse',
'decorator',
'mock',
'networkx<1.9.1',
'openmdao.units',
'openmdao.util',
'pycrypto',
'pyparsing',
'requests',
'setuptools',
'Sphinx',
'Traits==4.3.0',
'zope.interface',
],
extras_require={
'numpy_comps': ['numpy'],
},
entry_points="""
[console_scripts]
idle=idlelib.PyShell:main
plugin=openmdao.main.plugin:plugin
openmdao=openmdao.main.cli:openmdao
[openmdao.component]
openmdao.main.assembly.Assembly = openmdao.main.assembly:Assembly
openmdao.main.component_with_derivatives.ComponentWithDerivatives = openmdao.main.component_with_derivatives:ComponentWithDerivatives
openmdao.main.driver_uses_derivatives.DriverUsesDerivatives = openmdao.main.driver_uses_derivatives:DriverUsesDerivatives
openmdao.main.problem_formulation.ArchitectureAssembly = openmdao.main.problem_formulation:ArchitectureAssembly
openmdao.main.implicitcomp.ImplicitComponent = openmdao.main.implicitcomp:ImplicitComponent
[openmdao.variable]
openmdao.main.datatypes.any.Any = openmdao.main.datatypes.any:Any
openmdao.main.datatypes.bool.Bool = openmdao.main.datatypes.bool:Bool
openmdao.main.datatypes.complex.Complex = openmdao.main.datatypes.complex:Complex
openmdao.main.datatypes.dict.Dict = openmdao.main.datatypes.dict:Dict
openmdao.main.datatypes.enum.Enum = openmdao.main.datatypes.enum:Enum
openmdao.main.datatypes.event.Event = openmdao.main.datatypes.event:Event
openmdao.main.datatypes.file.File = openmdao.main.datatypes.file:File
openmdao.main.datatypes.float.Float = openmdao.main.datatypes.float:Float
openmdao.main.datatypes.geom.Geom = openmdao.main.datatypes.geom:Geom
openmdao.main.datatypes.instance.Base = openmdao.main.datatypes.instance:Base
openmdao.main.datatypes.instance.Instance = openmdao.main.datatypes.instance:Instance
openmdao.main.datatypes.int.Int = openmdao.main.datatypes.int:Int
openmdao.main.datatypes.list.List = openmdao.main.datatypes.list:List
openmdao.main.datatypes.slot.Slot = openmdao.main.datatypes.slot:Slot
openmdao.main.datatypes.str.Str = openmdao.main.datatypes.str:Str
openmdao.main.datatypes.uncertaindist.UncertainDistVar = openmdao.main.datatypes.uncertaindist:UncertainDistVar
openmdao.main.datatypes.vtree.VarTree = openmdao.main.datatypes.vtree:VarTree
openmdao.main.datatypes.array.Array = openmdao.main.datatypes.array:Array
""",
)
|
py | 1a51f4f38e40869e9f4a3650909e3b0b30c7496d | from ._builtin import Page, WaitPage
import random
from exp.util import Participant
from exp.payment import PaymentMethod, MethodThreeResults, MethodOneResults, MethodTwoResults
from exp.lottery import Lottery
class FinalPayoffResults(Page):
def vars_for_template(self):
experiment = Participant.get_experiment(self.player)
method_one = Participant.get_payment_one_results(self.player)
method_two = Participant.get_payment_two_results(self.player)
method_three = Participant.get_payment_three_results(self.player)
part_one_earnings = method_one.earnings + method_two.earnings
part_one_payoff = experiment.PART_ONE_WEIGHT*part_one_earnings*experiment.CONVERSION_RATE
part_two_payoff = experiment.PART_TWO_WEIGHT*method_three.earnings*experiment.CONVERSION_RATE
final_payoff = experiment.SHOW_UP_FEE + experiment.ENDOWMENT + part_one_payoff + part_two_payoff
return {
'show_up_fee': experiment.SHOW_UP_FEE,
'endowment': experiment.ENDOWMENT,
'rate': experiment.CONVERSION_RATE,
'method_1': round(method_one.earnings, 2),
'method_2': round(method_two.earnings, 2),
'method_3': round(method_three.earnings, 2),
'total_in_credits': round(part_one_earnings, 2),
'earnings_1': round(part_one_payoff, 2),
'earnings_2': round(part_two_payoff, 2),
'final_payoff': round(final_payoff, 2),
}
class MethodOneResultsPage(Page):
def vars_for_template(self):
experiment = Participant.get_experiment(self.player)
results = Participant.get_payment_one_results(self.player)
random_position = 'Left' if results.left_auction.aid == results.auction.aid else 'Right'
if results.preferred_position == experiment.phase_one.LEFT:
preferred_position = 'Left'
elif results.preferred_position == experiment.phase_one.RIGHT:
preferred_position = 'Right'
else:
preferred_position = 'Indifferent'
if results.random_signal_is_percentage:
random_signal = round(results.random_signal * 100, 2)
else:
random_signal = results.random_signal
if results.other_random_signal_is_percentage:
others_random_signal = round(results.other_random_signal * 100, 2)
else:
others_random_signal = results.other_random_signal
return {
'player_id': results.player_id,
'other_id': results.other_player_id,
'preferred_position': preferred_position,
'left_auction': results.left_auction,
'right_auction': results.right_auction,
'auction': results.auction,
'random_position': random_position,
'bid': results.bid,
'others_bid': results.other_bid,
'winner': results.lottery_won,
'signal_is_percentage': results.random_signal_is_percentage,
'signal': random_signal,
'others_signal': others_random_signal,
'others_signal_is_percentage': results.other_random_signal_is_percentage,
'low_value': results.low_value,
'high_value': results.high_value,
'low_prob': round(results.low_prob * 100, 2),
'high_prob': round(results.high_prob * 100, 2),
'high_chosen': results.high_prize_chosen,
'earnings': results.earnings,
'realized': results.realized,
'auction_type': results.auction.atype,
'low_prize_chosen': results.low_prize_chosen,
'high_prize_chosen': results.high_prize_chosen,
}
class MethodTwoResultsPage(Page):
def vars_for_template(self):
results = Participant.get_payment_two_results(self.player)
context = {
'player_id': results.player_id,
'other_id': results.other_player_id,
'cutoff_auction': results.auction,
'cutoff': results.cutoff,
'random_offer': round(results.random_offer, 2),
'offer_accepted': results.offer_accepted,
}
if not results.offer_accepted:
if results.random_signal_is_percentage:
random_signal = round(results.random_signal * 100, 2)
else:
random_signal = int(results.random_signal)
if results.other_random_signal_is_percentage:
others_random_signal = round(results.other_random_signal * 100, 2)
else:
others_random_signal = int(results.other_random_signal)
context.update({
'auction': results.auction,
'bid': results.bid,
'others_bid': results.other_bid,
'winner': results.lottery_won,
'signal': random_signal,
'others_signal': others_random_signal,
'signal_is_percentage': results.random_signal_is_percentage,
'others_signal_is_percentage': results.other_random_signal_is_percentage,
'low_value': results.low_value,
'high_value': results.high_value,
'low_prob': results.low_prob * 100,
'high_prob': results.high_prob * 100,
'high_chosen': results.high_prize_chosen,
'earnings': results.earnings,
'realized': results.realized,
'auction_type': results.auction.atype,
'low_prize_chosen': results.low_prize_chosen,
'high_prize_chosen': results.high_prize_chosen,
})
return context
class MethodThreeResultsPage(Page):
def vars_for_template(self):
results = Participant.get_payment_three_results(self.player)
context = {
'rolled_side': results.rolled_side,
'rolled_side_encoded': results.rolled_side_encoded,
'die_encoding': results.die_encoding,
'bet_color': Lottery.BET_HIGH_RED if results.bet_color == Lottery.BET_HIGH_RED else Lottery.BET_HIGH_BLUE,
'bet_high_red': Lottery.BET_HIGH_RED,
'bet_high_blue': Lottery.BET_HIGH_BLUE,
'high_value': results.high_value,
'low_value': results.low_value,
'lottery': results.lottery,
'lottery_type': results.lottery.ltype,
'cutoff': results.cutoff,
'random_cutoff': results.random_cutoff,
'play_lottery': results.play_lottery,
'num_red': results.num_red,
'num_blue': results.num_blue,
'realized_value': results.realized_value,
'earnings': results.earnings
}
return context
class ResultsWaitPage(WaitPage):
def after_all_players_arrive(self):
players = self.group.get_players()[:]
for i, player in enumerate(players):
player_id = player.participant.id_in_session
others = players[:i] + players[i + 1:]
other_player = random.choice(others)
other_id = other_player.participant.id_in_session
experiment = Participant.get_experiment(player)
other_experiment = Participant.get_experiment(other_player)
payment_method = PaymentMethod(player_id, other_id, experiment, other_experiment)
method_one_results = payment_method.method_one_payment(MethodOneResults())
method_two_results = payment_method.method_two_payment(MethodTwoResults())
method_three_results = payment_method.method_three_results(MethodThreeResults())
Participant.set_payment_one_results(player, method_one_results)
Participant.set_payment_two_results(player, method_two_results)
Participant.set_payment_three_results(player, method_three_results)
part_one_earnings = method_one_results.earnings + method_two_results.earnings
part_one_payoff = experiment.PART_ONE_WEIGHT*part_one_earnings*experiment.CONVERSION_RATE
part_two_payoff = experiment.PART_TWO_WEIGHT*method_three_results.earnings*experiment.CONVERSION_RATE
final_payoff = experiment.SHOW_UP_FEE + experiment.ENDOWMENT + part_one_payoff + part_two_payoff
player.payoff = final_payoff
player.save_results(method_one_results, method_two_results, method_three_results)
page_sequence = [
ResultsWaitPage,
MethodOneResultsPage,
MethodTwoResultsPage,
MethodThreeResultsPage,
FinalPayoffResults
]
|
py | 1a51f565c0e017c3478645a9ca7accbbb0708c7a | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
stack = []
res = []
if not root:
return []
if root.right:
stack.append((root.right,0))
if root:
stack.append((root,1))
if root.left:
stack.append((root.left,0))
while stack:
node = stack.pop()
if node[1]==0:
if node[0].right:
stack.append((node[0].right,0))
if node[0].left:
stack.append((node[0],1))
stack.append((node[0].left,0))
else:
res.append(node[0].val)
else:
res.append(node[0].val)
return res
""" recursive
if not root:
return []
res =[]
if(root.left):
res.extend(self.inorderTraversal(root.left))
res.append(root.val)
if(root.right):
res.extend(self.inorderTraversal(root.right))
return res
"""
|
py | 1a51f56e9ef0d547d513ed03e886d40057c96b93 | import requests
from variables import rapidApiKey
def ipLocation(ipAddress):
url = "https://ip-location5.p.rapidapi.com/get_geo_info"
payload = "ip="+ipAddress
headers = {
'content-type': "application/x-www-form-urlencoded",
'x-rapidapi-key': rapidApiKey,
'x-rapidapi-host': "ip-location5.p.rapidapi.com"
}
response = requests.request("POST", url, data=payload, headers=headers)
dataResponse = response.json()
# print(response.text)
neededData = ['ip','region','city','latitude','longitude']
importantData = []
for data in neededData:
importantData.append(dataResponse[data])
# print(neededData)
# print(importantData)
return response
"""
Example response body:

{
  "ip": "45.16.197.205",
  "continent": {"code": "NA", "name": "North America"},
  "country": {
    "code": "US",
    "name": "United States",
    "capital": "Washington",
    "currency": "USD",
    "phone-code": "1"
  },
  "region": "Texas",
  "city": "Richardson",
  "latitude": 32.9483,
  "longitude": -96.7299
}
"""
|
py | 1a51f5dfae4021eb39c5a560ee2bf71835182935 | class Error(Exception):
"""Generic client error."""
class UpdateError(Error):
"""DNS Update error."""
|
py | 1a51f5fd6016bd7bbef9dcb4dfa836b9df41f658 | import numpy as np
import time
#import rtlsdr
import kid_readout.equipment.rtlkid
#rtl = rtlsdr.RtlSdr()
#rtl.gain = 40.2
#rtl.center_freq = 870.840e6
#rtl.sample_rate = 1024e3
#f_ref = 871.380e6
#f_ref = 870.436e6
f_ref=991.825e6
rtl = kid_readout.equipment.rtlkid.RtlKidReadout()
rtl.rtl.gain = 40.0
rtl.rtl.sample_rate = 256e3
rtl.hittite.set_power(10.0)
rtl.hittite.on()
rtl.adjust_freq_correction()
error = rtl.measure_freq_error()
if abs(error/1e9) > 5e-6:
print "adjusting freq correction failed!"
while True:
start_time = time.time()
freq,data = rtl.do_scan(freqs=np.linspace(-8e5,3e5,500)+f_ref,level=0.0)
peak = freq[data.argmin()]#+1e3
print "peak at",peak
rtl.hittite.set_freq(peak)
rtl.rtl.center_freq = peak + 10e3
rtl.hittite.on()
time.sleep(2)
d = rtl.rtl.read_samples(2**21)
d = rtl.rtl.read_samples(2**21)
d = d[2048:]
filename = '/home/data2/rtl/%s' % (time.strftime('%Y-%m-%d_%H-%M-%S'))
np.savez(filename,data=d, time= time.time(), sample_rate=rtl.rtl.sample_rate, gain= rtl.rtl.gain,
center_freq = rtl.rtl.center_freq,sweep_freq = freq, sweep_mag = data, start_time = start_time)
print "saved in ", filename
    7/0  # raises ZeroDivisionError, so the script stops after a single pass
time.sleep(120.0) |
py | 1a51f793a349f6525af4da334f61655409e1a967 | class ExifFormat:
def __init__(self, id, name, size, short_name):
self.id = id
self.name = name
self.size = size
self.short_name = short_name # used with struct.unpack()
exif_formats = {
1: ExifFormat(1, 'unsigned byte', 1, 'B'),
2: ExifFormat(2, 'ascii string', 1, 's'),
3: ExifFormat(3, 'unsigned short', 2, 'H'),
4: ExifFormat(4, 'unsigned long', 4, 'L'),
5: ExifFormat(5, 'unsigned rational', 8, ''),
6: ExifFormat(6, 'signed byte', 1, 'b'),
7: ExifFormat(7, 'undefined', 1, 'B'), # consider `undefined` as `unsigned byte`
8: ExifFormat(8, 'signed short', 2, 'h'),
9: ExifFormat(9, 'signed long', 4, 'l'),
10: ExifFormat(10, 'signed rational', 8, ''),
11: ExifFormat(11, 'single float', 4, 'f'),
12: ExifFormat(12, 'double float', 8, 'd'),
}
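
# Hedged usage sketch showing how `short_name` and `size` are meant to feed
# struct.unpack(); the raw bytes and little-endian prefix are illustrative
# assumptions, not part of any real EXIF payload.
if __name__ == "__main__":
    import struct

    fmt = exif_formats[3]                 # unsigned short, 2 bytes, 'H'
    raw = b"\x01\x00\x02\x00"             # two little-endian uint16 values
    count = len(raw) // fmt.size
    values = struct.unpack("<" + fmt.short_name * count, raw)
    print(fmt.name, values)               # unsigned short (1, 2)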
|
py | 1a51f8b8980cd5530bf8e74e1e4f6561e21738ee | # %%
#
example = """123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i"""
def load(l):
# connections to each wire, either an integer or a formula
cn = dict()
for l in l.splitlines():
expr, output = l.split(' -> ')
cn[output] = expr
return cn
def solve_memo(cons, var):
if var in cons and isinstance(cons[var], int):
return cons[var]
v = solve1(cons, var)
cons[var] = v
print(f'{var} = {v}')
return v
def solve1(cons, var):
try:
return int(var)
except ValueError:
pass
expr = cons[var]
try:
return int(expr)
except ValueError:
pass
w = expr.split()
if len(w) == 1 and w[0] in cons:
return solve_memo(cons, w[0])
# print(w)
if w[0] == 'NOT':
assert len(w) == 2
return 65535 ^ solve_memo(cons, w[1])
op = w[1]
assert len(w) == 3
f = {'AND':
lambda x, y: x & y,
'OR': lambda x, y: x | y,
'LSHIFT': lambda x, y: (x << y) & 0xffff,
'RSHIFT': lambda x, y: (x >> y)
}
return f[op](solve_memo(cons, w[0]), solve_memo(cons, w[2]))
# %%
cons = load(example)
assert solve1(cons, 'd') == 72
assert solve1(cons, 'e') == 507
assert solve1(cons, 'f') == 492
assert solve1(cons, 'g') == 114
assert solve1(cons, 'h') == 65412
assert solve1(cons, 'i') == 65079
assert solve1(cons, 'x') == 123
assert solve1(cons, 'y') == 456
# %%
cons = load(open('../input/07.txt').read())
print(solve_memo(cons, 'a'))
# %%
# Part 2
cons = load(open('../input/07.txt').read())
cons['b'] = 16076
print(solve_memo(cons, 'a'))
# %%
|
py | 1a51f8dd6c51ff64ce5a64e0d541043a56430461 | import tensorflow as tf
from . import CustomDropout
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
class BaselineModel(tf.keras.Model):
def __init__(self, input_size, slot_size, intent_size, layer_size=128):
super(BaselineModel, self).__init__()
self.embedding = tf.keras.layers.Embedding(input_size, layer_size)
self.bilstm = tf.keras.layers.Bidirectional(CuDNNLSTM(layer_size, return_sequences=True, return_state=True))
self.dropout = CustomDropout.CustomDropout(0.5)
self.intent_out = tf.keras.layers.Dense(intent_size, activation=None)
self.slot_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(slot_size, activation=None))
@tf.function
def call(self, inputs, sequence_length, isTraining=True):
x = self.embedding(inputs)
state_outputs, forward_h, forward_c, backward_h, backward_c = self.bilstm(x)
state_outputs = self.dropout(state_outputs, isTraining)
forward_h = self.dropout(forward_h, isTraining)
backward_h = self.dropout(backward_h, isTraining)
final_state = tf.keras.layers.concatenate([forward_h, backward_h])
intent = self.intent_out(final_state)
slots = self.slot_out(state_outputs)
outputs = [slots, intent]
return outputs |
py | 1a51f8e3a57938a6f4e6b16197bf5532d0c377d7 | #!/usr/bin/env python
"""Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy import fft
from numpy import pi
class TestFFTShift(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
class TestFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
class TestRFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN(TestCase):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
if __name__ == "__main__":
run_module_suite()
|
py | 1a51f90b1a7ce65e06e191b7e319b5a74ad63772 | import sys
from numpy.distutils.core import Extension, setup
__author__ = "Lars Andersen Bratholm"
__copyright__ = "Copyright 2017"
__credits__ = ["Lars Andersen Bratholm (2017) https://github.com/larsbratholm/fns"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Lars Andersen Bratholm"
__email__ = "[email protected]"
__status__ = "Alpha"
__description__ = "Furthest Neighbour Search"
__url__ = "https://github.com/larsbratholm/fns"
FORTRAN = "f90"
# GNU (default)
COMPILER_FLAGS = ["-fopenmp", "-m64", "-march=native", "-fPIC", "-Ofast", "-ffast-math", "-funroll-loops",
"-Wno-maybe-uninitialized", "-Wno-unused-function", "-Wno-cpp"]#, "-fcheck=all"]
LINKER_FLAGS = ["-L/usr/include","-L/include","-I/usr/include","-I/include","-lgomp"]
MATH_LINKER_FLAGS = ["-lblas", "-llapack"]
# For clang without OpenMP: (i.e. most Apple/mac system)
if sys.platform == "darwin" and all(["gnu" not in arg for arg in sys.argv]):
COMPILER_FLAGS = ["-O3", "-m64", "-march=native", "-fPIC"]
LINKER_FLAGS = []
MATH_LINKER_FLAGS = ["-lblas", "-llapack"]
# Intel
if any(["intelem" in arg for arg in sys.argv]):
COMPILER_FLAGS = ["-xHost", "-O3", "-axAVX", "-qopenmp"]
LINKER_FLAGS = ["-liomp5", " -lpthread", "-lm", "-ldl"]
MATH_LINKER_FLAGS = ["-L${MKLROOT}/lib/intel64", "-lmkl_rt"]
# UNCOMMENT TO FORCE LINKING TO MKL with GNU compilers:
# LINKER_FLAGS = ["-lgomp", " -lpthread", "-lm", "-ldl"]
# MATH_LINKER_FLAGS = ["-L${MKLROOT}/lib/intel64", "-lmkl_rt"]
ext_ffn = Extension(name = 'ffn',
sources = ['fns/ffn.f90'],
extra_f90_compile_args = COMPILER_FLAGS,
extra_f77_compile_args = COMPILER_FLAGS,
extra_compile_args = COMPILER_FLAGS,
extra_link_args = LINKER_FLAGS,
language = FORTRAN,
f2py_options=['--quiet'])
# use README.md as long description
def readme():
with open('README.md') as f:
return f.read()
def setup_pepytools():
setup(
name="fns",
packages=['fns'],
# metadata
version=__version__,
author=__author__,
author_email=__email__,
platforms = 'Any',
description = __description__,
long_description = readme(),
keywords = ['Furthest Neighbour'],
classifiers = [],
url = __url__,
# set up package contents
ext_package = 'fns',
ext_modules = [
ext_ffn,
],
)
if __name__ == '__main__':
setup_pepytools()
|
py | 1a51fad3d752403245b115228efdab772f87027e | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class lsnpool_lsnip_binding(base_resource) :
"""Binding class showing the lsnip that can be bound to lsnpool."""
def __init__(self) :
self._lsnip = ""
self._poolname = ""
self.___count = 0
@property
def poolname(self) :
"""Name for the LSN pool. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN pool is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn pool1" or 'lsn pool1').<br/>Minimum length = 1<br/>Maximum length = 127."""
try :
return self._poolname
except Exception as e:
raise e
@poolname.setter
def poolname(self, poolname) :
"""Name for the LSN pool. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN pool is created. The following requirement applies only to the NetScaler CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn pool1" or 'lsn pool1').<br/>Minimum length = 1<br/>Maximum length = 127
:param poolname:
"""
try :
self._poolname = poolname
except Exception as e:
raise e
@property
def lsnip(self) :
"""IPv4 address or a range of IPv4 addresses to be used as NAT IP address(es) for LSN.
After the pool is created, these IPv4 addresses are added to the NetScaler ADC as NetScaler owned IP address of type LSN. A maximum of 4096 IP addresses can be bound to an LSN pool. An LSN IP address associated with an LSN pool cannot be shared with other LSN pools. IP addresses specified for this parameter must not already exist on the NetScaler ADC as any NetScaler owned IP addresses. In the command line interface, separate the range with a hyphen. For example: 10.102.29.30-10.102.29.189. You can later remove some or all the LSN IP addresses from the pool, and add IP addresses to the LSN pool.
.<br/>Minimum length = 1.
"""
try :
return self._lsnip
except Exception as e:
raise e
@lsnip.setter
def lsnip(self, lsnip) :
"""IPv4 address or a range of IPv4 addresses to be used as NAT IP address(es) for LSN.
After the pool is created, these IPv4 addresses are added to the NetScaler ADC as NetScaler owned IP address of type LSN. A maximum of 4096 IP addresses can be bound to an LSN pool. An LSN IP address associated with an LSN pool cannot be shared with other LSN pools. IP addresses specified for this parameter must not already exist on the NetScaler ADC as any NetScaler owned IP addresses. In the command line interface, separate the range with a hyphen. For example: 10.102.29.30-10.102.29.189. You can later remove some or all the LSN IP addresses from the pool, and add IP addresses to the LSN pool.
.<br/>Minimum length = 1
:param lsnip:
"""
try :
self._lsnip = lsnip
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(lsnpool_lsnip_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lsnpool_lsnip_binding
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.poolname is not None :
return str(self.poolname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
updateresource = lsnpool_lsnip_binding()
updateresource.poolname = resource.poolname
updateresource.lsnip = resource.lsnip
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lsnpool_lsnip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].poolname = resource[i].poolname
updateresources[i].lsnip = resource[i].lsnip
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
"""
:param client:
:param resource:
"""
try :
if resource and type(resource) is not list :
deleteresource = lsnpool_lsnip_binding()
deleteresource.poolname = resource.poolname
deleteresource.lsnip = resource.lsnip
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lsnpool_lsnip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].poolname = resource[i].poolname
deleteresources[i].lsnip = resource[i].lsnip
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, poolname) :
"""Use this API to fetch lsnpool_lsnip_binding resources.
:param service:
:param poolname:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, poolname, filter_) :
"""Use this API to fetch filtered set of lsnpool_lsnip_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param poolname:
:param filter_:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, poolname) :
"""Use this API to count lsnpool_lsnip_binding resources configued on NetScaler.
:param service:
:param poolname:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, poolname, filter_) :
"""Use this API to count the filtered set of lsnpool_lsnip_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
:param service:
:param poolname:
:param filter_:
"""
try :
obj = lsnpool_lsnip_binding()
obj.poolname = poolname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class lsnpool_lsnip_binding_response(base_response) :
""" """
def __init__(self, length=1) :
self.lsnpool_lsnip_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lsnpool_lsnip_binding = [lsnpool_lsnip_binding() for _ in range(length)]
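
# Hedged usage sketch (requires an authenticated nitro_service session, shown
# here as `client`; the pool name is a placeholder):
#
#   bindings = lsnpool_lsnip_binding.get(client, "lsn_pool1")
#   for b in (bindings or []):
#       print(b.poolname, b.lsnip)
#   print(lsnpool_lsnip_binding.count(client, "lsn_pool1"))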
|
py | 1a51fb0c45a0ca3ca5e8a2220dd8dcf241c6f9b2 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable
from tornado.util import errno_from_exception
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server::
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error, e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current():
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as current
by `make_current`, returns that instance. Otherwise returns
`IOLoop.instance()`, i.e. the main thread's `IOLoop`.
A common pattern for classes that depend on ``IOLoops`` is to use
a default argument to enable programs with multiple ``IOLoops``
but not require the argument for simpler applications::
class MyClass(object):
def __init__(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
        `make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self):
pass
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``s`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``s`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.instance().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
"""
raise NotImplementedError()
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
callback()
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)`` and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None):
super(PollIOLoop, self).initialize()
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHILD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError: # non-main thread
pass
try:
while True:
poll_timeout = _POLL_TIMEOUT
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
for callback in callbacks:
self._run_callback(callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = None
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# the timeout was cancelled
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
timeout = heapq.heappop(self._timeouts)
self._run_callback(timeout.callback)
del timeout
else:
seconds = self._timeouts[0].deadline - now
poll_timeout = min(seconds, poll_timeout)
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def add_timeout(self, deadline, callback):
timeout = _Timeout(deadline, stack_context.wrap(callback), self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if isinstance(deadline, numbers.Real):
self.deadline = deadline
elif isinstance(deadline, datetime.timedelta):
now = io_loop.time()
try:
self.deadline = now + deadline.total_seconds()
except AttributeError: # py2.6
self.deadline = now + _Timeout.timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r" % deadline)
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
@staticmethod
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
`start` must be called after the `PeriodicCallback` is created.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _run(self):
if not self._running:
return
try:
self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
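# --- Editor-added sketch (not part of Tornado itself): minimal usage of the classes above.
# It assumes a concrete IOLoop implementation is available via IOLoop.current(); the
# callback name `_tick` and the 3.5 s run time are illustrative only.
def _demo_periodic_callback():
    loop = IOLoop.current()
    def _tick():
        print("tick")  # fires roughly every callback_time milliseconds while the loop runs
    pc = PeriodicCallback(_tick, callback_time=1000, io_loop=loop)
    pc.start()
    # add_timeout accepts an absolute deadline expressed in loop.time() units.
    loop.add_timeout(loop.time() + 3.5, loop.stop)
    loop.start()
    pc.stop()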
|
py | 1a51fc1695b64abbe5fb1a25d47ed009b0e4f9f3 | import xml.etree.ElementTree as ET
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.bot import TemplateBotNode
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphBotTests(TemplateGraphTestClient):
def test_bot_name_as_attrib(self):
template = ET.fromstring("""
<template>
<bot name="somebot">sometext</bot>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateBotNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(self._client_context), "somebot")
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(self._client_context), "sometext")
def test_bot_name_as_child(self):
template = ET.fromstring("""
<template>
<bot><name>somebot</name>sometext</bot>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
set_node = ast.children[0]
self.assertIsNotNone(set_node)
self.assertIsInstance(set_node, TemplateBotNode)
self.assertIsNotNone(set_node.name)
self.assertIsInstance(set_node.name, TemplateNode)
self.assertEqual(set_node.name.resolve(self._client_context), "somebot")
self.assertEqual(len(set_node.children), 1)
self.assertEqual(set_node.children[0].resolve(self._client_context), "sometext")
def test_invalid_bot_no_name(self):
template = ET.fromstring("""
<template>
<bot></bot>
</template>
""")
with self.assertRaises(ParserException):
ast = self._graph.parse_template_expression(template)
|
py | 1a51fc354717b90a1239468170527346b03bd25b | import cv2
import numpy as np
from pynput.keyboard import Key, Controller
import time
cap = cv2.VideoCapture(0)
facecascade = cv2.CascadeClassifier(r'C:\Users\yashd\Documents\Python\cv2_Face\haarcascade_frontalface_default.xml')
keyboard = Controller()
time.sleep(7)
while True:
ret,img = cap.read()
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = facecascade.detectMultiScale(gray,1.3,5)
cv2.line(img, (200, 0), (200, 700), (0, 255, 0), 2)
cv2.line(img, (410, 0), (410, 700), (0, 255, 0), 2)
cv2.line(img, (0, 200), (700, 200), (0, 255, 0), 2)
cv2.putText(img, "Copyright: Yash", (340, 470), cv2.QT_FONT_NORMAL, 1, (255, 0, 0), 2)
#keyboard.press('p')
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
cv2.putText(img,"Active",(30,30),cv2.QT_FONT_NORMAL,1,(0,0,255),2)
print(x,y,x+w,y+h)
#keyboard.release(('p'))
if x<100:
print("Right Arrow Key")
keyboard.press(Key.right)
if x>400:
print("Left Arrow Key")
keyboard.press(Key.left)
if y<190:
print("Up Arrow Key")
keyboard.press(Key.up)
cv2.imshow("Image",img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows() |
py | 1a51fd6d32fc0d86207f571375962dfa4cf77ff7 | """
Wrapper for AWS Lambda.
"""
from __future__ import absolute_import
import traceback
import time
import functools
import warnings
from uuid import uuid4
import epsagon.trace
import epsagon.runners.aws_lambda
import epsagon.triggers.aws_lambda
import epsagon.wrappers.python_function
import epsagon.runners.python_function
from epsagon.common import EpsagonWarning
from .. import constants
STEP_DICT_NAME = 'Epsagon'
def lambda_wrapper(func):
"""Epsagon's Lambda wrapper."""
@functools.wraps(func)
def _lambda_wrapper(*args, **kwargs):
epsagon.trace.tracer.prepare()
try:
event, context = args
except ValueError:
# This can happen when someone manually calls handler without
# parameters / sends kwargs. In such case we ignore this trace.
return func(*args, **kwargs)
try:
runner = epsagon.runners.aws_lambda.LambdaRunner(
time.time(),
context
)
# pylint: disable=W0703
except Exception as exception:
# Regress to python runner.
warnings.warn(
'Lambda context is invalid, using simple python wrapper',
EpsagonWarning
)
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc()
)
return epsagon.wrappers.python_function.wrap_python_function(
func,
args,
kwargs
)
constants.COLD_START = False
try:
epsagon.trace.tracer.add_event(
epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(
time.time(),
event,
context
)
)
# pylint: disable=W0703
except Exception as exception:
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc(),
additional_data={'event': event}
)
try:
result = func(*args, **kwargs)
return result
except Exception as exception:
runner.set_exception(exception, traceback.format_exc())
raise
finally:
epsagon.trace.tracer.add_event(runner)
epsagon.trace.tracer.send_traces()
return _lambda_wrapper
def step_lambda_wrapper(func):
"""Epsagon's Step Lambda wrapper."""
@functools.wraps(func)
def _lambda_wrapper(*args, **kwargs):
epsagon.trace.tracer.prepare()
event, context = args
try:
runner = epsagon.runners.aws_lambda.StepLambdaRunner(
time.time(),
context
)
# pylint: disable=W0703
except Exception as exception:
# Regress to python runner.
warnings.warn(
'Lambda context is invalid, using simple python wrapper',
EpsagonWarning
)
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc()
)
return epsagon.wrappers.python_function.wrap_python_function(
func,
args,
kwargs
)
constants.COLD_START = False
try:
epsagon.trace.tracer.add_event(
epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory(
time.time(),
event,
context
)
)
# pylint: disable=W0703
except Exception as exception:
epsagon.trace.tracer.add_exception(
exception,
traceback.format_exc()
)
try:
result = func(*args, **kwargs)
# Add step functions data only if the result is a dictionary.
if isinstance(result, dict):
# If the step functions data is not present, then this is the
# First step.
if STEP_DICT_NAME not in event:
steps_dict = {'id': str(uuid4()), 'step_num': 0}
# Otherwise, just advance the steps number by one.
else:
steps_dict = event[STEP_DICT_NAME]
steps_dict['step_num'] += 1
result[STEP_DICT_NAME] = steps_dict
runner.add_step_data(steps_dict)
return result
except Exception as exception:
runner.set_exception(exception, traceback.format_exc())
raise
finally:
epsagon.trace.tracer.add_event(runner)
epsagon.trace.tracer.send_traces()
return _lambda_wrapper
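# --- Editor-added sketch: how these decorators are intended to wrap an AWS Lambda entry
# point. The handler name and return payload below are illustrative, not part of the
# original module; step_lambda_wrapper is applied the same way to Step Functions handlers.
@lambda_wrapper
def _example_handler(event, context):
    return {'statusCode': 200, 'body': 'ok'}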
|
py | 1a51fd741eef16f65f2d5e2ee5cb39048f4f926d | import numpy as np
import cv2
import math
r2t=180/np.pi
t2r=np.pi/180
x=0
y=0
font=cv2.FONT_HERSHEY_PLAIN
fourcc=cv2.VideoWriter_fourcc('M','J','P','G')
e=False
t=False
s=False
def f(img):
blur=cv2.GaussianBlur(img,(9,9),0)
hsv=cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)
low=np.array([20,80,130])
high=np.array([39,255,255])
mask=cv2.inRange(hsv,low,high)
edges=cv2.Canny(mask,75,150)
lines=cv2.HoughLinesP(edges,1,np.pi/180,40,maxLineGap=30,minLineLength = 10)
if lines is not None:
thetas=[]
x=0
y=0
for line in lines:
x1, y1, x2, y2 = line[0]
x+=x1+x2
y+=y1+y2
if(x2-x1)==0:
th=np.pi/2
else:
th=math.atan((y2-y1)/(x2-x1))*r2t
thetas.append(th)
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 5)
M=max(thetas)
m=min(thetas)
x=int(.5*x/len(thetas))
y=int(.5*y/len(thetas))
if (M-m)>5:
theta=round(180-(M-m),2)
cv2.putText(img,"angle = "+str(theta),(x,y+5),font,2,(255,255,0),2,cv2.LINE_AA)
else:
cv2.putText(img,"unable to resolve",(x,y+5),font,2,(0,0,255),2,cv2.LINE_AA)
#else:
#cv2.putText(img,"unable to resolve",(x,y),font,1,(0,0,255),1,cv2.LINE_AA)
cv2.putText(img,"Sayansree Paria's Algorithm",(0,15),font,1,(0,255,0),1,cv2.LINE_AA)
cv2.imshow("lines",img)
if t:
cv2.putText(mask,"Sayansree Paria's Algorithm",(0,15),font,1,200,1,cv2.LINE_AA)
cv2.imshow("thresholding",mask)
if e:
cv2.putText(edges,"Sayansree Paria's Algorithm",(0,15),font,1,200,1,cv2.LINE_AA)
cv2.imshow("edges detection",edges)
fram=5#int(input("enter frame rate"))
delay=1000//fram
#path=input("enter file path")
path="robosub_path-1.mp4"
cap=cv2.VideoCapture(path)
key=input("press s if you want to save the results")
print("during video press 'e' to show edge detection and 't' for thresholding")
out=0
if key=="s":
s=True
out=cv2.VideoWriter("output.avi",fourcc,fram,(int(cap.get(3)),int(cap.get(4))))
while(cap.isOpened()):
ret,img=cap.read()
if ret:
f(img)
if s:
out.write(img)
key=cv2.waitKey(delay-15)&0xff
if(key==27 or key==13):
break
elif key==ord('e'):
e=True
elif key==ord('t'):
t=True
else:
break
cap.release()
if s:
out.release()
cv2.destroyAllWindows()
quit()
|
py | 1a51fd9147c1534ddb65b7ea255109fee7971aa8 | # cellfreesim functions.py
import matplotlib.pyplot as plt
import tellurium as te # 2.1.5
import numpy as np
import sys
import progressbar as progressbar
import emcee
from scipy.integrate import odeint
from matplotlib.ticker import AutoMinorLocator  # used by plotFormat's minor tick locators
#-----------------------------------------------------------------------------
# Define parsing functions
#-----------------------------------------------------------------------------
def parseODEs(r,odes):
# Parsing of ODEs into cython code
# Split odes into channels and derivatives (normally these separated by two spaces)
parts = odes.split('\n\n')
channels = parts[0].lstrip('\n').split('\n')
derivs = parts[1].rstrip('\n').split('\n')
channeldict = {}
for channel in channels:
channeldict[channel.split(' = ')[0]] = channel.split(' = ')[1]
derivdict = {}
for deriv in derivs:
derivdict[deriv.split(' = ')[0]] = deriv.split(' = ')[1]
print(derivdict)
print(channeldict)
speciesIds = []
derivatives = []
for derivkey in derivdict.keys():
speciesIds.append(derivkey[1:-3]) # Hardcoded d/dt
channelkey = derivdict[derivkey]
if channelkey[0]=='-':
derivatives.append('-'+channeldict[channelkey[1:]])
else:
derivatives.append(channeldict[channelkey])
speciesValues = r.getFloatingSpeciesConcentrations()
parameterIds = r.getGlobalParameterIds()
parameterValues = [value for value in r.getGlobalParameterValues()]
return(speciesIds, speciesValues, parameterIds, parameterValues, derivatives)
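# --- Editor-added sketch: the layout parseODEs expects from the `odes` string (a
# roadrunner/Antimony-style ODE listing). Channel definitions and derivative definitions
# are separated by a blank line, and derivative names follow the d<species>/dt pattern
# assumed by derivkey[1:-3]. The species/reaction names below are illustrative only.
_EXAMPLE_ODES = (
    "\n"
    "v1 = k1*A\n"
    "v2 = k2*B\n"
    "\n"
    "dA/dt = -v1\n"
    "dB/dt = v1\n"
)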
def writeCython(speciesIds,speciesValues,parameterIds,parameterValues,derivatives,OUTPATH,FILENAME):
# Generate cython script
with open(OUTPATH+FILENAME,'w') as file:
file.writelines('# Cythonized ODEs from antimony file\n\n')
# Imports
file.writelines('import numpy as np\n')
file.writelines('cimport numpy as np\n')
file.writelines('cimport cython\n')
file.writelines('from libc.math cimport exp\n')
file.writelines('from libc.math cimport sqrt\n')
file.writelines('from libc.math cimport pow\n\n')
# Model definition
file.writelines('@cython.cdivision(True) # Zero-division checking turned off\n')
file.writelines('@cython.boundscheck(False) # Bounds checking turned off for this function\n')
file.writelines('@cython.wraparound(False) # turn off negative index wrapping for entire function\n')
file.writelines('def model(np.ndarray[np.float64_t,ndim=1] y, double t, np.ndarray[np.float64_t,ndim=1] params):\n\n')
# Species
for i in range(len(speciesIds)):
file.write('\tcdef double '+speciesIds[i]+' = y['+str(i)+']\n')
file.writelines('\n')
for i in range(len(parameterIds)):
file.write('\tcdef double '+parameterIds[i]+' = params['+str(i)+']\n')
file.writelines('\n')
file.writelines('\tcdef double derivs['+str(len(derivatives))+']\n')
file.writelines('\n')
file.writelines('\tderivs = [\n')
for i in range(len(derivatives)-1):
file.write('\t'+derivatives[i]+',\n')
file.write('\t'+derivatives[len(derivatives)-1]+']\n')
file.write('\treturn derivs\n')
file.close()
#-----------------------------------------------------------------------------
# Define experiment functions
#-----------------------------------------------------------------------------
def chemostatExperiment(chemostatinputs):
# Run chemostat experiment
dilutiontimes = chemostatinputs['dilutiontimes']
y0 = chemostatinputs['y0']
params = chemostatinputs['params']
INTERVAL_IMG = chemostatinputs['interval_img']
DIL_FRAC = chemostatinputs['dil_frac']
INDEX_REFRESH = chemostatinputs['index_refresh']
CONC_REFRESH = chemostatinputs['conc_refresh']
cymodel = chemostatinputs['cymodel']
ndim = y0.shape[0]
# 1. From dilutiontimes, calculate time interval between dilution steps
interval_dil=np.zeros(dilutiontimes.shape[0]-1)
for i in range(dilutiontimes.shape[0]-1):
interval_dil[i] = dilutiontimes[i+1]-dilutiontimes[i]
    # 2. Calculate number of steps in each time interval (depends on imaging frequency)
    nStepsPerRun = np.zeros(dilutiontimes.shape[0]-1, dtype=int)
for i in range(dilutiontimes.shape[0]-1):
nStepsPerRun[i]=int(interval_dil[i]/INTERVAL_IMG)+1
# 3. Put time intervals together to make total time axis, and initialise output array
timeProgram = {}
timeTotal = np.zeros(1)
for i in range(len(dilutiontimes)-1):
timeProgram[i] = np.linspace(0,interval_dil[i],nStepsPerRun[i])
timeTotal=np.concatenate([timeTotal,timeProgram[i][1:]+timeTotal[-1]],axis=0)
dataout=np.zeros(len(timeTotal)*ndim).reshape(len(timeTotal),ndim)
indStart = int(1)
yTransfer = y0
for i in range(len(dilutiontimes)-1):
psoln = odeint(cymodel, yTransfer, timeProgram[i], args=(params,),mxstep=5000000) # scipy-Fortran RK4 solver
indStop= indStart+int(nStepsPerRun[i]-1)
dataout[indStart:indStop,:]=psoln[1:]
indStart = indStop
# Dilute everything and refresh appropriate species
yTransfer = psoln[-1,:]*(1-DIL_FRAC)
j=0
for ind in INDEX_REFRESH:
yTransfer[ind] = yTransfer[ind]+DIL_FRAC*CONC_REFRESH[j]
j+=1
dataout[0,:] = y0
return(timeTotal, dataout)
#-----------------------------------------------------------------------------
# Define MCMC functions
#-----------------------------------------------------------------------------
def normalprior(param,mu,sigma):
'''Log of the normal prior'''
return np.log( 1.0 / (np.sqrt(2*np.pi)*sigma) ) - 0.5*(param - mu)**2/sigma**2
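# --- Editor-added sketch: normalprior is simply the log-pdf of a normal distribution,
# so it can be sanity-checked against scipy.stats.norm (scipy is already a dependency of
# this module via scipy.integrate). The values below are arbitrary.
def _demo_normalprior_check(x=0.3, mu=0.0, sigma=1.0):
    from scipy.stats import norm
    return normalprior(x, mu, sigma), norm.logpdf(x, loc=mu, scale=sigma)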
def lnlike(theta, chemostatinputs, mcmc_inputs):
''' Log likelihood, the function to maximise'''
lnparams = [j for j in theta]
# This has to be an array if more than one parameter, otherwise just a float
paramstmp = chemostatinputs['params']
for i in range(len(mcmc_inputs['paramchannels'])):
paramstmp[int(mcmc_inputs['paramchannels'][i])] = np.exp(lnparams[i])
chemostatinputs['params'] = paramstmp
timeTotal,sim_dataout = chemostatExperiment(chemostatinputs)
X0s = []
for j in range(len(mcmc_inputs['datachannels'])):
y_obs = mcmc_inputs['data'][j]
y_model = sim_dataout[:,int(mcmc_inputs['datachannels'][j])]
INVS2 = 1/mcmc_inputs['yerr']**2
X0=-0.5*(np.sum((y_obs-y_model)**2*INVS2+np.log(2*np.pi*1/INVS2)))
X0s.append(X0)
return sum(X0s)
def lnprior(theta, mcmc_inputs):
''' Log priors'''
lnparams = [j for j in theta]
priorMus = mcmc_inputs['priorMuSigma'][0]
priorSigmas = mcmc_inputs['priorMuSigma'][1]
log_PRs = []
for j in range(len(lnparams)):
log_PRs.append(normalprior(lnparams[j],priorMus[j],priorSigmas[j]))
return np.sum(log_PRs)
def lnprob(theta,chemostatinputs, mcmc_inputs):
''' Log posterior'''
lp = lnprior(theta, mcmc_inputs)
if not np.isfinite(lp):
return -np.inf
# How to properly account for NaNs turning up in lnlike?
# This is NOT the way to do it:
if np.isnan(lp + lnlike(theta,chemostatinputs, mcmc_inputs)):
return -np.inf
else:
return lp + lnlike(theta,chemostatinputs, mcmc_inputs)
def gelman_rubin(chain):
''' Gelman-Rubin diagnostic for one walker across all parameters. This value should tend to 1. '''
ssq=np.var(chain,axis=1,ddof=1)
W=np.mean(ssq,axis=0)
Tb=np.mean(chain,axis=1)
Tbb=np.mean(Tb,axis=0)
m=chain.shape[0]*1.0
n=chain.shape[1]*1.0
B=n/(m-1)*np.sum((Tbb-Tb)**2,axis=0)
varT=(n-1)/n*W+1/n*B
Rhat=np.sqrt(varT/W)
return Rhat
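# --- Editor-added sketch: quick self-check of gelman_rubin on a synthetic chain of shape
# (n_walkers, n_steps, n_params) drawn from a single distribution, so every Rhat should
# come out close to 1. The sizes below are arbitrary.
def _demo_gelman_rubin(n_walkers=4, n_steps=500, n_params=2, seed=0):
    rng = np.random.RandomState(seed)
    chain = rng.normal(size=(n_walkers, n_steps, n_params))
    return gelman_rubin(chain)  # array of per-parameter Rhat values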
def runMCMC(speciesValues,parameterValues,TMAX,INTERVAL_DIL,INTERVAL_IMG,
dilutiontimes,DIL_FRAC,INDEX_REFRESH,CONC_REFRESH,model,
DATACHANNELS,PARAMCHANNELS,PMUSIGMA,SIGMA,
iterations,nwalkers,nDimParams,threads,pos,tburn):
y0 = np.array([float(value) for value in speciesValues])
params = np.array([float(value) for value in parameterValues])
if TMAX%INTERVAL_DIL!=0:
print('\n')
print('TMAX is not divisible by INTERVAL_DIL!\n')
print('Inaccurate results expected!\n')
if INTERVAL_DIL%INTERVAL_IMG!=0:
print('\n')
print('INTERVAL_DIL is not divisible by INTERVAL_IMG!\n')
print('Inaccurate results expected!\n')
cinputkeys = ['dilutiontimes', 'y0', 'params', 'interval_img', 'dil_frac', 'index_refresh', 'conc_refresh','cymodel']
cinputvalues = [dilutiontimes, y0, params, INTERVAL_IMG, DIL_FRAC, INDEX_REFRESH, CONC_REFRESH, model.model]
chemostatinputs = dict(zip(cinputkeys,cinputvalues))
# Generate silico data
timeTotal,dataout = chemostatExperiment(chemostatinputs)
mcmc_inputs = {}
mcmc_inputs['data'] = [dataout[:,channel] for channel in DATACHANNELS]
mcmc_inputs['priorMuSigma'] = PMUSIGMA
mcmc_inputs['yerr'] = SIGMA
mcmc_inputs['datachannels'] = DATACHANNELS
mcmc_inputs['paramchannels'] = PARAMCHANNELS
##### The rest of the code is automatic #####
sampler=emcee.EnsembleSampler(nwalkers,nDimParams,lnprob,a=2,args=([chemostatinputs, mcmc_inputs]),threads=threads)
### Start MCMC
iter=iterations
bar=progressbar.ProgressBar(max_value=iter)
for i, result in enumerate(sampler.sample(pos, iterations=iter)):
bar.update(i)
### Finish MCMC
samples=sampler.chain[:,:,:].reshape((-1,nDimParams)) # shape = (nsteps, nDimParams)
samplesnoburn=sampler.chain[:,tburn:,:].reshape((-1,nDimParams)) # shape = (nsteps, nDimParams)
return(samplesnoburn, chemostatinputs, mcmc_inputs, timeTotal, dataout)
# Plotting
def plotInitialise(figW,figH):
plt.close("all")
figure_options={'figsize':(figW,figH)} # figure size in inches. A4=11.7x8.3, A5=8.3,5.8
font_options={'size':'14','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
def plotFormat(ax,xlabel=False,
ylabel=False,
xlim=False,
ylim=False,
title=False,
xticks=False,
yticks=False,
logx=False,
logy=False,
logxy=False,
symlogx=False,
legend=False):
# Set titles and labels
if title!=False:
ax.set_title(title)
if xlabel!=False:
ax.set_xlabel(xlabel, labelpad=12)
if ylabel!=False:
ax.set_ylabel(ylabel, labelpad=12)
# Set axis limits
if xlim!=False:
ax.set_xlim(xlim)
if ylim!=False:
ax.set_ylim(ylim)
# Set tick values
if xticks!=False:
ax.set_xticks(xticks)
if yticks!=False:
ax.set_yticks(yticks)
# Set line thicknesses
#ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%1.e"))
#ax.axhline(linewidth=2, color='k')
#ax.axvline(linewidth=2, color='k')
ax.spines['bottom'].set_linewidth(2)
ax.spines['top'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.spines['right'].set_linewidth(2)
# Set ticks
if logx==True:
ax.set_xscale("log")
elif logy==True:
ax.set_yscale("log")
elif logxy==True:
ax.set_xscale("log")
ax.set_yscale("log")
elif symlogx==True:
ax.set_xscale("symlog",linthreshx=1e-4)
ax.set_yscale("log")
else:
minorLocatorx=AutoMinorLocator(2) # Number of minor intervals per major interval
minorLocatory=AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minorLocatorx)
ax.yaxis.set_minor_locator(minorLocatory)
ax.tick_params(which='major', width=2, length=8, pad=9,direction='in',top='on',right='on')
ax.tick_params(which='minor', width=2, length=4, pad=9,direction='in',top='on',right='on')
if legend==True:
ax.legend(loc='upper right', fontsize=14,numpoints=1) ### Default 'best'
|
py | 1a51fd9bcbdb9d190ae67df1566364f56598a0a8 | #
# Copyright (C) 2018 The Android Open Source Project
# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# refer to tanh_v1_dynamic.mod.py for the structure
# This adds reshape as the first op in a model and
# returns the output of reshape, which is a dynamic tensor
# Sample UnPack model, axis = 0
import dynamic_tensor
model = Model()
model_input_shape = [6, 3, 4]
axis = Int32Scalar("axis", 1)
num_splits = Int32Scalar("num_splits", 3)
out1 = Output("output1", "TENSOR_FLOAT32", "{6, 4}")
out2 = Output("output2", "TENSOR_FLOAT32", "{6, 4}")
out3 = Output("output3", "TENSOR_FLOAT32", "{6, 4}")
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, model_input_shape, "TENSOR_FLOAT32")
test_node_input = dynamic_layer.getTestNodeInput()
model.Operation("UNPACK_EX", test_node_input, num_splits, axis).To([out1, out2, out3])
# write UNPACK_EX test. input is `test_input`
# note output shape is used by expected output's shape
out1_data = [0.3, 1.0, 2.0, 3.0,
4.0, 5.5, 6.3, 7.2,
8.22, 9.8, 10.3, 11.0,
12.22, 13.2, 14.44, 15.32,
16.55, 17.33, 18.1, 19.0,
20.32, 21.9, 22.1, 23.22]
out2_data = [24.22, 25.1, 26.0, 27.12,
28.32, 29.11, 30.0, 31.98,
32.99, 33.11, 34.1, 35.123,
36.21, 37.22, 38.23, 39.76,
40.1, 41.43, 42.34, 43.1,
44.123, 45.43, 46.1, 47.1]
out3_data = [48.0, 49.76, 50.0, 51.1,
52.22, 53.12, 54.1, 55.5,
56.5, 57.4, 58.1, 59.23,
60.2, 61.12, 62.11, 63.34,
64.11, 65.1, 66.43, 67.1,
68.1, 69.34, 70.11, 71.45]
model_input_data = [0.3, 1.0, 2.0, 3.0,
24.22, 25.1, 26.0, 27.12,
48.0, 49.76, 50.0, 51.1,
4.0, 5.5, 6.3, 7.2,
28.32, 29.11, 30.0, 31.98,
52.22, 53.12, 54.1, 55.5,
8.22, 9.8, 10.3, 11.0,
32.99, 33.11, 34.1, 35.123,
56.5, 57.4, 58.1, 59.23,
12.22, 13.2, 14.44, 15.32,
36.21, 37.22, 38.23, 39.76,
60.2, 61.12, 62.11, 63.34,
16.55, 17.33, 18.1, 19.0,
40.1, 41.43, 42.34, 43.1,
64.11, 65.1, 66.43, 67.1,
20.32, 21.9, 22.1, 23.22,
44.123, 45.43, 46.1, 47.1,
68.1, 69.34, 70.11, 71.45]
Example(
{
dynamic_layer.getModelInput() : model_input_data,
dynamic_layer.getShapeInput() : model_input_shape,
out1 : out1_data,
out2 : out2_data,
out3 : out3_data,
})
|
py | 1a51fff39185a414ada427b660dae452640855cf | import sys
import time
import numpy as np
from c_lib import c_functions
#c_functions = ctypes.CDLL('lib/c_functions.cpython-37m-darwin.so')
#c_functions.c_a_n.argtype = (ctypes.c_int)
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from scipy.stats import cauchy
def compute():
# This main function has been adapted to use as many c functions where possible!
# Other functions are gathered in the pxd declarator
# Parameters
c_functions.printing(23)
start = time.time()
pars = {}
# Implementation does not work as there is a segmentation fault in the c file...
# Takes about 0.3462 seconds so not the fastest...
if __name__ == '__main__':
compute()
|
py | 1a51fffa9b3802216f8c90f375a20593823f9bb8 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SimpleNNModel(nn.Module):
def __init__(self):
super(SimpleNNModel, self).__init__()
self.layer1 = nn.Linear(32*32, 512)
self.layer2 = nn.Linear(512, 32)
self.layer3 = nn.Linear(32, 10)
self.loss_fn = nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
def forward(self, inputs):
batch_size = inputs.shape[0]
# Convert images to grayscale
x = (inputs[:, 0, :, :] + inputs[:, 1, :, :] + inputs[:, 2, :, :])/3
# Flatten the image
x = x.view(batch_size, -1)
h = F.relu(self.layer1(x))
h = F.relu(self.layer2(h))
out = F.softmax(self.layer3(h), dim=1)
return out
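# --- Editor-added sketch: a forward pass on a random batch. The 3x32x32 input shape is an
# assumption inferred from the 32*32 first layer and the RGB-to-grayscale averaging above
# (CIFAR-10-sized images); the batch size of 8 is arbitrary.
if __name__ == "__main__":
    model = SimpleNNModel()
    dummy = torch.randn(8, 3, 32, 32)
    probs = model(dummy)  # shape (8, 10); rows sum to ~1 because of the softmax
    print(probs.shape)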
|
py | 1a52006d0a3345ce0bf3dea3408b382a5f991e24 | """Testing with training_pipeline_3d"""
from typing import Tuple
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Conv3D, Dense, Dropout, Flatten, MaxPooling3D
from tensorflow.keras.layers import BatchNormalization #added 20210226
from modules.cnn.training_models.training_pipeline_3d import pipeline_from_command_line
def create_3D_cnn_model(input_shape: Tuple[int, int, int, int]):
print(1111, input_shape)
model = Sequential()
#reduced number of filters 32 --> 16 20210303
model.add(
Conv3D(16, kernel_size=(5, 5, 5), strides=(1, 1, 1), padding='same',
activation="relu", input_shape=input_shape)#kernel 3 --> 5 20210226
)
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))
#added layer with 32 filters 20210303
model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))#input_shape, padding removed
#added layer with 32 filters 20210303
model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))#input_shape, padding removed
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))#strides added 20210303
model.add(BatchNormalization())#added 20210303
#added layer with 64 filters 20210303
model.add(Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))#input_shape, padding removed
model.add(Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))#input_shape, padding removed
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))#strides added 20210303
model.add(BatchNormalization())#added 20210303
model.add(Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))#strides added 20210303
model.add(BatchNormalization())#added 20210303
model.add(Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))#strides added 20210303
model.add(BatchNormalization())#added 20210226
# #added layer with 512 filters 20210303
model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation="relu", padding='same'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))#strides added 20210303
model.add(BatchNormalization())#added 20210226
model.add(Flatten())
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.5))#20210305 0.3 --> 0.5
model.add(Dense(1024, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(1024, activation="relu"))
model.add(Dense(4, activation="softmax"))
print(model.output_shape)
model.compile(
loss="categorical_crossentropy",
# optimizer=optimizers.adam(lr=1e-5),
optimizer='adam',
metrics=["accuracy"],
)
return model
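# --- Editor-added sketch: builds the network standalone with an assumed single-channel
# 64x64x64 volume (64 is the smallest power-of-two edge that survives the six stride-2
# poolings above); the real input shape is supplied by the training pipeline below.
def _demo_build_model():
    return create_3D_cnn_model((64, 64, 64, 1))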
if __name__ == "__main__":
pipeline_from_command_line(create_3D_cnn_model, rgb=False)
|
py | 1a52008431776feae3033b22d451a1e14fb79c67 | #!/usr/bin/env python3
from __future__ import print_function
# Get all of the videos for the shows.
PBSKIDS_SHOWS = "http://pbskids.org/pbsk/video/api/getShows/"
PBSKIDS_VIDS = "http://pbskids.org/pbsk/video/api/getVideos/"
VIDEOS_CACHE = "videos.json"
# Find the shows. Write to shows.json.
import requests
import json
# Create list of all videos
all_videos = list()
# Start index
start_index = 1
# To bootstrap the while loop.
total_videos = start_index + 1
# While our start index is less than the total number of videos
while start_index < total_videos:
# Only get full episodes. Can be of type 'Episode' or 'Clip'.
resp = requests.get(PBSKIDS_VIDS, params={'type': 'Episode',
'status': 'available',
'startindex': start_index} )
video_list = json.loads(resp.text)
# These should always be the same since we are requesting the startindex
if video_list["start"] != start_index:
raise("Returned start index doesn't match requested @ startIdx={}".format(start_index))
# Get total number of videos.
total_videos = video_list["matched"]
print("Grabbing video data: {}-{} of {}".format(video_list["start"],
video_list["end"],
video_list["matched"]))
start_index = video_list["end"] + 1
for item in video_list["items"]:
all_videos.append(item)
# Write to cache.
with open(VIDEOS_CACHE, 'w') as outfile:
json.dump(all_videos, outfile)
# Reload from the file, just to be sure.
with open(VIDEOS_CACHE, 'r') as infile:
all_videos2 = json.load(infile)
assert(all_videos == all_videos2)
print("Writing Cache: "+VIDEOS_CACHE) |
py | 1a5201d513c190666eb5586554e84ac07b94edf8 | from django.conf.urls import include, url
urlpatterns = [
url(r'^', include('example_app.urls')),
] |
py | 1a5201f6471bfb9e7f4b8c09f08e4163c45ff341 | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.alert_category_name import AlertCategoryName
from cohesity_management_sdk.models.notification_rule import NotificationRule
from cohesity_management_sdk.models.alert_resolution import AlertResolution
from cohesity_management_sdk.models.alert_metadata import AlertMetadata
from cohesity_management_sdk.models.alert import Alert
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class AlertsController(BaseController):
"""A Controller to access Endpoints in the cohesity_management_sdk API."""
def __init__(self, client=None, call_back=None):
super(AlertsController, self).__init__(client, call_back)
self.logger = logging.getLogger(__name__)
def get_alert_categories(self):
"""Does a GET request to /public/alertCategories.
Returns alert categories in Cohesity cluster.
Returns:
list of AlertCategoryName: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_alert_categories called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_alert_categories.')
_url_path = '/public/alertCategories'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_alert_categories.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_alert_categories.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_alert_categories')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_alert_categories.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AlertCategoryName.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_notification_rules(self):
"""Does a GET request to /public/alertNotificationRules.
Gets all alert notification rules containing criteria to deliver
notification
to delivery targets such as email addresses, invoking external apis
etc.
Returns:
list of NotificationRule: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_notification_rules called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_notification_rules.')
_url_path = '/public/alertNotificationRules'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_notification_rules.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_notification_rules.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_notification_rules')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_notification_rules.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, NotificationRule.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_notification_rule(self,
body=None):
"""Does a POST request to /public/alertNotificationRules.
Creates a new notification rule with provided delivery targets such as
email
addresses and external apis.
Args:
body (NotificationRule, optional): Create Notification Rule
argument.
Returns:
NotificationRule: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_notification_rule called.')
# Prepare query URL
self.logger.info('Preparing query URL for create_notification_rule.')
_url_path = '/public/alertNotificationRules'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_notification_rule.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_notification_rule.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_notification_rule')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_notification_rule.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, NotificationRule.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def update_notification_rule(self):
"""Does a PUT request to /public/alertNotificationRules.
Updates delivery targets such as email addresses and external apis in
an
existing notification rule.
Returns:
NotificationRule: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_notification_rule called.')
# Prepare query URL
self.logger.info('Preparing query URL for update_notification_rule.')
_url_path = '/public/alertNotificationRules'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_notification_rule.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_notification_rule.')
_request = self.http_client.put(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'update_notification_rule')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_notification_rule.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, NotificationRule.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def delete_notification_rule(self,
rule_id):
"""Does a DELETE request to /public/alertNotificationRules/{ruleId}.
Deletes an existing alert notification rule matching the rule id.
Args:
rule_id (long|int): Specifies the rule id.
Returns:
void: Response from the API. No Content
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('delete_notification_rule called.')
# Validate required parameters
self.logger.info('Validating required parameters for delete_notification_rule.')
self.validate_parameters(rule_id=rule_id)
# Prepare query URL
self.logger.info('Preparing query URL for delete_notification_rule.')
_url_path = '/public/alertNotificationRules/{ruleId}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'ruleId': rule_id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
self.logger.info('Preparing and executing request for delete_notification_rule.')
_request = self.http_client.delete(_query_url)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'delete_notification_rule')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for delete_notification_rule.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_resolutions(self,
max_resolutions,
tenant_ids=None,
all_under_hierarchy=None,
resolution_id_list=None,
alert_id_list=None,
start_date_usecs=None,
end_date_usecs=None):
"""Does a GET request to /public/alertResolutions.
Returns all Alert Resolution objects found on the Cohesity Cluster
that match the filter criteria specified using parameters.
If no filter parameters are specified,
all Alert Resolution objects are returned.
Each object provides details about the Alert Resolution such as
the resolution summary and details.
Args:
max_resolutions (int): Specifies the number of returned
Resolutions to be returned. The newest Resolutions are
returned.
tenant_ids (list of string, optional): TenantIds contains ids of
the tenants for which objects are to be returned.
all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
if objects of all the tenants under the hierarchy of the
logged in user's organization should be returned.
resolution_id_list (list of long|int, optional): Specifies list of
Alert Resolution ids to filter resolutions by.
alert_id_list (list of string, optional): Specifies list of Alert
Resolution ids to filter resolutions by.
start_date_usecs (long|int, optional): Specifies Start Time Unix
epoch in microseconds to filter resolutions by.
end_date_usecs (long|int, optional): Specifies End Time Unix epoch
in microseconds to filter resolutions by.
Returns:
list of AlertResolution: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_resolutions called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_resolutions.')
self.validate_parameters(max_resolutions=max_resolutions)
# Prepare query URL
self.logger.info('Preparing query URL for get_resolutions.')
_url_path = '/public/alertResolutions'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'maxResolutions': max_resolutions,
'tenantIds': tenant_ids,
'allUnderHierarchy': all_under_hierarchy,
'resolutionIdList': resolution_id_list,
'alertIdList': alert_id_list,
'startDateUsecs': start_date_usecs,
'endDateUsecs': end_date_usecs
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_resolutions.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_resolutions.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_resolutions')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_resolutions.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AlertResolution.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_resolution(self,
body):
"""Does a POST request to /public/alertResolutions.
Create an Alert Resolution and apply it to one or more Alerts.
Mark the Alerts as resolved.
Args:
body (AlertResolutionRequest): Request to create an Alert
Resolution and apply it to the specified Alerts.
Returns:
AlertResolution: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_resolution called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_resolution.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_resolution.')
_url_path = '/public/alertResolutions'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_resolution.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_resolution.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_resolution')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_resolution.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AlertResolution.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_resolution_by_id(self,
id):
"""Does a GET request to /public/alertResolutions/{id}.
Returns the Alert Resolution object corresponding to passed in Alert
Resolution Id.
Args:
id (long|int): Unique id of the Alert Resolution to return.
Returns:
AlertResolution: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_resolution_by_id called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_resolution_by_id.')
self.validate_parameters(id=id)
# Prepare query URL
self.logger.info('Preparing query URL for get_resolution_by_id.')
_url_path = '/public/alertResolutions/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'id': id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_resolution_by_id.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_resolution_by_id.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_resolution_by_id')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_resolution_by_id.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AlertResolution.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def update_resolution(self,
id,
body):
"""Does a PUT request to /public/alertResolutions/{id}.
Apply an existing Alert Resolution to one or more additional Alerts.
Mark those additional Alerts as resolved.
Args:
id (long|int): Unique id of the Alert Resolution to return.
body (UpdateResolutionParams): Request to apply an existing
resolution to the specified Alerts.
Returns:
AlertResolution: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_resolution called.')
# Validate required parameters
self.logger.info('Validating required parameters for update_resolution.')
self.validate_parameters(id=id,
body=body)
# Prepare query URL
self.logger.info('Preparing query URL for update_resolution.')
_url_path = '/public/alertResolutions/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'id': id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_resolution.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_resolution.')
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'update_resolution')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_resolution.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AlertResolution.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
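    # Illustrative usage sketch (not part of the generated SDK code): applying
    # an existing resolution to additional alerts. `alerts_controller` is an
    # assumed instance name, and the UpdateResolutionParams field name
    # `alert_id_list` is an assumption based on the docstring above.
    #
    #     params = UpdateResolutionParams()
    #     params.alert_id_list = ['alert-id-1', 'alert-id-2']  # placeholder ids
    #     updated = alerts_controller.update_resolution(id=42, body=params)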
def get_alert_types(self):
"""Does a GET request to /public/alertTypes.
        Returns the Alert Types (AlertMetadata objects) registered on the
        Cohesity Cluster. This endpoint takes no filter parameters; all
        registered Alert Types are returned.
Returns:
list of AlertMetadata: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_alert_types called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_alert_types.')
_url_path = '/public/alertTypes'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_alert_types.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_alert_types.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_alert_types')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_alert_types.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, AlertMetadata.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
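    # Illustrative usage sketch (not part of the generated SDK code): listing
    # all registered alert types. `alerts_controller` and the AlertMetadata
    # attribute `alert_type_id` are assumptions used only for illustration.
    #
    #     alert_types = alerts_controller.get_alert_types()
    #     for alert_type in alert_types:
    #         print(alert_type.alert_type_id)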
def get_alerts(self,
max_alerts,
tenant_ids=None,
all_under_hierarchy=None,
alert_id_list=None,
alert_type_list=None,
alert_category_list=None,
property_key=None,
property_value=None,
start_date_usecs=None,
end_date_usecs=None,
alert_state_list=None,
alert_severity_list=None,
resolution_id_list=None):
"""Does a GET request to /public/alerts.
Returns all Alert objects found on the Cohesity Cluster that
match the filter criteria specified using parameters.
The Cohesity Cluster creates an Alert when a potential problem
is found or when a threshold has been exceeded on the Cohesity
Cluster.
If no filter parameters are specified, all Alert objects are
returned.
Each object provides details about the Alert such as the Status and
Severity.
Args:
            max_alerts (int): Specifies the maximum number of Alerts to
                return. The newest Alerts are returned.
tenant_ids (list of string, optional): TenantIds contains ids of
the tenants for which objects are to be returned.
all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
if objects of all the tenants under the hierarchy of the
logged in user's organization should be returned.
alert_id_list (list of string, optional): Specifies list of Alert
ids to filter alerts by.
alert_type_list (list of int, optional): Specifies list of Alert
Types to filter alerts by.
alert_category_list (list of AlertCategoryListGetAlertsEnum,
optional): Specifies list of Alert Categories.
property_key (string, optional): Specifies name of the property to
filter alerts by.
property_value (string, optional): Specifies value of the property
to filter alerts by.
start_date_usecs (long|int, optional): Specifies start time Unix
epoch time in microseconds to filter alerts by.
end_date_usecs (long|int, optional): Specifies end time Unix epoch
time in microseconds to filter alerts by.
alert_state_list (list of AlertStateListEnum, optional): Specifies
list of Alert States to filter alerts by.
alert_severity_list (list of AlertSeverityListEnum, optional):
Specifies list of Alert severity to filter alerts by.
resolution_id_list (list of long|int, optional): Specifies alert
resolution ids to filter alerts by.
Returns:
list of Alert: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_alerts called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_alerts.')
self.validate_parameters(max_alerts=max_alerts)
# Prepare query URL
self.logger.info('Preparing query URL for get_alerts.')
_url_path = '/public/alerts'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'maxAlerts': max_alerts,
'tenantIds': tenant_ids,
'allUnderHierarchy': all_under_hierarchy,
'alertIdList': alert_id_list,
'alertTypeList': alert_type_list,
'alertCategoryList': alert_category_list,
'propertyKey': property_key,
'propertyValue': property_value,
'startDateUsecs': start_date_usecs,
'endDateUsecs': end_date_usecs,
'alertStateList': alert_state_list,
'alertSeverityList': alert_severity_list,
'resolutionIdList': resolution_id_list
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_alerts.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_alerts.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_alerts')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_alerts.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, Alert.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
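    # Illustrative usage sketch (not part of the generated SDK code): fetching
    # the 100 newest alerts in a given state. Only the parameter names come
    # from the signature above; `alerts_controller`, the state value 'kOpen',
    # and the `id`/`severity` attributes are assumptions.
    #
    #     alerts = alerts_controller.get_alerts(max_alerts=100,
    #                                           alert_state_list=['kOpen'])
    #     for alert in alerts:
    #         print(alert.id, alert.severity)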
def get_alert_by_id(self,
id):
"""Does a GET request to /public/alerts/{id}.
Returns the Alert object corresponding to the specified id.
Args:
id (string): Unique id of the Alert to return.
Returns:
Alert: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_alert_by_id called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_alert_by_id.')
self.validate_parameters(id=id)
# Prepare query URL
self.logger.info('Preparing query URL for get_alert_by_id.')
_url_path = '/public/alerts/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'id': id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_alert_by_id.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_alert_by_id.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_alert_by_id')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_alert_by_id.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, Alert.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
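    # Illustrative usage sketch (not part of the generated SDK code): looking
    # up a single alert by its string id. `alerts_controller` and the id shown
    # are placeholders.
    #
    #     alert = alerts_controller.get_alert_by_id(id='123:1567000000:789')
    #     # `alert` is an Alert model deserialized from the JSON response.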